ngram
listlengths
0
82k
[ "= [] for obj in annotations: x1 = obj[\"bbox\"][0] y1", "max_labels=500,),) self, data_dir=None, json_file=\"train_half.json\", name=\"train\", img_size=(608, 1088), preproc=None, ): \"\"\"", "the default value '1' avoid to break augmentation & evaluation", "= self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations = self.coco.loadAnns(anno_ids) objs = [] for", "objs.append(obj) num_objs = len(objs) res = np.zeros((num_objs, 6)) for ix,", "\"file_name\" in im_ann else \"{:012}\".format(id_) + \".jpg\" img_info = (height,", "# json_file=self.train_ann, # name='train', # img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406),", "width = im_ann[\"width\"] height = im_ann[\"height\"] #frame_id = im_ann[\"frame_id\"] :", "nw, dx, dy. h, w (int): original shape of the", "default value '1' avoid to break augmentation & evaluation processes", "label pair for the given index is picked up and", "Used for evaluation. \"\"\" img, target, img_info, img_id = self.pull_item(index)", "= self.annotations[index] # load image and preprocess img_file = os.path.join(", "evaluation processes frame_id = 1 #video_id = im_ann[\"video_id\"] : the", "x1 = obj[\"bbox\"][0] y1 = obj[\"bbox\"][1] x2 = x1 +", "img_info, file_name = self.annotations[index] # load image and preprocess img_file", "COCO(os.path.join(self.data_dir, \"annotations\", self.json_file)) self.ids = self.coco.getImgIds() self.class_ids = sorted(self.coco.getCatIds()) cats", "memory by COCO API. Args: data_dir (str): dataset root directory", "def pull_item(self, index): id_ = self.ids[index] res, img_info, file_name =", "COCO dataset class. \"\"\" def __init__( # This function is", "image without padding dx, dy (int): pad size img_id (int):", "else \"{:012}\".format(id_) + \".jpg\" img_info = (height, width, frame_id, video_id,", "= self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4] = obj[\"clean_bbox\"] res[ix, 4] = cls", "nh, nw, dx, dy. 
h, w (int): original shape of", "dx, dy (int): pad size img_id (int): same as the", "in cats]) self.annotations = self._load_coco_annotations() self.name = name self.img_size =", "img, target, img_info, img_id = self.pull_item(index) if self.preproc is not", "the given index is picked up and pre-processed. Args: index", "w, h]: class (float): class index. xc, yc (float) :", "(int): same as the input index. Used for evaluation. \"\"\"", "preprocess img_file = os.path.join( self.data_dir, self.name, file_name ) img =", "(str): dataset root directory json_file (str): COCO json file name", "image / label pair for the given index is picked", "im_ann, annotations return (res, img_info, file_name) def load_anno(self, index): return", "def load_anno_from_ids(self, id_): im_ann = self.coco.loadImgs(id_)[0] width = im_ann[\"width\"] height", "pycocotools.coco import COCO import os from ..dataloading import get_yolox_datadir from", "load_anno_from_ids(self, id_): im_ann = self.coco.loadImgs(id_)[0] width = im_ann[\"width\"] height =", "import COCO import os from ..dataloading import get_yolox_datadir from .datasets_wrapper", "\"\"\" super().__init__(img_size) if data_dir is None: data_dir = os.path.join(get_yolox_datadir(), \"mot\")", "target = self.preproc(img, target, self.input_dim) return img, target, img_info, img_id", "json_file=\"train_half.json\", name=\"train\", img_size=(608, 1088), preproc=None, ): \"\"\" COCO dataset initialization.", "None return img, res.copy(), img_info, np.array([id_]) @Dataset.resize_getitem def __getitem__(self, index):", "values range from 0 to 1. w, h (float) :", "@Dataset.resize_getitem def __getitem__(self, index): \"\"\" One image / label pair", ">= y1: obj[\"clean_bbox\"] = [x1, y1, x2, y2] objs.append(obj) num_objs", "processes video_id = 1 anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations =", "data name (e.g. 
'train2017' or 'val2017') img_size (int): target image", "def _load_coco_annotations(self): return [self.load_anno_from_ids(_ids) for _ids in self.ids] def load_anno_from_ids(self,", "\"mot\"), # json_file=self.train_ann, # name='train', # img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485, 0.456,", "self.data_dir, self.name, file_name ) img = cv2.imread(img_file) assert img is", "# This function is called in the exps yolox_x_mot17_half.py in", "consists of [class, xc, yc, w, h]: class (float): class", "center of bbox whose values range from 0 to 1.", "cats = self.coco.loadCats(self.coco.getCatIds()) self._classes = tuple([c[\"name\"] for c in cats])", "if obj[\"area\"] > 0 and x2 >= x1 and y2", "class. \"\"\" def __init__( # This function is called in", "self.coco.loadAnns(anno_ids) objs = [] for obj in annotations: x1 =", "(str): COCO json file name name (str): COCO data name", "np.zeros((num_objs, 6)) for ix, obj in enumerate(objs): cls = self.class_ids.index(obj[\"category_id\"])", "# load image and preprocess img_file = os.path.join( self.data_dir, self.name,", "obj in enumerate(objs): cls = self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4] = obj[\"clean_bbox\"]", "called in the exps yolox_x_mot17_half.py in this way: dataset =", "self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4] = obj[\"clean_bbox\"] res[ix, 4] = cls #res[ix,", "img, target = self.preproc(img, target, self.input_dim) return img, target, img_info,", "nw (int): shape of the resized image without padding dx,", "y2] objs.append(obj) num_objs = len(objs) res = np.zeros((num_objs, 6)) for", "assert img is not None return img, res.copy(), img_info, np.array([id_])", "picked up and pre-processed. Args: index (int): data index Returns:", "target image size after pre-processing preproc: data augmentation strategy \"\"\"", "index is picked up and pre-processed. Args: index (int): data", "\"\"\" COCO dataset class. 
\"\"\" def __init__( # This function", "x2 = x1 + obj[\"bbox\"][2] y2 = y1 + obj[\"bbox\"][3]", "if \"file_name\" in im_ann else \"{:012}\".format(id_) + \".jpg\" img_info =", "y2 >= y1: obj[\"clean_bbox\"] = [x1, y1, x2, y2] objs.append(obj)", "range from 0 to 1. w, h (float) : size", "values range from 0 to 1. info_img : tuple of", "not None: img, target = self.preproc(img, target, self.input_dim) return img,", "= obj[\"track_id\"] # See comment line 66; same comment for", "resized image without padding dx, dy (int): pad size img_id", "import get_yolox_datadir from .datasets_wrapper import Dataset class MOTDataset(Dataset): \"\"\" COCO", "= sorted(self.coco.getCatIds()) cats = self.coco.loadCats(self.coco.getCatIds()) self._classes = tuple([c[\"name\"] for c", "[] for obj in annotations: x1 = obj[\"bbox\"][0] y1 =", "= self.coco.loadImgs(id_)[0] width = im_ann[\"width\"] height = im_ann[\"height\"] #frame_id =", "augmentation strategy \"\"\" super().__init__(img_size) if data_dir is None: data_dir =", "not None return img, res.copy(), img_info, np.array([id_]) @Dataset.resize_getitem def __getitem__(self,", "im_ann[\"file_name\"] if \"file_name\" in im_ann else \"{:012}\".format(id_) + \".jpg\" img_info", "img_id (int): same as the input index. Used for evaluation.", "same comment for the default value 1 res[ix, 5] =", "of the image nh, nw (int): shape of the resized", "Dataset class MOTDataset(Dataset): \"\"\" COCO dataset class. \"\"\" def __init__(", "img (numpy.ndarray): pre-processed image padded_labels (torch.Tensor): pre-processed label data. The", "= obj[\"clean_bbox\"] res[ix, 4] = cls #res[ix, 5] = obj[\"track_id\"]", "frame_id, video_id, file_name) del im_ann, annotations return (res, img_info, file_name)", "objs = [] for obj in annotations: x1 = obj[\"bbox\"][0]", "preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), # std=(0.229, 0.224, 0.225), # max_labels=500,),) self,", "yc, w, h]: class (float): class index. 
xc, yc (float)", "[self.load_anno_from_ids(_ids) for _ids in self.ids] def load_anno_from_ids(self, id_): im_ann =", "self, data_dir=None, json_file=\"train_half.json\", name=\"train\", img_size=(608, 1088), preproc=None, ): \"\"\" COCO", "pad size img_id (int): same as the input index. Used", "One image / label pair for the given index is", "Returns: img (numpy.ndarray): pre-processed image padded_labels (torch.Tensor): pre-processed label data.", "= im_ann[\"video_id\"] : the default value '1' avoid to break", "dataset class. \"\"\" def __init__( # This function is called", "1 file_name = im_ann[\"file_name\"] if \"file_name\" in im_ann else \"{:012}\".format(id_)", "pair for the given index is picked up and pre-processed.", "= y1 + obj[\"bbox\"][3] if obj[\"area\"] > 0 and x2", "= im_ann[\"height\"] #frame_id = im_ann[\"frame_id\"] : the default value '1'", "im_ann[\"frame_id\"] : the default value '1' avoid to break augmentation", "The shape is :math:`[max_labels, 5]`. each label consists of [class,", "os.path.join( self.data_dir, self.name, file_name ) img = cv2.imread(img_file) assert img", "is called in the exps yolox_x_mot17_half.py in this way: dataset", "enumerate(objs): cls = self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4] = obj[\"clean_bbox\"] res[ix, 4]", "= os.path.join(get_yolox_datadir(), \"mot\") self.data_dir = data_dir self.json_file = json_file self.coco", "to 1. w, h (float) : size of bbox whose", "without padding dx, dy (int): pad size img_id (int): same", "__getitem__(self, index): \"\"\" One image / label pair for the", "are read into memory by COCO API. Args: data_dir (str):", "directory json_file (str): COCO json file name name (str): COCO", "pre-processed image padded_labels (torch.Tensor): pre-processed label data. 
The shape is", "augmentation & evaluation processes video_id = 1 anno_ids = self.coco.getAnnIds(imgIds=[int(id_)],", "num_objs = len(objs) res = np.zeros((num_objs, 6)) for ix, obj", "\"annotations\", self.json_file)) self.ids = self.coco.getImgIds() self.class_ids = sorted(self.coco.getCatIds()) cats =", "= self.coco.getImgIds() self.class_ids = sorted(self.coco.getCatIds()) cats = self.coco.loadCats(self.coco.getCatIds()) self._classes =", "for c in cats]) self.annotations = self._load_coco_annotations() self.name = name", "of h, w, nh, nw, dx, dy. h, w (int):", "return self.annotations[index][0] def pull_item(self, index): id_ = self.ids[index] res, img_info,", "..dataloading import get_yolox_datadir from .datasets_wrapper import Dataset class MOTDataset(Dataset): \"\"\"", "h, w (int): original shape of the image nh, nw", "len(objs) res = np.zeros((num_objs, 6)) for ix, obj in enumerate(objs):", "'1' avoid to break augmentation & evaluation processes video_id =", "This function is called in the exps yolox_x_mot17_half.py in this", "in the exps yolox_x_mot17_half.py in this way: dataset = MOTDataset(", "annotations = self.coco.loadAnns(anno_ids) objs = [] for obj in annotations:", "\"{:012}\".format(id_) + \".jpg\" img_info = (height, width, frame_id, video_id, file_name)", "= json_file self.coco = COCO(os.path.join(self.data_dir, \"annotations\", self.json_file)) self.ids = self.coco.getImgIds()", "of bbox whose values range from 0 to 1. w,", "for evaluation. \"\"\" img, target, img_info, img_id = self.pull_item(index) if", "file name name (str): COCO data name (e.g. 
'train2017' or", "after pre-processing preproc: data augmentation strategy \"\"\" super().__init__(img_size) if data_dir", "processes frame_id = 1 #video_id = im_ann[\"video_id\"] : the default", "file_name ) img = cv2.imread(img_file) assert img is not None", "\".jpg\" img_info = (height, width, frame_id, video_id, file_name) del im_ann,", "= self.coco.loadCats(self.coco.getCatIds()) self._classes = tuple([c[\"name\"] for c in cats]) self.annotations", "self.img_size = img_size self.preproc = preproc def __len__(self): return len(self.ids)", "# max_labels=500,),) self, data_dir=None, json_file=\"train_half.json\", name=\"train\", img_size=(608, 1088), preproc=None, ):", "super().__init__(img_size) if data_dir is None: data_dir = os.path.join(get_yolox_datadir(), \"mot\") self.data_dir", "name='train', # img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), # std=(0.229, 0.224,", "self.json_file = json_file self.coco = COCO(os.path.join(self.data_dir, \"annotations\", self.json_file)) self.ids =", "for ix, obj in enumerate(objs): cls = self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4]", "+ obj[\"bbox\"][3] if obj[\"area\"] > 0 and x2 >= x1", "= obj[\"bbox\"][1] x2 = x1 + obj[\"bbox\"][2] y2 = y1", "import os from ..dataloading import get_yolox_datadir from .datasets_wrapper import Dataset", "comment for the default value 1 res[ix, 5] = 1", "index): return self.annotations[index][0] def pull_item(self, index): id_ = self.ids[index] res,", "1088), preproc=None, ): \"\"\" COCO dataset initialization. Annotation data are", "avoid to break augmentation & evaluation processes frame_id = 1", "0 and x2 >= x1 and y2 >= y1: obj[\"clean_bbox\"]", "(int): pad size img_id (int): same as the input index.", "import Dataset class MOTDataset(Dataset): \"\"\" COCO dataset class. \"\"\" def", "to 1. 
info_img : tuple of h, w, nh, nw,", "function is called in the exps yolox_x_mot17_half.py in this way:", "augmentation & evaluation processes frame_id = 1 #video_id = im_ann[\"video_id\"]", "im_ann[\"video_id\"] : the default value '1' avoid to break augmentation", "self.coco = COCO(os.path.join(self.data_dir, \"annotations\", self.json_file)) self.ids = self.coco.getImgIds() self.class_ids =", "self.coco.loadCats(self.coco.getCatIds()) self._classes = tuple([c[\"name\"] for c in cats]) self.annotations =", "1 anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations = self.coco.loadAnns(anno_ids) objs =", "the default value 1 res[ix, 5] = 1 file_name =", "None: img, target = self.preproc(img, target, self.input_dim) return img, target,", "& evaluation processes video_id = 1 anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)", "h (float) : size of bbox whose values range from", "y2 = y1 + obj[\"bbox\"][3] if obj[\"area\"] > 0 and", "index (int): data index Returns: img (numpy.ndarray): pre-processed image padded_labels", "video_id = 1 anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations = self.coco.loadAnns(anno_ids)", "def __len__(self): return len(self.ids) def _load_coco_annotations(self): return [self.load_anno_from_ids(_ids) for _ids", "> 0 and x2 >= x1 and y2 >= y1:", "bbox whose values range from 0 to 1. info_img :", "(numpy.ndarray): pre-processed image padded_labels (torch.Tensor): pre-processed label data. 
The shape", "json_file=self.train_ann, # name='train', # img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), #", "x2 >= x1 and y2 >= y1: obj[\"clean_bbox\"] = [x1,", "res[ix, 5] = 1 file_name = im_ann[\"file_name\"] if \"file_name\" in", "del im_ann, annotations return (res, img_info, file_name) def load_anno(self, index):", "= self.coco.loadAnns(anno_ids) objs = [] for obj in annotations: x1", "obj[\"bbox\"][3] if obj[\"area\"] > 0 and x2 >= x1 and", "name (str): COCO data name (e.g. 'train2017' or 'val2017') img_size", "json file name name (str): COCO data name (e.g. 'train2017'", "self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations = self.coco.loadAnns(anno_ids) objs = [] for obj", "[class, xc, yc, w, h]: class (float): class index. xc,", "= COCO(os.path.join(self.data_dir, \"annotations\", self.json_file)) self.ids = self.coco.getImgIds() self.class_ids = sorted(self.coco.getCatIds())", "file_name) del im_ann, annotations return (res, img_info, file_name) def load_anno(self,", "1. 
w, h (float) : size of bbox whose values", "w, h (float) : size of bbox whose values range", "shape of the image nh, nw (int): shape of the", "anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations = self.coco.loadAnns(anno_ids) objs = []", "width, frame_id, video_id, file_name) del im_ann, annotations return (res, img_info,", "0.224, 0.225), # max_labels=500,),) self, data_dir=None, json_file=\"train_half.json\", name=\"train\", img_size=(608, 1088),", "0.406), # std=(0.229, 0.224, 0.225), # max_labels=500,),) self, data_dir=None, json_file=\"train_half.json\",", "image nh, nw (int): shape of the resized image without", "6)) for ix, obj in enumerate(objs): cls = self.class_ids.index(obj[\"category_id\"]) res[ix,", "yc (float) : center of bbox whose values range from", "and x2 >= x1 and y2 >= y1: obj[\"clean_bbox\"] =", "data augmentation strategy \"\"\" super().__init__(img_size) if data_dir is None: data_dir", "res[ix, 4] = cls #res[ix, 5] = obj[\"track_id\"] # See", "0.225), # max_labels=500,),) self, data_dir=None, json_file=\"train_half.json\", name=\"train\", img_size=(608, 1088), preproc=None,", "0:4] = obj[\"clean_bbox\"] res[ix, 4] = cls #res[ix, 5] =", "preproc=None, ): \"\"\" COCO dataset initialization. Annotation data are read", "# data_dir=os.path.join(get_yolox_datadir(), \"mot\"), # json_file=self.train_ann, # name='train', # img_size=self.input_size, #", "= preproc def __len__(self): return len(self.ids) def _load_coco_annotations(self): return [self.load_anno_from_ids(_ids)", "for _ids in self.ids] def load_anno_from_ids(self, id_): im_ann = self.coco.loadImgs(id_)[0]", "up and pre-processed. 
Args: index (int): data index Returns: img", "self.coco.loadImgs(id_)[0] width = im_ann[\"width\"] height = im_ann[\"height\"] #frame_id = im_ann[\"frame_id\"]", "img = cv2.imread(img_file) assert img is not None return img,", "__init__( # This function is called in the exps yolox_x_mot17_half.py", "dy (int): pad size img_id (int): same as the input", "4] = cls #res[ix, 5] = obj[\"track_id\"] # See comment", "whose values range from 0 to 1. info_img : tuple", "name (e.g. 'train2017' or 'val2017') img_size (int): target image size", "and y2 >= y1: obj[\"clean_bbox\"] = [x1, y1, x2, y2]", "cls #res[ix, 5] = obj[\"track_id\"] # See comment line 66;", "self.name, file_name ) img = cv2.imread(img_file) assert img is not", "into memory by COCO API. Args: data_dir (str): dataset root", "= name self.img_size = img_size self.preproc = preproc def __len__(self):", "res.copy(), img_info, np.array([id_]) @Dataset.resize_getitem def __getitem__(self, index): \"\"\" One image", "= 1 anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations = self.coco.loadAnns(anno_ids) objs", "this way: dataset = MOTDataset( # data_dir=os.path.join(get_yolox_datadir(), \"mot\"), # json_file=self.train_ann,", ": the default value '1' avoid to break augmentation &", "w (int): original shape of the image nh, nw (int):", "input index. Used for evaluation. 
\"\"\" img, target, img_info, img_id", "in self.ids] def load_anno_from_ids(self, id_): im_ann = self.coco.loadImgs(id_)[0] width =", "if data_dir is None: data_dir = os.path.join(get_yolox_datadir(), \"mot\") self.data_dir =", "cats]) self.annotations = self._load_coco_annotations() self.name = name self.img_size = img_size", "def __getitem__(self, index): \"\"\" One image / label pair for", "None: data_dir = os.path.join(get_yolox_datadir(), \"mot\") self.data_dir = data_dir self.json_file =", "img is not None return img, res.copy(), img_info, np.array([id_]) @Dataset.resize_getitem", "= (height, width, frame_id, video_id, file_name) del im_ann, annotations return", ": center of bbox whose values range from 0 to", "id_): im_ann = self.coco.loadImgs(id_)[0] width = im_ann[\"width\"] height = im_ann[\"height\"]", "iscrowd=False) annotations = self.coco.loadAnns(anno_ids) objs = [] for obj in", "annotations: x1 = obj[\"bbox\"][0] y1 = obj[\"bbox\"][1] x2 = x1", "import cv2 import numpy as np from pycocotools.coco import COCO", "#video_id = im_ann[\"video_id\"] : the default value '1' avoid to", "index): id_ = self.ids[index] res, img_info, file_name = self.annotations[index] #", "import numpy as np from pycocotools.coco import COCO import os", "pull_item(self, index): id_ = self.ids[index] res, img_info, file_name = self.annotations[index]", "# preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), # std=(0.229, 0.224, 0.225), # max_labels=500,),)", "nh, nw (int): shape of the resized image without padding", "im_ann[\"height\"] #frame_id = im_ann[\"frame_id\"] : the default value '1' avoid", "self.pull_item(index) if self.preproc is not None: img, target = self.preproc(img,", "xc, yc (float) : center of bbox whose values range", "padding dx, dy (int): pad size img_id (int): same as", "return [self.load_anno_from_ids(_ids) for _ids in self.ids] def load_anno_from_ids(self, id_): im_ann", "x1 + obj[\"bbox\"][2] y2 = y1 + obj[\"bbox\"][3] if obj[\"area\"]", "# See 
comment line 66; same comment for the default", "See comment line 66; same comment for the default value", "whose values range from 0 to 1. w, h (float)", "video_id, file_name) del im_ann, annotations return (res, img_info, file_name) def", "= [x1, y1, x2, y2] objs.append(obj) num_objs = len(objs) res", "obj[\"area\"] > 0 and x2 >= x1 and y2 >=", "0 to 1. w, h (float) : size of bbox", "preproc: data augmentation strategy \"\"\" super().__init__(img_size) if data_dir is None:", ": tuple of h, w, nh, nw, dx, dy. h,", "= im_ann[\"width\"] height = im_ann[\"height\"] #frame_id = im_ann[\"frame_id\"] : the", "img_size=(608, 1088), preproc=None, ): \"\"\" COCO dataset initialization. Annotation data", "y1, x2, y2] objs.append(obj) num_objs = len(objs) res = np.zeros((num_objs,", "img, res.copy(), img_info, np.array([id_]) @Dataset.resize_getitem def __getitem__(self, index): \"\"\" One", "img_info, img_id = self.pull_item(index) if self.preproc is not None: img,", "obj in annotations: x1 = obj[\"bbox\"][0] y1 = obj[\"bbox\"][1] x2", "initialization. Annotation data are read into memory by COCO API.", "data index Returns: img (numpy.ndarray): pre-processed image padded_labels (torch.Tensor): pre-processed", "from 0 to 1. w, h (float) : size of", "root directory json_file (str): COCO json file name name (str):", "index. xc, yc (float) : center of bbox whose values", "_ids in self.ids] def load_anno_from_ids(self, id_): im_ann = self.coco.loadImgs(id_)[0] width", "(int): data index Returns: img (numpy.ndarray): pre-processed image padded_labels (torch.Tensor):", "std=(0.229, 0.224, 0.225), # max_labels=500,),) self, data_dir=None, json_file=\"train_half.json\", name=\"train\", img_size=(608,", "Args: data_dir (str): dataset root directory json_file (str): COCO json", "(e.g. 
'train2017' or 'val2017') img_size (int): target image size after", "preproc def __len__(self): return len(self.ids) def _load_coco_annotations(self): return [self.load_anno_from_ids(_ids) for", "img_info, np.array([id_]) @Dataset.resize_getitem def __getitem__(self, index): \"\"\" One image /", "np.array([id_]) @Dataset.resize_getitem def __getitem__(self, index): \"\"\" One image / label", "(height, width, frame_id, video_id, file_name) del im_ann, annotations return (res,", "COCO API. Args: data_dir (str): dataset root directory json_file (str):", "'train2017' or 'val2017') img_size (int): target image size after pre-processing", "# name='train', # img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), # std=(0.229,", "\"\"\" COCO dataset initialization. Annotation data are read into memory", "sorted(self.coco.getCatIds()) cats = self.coco.loadCats(self.coco.getCatIds()) self._classes = tuple([c[\"name\"] for c in", "(int): target image size after pre-processing preproc: data augmentation strategy", "if self.preproc is not None: img, target = self.preproc(img, target,", "the input index. Used for evaluation. 
\"\"\" img, target, img_info,", "in this way: dataset = MOTDataset( # data_dir=os.path.join(get_yolox_datadir(), \"mot\"), #", "def __init__( # This function is called in the exps", "img_info, file_name) def load_anno(self, index): return self.annotations[index][0] def pull_item(self, index):", "COCO json file name name (str): COCO data name (e.g.", "[x1, y1, x2, y2] objs.append(obj) num_objs = len(objs) res =", "& evaluation processes frame_id = 1 #video_id = im_ann[\"video_id\"] :", "self.annotations[index][0] def pull_item(self, index): id_ = self.ids[index] res, img_info, file_name", "res, img_info, file_name = self.annotations[index] # load image and preprocess", "id_ = self.ids[index] res, img_info, file_name = self.annotations[index] # load", "self.coco.getImgIds() self.class_ids = sorted(self.coco.getCatIds()) cats = self.coco.loadCats(self.coco.getCatIds()) self._classes = tuple([c[\"name\"]", "self.annotations = self._load_coco_annotations() self.name = name self.img_size = img_size self.preproc", "= os.path.join( self.data_dir, self.name, file_name ) img = cv2.imread(img_file) assert", "ix, obj in enumerate(objs): cls = self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4] =", "cv2.imread(img_file) assert img is not None return img, res.copy(), img_info,", ": size of bbox whose values range from 0 to", "value '1' avoid to break augmentation & evaluation processes video_id", "MOTDataset( # data_dir=os.path.join(get_yolox_datadir(), \"mot\"), # json_file=self.train_ann, # name='train', # img_size=self.input_size,", "(float) : center of bbox whose values range from 0", "from 0 to 1. 
info_img : tuple of h, w,", "= cv2.imread(img_file) assert img is not None return img, res.copy(),", "for the default value 1 res[ix, 5] = 1 file_name", "numpy as np from pycocotools.coco import COCO import os from", "json_file (str): COCO json file name name (str): COCO data", "5] = obj[\"track_id\"] # See comment line 66; same comment", "def load_anno(self, index): return self.annotations[index][0] def pull_item(self, index): id_ =", "0 to 1. info_img : tuple of h, w, nh,", "y1 = obj[\"bbox\"][1] x2 = x1 + obj[\"bbox\"][2] y2 =", "image and preprocess img_file = os.path.join( self.data_dir, self.name, file_name )", "'val2017') img_size (int): target image size after pre-processing preproc: data", "\"\"\" def __init__( # This function is called in the", "line 66; same comment for the default value 1 res[ix,", "cv2 import numpy as np from pycocotools.coco import COCO import", "return (res, img_info, file_name) def load_anno(self, index): return self.annotations[index][0] def", "API. Args: data_dir (str): dataset root directory json_file (str): COCO", "given index is picked up and pre-processed. Args: index (int):", "img_id = self.pull_item(index) if self.preproc is not None: img, target", "image padded_labels (torch.Tensor): pre-processed label data. 
The shape is :math:`[max_labels,", "= MOTDataset( # data_dir=os.path.join(get_yolox_datadir(), \"mot\"), # json_file=self.train_ann, # name='train', #", "66; same comment for the default value 1 res[ix, 5]", "img_file = os.path.join( self.data_dir, self.name, file_name ) img = cv2.imread(img_file)", "or 'val2017') img_size (int): target image size after pre-processing preproc:", "obj[\"track_id\"] # See comment line 66; same comment for the", "load_anno(self, index): return self.annotations[index][0] def pull_item(self, index): id_ = self.ids[index]", "res = np.zeros((num_objs, 6)) for ix, obj in enumerate(objs): cls", "obj[\"clean_bbox\"] = [x1, y1, x2, y2] objs.append(obj) num_objs = len(objs)", "name self.img_size = img_size self.preproc = preproc def __len__(self): return", "= 1 #video_id = im_ann[\"video_id\"] : the default value '1'", "index. Used for evaluation. \"\"\" img, target, img_info, img_id =", "res[ix, 0:4] = obj[\"clean_bbox\"] res[ix, 4] = cls #res[ix, 5]", "class index. xc, yc (float) : center of bbox whose", "return len(self.ids) def _load_coco_annotations(self): return [self.load_anno_from_ids(_ids) for _ids in self.ids]", "self._load_coco_annotations() self.name = name self.img_size = img_size self.preproc = preproc", "is not None return img, res.copy(), img_info, np.array([id_]) @Dataset.resize_getitem def", "= img_size self.preproc = preproc def __len__(self): return len(self.ids) def", "of bbox whose values range from 0 to 1. info_img", "(int): shape of the resized image without padding dx, dy", "img_size self.preproc = preproc def __len__(self): return len(self.ids) def _load_coco_annotations(self):", "COCO dataset initialization. 
Annotation data are read into memory by", "shape of the resized image without padding dx, dy (int):", "1 res[ix, 5] = 1 file_name = im_ann[\"file_name\"] if \"file_name\"", "avoid to break augmentation & evaluation processes video_id = 1", "break augmentation & evaluation processes frame_id = 1 #video_id =", "and preprocess img_file = os.path.join( self.data_dir, self.name, file_name ) img", "from .datasets_wrapper import Dataset class MOTDataset(Dataset): \"\"\" COCO dataset class.", "strategy \"\"\" super().__init__(img_size) if data_dir is None: data_dir = os.path.join(get_yolox_datadir(),", "range from 0 to 1. info_img : tuple of h,", "): \"\"\" COCO dataset initialization. Annotation data are read into", "0.456, 0.406), # std=(0.229, 0.224, 0.225), # max_labels=500,),) self, data_dir=None,", "img_size (int): target image size after pre-processing preproc: data augmentation", "Args: index (int): data index Returns: img (numpy.ndarray): pre-processed image", "# std=(0.229, 0.224, 0.225), # max_labels=500,),) self, data_dir=None, json_file=\"train_half.json\", name=\"train\",", "Annotation data are read into memory by COCO API. Args:", ") img = cv2.imread(img_file) assert img is not None return", ".datasets_wrapper import Dataset class MOTDataset(Dataset): \"\"\" COCO dataset class. \"\"\"", "and pre-processed. Args: index (int): data index Returns: img (numpy.ndarray):", "= x1 + obj[\"bbox\"][2] y2 = y1 + obj[\"bbox\"][3] if", "xc, yc, w, h]: class (float): class index. xc, yc", "as the input index. Used for evaluation. \"\"\" img, target,", "obj[\"bbox\"][1] x2 = x1 + obj[\"bbox\"][2] y2 = y1 +", "self.preproc is not None: img, target = self.preproc(img, target, self.input_dim)", "file_name = self.annotations[index] # load image and preprocess img_file =", "dataset initialization. 
Annotation data are read into memory by COCO", "as np from pycocotools.coco import COCO import os from ..dataloading", "the image nh, nw (int): shape of the resized image", "obj[\"clean_bbox\"] res[ix, 4] = cls #res[ix, 5] = obj[\"track_id\"] #", "file_name) def load_anno(self, index): return self.annotations[index][0] def pull_item(self, index): id_", "load image and preprocess img_file = os.path.join( self.data_dir, self.name, file_name", "by COCO API. Args: data_dir (str): dataset root directory json_file", "#frame_id = im_ann[\"frame_id\"] : the default value '1' avoid to", "(float): class index. xc, yc (float) : center of bbox", "= 1 file_name = im_ann[\"file_name\"] if \"file_name\" in im_ann else", "is None: data_dir = os.path.join(get_yolox_datadir(), \"mot\") self.data_dir = data_dir self.json_file", "self.ids] def load_anno_from_ids(self, id_): im_ann = self.coco.loadImgs(id_)[0] width = im_ann[\"width\"]", "im_ann else \"{:012}\".format(id_) + \".jpg\" img_info = (height, width, frame_id,", "# img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), # std=(0.229, 0.224, 0.225),", "for the given index is picked up and pre-processed. Args:", "read into memory by COCO API. Args: data_dir (str): dataset", "return img, res.copy(), img_info, np.array([id_]) @Dataset.resize_getitem def __getitem__(self, index): \"\"\"", "dx, dy. h, w (int): original shape of the image", "= len(objs) res = np.zeros((num_objs, 6)) for ix, obj in", "bbox whose values range from 0 to 1. w, h", "(torch.Tensor): pre-processed label data. The shape is :math:`[max_labels, 5]`. each", "data_dir=os.path.join(get_yolox_datadir(), \"mot\"), # json_file=self.train_ann, # name='train', # img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485,", "class (float): class index. 
xc, yc (float) : center of", "self._classes = tuple([c[\"name\"] for c in cats]) self.annotations = self._load_coco_annotations()", "the exps yolox_x_mot17_half.py in this way: dataset = MOTDataset( #", "np from pycocotools.coco import COCO import os from ..dataloading import", "\"\"\" One image / label pair for the given index", "default value 1 res[ix, 5] = 1 file_name = im_ann[\"file_name\"]", "json_file self.coco = COCO(os.path.join(self.data_dir, \"annotations\", self.json_file)) self.ids = self.coco.getImgIds() self.class_ids", "5]`. each label consists of [class, xc, yc, w, h]:", "= obj[\"bbox\"][0] y1 = obj[\"bbox\"][1] x2 = x1 + obj[\"bbox\"][2]", "name=\"train\", img_size=(608, 1088), preproc=None, ): \"\"\" COCO dataset initialization. Annotation", "value '1' avoid to break augmentation & evaluation processes frame_id", "h]: class (float): class index. xc, yc (float) : center", "target, img_info, img_id = self.pull_item(index) if self.preproc is not None:", "self.class_ids = sorted(self.coco.getCatIds()) cats = self.coco.loadCats(self.coco.getCatIds()) self._classes = tuple([c[\"name\"] for", "obj[\"bbox\"][0] y1 = obj[\"bbox\"][1] x2 = x1 + obj[\"bbox\"][2] y2", "in im_ann else \"{:012}\".format(id_) + \".jpg\" img_info = (height, width,", "get_yolox_datadir from .datasets_wrapper import Dataset class MOTDataset(Dataset): \"\"\" COCO dataset", "data_dir (str): dataset root directory json_file (str): COCO json file", "pre-processed label data. The shape is :math:`[max_labels, 5]`. each label", "c in cats]) self.annotations = self._load_coco_annotations() self.name = name self.img_size", "is not None: img, target = self.preproc(img, target, self.input_dim) return", "data. The shape is :math:`[max_labels, 5]`. each label consists of", "/ label pair for the given index is picked up", "self.ids = self.coco.getImgIds() self.class_ids = sorted(self.coco.getCatIds()) cats = self.coco.loadCats(self.coco.getCatIds()) self._classes", "h, w, nh, nw, dx, dy. 
h, w (int): original", "= self.pull_item(index) if self.preproc is not None: img, target =", "1 #video_id = im_ann[\"video_id\"] : the default value '1' avoid", "in enumerate(objs): cls = self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4] = obj[\"clean_bbox\"] res[ix,", "each label consists of [class, xc, yc, w, h]: class", "data are read into memory by COCO API. Args: data_dir", "of [class, xc, yc, w, h]: class (float): class index.", "tuple of h, w, nh, nw, dx, dy. h, w", "dataset root directory json_file (str): COCO json file name name", "file_name = im_ann[\"file_name\"] if \"file_name\" in im_ann else \"{:012}\".format(id_) +", "label consists of [class, xc, yc, w, h]: class (float):", "len(self.ids) def _load_coco_annotations(self): return [self.load_anno_from_ids(_ids) for _ids in self.ids] def", "yolox_x_mot17_half.py in this way: dataset = MOTDataset( # data_dir=os.path.join(get_yolox_datadir(), \"mot\"),", "of the resized image without padding dx, dy (int): pad", "+ obj[\"bbox\"][2] y2 = y1 + obj[\"bbox\"][3] if obj[\"area\"] >", "evaluation. \"\"\" img, target, img_info, img_id = self.pull_item(index) if self.preproc", "padded_labels (torch.Tensor): pre-processed label data. 
The shape is :math:`[max_labels, 5]`.", "= cls #res[ix, 5] = obj[\"track_id\"] # See comment line", "index): \"\"\" One image / label pair for the given", "(float) : size of bbox whose values range from 0", "y1 + obj[\"bbox\"][3] if obj[\"area\"] > 0 and x2 >=", "(int): original shape of the image nh, nw (int): shape", "= self._load_coco_annotations() self.name = name self.img_size = img_size self.preproc =", "img_size=self.input_size, # preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), # std=(0.229, 0.224, 0.225), #", "height = im_ann[\"height\"] #frame_id = im_ann[\"frame_id\"] : the default value", "self.preproc = preproc def __len__(self): return len(self.ids) def _load_coco_annotations(self): return", "im_ann[\"width\"] height = im_ann[\"height\"] #frame_id = im_ann[\"frame_id\"] : the default", "image size after pre-processing preproc: data augmentation strategy \"\"\" super().__init__(img_size)", "shape is :math:`[max_labels, 5]`. each label consists of [class, xc,", "y1: obj[\"clean_bbox\"] = [x1, y1, x2, y2] objs.append(obj) num_objs =", "info_img : tuple of h, w, nh, nw, dx, dy.", "comment line 66; same comment for the default value 1", "obj[\"bbox\"][2] y2 = y1 + obj[\"bbox\"][3] if obj[\"area\"] > 0", "os from ..dataloading import get_yolox_datadir from .datasets_wrapper import Dataset class", "= self.ids[index] res, img_info, file_name = self.annotations[index] # load image", "way: dataset = MOTDataset( # data_dir=os.path.join(get_yolox_datadir(), \"mot\"), # json_file=self.train_ann, #", "1. info_img : tuple of h, w, nh, nw, dx,", "in annotations: x1 = obj[\"bbox\"][0] y1 = obj[\"bbox\"][1] x2 =", "\"mot\") self.data_dir = data_dir self.json_file = json_file self.coco = COCO(os.path.join(self.data_dir,", "x2, y2] objs.append(obj) num_objs = len(objs) res = np.zeros((num_objs, 6))", "class MOTDataset(Dataset): \"\"\" COCO dataset class. 
\"\"\" def __init__( #", "COCO import os from ..dataloading import get_yolox_datadir from .datasets_wrapper import", "same as the input index. Used for evaluation. \"\"\" img,", "im_ann = self.coco.loadImgs(id_)[0] width = im_ann[\"width\"] height = im_ann[\"height\"] #frame_id", "#res[ix, 5] = obj[\"track_id\"] # See comment line 66; same", "index Returns: img (numpy.ndarray): pre-processed image padded_labels (torch.Tensor): pre-processed label", "size after pre-processing preproc: data augmentation strategy \"\"\" super().__init__(img_size) if", "= tuple([c[\"name\"] for c in cats]) self.annotations = self._load_coco_annotations() self.name", "img_info = (height, width, frame_id, video_id, file_name) del im_ann, annotations", "= im_ann[\"file_name\"] if \"file_name\" in im_ann else \"{:012}\".format(id_) + \".jpg\"", "pre-processing preproc: data augmentation strategy \"\"\" super().__init__(img_size) if data_dir is", "is :math:`[max_labels, 5]`. each label consists of [class, xc, yc,", ":math:`[max_labels, 5]`. each label consists of [class, xc, yc, w,", "COCO data name (e.g. 'train2017' or 'val2017') img_size (int): target", "w, nh, nw, dx, dy. h, w (int): original shape", "5] = 1 file_name = im_ann[\"file_name\"] if \"file_name\" in im_ann", "is picked up and pre-processed. Args: index (int): data index", "MOTDataset(Dataset): \"\"\" COCO dataset class. \"\"\" def __init__( # This", "break augmentation & evaluation processes video_id = 1 anno_ids =", "+ \".jpg\" img_info = (height, width, frame_id, video_id, file_name) del", "original shape of the image nh, nw (int): shape of", ">= x1 and y2 >= y1: obj[\"clean_bbox\"] = [x1, y1,", "'1' avoid to break augmentation & evaluation processes frame_id =", "name name (str): COCO data name (e.g. 
'train2017' or 'val2017')", "exps yolox_x_mot17_half.py in this way: dataset = MOTDataset( # data_dir=os.path.join(get_yolox_datadir(),", "(res, img_info, file_name) def load_anno(self, index): return self.annotations[index][0] def pull_item(self,", "data_dir=None, json_file=\"train_half.json\", name=\"train\", img_size=(608, 1088), preproc=None, ): \"\"\" COCO dataset", "from ..dataloading import get_yolox_datadir from .datasets_wrapper import Dataset class MOTDataset(Dataset):", "= im_ann[\"frame_id\"] : the default value '1' avoid to break", "cls = self.class_ids.index(obj[\"category_id\"]) res[ix, 0:4] = obj[\"clean_bbox\"] res[ix, 4] =", "pre-processed. Args: index (int): data index Returns: img (numpy.ndarray): pre-processed", "evaluation processes video_id = 1 anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False) annotations", "self.name = name self.img_size = img_size self.preproc = preproc def", "data_dir is None: data_dir = os.path.join(get_yolox_datadir(), \"mot\") self.data_dir = data_dir", "dataset = MOTDataset( # data_dir=os.path.join(get_yolox_datadir(), \"mot\"), # json_file=self.train_ann, # name='train',", "= data_dir self.json_file = json_file self.coco = COCO(os.path.join(self.data_dir, \"annotations\", self.json_file))", "label data. The shape is :math:`[max_labels, 5]`. 
each label consists", "to break augmentation & evaluation processes video_id = 1 anno_ids", "x1 and y2 >= y1: obj[\"clean_bbox\"] = [x1, y1, x2,", "annotations return (res, img_info, file_name) def load_anno(self, index): return self.annotations[index][0]", "frame_id = 1 #video_id = im_ann[\"video_id\"] : the default value", "\"\"\" img, target, img_info, img_id = self.pull_item(index) if self.preproc is", "data_dir self.json_file = json_file self.coco = COCO(os.path.join(self.data_dir, \"annotations\", self.json_file)) self.ids", "for obj in annotations: x1 = obj[\"bbox\"][0] y1 = obj[\"bbox\"][1]", "self.annotations[index] # load image and preprocess img_file = os.path.join( self.data_dir,", "the resized image without padding dx, dy (int): pad size", "value 1 res[ix, 5] = 1 file_name = im_ann[\"file_name\"] if", "dy. h, w (int): original shape of the image nh,", "to break augmentation & evaluation processes frame_id = 1 #video_id", "tuple([c[\"name\"] for c in cats]) self.annotations = self._load_coco_annotations() self.name =", "data_dir = os.path.join(get_yolox_datadir(), \"mot\") self.data_dir = data_dir self.json_file = json_file", "(str): COCO data name (e.g. 
'train2017' or 'val2017') img_size (int):", "size of bbox whose values range from 0 to 1.", "from pycocotools.coco import COCO import os from ..dataloading import get_yolox_datadir", "self.data_dir = data_dir self.json_file = json_file self.coco = COCO(os.path.join(self.data_dir, \"annotations\",", "__len__(self): return len(self.ids) def _load_coco_annotations(self): return [self.load_anno_from_ids(_ids) for _ids in", "self.json_file)) self.ids = self.coco.getImgIds() self.class_ids = sorted(self.coco.getCatIds()) cats = self.coco.loadCats(self.coco.getCatIds())", "self.ids[index] res, img_info, file_name = self.annotations[index] # load image and", "os.path.join(get_yolox_datadir(), \"mot\") self.data_dir = data_dir self.json_file = json_file self.coco =", "= np.zeros((num_objs, 6)) for ix, obj in enumerate(objs): cls =", "size img_id (int): same as the input index. Used for", "_load_coco_annotations(self): return [self.load_anno_from_ids(_ids) for _ids in self.ids] def load_anno_from_ids(self, id_):" ]
[ "tomlkit.toml_document import TOMLDocument try: from poetry.core.packages.dependency_group import MAIN_GROUP except ImportError:", "return status def _remove_packages( self, packages: list[str], section: dict[str, Any],", "not poetry_content[\"group\"][group]: del poetry_content[\"group\"][group] if \"group\" in poetry_content and not", "None: removed = [] group_sections = [ (group_name, group_section.get(\"dependencies\", {}))", "loggers = [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def handle(self) -> int: packages =", "if \"group\" in poetry_content and not poetry_content[\"group\"]: del poetry_content[\"group\"] removed_set", "from typing import Any from cleo.helpers import argument from cleo.helpers", "if not poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"] else: removed = self._remove_packages( packages,", "package from the development dependencies.\"), option( \"dry-run\", None, \"Output the", "in poetry_content.get(\"group\", {}).items() ] for group_name, section in [ (MAIN_GROUP,", "import Any from cleo.helpers import argument from cleo.helpers import option", "typing import Any from cleo.helpers import argument from cleo.helpers import", "from cleo.helpers import option from tomlkit.toml_document import TOMLDocument try: from", "packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False) ) self._installer.dry_run(self.option(\"dry-run\", False)) self._installer.verbose(self._io.is_verbose()) self._installer.update(True) self._installer.whitelist(removed_set)", "+ group_sections: removed += self._remove_packages(packages, section, group_name) if group_name !=", "import option from tomlkit.toml_document import TOMLDocument try: from poetry.core.packages.dependency_group import", "option is deprecated, \" \"use the `--group dev` notation instead.</warning>\"", "notation instead.</warning>\" ) group = \"dev\" else: group = self.option(\"group\",", "and status == 
0: assert isinstance(content, TOMLDocument) self.poetry.file.write(content) return status", "argument from cleo.helpers import option from tomlkit.toml_document import TOMLDocument try:", "project dependencies.\" arguments = [argument(\"packages\", \"The packages to remove.\", multiple=True)]", "self.argument(\"packages\") if self.option(\"dev\"): self.line_error( \"<warning>The --dev option is deprecated, \"", "\", \".join(sorted(not_found)) ) # Refresh the locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content)", "removed = [] group = self.poetry.package.dependency_group(group_name) section_keys = list(section.keys()) for", "MAIN_GROUP: if not section: del poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"] = section", "section in [ (MAIN_GROUP, poetry_content[\"dependencies\"]) ] + group_sections: removed +=", "following packages were not found: \" + \", \".join(sorted(not_found)) )", "_remove_packages( self, packages: list[str], section: dict[str, Any], group_name: str )", "remove</info>\"\"\" loggers = [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def handle(self) -> int: packages", "dependencies.\" arguments = [argument(\"packages\", \"The packages to remove.\", multiple=True)] options", "= self._installer.run() if not self.option(\"dry-run\") and status == 0: assert", "self.line_error( \"<warning>The --dev option is deprecated, \" \"use the `--group", "from __future__ import annotations from typing import Any from cleo.helpers", "section: dict[str, Any], group_name: str ) -> list[str]: removed =", "\"default\" from poetry.console.commands.installer_command import InstallerCommand class RemoveCommand(InstallerCommand): name = \"remove\"", "if existing_package.lower() == package.lower(): del section[existing_package] removed.append(package) group.remove_dependency(package) return removed", "account for the old `dev-dependencies` section 
removed = self._remove_packages( packages,", "and not poetry_content[\"group\"]: del poetry_content[\"group\"] removed_set = set(removed) not_found =", "package from the current list of installed packages <info>poetry remove</info>\"\"\"", "\"dry-run\", None, \"Output the operations but do not execute anything", "status == 0: assert isinstance(content, TOMLDocument) self.poetry.file.write(content) return status def", "packages: list[str], section: dict[str, Any], group_name: str ) -> list[str]:", "\"Output the operations but do not execute anything \" \"(implicitly", "import TOMLDocument try: from poetry.core.packages.dependency_group import MAIN_GROUP except ImportError: MAIN_GROUP", "Any from cleo.helpers import argument from cleo.helpers import option from", "for existing_package in section_keys: if existing_package.lower() == package.lower(): del section[existing_package]", "self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) ) self._installer.set_locker(self.poetry.locker) # Update packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\",", "--dev option is deprecated, \" \"use the `--group dev` notation", "not_found: raise ValueError( \"The following packages were not found: \"", "[] group = self.poetry.package.dependency_group(group_name) section_keys = list(section.keys()) for package in", "group_name: str ) -> list[str]: removed = [] group =", "\" \"use the `--group dev` notation instead.</warning>\" ) group =", "removes a package from the current list of installed packages", "dict[str, Any] = self.poetry.file.read() poetry_content = content[\"tool\"][\"poetry\"] if group is", "self._installer.verbose(self._io.is_verbose()) self._installer.update(True) self._installer.whitelist(removed_set) status = self._installer.run() if not self.option(\"dry-run\") and", "removed += self._remove_packages(packages, section, group_name) if group_name != MAIN_GROUP: if", "= section elif 
group == \"dev\" and \"dev-dependencies\" in poetry_content:", "= self.option(\"group\", self.default_group) content: dict[str, Any] = self.poetry.file.read() poetry_content =", "removed = self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\", {}), group ) if not", "try: from poetry.core.packages.dependency_group import MAIN_GROUP except ImportError: MAIN_GROUP = \"default\"", "package from the project dependencies.\" arguments = [argument(\"packages\", \"The packages", "self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\", {}), group ) if not poetry_content[\"group\"][group]: del", "in poetry_content: # We need to account for the old", "0: assert isinstance(content, TOMLDocument) self.poetry.file.write(content) return status def _remove_packages( self,", "`dev-dependencies` section removed = self._remove_packages( packages, poetry_content[\"dev-dependencies\"], \"dev\" ) if", "[ (MAIN_GROUP, poetry_content[\"dependencies\"]) ] + group_sections: removed += self._remove_packages(packages, section,", "removed_set = set(removed) not_found = set(packages).difference(removed_set) if not_found: raise ValueError(", "in packages: for existing_package in section_keys: if existing_package.lower() == package.lower():", "not found: \" + \", \".join(sorted(not_found)) ) # Refresh the", "group_name) if group_name != MAIN_GROUP: if not section: del poetry_content[\"group\"][group_name]", "isinstance(content, TOMLDocument) self.poetry.file.write(content) return status def _remove_packages( self, packages: list[str],", "poetry.console.commands.installer_command import InstallerCommand class RemoveCommand(InstallerCommand): name = \"remove\" description =", "RemoveCommand(InstallerCommand): name = \"remove\" description = \"Removes a package from", "of installed packages <info>poetry remove</info>\"\"\" loggers = [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def", "a package from the 
development dependencies.\"), option( \"dry-run\", None, \"Output", "arguments = [argument(\"packages\", \"The packages to remove.\", multiple=True)] options =", "removed = [] group_sections = [ (group_name, group_section.get(\"dependencies\", {})) for", "poetry_content[\"group\"]: del poetry_content[\"group\"] removed_set = set(removed) not_found = set(packages).difference(removed_set) if", "\"D\", \"Remove a package from the development dependencies.\"), option( \"dry-run\",", "] for group_name, section in [ (MAIN_GROUP, poetry_content[\"dependencies\"]) ] +", "flag=False), option(\"dev\", \"D\", \"Remove a package from the development dependencies.\"),", "from tomlkit.toml_document import TOMLDocument try: from poetry.core.packages.dependency_group import MAIN_GROUP except", "def _remove_packages( self, packages: list[str], section: dict[str, Any], group_name: str", "in poetry_content and not poetry_content[\"group\"]: del poetry_content[\"group\"] removed_set = set(removed)", "for package in packages: for existing_package in section_keys: if existing_package.lower()", "packages, poetry_content[\"group\"][group].get(\"dependencies\", {}), group ) if not poetry_content[\"group\"][group]: del poetry_content[\"group\"][group]", "self._remove_packages( packages, poetry_content[\"dev-dependencies\"], \"dev\" ) if not poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"]", "locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) ) self._installer.set_locker(self.poetry.locker) # Update packages self._installer.use_executor(", "poetry_content[\"group\"][group_name][\"dependencies\"] = section elif group == \"dev\" and \"dev-dependencies\" in", "[argument(\"packages\", \"The packages to remove.\", multiple=True)] options = [ option(\"group\",", "status = self._installer.run() if not self.option(\"dry-run\") and status == 0:", "+= self._remove_packages(packages, section, group_name) if group_name 
!= MAIN_GROUP: if not", "`--group dev` notation instead.</warning>\" ) group = \"dev\" else: group", "self._installer.update(True) self._installer.whitelist(removed_set) status = self._installer.run() if not self.option(\"dry-run\") and status", "TOMLDocument) self.poetry.file.write(content) return status def _remove_packages( self, packages: list[str], section:", "option(\"dev\", \"D\", \"Remove a package from the development dependencies.\"), option(", "option( \"dry-run\", None, \"Output the operations but do not execute", "packages were not found: \" + \", \".join(sorted(not_found)) ) #", "packages = self.argument(\"packages\") if self.option(\"dev\"): self.line_error( \"<warning>The --dev option is", "TOMLDocument try: from poetry.core.packages.dependency_group import MAIN_GROUP except ImportError: MAIN_GROUP =", "cleo.helpers import argument from cleo.helpers import option from tomlkit.toml_document import", "False)) self._installer.verbose(self._io.is_verbose()) self._installer.update(True) self._installer.whitelist(removed_set) status = self._installer.run() if not self.option(\"dry-run\")", "self.poetry.file.write(content) return status def _remove_packages( self, packages: list[str], section: dict[str,", "[ (group_name, group_section.get(\"dependencies\", {})) for group_name, group_section in poetry_content.get(\"group\", {}).items()", "We need to account for the old `dev-dependencies` section removed", "self.default_group) content: dict[str, Any] = self.poetry.file.read() poetry_content = content[\"tool\"][\"poetry\"] if", "if self.option(\"dev\"): self.line_error( \"<warning>The --dev option is deprecated, \" \"use", "group == \"dev\" and \"dev-dependencies\" in poetry_content: # We need", "import InstallerCommand class RemoveCommand(InstallerCommand): name = \"remove\" description = \"Removes", ") self._installer.dry_run(self.option(\"dry-run\", False)) self._installer.verbose(self._io.is_verbose()) self._installer.update(True) 
self._installer.whitelist(removed_set) status = self._installer.run() if", "group_sections: removed += self._remove_packages(packages, section, group_name) if group_name != MAIN_GROUP:", "section elif group == \"dev\" and \"dev-dependencies\" in poetry_content: #", "Update packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False) ) self._installer.dry_run(self.option(\"dry-run\", False)) self._installer.verbose(self._io.is_verbose()) self._installer.update(True)", "import argument from cleo.helpers import option from tomlkit.toml_document import TOMLDocument", "InstallerCommand class RemoveCommand(InstallerCommand): name = \"remove\" description = \"Removes a", "to remove the dependency from.\", flag=False), option(\"dev\", \"D\", \"Remove a", "elif group == \"dev\" and \"dev-dependencies\" in poetry_content: # We", "poetry_content[\"dev-dependencies\"] else: removed = self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\", {}), group )", "group_section.get(\"dependencies\", {})) for group_name, group_section in poetry_content.get(\"group\", {}).items() ] for", "group = self.option(\"group\", self.default_group) content: dict[str, Any] = self.poetry.file.read() poetry_content", "list[str]: removed = [] group = self.poetry.package.dependency_group(group_name) section_keys = list(section.keys())", "not poetry_content[\"group\"]: del poetry_content[\"group\"] removed_set = set(removed) not_found = set(packages).difference(removed_set)", "+ \", \".join(sorted(not_found)) ) # Refresh the locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path,", "\".join(sorted(not_found)) ) # Refresh the locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) )", "the locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) ) self._installer.set_locker(self.poetry.locker) # Update 
packages", "option from tomlkit.toml_document import TOMLDocument try: from poetry.core.packages.dependency_group import MAIN_GROUP", "\"group\" in poetry_content and not poetry_content[\"group\"]: del poetry_content[\"group\"] removed_set =", "status def _remove_packages( self, packages: list[str], section: dict[str, Any], group_name:", "None, \"Output the operations but do not execute anything \"", "= \"dev\" else: group = self.option(\"group\", self.default_group) content: dict[str, Any]", "\" \"(implicitly enables --verbose).\", ), ] help = \"\"\"The <info>remove</info>", "set(removed) not_found = set(packages).difference(removed_set) if not_found: raise ValueError( \"The following", "the dependency from.\", flag=False), option(\"dev\", \"D\", \"Remove a package from", "\"poetry.inspection.info\"] def handle(self) -> int: packages = self.argument(\"packages\") if self.option(\"dev\"):", "== 0: assert isinstance(content, TOMLDocument) self.poetry.file.write(content) return status def _remove_packages(", "if not section: del poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"] = section elif", "poetry_content: # We need to account for the old `dev-dependencies`", "except ImportError: MAIN_GROUP = \"default\" from poetry.console.commands.installer_command import InstallerCommand class", "= self.poetry.package.dependency_group(group_name) section_keys = list(section.keys()) for package in packages: for", "= content[\"tool\"][\"poetry\"] if group is None: removed = [] group_sections", "self._installer.set_locker(self.poetry.locker) # Update packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False) ) self._installer.dry_run(self.option(\"dry-run\", False))", "= \"\"\"The <info>remove</info> command removes a package from the current", "self._remove_packages(packages, section, group_name) if group_name != MAIN_GROUP: if not section:", "poetry.core.packages.dependency_group import 
MAIN_GROUP except ImportError: MAIN_GROUP = \"default\" from poetry.console.commands.installer_command", "== \"dev\" and \"dev-dependencies\" in poetry_content: # We need to", "group ) if not poetry_content[\"group\"][group]: del poetry_content[\"group\"][group] if \"group\" in", "[\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def handle(self) -> int: packages = self.argument(\"packages\") if", "[] group_sections = [ (group_name, group_section.get(\"dependencies\", {})) for group_name, group_section", ") # Refresh the locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) ) self._installer.set_locker(self.poetry.locker)", "(MAIN_GROUP, poetry_content[\"dependencies\"]) ] + group_sections: removed += self._remove_packages(packages, section, group_name)", "dict[str, Any], group_name: str ) -> list[str]: removed = []", "False) ) self._installer.dry_run(self.option(\"dry-run\", False)) self._installer.verbose(self._io.is_verbose()) self._installer.update(True) self._installer.whitelist(removed_set) status = self._installer.run()", "def handle(self) -> int: packages = self.argument(\"packages\") if self.option(\"dev\"): self.line_error(", "do not execute anything \" \"(implicitly enables --verbose).\", ), ]", "in [ (MAIN_GROUP, poetry_content[\"dependencies\"]) ] + group_sections: removed += self._remove_packages(packages,", "section, group_name) if group_name != MAIN_GROUP: if not section: del", "# We need to account for the old `dev-dependencies` section", "group = \"dev\" else: group = self.option(\"group\", self.default_group) content: dict[str,", "list[str], section: dict[str, Any], group_name: str ) -> list[str]: removed", "str ) -> list[str]: removed = [] group = self.poetry.package.dependency_group(group_name)", "description = \"Removes a package from the project dependencies.\" arguments", "= [ option(\"group\", \"G\", \"The group to remove the dependency", "from the current list of 
installed packages <info>poetry remove</info>\"\"\" loggers", "\"dev\" and \"dev-dependencies\" in poetry_content: # We need to account", "self, packages: list[str], section: dict[str, Any], group_name: str ) ->", "existing_package in section_keys: if existing_package.lower() == package.lower(): del section[existing_package] removed.append(package)", "] help = \"\"\"The <info>remove</info> command removes a package from", "# Refresh the locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) ) self._installer.set_locker(self.poetry.locker) #", "<reponame>pkoch/poetry from __future__ import annotations from typing import Any from", "\"The group to remove the dependency from.\", flag=False), option(\"dev\", \"D\",", "] + group_sections: removed += self._remove_packages(packages, section, group_name) if group_name", "a package from the project dependencies.\" arguments = [argument(\"packages\", \"The", "else: removed = self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\", {}), group ) if", "del poetry_content[\"dev-dependencies\"] else: removed = self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\", {}), group", "poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"] = section elif group == \"dev\" and", "list(section.keys()) for package in packages: for existing_package in section_keys: if", "group = self.poetry.package.dependency_group(group_name) section_keys = list(section.keys()) for package in packages:", "MAIN_GROUP except ImportError: MAIN_GROUP = \"default\" from poetry.console.commands.installer_command import InstallerCommand", "a package from the current list of installed packages <info>poetry", "{}).items() ] for group_name, section in [ (MAIN_GROUP, poetry_content[\"dependencies\"]) ]", "the `--group dev` notation instead.</warning>\" ) group = \"dev\" else:", "for the old `dev-dependencies` 
section removed = self._remove_packages( packages, poetry_content[\"dev-dependencies\"],", "section removed = self._remove_packages( packages, poetry_content[\"dev-dependencies\"], \"dev\" ) if not", "Refresh the locker self.poetry.set_locker( self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) ) self._installer.set_locker(self.poetry.locker) # Update", "the current list of installed packages <info>poetry remove</info>\"\"\" loggers =", "= list(section.keys()) for package in packages: for existing_package in section_keys:", "import annotations from typing import Any from cleo.helpers import argument", "not poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"] else: removed = self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\",", "from.\", flag=False), option(\"dev\", \"D\", \"Remove a package from the development", "\"\"\"The <info>remove</info> command removes a package from the current list", "from cleo.helpers import argument from cleo.helpers import option from tomlkit.toml_document", "multiple=True)] options = [ option(\"group\", \"G\", \"The group to remove", "to account for the old `dev-dependencies` section removed = self._remove_packages(", "packages: for existing_package in section_keys: if existing_package.lower() == package.lower(): del", "--verbose).\", ), ] help = \"\"\"The <info>remove</info> command removes a", "poetry_content[\"dev-dependencies\"], \"dev\" ) if not poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"] else: removed", "not section: del poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"] = section elif group", "options = [ option(\"group\", \"G\", \"The group to remove the", "execute anything \" \"(implicitly enables --verbose).\", ), ] help =", "group_sections = [ (group_name, group_section.get(\"dependencies\", {})) for group_name, group_section in", "need to account for the old 
`dev-dependencies` section removed =", "int: packages = self.argument(\"packages\") if self.option(\"dev\"): self.line_error( \"<warning>The --dev option", "not execute anything \" \"(implicitly enables --verbose).\", ), ] help", "group_name != MAIN_GROUP: if not section: del poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"]", "(group_name, group_section.get(\"dependencies\", {})) for group_name, group_section in poetry_content.get(\"group\", {}).items() ]", "else: group = self.option(\"group\", self.default_group) content: dict[str, Any] = self.poetry.file.read()", "the old `dev-dependencies` section removed = self._remove_packages( packages, poetry_content[\"dev-dependencies\"], \"dev\"", "self.option(\"dry-run\") and status == 0: assert isinstance(content, TOMLDocument) self.poetry.file.write(content) return", "packages <info>poetry remove</info>\"\"\" loggers = [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def handle(self) ->", "removed = self._remove_packages( packages, poetry_content[\"dev-dependencies\"], \"dev\" ) if not poetry_content[\"dev-dependencies\"]:", "\"(implicitly enables --verbose).\", ), ] help = \"\"\"The <info>remove</info> command", "section_keys = list(section.keys()) for package in packages: for existing_package in", "not self.option(\"dry-run\") and status == 0: assert isinstance(content, TOMLDocument) self.poetry.file.write(content)", "and \"dev-dependencies\" in poetry_content: # We need to account for", "poetry_content[\"group\"][group] if \"group\" in poetry_content and not poetry_content[\"group\"]: del poetry_content[\"group\"]", "poetry_content[\"group\"][group].get(\"dependencies\", {}), group ) if not poetry_content[\"group\"][group]: del poetry_content[\"group\"][group] if", "in section_keys: if existing_package.lower() == package.lower(): del section[existing_package] removed.append(package) group.remove_dependency(package)", "\"G\", \"The group to remove 
the dependency from.\", flag=False), option(\"dev\",", "self.poetry.file.read() poetry_content = content[\"tool\"][\"poetry\"] if group is None: removed =", "dev` notation instead.</warning>\" ) group = \"dev\" else: group =", "-> list[str]: removed = [] group = self.poetry.package.dependency_group(group_name) section_keys =", "dependency from.\", flag=False), option(\"dev\", \"D\", \"Remove a package from the", "annotations from typing import Any from cleo.helpers import argument from", "help = \"\"\"The <info>remove</info> command removes a package from the", ") if not poetry_content[\"group\"][group]: del poetry_content[\"group\"][group] if \"group\" in poetry_content", "Any] = self.poetry.file.read() poetry_content = content[\"tool\"][\"poetry\"] if group is None:", "class RemoveCommand(InstallerCommand): name = \"remove\" description = \"Removes a package", "the development dependencies.\"), option( \"dry-run\", None, \"Output the operations but", "group is None: removed = [] group_sections = [ (group_name,", "group_name, section in [ (MAIN_GROUP, poetry_content[\"dependencies\"]) ] + group_sections: removed", "development dependencies.\"), option( \"dry-run\", None, \"Output the operations but do", "[ option(\"group\", \"G\", \"The group to remove the dependency from.\",", "section_keys: if existing_package.lower() == package.lower(): del section[existing_package] removed.append(package) group.remove_dependency(package) return", "__future__ import annotations from typing import Any from cleo.helpers import", "del poetry_content[\"group\"] removed_set = set(removed) not_found = set(packages).difference(removed_set) if not_found:", "= self._remove_packages( packages, poetry_content[\"dev-dependencies\"], \"dev\" ) if not poetry_content[\"dev-dependencies\"]: del", "poetry_content[\"dependencies\"]) ] + group_sections: removed += self._remove_packages(packages, section, group_name) if", "operations but do not execute anything \" \"(implicitly enables 
--verbose).\",", "content: dict[str, Any] = self.poetry.file.read() poetry_content = content[\"tool\"][\"poetry\"] if group", "= [] group_sections = [ (group_name, group_section.get(\"dependencies\", {})) for group_name,", "to remove.\", multiple=True)] options = [ option(\"group\", \"G\", \"The group", "Any], group_name: str ) -> list[str]: removed = [] group", "group_section in poetry_content.get(\"group\", {}).items() ] for group_name, section in [", "old `dev-dependencies` section removed = self._remove_packages( packages, poetry_content[\"dev-dependencies\"], \"dev\" )", "poetry_content[\"group\"] removed_set = set(removed) not_found = set(packages).difference(removed_set) if not_found: raise", "from the project dependencies.\" arguments = [argument(\"packages\", \"The packages to", "self.option(\"dev\"): self.line_error( \"<warning>The --dev option is deprecated, \" \"use the", "poetry_content = content[\"tool\"][\"poetry\"] if group is None: removed = []", "is deprecated, \" \"use the `--group dev` notation instead.</warning>\" )", "package in packages: for existing_package in section_keys: if existing_package.lower() ==", "\" + \", \".join(sorted(not_found)) ) # Refresh the locker self.poetry.set_locker(", "\"Remove a package from the development dependencies.\"), option( \"dry-run\", None,", "remove the dependency from.\", flag=False), option(\"dev\", \"D\", \"Remove a package", "= \"Removes a package from the project dependencies.\" arguments =", "= set(packages).difference(removed_set) if not_found: raise ValueError( \"The following packages were", "!= MAIN_GROUP: if not section: del poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"] =", "if not self.option(\"dry-run\") and status == 0: assert isinstance(content, TOMLDocument)", "deprecated, \" \"use the `--group dev` notation instead.</warning>\" ) group", "MAIN_GROUP = \"default\" from poetry.console.commands.installer_command import InstallerCommand 
class RemoveCommand(InstallerCommand): name", "were not found: \" + \", \".join(sorted(not_found)) ) # Refresh", "the project dependencies.\" arguments = [argument(\"packages\", \"The packages to remove.\",", "if group_name != MAIN_GROUP: if not section: del poetry_content[\"group\"][group_name] else:", "\"The following packages were not found: \" + \", \".join(sorted(not_found))", "= self.poetry.file.read() poetry_content = content[\"tool\"][\"poetry\"] if group is None: removed", "section: del poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"] = section elif group ==", "else: poetry_content[\"group\"][group_name][\"dependencies\"] = section elif group == \"dev\" and \"dev-dependencies\"", "{}), group ) if not poetry_content[\"group\"][group]: del poetry_content[\"group\"][group] if \"group\"", "= set(removed) not_found = set(packages).difference(removed_set) if not_found: raise ValueError( \"The", "remove.\", multiple=True)] options = [ option(\"group\", \"G\", \"The group to", "self._installer.run() if not self.option(\"dry-run\") and status == 0: assert isinstance(content,", "import MAIN_GROUP except ImportError: MAIN_GROUP = \"default\" from poetry.console.commands.installer_command import", "self._installer.whitelist(removed_set) status = self._installer.run() if not self.option(\"dry-run\") and status ==", "group_name, group_section in poetry_content.get(\"group\", {}).items() ] for group_name, section in", "packages to remove.\", multiple=True)] options = [ option(\"group\", \"G\", \"The", "= [] group = self.poetry.package.dependency_group(group_name) section_keys = list(section.keys()) for package", "), ] help = \"\"\"The <info>remove</info> command removes a package", "from the development dependencies.\"), option( \"dry-run\", None, \"Output the operations", "list of installed packages <info>poetry remove</info>\"\"\" loggers = [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"]", "name = 
\"remove\" description = \"Removes a package from the", "anything \" \"(implicitly enables --verbose).\", ), ] help = \"\"\"The", "cleo.helpers import option from tomlkit.toml_document import TOMLDocument try: from poetry.core.packages.dependency_group", "enables --verbose).\", ), ] help = \"\"\"The <info>remove</info> command removes", ") if not poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"] else: removed = self._remove_packages(", "ImportError: MAIN_GROUP = \"default\" from poetry.console.commands.installer_command import InstallerCommand class RemoveCommand(InstallerCommand):", "= [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def handle(self) -> int: packages = self.argument(\"packages\")", "instead.</warning>\" ) group = \"dev\" else: group = self.option(\"group\", self.default_group)", "= self.argument(\"packages\") if self.option(\"dev\"): self.line_error( \"<warning>The --dev option is deprecated,", "self.option(\"group\", self.default_group) content: dict[str, Any] = self.poetry.file.read() poetry_content = content[\"tool\"][\"poetry\"]", "ValueError( \"The following packages were not found: \" + \",", "for group_name, section in [ (MAIN_GROUP, poetry_content[\"dependencies\"]) ] + group_sections:", "= self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\", {}), group ) if not poetry_content[\"group\"][group]:", "is None: removed = [] group_sections = [ (group_name, group_section.get(\"dependencies\",", "del poetry_content[\"group\"][group_name] else: poetry_content[\"group\"][group_name][\"dependencies\"] = section elif group == \"dev\"", "\"dev\" else: group = self.option(\"group\", self.default_group) content: dict[str, Any] =", "installed packages <info>poetry remove</info>\"\"\" loggers = [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def handle(self)", "self._installer.dry_run(self.option(\"dry-run\", False)) 
self._installer.verbose(self._io.is_verbose()) self._installer.update(True) self._installer.whitelist(removed_set) status = self._installer.run() if not", "if not poetry_content[\"group\"][group]: del poetry_content[\"group\"][group] if \"group\" in poetry_content and", "from poetry.core.packages.dependency_group import MAIN_GROUP except ImportError: MAIN_GROUP = \"default\" from", "poetry_content.get(\"group\", {}).items() ] for group_name, section in [ (MAIN_GROUP, poetry_content[\"dependencies\"])", "if not_found: raise ValueError( \"The following packages were not found:", "# Update packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False) ) self._installer.dry_run(self.option(\"dry-run\", False)) self._installer.verbose(self._io.is_verbose())", "\"The packages to remove.\", multiple=True)] options = [ option(\"group\", \"G\",", ") -> list[str]: removed = [] group = self.poetry.package.dependency_group(group_name) section_keys", "found: \" + \", \".join(sorted(not_found)) ) # Refresh the locker", "if group is None: removed = [] group_sections = [", "= [ (group_name, group_section.get(\"dependencies\", {})) for group_name, group_section in poetry_content.get(\"group\",", "assert isinstance(content, TOMLDocument) self.poetry.file.write(content) return status def _remove_packages( self, packages:", "self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content) ) self._installer.set_locker(self.poetry.locker) # Update packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False)", "the operations but do not execute anything \" \"(implicitly enables", "group to remove the dependency from.\", flag=False), option(\"dev\", \"D\", \"Remove", "\"use the `--group dev` notation instead.</warning>\" ) group = \"dev\"", "self.poetry.config.get(\"experimental.new-installer\", False) ) self._installer.dry_run(self.option(\"dry-run\", False)) 
self._installer.verbose(self._io.is_verbose()) self._installer.update(True) self._installer.whitelist(removed_set) status =", "handle(self) -> int: packages = self.argument(\"packages\") if self.option(\"dev\"): self.line_error( \"<warning>The", "dependencies.\"), option( \"dry-run\", None, \"Output the operations but do not", "current list of installed packages <info>poetry remove</info>\"\"\" loggers = [\"poetry.repositories.pypi_repository\",", "poetry_content[\"group\"][group]: del poetry_content[\"group\"][group] if \"group\" in poetry_content and not poetry_content[\"group\"]:", "but do not execute anything \" \"(implicitly enables --verbose).\", ),", "\"remove\" description = \"Removes a package from the project dependencies.\"", "for group_name, group_section in poetry_content.get(\"group\", {}).items() ] for group_name, section", "= \"default\" from poetry.console.commands.installer_command import InstallerCommand class RemoveCommand(InstallerCommand): name =", "poetry_content) ) self._installer.set_locker(self.poetry.locker) # Update packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False) )", "from poetry.console.commands.installer_command import InstallerCommand class RemoveCommand(InstallerCommand): name = \"remove\" description", "\"dev-dependencies\" in poetry_content: # We need to account for the", "packages, poetry_content[\"dev-dependencies\"], \"dev\" ) if not poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"] else:", "\"dev\" ) if not poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"] else: removed =", "-> int: packages = self.argument(\"packages\") if self.option(\"dev\"): self.line_error( \"<warning>The --dev", "poetry_content[\"dev-dependencies\"]: del poetry_content[\"dev-dependencies\"] else: removed = self._remove_packages( packages, poetry_content[\"group\"][group].get(\"dependencies\", {}),", ") group = \"dev\" else: group = 
self.option(\"group\", self.default_group) content:", "{})) for group_name, group_section in poetry_content.get(\"group\", {}).items() ] for group_name,", ") self._installer.set_locker(self.poetry.locker) # Update packages self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False) ) self._installer.dry_run(self.option(\"dry-run\",", "del poetry_content[\"group\"][group] if \"group\" in poetry_content and not poetry_content[\"group\"]: del", "\"Removes a package from the project dependencies.\" arguments = [argument(\"packages\",", "option(\"group\", \"G\", \"The group to remove the dependency from.\", flag=False),", "<info>poetry remove</info>\"\"\" loggers = [\"poetry.repositories.pypi_repository\", \"poetry.inspection.info\"] def handle(self) -> int:", "= [argument(\"packages\", \"The packages to remove.\", multiple=True)] options = [", "not_found = set(packages).difference(removed_set) if not_found: raise ValueError( \"The following packages", "set(packages).difference(removed_set) if not_found: raise ValueError( \"The following packages were not", "content[\"tool\"][\"poetry\"] if group is None: removed = [] group_sections =", "= \"remove\" description = \"Removes a package from the project", "raise ValueError( \"The following packages were not found: \" +", "self.poetry.package.dependency_group(group_name) section_keys = list(section.keys()) for package in packages: for existing_package", "\"<warning>The --dev option is deprecated, \" \"use the `--group dev`", "command removes a package from the current list of installed", "poetry_content and not poetry_content[\"group\"]: del poetry_content[\"group\"] removed_set = set(removed) not_found", "<info>remove</info> command removes a package from the current list of", "self._installer.use_executor( self.poetry.config.get(\"experimental.new-installer\", False) ) self._installer.dry_run(self.option(\"dry-run\", False)) self._installer.verbose(self._io.is_verbose()) 
self._installer.update(True) self._installer.whitelist(removed_set) status" ]
[ "return step # = Test ================================================ inputlist = [ '5483143223',", "def parse(lines): return np.array([[int(c) for c in line.strip()] for line", "flash(a, x+dx, y+dy) def progress(a): global visited a += 1", "pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame = 0 history = [] for i in", "# print('a:\\n', a) a[a > 9] = 0 return a,", "visited = [] for i in range(len(x)): flash(a,x[i],y[i]) count =", "in lines]) visited = [] def flash(a, x, y): global", "@timeit(\"Day 11 Part 1\") def part1(input_str, use_rust=False): octomap = parse(input_str)", "= 5 screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame =", "a.shape[0]: continue if y+dy < 0 or y+dy >= a.shape[1]:", "return np.array([[int(c) for c in line.strip()] for line in lines])", "octomap, count = progress(octomap) total_count += count return total_count @timeit(\"Day", "octomap = history[frame] except: frame = 0 for i in", "octomap = parse(input_str) step = 0 while True: step +=", "+= 1 visited.append((x,y)) if a[x+dx, y+dy] > 9: flash(a, x+dx,", "in range(-1,2): if dx == 0 and dy == 0:", "dy in range(-1,2): if dx == 0 and dy ==", "9] = 0 return a, count @timeit(\"Day 11 Part 1\")", "= parse(input_str) total_count = 0 for i in range(100): octomap,", "Part 1\") def part1(input_str, use_rust=False): octomap = parse(input_str) total_count =", "== octomap.shape[0]*octomap.shape[1]: break return step # = Test ================================================ inputlist", "= 0 return a, count @timeit(\"Day 11 Part 1\") def", "for dy in range(-1,2): if dx == 0 and dy", "frame = 0 history = [] for i in range(500):", "progress(octomap) history.append(np.copy(octomap)) input() while True: for event in pygame.event.get(): if", "timeit import numpy as np def parse(lines): return np.array([[int(c) for", "visited: return for dx in range(-1,2): for dy in 
range(-1,2):", "import timeit import numpy as np def parse(lines): return np.array([[int(c)", "flash(a,x[i],y[i]) count = np.sum(a > 9) # print('a:\\n', a) a[a", "use_rust=False): octomap = parse(input_str) step = 0 while True: step", "except: frame = 0 for i in range(octomap.shape[0]): for j", "if a[x+dx, y+dy] > 9: flash(a, x+dx, y+dy) def progress(a):", "if event.type == pygame.QUIT: pygame.quit(); sys.exit(); # erase the screen", "= np.sum(a > 9) # print('a:\\n', a) a[a > 9]", "x,y = np.where(a > 9) visited = [] for i", "import matplotlib.pyplot as plt # plt.imshow(parse(inputlist)) # plt.show() assert part1(inputlist)", "for i in range(len(x)): flash(a,x[i],y[i]) count = np.sum(a > 9)", "y+dy >= a.shape[1]: continue a[x+dx, y+dy] += 1 visited.append((x,y)) if", "a += 1 x,y = np.where(a > 9) visited =", "numpy as np def parse(lines): return np.array([[int(c) for c in", "if x+dx < 0 or x+dx >= a.shape[0]: continue if", "= pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame = 0 history = [] for i", "] def test_part1(): # import matplotlib.pyplot as plt # plt.imshow(parse(inputlist))", "> 9: flash(a, x+dx, y+dy) def progress(a): global visited a", "# = Test ================================================ inputlist = [ '5483143223', '2745854711', '5264556173',", "def plot(input_str): # octomap = parse(input_str) octomap = np.random.randint(0,9,(100,100)) pygame.init()", "assert part1(inputlist) == 1656 def test_part2(): assert part2(inputlist) == 195", "= progress(octomap) history.append(np.copy(octomap)) input() while True: for event in pygame.event.get():", "for dx in range(-1,2): for dy in range(-1,2): if dx", "j*scale, brightness) pygame.draw.rect( screen, (brightness,brightness,brightness), pygame.Rect(i*scale, j*scale, scale, scale) )", "c in line.strip()] for line in lines]) visited = []", "in range(-1,2): for dy in range(-1,2): if dx == 0", "count @timeit(\"Day 11 Part 1\") def part1(input_str, use_rust=False): octomap =", "x+dx < 0 or 
x+dx >= a.shape[0]: continue if y+dy", "visited.append((x,y)) if a[x+dx, y+dy] > 9: flash(a, x+dx, y+dy) def", "<filename>orrinjelo/aoc2021/day_11.py from orrinjelo.utils.decorators import timeit import numpy as np def", "try: octomap = history[frame] except: frame = 0 for i", "global visited a += 1 x,y = np.where(a > 9)", "a) a[a > 9] = 0 return a, count @timeit(\"Day", "11 Part 1\") def part1(input_str, use_rust=False): octomap = parse(input_str) total_count", "= progress(octomap) total_count += count return total_count @timeit(\"Day 11 Part", "# erase the screen screen.fill((255,0,0)) try: octomap = history[frame] except:", "frame #', i) octomap, _ = progress(octomap) history.append(np.copy(octomap)) input() while", "== pygame.QUIT: pygame.quit(); sys.exit(); # erase the screen screen.fill((255,0,0)) try:", "step += 1 octomap, count = progress(octomap) if count ==", "== 1656 def test_part2(): assert part2(inputlist) == 195 import pygame", "0 history = [] for i in range(500): print('Generating frame", "'4846848554', '5283751526', ] def test_part1(): # import matplotlib.pyplot as plt", "+= count return total_count @timeit(\"Day 11 Part 2\") def part2(input_str,", "= parse(input_str) octomap = np.random.randint(0,9,(100,100)) pygame.init() clock = pygame.time.Clock() scale", "== 0: brightness = 255 else: brightness = int(255.0 *", "j in range(octomap.shape[1]): if octomap[i,j] == 0: brightness = 255", "test_part1(): # import matplotlib.pyplot as plt # plt.imshow(parse(inputlist)) # plt.show()", "# plt.show() assert part1(inputlist) == 1656 def test_part2(): assert part2(inputlist)", "1 octomap, count = progress(octomap) if count == octomap.shape[0]*octomap.shape[1]: break", "while True: step += 1 octomap, count = progress(octomap) if", "y+dy] += 1 visited.append((x,y)) if a[x+dx, y+dy] > 9: flash(a,", "count return total_count @timeit(\"Day 11 Part 2\") def part2(input_str, use_rust=False):", "in range(500): print('Generating frame #', i) octomap, _ = 
progress(octomap)", "x+dx, y+dy) def progress(a): global visited a += 1 x,y", "= progress(octomap) if count == octomap.shape[0]*octomap.shape[1]: break return step #", "pygame.init() clock = pygame.time.Clock() scale = 5 screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale))", "event.type == pygame.QUIT: pygame.quit(); sys.exit(); # erase the screen screen.fill((255,0,0))", "progress(octomap) total_count += count return total_count @timeit(\"Day 11 Part 2\")", "= history[frame] except: frame = 0 for i in range(octomap.shape[0]):", "in line.strip()] for line in lines]) visited = [] def", "0 for i in range(octomap.shape[0]): for j in range(octomap.shape[1]): if", "[] for i in range(500): print('Generating frame #', i) octomap,", "dx == 0 and dy == 0: continue if x+dx", "range(len(x)): flash(a,x[i],y[i]) count = np.sum(a > 9) # print('a:\\n', a)", "part1(input_str, use_rust=False): octomap = parse(input_str) total_count = 0 for i", "lines]) visited = [] def flash(a, x, y): global visited", "print('Generating frame #', i) octomap, _ = progress(octomap) history.append(np.copy(octomap)) input()", "'6357385478', '4167524645', '2176841721', '6882881134', '4846848554', '5283751526', ] def test_part1(): #", "import pygame import sys def plot(input_str): # octomap = parse(input_str)", "octomap, _ = progress(octomap) history.append(np.copy(octomap)) input() while True: for event", "range(-1,2): if dx == 0 and dy == 0: continue", "np.array([[int(c) for c in line.strip()] for line in lines]) visited", "visited = [] def flash(a, x, y): global visited if", "if dx == 0 and dy == 0: continue if", "visited if (x,y) in visited: return for dx in range(-1,2):", "+= 1 x,y = np.where(a > 9) visited = []", "9) visited = [] for i in range(len(x)): flash(a,x[i],y[i]) count", "range(octomap.shape[1]): if octomap[i,j] == 0: brightness = 255 else: brightness", "x+dx >= a.shape[0]: continue if y+dy < 0 or y+dy", "global visited if (x,y) in visited: return for dx in", 
"range(-1,2): for dy in range(-1,2): if dx == 0 and", "True: step += 1 octomap, count = progress(octomap) if count", "import sys def plot(input_str): # octomap = parse(input_str) octomap =", "0 for i in range(100): octomap, count = progress(octomap) total_count", "inputlist = [ '5483143223', '2745854711', '5264556173', '6141336146', '6357385478', '4167524645', '2176841721',", "octomap[i,j]/10.0) print(i*scale, j*scale, brightness) pygame.draw.rect( screen, (brightness,brightness,brightness), pygame.Rect(i*scale, j*scale, scale,", "[] for i in range(len(x)): flash(a,x[i],y[i]) count = np.sum(a >", "parse(input_str) step = 0 while True: step += 1 octomap,", "255 else: brightness = int(255.0 * octomap[i,j]/10.0) print(i*scale, j*scale, brightness)", "= [ '5483143223', '2745854711', '5264556173', '6141336146', '6357385478', '4167524645', '2176841721', '6882881134',", "screen screen.fill((255,0,0)) try: octomap = history[frame] except: frame = 0", "> 9) visited = [] for i in range(len(x)): flash(a,x[i],y[i])", "= [] def flash(a, x, y): global visited if (x,y)", "0: continue if x+dx < 0 or x+dx >= a.shape[0]:", "9) # print('a:\\n', a) a[a > 9] = 0 return", "pygame.time.Clock() scale = 5 screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale))", "= 255 else: brightness = int(255.0 * octomap[i,j]/10.0) print(i*scale, j*scale,", "1\") def part1(input_str, use_rust=False): octomap = parse(input_str) total_count = 0", "def part1(input_str, use_rust=False): octomap = parse(input_str) total_count = 0 for", "0 return a, count @timeit(\"Day 11 Part 1\") def part1(input_str,", "0: brightness = 255 else: brightness = int(255.0 * octomap[i,j]/10.0)", "5 screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame = 0", "total_count = 0 for i in range(100): octomap, count =", "= 0 
history = [] for i in range(500): print('Generating", "test_part2(): assert part2(inputlist) == 195 import pygame import sys def", "screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame = 0 history", "11 Part 2\") def part2(input_str, use_rust=False): octomap = parse(input_str) step", "i) octomap, _ = progress(octomap) history.append(np.copy(octomap)) input() while True: for", "line in lines]) visited = [] def flash(a, x, y):", "print(i*scale, j*scale, brightness) pygame.draw.rect( screen, (brightness,brightness,brightness), pygame.Rect(i*scale, j*scale, scale, scale)", "return total_count @timeit(\"Day 11 Part 2\") def part2(input_str, use_rust=False): octomap", "step = 0 while True: step += 1 octomap, count", "y+dy) def progress(a): global visited a += 1 x,y =", "progress(octomap) if count == octomap.shape[0]*octomap.shape[1]: break return step # =", "progress(a): global visited a += 1 x,y = np.where(a >", "= np.where(a > 9) visited = [] for i in", "'5264556173', '6141336146', '6357385478', '4167524645', '2176841721', '6882881134', '4846848554', '5283751526', ] def", "history[frame] except: frame = 0 for i in range(octomap.shape[0]): for", "# plt.imshow(parse(inputlist)) # plt.show() assert part1(inputlist) == 1656 def test_part2():", "y+dy < 0 or y+dy >= a.shape[1]: continue a[x+dx, y+dy]", "count = np.sum(a > 9) # print('a:\\n', a) a[a >", "< 0 or y+dy >= a.shape[1]: continue a[x+dx, y+dy] +=", "and dy == 0: continue if x+dx < 0 or", "in range(100): octomap, count = progress(octomap) total_count += count return", "break return step # = Test ================================================ inputlist = [", "'2745854711', '5264556173', '6141336146', '6357385478', '4167524645', '2176841721', '6882881134', '4846848554', '5283751526', ]", "pygame import sys def plot(input_str): # octomap = parse(input_str) octomap", "the screen screen.fill((255,0,0)) try: 
octomap = history[frame] except: frame =", "def part2(input_str, use_rust=False): octomap = parse(input_str) step = 0 while", "if y+dy < 0 or y+dy >= a.shape[1]: continue a[x+dx,", "= 0 for i in range(100): octomap, count = progress(octomap)", "return a, count @timeit(\"Day 11 Part 1\") def part1(input_str, use_rust=False):", "= np.random.randint(0,9,(100,100)) pygame.init() clock = pygame.time.Clock() scale = 5 screen", "> 9) # print('a:\\n', a) a[a > 9] = 0", "# octomap = parse(input_str) octomap = np.random.randint(0,9,(100,100)) pygame.init() clock =", "1 visited.append((x,y)) if a[x+dx, y+dy] > 9: flash(a, x+dx, y+dy)", "i in range(octomap.shape[0]): for j in range(octomap.shape[1]): if octomap[i,j] ==", "int(255.0 * octomap[i,j]/10.0) print(i*scale, j*scale, brightness) pygame.draw.rect( screen, (brightness,brightness,brightness), pygame.Rect(i*scale,", "i in range(len(x)): flash(a,x[i],y[i]) count = np.sum(a > 9) #", "i in range(500): print('Generating frame #', i) octomap, _ =", "if (x,y) in visited: return for dx in range(-1,2): for", "count == octomap.shape[0]*octomap.shape[1]: break return step # = Test ================================================", "def flash(a, x, y): global visited if (x,y) in visited:", "> 9] = 0 return a, count @timeit(\"Day 11 Part", "else: brightness = int(255.0 * octomap[i,j]/10.0) print(i*scale, j*scale, brightness) pygame.draw.rect(", "import numpy as np def parse(lines): return np.array([[int(c) for c", ">= a.shape[1]: continue a[x+dx, y+dy] += 1 visited.append((x,y)) if a[x+dx,", ">= a.shape[0]: continue if y+dy < 0 or y+dy >=", "np.where(a > 9) visited = [] for i in range(len(x)):", "scale, scale) ) pygame.display.update() # surface.blit(screen, (0,0)) clock.tick(30) frame +=", "in range(octomap.shape[0]): for j in range(octomap.shape[1]): if octomap[i,j] == 0:", "from orrinjelo.utils.decorators import timeit import numpy as np def parse(lines):", "True: for event in pygame.event.get(): if event.type == pygame.QUIT: 
pygame.quit();", "a, count @timeit(\"Day 11 Part 1\") def part1(input_str, use_rust=False): octomap", "195 import pygame import sys def plot(input_str): # octomap =", "screen, (brightness,brightness,brightness), pygame.Rect(i*scale, j*scale, scale, scale) ) pygame.display.update() # surface.blit(screen,", "dx in range(-1,2): for dy in range(-1,2): if dx ==", "if count == octomap.shape[0]*octomap.shape[1]: break return step # = Test", "octomap = parse(input_str) total_count = 0 for i in range(100):", "_ = progress(octomap) history.append(np.copy(octomap)) input() while True: for event in", "a.shape[1]: continue a[x+dx, y+dy] += 1 visited.append((x,y)) if a[x+dx, y+dy]", "octomap[i,j] == 0: brightness = 255 else: brightness = int(255.0", "'6882881134', '4846848554', '5283751526', ] def test_part1(): # import matplotlib.pyplot as", "sys.exit(); # erase the screen screen.fill((255,0,0)) try: octomap = history[frame]", "flash(a, x, y): global visited if (x,y) in visited: return", "pygame.quit(); sys.exit(); # erase the screen screen.fill((255,0,0)) try: octomap =", "pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame = 0 history = []", "part2(inputlist) == 195 import pygame import sys def plot(input_str): #", "0 and dy == 0: continue if x+dx < 0", "for c in line.strip()] for line in lines]) visited =", "octomap, count = progress(octomap) if count == octomap.shape[0]*octomap.shape[1]: break return", "range(octomap.shape[0]): for j in range(octomap.shape[1]): if octomap[i,j] == 0: brightness", "plot(input_str): # octomap = parse(input_str) octomap = np.random.randint(0,9,(100,100)) pygame.init() clock", "np.sum(a > 9) # print('a:\\n', a) a[a > 9] =", "for i in range(octomap.shape[0]): for j in range(octomap.shape[1]): if octomap[i,j]", "erase the screen screen.fill((255,0,0)) try: octomap = history[frame] except: frame", "== 0 and dy == 0: continue if x+dx <", "history = 
[] for i in range(500): print('Generating frame #',", "for line in lines]) visited = [] def flash(a, x,", "1656 def test_part2(): assert part2(inputlist) == 195 import pygame import", "continue if y+dy < 0 or y+dy >= a.shape[1]: continue", "or y+dy >= a.shape[1]: continue a[x+dx, y+dy] += 1 visited.append((x,y))", "'5283751526', ] def test_part1(): # import matplotlib.pyplot as plt #", "0 or y+dy >= a.shape[1]: continue a[x+dx, y+dy] += 1", "event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit(); sys.exit(); #", "2\") def part2(input_str, use_rust=False): octomap = parse(input_str) step = 0", "pygame.Rect(i*scale, j*scale, scale, scale) ) pygame.display.update() # surface.blit(screen, (0,0)) clock.tick(30)", "dy == 0: continue if x+dx < 0 or x+dx", "parse(input_str) octomap = np.random.randint(0,9,(100,100)) pygame.init() clock = pygame.time.Clock() scale =", "= 0 while True: step += 1 octomap, count =", "0 or x+dx >= a.shape[0]: continue if y+dy < 0", "a[x+dx, y+dy] += 1 visited.append((x,y)) if a[x+dx, y+dy] > 9:", "= 0 for i in range(octomap.shape[0]): for j in range(octomap.shape[1]):", "input() while True: for event in pygame.event.get(): if event.type ==", "================================================ inputlist = [ '5483143223', '2745854711', '5264556173', '6141336146', '6357385478', '4167524645',", "pygame.draw.rect( screen, (brightness,brightness,brightness), pygame.Rect(i*scale, j*scale, scale, scale) ) pygame.display.update() #", "count = progress(octomap) if count == octomap.shape[0]*octomap.shape[1]: break return step", "= [] for i in range(500): print('Generating frame #', i)", "'4167524645', '2176841721', '6882881134', '4846848554', '5283751526', ] def test_part1(): # import", "matplotlib.pyplot as plt # plt.imshow(parse(inputlist)) # plt.show() assert part1(inputlist) ==", "'2176841721', '6882881134', '4846848554', '5283751526', ] def test_part1(): # import matplotlib.pyplot", "octomap = parse(input_str) octomap = 
np.random.randint(0,9,(100,100)) pygame.init() clock = pygame.time.Clock()", "pygame.event.get(): if event.type == pygame.QUIT: pygame.quit(); sys.exit(); # erase the", "in range(octomap.shape[1]): if octomap[i,j] == 0: brightness = 255 else:", "assert part2(inputlist) == 195 import pygame import sys def plot(input_str):", "in visited: return for dx in range(-1,2): for dy in", "= pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame = 0 history =", "count = progress(octomap) total_count += count return total_count @timeit(\"Day 11", "for j in range(octomap.shape[1]): if octomap[i,j] == 0: brightness =", "+= 1 octomap, count = progress(octomap) if count == octomap.shape[0]*octomap.shape[1]:", "for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit(); sys.exit();", "a[x+dx, y+dy] > 9: flash(a, x+dx, y+dy) def progress(a): global", "'5483143223', '2745854711', '5264556173', '6141336146', '6357385478', '4167524645', '2176841721', '6882881134', '4846848554', '5283751526',", "(x,y) in visited: return for dx in range(-1,2): for dy", "total_count += count return total_count @timeit(\"Day 11 Part 2\") def", "def progress(a): global visited a += 1 x,y = np.where(a", "continue if x+dx < 0 or x+dx >= a.shape[0]: continue", "or x+dx >= a.shape[0]: continue if y+dy < 0 or", "= Test ================================================ inputlist = [ '5483143223', '2745854711', '5264556173', '6141336146',", "def test_part1(): # import matplotlib.pyplot as plt # plt.imshow(parse(inputlist)) #", "scale) ) pygame.display.update() # surface.blit(screen, (0,0)) clock.tick(30) frame += 1", "= pygame.time.Clock() scale = 5 screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface =", "scale = 5 screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface = 
pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame", "for i in range(100): octomap, count = progress(octomap) total_count +=", "Test ================================================ inputlist = [ '5483143223', '2745854711', '5264556173', '6141336146', '6357385478',", "'6141336146', '6357385478', '4167524645', '2176841721', '6882881134', '4846848554', '5283751526', ] def test_part1():", "sys def plot(input_str): # octomap = parse(input_str) octomap = np.random.randint(0,9,(100,100))", "9: flash(a, x+dx, y+dy) def progress(a): global visited a +=", "parse(lines): return np.array([[int(c) for c in line.strip()] for line in", "surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale)) frame = 0 history = [] for", "octomap = np.random.randint(0,9,(100,100)) pygame.init() clock = pygame.time.Clock() scale = 5", "np.random.randint(0,9,(100,100)) pygame.init() clock = pygame.time.Clock() scale = 5 screen =", "== 195 import pygame import sys def plot(input_str): # octomap", "part2(input_str, use_rust=False): octomap = parse(input_str) step = 0 while True:", "plt # plt.imshow(parse(inputlist)) # plt.show() assert part1(inputlist) == 1656 def", "a[a > 9] = 0 return a, count @timeit(\"Day 11", "if octomap[i,j] == 0: brightness = 255 else: brightness =", "y): global visited if (x,y) in visited: return for dx", "y+dy] > 9: flash(a, x+dx, y+dy) def progress(a): global visited", "== 0: continue if x+dx < 0 or x+dx >=", "i in range(100): octomap, count = progress(octomap) total_count += count", "pygame.QUIT: pygame.quit(); sys.exit(); # erase the screen screen.fill((255,0,0)) try: octomap", "part1(inputlist) == 1656 def test_part2(): assert part2(inputlist) == 195 import", "as plt # plt.imshow(parse(inputlist)) # plt.show() assert part1(inputlist) == 1656", "visited a += 1 x,y = np.where(a > 9) visited", "= parse(input_str) step = 0 while True: step += 1", "history.append(np.copy(octomap)) input() while True: for event in pygame.event.get(): if 
event.type", "total_count @timeit(\"Day 11 Part 2\") def part2(input_str, use_rust=False): octomap =", "Part 2\") def part2(input_str, use_rust=False): octomap = parse(input_str) step =", "step # = Test ================================================ inputlist = [ '5483143223', '2745854711',", "brightness) pygame.draw.rect( screen, (brightness,brightness,brightness), pygame.Rect(i*scale, j*scale, scale, scale) ) pygame.display.update()", "[ '5483143223', '2745854711', '5264556173', '6141336146', '6357385478', '4167524645', '2176841721', '6882881134', '4846848554',", "1 x,y = np.where(a > 9) visited = [] for", "< 0 or x+dx >= a.shape[0]: continue if y+dy <", "* octomap[i,j]/10.0) print(i*scale, j*scale, brightness) pygame.draw.rect( screen, (brightness,brightness,brightness), pygame.Rect(i*scale, j*scale,", "screen.fill((255,0,0)) try: octomap = history[frame] except: frame = 0 for", "use_rust=False): octomap = parse(input_str) total_count = 0 for i in", "j*scale, scale, scale) ) pygame.display.update() # surface.blit(screen, (0,0)) clock.tick(30) frame", "continue a[x+dx, y+dy] += 1 visited.append((x,y)) if a[x+dx, y+dy] >", "(brightness,brightness,brightness), pygame.Rect(i*scale, j*scale, scale, scale) ) pygame.display.update() # surface.blit(screen, (0,0))", "as np def parse(lines): return np.array([[int(c) for c in line.strip()]", "print('a:\\n', a) a[a > 9] = 0 return a, count", "def test_part2(): assert part2(inputlist) == 195 import pygame import sys", "brightness = 255 else: brightness = int(255.0 * octomap[i,j]/10.0) print(i*scale,", "while True: for event in pygame.event.get(): if event.type == pygame.QUIT:", "0 while True: step += 1 octomap, count = progress(octomap)", "# import matplotlib.pyplot as plt # plt.imshow(parse(inputlist)) # plt.show() assert", "#', i) octomap, _ = progress(octomap) history.append(np.copy(octomap)) input() while True:", "[] def flash(a, x, y): global visited if (x,y) in", "line.strip()] for line in lines]) visited = [] def 
flash(a,", "parse(input_str) total_count = 0 for i in range(100): octomap, count", "frame = 0 for i in range(octomap.shape[0]): for j in", "brightness = int(255.0 * octomap[i,j]/10.0) print(i*scale, j*scale, brightness) pygame.draw.rect( screen,", "clock = pygame.time.Clock() scale = 5 screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale)) surface", "plt.show() assert part1(inputlist) == 1656 def test_part2(): assert part2(inputlist) ==", "in range(len(x)): flash(a,x[i],y[i]) count = np.sum(a > 9) # print('a:\\n',", "for i in range(500): print('Generating frame #', i) octomap, _", "in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit(); sys.exit(); # erase", "@timeit(\"Day 11 Part 2\") def part2(input_str, use_rust=False): octomap = parse(input_str)", "= int(255.0 * octomap[i,j]/10.0) print(i*scale, j*scale, brightness) pygame.draw.rect( screen, (brightness,brightness,brightness),", "plt.imshow(parse(inputlist)) # plt.show() assert part1(inputlist) == 1656 def test_part2(): assert", "= [] for i in range(len(x)): flash(a,x[i],y[i]) count = np.sum(a", "return for dx in range(-1,2): for dy in range(-1,2): if", "octomap.shape[0]*octomap.shape[1]: break return step # = Test ================================================ inputlist =", "orrinjelo.utils.decorators import timeit import numpy as np def parse(lines): return", "np def parse(lines): return np.array([[int(c) for c in line.strip()] for", "range(100): octomap, count = progress(octomap) total_count += count return total_count", "range(500): print('Generating frame #', i) octomap, _ = progress(octomap) history.append(np.copy(octomap))", "x, y): global visited if (x,y) in visited: return for" ]
[ "p_1_prime[1] p_2_prime = (218, 216) x_2 = p_2_prime[0] y_2 =", "f_k_x * (p_11 - c_1_prime) r_12 = f_k_y * (p_12", "= 1.417 film_back_height = 0.945 x_center = 320 y_center =", "P_1[0] p_12 = P_1[1] p_13 = P_1[2] P_2 = (0.659,", "= {rho_1_prime}\") print(f\"rho_2_prime = {rho_2_prime}\") print(\"------------------\") r_11 = f_k_x *", "r_22 = f_k_y * (p_22 - c_2_prime) r_23 = 1", "* v_2_prime) / f_k_y c_2_prime_alt = (f_k_y * p_12 -", "k_x v_1_prime = (y_1 - y_center) / k_y u_2_prime =", "= 640 camera_height = 480 film_back_width = 1.417 film_back_height =", "p_2_prime = (218, 216) x_2 = p_2_prime[0] y_2 = p_2_prime[1]", "(y_1 - y_center) / k_y u_2_prime = (x_2 - x_center)", "l_13 = rho_1_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R:", "k_x = camera_width / film_back_width k_y = camera_height / film_back_height", "= rho_2_prime * u_2_prime l_22 = rho_2_prime * v_2_prime l_23", "= P_1[0] p_12 = P_1[1] p_13 = P_1[2] P_2 =", "p_23 = P_2[2] p_1_prime = (52, 163) x_1 = p_1_prime[0]", "v_2_prime = (y_2 - y_center) / k_y c_1_prime = (f_k_x", "y_center) / k_y u_2_prime = (x_2 - x_center) / k_x", "(p_23 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) /", "= (x_1 - x_center) / k_x v_1_prime = (y_1 -", "l_12 = rho_1_prime * v_1_prime l_13 = rho_1_prime * 1", "= (0.659, -0.071, 2.082) p_21 = P_2[0] p_22 = P_2[1]", "(p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) / f_k_y c_2_prime_alt", "c_3_prime rho_2_prime = p_23 - c_3_prime print(f\"C' = ({c_1_prime}, {c_2_prime},", "p_2_prime[0] y_2 = p_2_prime[1] f = 1.378 k_x = camera_width", "p_11) / (f_k_x * (1 - u_2_prime/u_1_prime)) c_2_prime = (f_k_y", "{l_12}, {l_13})\") print(f\"R: ({r_11}, {r_12}, {r_13})\") print(\"------------------\") r_21 = f_k_x", "f_k_y c_3_prime = p_13 - (f_k_x / u_1_prime) * (p_11", "1.378 k_x = camera_width / film_back_width k_y = camera_height /", "c_1_prime) rho_1_prime = p_13 - c_3_prime rho_2_prime = p_23 -", "f_k_x = f * k_x f_k_x = f # 
f_k_y", "v_1_prime) / f_k_y c_3_prime = p_13 - (f_k_x / u_1_prime)", "- c_2_prime) r_13 = 1 * (p_13 - c_3_prime) l_11", "1.417 film_back_height = 0.945 x_center = 320 y_center = 240", "f_k_y = f u_1_prime = (x_1 - x_center) / k_x", "f_k_y = f * k_y f_k_y = f u_1_prime =", "= p_1_prime[1] p_2_prime = (218, 216) x_2 = p_2_prime[0] y_2", "(f_k_y * p_12 - (p_13 - (p_13*u_1_prime - f_k_x*(p_11 -", "- c_1_prime) rho_1_prime = p_13 - c_3_prime rho_2_prime = p_23", "print(\"------------------\") r_21 = f_k_x * (p_21 - c_1_prime) r_22 =", "= f # f_k_y = f * k_y f_k_y =", "print(f\"rho_1_prime = {rho_1_prime}\") print(f\"rho_2_prime = {rho_2_prime}\") print(\"------------------\") r_11 = f_k_x", "* v_2_prime l_23 = rho_2_prime * 1 print(f\"L: ({l_11}, {l_12},", "/ u_1_prime) * (p_11 - c_1_prime) rho_1_prime = p_13 -", "c_2_prime = (f_k_y * p_22 - (p_23 - (p_13*u_1_prime -", "f_k_y c_2_prime_alt = (f_k_y * p_12 - (p_13 - (p_13*u_1_prime", "u_1_prime l_12 = rho_1_prime * v_1_prime l_13 = rho_1_prime *", "c_2_prime_alt = (f_k_y * p_12 - (p_13 - (p_13*u_1_prime -", "c_3_prime) l_11 = rho_1_prime * u_1_prime l_12 = rho_1_prime *", "v_1_prime l_13 = rho_1_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\")", "- (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) / f_k_y", "u_1_prime) * (p_11 - c_1_prime) rho_1_prime = p_13 - c_3_prime", "- (p_13 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime)", "1 * (p_13 - c_3_prime) l_11 = rho_1_prime * u_1_prime", "= f u_1_prime = (x_1 - x_center) / k_x v_1_prime", "- y_center) / k_y u_2_prime = (x_2 - x_center) /", "- f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) / f_k_y c_2_prime_alt =", "- c_2_prime) r_23 = 1 * (p_23 - c_3_prime) l_21", "P_1[2] P_2 = (0.659, -0.071, 2.082) p_21 = P_2[0] p_22", "-0.071, 2.082) p_21 = P_2[0] p_22 = P_2[1] p_23 =", "(f_k_y * p_22 - (p_23 - (p_13*u_1_prime - f_k_x*(p_11 -", "= 1.378 k_x = camera_width / film_back_width k_y = camera_height", "- c_3_prime print(f\"C' = 
({c_1_prime}, {c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt = {c_2_prime_alt}\")", "r_13 = 1 * (p_13 - c_3_prime) l_11 = rho_1_prime", "c_2_prime) r_23 = 1 * (p_23 - c_3_prime) l_21 =", "* k_y f_k_y = f u_1_prime = (x_1 - x_center)", "= camera_height / film_back_height # f_k_x = f * k_x", "- (f_k_x / u_1_prime) * (p_11 - c_1_prime) rho_1_prime =", "= {rho_2_prime}\") print(\"------------------\") r_11 = f_k_x * (p_11 - c_1_prime)", "(f_k_x * p_21 + (p_13 - p_23) * u_2_prime -", "{r_13})\") print(\"------------------\") r_21 = f_k_x * (p_21 - c_1_prime) r_22", "# f_k_y = f * k_y f_k_y = f u_1_prime", "- y_center) / k_y c_1_prime = (f_k_x * p_21 +", "* (p_12 - c_2_prime) r_13 = 1 * (p_13 -", "(0.659, -0.071, 2.082) p_21 = P_2[0] p_22 = P_2[1] p_23", "= P_2[2] p_1_prime = (52, 163) x_1 = p_1_prime[0] y_1", "= (x_2 - x_center) / k_x v_2_prime = (y_2 -", "f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) / f_k_y c_3_prime = p_13", "c_3_prime = p_13 - (f_k_x / u_1_prime) * (p_11 -", "camera_height = 480 film_back_width = 1.417 film_back_height = 0.945 x_center", "u_2_prime/u_1_prime)) c_2_prime = (f_k_y * p_22 - (p_23 - (p_13*u_1_prime", "* u_1_prime l_12 = rho_1_prime * v_1_prime l_13 = rho_1_prime", "= 1 * (p_23 - c_3_prime) l_21 = rho_2_prime *", "* u_2_prime l_22 = rho_2_prime * v_2_prime l_23 = rho_2_prime", "y_center = 240 P_1 = (-0.023, -0.261, 2.376) p_11 =", "/ f_k_y c_2_prime_alt = (f_k_y * p_12 - (p_13 -", "(x_2 - x_center) / k_x v_2_prime = (y_2 - y_center)", "= p_13 - (f_k_x / u_1_prime) * (p_11 - c_1_prime)", "= rho_1_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11},", "film_back_width = 1.417 film_back_height = 0.945 x_center = 320 y_center", "film_back_width k_y = camera_height / film_back_height # f_k_x = f", "rho_2_prime * u_2_prime l_22 = rho_2_prime * v_2_prime l_23 =", "/ k_x v_2_prime = (y_2 - y_center) / k_y c_1_prime", "(p_11 - c_1_prime) rho_1_prime = p_13 - c_3_prime rho_2_prime =", "k_y = camera_height / 
film_back_height # f_k_x = f *", "f_k_y * (p_22 - c_2_prime) r_23 = 1 * (p_23", "(p_22 - c_2_prime) r_23 = 1 * (p_23 - c_3_prime)", "= (f_k_x * p_21 + (p_13 - p_23) * u_2_prime", "= (52, 163) x_1 = p_1_prime[0] y_1 = p_1_prime[1] p_2_prime", "print(f\"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt = {c_2_prime_alt}\") print(f\"rho_1_prime =", "f # f_k_y = f * k_y f_k_y = f", "r_12 = f_k_y * (p_12 - c_2_prime) r_13 = 1", "- u_2_prime/u_1_prime * f_k_x * p_11) / (f_k_x * (1", "- c_1_prime))/u_1_prime) * v_2_prime) / f_k_y c_2_prime_alt = (f_k_y *", "= (218, 216) x_2 = p_2_prime[0] y_2 = p_2_prime[1] f", "(x_1 - x_center) / k_x v_1_prime = (y_1 - y_center)", "p_13 - c_3_prime rho_2_prime = p_23 - c_3_prime print(f\"C' =", "* p_12 - (p_13 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime)", "0.945 x_center = 320 y_center = 240 P_1 = (-0.023,", "320 y_center = 240 P_1 = (-0.023, -0.261, 2.376) p_11", "k_x f_k_x = f # f_k_y = f * k_y", "u_1_prime = (x_1 - x_center) / k_x v_1_prime = (y_1", "rho_1_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11}, {r_12},", "= (y_1 - y_center) / k_y u_2_prime = (x_2 -", "# f_k_x = f * k_x f_k_x = f #", "= p_13 - c_3_prime rho_2_prime = p_23 - c_3_prime print(f\"C'", "= P_2[0] p_22 = P_2[1] p_23 = P_2[2] p_1_prime =", "* (p_11 - c_1_prime) r_12 = f_k_y * (p_12 -", "film_back_height # f_k_x = f * k_x f_k_x = f", "= f_k_y * (p_22 - c_2_prime) r_23 = 1 *", "= rho_1_prime * u_1_prime l_12 = rho_1_prime * v_1_prime l_13", "* p_22 - (p_23 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime)", "P_2[2] p_1_prime = (52, 163) x_1 = p_1_prime[0] y_1 =", "640 camera_height = 480 film_back_width = 1.417 film_back_height = 0.945", "p_12 - (p_13 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) *", "* (p_23 - c_3_prime) l_21 = rho_2_prime * u_2_prime l_22", "- c_1_prime) r_12 = f_k_y * (p_12 - c_2_prime) r_13", "k_y c_1_prime = (f_k_x * p_21 + (p_13 - p_23)", "(218, 216) x_2 = p_2_prime[0] y_2 
= p_2_prime[1] f =", "f_k_y * (p_12 - c_2_prime) r_13 = 1 * (p_13", "({r_11}, {r_12}, {r_13})\") print(\"------------------\") r_21 = f_k_x * (p_21 -", "* p_11) / (f_k_x * (1 - u_2_prime/u_1_prime)) c_2_prime =", "c_3_prime print(f\"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt = {c_2_prime_alt}\") print(f\"rho_1_prime", "= P_2[1] p_23 = P_2[2] p_1_prime = (52, 163) x_1", "= f * k_x f_k_x = f # f_k_y =", "l_11 = rho_1_prime * u_1_prime l_12 = rho_1_prime * v_1_prime", "= rho_1_prime * v_1_prime l_13 = rho_1_prime * 1 print(f\"L:", "rho_2_prime = p_23 - c_3_prime print(f\"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})\")", "P_2 = (0.659, -0.071, 2.082) p_21 = P_2[0] p_22 =", "p_11 = P_1[0] p_12 = P_1[1] p_13 = P_1[2] P_2", "camera_width / film_back_width k_y = camera_height / film_back_height # f_k_x", "-0.261, 2.376) p_11 = P_1[0] p_12 = P_1[1] p_13 =", "p_12 = P_1[1] p_13 = P_1[2] P_2 = (0.659, -0.071,", "p_1_prime[0] y_1 = p_1_prime[1] p_2_prime = (218, 216) x_2 =", "/ k_x v_1_prime = (y_1 - y_center) / k_y u_2_prime", "k_x v_2_prime = (y_2 - y_center) / k_y c_1_prime =", "p_23 - c_3_prime print(f\"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt =", "/ (f_k_x * (1 - u_2_prime/u_1_prime)) c_2_prime = (f_k_y *", "(p_11 - c_1_prime) r_12 = f_k_y * (p_12 - c_2_prime)", "rho_1_prime * u_1_prime l_12 = rho_1_prime * v_1_prime l_13 =", "- c_1_prime) r_22 = f_k_y * (p_22 - c_2_prime) r_23", "k_y u_2_prime = (x_2 - x_center) / k_x v_2_prime =", "* (p_22 - c_2_prime) r_23 = 1 * (p_23 -", "= rho_2_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11},", "- p_23) * u_2_prime - u_2_prime/u_1_prime * f_k_x * p_11)", "= {c_2_prime_alt}\") print(f\"rho_1_prime = {rho_1_prime}\") print(f\"rho_2_prime = {rho_2_prime}\") print(\"------------------\") r_11", "(f_k_x / u_1_prime) * (p_11 - c_1_prime) rho_1_prime = p_13", "= p_2_prime[0] y_2 = p_2_prime[1] f = 1.378 k_x =", "c_1_prime = (f_k_x * p_21 + (p_13 - p_23) *", 
"p_1_prime = (52, 163) x_1 = p_1_prime[0] y_1 = p_1_prime[1]", "= camera_width / film_back_width k_y = camera_height / film_back_height #", "rho_2_prime * v_2_prime l_23 = rho_2_prime * 1 print(f\"L: ({l_11},", "* (p_13 - c_3_prime) l_11 = rho_1_prime * u_1_prime l_12", "= 480 film_back_width = 1.417 film_back_height = 0.945 x_center =", "/ f_k_y c_3_prime = p_13 - (f_k_x / u_1_prime) *", "x_center = 320 y_center = 240 P_1 = (-0.023, -0.261,", "print(\"------------------\") r_11 = f_k_x * (p_11 - c_1_prime) r_12 =", "* (p_21 - c_1_prime) r_22 = f_k_y * (p_22 -", "l_23 = rho_2_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R:", "- c_3_prime) l_21 = rho_2_prime * u_2_prime l_22 = rho_2_prime", "p_13 = P_1[2] P_2 = (0.659, -0.071, 2.082) p_21 =", "- c_1_prime))/u_1_prime) * v_1_prime) / f_k_y c_3_prime = p_13 -", "480 film_back_width = 1.417 film_back_height = 0.945 x_center = 320", "= f * k_y f_k_y = f u_1_prime = (x_1", "f_k_x * p_11) / (f_k_x * (1 - u_2_prime/u_1_prime)) c_2_prime", "* (1 - u_2_prime/u_1_prime)) c_2_prime = (f_k_y * p_22 -", "rho_1_prime = p_13 - c_3_prime rho_2_prime = p_23 - c_3_prime", "= f_k_y * (p_12 - c_2_prime) r_13 = 1 *", "({c_1_prime}, {c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt = {c_2_prime_alt}\") print(f\"rho_1_prime = {rho_1_prime}\") print(f\"rho_2_prime", "u_2_prime - u_2_prime/u_1_prime * f_k_x * p_11) / (f_k_x *", "- (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) / f_k_y", "= p_2_prime[1] f = 1.378 k_x = camera_width / film_back_width", "{c_3_prime})\") print(f\"c_2_prime_alt = {c_2_prime_alt}\") print(f\"rho_1_prime = {rho_1_prime}\") print(f\"rho_2_prime = {rho_2_prime}\")", "v_2_prime) / f_k_y c_2_prime_alt = (f_k_y * p_12 - (p_13", "* 1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11}, {r_12}, {r_13})\")", "u_2_prime l_22 = rho_2_prime * v_2_prime l_23 = rho_2_prime *", "(y_2 - y_center) / k_y c_1_prime = (f_k_x * p_21", "- (p_23 - (p_13*u_1_prime - f_k_x*(p_11 - 
c_1_prime))/u_1_prime) * v_2_prime)", "k_y f_k_y = f u_1_prime = (x_1 - x_center) /", "1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11}, {r_12}, {r_13})\") print(\"------------------\")", "f = 1.378 k_x = camera_width / film_back_width k_y =", "{r_12}, {r_13})\") print(\"------------------\") r_21 = f_k_x * (p_21 - c_1_prime)", "= p_23 - c_3_prime print(f\"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt", "u_2_prime = (x_2 - x_center) / k_x v_2_prime = (y_2", "P_2[1] p_23 = P_2[2] p_1_prime = (52, 163) x_1 =", "f * k_x f_k_x = f # f_k_y = f", "216) x_2 = p_2_prime[0] y_2 = p_2_prime[1] f = 1.378", "= (f_k_y * p_12 - (p_13 - (p_13*u_1_prime - f_k_x*(p_11", "= rho_2_prime * v_2_prime l_23 = rho_2_prime * 1 print(f\"L:", "y_2 = p_2_prime[1] f = 1.378 k_x = camera_width /", "P_2[0] p_22 = P_2[1] p_23 = P_2[2] p_1_prime = (52,", "camera_height / film_back_height # f_k_x = f * k_x f_k_x", "= 0.945 x_center = 320 y_center = 240 P_1 =", "p_21 + (p_13 - p_23) * u_2_prime - u_2_prime/u_1_prime *", "c_2_prime) r_13 = 1 * (p_13 - c_3_prime) l_11 =", "({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11}, {r_12}, {r_13})\") print(\"------------------\") r_21 =", "p_23) * u_2_prime - u_2_prime/u_1_prime * f_k_x * p_11) /", "- u_2_prime/u_1_prime)) c_2_prime = (f_k_y * p_22 - (p_23 -", "x_center) / k_x v_1_prime = (y_1 - y_center) / k_y", "* v_1_prime l_13 = rho_1_prime * 1 print(f\"L: ({l_11}, {l_12},", "- c_3_prime) l_11 = rho_1_prime * u_1_prime l_12 = rho_1_prime", "= 240 P_1 = (-0.023, -0.261, 2.376) p_11 = P_1[0]", "= (y_2 - y_center) / k_y c_1_prime = (f_k_x *", "- x_center) / k_x v_2_prime = (y_2 - y_center) /", "{rho_2_prime}\") print(\"------------------\") r_11 = f_k_x * (p_11 - c_1_prime) r_12", "= f_k_x * (p_11 - c_1_prime) r_12 = f_k_y *", "= P_1[1] p_13 = P_1[2] P_2 = (0.659, -0.071, 2.082)", "= p_1_prime[0] y_1 = p_1_prime[1] p_2_prime = (218, 216) x_2", "(f_k_x * (1 - u_2_prime/u_1_prime)) c_2_prime = (f_k_y * p_22", "(p_12 - 
c_2_prime) r_13 = 1 * (p_13 - c_3_prime)", "r_21 = f_k_x * (p_21 - c_1_prime) r_22 = f_k_y", "(p_13 - p_23) * u_2_prime - u_2_prime/u_1_prime * f_k_x *", "f_k_x * (p_21 - c_1_prime) r_22 = f_k_y * (p_22", "y_center) / k_y c_1_prime = (f_k_x * p_21 + (p_13", "camera_width = 640 camera_height = 480 film_back_width = 1.417 film_back_height", "/ k_y u_2_prime = (x_2 - x_center) / k_x v_2_prime", "print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11}, {r_12}, {r_13})\") print(\"------------------\") r_21", "l_21 = rho_2_prime * u_2_prime l_22 = rho_2_prime * v_2_prime", "/ film_back_width k_y = camera_height / film_back_height # f_k_x =", "+ (p_13 - p_23) * u_2_prime - u_2_prime/u_1_prime * f_k_x", "x_2 = p_2_prime[0] y_2 = p_2_prime[1] f = 1.378 k_x", "* (p_11 - c_1_prime) rho_1_prime = p_13 - c_3_prime rho_2_prime", "f u_1_prime = (x_1 - x_center) / k_x v_1_prime =", "/ k_y c_1_prime = (f_k_x * p_21 + (p_13 -", "print(f\"c_2_prime_alt = {c_2_prime_alt}\") print(f\"rho_1_prime = {rho_1_prime}\") print(f\"rho_2_prime = {rho_2_prime}\") print(\"------------------\")", "= f_k_x * (p_21 - c_1_prime) r_22 = f_k_y *", "P_1 = (-0.023, -0.261, 2.376) p_11 = P_1[0] p_12 =", "/ film_back_height # f_k_x = f * k_x f_k_x =", "rho_2_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\") print(f\"R: ({r_11}, {r_12},", "= 320 y_center = 240 P_1 = (-0.023, -0.261, 2.376)", "(p_23 - c_3_prime) l_21 = rho_2_prime * u_2_prime l_22 =", "print(f\"rho_2_prime = {rho_2_prime}\") print(\"------------------\") r_11 = f_k_x * (p_11 -", "* p_21 + (p_13 - p_23) * u_2_prime - u_2_prime/u_1_prime", "p_2_prime[1] f = 1.378 k_x = camera_width / film_back_width k_y", "u_2_prime/u_1_prime * f_k_x * p_11) / (f_k_x * (1 -", "p_22 - (p_23 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) *", "r_11 = f_k_x * (p_11 - c_1_prime) r_12 = f_k_y", "p_21 = P_2[0] p_22 = P_2[1] p_23 = P_2[2] p_1_prime", "= ({c_1_prime}, {c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt = {c_2_prime_alt}\") 
print(f\"rho_1_prime = {rho_1_prime}\")", "= (-0.023, -0.261, 2.376) p_11 = P_1[0] p_12 = P_1[1]", "* k_x f_k_x = f # f_k_y = f *", "(p_13 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) /", "c_3_prime) l_21 = rho_2_prime * u_2_prime l_22 = rho_2_prime *", "v_2_prime l_23 = rho_2_prime * 1 print(f\"L: ({l_11}, {l_12}, {l_13})\")", "= P_1[2] P_2 = (0.659, -0.071, 2.082) p_21 = P_2[0]", "- x_center) / k_x v_1_prime = (y_1 - y_center) /", "240 P_1 = (-0.023, -0.261, 2.376) p_11 = P_1[0] p_12", "(p_21 - c_1_prime) r_22 = f_k_y * (p_22 - c_2_prime)", "p_22 = P_2[1] p_23 = P_2[2] p_1_prime = (52, 163)", "(52, 163) x_1 = p_1_prime[0] y_1 = p_1_prime[1] p_2_prime =", "y_1 = p_1_prime[1] p_2_prime = (218, 216) x_2 = p_2_prime[0]", "print(f\"R: ({r_11}, {r_12}, {r_13})\") print(\"------------------\") r_21 = f_k_x * (p_21", "2.082) p_21 = P_2[0] p_22 = P_2[1] p_23 = P_2[2]", "f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) / f_k_y c_2_prime_alt = (f_k_y", "{rho_1_prime}\") print(f\"rho_2_prime = {rho_2_prime}\") print(\"------------------\") r_11 = f_k_x * (p_11", "* f_k_x * p_11) / (f_k_x * (1 - u_2_prime/u_1_prime))", "= 1 * (p_13 - c_3_prime) l_11 = rho_1_prime *", "{l_13})\") print(f\"R: ({r_11}, {r_12}, {r_13})\") print(\"------------------\") r_21 = f_k_x *", "* u_2_prime - u_2_prime/u_1_prime * f_k_x * p_11) / (f_k_x", "- c_3_prime rho_2_prime = p_23 - c_3_prime print(f\"C' = ({c_1_prime},", "(p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) / f_k_y c_3_prime", "(-0.023, -0.261, 2.376) p_11 = P_1[0] p_12 = P_1[1] p_13", "(1 - u_2_prime/u_1_prime)) c_2_prime = (f_k_y * p_22 - (p_23", "r_23 = 1 * (p_23 - c_3_prime) l_21 = rho_2_prime", "c_1_prime))/u_1_prime) * v_2_prime) / f_k_y c_2_prime_alt = (f_k_y * p_12", "film_back_height = 0.945 x_center = 320 y_center = 240 P_1", "{c_2_prime_alt}\") print(f\"rho_1_prime = {rho_1_prime}\") print(f\"rho_2_prime = {rho_2_prime}\") print(\"------------------\") r_11 =", "* v_1_prime) / 
f_k_y c_3_prime = p_13 - (f_k_x /", "163) x_1 = p_1_prime[0] y_1 = p_1_prime[1] p_2_prime = (218,", "{c_2_prime}, {c_3_prime})\") print(f\"c_2_prime_alt = {c_2_prime_alt}\") print(f\"rho_1_prime = {rho_1_prime}\") print(f\"rho_2_prime =", "(p_13 - c_3_prime) l_11 = rho_1_prime * u_1_prime l_12 =", "rho_1_prime * v_1_prime l_13 = rho_1_prime * 1 print(f\"L: ({l_11},", "c_1_prime) r_12 = f_k_y * (p_12 - c_2_prime) r_13 =", "x_1 = p_1_prime[0] y_1 = p_1_prime[1] p_2_prime = (218, 216)", "p_13 - (f_k_x / u_1_prime) * (p_11 - c_1_prime) rho_1_prime", "v_1_prime = (y_1 - y_center) / k_y u_2_prime = (x_2", "= (f_k_y * p_22 - (p_23 - (p_13*u_1_prime - f_k_x*(p_11", "f * k_y f_k_y = f u_1_prime = (x_1 -", "f_k_x = f # f_k_y = f * k_y f_k_y", "- f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) / f_k_y c_3_prime =", "c_1_prime) r_22 = f_k_y * (p_22 - c_2_prime) r_23 =", "1 * (p_23 - c_3_prime) l_21 = rho_2_prime * u_2_prime", "l_22 = rho_2_prime * v_2_prime l_23 = rho_2_prime * 1", "2.376) p_11 = P_1[0] p_12 = P_1[1] p_13 = P_1[2]", "x_center) / k_x v_2_prime = (y_2 - y_center) / k_y", "P_1[1] p_13 = P_1[2] P_2 = (0.659, -0.071, 2.082) p_21", "c_1_prime))/u_1_prime) * v_1_prime) / f_k_y c_3_prime = p_13 - (f_k_x" ]
[ "train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True) sub.columns = ['subject1','subject2']", "26 19:15:34 2020 @author: deviantpadam \"\"\" import pandas as pd", "in zip(vocab[:-1],vocab[1:])] freq = Counter(ngram) filterbi = [bigram for bigram", "print(cor[i][j]+' '+cor[i][j+1]) word_count+=1 return text def _get_bigrams(corpus,min_count): text = np.copy(corpus)", "for bigram in freq.most_common() if bigram[1]>min_count] bigrams = [\" \".join(bigram[0])", "_add_bigrams(text): for idx in range(len(text)): length=len(text[idx])-1 word_count=0 while word_count<length: if", "'+text[idx][word_count+1] in bigrams: text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1]) length =", "Counter from torch2vec.data import DataPreparation from torch2vec.torch2vec import DM #", "i,j in zip(vocab[:-1],vocab[1:])] freq = Counter(ngram) filterbi = [bigram for", "len(text[idx])-1 # print(cor[i][j]+' '+cor[i][j+1]) word_count+=1 return text def _get_bigrams(corpus,min_count): text", "tasks.fillna('none',inplace=True) tasks.name = 'task' train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return train", "import Counter from torch2vec.data import DataPreparation from torch2vec.torch2vec import DM", "text[idx].remove(text[idx][word_count+1]) length = len(text[idx])-1 # print(cor[i][j]+' '+cor[i][j+1]) word_count+=1 return text", "if bigram[1]>min_count] bigrams = [\" \".join(bigram[0]) for bigram in filterbi]", "with concurrent.futures.ProcessPoolExecutor(workers) as executor: result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0)", "filterbi] return bigrams data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams = 
_get_bigrams(data.corpus.values,min_count=700)", "DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values) bigrams =", "for i,j in zip(vocab[:-1],vocab[1:])] freq = Counter(ngram) filterbi = [bigram", "= phraser(data.corpus.values) data.vocab_builder() doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model =", "'+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name = 'text' corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1)", "= pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1): if workers==-1: workers = os.cpu_count() chunks", "doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda() num_workers =", "tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name = 'task' train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1)", "def _add_bigrams(text): for idx in range(len(text)): length=len(text[idx])-1 word_count=0 while word_count<length:", "cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True) sub.columns = ['subject1','subject2'] sub.fillna('none',inplace=True) tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0]", "{} cores'.format(workers))),axis=0) executor.shutdown(wait=True) # result = _add_bigrams(data) global bigrams del", "# result = _add_bigrams(data) global bigrams del bigrams return pd.DataFrame({'text':np.array(result)})['text']", "import DataPreparation from torch2vec.torch2vec import DM # train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t')", "'+train['subject2']+' '+train['task'] corpus.name = 'text' corpus = 
pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1):", "as np import concurrent.futures import os import tqdm from collections", "text[idx][word_count]+' '+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1]) length = len(text[idx])-1 # print(cor[i][j]+' '+cor[i][j+1]) word_count+=1", "sub.columns = ['subject1','subject2'] sub.fillna('none',inplace=True) tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name =", "bigrams data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus =", "bigrams return pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text): for idx in range(len(text)): length=len(text[idx])-1", "pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def", "= [bigram for bigram in freq.most_common() if bigram[1]>min_count] bigrams =", "# -*- coding: utf-8 -*- \"\"\" Created on Wed Aug", "import tqdm from collections import Counter from torch2vec.data import DataPreparation", "['subject1','subject2'] sub.fillna('none',inplace=True) tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name = 'task' train", "for bigram in filterbi] return bigrams data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize()", "DataPreparation from torch2vec.torch2vec import DM # train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') #", "= (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name = 'task' train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True)", "tasks.name = 'task' train = 
pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return train train", "= text[idx][word_count]+' '+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1]) length = len(text[idx])-1 # print(cor[i][j]+' '+cor[i][j+1])", "sub.drop([2,3],axis=1,inplace=True) sub.columns = ['subject1','subject2'] sub.fillna('none',inplace=True) tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name", "in filterbi] return bigrams data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams =", "'+train['task'] corpus.name = 'text' corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1): if", "'+cor[i][j+1]) word_count+=1 return text def _get_bigrams(corpus,min_count): text = np.copy(corpus) vocab", "for word in sen] ngram = [(i,j) for i,j in", "sub.fillna('none',inplace=True) tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name = 'task' train =", "import numpy as np import concurrent.futures import os import tqdm", "[(i,j) for i,j in zip(vocab[:-1],vocab[1:])] freq = Counter(ngram) filterbi =", "bigram[1]>min_count] bigrams = [\" \".join(bigram[0]) for bigram in filterbi] return", "in text for word in sen] ngram = [(i,j) for", "bigrams = _get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values) data.vocab_builder() doc, context, target_noise_ids", "[\" \".join(bigram[0]) for bigram in filterbi] return bigrams data =", "'task' train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return train train = cleaner(train)", "bigram in filterbi] return bigrams data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams", "return pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text): for idx in range(len(text)): 
length=len(text[idx])-1 word_count=0", "= _get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values) bigrams = _get_bigrams(data.corpus.values,min_count=500) data.corpus =", "context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda() num_workers = os.cpu_count()", "as pd import numpy as np import concurrent.futures import os", "train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train =", "train train = cleaner(train) corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+'", "if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams: text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1])", "train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name = 'text' corpus", "'+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1]) length = len(text[idx])-1 # print(cor[i][j]+' '+cor[i][j+1]) word_count+=1 return", "import DM # train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train = pd.read_csv('/home/deviantpadam/Downloads/example", "in sen] ngram = [(i,j) for i,j in zip(vocab[:-1],vocab[1:])] freq", "= pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return train train = cleaner(train) corpus =", "[word for sen in text for word in sen] ngram", "_get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values) data.vocab_builder() doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10)", "numpy as np import concurrent.futures import os import tqdm from", "np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers) as 
executor: result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {}", "= pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True)", "\"\"\" Created on Wed Aug 26 19:15:34 2020 @author: deviantpadam", "bigrams = [\" \".join(bigram[0]) for bigram in filterbi] return bigrams", "word in sen] ngram = [(i,j) for i,j in zip(vocab[:-1],vocab[1:])]", "word_count=0 while word_count<length: if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams: text[idx][word_count] =", "return train train = cleaner(train) corpus = train['authors']+' '+train['title']+' '+train['summary']+'", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on", "result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0) executor.shutdown(wait=True) # result =", "= [\" \".join(bigram[0]) for bigram in filterbi] return bigrams data", "= DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values) bigrams", "-*- coding: utf-8 -*- \"\"\" Created on Wed Aug 26", "= [(i,j) for i,j in zip(vocab[:-1],vocab[1:])] freq = Counter(ngram) filterbi", "length = len(text[idx])-1 # print(cor[i][j]+' '+cor[i][j+1]) word_count+=1 return text def", "import os import tqdm from collections import Counter from torch2vec.data", "as executor: result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0) executor.shutdown(wait=True) #", "freq = Counter(ngram) filterbi = [bigram for bigram in freq.most_common()", "torch2vec.torch2vec import DM # train = 
pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train =", "= ['subject1','subject2'] sub.fillna('none',inplace=True) tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name = 'task'", "result = _add_bigrams(data) global bigrams del bigrams return pd.DataFrame({'text':np.array(result)})['text'] def", "del bigrams return pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text): for idx in range(len(text)):", "= cleaner(train) corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task']", "in freq.most_common() if bigram[1]>min_count] bigrams = [\" \".join(bigram[0]) for bigram", "= pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True) sub.columns = ['subject1','subject2'] sub.fillna('none',inplace=True)", "19:15:34 2020 @author: deviantpadam \"\"\" import pandas as pd import", "if workers==-1: workers = os.cpu_count() chunks = np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers)", "def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True) sub.columns = ['subject1','subject2'] sub.fillna('none',inplace=True) tasks =", "word_count+=1 return text def _get_bigrams(corpus,min_count): text = np.copy(corpus) vocab =", "vocab = [word for sen in text for word in", "(train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True) tasks.name = 'task' train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return", "_add_bigrams(data) global bigrams del bigrams return pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text): for", "phraser(data.corpus.values) bigrams = 
_get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values) data.vocab_builder() doc, context,", "os import tqdm from collections import Counter from torch2vec.data import", "# train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train):", "= phraser(data.corpus.values) bigrams = _get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values) data.vocab_builder() doc,", "data.corpus = phraser(data.corpus.values) data.vocab_builder() doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model", "cleaner(train) corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name", "pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text): for idx in range(len(text)): length=len(text[idx])-1 word_count=0 while", "pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1): if workers==-1: workers = os.cpu_count() chunks =", "pd import numpy as np import concurrent.futures import os import", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed", "text = np.copy(corpus) vocab = [word for sen in text", "np import concurrent.futures import os import tqdm from collections import", "data.vocab_builder() doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda() num_workers", "@author: deviantpadam \"\"\" import pandas as pd import numpy as", "pandas as pd import numpy as np import concurrent.futures import", "= np.copy(corpus) vocab = [word for sen in text for", "pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) 
sub.drop([2,3],axis=1,inplace=True) sub.columns", "np.copy(corpus) vocab = [word for sen in text for word", "deviantpadam \"\"\" import pandas as pd import numpy as np", "def _get_bigrams(corpus,min_count): text = np.copy(corpus) vocab = [word for sen", "train.fillna('none',inplace=True) return train train = cleaner(train) corpus = train['authors']+' '+train['title']+'", "'+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name = 'text' corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def", "= pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t')", "sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True) sub.columns = ['subject1','subject2'] sub.fillna('none',inplace=True) tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0] tasks.fillna('none',inplace=True)", "text def _get_bigrams(corpus,min_count): text = np.copy(corpus) vocab = [word for", "length=len(text[idx])-1 word_count=0 while word_count<length: if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams: text[idx][word_count]", "chunks = np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers) as executor: result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing", "_get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values) bigrams = _get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values)", "return text def _get_bigrams(corpus,min_count): text = np.copy(corpus) vocab = [word", "range(len(text)): length=len(text[idx])-1 word_count=0 while word_count<length: if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams:", "bigrams: text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1] 
text[idx].remove(text[idx][word_count+1]) length = len(text[idx])-1 #", "\"\"\" import pandas as pd import numpy as np import", "ngram = [(i,j) for i,j in zip(vocab[:-1],vocab[1:])] freq = Counter(ngram)", "Counter(ngram) filterbi = [bigram for bigram in freq.most_common() if bigram[1]>min_count]", "train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return train train = cleaner(train) corpus", "= np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers) as executor: result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using", "= len(text[idx])-1 # print(cor[i][j]+' '+cor[i][j+1]) word_count+=1 return text def _get_bigrams(corpus,min_count):", "filterbi = [bigram for bigram in freq.most_common() if bigram[1]>min_count] bigrams", "return bigrams data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus", "executor: result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0) executor.shutdown(wait=True) # result", "DM # train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv')", "on Wed Aug 26 19:15:34 2020 @author: deviantpadam \"\"\" import", "phraser(data.corpus.values) data.vocab_builder() doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda()", "torch2vec.data import DataPreparation from torch2vec.torch2vec import DM # train =", "target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda() num_workers = os.cpu_count() 
model.fit(doc_ids=doc,context=context,target_noise_ids=target_noise_ids,epochs=20,batch_size=8000,num_workers=num_workers)", "while word_count<length: if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams: text[idx][word_count] = text[idx][word_count]+'", "pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return train train = cleaner(train) corpus = train['authors']+'", "for sen in text for word in sen] ngram =", "= data.get_data(window_size=5,num_noise_words=10) model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda() num_workers = os.cpu_count() model.fit(doc_ids=doc,context=context,target_noise_ids=target_noise_ids,epochs=20,batch_size=8000,num_workers=num_workers) model.save_model(data.document_ids,data.args,file_name='weights')", "Aug 26 19:15:34 2020 @author: deviantpadam \"\"\" import pandas as", "= Counter(ngram) filterbi = [bigram for bigram in freq.most_common() if", "= 'task' train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1) train.fillna('none',inplace=True) return train train =", "= [word for sen in text for word in sen]", "_get_bigrams(corpus,min_count): text = np.copy(corpus) vocab = [word for sen in", "workers = os.cpu_count() chunks = np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers) as executor:", "bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values) bigrams = _get_bigrams(data.corpus.values,min_count=500) data.corpus", "data.tokenize() bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values) bigrams = _get_bigrams(data.corpus.values,min_count=500)", "os.cpu_count() chunks = np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers) as executor: result =", "from collections import Counter from torch2vec.data import DataPreparation from torch2vec.torch2vec", "import pandas as pd import numpy as 
np import concurrent.futures", "cores'.format(workers))),axis=0) executor.shutdown(wait=True) # result = _add_bigrams(data) global bigrams del bigrams", "train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True)", "= np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0) executor.shutdown(wait=True) # result = _add_bigrams(data)", "in range(len(text)): length=len(text[idx])-1 word_count=0 while word_count<length: if text[idx][word_count]+' '+text[idx][word_count+1] in", "sen] ngram = [(i,j) for i,j in zip(vocab[:-1],vocab[1:])] freq =", "sen in text for word in sen] ngram = [(i,j)", "bigram in freq.most_common() if bigram[1]>min_count] bigrams = [\" \".join(bigram[0]) for", "# train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train", "# print(cor[i][j]+' '+cor[i][j+1]) word_count+=1 return text def _get_bigrams(corpus,min_count): text =", "freq.most_common() if bigram[1]>min_count] bigrams = [\" \".join(bigram[0]) for bigram in", "'+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name = 'text' corpus =", "import concurrent.futures import os import tqdm from collections import Counter", "global bigrams del bigrams return pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text): for idx", "train = cleaner(train) corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+'", "coding: utf-8 -*- \"\"\" Created on Wed Aug 26 19:15:34", "data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values)", "corpus.name = 
'text' corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1): if workers==-1:", "corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name =", "def phraser(corpus,workers=-1): if workers==-1: workers = os.cpu_count() chunks = np.array_split(corpus,workers)", "utf-8 -*- \"\"\" Created on Wed Aug 26 19:15:34 2020", "phraser(corpus,workers=-1): if workers==-1: workers = os.cpu_count() chunks = np.array_split(corpus,workers) with", "using {} cores'.format(workers))),axis=0) executor.shutdown(wait=True) # result = _add_bigrams(data) global bigrams", "np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0) executor.shutdown(wait=True) # result = _add_bigrams(data) global", "word_count<length: if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams: text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1]", "in bigrams: text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1]) length = len(text[idx])-1", "2020 @author: deviantpadam \"\"\" import pandas as pd import numpy", "text for word in sen] ngram = [(i,j) for i,j", "= _get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values) data.vocab_builder() doc, context, target_noise_ids =", "\".join(bigram[0]) for bigram in filterbi] return bigrams data = DataPreparation(corpus.reset_index(),f_size=3)", "collections import Counter from torch2vec.data import DataPreparation from torch2vec.torch2vec import", "Created on Wed Aug 26 19:15:34 2020 @author: deviantpadam \"\"\"", "-*- \"\"\" Created on Wed Aug 26 19:15:34 2020 @author:", "from torch2vec.torch2vec import DM # train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\\t') # train", "= os.cpu_count() chunks = 
np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers) as executor: result", "= _add_bigrams(data) global bigrams del bigrams return pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text):", "= 'text' corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1): if workers==-1: workers", "'text' corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1): if workers==-1: workers =", "corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) def phraser(corpus,workers=-1): if workers==-1: workers = os.cpu_count()", "from torch2vec.data import DataPreparation from torch2vec.torch2vec import DM # train", "tqdm from collections import Counter from torch2vec.data import DataPreparation from", "executor.shutdown(wait=True) # result = _add_bigrams(data) global bigrams del bigrams return", "bigrams del bigrams return pd.DataFrame({'text':np.array(result)})['text'] def _add_bigrams(text): for idx in", "text[idx][word_count]+' '+text[idx][word_count+1] in bigrams: text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1]) length", "[bigram for bigram in freq.most_common() if bigram[1]>min_count] bigrams = [\"", "idx in range(len(text)): length=len(text[idx])-1 word_count=0 while word_count<length: if text[idx][word_count]+' '+text[idx][word_count+1]", "text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1] text[idx].remove(text[idx][word_count+1]) length = len(text[idx])-1 # print(cor[i][j]+'", "concurrent.futures.ProcessPoolExecutor(workers) as executor: result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0) executor.shutdown(wait=True)", "data.corpus = phraser(data.corpus.values) bigrams = 
_get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values) data.vocab_builder()", "pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True) sub.columns = ['subject1','subject2'] sub.fillna('none',inplace=True) tasks", "(1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\\t') def cleaner(train): sub=(train['subjects'].str.lower()).str.split(',',expand=True) sub.drop([2,3],axis=1,inplace=True) sub.columns =", "concurrent.futures import os import tqdm from collections import Counter from", "for idx in range(len(text)): length=len(text[idx])-1 word_count=0 while word_count<length: if text[idx][word_count]+'", "zip(vocab[:-1],vocab[1:])] freq = Counter(ngram) filterbi = [bigram for bigram in", "workers==-1: workers = os.cpu_count() chunks = np.array_split(corpus,workers) with concurrent.futures.ProcessPoolExecutor(workers) as", "Wed Aug 26 19:15:34 2020 @author: deviantpadam \"\"\" import pandas", "<reponame>paper2code/torch2vec-restful-service #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created", "= train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name = 'text'" ]
[ "authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (2, Document( id='doc2',", "enactment_date=datetime.date.today() )), (2, Document( id='doc2', title='Document 2', category='main', doctype='decree', authority='Office',", "MockParameter @pytest.mark.parametrize('valid,cfg', [ (True, { 'host': 'http://oereblex.example.com', 'language': 'de', 'canton':", "assert isinstance(reference, DocumentRecord) assert reference.title == {'de': 'Reference'} assert reference.canton", "DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office import OfficeRecord from tests.mockrequest import MockParameter", "for idx, record in enumerate(records): if i == 1: assert", "record.published_from == datetime.date.today() assert record.canton == 'BL' assert record.text_at_web ==", "m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(),", "with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml',", "'host': 'http://oereblex.example.com', 'language': 'german', 'canton': 'BL' }), (False, { 'host':", "id='doc1', title='Document 1', category='main', doctype='decree', authority='Office', files=[], enactment_date=datetime.date.today() )) ])", "category='main', doctype='edict', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (2,", "authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (4, Document( id='doc1',", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True) source.read(MockParameter(), 100) assert len(source.records)", "from geolink_formatter.entity import 
Document, File from requests.auth import HTTPBasicAuth from", "DocumentRecord) assert reference.title == {'de': 'Reference'} assert reference.canton == 'BL'", "[ (1, Document( id='doc1', title='Document 1', category='main', doctype='edict', authority='Office', files=[File('File", "'BL' assert document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references)", "assert reference.text_at_web == {'de': '/api/attachments/4'} def test_read(): with requests_mock.mock() as", "assert document.canton == 'BL' assert document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313'", "result): file_ = File('Test', '/api/attachments/1', 'main') document = Document(id='test', title='Test',", "canton='BL', pass_version=True) source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_version():", "'BL' assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)} assert len(record.references)", "language='de', canton='BL') source.read(MockParameter(), 100) assert len(source.records) == 2 document =", "def test_authentication(): auth = { 'username': 'test', 'password': '<PASSWORD>' }", "LegalProvisionRecord) assert record.title == {'de': 'Document {0}'.format(i)} assert record.published_from ==", "{'de': 'Document {0}'.format(i)} assert record.published_from == datetime.date.today() assert record.canton ==", "in enumerate(records): if i == 1: assert isinstance(record, DocumentRecord) elif", "Document([], id='1', title='Test') result = {'de': 'Test'} assert OEREBlexSource._get_document_title(document, File(),", "with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com',", "[ (True, { 'host': 'http://oereblex.example.com', 'language': 'de', 'canton': 'BL' }),", "assert document.responsible_office.name == {'de': 
'Landeskanzlei'} assert document.canton == 'BL' assert", "'language': 'de', 'canton': 'BL' }), (False, { 'language': 'de', 'canton':", "@pytest.mark.parametrize('i,document', [ (1, Document( id='doc1', title='Document 1', category='main', doctype='edict', authority='Office',", "as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True)", "authority='Office', files=[File('Reference file', '/api/attachments/4', 'main')], enactment_date=datetime.date.today() ) ] if i", "'http://oereblex.example.com', 'language': 'german', 'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com',", "id='doc2', title='Document 2', category='main', doctype='decree', authority='Office', files=[ File('File 2', '/api/attachments/2',", "assert record.title == {'de': 'Document {0}'.format(i)} assert record.published_from == datetime.date.today()", "'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language': 'german', 'canton': 'BL'", "== 2 document = source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office,", "doctype='invalid', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (4, Document(", ")), (2, Document( id='doc2', title='Document 2', category='main', doctype='decree', authority='Office', files=[", "as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params", "== {'de': '/api/attachments/4'} def test_read(): with requests_mock.mock() as m: with", "Document( id='doc1', title='Document 1', category='main', doctype='decree', authority='Office', files=[], enactment_date=datetime.date.today() ))", "'main'), File('File 3', '/api/attachments/3', 'additional') ], enactment_date=datetime.date.today() 
)), (3, Document(", "import DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office import OfficeRecord from tests.mockrequest import", "files=[File('Reference file', '/api/attachments/4', 'main')], enactment_date=datetime.date.today() ) ] if i ==", "= { 'username': 'test', 'password': '<PASSWORD>' } source = OEREBlexSource(host='http://oereblex.example.com',", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', mapping={'municipality': 'subtype'}) assert source._get_mapped_value(document, key, language=language) ==", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth) assert isinstance(source._auth, HTTPBasicAuth) def", "f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True, version='1.0.0')", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params = MockParameter() params.set_language('fr') source.read(params, 100) assert", "def test_get_document_records(i, document): language = 'de' source = OEREBlexSource(host='http://oereblex.example.com', language='de',", "canton='BL') params = MockParameter() params.set_language('fr') source.read(params, 100) assert len(source.records) ==", "len(document.references) == 5 def test_read_related_decree_as_main(): with requests_mock.mock() as m: with", "'main') document = Document(id='test', title='Test', category='main', doctype='decree', files=[file_], enactment_date=datetime.date.today(), subtype='Liestal',", "'Landeskanzlei'} assert document.text_at_web == { 'fr': 'http://oereblex.example.com/api/attachments/313' } def test_authentication():", "== 2: assert isinstance(record, LegalProvisionRecord) assert record.title == {'de': 'Document", "Document( id='doc1', title='Document 1', category='main', doctype='invalid', authority='Office', files=[File('File 1', 
'/api/attachments/1',", "'de' }) ]) def test_init(valid, cfg): if valid: assert isinstance(OEREBlexSource(**cfg),", "language='de', canton='BL') params = MockParameter() params.set_language('fr') source.read(params, 100) assert len(source.records)", "'/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (2, Document( id='doc2', title='Document 2', category='main',", "requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read())", "import datetime import pytest import requests_mock from geolink_formatter.entity import Document,", "100) assert len(source.records) == 2 def test_read_with_specified_language(): with requests_mock.mock() as", "'de' source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') references = [ Document(", "m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source =", "f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params =", "def test_read_with_specified_version(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(), 100) assert len(source.records)", "'BL' }), (False, { 'language': 'de', 'canton': 'BL' }), (False,", "test_read(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True) source.read(MockParameter(), 100) assert len(source.records) == 2", "cfg): if valid: assert isinstance(OEREBlexSource(**cfg), OEREBlexSource) else: with 
pytest.raises(AssertionError): OEREBlexSource(**cfg)", "language=language) == result @pytest.mark.parametrize('i,document', [ (1, Document( id='doc1', title='Document 1',", "test_authentication(): auth = { 'username': 'test', 'password': '<PASSWORD>' } source", "enactment_date=datetime.date.today() )), (4, Document( id='doc1', title='Document 1', category='main', doctype='decree', authority='Office',", "auth = { 'username': 'test', 'password': '<PASSWORD>' } source =", "4 def test_read_with_version_in_url(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb')", "if valid: assert isinstance(OEREBlexSource(**cfg), OEREBlexSource) else: with pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result',", "assert document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) ==", "'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 5 def test_read_related_decree_as_main(): with", "isinstance(record, LegalProvisionRecord) assert record.title == {'de': 'Document {0}'.format(i)} assert record.published_from", "isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name == {'de': 'Landeskanzlei'} assert document.canton ==", "m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source =", "authority='Office', files=[], enactment_date=datetime.date.today() )) ]) def test_get_document_records(i, document): language =", "{ 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 5 def test_read_related_decree_as_main():", "test_read_with_version_in_url(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:", "{ 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 4 def 
test_read_with_version_in_url():", "authority='Office') source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', mapping={'municipality': 'subtype'}) assert source._get_mapped_value(document,", "source._get_document_records(document, language, references) elif i == 4: assert source._get_document_records(document, language,", "len(document.references) == 4 def test_read_with_version_in_url(): with requests_mock.mock() as m: with", "'password': '<PASSWORD>' } source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth) assert", "records = source._get_document_records(document, language, references) assert len(records) == i for", "len(source.records) == 2 document = source.records[0] assert isinstance(document, DocumentRecord) assert", "'Liestal'}) ]) def test_get_mapped_value(key, language, result): file_ = File('Test', '/api/attachments/1',", "'fr': 'http://oereblex.example.com/api/attachments/313' } def test_authentication(): auth = { 'username': 'test',", "3', '/api/attachments/3', 'additional') ], enactment_date=datetime.date.today() )), (3, Document( id='doc1', title='Document", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', mapping={'municipality': 'subtype'}) assert source._get_mapped_value(document, key,", "== i for idx, record in enumerate(records): if i ==", "(False, { 'host': 'http://oereblex.example.com', 'language': 'de' }) ]) def test_init(valid,", "references) elif i == 4: assert source._get_document_records(document, language, references) ==", "as m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source", "{ 'host': 'http://oereblex.example.com', 'language': 'de', 'canton': 'BL' }), (False, {", "== 4 def test_read_with_version_in_url(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml',", "== { 'fr': 
'http://oereblex.example.com/api/attachments/313' } def test_authentication(): auth = {", "m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source =", "'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language': 'de' }) ])", "pass_version=True) source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_version(): with", "'de', 'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language': 'german',", "assert len(source.records) == 2 def test_read_with_specified_version(): with requests_mock.mock() as m:", "id='1', title='Test') result = {'de': 'Test'} assert OEREBlexSource._get_document_title(document, File(), 'de')", "references) == [] else: records = source._get_document_records(document, language, references) assert", "= MockParameter() params.set_language('fr') source.read(params, 100) assert len(source.records) == 2 document", "content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True) source.read(MockParameter(), 100) assert", "assert record.published_from == datetime.date.today() assert record.canton == 'BL' assert record.text_at_web", "key, language=language) == result @pytest.mark.parametrize('i,document', [ (1, Document( id='doc1', title='Document", "enumerate(records): if i == 1: assert isinstance(record, DocumentRecord) elif i", "(False, { 'host': 'http://oereblex.example.com', 'language': 'german', 'canton': 'BL' }), (False,", "document.text_at_web == { 'fr': 'http://oereblex.example.com/api/attachments/313' } def test_authentication(): auth =", "assert len(record.references) == 1 reference = record.references[0] assert isinstance(reference, DocumentRecord)", "3: with pytest.raises(TypeError): source._get_document_records(document, language, references) elif i == 4:", "@pytest.mark.parametrize('valid,cfg', [ (True, 
{ 'host': 'http://oereblex.example.com', 'language': 'de', 'canton': 'BL'", "title='Document 2', category='main', doctype='decree', authority='Office', files=[ File('File 2', '/api/attachments/2', 'main'),", "1', category='main', doctype='invalid', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )),", "len(source.records) == 2 document = source.records[0] assert document.responsible_office.name == {'fr':", "with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com',", "== 3 document = source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office,", "document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 4", ")) ]) def test_get_document_records(i, document): language = 'de' source =", "= [ Document( id='ref', title='Reference', category='related', doctype='edict', authority='Office', files=[File('Reference file',", ")), (3, Document( id='doc1', title='Document 1', category='main', doctype='invalid', authority='Office', files=[File('File", "references = [ Document( id='ref', title='Reference', category='related', doctype='edict', authority='Office', files=[File('Reference", "assert document.text_at_web == { 'fr': 'http://oereblex.example.com/api/attachments/313' } def test_authentication(): auth", "doctype='decree', authority='Office', files=[], enactment_date=datetime.date.today() )) ]) def test_get_document_records(i, document): language", "== 'BL' assert reference.text_at_web == {'de': '/api/attachments/4'} def test_read(): with", "assert len(source.records) == 2 document = source.records[0] assert isinstance(document, DocumentRecord)", "{ 'fr': 'http://oereblex.example.com/api/attachments/313' } def test_authentication(): auth = { 'username':", 
"document = Document([], id='1', title='Test') result = {'de': 'Test'} assert", "record in enumerate(records): if i == 1: assert isinstance(record, DocumentRecord)", "'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 4 def test_read_with_version_in_url(): with", "as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source", "isinstance(source._auth, HTTPBasicAuth) def test_get_document_title(): document = Document([], id='1', title='Test') result", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True) source.read(MockParameter(), 100) assert len(source.records) ==", "OEREBlexSource) else: with pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [ ('official_title', None, None),", "assert len(document.references) == 5 def test_read_related_decree_as_main(): with requests_mock.mock() as m:", "{ 'language': 'de', 'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com',", "{'de': 'Reference'} assert reference.canton == 'BL' assert reference.text_at_web == {'de':", "canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(), 100) assert len(source.records) == 2 def", "{'de': 'Landeskanzlei'} assert document.canton == 'BL' assert document.text_at_web == {", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') references = [ Document( id='ref', title='Reference',", "title='Document 1', category='main', doctype='decree', authority='Office', files=[], enactment_date=datetime.date.today() )) ]) def", "'/api/attachments/4'} def test_read(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb')", "open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = 
OEREBlexSource(host='http://oereblex.example.com', language='de',", "[ Document( id='ref', title='Reference', category='related', doctype='edict', authority='Office', files=[File('Reference file', '/api/attachments/4',", "== 2 def test_read_with_specified_version(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.0.0.xml',", "def test_read_with_version_in_url(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as", "enactment_date=datetime.date.today() )), (3, Document( id='doc1', title='Document 1', category='main', doctype='invalid', authority='Office',", "with pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [ ('official_title', None, None), ('municipality', None,", "files=[file_], enactment_date=datetime.date.today(), subtype='Liestal', authority='Office') source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', mapping={'municipality':", "test_read_related_decree_as_main(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:", "idx)} assert len(record.references) == 1 reference = record.references[0] assert isinstance(reference,", "related_decree_as_main=True) source.read(MockParameter(), 100) assert len(source.records) == 3 document = source.records[0]", "source._get_document_records(document, language, references) assert len(records) == i for idx, record", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') references = [ Document( id='ref',", "'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')", "== 1 reference = record.references[0] assert isinstance(reference, DocumentRecord) assert reference.title", "files=[], enactment_date=datetime.date.today() )) ]) def test_get_document_records(i, document): 
language = 'de'", "== {'de': 'Reference'} assert reference.canton == 'BL' assert reference.text_at_web ==", "references) assert len(records) == i for idx, record in enumerate(records):", "canton='BL', auth=auth) assert isinstance(source._auth, HTTPBasicAuth) def test_get_document_title(): document = Document([],", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True) source.read(MockParameter(), 100) assert len(source.records) ==", "'Liestal'), ('municipality', 'de', {'de': 'Liestal'}) ]) def test_get_mapped_value(key, language, result):", "language='de', canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(), 100) assert len(source.records) == 2", "language='de', canton='BL', pass_version=True) source.read(MockParameter(), 100) assert len(source.records) == 2 def", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth) assert isinstance(source._auth, HTTPBasicAuth) def test_get_document_title():", "'Landeskanzlei'} assert document.canton == 'BL' assert document.text_at_web == { 'de':", "'additional') ], enactment_date=datetime.date.today() )), (3, Document( id='doc1', title='Document 1', category='main',", "= source._get_document_records(document, language, references) assert len(records) == i for idx,", "from tests.mockrequest import MockParameter @pytest.mark.parametrize('valid,cfg', [ (True, { 'host': 'http://oereblex.example.com',", "record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)} assert len(record.references) == 1", "]) def test_init(valid, cfg): if valid: assert isinstance(OEREBlexSource(**cfg), OEREBlexSource) else:", "if i == 3: with pytest.raises(TypeError): source._get_document_records(document, language, references) elif", "'main')], enactment_date=datetime.date.today() )), (4, Document( id='doc1', title='Document 1', category='main', doctype='decree',", "category='related', doctype='edict', authority='Office', 
files=[File('Reference file', '/api/attachments/4', 'main')], enactment_date=datetime.date.today() ) ]", "as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True)", "== 'BL' assert document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' } assert", "datetime.date.today() assert record.canton == 'BL' assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i", "from pyramid_oereb.contrib.sources.document import OEREBlexSource from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord from", "assert isinstance(record, DocumentRecord) elif i == 2: assert isinstance(record, LegalProvisionRecord)", "subtype='Liestal', authority='Office') source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', mapping={'municipality': 'subtype'}) assert", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') references = [ Document( id='ref', title='Reference', category='related',", "'username': 'test', 'password': '<PASSWORD>' } source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',", "= Document([], id='1', title='Test') result = {'de': 'Test'} assert OEREBlexSource._get_document_title(document,", "result = {'de': 'Test'} assert OEREBlexSource._get_document_title(document, File(), 'de') == result", "OfficeRecord from tests.mockrequest import MockParameter @pytest.mark.parametrize('valid,cfg', [ (True, { 'host':", "{ 'host': 'http://oereblex.example.com', 'language': 'de' }) ]) def test_init(valid, cfg):", "= File('Test', '/api/attachments/1', 'main') document = Document(id='test', title='Test', category='main', doctype='decree',", "authority='Office', files=[ File('File 2', '/api/attachments/2', 'main'), File('File 3', '/api/attachments/3', 'additional')", "]) def test_get_mapped_value(key, 
language, result): file_ = File('Test', '/api/attachments/1', 'main')", "2', category='main', doctype='decree', authority='Office', files=[ File('File 2', '/api/attachments/2', 'main'), File('File", "+ idx)} assert len(record.references) == 1 reference = record.references[0] assert", "as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True,", "content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True) source.read(MockParameter(), 100) assert", "== {'de': '/api/attachments/{fid}'.format(fid=i + idx)} assert len(record.references) == 1 reference", "params.set_language('fr') source.read(params, 100) assert len(source.records) == 2 document = source.records[0]", "i == 3: with pytest.raises(TypeError): source._get_document_records(document, language, references) elif i", "'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language': 'de' })", "pyramid_oereb.lib.records.office import OfficeRecord from tests.mockrequest import MockParameter @pytest.mark.parametrize('valid,cfg', [ (True,", "Document( id='ref', title='Reference', category='related', doctype='edict', authority='Office', files=[File('Reference file', '/api/attachments/4', 'main')],", "DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name == {'de': 'Landeskanzlei'} assert", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True) source.read(MockParameter(), 100) assert len(source.records) == 3", "import OEREBlexSource from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office import", "100) assert len(source.records) == 2 document = source.records[0] assert document.responsible_office.name", "'canton': 'BL' }), 
(False, { 'language': 'de', 'canton': 'BL' }),", "(False, { 'language': 'de', 'canton': 'BL' }), (False, { 'host':", "id='ref', title='Reference', category='related', doctype='edict', authority='Office', files=[File('Reference file', '/api/attachments/4', 'main')], enactment_date=datetime.date.today()", "pyramid_oereb.contrib.sources.document import OEREBlexSource from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office", "4: assert source._get_document_records(document, language, references) == [] else: records =", "language='de', canton='BL', mapping={'municipality': 'subtype'}) assert source._get_mapped_value(document, key, language=language) == result", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True) source.read(MockParameter(), 100) assert len(source.records)", "'Document {0}'.format(i)} assert record.published_from == datetime.date.today() assert record.canton == 'BL'", "DocumentRecord) elif i == 2: assert isinstance(record, LegalProvisionRecord) assert record.title", "'<PASSWORD>' } source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth) assert isinstance(source._auth,", "'Reference'} assert reference.canton == 'BL' assert reference.text_at_web == {'de': '/api/attachments/4'}", "reference.text_at_web == {'de': '/api/attachments/4'} def test_read(): with requests_mock.mock() as m:", "i == 4: assert source._get_document_records(document, language, references) == [] else:", "'BL' assert reference.text_at_web == {'de': '/api/attachments/4'} def test_read(): with requests_mock.mock()", "'/api/attachments/2', 'main'), File('File 3', '/api/attachments/3', 'additional') ], enactment_date=datetime.date.today() )), (3,", "'/api/attachments/{fid}'.format(fid=i + idx)} assert len(record.references) == 1 reference = record.references[0]", "None, 'Liestal'), ('municipality', 'de', {'de': 
'Liestal'}) ]) def test_get_mapped_value(key, language,", "assert source._get_document_records(document, language, references) == [] else: records = source._get_document_records(document,", "MockParameter() params.set_language('fr') source.read(params, 100) assert len(source.records) == 2 document =", "== { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 4 def", "def test_get_document_title(): document = Document([], id='1', title='Test') result = {'de':", "category='main', doctype='decree', authority='Office', files=[], enactment_date=datetime.date.today() )) ]) def test_get_document_records(i, document):", "('municipality', None, 'Liestal'), ('municipality', 'de', {'de': 'Liestal'}) ]) def test_get_mapped_value(key,", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(), 100) assert len(source.records) ==", "'/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (4, Document( id='doc1', title='Document 1', category='main',", "requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read())", "(1, Document( id='doc1', title='Document 1', category='main', doctype='edict', authority='Office', files=[File('File 1',", "OfficeRecord) assert document.responsible_office.name == {'de': 'Landeskanzlei'} assert document.canton == 'BL'", "2: assert isinstance(record, LegalProvisionRecord) assert record.title == {'de': 'Document {0}'.format(i)}", "1 reference = record.references[0] assert isinstance(reference, DocumentRecord) assert reference.title ==", "'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',", "-*- coding: utf-8 -*- import datetime import pytest import requests_mock", 
"pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [ ('official_title', None, None), ('municipality', None, 'Liestal'),", "{ 'host': 'http://oereblex.example.com', 'language': 'german', 'canton': 'BL' }), (False, {", "isinstance(OEREBlexSource(**cfg), OEREBlexSource) else: with pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [ ('official_title', None,", "{0}'.format(i)} assert record.published_from == datetime.date.today() assert record.canton == 'BL' assert", "<reponame>geo-bl-ch/pyramid_oereb # -*- coding: utf-8 -*- import datetime import pytest", "def test_read_with_specified_language(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as", "reference.title == {'de': 'Reference'} assert reference.canton == 'BL' assert reference.text_at_web", "assert reference.title == {'de': 'Reference'} assert reference.canton == 'BL' assert", "category='main', doctype='invalid', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (4,", "else: with pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [ ('official_title', None, None), ('municipality',", "import Document, File from requests.auth import HTTPBasicAuth from pyramid_oereb.contrib.sources.document import", "id='doc1', title='Document 1', category='main', doctype='invalid', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')],", "open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de',", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(), 100) assert len(source.records) ==", "language='de', canton='BL', related_decree_as_main=True) 
source.read(MockParameter(), 100) assert len(source.records) == 3 document", "id='doc1', title='Document 1', category='main', doctype='edict', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')],", "3 document = source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord)", "assert len(source.records) == 2 document = source.records[0] assert document.responsible_office.name ==", "assert len(document.references) == 4 def test_read_with_version_in_url(): with requests_mock.mock() as m:", "assert isinstance(record, LegalProvisionRecord) assert record.title == {'de': 'Document {0}'.format(i)} assert", "m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source =", "import HTTPBasicAuth from pyramid_oereb.contrib.sources.document import OEREBlexSource from pyramid_oereb.lib.records.documents import DocumentRecord,", "2 def test_read_with_specified_language(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb')", "open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de',", "record.references[0] assert isinstance(reference, DocumentRecord) assert reference.title == {'de': 'Reference'} assert", "m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True) source.read(MockParameter(), 100)", "OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [ ('official_title', None, None), ('municipality', None, 'Liestal'), ('municipality',", "record.title == {'de': 'Document {0}'.format(i)} assert record.published_from == datetime.date.today() assert", "[ ('official_title', None, 
None), ('municipality', None, 'Liestal'), ('municipality', 'de', {'de':", "source.read(params, 100) assert len(source.records) == 2 document = source.records[0] assert", "language='de', canton='BL') references = [ Document( id='ref', title='Reference', category='related', doctype='edict',", "'http://oereblex.example.com', 'language': 'de', 'canton': 'BL' }), (False, { 'language': 'de',", "test_get_document_records(i, document): language = 'de' source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')", "pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office import OfficeRecord from tests.mockrequest", "len(source.records) == 2 def test_read_with_specified_language(): with requests_mock.mock() as m: with", "idx, record in enumerate(records): if i == 1: assert isinstance(record,", "} source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth) assert isinstance(source._auth, HTTPBasicAuth)", "File from requests.auth import HTTPBasicAuth from pyramid_oereb.contrib.sources.document import OEREBlexSource from", "m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(), 100) assert", "File('Test', '/api/attachments/1', 'main') document = Document(id='test', title='Test', category='main', doctype='decree', files=[file_],", "geolink_formatter.entity import Document, File from requests.auth import HTTPBasicAuth from pyramid_oereb.contrib.sources.document", "import MockParameter @pytest.mark.parametrize('valid,cfg', [ (True, { 'host': 'http://oereblex.example.com', 'language': 'de',", "language, result): file_ = File('Test', '/api/attachments/1', 'main') document = Document(id='test',", "open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', 
content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de',", "f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True) source.read(MockParameter(),", "i for idx, record in enumerate(records): if i == 1:", "category='main', doctype='decree', files=[file_], enactment_date=datetime.date.today(), subtype='Liestal', authority='Office') source = OEREBlexSource(host='http://oereblex.example.com', language='de',", "datetime import pytest import requests_mock from geolink_formatter.entity import Document, File", "language, references) elif i == 4: assert source._get_document_records(document, language, references)", "[] else: records = source._get_document_records(document, language, references) assert len(records) ==", "'host': 'http://oereblex.example.com', 'language': 'de', 'canton': 'BL' }), (False, { 'language':", "with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com',", "'test', 'password': '<PASSWORD>' } source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth)", "pytest import requests_mock from geolink_formatter.entity import Document, File from requests.auth", "params = MockParameter() params.set_language('fr') source.read(params, 100) assert len(source.records) == 2", "'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 5 def test_read_related_decree_as_main(): with requests_mock.mock()", "Document( id='doc1', title='Document 1', category='main', doctype='edict', authority='Office', files=[File('File 1', '/api/attachments/1',", "assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name == 
{'de':", "isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name == {'de': 'Landeskanzlei'}", "{'de': '/api/attachments/4'} def test_read(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml',", "'main')], enactment_date=datetime.date.today() ) ] if i == 3: with pytest.raises(TypeError):", "elif i == 2: assert isinstance(record, LegalProvisionRecord) assert record.title ==", "= record.references[0] assert isinstance(reference, DocumentRecord) assert reference.title == {'de': 'Reference'}", "file_ = File('Test', '/api/attachments/1', 'main') document = Document(id='test', title='Test', category='main',", "test_get_document_title(): document = Document([], id='1', title='Test') result = {'de': 'Test'}", "'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',", "-*- import datetime import pytest import requests_mock from geolink_formatter.entity import", "source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name ==", "'subtype'}) assert source._get_mapped_value(document, key, language=language) == result @pytest.mark.parametrize('i,document', [ (1,", "], enactment_date=datetime.date.today() )), (3, Document( id='doc1', title='Document 1', category='main', doctype='invalid',", "tests.mockrequest import MockParameter @pytest.mark.parametrize('valid,cfg', [ (True, { 'host': 'http://oereblex.example.com', 'language':", "document = source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert", "}), (False, { 'host': 'http://oereblex.example.com', 'language': 'de' }) ]) def", "with pytest.raises(TypeError): source._get_document_records(document, language, references) 
elif i == 4: assert", "}), (False, { 'language': 'de', 'canton': 'BL' }), (False, {", "== {'de': 'Document {0}'.format(i)} assert record.published_from == datetime.date.today() assert record.canton", "= Document(id='test', title='Test', category='main', doctype='decree', files=[file_], enactment_date=datetime.date.today(), subtype='Liestal', authority='Office') source", "enactment_date=datetime.date.today() ) ] if i == 3: with pytest.raises(TypeError): source._get_document_records(document,", "isinstance(reference, DocumentRecord) assert reference.title == {'de': 'Reference'} assert reference.canton ==", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(), 100) assert len(source.records) == 2 document", "test_init(valid, cfg): if valid: assert isinstance(OEREBlexSource(**cfg), OEREBlexSource) else: with pytest.raises(AssertionError):", "{'de': 'Liestal'}) ]) def test_get_mapped_value(key, language, result): file_ = File('Test',", "} def test_authentication(): auth = { 'username': 'test', 'password': '<PASSWORD>'", "assert record.canton == 'BL' assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i +", "HTTPBasicAuth) def test_get_document_title(): document = Document([], id='1', title='Test') result =", "HTTPBasicAuth from pyramid_oereb.contrib.sources.document import OEREBlexSource from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord", "== datetime.date.today() assert record.canton == 'BL' assert record.text_at_web == {'de':", "2 document = source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord)", ")), (4, Document( id='doc1', title='Document 1', category='main', doctype='decree', authority='Office', files=[],", "assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)} assert len(record.references) ==", "== 2 def test_read_with_specified_language(): with 
requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml',", "if i == 1: assert isinstance(record, DocumentRecord) elif i ==", "requests_mock.mock() as m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read())", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params = MockParameter() params.set_language('fr') source.read(params,", "source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_language(): with requests_mock.mock()", "= source.records[0] assert document.responsible_office.name == {'fr': 'Landeskanzlei'} assert document.text_at_web ==", "5 def test_read_related_decree_as_main(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb')", "with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr',", "(3, Document( id='doc1', title='Document 1', category='main', doctype='invalid', authority='Office', files=[File('File 1',", "m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params = MockParameter()", "assert len(source.records) == 3 document = source.records[0] assert isinstance(document, DocumentRecord)", "source.read(MockParameter(), 100) assert len(source.records) == 2 document = source.records[0] assert", "== { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 5 def", "'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',", "title='Document 1', category='main', doctype='edict', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], 
enactment_date=datetime.date.today()", "== result @pytest.mark.parametrize('i,document', [ (1, Document( id='doc1', title='Document 1', category='main',", "'http://oereblex.example.com', 'language': 'de' }) ]) def test_init(valid, cfg): if valid:", "assert document.responsible_office.name == {'fr': 'Landeskanzlei'} assert document.text_at_web == { 'fr':", "files=[ File('File 2', '/api/attachments/2', 'main'), File('File 3', '/api/attachments/3', 'additional') ],", "elif i == 4: assert source._get_document_records(document, language, references) == []", "assert reference.canton == 'BL' assert reference.text_at_web == {'de': '/api/attachments/4'} def", "(True, { 'host': 'http://oereblex.example.com', 'language': 'de', 'canton': 'BL' }), (False,", "test_get_mapped_value(key, language, result): file_ = File('Test', '/api/attachments/1', 'main') document =", "language, references) == [] else: records = source._get_document_records(document, language, references)", "assert len(source.records) == 2 def test_read_with_specified_language(): with requests_mock.mock() as m:", "= source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name", "'/api/attachments/1', 'main') document = Document(id='test', title='Test', category='main', doctype='decree', files=[file_], enactment_date=datetime.date.today(),", "'main')], enactment_date=datetime.date.today() )), (2, Document( id='doc2', title='Document 2', category='main', doctype='decree',", "document.canton == 'BL' assert document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' }", "(2, Document( id='doc2', title='Document 2', category='main', doctype='decree', authority='Office', files=[ File('File", "assert isinstance(source._auth, HTTPBasicAuth) def test_get_document_title(): document = Document([], id='1', title='Test')", "canton='BL', mapping={'municipality': 'subtype'}) assert 
source._get_mapped_value(document, key, language=language) == result @pytest.mark.parametrize('i,document',", "m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True) source.read(MockParameter(), 100)", "language, references) assert len(records) == i for idx, record in", "mapping={'municipality': 'subtype'}) assert source._get_mapped_value(document, key, language=language) == result @pytest.mark.parametrize('i,document', [", "i == 1: assert isinstance(record, DocumentRecord) elif i == 2:", "}) ]) def test_init(valid, cfg): if valid: assert isinstance(OEREBlexSource(**cfg), OEREBlexSource)", "100) assert len(source.records) == 2 document = source.records[0] assert isinstance(document,", "import pytest import requests_mock from geolink_formatter.entity import Document, File from", "with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml',", "== {'fr': 'Landeskanzlei'} assert document.text_at_web == { 'fr': 'http://oereblex.example.com/api/attachments/313' }", "(4, Document( id='doc1', title='Document 1', category='main', doctype='decree', authority='Office', files=[], enactment_date=datetime.date.today()", "result @pytest.mark.parametrize('i,document', [ (1, Document( id='doc1', title='Document 1', category='main', doctype='edict',", "]) def test_get_document_records(i, document): language = 'de' source = OEREBlexSource(host='http://oereblex.example.com',", "Document(id='test', title='Test', category='main', doctype='decree', files=[file_], enactment_date=datetime.date.today(), subtype='Liestal', authority='Office') source =", "from pyramid_oereb.lib.records.office import OfficeRecord from tests.mockrequest import MockParameter @pytest.mark.parametrize('valid,cfg', [", "}), (False, { 'host': 'http://oereblex.example.com', 'language': 
'german', 'canton': 'BL' }),", "File('File 3', '/api/attachments/3', 'additional') ], enactment_date=datetime.date.today() )), (3, Document( id='doc1',", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(), 100) assert len(source.records) == 2", ") ] if i == 3: with pytest.raises(TypeError): source._get_document_records(document, language,", "def test_init(valid, cfg): if valid: assert isinstance(OEREBlexSource(**cfg), OEREBlexSource) else: with", "} assert len(document.references) == 5 def test_read_related_decree_as_main(): with requests_mock.mock() as", "len(source.records) == 3 document = source.records[0] assert isinstance(document, DocumentRecord) assert", "{'de': '/api/attachments/{fid}'.format(fid=i + idx)} assert len(record.references) == 1 reference =", "== 4: assert source._get_document_records(document, language, references) == [] else: records", "document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 5", "None, None), ('municipality', None, 'Liestal'), ('municipality', 'de', {'de': 'Liestal'}) ])", "canton='BL') references = [ Document( id='ref', title='Reference', category='related', doctype='edict', authority='Office',", "File('File 2', '/api/attachments/2', 'main'), File('File 3', '/api/attachments/3', 'additional') ], enactment_date=datetime.date.today()", "'language': 'german', 'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language':", "record.canton == 'BL' assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)}", "('municipality', 'de', {'de': 'Liestal'}) ]) def test_get_mapped_value(key, language, result): file_", "document = source.records[0] assert document.responsible_office.name == {'fr': 'Landeskanzlei'} assert document.text_at_web", "source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_version(): with requests_mock.mock()", 
"enactment_date=datetime.date.today() )) ]) def test_get_document_records(i, document): language = 'de' source", "'german', 'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language': 'de'", "1: assert isinstance(record, DocumentRecord) elif i == 2: assert isinstance(record,", "pass_version=True, version='1.0.0') source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_language():", "import requests_mock from geolink_formatter.entity import Document, File from requests.auth import", "requests_mock from geolink_formatter.entity import Document, File from requests.auth import HTTPBasicAuth", "source.records[0] assert document.responsible_office.name == {'fr': 'Landeskanzlei'} assert document.text_at_web == {", "doctype='decree', authority='Office', files=[ File('File 2', '/api/attachments/2', 'main'), File('File 3', '/api/attachments/3',", "assert isinstance(OEREBlexSource(**cfg), OEREBlexSource) else: with pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [ ('official_title',", "as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source", "test_read_with_specified_version(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f:", "OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth) assert isinstance(source._auth, HTTPBasicAuth) def test_get_document_title(): document", "files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (2, Document( id='doc2', title='Document", "assert len(records) == i for idx, record in enumerate(records): if", "document.responsible_office.name == {'fr': 'Landeskanzlei'} assert document.text_at_web == { 'fr': 'http://oereblex.example.com/api/attachments/313'", "== 1: assert isinstance(record, DocumentRecord) elif i == 2: assert", 
"'de', 'canton': 'BL' }), (False, { 'language': 'de', 'canton': 'BL'", "1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (2, Document( id='doc2', title='Document 2',", "1', category='main', doctype='decree', authority='Office', files=[], enactment_date=datetime.date.today() )) ]) def test_get_document_records(i,", "} assert len(document.references) == 4 def test_read_with_version_in_url(): with requests_mock.mock() as", "else: records = source._get_document_records(document, language, references) assert len(records) == i", "document.responsible_office.name == {'de': 'Landeskanzlei'} assert document.canton == 'BL' assert document.text_at_web", "len(record.references) == 1 reference = record.references[0] assert isinstance(reference, DocumentRecord) assert", "auth=auth) assert isinstance(source._auth, HTTPBasicAuth) def test_get_document_title(): document = Document([], id='1',", "'language': 'de' }) ]) def test_init(valid, cfg): if valid: assert", "requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read())", "doctype='decree', files=[file_], enactment_date=datetime.date.today(), subtype='Liestal', authority='Office') source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',", "isinstance(record, DocumentRecord) elif i == 2: assert isinstance(record, LegalProvisionRecord) assert", "'http://oereblex.example.com/api/attachments/313' } def test_authentication(): auth = { 'username': 'test', 'password':", "title='Test') result = {'de': 'Test'} assert OEREBlexSource._get_document_title(document, File(), 'de') ==", "'/api/attachments/4', 'main')], enactment_date=datetime.date.today() ) ] if i == 3: with", "Document( id='doc2', title='Document 2', category='main', doctype='decree', authority='Office', files=[ File('File 2',", "'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language': 
'german', 'canton':", "test_read_with_specified_language(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:", "= 'de' source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') references = [", "document): language = 'de' source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') references", "== 2 document = source.records[0] assert document.responsible_office.name == {'fr': 'Landeskanzlei'}", "canton='BL') source.read(MockParameter(), 100) assert len(source.records) == 2 document = source.records[0]", "source._get_document_records(document, language, references) == [] else: records = source._get_document_records(document, language,", "as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(),", "f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True) source.read(MockParameter(),", "== 'BL' assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)} assert", "title='Test', category='main', doctype='decree', files=[file_], enactment_date=datetime.date.today(), subtype='Liestal', authority='Office') source = OEREBlexSource(host='http://oereblex.example.com',", "len(records) == i for idx, record in enumerate(records): if i", "category='main', doctype='decree', authority='Office', files=[ File('File 2', '/api/attachments/2', 'main'), File('File 3',", "def test_read(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params = MockParameter() params.set_language('fr') source.read(params, 100)", "reference = record.references[0] assert 
isinstance(reference, DocumentRecord) assert reference.title == {'de':", "enactment_date=datetime.date.today(), subtype='Liestal', authority='Office') source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', mapping={'municipality': 'subtype'})", "file', '/api/attachments/4', 'main')], enactment_date=datetime.date.today() ) ] if i == 3:", "('official_title', None, None), ('municipality', None, 'Liestal'), ('municipality', 'de', {'de': 'Liestal'})", "100) assert len(source.records) == 2 def test_read_with_specified_version(): with requests_mock.mock() as", "with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml',", "== {'de': 'Landeskanzlei'} assert document.canton == 'BL' assert document.text_at_web ==", "assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name == {'de': 'Landeskanzlei'} assert document.canton", "source.read(MockParameter(), 100) assert len(source.records) == 3 document = source.records[0] assert", "doctype='edict', authority='Office', files=[File('Reference file', '/api/attachments/4', 'main')], enactment_date=datetime.date.today() ) ] if", "content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(), 100)", "from requests.auth import HTTPBasicAuth from pyramid_oereb.contrib.sources.document import OEREBlexSource from pyramid_oereb.lib.records.documents", "f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(), 100)", "2 document = source.records[0] assert document.responsible_office.name == {'fr': 'Landeskanzlei'} assert", "1', category='main', doctype='edict', authority='Office', files=[File('File 1', '/api/attachments/1', 
'main')], enactment_date=datetime.date.today() )),", "version='1.0.0') source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_language(): with", "from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office import OfficeRecord from", "pytest.raises(TypeError): source._get_document_records(document, language, references) elif i == 4: assert source._get_document_records(document,", "source._get_mapped_value(document, key, language=language) == result @pytest.mark.parametrize('i,document', [ (1, Document( id='doc1',", "utf-8 -*- import datetime import pytest import requests_mock from geolink_formatter.entity", "LegalProvisionRecord from pyramid_oereb.lib.records.office import OfficeRecord from tests.mockrequest import MockParameter @pytest.mark.parametrize('valid,cfg',", "language='de', canton='BL', auth=auth) assert isinstance(source._auth, HTTPBasicAuth) def test_get_document_title(): document =", "'de', {'de': 'Liestal'}) ]) def test_get_mapped_value(key, language, result): file_ =", "== [] else: records = source._get_document_records(document, language, references) assert len(records)", "OEREBlexSource from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office import OfficeRecord", "Document, File from requests.auth import HTTPBasicAuth from pyramid_oereb.contrib.sources.document import OEREBlexSource", "] if i == 3: with pytest.raises(TypeError): source._get_document_records(document, language, references)", "source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(), 100) assert", "valid: assert isinstance(OEREBlexSource(**cfg), OEREBlexSource) else: with pytest.raises(AssertionError): OEREBlexSource(**cfg) @pytest.mark.parametrize('key,language,result', [", "== 3: with pytest.raises(TypeError): 
source._get_document_records(document, language, references) elif i ==", "@pytest.mark.parametrize('key,language,result', [ ('official_title', None, None), ('municipality', None, 'Liestal'), ('municipality', 'de',", "i == 2: assert isinstance(record, LegalProvisionRecord) assert record.title == {'de':", "1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (4, Document( id='doc1', title='Document 1',", "'host': 'http://oereblex.example.com', 'language': 'de' }) ]) def test_init(valid, cfg): if", "'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 4 def test_read_with_version_in_url(): with requests_mock.mock()", "def test_get_mapped_value(key, language, result): file_ = File('Test', '/api/attachments/1', 'main') document", "== 5 def test_read_related_decree_as_main(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml',", "doctype='edict', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (2, Document(", "as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source", "title='Reference', category='related', doctype='edict', authority='Office', files=[File('Reference file', '/api/attachments/4', 'main')], enactment_date=datetime.date.today() )", "title='Document 1', category='main', doctype='invalid', authority='Office', files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today()", "'/api/attachments/3', 'additional') ], enactment_date=datetime.date.today() )), (3, Document( id='doc1', title='Document 1',", "= OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', mapping={'municipality': 'subtype'}) assert source._get_mapped_value(document, key, language=language)", "def test_read_related_decree_as_main(): with requests_mock.mock() as m: with 
open('./tests/resources/geolink_v1.1.1.xml', 'rb') as", "coding: utf-8 -*- import datetime import pytest import requests_mock from", "2', '/api/attachments/2', 'main'), File('File 3', '/api/attachments/3', 'additional') ], enactment_date=datetime.date.today() )),", "None), ('municipality', None, 'Liestal'), ('municipality', 'de', {'de': 'Liestal'}) ]) def", "len(source.records) == 2 def test_read_with_specified_version(): with requests_mock.mock() as m: with", "content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params = MockParameter() params.set_language('fr')", "assert source._get_mapped_value(document, key, language=language) == result @pytest.mark.parametrize('i,document', [ (1, Document(", "import OfficeRecord from tests.mockrequest import MockParameter @pytest.mark.parametrize('valid,cfg', [ (True, {", "100) assert len(source.records) == 3 document = source.records[0] assert isinstance(document,", "# -*- coding: utf-8 -*- import datetime import pytest import", "{ 'username': 'test', 'password': '<PASSWORD>' } source = OEREBlexSource(host='http://oereblex.example.com', language='de',", "{'fr': 'Landeskanzlei'} assert document.text_at_web == { 'fr': 'http://oereblex.example.com/api/attachments/313' } def", "content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(), 100) assert len(source.records)", "language = 'de' source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') references =", "canton='BL', related_decree_as_main=True) source.read(MockParameter(), 100) assert len(source.records) == 3 document =", "2 def test_read_with_specified_version(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb')", "with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source = 
OEREBlexSource(host='http://oereblex.example.com',", "'language': 'de', 'canton': 'BL' }), (False, { 'host': 'http://oereblex.example.com', 'language':", "requests.auth import HTTPBasicAuth from pyramid_oereb.contrib.sources.document import OEREBlexSource from pyramid_oereb.lib.records.documents import", "files=[File('File 1', '/api/attachments/1', 'main')], enactment_date=datetime.date.today() )), (4, Document( id='doc1', title='Document", "reference.canton == 'BL' assert reference.text_at_web == {'de': '/api/attachments/4'} def test_read():", "'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')", "document = Document(id='test', title='Test', category='main', doctype='decree', files=[file_], enactment_date=datetime.date.today(), subtype='Liestal', authority='Office')" ]
[ "threads, which are not normally safe for reloading with talon.", "the iterator so we can call the cleanup actions later.", "If this file is ever updated, you'll need to restart", "except StopIteration: pass else: logging.error( f\"the old @singleton function {name}", "f\"the old @singleton function {name} had more than one yield!\"", "want the object yielded by the iterator to be available", "before. if name in _singletons: old = _singletons.pop(name) try: next(old)", "modules using # things like threads, which are not normally", "talon. import logging _singletons = {} def singleton(fn): name =", "= iter(fn()) obj = next(it) # Remember the iterator so", "the new object. it = iter(fn()) obj = next(it) #", "with talon. # If this file is ever updated, you'll", "singleton(fn): name = f\"{fn.__module__}.{fn.__name__}\" # Do any cleanup actions from", "from before. if name in _singletons: old = _singletons.pop(name) try:", "startup actions on the new object. it = iter(fn()) obj", "cleanup actions later. _singletons[name] = it # We want the", "pass else: logging.error( f\"the old @singleton function {name} had more", "= _singletons.pop(name) try: next(old) except StopIteration: pass else: logging.error( f\"the", "We want the object yielded by the iterator to be", "it = iter(fn()) obj = next(it) # Remember the iterator", "iter(fn()) obj = next(it) # Remember the iterator so we", "_singletons.pop(name) try: next(old) except StopIteration: pass else: logging.error( f\"the old", "writing reload-safe talon modules using # things like threads, which", "f\"{fn.__module__}.{fn.__name__}\" # Do any cleanup actions from before. if name", "this file is ever updated, you'll need to restart talon.", ") # Do the startup actions on the new object.", "like threads, which are not normally safe for reloading with", "on the new object. 
it = iter(fn()) obj = next(it)", "rarely-updated module to assist in writing reload-safe talon modules using", "iterator to be available at the name # of the", "yielded by the iterator to be available at the name", "so we can call the cleanup actions later. _singletons[name] =", "logging.error( f\"the old @singleton function {name} had more than one", "for reloading with talon. # If this file is ever", "iterator so we can call the cleanup actions later. _singletons[name]", "try: next(old) except StopIteration: pass else: logging.error( f\"the old @singleton", "= {} def singleton(fn): name = f\"{fn.__module__}.{fn.__name__}\" # Do any", "the cleanup actions later. _singletons[name] = it # We want", "object yielded by the iterator to be available at the", "the object yielded by the iterator to be available at", "of the function, so instead of returning a function we", "next(old) except StopIteration: pass else: logging.error( f\"the old @singleton function", "actions from before. if name in _singletons: old = _singletons.pop(name)", "else: logging.error( f\"the old @singleton function {name} had more than", "the startup actions on the new object. it = iter(fn())", "to restart talon. import logging _singletons = {} def singleton(fn):", "@singleton function {name} had more than one yield!\" ) #", "talon. # If this file is ever updated, you'll need", "safe for reloading with talon. # If this file is", "name = f\"{fn.__module__}.{fn.__name__}\" # Do any cleanup actions from before.", "old @singleton function {name} had more than one yield!\" )", "Do any cleanup actions from before. if name in _singletons:", "name in _singletons: old = _singletons.pop(name) try: next(old) except StopIteration:", "restart talon. import logging _singletons = {} def singleton(fn): name", "things like threads, which are not normally safe for reloading", "actions later. _singletons[name] = it # We want the object", "normally safe for reloading with talon. 
# If this file", "one yield!\" ) # Do the startup actions on the", "actions on the new object. it = iter(fn()) obj =", "to be available at the name # of the function,", "in writing reload-safe talon modules using # things like threads,", "file is ever updated, you'll need to restart talon. import", "# If this file is ever updated, you'll need to", "StopIteration: pass else: logging.error( f\"the old @singleton function {name} had", "Remember the iterator so we can call the cleanup actions", "Do the startup actions on the new object. it =", "# Do any cleanup actions from before. if name in", "# things like threads, which are not normally safe for", "# Remember the iterator so we can call the cleanup", "which are not normally safe for reloading with talon. #", "# A rarely-updated module to assist in writing reload-safe talon", "later. _singletons[name] = it # We want the object yielded", "# Do the startup actions on the new object. it", "new object. it = iter(fn()) obj = next(it) # Remember", "any cleanup actions from before. if name in _singletons: old", "be available at the name # of the function, so", "{name} had more than one yield!\" ) # Do the", "of returning a function we return an object. return obj", "we can call the cleanup actions later. _singletons[name] = it", "# We want the object yielded by the iterator to", "reloading with talon. # If this file is ever updated,", "module to assist in writing reload-safe talon modules using #", "reload-safe talon modules using # things like threads, which are", "are not normally safe for reloading with talon. # If", "import logging _singletons = {} def singleton(fn): name = f\"{fn.__module__}.{fn.__name__}\"", "if name in _singletons: old = _singletons.pop(name) try: next(old) except", "object. it = iter(fn()) obj = next(it) # Remember the", "in _singletons: old = _singletons.pop(name) try: next(old) except StopIteration: pass", "is ever updated, you'll need to restart talon. 
import logging", "<reponame>codecat555/codecat555-fidgetingbits_knausj_talon<filename>apps/zsh/singletons.py # A rarely-updated module to assist in writing reload-safe", "to assist in writing reload-safe talon modules using # things", "available at the name # of the function, so instead", "{} def singleton(fn): name = f\"{fn.__module__}.{fn.__name__}\" # Do any cleanup", "= next(it) # Remember the iterator so we can call", "obj = next(it) # Remember the iterator so we can", "function {name} had more than one yield!\" ) # Do", "talon modules using # things like threads, which are not", "updated, you'll need to restart talon. import logging _singletons =", "not normally safe for reloading with talon. # If this", "the name # of the function, so instead of returning", "yield!\" ) # Do the startup actions on the new", "call the cleanup actions later. _singletons[name] = it # We", "logging _singletons = {} def singleton(fn): name = f\"{fn.__module__}.{fn.__name__}\" #", "assist in writing reload-safe talon modules using # things like", "need to restart talon. import logging _singletons = {} def", "had more than one yield!\" ) # Do the startup", "the iterator to be available at the name # of", "at the name # of the function, so instead of", "more than one yield!\" ) # Do the startup actions", "_singletons[name] = it # We want the object yielded by", "using # things like threads, which are not normally safe", "next(it) # Remember the iterator so we can call the", "name # of the function, so instead of returning a", "cleanup actions from before. if name in _singletons: old =", "= it # We want the object yielded by the", "function, so instead of returning a function we return an", "_singletons: old = _singletons.pop(name) try: next(old) except StopIteration: pass else:", "# of the function, so instead of returning a function", "by the iterator to be available at the name #", "instead of returning a function we return an object. 
return", "old = _singletons.pop(name) try: next(old) except StopIteration: pass else: logging.error(", "ever updated, you'll need to restart talon. import logging _singletons", "can call the cleanup actions later. _singletons[name] = it #", "_singletons = {} def singleton(fn): name = f\"{fn.__module__}.{fn.__name__}\" # Do", "A rarely-updated module to assist in writing reload-safe talon modules", "than one yield!\" ) # Do the startup actions on", "so instead of returning a function we return an object.", "the function, so instead of returning a function we return", "= f\"{fn.__module__}.{fn.__name__}\" # Do any cleanup actions from before. if", "you'll need to restart talon. import logging _singletons = {}", "def singleton(fn): name = f\"{fn.__module__}.{fn.__name__}\" # Do any cleanup actions", "it # We want the object yielded by the iterator" ]
[ "parsing parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML", "required=True, type=int) parser.add_argument('-outdir', required=True, help='Output directory') parser.add_argument('-nbins', type=int, required=True, help='Number", "from train import train_bichrom if __name__ == '__main__': # parsing", "f: try: data_paths = yaml.safe_load(f) except yaml.YAMLError as exc: print(exc)", "Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML file with paths to train, test", "import call from train import train_bichrom if __name__ == '__main__':", "= argparse.ArgumentParser(description='Train and Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML file with", "__name__ == '__main__': # parsing parser = argparse.ArgumentParser(description='Train and Evaluate", "= yaml.safe_load(f) except yaml.YAMLError as exc: print(exc) # create the", "subprocess import call from train import train_bichrom if __name__ ==", "data') parser.add_argument('-len', help='Size of genomic windows', required=True, type=int) parser.add_argument('-outdir', required=True,", "input data paths: with open(args.training_schema_yaml, 'r') as f: try: data_paths", "yaml from subprocess import call from train import train_bichrom if", "type=int) parser.add_argument('-outdir', required=True, help='Output directory') parser.add_argument('-nbins', type=int, required=True, help='Number of", "windows', required=True, type=int) parser.add_argument('-outdir', required=True, help='Output directory') parser.add_argument('-nbins', type=int, required=True,", "help='Output directory') parser.add_argument('-nbins', type=int, required=True, help='Number of bins') args =", "open(args.training_schema_yaml, 'r') as f: try: data_paths = yaml.safe_load(f) except yaml.YAMLError", "exc: print(exc) # create the output directory: outdir = args.outdir", "args = 
parser.parse_args() # load the yaml file with input", "from subprocess import call from train import train_bichrom if __name__", "# create the output directory: outdir = args.outdir call(['mkdir', outdir])", "parser.add_argument('-training_schema_yaml', required=True, help='YAML file with paths to train, test and", "argparse.ArgumentParser(description='Train and Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML file with paths", "parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML file", "yaml file with input data paths: with open(args.training_schema_yaml, 'r') as", "help='Size of genomic windows', required=True, type=int) parser.add_argument('-outdir', required=True, help='Output directory')", "required=True, help='Number of bins') args = parser.parse_args() # load the", "yaml.YAMLError as exc: print(exc) # create the output directory: outdir", "# load the yaml file with input data paths: with", "as f: try: data_paths = yaml.safe_load(f) except yaml.YAMLError as exc:", "parser.add_argument('-len', help='Size of genomic windows', required=True, type=int) parser.add_argument('-outdir', required=True, help='Output", "help='YAML file with paths to train, test and val data')", "data paths: with open(args.training_schema_yaml, 'r') as f: try: data_paths =", "train_bichrom if __name__ == '__main__': # parsing parser = argparse.ArgumentParser(description='Train", "val data') parser.add_argument('-len', help='Size of genomic windows', required=True, type=int) parser.add_argument('-outdir',", "of genomic windows', required=True, type=int) parser.add_argument('-outdir', required=True, help='Output directory') parser.add_argument('-nbins',", "import argparse import yaml from subprocess import call from train", "Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML file with paths to train,", "the yaml file 
with input data paths: with open(args.training_schema_yaml, 'r')", "# parsing parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True,", "train, test and val data') parser.add_argument('-len', help='Size of genomic windows',", "parser.add_argument('-nbins', type=int, required=True, help='Number of bins') args = parser.parse_args() #", "== '__main__': # parsing parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom')", "genomic windows', required=True, type=int) parser.add_argument('-outdir', required=True, help='Output directory') parser.add_argument('-nbins', type=int,", "with open(args.training_schema_yaml, 'r') as f: try: data_paths = yaml.safe_load(f) except", "'__main__': # parsing parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom') parser.add_argument('-training_schema_yaml',", "try: data_paths = yaml.safe_load(f) except yaml.YAMLError as exc: print(exc) #", "import yaml from subprocess import call from train import train_bichrom", "load the yaml file with input data paths: with open(args.training_schema_yaml,", "directory: outdir = args.outdir call(['mkdir', outdir]) train_bichrom(data_paths=data_paths, outdir=outdir, seq_len=args.len, bin_size=int(args.len/args.nbins))", "yaml.safe_load(f) except yaml.YAMLError as exc: print(exc) # create the output", "of bins') args = parser.parse_args() # load the yaml file", "'r') as f: try: data_paths = yaml.safe_load(f) except yaml.YAMLError as", "train import train_bichrom if __name__ == '__main__': # parsing parser", "except yaml.YAMLError as exc: print(exc) # create the output directory:", "required=True, help='Output directory') parser.add_argument('-nbins', type=int, required=True, help='Number of bins') args", "directory') parser.add_argument('-nbins', type=int, required=True, help='Number of bins') args = parser.parse_args()", "output directory: outdir = args.outdir call(['mkdir', outdir]) 
train_bichrom(data_paths=data_paths, outdir=outdir, seq_len=args.len,", "help='Number of bins') args = parser.parse_args() # load the yaml", "file with input data paths: with open(args.training_schema_yaml, 'r') as f:", "as exc: print(exc) # create the output directory: outdir =", "paths: with open(args.training_schema_yaml, 'r') as f: try: data_paths = yaml.safe_load(f)", "parser.add_argument('-outdir', required=True, help='Output directory') parser.add_argument('-nbins', type=int, required=True, help='Number of bins')", "paths to train, test and val data') parser.add_argument('-len', help='Size of", "file with paths to train, test and val data') parser.add_argument('-len',", "type=int, required=True, help='Number of bins') args = parser.parse_args() # load", "parser.parse_args() # load the yaml file with input data paths:", "the output directory: outdir = args.outdir call(['mkdir', outdir]) train_bichrom(data_paths=data_paths, outdir=outdir,", "create the output directory: outdir = args.outdir call(['mkdir', outdir]) train_bichrom(data_paths=data_paths,", "data_paths = yaml.safe_load(f) except yaml.YAMLError as exc: print(exc) # create", "to train, test and val data') parser.add_argument('-len', help='Size of genomic", "and Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML file with paths to", "if __name__ == '__main__': # parsing parser = argparse.ArgumentParser(description='Train and", "with paths to train, test and val data') parser.add_argument('-len', help='Size", "call from train import train_bichrom if __name__ == '__main__': #", "print(exc) # create the output directory: outdir = args.outdir call(['mkdir',", "import train_bichrom if __name__ == '__main__': # parsing parser =", "bins') args = parser.parse_args() # load the yaml file with", "= parser.parse_args() # load the yaml file with input data", "test and val data') parser.add_argument('-len', help='Size of genomic windows', required=True,", "argparse import 
yaml from subprocess import call from train import", "required=True, help='YAML file with paths to train, test and val", "and val data') parser.add_argument('-len', help='Size of genomic windows', required=True, type=int)", "with input data paths: with open(args.training_schema_yaml, 'r') as f: try:" ]
[ "'version.py') if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version']", "\"README.md\", \"r\") as fh: setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type'] = \"text/markdown\"", "\"README.md\" if README_FILE.exists(): with open(TOP_DIR / \"README.md\", \"r\") as fh:", "import sys from pathlib import Path from distutils import log", "from distutils import log from setuptools import setup from setuptools.command.sdist", "currently using to # run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], ) try:", "file. README_FILE = TOP_DIR / \"README.md\" if README_FILE.exists(): with open(TOP_DIR", "package_dir={\"\": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True, # Requirements python_requires=\">=3.7\", setup_requires=['setuptools_scm'], #", "setup_kw = {} # Read in the package version when", "setup_kw['long_description_content_type'] = \"text/markdown\" # define a CMake package cmake_args =", "setup_kw['use_scm_version']= { \"version_scheme\": \"post-release\", \"local_scheme\": \"no-local-version\", \"write_to\": VERSION_FILE, } #", "README_FILE.exists(): with open(TOP_DIR / \"README.md\", \"r\") as fh: setup_kw['long_description'] =", "coding: utf-8 -*- # SPDX-License-Identifier: MIT import os import shutil", "the CMakeExtension doesn't support `cmake_component` then we have to #", "for f in cmake_install_prefix.rglob(\"*\"): log.info(' - %s', f) raise setup(", "<reponame>Fronius-SED/rapidyaml #!/usr/bin/env python3 # -*- coding: utf-8 -*- # SPDX-License-Identifier:", "#author='<NAME>', description='Rapid YAML - a library to parse and emit", "except TypeError: del cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args) # If the", "\"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], ) try: ext = CMakeExtension(**cmake_args) except TypeError: del", "cmake_args = dict( name='ryml.ryml', install_prefix='', source_dir='', 
cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", #", "Python library is actually found. PYTHON_DIR = \"api/python\" setup_kw =", "%s\", lib_path) shutil.rmtree(lib_path) inc_path = cmake_install_prefix / \"include\" assert inc_path.exists(),", "log.info(' - %s', f) raise setup( # Package human readable", "under: %s\", lib_path) shutil.rmtree(lib_path) inc_path = cmake_install_prefix / \"include\" assert", "# Force cmake to use the Python interpreter we are", "CMakeExtension TOP_DIR = (Path(__file__).parent).resolve() # Where the Python library is", "cm_path = cmake_install_prefix / \"cmake\" if cm_path.exists(): log.info(\"Removing everything under:", "package version when not in a git repository. VERSION_FILE =", "setuptools.command.sdist import sdist as SdistCommand from cmake_build_extension import BuildExtension, CMakeExtension", "are currently using to # run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], )", "}, package_dir={\"\": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True, # Requirements python_requires=\">=3.7\", setup_requires=['setuptools_scm'],", "= (Path(__file__).parent).resolve() # Where the Python library is actually found.", "cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", # Force cmake to use the Python interpreter", "from setuptools.command.sdist import sdist as SdistCommand from cmake_build_extension import BuildExtension,", "/ ext.install_prefix assert cmake_install_prefix.exists(), cmake_install_prefix try: lib_path = cmake_install_prefix /", "is actually found. PYTHON_DIR = \"api/python\" setup_kw = {} #", "found. 
PYTHON_DIR = \"api/python\" setup_kw = {} # Read in", "as SdistCommand from cmake_build_extension import BuildExtension, CMakeExtension TOP_DIR = (Path(__file__).parent).resolve()", "%s\", inc_path) shutil.rmtree(inc_path) # Windows only cm_path = cmake_install_prefix /", "ext) ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix = ext_dir / ext.install_prefix assert", "= cmake_install_prefix / \"cmake\" if cm_path.exists(): log.info(\"Removing everything under: %s\",", "ext_modules=[ext], include_package_data=True, # Requirements python_requires=\">=3.7\", setup_requires=['setuptools_scm'], # Extra arguments **setup_kw,", "log.info(\"Removing everything under: %s\", inc_path) shutil.rmtree(inc_path) # Windows only cm_path", "sdist as SdistCommand from cmake_build_extension import BuildExtension, CMakeExtension TOP_DIR =", "\"lib\" assert lib_path.exists(), lib_path log.info(\"Removing everything under: %s\", lib_path) shutil.rmtree(lib_path)", "assert inc_path.exists(), inc_path log.info(\"Removing everything under: %s\", inc_path) shutil.rmtree(inc_path) #", "-*- # SPDX-License-Identifier: MIT import os import shutil import sys", "not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] = version", "%s\", cm_path) shutil.rmtree(cm_path) except: log.info('Found following installed files:') for f", ") try: ext = CMakeExtension(**cmake_args) except TypeError: del cmake_args['cmake_component'] ext", "# run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], ) try: ext = CMakeExtension(**cmake_args)", "_BuildExtension.build_extension(self, ext) ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix = ext_dir / ext.install_prefix", "library to parse and emit YAML, and do it fast.',", "doesn't support `cmake_component` then we have to # do some", "assert cmake_install_prefix.exists(), cmake_install_prefix try: lib_path = 
cmake_install_prefix / \"lib\" assert", "\"cmake\" if cm_path.exists(): log.info(\"Removing everything under: %s\", cm_path) shutil.rmtree(cm_path) except:", "fh: setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type'] = \"text/markdown\" # define a", "library is actually found. PYTHON_DIR = \"api/python\" setup_kw = {}", "'.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] = version else: setup_kw['use_scm_version']= {", "cmake_install_prefix try: lib_path = cmake_install_prefix / \"lib\" assert lib_path.exists(), lib_path", "os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] = version else: setup_kw['use_scm_version']= { \"version_scheme\": \"post-release\",", "/ \"cmake\" if cm_path.exists(): log.info(\"Removing everything under: %s\", cm_path) shutil.rmtree(cm_path)", "\"include\" assert inc_path.exists(), inc_path log.info(\"Removing everything under: %s\", inc_path) shutil.rmtree(inc_path)", "support `cmake_component` then we have to # do some manual", "fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], # Package contents control cmdclass={ \"build_ext\":", "/ \"lib\" assert lib_path.exists(), lib_path log.info(\"Removing everything under: %s\", lib_path)", "to # do some manual cleanup. _BuildExtension=BuildExtension class BuildExtension(_BuildExtension): def", "else: setup_kw['use_scm_version']= { \"version_scheme\": \"post-release\", \"local_scheme\": \"no-local-version\", \"write_to\": VERSION_FILE, }", "description from the README.md file. 
README_FILE = TOP_DIR / \"README.md\"", "CMakeExtension(**cmake_args) except TypeError: del cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args) # If", "it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], # Package contents control cmdclass={", "everything under: %s\", cm_path) shutil.rmtree(cm_path) except: log.info('Found following installed files:')", "import Path from distutils import log from setuptools import setup", "TypeError: del cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args) # If the CMakeExtension", "import sdist as SdistCommand from cmake_build_extension import BuildExtension, CMakeExtension TOP_DIR", "try: ext = CMakeExtension(**cmake_args) except TypeError: del cmake_args['cmake_component'] ext =", "module description from the README.md file. README_FILE = TOP_DIR /", "f in cmake_install_prefix.rglob(\"*\"): log.info(' - %s', f) raise setup( #", "= version else: setup_kw['use_scm_version']= { \"version_scheme\": \"post-release\", \"local_scheme\": \"no-local-version\", \"write_to\":", "lib_path.exists(), lib_path log.info(\"Removing everything under: %s\", lib_path) shutil.rmtree(lib_path) inc_path =", "class BuildExtension(_BuildExtension): def build_extension(self, ext): _BuildExtension.build_extension(self, ext) ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute()", "cmake_install_prefix / \"cmake\" if cm_path.exists(): log.info(\"Removing everything under: %s\", cm_path)", "cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", # Force cmake to use the Python", "= cmake_install_prefix / \"include\" assert inc_path.exists(), inc_path log.info(\"Removing everything under:", "to use the Python interpreter we are currently using to", "= {} # Read in the package version when not", "README_FILE = TOP_DIR / \"README.md\" if README_FILE.exists(): with open(TOP_DIR /", "= CMakeExtension(**cmake_args) # If the CMakeExtension doesn't support 
`cmake_component` then", "lib_path log.info(\"Removing everything under: %s\", lib_path) shutil.rmtree(lib_path) inc_path = cmake_install_prefix", "shutil.rmtree(cm_path) except: log.info('Found following installed files:') for f in cmake_install_prefix.rglob(\"*\"):", "information name='rapidyaml', #author='<NAME>', description='Rapid YAML - a library to parse", "open(TOP_DIR / \"README.md\", \"r\") as fh: setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type']", "Package human readable information name='rapidyaml', #author='<NAME>', description='Rapid YAML - a", "/ \"README.md\", \"r\") as fh: setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type'] =", "setup from setuptools.command.sdist import sdist as SdistCommand from cmake_build_extension import", "license='MIT', license_files=['LICENSE.txt'], # Package contents control cmdclass={ \"build_ext\": BuildExtension, },", "inc_path = cmake_install_prefix / \"include\" assert inc_path.exists(), inc_path log.info(\"Removing everything", "\"no-local-version\", \"write_to\": VERSION_FILE, } # Read in the module description", "interpreter we are currently using to # run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable,", "run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], ) try: ext = CMakeExtension(**cmake_args) except", "/ \"include\" assert inc_path.exists(), inc_path log.info(\"Removing everything under: %s\", inc_path)", "], ) try: ext = CMakeExtension(**cmake_args) except TypeError: del cmake_args['cmake_component']", "Windows only cm_path = cmake_install_prefix / \"cmake\" if cm_path.exists(): log.info(\"Removing", "setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type'] = \"text/markdown\" # define a CMake", "-*- coding: utf-8 -*- # SPDX-License-Identifier: MIT import os import", "under: %s\", inc_path) shutil.rmtree(inc_path) # Windows only cm_path = cmake_install_prefix", "a library to parse and emit 
YAML, and do it", "Path from distutils import log from setuptools import setup from", "when not in a git repository. VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml',", "and do it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], # Package contents", "import BuildExtension, CMakeExtension TOP_DIR = (Path(__file__).parent).resolve() # Where the Python", "# Package contents control cmdclass={ \"build_ext\": BuildExtension, }, package_dir={\"\": PYTHON_DIR},", "Package contents control cmdclass={ \"build_ext\": BuildExtension, }, package_dir={\"\": PYTHON_DIR}, packages=['ryml'],", "with open(TOP_DIR / \"README.md\", \"r\") as fh: setup_kw['long_description'] = fh.read()", "python3 # -*- coding: utf-8 -*- # SPDX-License-Identifier: MIT import", "actually found. PYTHON_DIR = \"api/python\" setup_kw = {} # Read", "{ \"version_scheme\": \"post-release\", \"local_scheme\": \"no-local-version\", \"write_to\": VERSION_FILE, } # Read", "only cm_path = cmake_install_prefix / \"cmake\" if cm_path.exists(): log.info(\"Removing everything", "# Read in the module description from the README.md file.", "'ryml', 'version.py') if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read())", "fh.read() setup_kw['long_description_content_type'] = \"text/markdown\" # define a CMake package cmake_args", "log.info('Found following installed files:') for f in cmake_install_prefix.rglob(\"*\"): log.info(' -", "ext = CMakeExtension(**cmake_args) # If the CMakeExtension doesn't support `cmake_component`", "inc_path) shutil.rmtree(inc_path) # Windows only cm_path = cmake_install_prefix / \"cmake\"", "YAML - a library to parse and emit YAML, and", "url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], # Package contents control cmdclass={ \"build_ext\": BuildExtension,", "# Windows only cm_path = cmake_install_prefix / \"cmake\" if cm_path.exists():", "cm_path.exists(): 
log.info(\"Removing everything under: %s\", cm_path) shutil.rmtree(cm_path) except: log.info('Found following", "not in a git repository. VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py')", "cleanup. _BuildExtension=BuildExtension class BuildExtension(_BuildExtension): def build_extension(self, ext): _BuildExtension.build_extension(self, ext) ext_dir", "import shutil import sys from pathlib import Path from distutils", "# define a CMake package cmake_args = dict( name='ryml.ryml', install_prefix='',", "SPDX-License-Identifier: MIT import os import shutil import sys from pathlib", "ext): _BuildExtension.build_extension(self, ext) ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix = ext_dir /", "(TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] = version else:", "lib_path) shutil.rmtree(lib_path) inc_path = cmake_install_prefix / \"include\" assert inc_path.exists(), inc_path", "YAML, and do it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], # Package", "package cmake_args = dict( name='ryml.ryml', install_prefix='', source_dir='', cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\",", "everything under: %s\", lib_path) shutil.rmtree(lib_path) inc_path = cmake_install_prefix / \"include\"", "del cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args) # If the CMakeExtension doesn't", "shutil.rmtree(lib_path) inc_path = cmake_install_prefix / \"include\" assert inc_path.exists(), inc_path log.info(\"Removing", "cmake_install_prefix / \"lib\" assert lib_path.exists(), lib_path log.info(\"Removing everything under: %s\",", "if cm_path.exists(): log.info(\"Removing everything under: %s\", cm_path) shutil.rmtree(cm_path) except: log.info('Found", "TOP_DIR / \"README.md\" if README_FILE.exists(): with open(TOP_DIR / \"README.md\", \"r\")", "packages=['ryml'], 
ext_modules=[ext], include_package_data=True, # Requirements python_requires=\">=3.7\", setup_requires=['setuptools_scm'], # Extra arguments", "manual cleanup. _BuildExtension=BuildExtension class BuildExtension(_BuildExtension): def build_extension(self, ext): _BuildExtension.build_extension(self, ext)", "f) raise setup( # Package human readable information name='rapidyaml', #author='<NAME>',", "= dict( name='ryml.ryml', install_prefix='', source_dir='', cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", # Force", "\"api/python\" setup_kw = {} # Read in the package version", "try: lib_path = cmake_install_prefix / \"lib\" assert lib_path.exists(), lib_path log.info(\"Removing", "the module description from the README.md file. README_FILE = TOP_DIR", "in the package version when not in a git repository.", "VERSION_FILE, } # Read in the module description from the", "name='ryml.ryml', install_prefix='', source_dir='', cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", # Force cmake to", "under: %s\", cm_path) shutil.rmtree(cm_path) except: log.info('Found following installed files:') for", "license_files=['LICENSE.txt'], # Package contents control cmdclass={ \"build_ext\": BuildExtension, }, package_dir={\"\":", "if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] =", "(Path(__file__).parent).resolve() # Where the Python library is actually found. 
PYTHON_DIR", "cmdclass={ \"build_ext\": BuildExtension, }, package_dir={\"\": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True, #", "utf-8 -*- # SPDX-License-Identifier: MIT import os import shutil import", "exec(open(VERSION_FILE).read()) setup_kw['version'] = version else: setup_kw['use_scm_version']= { \"version_scheme\": \"post-release\", \"local_scheme\":", "\"local_scheme\": \"no-local-version\", \"write_to\": VERSION_FILE, } # Read in the module", "Where the Python library is actually found. PYTHON_DIR = \"api/python\"", "= \"text/markdown\" # define a CMake package cmake_args = dict(", "# Read in the package version when not in a", "= os.path.join(PYTHON_DIR, 'ryml', 'version.py') if not (TOP_DIR / '.git').exists() and", "source_dir='', cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", # Force cmake to use the", "ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix = ext_dir / ext.install_prefix assert cmake_install_prefix.exists(),", "# If the CMakeExtension doesn't support `cmake_component` then we have", "log.info(\"Removing everything under: %s\", cm_path) shutil.rmtree(cm_path) except: log.info('Found following installed", "- a library to parse and emit YAML, and do", "PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True, # Requirements python_requires=\">=3.7\", setup_requires=['setuptools_scm'], # Extra", "SdistCommand from cmake_build_extension import BuildExtension, CMakeExtension TOP_DIR = (Path(__file__).parent).resolve() #", "\"text/markdown\" # define a CMake package cmake_args = dict( name='ryml.ryml',", "{} # Read in the package version when not in", "setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], ) try: ext = CMakeExtension(**cmake_args) except TypeError:", "log.info(\"Removing everything under: %s\", lib_path) shutil.rmtree(lib_path) inc_path = cmake_install_prefix /", "inc_path.exists(), inc_path 
log.info(\"Removing everything under: %s\", inc_path) shutil.rmtree(inc_path) # Windows", "cmake_install_prefix.rglob(\"*\"): log.info(' - %s', f) raise setup( # Package human", "control cmdclass={ \"build_ext\": BuildExtension, }, package_dir={\"\": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True,", "cmake to use the Python interpreter we are currently using", "we have to # do some manual cleanup. _BuildExtension=BuildExtension class", "readable information name='rapidyaml', #author='<NAME>', description='Rapid YAML - a library to", "log from setuptools import setup from setuptools.command.sdist import sdist as", "cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args) # If the CMakeExtension doesn't support", "sys from pathlib import Path from distutils import log from", "import os import shutil import sys from pathlib import Path", "\"write_to\": VERSION_FILE, } # Read in the module description from", "shutil.rmtree(inc_path) # Windows only cm_path = cmake_install_prefix / \"cmake\" if", "/ '.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] = version else: setup_kw['use_scm_version']=", "build_extension(self, ext): _BuildExtension.build_extension(self, ext) ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix = ext_dir", "%s', f) raise setup( # Package human readable information name='rapidyaml',", "cm_path) shutil.rmtree(cm_path) except: log.info('Found following installed files:') for f in", "use the Python interpreter we are currently using to #", "using to # run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], ) try: ext", "distutils import log from setuptools import setup from setuptools.command.sdist import", "everything under: %s\", inc_path) shutil.rmtree(inc_path) # Windows only cm_path =", "PYTHON_DIR = \"api/python\" setup_kw = {} # Read in the", "inc_path log.info(\"Removing everything under: %s\", inc_path) 
shutil.rmtree(inc_path) # Windows only", "following installed files:') for f in cmake_install_prefix.rglob(\"*\"): log.info(' - %s',", "VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py') if not (TOP_DIR / '.git').exists()", "assert lib_path.exists(), lib_path log.info(\"Removing everything under: %s\", lib_path) shutil.rmtree(lib_path) inc_path", "= TOP_DIR / \"README.md\" if README_FILE.exists(): with open(TOP_DIR / \"README.md\",", "in cmake_install_prefix.rglob(\"*\"): log.info(' - %s', f) raise setup( # Package", "name='rapidyaml', #author='<NAME>', description='Rapid YAML - a library to parse and", "ext = CMakeExtension(**cmake_args) except TypeError: del cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args)", "\"post-release\", \"local_scheme\": \"no-local-version\", \"write_to\": VERSION_FILE, } # Read in the", "and emit YAML, and do it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'],", "/ \"README.md\" if README_FILE.exists(): with open(TOP_DIR / \"README.md\", \"r\") as", "os import shutil import sys from pathlib import Path from", "= \"api/python\" setup_kw = {} # Read in the package", "from setuptools import setup from setuptools.command.sdist import sdist as SdistCommand", "README.md file. README_FILE = TOP_DIR / \"README.md\" if README_FILE.exists(): with", "= Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix = ext_dir / ext.install_prefix assert cmake_install_prefix.exists(), cmake_install_prefix", "If the CMakeExtension doesn't support `cmake_component` then we have to", "ext.install_prefix assert cmake_install_prefix.exists(), cmake_install_prefix try: lib_path = cmake_install_prefix / \"lib\"", "the Python interpreter we are currently using to # run", "git repository. 
VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py') if not (TOP_DIR", "import log from setuptools import setup from setuptools.command.sdist import sdist", "\"-DRYML_BUILD_API:BOOL=ON\", # Force cmake to use the Python interpreter we", "pathlib import Path from distutils import log from setuptools import", "a CMake package cmake_args = dict( name='ryml.ryml', install_prefix='', source_dir='', cmake_component='python',", "Python interpreter we are currently using to # run setup.py", "setuptools import setup from setuptools.command.sdist import sdist as SdistCommand from", "BuildExtension, CMakeExtension TOP_DIR = (Path(__file__).parent).resolve() # Where the Python library", "} # Read in the module description from the README.md", "_BuildExtension=BuildExtension class BuildExtension(_BuildExtension): def build_extension(self, ext): _BuildExtension.build_extension(self, ext) ext_dir =", "cmake_install_prefix = ext_dir / ext.install_prefix assert cmake_install_prefix.exists(), cmake_install_prefix try: lib_path", "\"version_scheme\": \"post-release\", \"local_scheme\": \"no-local-version\", \"write_to\": VERSION_FILE, } # Read in", "have to # do some manual cleanup. _BuildExtension=BuildExtension class BuildExtension(_BuildExtension):", "TOP_DIR = (Path(__file__).parent).resolve() # Where the Python library is actually", "do it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], # Package contents control", "Force cmake to use the Python interpreter we are currently", "version when not in a git repository. VERSION_FILE = os.path.join(PYTHON_DIR,", "then we have to # do some manual cleanup. _BuildExtension=BuildExtension", "in the module description from the README.md file. README_FILE =", "import setup from setuptools.command.sdist import sdist as SdistCommand from cmake_build_extension", "do some manual cleanup. 
_BuildExtension=BuildExtension class BuildExtension(_BuildExtension): def build_extension(self, ext):", "to # run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ], ) try: ext =", "ext_dir / ext.install_prefix assert cmake_install_prefix.exists(), cmake_install_prefix try: lib_path = cmake_install_prefix", "human readable information name='rapidyaml', #author='<NAME>', description='Rapid YAML - a library", "as fh: setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type'] = \"text/markdown\" # define", "# SPDX-License-Identifier: MIT import os import shutil import sys from", "dict( name='ryml.ryml', install_prefix='', source_dir='', cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", # Force cmake", "the README.md file. README_FILE = TOP_DIR / \"README.md\" if README_FILE.exists():", "installed files:') for f in cmake_install_prefix.rglob(\"*\"): log.info(' - %s', f)", "parse and emit YAML, and do it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT',", "description='Rapid YAML - a library to parse and emit YAML,", "some manual cleanup. _BuildExtension=BuildExtension class BuildExtension(_BuildExtension): def build_extension(self, ext): _BuildExtension.build_extension(self,", "Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix = ext_dir / ext.install_prefix assert cmake_install_prefix.exists(), cmake_install_prefix try:", "# Where the Python library is actually found. 
PYTHON_DIR =", "version else: setup_kw['use_scm_version']= { \"version_scheme\": \"post-release\", \"local_scheme\": \"no-local-version\", \"write_to\": VERSION_FILE,", "= fh.read() setup_kw['long_description_content_type'] = \"text/markdown\" # define a CMake package", "contents control cmdclass={ \"build_ext\": BuildExtension, }, package_dir={\"\": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext],", "include_package_data=True, # Requirements python_requires=\">=3.7\", setup_requires=['setuptools_scm'], # Extra arguments **setup_kw, )", "- %s', f) raise setup( # Package human readable information", "BuildExtension, }, package_dir={\"\": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True, # Requirements python_requires=\">=3.7\",", "# do some manual cleanup. _BuildExtension=BuildExtension class BuildExtension(_BuildExtension): def build_extension(self,", "\"r\") as fh: setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type'] = \"text/markdown\" #", "# -*- coding: utf-8 -*- # SPDX-License-Identifier: MIT import os", "the Python library is actually found. PYTHON_DIR = \"api/python\" setup_kw", "cmake_install_prefix.exists(), cmake_install_prefix try: lib_path = cmake_install_prefix / \"lib\" assert lib_path.exists(),", "CMakeExtension(**cmake_args) # If the CMakeExtension doesn't support `cmake_component` then we", "from the README.md file. 
README_FILE = TOP_DIR / \"README.md\" if", "install_prefix='', source_dir='', cmake_component='python', cmake_configure_options=[ \"-DRYML_BUILD_API:BOOL=ON\", # Force cmake to use", "if README_FILE.exists(): with open(TOP_DIR / \"README.md\", \"r\") as fh: setup_kw['long_description']", "= CMakeExtension(**cmake_args) except TypeError: del cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args) #", "`cmake_component` then we have to # do some manual cleanup.", "= cmake_install_prefix / \"lib\" assert lib_path.exists(), lib_path log.info(\"Removing everything under:", "BuildExtension(_BuildExtension): def build_extension(self, ext): _BuildExtension.build_extension(self, ext) ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix", "from pathlib import Path from distutils import log from setuptools", "shutil import sys from pathlib import Path from distutils import", "Read in the package version when not in a git", "define a CMake package cmake_args = dict( name='ryml.ryml', install_prefix='', source_dir='',", "MIT import os import shutil import sys from pathlib import", "cmake_build_extension import BuildExtension, CMakeExtension TOP_DIR = (Path(__file__).parent).resolve() # Where the", "Read in the module description from the README.md file. README_FILE", "we are currently using to # run setup.py \"-DPython3_EXECUTABLE:FILEPATH=\"+sys.executable, ],", "CMakeExtension doesn't support `cmake_component` then we have to # do", "the package version when not in a git repository. 
VERSION_FILE", "raise setup( # Package human readable information name='rapidyaml', #author='<NAME>', description='Rapid", "to parse and emit YAML, and do it fast.', url='https://github.com/biojppm/rapidyaml',", "cmake_install_prefix / \"include\" assert inc_path.exists(), inc_path log.info(\"Removing everything under: %s\",", "emit YAML, and do it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], #", "\"build_ext\": BuildExtension, }, package_dir={\"\": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True, # Requirements", "setup( # Package human readable information name='rapidyaml', #author='<NAME>', description='Rapid YAML", "files:') for f in cmake_install_prefix.rglob(\"*\"): log.info(' - %s', f) raise", "except: log.info('Found following installed files:') for f in cmake_install_prefix.rglob(\"*\"): log.info('", "lib_path = cmake_install_prefix / \"lib\" assert lib_path.exists(), lib_path log.info(\"Removing everything", "in a git repository. VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py') if", "from cmake_build_extension import BuildExtension, CMakeExtension TOP_DIR = (Path(__file__).parent).resolve() # Where", "a git repository. 
VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py') if not", "setup_kw['version'] = version else: setup_kw['use_scm_version']= { \"version_scheme\": \"post-release\", \"local_scheme\": \"no-local-version\",", "= ext_dir / ext.install_prefix assert cmake_install_prefix.exists(), cmake_install_prefix try: lib_path =", "# Package human readable information name='rapidyaml', #author='<NAME>', description='Rapid YAML -", "and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] = version else: setup_kw['use_scm_version']= { \"version_scheme\":", "def build_extension(self, ext): _BuildExtension.build_extension(self, ext) ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute() cmake_install_prefix =", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # SPDX-License-Identifier: MIT", "CMake package cmake_args = dict( name='ryml.ryml', install_prefix='', source_dir='', cmake_component='python', cmake_configure_options=[", "os.path.join(PYTHON_DIR, 'ryml', 'version.py') if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE):", "repository. VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py') if not (TOP_DIR /" ]
[ "from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict from litex.soc.interconnect import axi from", "use_ps7_clk=False): self.rst = Signal() self.clock_domains.cd_sys = ClockDomain() # # #", "litex.soc.integration.soc import SoCRegion from litex.soc.integration.builder import * from litex.soc.cores.led import", "pads = platform.request_all(\"user_led\"), sys_clk_freq = sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def", "pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys, sys_clk_freq) # Ignore sys_clk to pll.clkin path", "self.cpu.add_axi_gp_master(), wishbone = wb_gp0, base_address = self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk =", "litex_boards.platforms import digilent_arty_z7 from litex.build import tools from litex.build.xilinx import", "help=\"FPGA toolchain (vivado, symbiflow or yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\")", "prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + \".bit\")) if __name__ ==", "wishbone.Interface() self.submodules += axi.AXI2Wishbone( axi = self.cpu.add_axi_gp_master(), wishbone = wb_gp0,", "import * from litex.soc.integration.soc_core import * from litex.soc.integration.soc import SoCRegion", "part of LiteX-Boards. 
# # Copyright (c) 2021 <NAME> <<EMAIL>>", "default } # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident =", "125e6) pll.create_clkout(self.cd_sys, sys_clk_freq) # Ignore sys_clk to pll.clkin path created", ") builder = Builder(soc, **builder_argdict(args)) builder_kwargs = vivado_build_argdict(args) if args.toolchain", "if kwargs.get(\"cpu_type\", None) == \"zynq7000\": assert toolchain == \"vivado\", '", "= pll = S7PLL(speedgrade=-1) self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys,", "sys_clk to pll.clkin path created by SoC's rst. platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin)", "import axi from litex.soc.interconnect import wishbone from litex.soc.cores.clock import *", "bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant (z7-20 or z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6,", "# # This file is part of LiteX-Boards. 
# #", "+= ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst) else: # Clk.", "Build -------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description=\"LiteX SoC on Arty", "yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\") parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\") parser.add_argument(\"--variant\",", "tools from litex.build.xilinx import common as xil_common from litex.build.xilinx.vivado import", "subprocess from migen import * from litex_boards.platforms import digilent_arty_z7 from", "# # if use_ps7_clk: self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\")", "wb_gp0, base_address = self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk = True else: use_ps7_clk", "sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs): platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if kwargs.get(\"cpu_type\", None)", "= vivado_build_argdict(args) if args.toolchain == \"vivado\" else {} builder.build(**builder_kwargs, run=args.build)", "main(): parser = argparse.ArgumentParser(description=\"LiteX SoC on Arty Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\",", "sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args)) builder_kwargs = vivado_build_argdict(args)", "digilent_arty_z7 from litex.build import tools from litex.build.xilinx import common as", "http://kmf2.trabucayre.com/\" + preset_name) self.cpu.set_ps7(preset=preset_name) # Connect AXI GP0 to the", "} # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident = \"LiteX", "= platform.request(\"clk125\") # PLL. 
self.submodules.pll = pll = S7PLL(speedgrade=-1) self.comb", "* from litex.soc.cores.led import LedChaser # CRG ---------------------------------------------------------------------------------------------- class _CRG(Module):", "self.mem_map = { 'csr': 0x4000_0000, # Zynq GP0 default }", "default=\"vivado\", help=\"FPGA toolchain (vivado, symbiflow or yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\", help=\"Build", "ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst) else: # Clk. clk125", "Arty Z7\", **kwargs) # Zynq7000 Integration --------------------------------------------------------------------- if kwargs.get(\"cpu_type\", None)", "args.toolchain == \"vivado\" else {} builder.build(**builder_kwargs, run=args.build) if args.load: prog", "if use_ps7_clk: self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst)", "frequency.\") builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args = parser.parse_args() soc =", "0 kwargs['with_uart'] = False self.mem_map = { 'csr': 0x4000_0000, #", "from litex.build.xilinx import common as xil_common from litex.build.xilinx.vivado import vivado_build_args,", "import * from litex.soc.cores.led import LedChaser # CRG ---------------------------------------------------------------------------------------------- class", "else: # Clk. clk125 = platform.request(\"clk125\") # PLL. 
self.submodules.pll =", "== \"vivado\", ' not tested / specific vivado cmds' preset_name", "import LedChaser # CRG ---------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self, platform,", "SoCCore.__init__(self, platform, sys_clk_freq, ident = \"LiteX SoC on Arty Z7\",", "args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args)) builder_kwargs =", "**kwargs) # Zynq7000 Integration --------------------------------------------------------------------- if kwargs.get(\"cpu_type\", None) == \"zynq7000\":", "None) == \"zynq7000\": kwargs['integrated_sram_size'] = 0 kwargs['with_uart'] = False self.mem_map", "def main(): parser = argparse.ArgumentParser(description=\"LiteX SoC on Arty Z7\") parser.add_argument(\"--toolchain\",", "= sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description=\"LiteX", "import subprocess from migen import * from litex_boards.platforms import digilent_arty_z7", "S7PLL(speedgrade=-1) self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys, sys_clk_freq) # Ignore", "litex.soc.integration.soc_core import * from litex.soc.integration.soc import SoCRegion from litex.soc.integration.builder import", "litex.build import tools from litex.build.xilinx import common as xil_common from", "BSD-2-Clause import argparse import subprocess from migen import * from", "= _CRG(platform, sys_clk_freq, use_ps7_clk) # Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds", "= \"LiteX SoC on Arty Z7\", **kwargs) # Zynq7000 Integration", "toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args))", 
"LedChaser( pads = platform.request_all(\"user_led\"), sys_clk_freq = sys_clk_freq) # Build --------------------------------------------------------------------------------------------", "builder.build(**builder_kwargs, run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name +", "pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys, sys_clk_freq) # Ignore sys_clk to pll.clkin", "assert toolchain == \"vivado\", ' not tested / specific vivado", "self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst) else: #", "SoC wb_gp0 = wishbone.Interface() self.submodules += axi.AXI2Wishbone( axi = self.cpu.add_axi_gp_master(),", "platform.request(\"clk125\") # PLL. self.submodules.pll = pll = S7PLL(speedgrade=-1) self.comb +=", "= wishbone.Interface() self.submodules += axi.AXI2Wishbone( axi = self.cpu.add_axi_gp_master(), wishbone =", "---------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self, platform, sys_clk_freq, use_ps7_clk=False): self.rst =", "\"vivado\" else {} builder.build(**builder_kwargs, run=args.build) if args.load: prog = soc.platform.create_programmer()", "digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if kwargs.get(\"cpu_type\", None) == \"zynq7000\": kwargs['integrated_sram_size'] = 0", "from litex.soc.cores.clock import * from litex.soc.integration.soc_core import * from litex.soc.integration.soc", "---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident = \"LiteX SoC on Arty", "action=\"store_true\", help=\"Load bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant (z7-20 or z7-10).\")", "litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict from 
litex.soc.interconnect import axi from litex.soc.interconnect", "This file is part of LiteX-Boards. # # Copyright (c)", "from litex.build import tools from litex.build.xilinx import common as xil_common", "<filename>litex_boards/targets/digilent_arty_z7.py #!/usr/bin/env python3 # # This file is part of", "SoCRegion from litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser #", "* from litex_boards.platforms import digilent_arty_z7 from litex.build import tools from", "# Clk. clk125 = platform.request(\"clk125\") # PLL. self.submodules.pll = pll", "0x4000_0000, # Zynq GP0 default } # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self,", "\"zynq7000\": assert toolchain == \"vivado\", ' not tested / specific", "ClockDomain() # # # if use_ps7_clk: self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb", "= LedChaser( pads = platform.request_all(\"user_led\"), sys_clk_freq = sys_clk_freq) # Build", "if args.toolchain == \"vivado\" else {} builder.build(**builder_kwargs, run=args.build) if args.load:", "common as xil_common from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict from litex.soc.interconnect", "parser.set_defaults(cpu_type=\"zynq7000\") args = parser.parse_args() soc = BaseSoC( variant = args.variant,", "created by SoC's rst. 
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # BaseSoC ------------------------------------------------------------------------------------------ class", "CRG ---------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self, platform, sys_clk_freq, use_ps7_clk=False): self.rst", "toolchain=toolchain) if kwargs.get(\"cpu_type\", None) == \"zynq7000\": kwargs['integrated_sram_size'] = 0 kwargs['with_uart']", "# Build -------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description=\"LiteX SoC on", "def __init__(self, platform, sys_clk_freq, use_ps7_clk=False): self.rst = Signal() self.clock_domains.cd_sys =", "__init__(self, variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs): platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain)", "= BaseSoC( variant = args.variant, toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args)", "{ 'csr': 0x4000_0000, # Zynq GP0 default } # SoCCore", "from litex.soc.integration.soc_core import * from litex.soc.integration.soc import SoCRegion from litex.soc.integration.builder", "by SoC's rst. platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore):", "SoC's rst. 
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def", "import common as xil_common from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict from", "import vivado_build_args, vivado_build_argdict from litex.soc.interconnect import axi from litex.soc.interconnect import", "preset_name) self.cpu.set_ps7(preset=preset_name) # Connect AXI GP0 to the SoC wb_gp0", "clock frequency.\") builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args = parser.parse_args() soc", "if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + \".bit\")) if", "= platform.request_all(\"user_led\"), sys_clk_freq = sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def main():", "== \"zynq7000\": assert toolchain == \"vivado\", ' not tested /", "AXI GP0 to the SoC wb_gp0 = wishbone.Interface() self.submodules +=", "parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant (z7-20 or", "kwargs.get(\"cpu_type\", None) == \"zynq7000\": assert toolchain == \"vivado\", ' not", "if with_led_chaser: self.submodules.leds = LedChaser( pads = platform.request_all(\"user_led\"), sys_clk_freq =", "with_led_chaser=True, **kwargs): platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if kwargs.get(\"cpu_type\", None) ==", "to the SoC wb_gp0 = wishbone.Interface() self.submodules += axi.AXI2Wishbone( axi", "platform.request_all(\"user_led\"), sys_clk_freq = sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def main(): parser", "default=125e6, help=\"System clock frequency.\") 
builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args =", "on Arty Z7\", **kwargs) # Zynq7000 Integration --------------------------------------------------------------------- if kwargs.get(\"cpu_type\",", "\"LiteX SoC on Arty Z7\", **kwargs) # Zynq7000 Integration ---------------------------------------------------------------------", "if variant == \"z7-20\" else \"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name)", "= soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + \".bit\")) if __name__ == \"__main__\":", "LiteX-Boards. # # Copyright (c) 2021 <NAME> <<EMAIL>> # SPDX-License-Identifier:", "# Ignore sys_clk to pll.clkin path created by SoC's rst.", "\"vivado\", ' not tested / specific vivado cmds' preset_name =", "PLL. self.submodules.pll = pll = S7PLL(speedgrade=-1) self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk125,", "parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock frequency.\") builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args", "variant == \"z7-20\" else \"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name) self.cpu.set_ps7(preset=preset_name)", "__init__(self, platform, sys_clk_freq, use_ps7_clk=False): self.rst = Signal() self.clock_domains.cd_sys = ClockDomain()", "class BaseSoC(SoCCore): def __init__(self, variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs): platform", "= self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk = True else: use_ps7_clk = False", "SoC on Arty Z7\", **kwargs) # Zynq7000 Integration --------------------------------------------------------------------- if", "rst. 
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self,", "toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs): platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if kwargs.get(\"cpu_type\",", "= ClockDomain() # # # if use_ps7_clk: self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\"))", "= \"arty_z7_20.tcl\" if variant == \"z7-20\" else \"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\"", "as xil_common from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict from litex.soc.interconnect import", "= S7PLL(speedgrade=-1) self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys, sys_clk_freq) #", "the SoC wb_gp0 = wishbone.Interface() self.submodules += axi.AXI2Wishbone( axi =", "' not tested / specific vivado cmds' preset_name = \"arty_z7_20.tcl\"", "(z7-20 or z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock frequency.\") builder_args(parser) soc_core_args(parser)", "pll.create_clkout(self.cd_sys, sys_clk_freq) # Ignore sys_clk to pll.clkin path created by", "axi = self.cpu.add_axi_gp_master(), wishbone = wb_gp0, base_address = self.mem_map['csr']) self.add_wb_master(wb_gp0)", "else: use_ps7_clk = False # CRG -------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform,", "base_address = self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk = True else: use_ps7_clk =", "from litex.soc.interconnect import wishbone from litex.soc.cores.clock import * from litex.soc.integration.soc_core", "variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs): platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if", "import 
digilent_arty_z7 from litex.build import tools from litex.build.xilinx import common", "# if use_ps7_clk: self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") |", "z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock frequency.\") builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\")", "= True else: use_ps7_clk = False # CRG -------------------------------------------------------------------------------------- self.submodules.crg", "builder = Builder(soc, **builder_argdict(args)) builder_kwargs = vivado_build_argdict(args) if args.toolchain ==", "(vivado, symbiflow or yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\") parser.add_argument(\"--load\", action=\"store_true\",", "<NAME> <<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause import argparse import subprocess from", "or z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock frequency.\") builder_args(parser) soc_core_args(parser) vivado_build_args(parser)", "self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk) # Leds ------------------------------------------------------------------------------------- if with_led_chaser:", "vivado_build_argdict from litex.soc.interconnect import axi from litex.soc.interconnect import wishbone from", "Zynq GP0 default } # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq,", "# Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds = LedChaser( pads =", "# BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self, variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6),", "Ignore 
sys_clk to pll.clkin path created by SoC's rst. platform.add_false_path_constraints(self.cd_sys.clk,", "wishbone = wb_gp0, base_address = self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk = True", "BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self, variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True,", "builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args = parser.parse_args() soc = BaseSoC(", "# This file is part of LiteX-Boards. # # Copyright", "Signal() self.clock_domains.cd_sys = ClockDomain() # # # if use_ps7_clk: self.comb", "/ specific vivado cmds' preset_name = \"arty_z7_20.tcl\" if variant ==", "Integration --------------------------------------------------------------------- if kwargs.get(\"cpu_type\", None) == \"zynq7000\": assert toolchain ==", "argparse import subprocess from migen import * from litex_boards.platforms import", "toolchain == \"vivado\", ' not tested / specific vivado cmds'", "use_ps7_clk = True else: use_ps7_clk = False # CRG --------------------------------------------------------------------------------------", "Builder(soc, **builder_argdict(args)) builder_kwargs = vivado_build_argdict(args) if args.toolchain == \"vivado\" else", "* from litex.soc.integration.soc import SoCRegion from litex.soc.integration.builder import * from", "from litex_boards.platforms import digilent_arty_z7 from litex.build import tools from litex.build.xilinx", "vivado_build_argdict(args) if args.toolchain == \"vivado\" else {} builder.build(**builder_kwargs, run=args.build) if", "wishbone from litex.soc.cores.clock import * from litex.soc.integration.soc_core import * from", "symbiflow or yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\") parser.add_argument(\"--load\", action=\"store_true\", help=\"Load", 
"parser.parse_args() soc = BaseSoC( variant = args.variant, toolchain = args.toolchain,", "or yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\") parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\")", "Zynq7000 Integration --------------------------------------------------------------------- if kwargs.get(\"cpu_type\", None) == \"zynq7000\": assert toolchain", "import wishbone from litex.soc.cores.clock import * from litex.soc.integration.soc_core import *", "**builder_argdict(args)) builder_kwargs = vivado_build_argdict(args) if args.toolchain == \"vivado\" else {}", "wb_gp0 = wishbone.Interface() self.submodules += axi.AXI2Wishbone( axi = self.cpu.add_axi_gp_master(), wishbone", "from litex.soc.integration.soc import SoCRegion from litex.soc.integration.builder import * from litex.soc.cores.led", "xil_common from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict from litex.soc.interconnect import axi", "self.rst = Signal() self.clock_domains.cd_sys = ClockDomain() # # # if", "self.cpu.set_ps7(preset=preset_name) # Connect AXI GP0 to the SoC wb_gp0 =", "Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA toolchain (vivado, symbiflow or yosys+nextpnr).\") parser.add_argument(\"--build\",", "== \"z7-20\" else \"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name) self.cpu.set_ps7(preset=preset_name) #", "parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant (z7-20 or z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System", "False self.mem_map = { 'csr': 0x4000_0000, # Zynq GP0 default", "\"zynq7000\": kwargs['integrated_sram_size'] = 0 kwargs['with_uart'] = False self.mem_map = {", "<<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause import argparse import subprocess from migen", "litex.soc.cores.clock import * from litex.soc.integration.soc_core import * from 
litex.soc.integration.soc import", "'csr': 0x4000_0000, # Zynq GP0 default } # SoCCore ----------------------------------------------------------------------------------", "from litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser # CRG", "platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if kwargs.get(\"cpu_type\", None) == \"zynq7000\": kwargs['integrated_sram_size']", "= self.cpu.add_axi_gp_master(), wishbone = wb_gp0, base_address = self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk", "cmds' preset_name = \"arty_z7_20.tcl\" if variant == \"z7-20\" else \"arty_z7_10.tcl\"", "= { 'csr': 0x4000_0000, # Zynq GP0 default } #", "platform, sys_clk_freq, use_ps7_clk=False): self.rst = Signal() self.clock_domains.cd_sys = ClockDomain() #", "self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys, sys_clk_freq) # Ignore sys_clk", "pll.clkin path created by SoC's rst. platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # BaseSoC", "SPDX-License-Identifier: BSD-2-Clause import argparse import subprocess from migen import *", "SoC on Arty Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA toolchain (vivado, symbiflow", "# # # if use_ps7_clk: self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb +=", "False # CRG -------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk) #", "parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA toolchain (vivado, symbiflow or yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\",", "help=\"Board variant (z7-20 or z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock frequency.\")", "run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + 
\".bit\"))", "Z7\", **kwargs) # Zynq7000 Integration --------------------------------------------------------------------- if kwargs.get(\"cpu_type\", None) ==", "LedChaser # CRG ---------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self, platform, sys_clk_freq,", "self.submodules.pll = pll = S7PLL(speedgrade=-1) self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6)", "import * from litex.soc.integration.soc import SoCRegion from litex.soc.integration.builder import *", "**soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args)) builder_kwargs = vivado_build_argdict(args) if", "if kwargs.get(\"cpu_type\", None) == \"zynq7000\": kwargs['integrated_sram_size'] = 0 kwargs['with_uart'] =", "from migen import * from litex_boards.platforms import digilent_arty_z7 from litex.build", "# PLL. self.submodules.pll = pll = S7PLL(speedgrade=-1) self.comb += pll.reset.eq(self.rst)", "parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\") parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\",", "= wb_gp0, base_address = self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk = True else:", "help=\"Build bitstream.\") parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant", "of LiteX-Boards. 
# # Copyright (c) 2021 <NAME> <<EMAIL>> #", "kwargs['integrated_sram_size'] = 0 kwargs['with_uart'] = False self.mem_map = { 'csr':", "Arty Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA toolchain (vivado, symbiflow or yosys+nextpnr).\")", "vivado_build_args, vivado_build_argdict from litex.soc.interconnect import axi from litex.soc.interconnect import wishbone", "args.variant, toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc,", "axi from litex.soc.interconnect import wishbone from litex.soc.cores.clock import * from", "else \"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name) self.cpu.set_ps7(preset=preset_name) # Connect AXI", "\"arty_z7_20.tcl\" if variant == \"z7-20\" else \"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\" +", "use_ps7_clk = False # CRG -------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform, sys_clk_freq,", "is part of LiteX-Boards. 
# # Copyright (c) 2021 <NAME>", "litex.soc.cores.led import LedChaser # CRG ---------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self,", "+ preset_name) self.cpu.set_ps7(preset=preset_name) # Connect AXI GP0 to the SoC", "action=\"store_true\", help=\"Build bitstream.\") parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board", "2021 <NAME> <<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause import argparse import subprocess", "#!/usr/bin/env python3 # # This file is part of LiteX-Boards.", "BaseSoC(SoCCore): def __init__(self, variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs): platform =", "self.mem_map['csr']) self.add_wb_master(wb_gp0) use_ps7_clk = True else: use_ps7_clk = False #", "import * from litex_boards.platforms import digilent_arty_z7 from litex.build import tools", "variant = args.variant, toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder", "* from litex.soc.integration.soc_core import * from litex.soc.integration.soc import SoCRegion from", "GP0 default } # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident", "_CRG(Module): def __init__(self, platform, sys_clk_freq, use_ps7_clk=False): self.rst = Signal() self.clock_domains.cd_sys", "self.clock_domains.cd_sys = ClockDomain() # # # if use_ps7_clk: self.comb +=", "to pll.clkin path created by SoC's rst. 
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) #", "{} builder.build(**builder_kwargs, run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name", "# SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident = \"LiteX SoC", "builder_kwargs = vivado_build_argdict(args) if args.toolchain == \"vivado\" else {} builder.build(**builder_kwargs,", "None) == \"zynq7000\": assert toolchain == \"vivado\", ' not tested", "clk125 = platform.request(\"clk125\") # PLL. self.submodules.pll = pll = S7PLL(speedgrade=-1)", "**kwargs): platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if kwargs.get(\"cpu_type\", None) == \"zynq7000\":", "preset_name = \"arty_z7_20.tcl\" if variant == \"z7-20\" else \"arty_z7_10.tcl\" os.system(\"wget", "# Zynq7000 Integration --------------------------------------------------------------------- if kwargs.get(\"cpu_type\", None) == \"zynq7000\": assert", "python3 # # This file is part of LiteX-Boards. 
#", "# CRG ---------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self, platform, sys_clk_freq, use_ps7_clk=False):", "CRG -------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk) # Leds -------------------------------------------------------------------------------------", "== \"zynq7000\": kwargs['integrated_sram_size'] = 0 kwargs['with_uart'] = False self.mem_map =", "# CRG -------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk) # Leds", "sys_clk_freq, use_ps7_clk) # Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds = LedChaser(", "--------------------------------------------------------------------- if kwargs.get(\"cpu_type\", None) == \"zynq7000\": assert toolchain == \"vivado\",", "self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst) else: # Clk. 
clk125 =", "parser = argparse.ArgumentParser(description=\"LiteX SoC on Arty Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA", "------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self, variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs):", "on Arty Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA toolchain (vivado, symbiflow or", "sys_clk_freq, ident = \"LiteX SoC on Arty Z7\", **kwargs) #", "== \"vivado\" else {} builder.build(**builder_kwargs, run=args.build) if args.load: prog =", "ident = \"LiteX SoC on Arty Z7\", **kwargs) # Zynq7000", "platform, sys_clk_freq, ident = \"LiteX SoC on Arty Z7\", **kwargs)", "else {} builder.build(**builder_kwargs, run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir,", "argparse.ArgumentParser(description=\"LiteX SoC on Arty Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA toolchain (vivado,", "Clk. clk125 = platform.request(\"clk125\") # PLL. 
self.submodules.pll = pll =", "with_led_chaser: self.submodules.leds = LedChaser( pads = platform.request_all(\"user_led\"), sys_clk_freq = sys_clk_freq)", "pll = S7PLL(speedgrade=-1) self.comb += pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys, sys_clk_freq)", "------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds = LedChaser( pads = platform.request_all(\"user_led\"), sys_clk_freq", "Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds = LedChaser( pads = platform.request_all(\"user_led\"),", "= Builder(soc, **builder_argdict(args)) builder_kwargs = vivado_build_argdict(args) if args.toolchain == \"vivado\"", "not tested / specific vivado cmds' preset_name = \"arty_z7_20.tcl\" if", "self.add_wb_master(wb_gp0) use_ps7_clk = True else: use_ps7_clk = False # CRG", "+= ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst) else: # Clk. clk125 = platform.request(\"clk125\")", "litex.build.xilinx import common as xil_common from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict", "os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name) self.cpu.set_ps7(preset=preset_name) # Connect AXI GP0 to", "def __init__(self, variant=\"z7-20\", toolchain=\"vivado\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs): platform = digilent_arty_z7.Platform(variant=variant,", "_CRG(platform, sys_clk_freq, use_ps7_clk) # Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds =", "ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst) else: # Clk. 
clk125 = platform.request(\"clk125\") #", "import SoCRegion from litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser", "# Zynq GP0 default } # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform,", "= argparse.ArgumentParser(description=\"LiteX SoC on Arty Z7\") parser.add_argument(\"--toolchain\", default=\"vivado\", help=\"FPGA toolchain", "soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args = parser.parse_args() soc = BaseSoC( variant", "toolchain (vivado, symbiflow or yosys+nextpnr).\") parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\") parser.add_argument(\"--load\",", "self.rst) else: # Clk. clk125 = platform.request(\"clk125\") # PLL. self.submodules.pll", "kwargs.get(\"cpu_type\", None) == \"zynq7000\": kwargs['integrated_sram_size'] = 0 kwargs['with_uart'] = False", "Copyright (c) 2021 <NAME> <<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause import argparse", "class _CRG(Module): def __init__(self, platform, sys_clk_freq, use_ps7_clk=False): self.rst = Signal()", "= False self.mem_map = { 'csr': 0x4000_0000, # Zynq GP0", "GP0 to the SoC wb_gp0 = wishbone.Interface() self.submodules += axi.AXI2Wishbone(", "-------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk) # Leds ------------------------------------------------------------------------------------- if", "from litex.soc.cores.led import LedChaser # CRG ---------------------------------------------------------------------------------------------- class _CRG(Module): def", "path created by SoC's rst. 
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # BaseSoC ------------------------------------------------------------------------------------------", "= 0 kwargs['with_uart'] = False self.mem_map = { 'csr': 0x4000_0000,", "help=\"System clock frequency.\") builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args = parser.parse_args()", "sys_clk_freq) # Ignore sys_clk to pll.clkin path created by SoC's", "args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + \".bit\")) if __name__", "\"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name) self.cpu.set_ps7(preset=preset_name) # Connect AXI GP0", "default=\"z7-20\", help=\"Board variant (z7-20 or z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock", "sys_clk_freq = sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def main(): parser =", "use_ps7_clk: self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\")) self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst) else:", "import argparse import subprocess from migen import * from litex_boards.platforms", "# Connect AXI GP0 to the SoC wb_gp0 = wishbone.Interface()", "file is part of LiteX-Boards. 
# # Copyright (c) 2021", "sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description=\"LiteX SoC", "= args.variant, toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder =", "# SPDX-License-Identifier: BSD-2-Clause import argparse import subprocess from migen import", "axi.AXI2Wishbone( axi = self.cpu.add_axi_gp_master(), wishbone = wb_gp0, base_address = self.mem_map['csr'])", "self.submodules.leds = LedChaser( pads = platform.request_all(\"user_led\"), sys_clk_freq = sys_clk_freq) #", "soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + \".bit\")) if __name__ == \"__main__\": main()", "args = parser.parse_args() soc = BaseSoC( variant = args.variant, toolchain", "from litex.soc.interconnect import axi from litex.soc.interconnect import wishbone from litex.soc.cores.clock", "soc = BaseSoC( variant = args.variant, toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)),", "= args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args)) builder_kwargs", "platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self, variant=\"z7-20\",", "| self.rst) else: # Clk. 
clk125 = platform.request(\"clk125\") # PLL.", "vivado cmds' preset_name = \"arty_z7_20.tcl\" if variant == \"z7-20\" else", "= False # CRG -------------------------------------------------------------------------------------- self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk)", "+= pll.reset.eq(self.rst) pll.register_clkin(clk125, 125e6) pll.create_clkout(self.cd_sys, sys_clk_freq) # Ignore sys_clk to", "= digilent_arty_z7.Platform(variant=variant, toolchain=toolchain) if kwargs.get(\"cpu_type\", None) == \"zynq7000\": kwargs['integrated_sram_size'] =", "sys_clk_freq, use_ps7_clk=False): self.rst = Signal() self.clock_domains.cd_sys = ClockDomain() # #", "= Signal() self.clock_domains.cd_sys = ClockDomain() # # # if use_ps7_clk:", "\"z7-20\" else \"arty_z7_10.tcl\" os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name) self.cpu.set_ps7(preset=preset_name) # Connect", "litex.soc.interconnect import wishbone from litex.soc.cores.clock import * from litex.soc.integration.soc_core import", "import tools from litex.build.xilinx import common as xil_common from litex.build.xilinx.vivado", "tested / specific vivado cmds' preset_name = \"arty_z7_20.tcl\" if variant", "bitstream.\") parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant (z7-20", "vivado_build_args(parser) parser.set_defaults(cpu_type=\"zynq7000\") args = parser.parse_args() soc = BaseSoC( variant =", "variant (z7-20 or z7-10).\") parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock frequency.\") builder_args(parser)", "self.submodules += axi.AXI2Wishbone( axi = self.cpu.add_axi_gp_master(), wishbone = wb_gp0, base_address", "# Copyright (c) 2021 <NAME> <<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause import", "Connect AXI GP0 to the SoC wb_gp0 = wishbone.Interface() self.submodules", "SoCCore 
---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident = \"LiteX SoC on", "BaseSoC( variant = args.variant, toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) )", "specific vivado cmds' preset_name = \"arty_z7_20.tcl\" if variant == \"z7-20\"", "pll.clkin) # BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self, variant=\"z7-20\", toolchain=\"vivado\",", "(c) 2021 <NAME> <<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause import argparse import", "help=\"Load bitstream.\") parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant (z7-20 or z7-10).\") parser.add_argument(\"--sys-clk-freq\",", "migen import * from litex_boards.platforms import digilent_arty_z7 from litex.build import", "True else: use_ps7_clk = False # CRG -------------------------------------------------------------------------------------- self.submodules.crg =", "litex.soc.interconnect import axi from litex.soc.interconnect import wishbone from litex.soc.cores.clock import", "kwargs['with_uart'] = False self.mem_map = { 'csr': 0x4000_0000, # Zynq", "litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser # CRG ----------------------------------------------------------------------------------------------", "+= axi.AXI2Wishbone( axi = self.cpu.add_axi_gp_master(), wishbone = wb_gp0, base_address =", "= parser.parse_args() soc = BaseSoC( variant = args.variant, toolchain =", "use_ps7_clk) # Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds = LedChaser( pads", "-------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description=\"LiteX SoC on Arty Z7\")", "# # Copyright (c) 2021 <NAME> 
<<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause" ]
[ "= [] for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) return nodes", "if attr: return node.attrib.get(attr, None) return attr @classmethod def delAttribute(self,", "if node.tail: prev = node.getprevious() if prev is None: if", "by <NAME> Gravity.com licenses this file to you under the", "getElementsByTags(self, node, tags): selector = ','.join(tags) elems = self.css_select(node, selector)", "return attr @classmethod def delAttribute(self, node, attr=None): if attr: _attr", "list(root) @classmethod def textToPara(self, text): return self.fromstring(text) @classmethod def getChildren(self,", "e0 = node if e0.tail: e0 = deepcopy(e0) e0.tail =", "0: break return nodes[0] if nodes else None @classmethod def", "def getComments(self, node): return node.xpath('//comment()') @classmethod def getParent(self, node): return", "more contributor license agreements. See the NOTICE file distributed with", "node, expression): regexp_namespace = \"http://exslt.org/regular-expressions\" items = node.xpath(expression, namespaces={'re': regexp_namespace})", "'*' selector = 'descendant-or-self::%s' % (tag or '*') if attr", "= 'text' root.text = None root.insert(0, t) # loop childs", "= self.createElement(tag='text', text=n.tail, tail=None) root.insert(idx + 1, t) return list(root)", "prev.tail += u' ' + node.tail node.clear() parent.remove(node) @classmethod def", "return elems[0] return None @classmethod def getElementsByTag(self, node, tag=None, attr=None,", "nodes else None @classmethod def isTextNode(self, node): return True if", "createElement(self, tag='p', text=None, tail=None): t = lxmlhtml.HtmlElement() t.tag = tag", "return elems @classmethod def createElement(self, tag='p', text=None, tail=None): t =", "Apache License, Version 2.0 (the \"License\"); you may not use", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "to you under the Apache License, Version 2.0 (the \"License\");", "encodeValue(html) self.doc = 
lxmlhtml.fromstring(html) return self.doc @classmethod def nodeToString(self, node):", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "# if we have a selection tag if node in", "Python port was written by <NAME> Gravity.com licenses this file", "regarding copyright ownership. Python port was written by <NAME> Gravity.com", "node if e0.tail: e0 = deepcopy(e0) e0.tail = None return", "== 0: break return nodes[0] if nodes else None @classmethod", "in enumerate(node.itersiblings(preceding=False)): nodes.append(n) if c == 0: break return nodes[0]", "@classmethod def outerHtml(self, node): e0 = node if e0.tail: e0", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "in elems: elems.remove(node) return elems @classmethod def createElement(self, tag='p', text=None,", "for node in nodes: node.drop_tag() else: nodes.drop_tag() @classmethod def css_select(self,", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "+ 1, t) return list(root) @classmethod def textToPara(self, text): return", "n in enumerate(node.itersiblings(preceding=False)): nodes.append(n) if c == 0: break return", "text t.tail = tail return t @classmethod def getComments(self, node):", "governing permissions and limitations under the License. \"\"\" import lxml.html", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "% (tag or '*') if attr and value: selector =", "encodeValue class Parser(object): @classmethod def xpath_re(self, node, expression): regexp_namespace =", "ANY KIND, either express or implied. 
See the License for", "the root node # if we have a selection tag", "None @classmethod def nextSibling(self, node): nodes = [] for c,", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "@classmethod def getParent(self, node): return node.getparent() @classmethod def remove(self, node):", "<NAME> Gravity.com licenses this file to you under the Apache", "one or more contributor license agreements. See the NOTICE file", "lxml import etree from copy import deepcopy from goose.text import", "= node.xpath(selector, namespaces={\"re\": NS}) # remove the root node #", "from goose.text import innerTrim from goose.text import encodeValue class Parser(object):", "node # if we have some text in the node", "text): return self.fromstring(text) @classmethod def getChildren(self, node): return node.getchildren() @classmethod", "return items @classmethod def drop_tag(self, nodes): if isinstance(nodes, list): for", "== 'text': continue # create a text node for tail", "# remove the root node # if we have a", "or '*') if attr and value: selector = '%s[re:test(@%s, \"%s\",", "regexp_namespace = \"http://exslt.org/regular-expressions\" items = node.xpath(expression, namespaces={'re': regexp_namespace}) return items", "self.css_select(node, selector) # remove the root node # if we", "node.getchildren() @classmethod def getElementsByTags(self, node, tags): selector = ','.join(tags) elems", "under the License is distributed on an \"AS IS\" BASIS,", "'' parent.text += u' ' + node.tail else: if not", "= tail return t @classmethod def getComments(self, node): return node.xpath('//comment()')", "@classmethod def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False): NS =", "etree.tostring(node) @classmethod def replaceTag(self, node, tag): node.tag = tag @classmethod", "NS}) # remove the root node # if we have", "for c, n in 
enumerate(node.itersiblings(preceding=True)): nodes.append(n) return nodes @classmethod def", "file distributed with this work for additional information regarding copyright", "def getElementsByTags(self, node, tags): selector = ','.join(tags) elems = self.css_select(node,", "def textToPara(self, text): return self.fromstring(text) @classmethod def getChildren(self, node): return", "for c, n in enumerate(node.itersiblings(preceding=False)): nodes.append(n) if c == 0:", "import etree from copy import deepcopy from goose.text import innerTrim", "@classmethod def appendChild(self, node, child): node.append(child) @classmethod def childNodes(self, node):", "node, selector): return node.cssselect(selector) @classmethod def fromstring(self, html): html =", "in elems and (tag or childs): elems.remove(node) return elems @classmethod", "None: if node.tail: prev = node.getprevious() if prev is None:", "node.attrib.get(attr, None) return attr @classmethod def delAttribute(self, node, attr=None): if", "to Gravity.com under one or more contributor license agreements. See", "this file except in compliance with the License. 
You may", "= lxmlhtml.HtmlElement() t.tag = tag t.text = text t.tail =", "was written by <NAME> Gravity.com licenses this file to you", "node, tag=None, attr=None, value=None, childs=False): NS = \"http://exslt.org/regular-expressions\" # selector", "previousSibling(self, node): nodes = [] for c, n in enumerate(node.itersiblings(preceding=True)):", "setAttribute(self, node, attr=None, value=None): if attr and value: node.set(attr, value)", "@classmethod def getText(self, node): txts = [i for i in", "outerHtml(self, node): e0 = node if e0.tail: e0 = deepcopy(e0)", "import soupparser from lxml import etree from copy import deepcopy", "node): return etree.tostring(node) @classmethod def replaceTag(self, node, tag): node.tag =", "= \"http://exslt.org/regular-expressions\" items = node.xpath(expression, namespaces={'re': regexp_namespace}) return items @classmethod", "self.doc @classmethod def nodeToString(self, node): return etree.tostring(node) @classmethod def replaceTag(self,", "= [] for c, n in enumerate(node.itersiblings(preceding=False)): nodes.append(n) if c", "goose.text import encodeValue class Parser(object): @classmethod def xpath_re(self, node, expression):", "@classmethod def remove(self, node): parent = node.getparent() if parent is", "parent.text: parent.text = '' parent.text += u' ' + node.tail", "attr: _attr = node.attrib.get(attr, None) if _attr: del node.attrib[attr] @classmethod", "in enumerate(list(root)): idx = root.index(n) # don't process texts nodes", "t.tail = tail return t @classmethod def getComments(self, node): return", "return t @classmethod def getComments(self, node): return node.xpath('//comment()') @classmethod def", "def getParent(self, node): return node.getparent() @classmethod def remove(self, node): parent", "def previousSibling(self, node): nodes = [] for c, n in", "text node for tail if n.tail: t = self.createElement(tag='text', text=n.tail,", "e0.tail: e0 = deepcopy(e0) e0.tail = None return self.nodeToString(e0) class", "file 
except in compliance with the License. You may obtain", "return node.attrib.get(attr, None) return attr @classmethod def delAttribute(self, node, attr=None):", "OR CONDITIONS OF ANY KIND, either express or implied. See", "'%s[re:test(@%s, \"%s\", \"i\")]' % (selector, attr, value) elems = node.xpath(selector,", "attr, value) elems = node.xpath(selector, namespaces={\"re\": NS}) # remove the", "'' prev.tail += u' ' + node.tail node.clear() parent.remove(node) @classmethod", "@classmethod def previousSiblings(self, node): nodes = [] for c, n", "getChildren(self, node): return node.getchildren() @classmethod def getElementsByTags(self, node, tags): selector", "if elems: return elems[0] return None @classmethod def getElementsByTag(self, node,", "under the Apache License, Version 2.0 (the \"License\"); you may", "text in the node if root.text: t = lxmlhtml.HtmlElement() t.text", "\"\"\"\\ This is a python port of \"Goose\" orignialy licensed", "if not parent.text: parent.text = '' parent.text += u' '", "@classmethod def getTag(self, node): return node.tag @classmethod def getText(self, node):", "Parser(object): @classmethod def xpath_re(self, node, expression): regexp_namespace = \"http://exslt.org/regular-expressions\" items", "node.tail node.clear() parent.remove(node) @classmethod def getTag(self, node): return node.tag @classmethod", "isTextNode(self, node): return True if node.tag == 'text' else False", "innerTrim(u' '.join(txts).strip()) @classmethod def previousSiblings(self, node): nodes = [] for", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "tags): selector = ','.join(tags) elems = self.css_select(node, selector) # remove", "root.index(n) # don't process texts nodes if n.tag == 'text':", "n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) if c == 0: break return", "See the License for the specific language governing permissions and", "attr: return node.attrib.get(attr, None) return attr @classmethod def 
delAttribute(self, node,", "# -*- coding: utf-8 -*- \"\"\"\\ This is a python", "','.join(tags) elems = self.css_select(node, selector) # remove the root node", "for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) if c == 0:", "node): parent = node.getparent() if parent is not None: if", "selector): return node.cssselect(selector) @classmethod def fromstring(self, html): html = encodeValue(html)", "\"i\")]' % (selector, attr, value) elems = node.xpath(selector, namespaces={\"re\": NS})", "file to you under the Apache License, Version 2.0 (the", "node): return node.getchildren() @classmethod def getElementsByTags(self, node, tags): selector =", "deepcopy from goose.text import innerTrim from goose.text import encodeValue class", "' + node.tail else: if not prev.tail: prev.tail = ''", "in writing, software distributed under the License is distributed on", "@classmethod def nextSibling(self, node): nodes = [] for c, n", "required by applicable law or agreed to in writing, software", "node.append(child) @classmethod def childNodes(self, node): return list(node) @classmethod def childNodesWithText(self,", "the NOTICE file distributed with this work for additional information", "== 'text' else False @classmethod def getAttribute(self, node, attr=None): if", "selector = '//*[@id=\"%s\"]' % idd elems = node.xpath(selector) if elems:", "self.nodeToString(e0) class ParserSoup(Parser): @classmethod def fromstring(self, html): html = encodeValue(html)", "else None @classmethod def nextSibling(self, node): nodes = [] for", "def css_select(self, node, selector): return node.cssselect(selector) @classmethod def fromstring(self, html):", "@classmethod def childNodesWithText(self, node): root = node # create the", "if e0.tail: e0 = deepcopy(e0) e0.tail = None return self.nodeToString(e0)", "node, tags): selector = ','.join(tags) elems = self.css_select(node, selector) #", "isinstance(nodes, list): for node in nodes: node.drop_tag() else: nodes.drop_tag() 
@classmethod", "-*- \"\"\"\\ This is a python port of \"Goose\" orignialy", "= '' parent.text += u' ' + node.tail else: if", "agreements. See the NOTICE file distributed with this work for", "[] for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) if c ==", "childNodesWithText(self, node): root = node # create the first text", "is None: if not parent.text: parent.text = '' parent.text +=", "in enumerate(node.itersiblings(preceding=True)): nodes.append(n) if c == 0: break return nodes[0]", "permissions and limitations under the License. \"\"\" import lxml.html as", "t.text = root.text t.tag = 'text' root.text = None root.insert(0,", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "the License. \"\"\" import lxml.html as lxmlhtml from lxml.html import", "'text' root.text = None root.insert(0, t) # loop childs for", "% (selector, attr, value) elems = node.xpath(selector, namespaces={\"re\": NS}) #", "node # if we have a selection tag if node", "def outerHtml(self, node): e0 = node if e0.tail: e0 =", "attr=None): if attr: _attr = node.attrib.get(attr, None) if _attr: del", "lxml.html import soupparser from lxml import etree from copy import", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "return elems @classmethod def appendChild(self, node, child): node.append(child) @classmethod def", "-*- coding: utf-8 -*- \"\"\"\\ This is a python port", "return nodes[0] if nodes else None @classmethod def nextSibling(self, node):", "@classmethod def fromstring(self, html): html = encodeValue(html) self.doc = soupparser.fromstring(html)", "node.tail else: if not prev.tail: prev.tail = '' prev.tail +=", "Version 2.0 (the \"License\"); you may not use this file", "node.itertext()] return innerTrim(u' '.join(txts).strip()) @classmethod def previousSiblings(self, node): nodes =", "_attr = node.attrib.get(attr, None) if _attr: del node.attrib[attr] @classmethod def", "if we have some text in the node if root.text:", "= '' prev.tail += u' ' + node.tail node.clear() parent.remove(node)", "not use this file except in compliance with the License.", "2.0 (the \"License\"); you may not use this file except", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "= root.text t.tag = 'text' root.text = None root.insert(0, t)", "enumerate(list(root)): idx = root.index(n) # don't process texts nodes if", "if nodes else None @classmethod def isTextNode(self, node): return True", "a selection tag if node in elems: elems.remove(node) return elems", "'text': continue # create a text node for tail if", "node): e0 = node if e0.tail: e0 = deepcopy(e0) e0.tail", "port of \"Goose\" orignialy licensed to Gravity.com under one or", "attr and value: selector = '%s[re:test(@%s, \"%s\", \"i\")]' % (selector,", "you may not use this file except in compliance with", "return node.getparent() @classmethod def remove(self, node): parent = node.getparent() if", "if n.tail: t = self.createElement(tag='text', text=n.tail, tail=None) root.insert(idx + 1,", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "for c, n in enumerate(list(root)): idx = root.index(n) # don't", "the License. 
You may obtain a copy of the License", "t.tag = tag t.text = text t.tail = tail return", "True if node.tag == 'text' else False @classmethod def getAttribute(self,", "# don't process texts nodes if n.tag == 'text': continue", "prev = node.getprevious() if prev is None: if not parent.text:", "@classmethod def getChildren(self, node): return node.getchildren() @classmethod def getElementsByTags(self, node,", "use this file except in compliance with the License. You", "with this work for additional information regarding copyright ownership. Python", "class Parser(object): @classmethod def xpath_re(self, node, expression): regexp_namespace = \"http://exslt.org/regular-expressions\"", "= tag t.text = text t.tail = tail return t", "attr @classmethod def delAttribute(self, node, attr=None): if attr: _attr =", "def fromstring(self, html): html = encodeValue(html) self.doc = lxmlhtml.fromstring(html) return", "# create a text node for tail if n.tail: t", "lxmlhtml.HtmlElement() t.tag = tag t.text = text t.tail = tail", "= ','.join(tags) elems = self.css_select(node, selector) # remove the root", "'.join(txts).strip()) @classmethod def previousSiblings(self, node): nodes = [] for c,", "value: selector = '%s[re:test(@%s, \"%s\", \"i\")]' % (selector, attr, value)", "@classmethod def xpath_re(self, node, expression): regexp_namespace = \"http://exslt.org/regular-expressions\" items =", "return node.cssselect(selector) @classmethod def fromstring(self, html): html = encodeValue(html) self.doc", "getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False): NS = \"http://exslt.org/regular-expressions\" #", "import encodeValue class Parser(object): @classmethod def xpath_re(self, node, expression): regexp_namespace", "return list(node) @classmethod def childNodesWithText(self, node): root = node #", "= lxmlhtml.HtmlElement() t.text = root.text t.tag = 'text' root.text =", "if attr: _attr = node.attrib.get(attr, None) if _attr: del node.attrib[attr]", "prev.tail = 
'' prev.tail += u' ' + node.tail node.clear()", "\"Goose\" orignialy licensed to Gravity.com under one or more contributor", "node.cssselect(selector) @classmethod def fromstring(self, html): html = encodeValue(html) self.doc =", "port was written by <NAME> Gravity.com licenses this file to", "enumerate(node.itersiblings(preceding=True)): nodes.append(n) if c == 0: break return nodes[0] if", "= encodeValue(html) self.doc = lxmlhtml.fromstring(html) return self.doc @classmethod def nodeToString(self,", "+ node.tail else: if not prev.tail: prev.tail = '' prev.tail", "node.tag = tag @classmethod def stripTags(self, node, *tags): etree.strip_tags(node, *tags)", "= '//*[@id=\"%s\"]' % idd elems = node.xpath(selector) if elems: return", "enumerate(node.itersiblings(preceding=True)): nodes.append(n) return nodes @classmethod def previousSibling(self, node): nodes =", "break return nodes[0] if nodes else None @classmethod def nextSibling(self,", "if node in elems and (tag or childs): elems.remove(node) return", "elems = node.xpath(selector, namespaces={\"re\": NS}) # remove the root node", "= 'descendant-or-self::%s' % (tag or '*') if attr and value:", "tag or '*' selector = 'descendant-or-self::%s' % (tag or '*')", "node # create the first text node # if we", "'*') if attr and value: selector = '%s[re:test(@%s, \"%s\", \"i\")]'", "return node.xpath('//comment()') @classmethod def getParent(self, node): return node.getparent() @classmethod def", "= None return self.nodeToString(e0) class ParserSoup(Parser): @classmethod def fromstring(self, html):", "the node if root.text: t = lxmlhtml.HtmlElement() t.text = root.text", "node.xpath('//comment()') @classmethod def getParent(self, node): return node.getparent() @classmethod def remove(self,", "appendChild(self, node, child): node.append(child) @classmethod def childNodes(self, node): return list(node)", "(the \"License\"); you may not use this file except in", "node): nodes = [] for c, n in 
enumerate(node.itersiblings(preceding=False)): nodes.append(n)", "return nodes[0] if nodes else None @classmethod def isTextNode(self, node):", "if attr and value: node.set(attr, value) @classmethod def outerHtml(self, node):", "this work for additional information regarding copyright ownership. Python port", "don't process texts nodes if n.tag == 'text': continue #", "nodes[0] if nodes else None @classmethod def nextSibling(self, node): nodes", "return True if node.tag == 'text' else False @classmethod def", "return etree.tostring(node) @classmethod def replaceTag(self, node, tag): node.tag = tag", "getElementById(self, node, idd): selector = '//*[@id=\"%s\"]' % idd elems =", "'//*[@id=\"%s\"]' % idd elems = node.xpath(selector) if elems: return elems[0]", "if nodes else None @classmethod def nextSibling(self, node): nodes =", "root.insert(idx + 1, t) return list(root) @classmethod def textToPara(self, text):", "from lxml import etree from copy import deepcopy from goose.text", "nodes = [] for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) return", "return node.tag @classmethod def getText(self, node): txts = [i for", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "None root.insert(0, t) # loop childs for c, n in", "None) if _attr: del node.attrib[attr] @classmethod def setAttribute(self, node, attr=None,", "if we have a selection tag if node in elems:", "= tag @classmethod def stripTags(self, node, *tags): etree.strip_tags(node, *tags) @classmethod", "node): return node.tag @classmethod def getText(self, node): txts = [i", "the Apache License, Version 2.0 (the \"License\"); you may not", "or implied. See the License for the specific language governing", "tag if node in elems and (tag or childs): elems.remove(node)", "import lxml.html as lxmlhtml from lxml.html import soupparser from lxml", "KIND, either express or implied. 
See the License for the", "= node # create the first text node # if", "def getTag(self, node): return node.tag @classmethod def getText(self, node): txts", "fromstring(self, html): html = encodeValue(html) self.doc = lxmlhtml.fromstring(html) return self.doc", "to in writing, software distributed under the License is distributed", "del node.attrib[attr] @classmethod def setAttribute(self, node, attr=None, value=None): if attr", "from goose.text import encodeValue class Parser(object): @classmethod def xpath_re(self, node,", "elems @classmethod def appendChild(self, node, child): node.append(child) @classmethod def childNodes(self,", "value: node.set(attr, value) @classmethod def outerHtml(self, node): e0 = node", "law or agreed to in writing, software distributed under the", "and (tag or childs): elems.remove(node) return elems @classmethod def appendChild(self,", "html = encodeValue(html) self.doc = lxmlhtml.fromstring(html) return self.doc @classmethod def", "orignialy licensed to Gravity.com under one or more contributor license", "+= u' ' + node.tail else: if not prev.tail: prev.tail", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "def isTextNode(self, node): return True if node.tag == 'text' else", "remove the root node # if we have a selection", "copy import deepcopy from goose.text import innerTrim from goose.text import", "node if root.text: t = lxmlhtml.HtmlElement() t.text = root.text t.tag", "a selection tag if node in elems and (tag or", "= [i for i in node.itertext()] return innerTrim(u' '.join(txts).strip()) @classmethod", "getParent(self, node): return node.getparent() @classmethod def remove(self, node): parent =", "if we have a selection tag if node in elems", "c == 0: break return nodes[0] if nodes else None", "parent = node.getparent() if parent is not None: if node.tail:", "you under the Apache License, Version 2.0 (the \"License\"); you", "\"http://exslt.org/regular-expressions\" # selector = tag or '*' selector = 
'descendant-or-self::%s'", "for the specific language governing permissions and limitations under the", "else: if not prev.tail: prev.tail = '' prev.tail += u'", "nodes = [] for c, n in enumerate(node.itersiblings(preceding=False)): nodes.append(n) if", "for i in node.itertext()] return innerTrim(u' '.join(txts).strip()) @classmethod def previousSiblings(self,", "node, attr=None): if attr: return node.attrib.get(attr, None) return attr @classmethod", "list(node) @classmethod def childNodesWithText(self, node): root = node # create", "idd elems = node.xpath(selector) if elems: return elems[0] return None", "licenses this file to you under the Apache License, Version", "[] for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) return nodes @classmethod", "node.tail: prev = node.getprevious() if prev is None: if not", "childs): elems.remove(node) return elems @classmethod def appendChild(self, node, child): node.append(child)", "text=None, tail=None): t = lxmlhtml.HtmlElement() t.tag = tag t.text =", "def getText(self, node): txts = [i for i in node.itertext()]", "childs=False): NS = \"http://exslt.org/regular-expressions\" # selector = tag or '*'", "License. \"\"\" import lxml.html as lxmlhtml from lxml.html import soupparser", "copyright ownership. 
Python port was written by <NAME> Gravity.com licenses", "in nodes: node.drop_tag() else: nodes.drop_tag() @classmethod def css_select(self, node, selector):", "the License for the specific language governing permissions and limitations", "namespaces={'re': regexp_namespace}) return items @classmethod def drop_tag(self, nodes): if isinstance(nodes,", "may not use this file except in compliance with the", "if attr and value: selector = '%s[re:test(@%s, \"%s\", \"i\")]' %", "getAttribute(self, node, attr=None): if attr: return node.attrib.get(attr, None) return attr", "def setAttribute(self, node, attr=None, value=None): if attr and value: node.set(attr,", "t.text = text t.tail = tail return t @classmethod def", "implied. See the License for the specific language governing permissions", "class ParserSoup(Parser): @classmethod def fromstring(self, html): html = encodeValue(html) self.doc", "from lxml.html import soupparser from lxml import etree from copy", "= root.index(n) # don't process texts nodes if n.tag ==", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "nodes @classmethod def previousSibling(self, node): nodes = [] for c,", "for additional information regarding copyright ownership. 
Python port was written", "value) elems = node.xpath(selector, namespaces={\"re\": NS}) # remove the root", "'text' else False @classmethod def getAttribute(self, node, attr=None): if attr:", "# create the first text node # if we have", "soupparser from lxml import etree from copy import deepcopy from", "(selector, attr, value) elems = node.xpath(selector, namespaces={\"re\": NS}) # remove", "process texts nodes if n.tag == 'text': continue # create", "attr=None): if attr: return node.attrib.get(attr, None) return attr @classmethod def", "= node if e0.tail: e0 = deepcopy(e0) e0.tail = None", "' + node.tail node.clear() parent.remove(node) @classmethod def getTag(self, node): return", "t.tag = 'text' root.text = None root.insert(0, t) # loop", "self.fromstring(text) @classmethod def getChildren(self, node): return node.getchildren() @classmethod def getElementsByTags(self,", "node, idd): selector = '//*[@id=\"%s\"]' % idd elems = node.xpath(selector)", "node, attr=None, value=None): if attr and value: node.set(attr, value) @classmethod", "nodes.append(n) if c == 0: break return nodes[0] if nodes", "namespaces={\"re\": NS}) # remove the root node # if we", "attr and value: node.set(attr, value) @classmethod def outerHtml(self, node): e0", "if _attr: del node.attrib[attr] @classmethod def setAttribute(self, node, attr=None, value=None):", "t = lxmlhtml.HtmlElement() t.tag = tag t.text = text t.tail", "def nodeToString(self, node): return etree.tostring(node) @classmethod def replaceTag(self, node, tag):", "have a selection tag if node in elems and (tag", "nextSibling(self, node): nodes = [] for c, n in enumerate(node.itersiblings(preceding=False)):", "= \"http://exslt.org/regular-expressions\" # selector = tag or '*' selector =", "nodes: node.drop_tag() else: nodes.drop_tag() @classmethod def css_select(self, node, selector): return", "return self.nodeToString(e0) class ParserSoup(Parser): @classmethod def fromstring(self, html): html =", "elems: return elems[0] 
return None @classmethod def getElementsByTag(self, node, tag=None,", "tag='p', text=None, tail=None): t = lxmlhtml.HtmlElement() t.tag = tag t.text", "u' ' + node.tail else: if not prev.tail: prev.tail =", "writing, software distributed under the License is distributed on an", "node, attr=None): if attr: _attr = node.attrib.get(attr, None) if _attr:", "@classmethod def css_select(self, node, selector): return node.cssselect(selector) @classmethod def fromstring(self,", "See the NOTICE file distributed with this work for additional", "nodes.drop_tag() @classmethod def css_select(self, node, selector): return node.cssselect(selector) @classmethod def", "nodes = [] for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) if", "create the first text node # if we have some", "@classmethod def fromstring(self, html): html = encodeValue(html) self.doc = lxmlhtml.fromstring(html)", "return nodes @classmethod def previousSibling(self, node): nodes = [] for", "in compliance with the License. 
You may obtain a copy", "expression): regexp_namespace = \"http://exslt.org/regular-expressions\" items = node.xpath(expression, namespaces={'re': regexp_namespace}) return", "def remove(self, node): parent = node.getparent() if parent is not", "@classmethod def stripTags(self, node, *tags): etree.strip_tags(node, *tags) @classmethod def getElementById(self,", "enumerate(node.itersiblings(preceding=False)): nodes.append(n) if c == 0: break return nodes[0] if", "text node # if we have some text in the", "replaceTag(self, node, tag): node.tag = tag @classmethod def stripTags(self, node,", "(tag or '*') if attr and value: selector = '%s[re:test(@%s,", "have some text in the node if root.text: t =", "if node.tag == 'text' else False @classmethod def getAttribute(self, node,", "node, tag): node.tag = tag @classmethod def stripTags(self, node, *tags):", "agreed to in writing, software distributed under the License is", "elems = self.css_select(node, selector) # remove the root node #", "t) return list(root) @classmethod def textToPara(self, text): return self.fromstring(text) @classmethod", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "of \"Goose\" orignialy licensed to Gravity.com under one or more", "*tags) @classmethod def getElementById(self, node, idd): selector = '//*[@id=\"%s\"]' %", "if c == 0: break return nodes[0] if nodes else", "coding: utf-8 -*- \"\"\"\\ This is a python port of", "list): for node in nodes: node.drop_tag() else: nodes.drop_tag() @classmethod def", "parent.text = '' parent.text += u' ' + node.tail else:", "c, n in enumerate(node.itersiblings(preceding=False)): nodes.append(n) if c == 0: break", "work for additional information regarding copyright ownership. 
Python port was", "getText(self, node): txts = [i for i in node.itertext()] return", "else: nodes.drop_tag() @classmethod def css_select(self, node, selector): return node.cssselect(selector) @classmethod", "c, n in enumerate(list(root)): idx = root.index(n) # don't process", "import innerTrim from goose.text import encodeValue class Parser(object): @classmethod def", "def stripTags(self, node, *tags): etree.strip_tags(node, *tags) @classmethod def getElementById(self, node,", "None) return attr @classmethod def delAttribute(self, node, attr=None): if attr:", "= tag or '*' selector = 'descendant-or-self::%s' % (tag or", "elems: elems.remove(node) return elems @classmethod def createElement(self, tag='p', text=None, tail=None):", "selector = '%s[re:test(@%s, \"%s\", \"i\")]' % (selector, attr, value) elems", "node, child): node.append(child) @classmethod def childNodes(self, node): return list(node) @classmethod", "nodes[0] if nodes else None @classmethod def isTextNode(self, node): return", "node.set(attr, value) @classmethod def outerHtml(self, node): e0 = node if", "either express or implied. See the License for the specific", "tail=None) root.insert(idx + 1, t) return list(root) @classmethod def textToPara(self,", "+ node.tail node.clear() parent.remove(node) @classmethod def getTag(self, node): return node.tag", "node.getparent() if parent is not None: if node.tail: prev =", "False @classmethod def getAttribute(self, node, attr=None): if attr: return node.attrib.get(attr,", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "node.drop_tag() else: nodes.drop_tag() @classmethod def css_select(self, node, selector): return node.cssselect(selector)", "\"License\"); you may not use this file except in compliance", "def fromstring(self, html): html = encodeValue(html) self.doc = soupparser.fromstring(html) return", "information regarding copyright ownership. 
Python port was written by <NAME>", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "prev is None: if not parent.text: parent.text = '' parent.text", "python port of \"Goose\" orignialy licensed to Gravity.com under one", "node, *tags): etree.strip_tags(node, *tags) @classmethod def getElementById(self, node, idd): selector", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "contributor license agreements. See the NOTICE file distributed with this", "innerTrim from goose.text import encodeValue class Parser(object): @classmethod def xpath_re(self,", "License for the specific language governing permissions and limitations under", "def childNodesWithText(self, node): root = node # create the first", "continue # create a text node for tail if n.tail:", "tag if node in elems: elems.remove(node) return elems @classmethod def", "a text node for tail if n.tail: t = self.createElement(tag='text',", "nodes if n.tag == 'text': continue # create a text", "have a selection tag if node in elems: elems.remove(node) return", "= None root.insert(0, t) # loop childs for c, n", "e0 = deepcopy(e0) e0.tail = None return self.nodeToString(e0) class ParserSoup(Parser):", "node for tail if n.tail: t = self.createElement(tag='text', text=n.tail, tail=None)", "texts nodes if n.tag == 'text': continue # create a", "1, t) return list(root) @classmethod def textToPara(self, text): return self.fromstring(text)", "textToPara(self, text): return self.fromstring(text) @classmethod def getChildren(self, node): return node.getchildren()", "loop childs for c, n in enumerate(list(root)): idx = root.index(n)", "def xpath_re(self, node, expression): regexp_namespace = \"http://exslt.org/regular-expressions\" items = node.xpath(expression,", "childNodes(self, node): return list(node) @classmethod def childNodesWithText(self, node): root =", "if not prev.tail: prev.tail = '' prev.tail += u' '", "we have a selection tag if node in 
elems and", "node.xpath(expression, namespaces={'re': regexp_namespace}) return items @classmethod def drop_tag(self, nodes): if", "@classmethod def textToPara(self, text): return self.fromstring(text) @classmethod def getChildren(self, node):", "NS = \"http://exslt.org/regular-expressions\" # selector = tag or '*' selector", "= self.css_select(node, selector) # remove the root node # if", "root.text t.tag = 'text' root.text = None root.insert(0, t) #", "except in compliance with the License. You may obtain a", "def createElement(self, tag='p', text=None, tail=None): t = lxmlhtml.HtmlElement() t.tag =", "if root.text: t = lxmlhtml.HtmlElement() t.text = root.text t.tag =", "return node.getchildren() @classmethod def getElementsByTags(self, node, tags): selector = ','.join(tags)", "n in enumerate(list(root)): idx = root.index(n) # don't process texts", "ParserSoup(Parser): @classmethod def fromstring(self, html): html = encodeValue(html) self.doc =", "return self.fromstring(text) @classmethod def getChildren(self, node): return node.getchildren() @classmethod def", "getTag(self, node): return node.tag @classmethod def getText(self, node): txts =", "compliance with the License. You may obtain a copy of", "under one or more contributor license agreements. See the NOTICE", "language governing permissions and limitations under the License. 
\"\"\" import", "lxml.html as lxmlhtml from lxml.html import soupparser from lxml import", "n.tail: t = self.createElement(tag='text', text=n.tail, tail=None) root.insert(idx + 1, t)", "nodes.append(n) return nodes @classmethod def previousSibling(self, node): nodes = []", "idx = root.index(n) # don't process texts nodes if n.tag", "node.getprevious() if prev is None: if not parent.text: parent.text =", "node): txts = [i for i in node.itertext()] return innerTrim(u'", "def getChildren(self, node): return node.getchildren() @classmethod def getElementsByTags(self, node, tags):", "prev.tail: prev.tail = '' prev.tail += u' ' + node.tail", "if parent is not None: if node.tail: prev = node.getprevious()", "@classmethod def drop_tag(self, nodes): if isinstance(nodes, list): for node in", "we have some text in the node if root.text: t", "tag @classmethod def stripTags(self, node, *tags): etree.strip_tags(node, *tags) @classmethod def", "parent.text += u' ' + node.tail else: if not prev.tail:", "def delAttribute(self, node, attr=None): if attr: _attr = node.attrib.get(attr, None)", "@classmethod def previousSibling(self, node): nodes = [] for c, n", "i in node.itertext()] return innerTrim(u' '.join(txts).strip()) @classmethod def previousSiblings(self, node):", "selection tag if node in elems and (tag or childs):", "and value: node.set(attr, value) @classmethod def outerHtml(self, node): e0 =", "= deepcopy(e0) e0.tail = None return self.nodeToString(e0) class ParserSoup(Parser): @classmethod", "a python port of \"Goose\" orignialy licensed to Gravity.com under", "css_select(self, node, selector): return node.cssselect(selector) @classmethod def fromstring(self, html): html", "and limitations under the License. 
\"\"\" import lxml.html as lxmlhtml", "elems = node.xpath(selector) if elems: return elems[0] return None @classmethod", "t @classmethod def getComments(self, node): return node.xpath('//comment()') @classmethod def getParent(self,", "in enumerate(node.itersiblings(preceding=True)): nodes.append(n) return nodes @classmethod def previousSibling(self, node): nodes", "t) # loop childs for c, n in enumerate(list(root)): idx", "n.tag == 'text': continue # create a text node for", "regexp_namespace}) return items @classmethod def drop_tag(self, nodes): if isinstance(nodes, list):", "node in elems and (tag or childs): elems.remove(node) return elems", "value=None): if attr and value: node.set(attr, value) @classmethod def outerHtml(self,", "This is a python port of \"Goose\" orignialy licensed to", "def childNodes(self, node): return list(node) @classmethod def childNodesWithText(self, node): root", "@classmethod def getComments(self, node): return node.xpath('//comment()') @classmethod def getParent(self, node):", "delAttribute(self, node, attr=None): if attr: _attr = node.attrib.get(attr, None) if", "is not None: if node.tail: prev = node.getprevious() if prev", "or more contributor license agreements. 
See the NOTICE file distributed", "root node # if we have a selection tag if", "c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) if c == 0: break", "selector) # remove the root node # if we have", "*tags): etree.strip_tags(node, *tags) @classmethod def getElementById(self, node, idd): selector =", "@classmethod def setAttribute(self, node, attr=None, value=None): if attr and value:", "None return self.nodeToString(e0) class ParserSoup(Parser): @classmethod def fromstring(self, html): html", "distributed with this work for additional information regarding copyright ownership.", "tag t.text = text t.tail = tail return t @classmethod", "deepcopy(e0) e0.tail = None return self.nodeToString(e0) class ParserSoup(Parser): @classmethod def", "tail if n.tail: t = self.createElement(tag='text', text=n.tail, tail=None) root.insert(idx +", "selector = tag or '*' selector = 'descendant-or-self::%s' % (tag", "node.xpath(selector) if elems: return elems[0] return None @classmethod def getElementsByTag(self,", "childs for c, n in enumerate(list(root)): idx = root.index(n) #", "def getAttribute(self, node, attr=None): if attr: return node.attrib.get(attr, None) return", "we have a selection tag if node in elems: elems.remove(node)", "= text t.tail = tail return t @classmethod def getComments(self,", "if n.tag == 'text': continue # create a text node", "node.tag @classmethod def getText(self, node): txts = [i for i", "html): html = encodeValue(html) self.doc = lxmlhtml.fromstring(html) return self.doc @classmethod", "in the node if root.text: t = lxmlhtml.HtmlElement() t.text =", "items @classmethod def drop_tag(self, nodes): if isinstance(nodes, list): for node", "fromstring(self, html): html = encodeValue(html) self.doc = soupparser.fromstring(html) return self.doc", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "selector = ','.join(tags) elems = self.css_select(node, selector) 
# remove the", "from copy import deepcopy from goose.text import innerTrim from goose.text", "# selector = tag or '*' selector = 'descendant-or-self::%s' %", "def replaceTag(self, node, tag): node.tag = tag @classmethod def stripTags(self,", "node): nodes = [] for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n)", "= node.xpath(expression, namespaces={'re': regexp_namespace}) return items @classmethod def drop_tag(self, nodes):", "selector = 'descendant-or-self::%s' % (tag or '*') if attr and", "node): return True if node.tag == 'text' else False @classmethod", "not parent.text: parent.text = '' parent.text += u' ' +", "u' ' + node.tail node.clear() parent.remove(node) @classmethod def getTag(self, node):", "items = node.xpath(expression, namespaces={'re': regexp_namespace}) return items @classmethod def drop_tag(self,", "stripTags(self, node, *tags): etree.strip_tags(node, *tags) @classmethod def getElementById(self, node, idd):", "as lxmlhtml from lxml.html import soupparser from lxml import etree", "node in nodes: node.drop_tag() else: nodes.drop_tag() @classmethod def css_select(self, node,", "@classmethod def replaceTag(self, node, tag): node.tag = tag @classmethod def", "node.attrib.get(attr, None) if _attr: del node.attrib[attr] @classmethod def setAttribute(self, node,", "node): return node.xpath('//comment()') @classmethod def getParent(self, node): return node.getparent() @classmethod", "NOTICE file distributed with this work for additional information regarding", "express or implied. 
See the License for the specific language", "written by <NAME> Gravity.com licenses this file to you under", "value=None, childs=False): NS = \"http://exslt.org/regular-expressions\" # selector = tag or", "nodeToString(self, node): return etree.tostring(node) @classmethod def replaceTag(self, node, tag): node.tag", "= node.getprevious() if prev is None: if not parent.text: parent.text", "else None @classmethod def isTextNode(self, node): return True if node.tag", "node in elems: elems.remove(node) return elems @classmethod def createElement(self, tag='p',", "Gravity.com under one or more contributor license agreements. See the", "node): root = node # create the first text node", "nodes else None @classmethod def nextSibling(self, node): nodes = []", "etree.strip_tags(node, *tags) @classmethod def getElementById(self, node, idd): selector = '//*[@id=\"%s\"]'", "if prev is None: if not parent.text: parent.text = ''", "node.tag == 'text' else False @classmethod def getAttribute(self, node, attr=None):", "is a python port of \"Goose\" orignialy licensed to Gravity.com", "parent.remove(node) @classmethod def getTag(self, node): return node.tag @classmethod def getText(self,", "node.xpath(selector, namespaces={\"re\": NS}) # remove the root node # if", "value) @classmethod def outerHtml(self, node): e0 = node if e0.tail:", "= '%s[re:test(@%s, \"%s\", \"i\")]' % (selector, attr, value) elems =", "'descendant-or-self::%s' % (tag or '*') if attr and value: selector", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "% idd elems = node.xpath(selector) if elems: return elems[0] return", "text=n.tail, tail=None) root.insert(idx + 1, t) return list(root) @classmethod def", "drop_tag(self, nodes): if isinstance(nodes, list): for node in nodes: node.drop_tag()", "= lxmlhtml.fromstring(html) return self.doc @classmethod def nodeToString(self, node): return etree.tostring(node)", "root.text: t = lxmlhtml.HtmlElement() t.text = root.text 
t.tag = 'text'", "@classmethod def createElement(self, tag='p', text=None, tail=None): t = lxmlhtml.HtmlElement() t.tag", "t = self.createElement(tag='text', text=n.tail, tail=None) root.insert(idx + 1, t) return", "under the License. \"\"\" import lxml.html as lxmlhtml from lxml.html", "node.attrib[attr] @classmethod def setAttribute(self, node, attr=None, value=None): if attr and", "previousSiblings(self, node): nodes = [] for c, n in enumerate(node.itersiblings(preceding=True)):", "tail return t @classmethod def getComments(self, node): return node.xpath('//comment()') @classmethod", "elems and (tag or childs): elems.remove(node) return elems @classmethod def", "+= u' ' + node.tail node.clear() parent.remove(node) @classmethod def getTag(self,", "(tag or childs): elems.remove(node) return elems @classmethod def appendChild(self, node,", "def nextSibling(self, node): nodes = [] for c, n in", "with the License. You may obtain a copy of the", "selection tag if node in elems: elems.remove(node) return elems @classmethod", "None @classmethod def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False): NS", "e0.tail = None return self.nodeToString(e0) class ParserSoup(Parser): @classmethod def fromstring(self,", "def appendChild(self, node, child): node.append(child) @classmethod def childNodes(self, node): return", "the first text node # if we have some text", "utf-8 -*- \"\"\"\\ This is a python port of \"Goose\"", "= node.xpath(selector) if elems: return elems[0] return None @classmethod def", "tag): node.tag = tag @classmethod def stripTags(self, node, *tags): etree.strip_tags(node,", "return innerTrim(u' '.join(txts).strip()) @classmethod def previousSiblings(self, node): nodes = []", "tag=None, attr=None, value=None, childs=False): NS = \"http://exslt.org/regular-expressions\" # selector =", "@classmethod def childNodes(self, node): return list(node) @classmethod def childNodesWithText(self, node):", "@classmethod def nodeToString(self, 
node): return etree.tostring(node) @classmethod def replaceTag(self, node,", "@classmethod def getAttribute(self, node, attr=None): if attr: return node.attrib.get(attr, None)", "first text node # if we have some text in", "not None: if node.tail: prev = node.getprevious() if prev is", "return list(root) @classmethod def textToPara(self, text): return self.fromstring(text) @classmethod def", "root = node # create the first text node #", "import deepcopy from goose.text import innerTrim from goose.text import encodeValue", "specific language governing permissions and limitations under the License. \"\"\"", "Gravity.com licenses this file to you under the Apache License,", "xpath_re(self, node, expression): regexp_namespace = \"http://exslt.org/regular-expressions\" items = node.xpath(expression, namespaces={'re':", "lxmlhtml.fromstring(html) return self.doc @classmethod def nodeToString(self, node): return etree.tostring(node) @classmethod", "= node.attrib.get(attr, None) if _attr: del node.attrib[attr] @classmethod def setAttribute(self,", "parent is not None: if node.tail: prev = node.getprevious() if", "[] for c, n in enumerate(node.itersiblings(preceding=False)): nodes.append(n) if c ==", "applicable law or agreed to in writing, software distributed under", "tail=None): t = lxmlhtml.HtmlElement() t.tag = tag t.text = text", "@classmethod def getElementById(self, node, idd): selector = '//*[@id=\"%s\"]' % idd", "return None @classmethod def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False):", "@classmethod def isTextNode(self, node): return True if node.tag == 'text'", "limitations under the License. 
\"\"\" import lxml.html as lxmlhtml from", "create a text node for tail if n.tail: t =", "node.getparent() @classmethod def remove(self, node): parent = node.getparent() if parent", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "the specific language governing permissions and limitations under the License.", "\"\"\" import lxml.html as lxmlhtml from lxml.html import soupparser from", "if isinstance(nodes, list): for node in nodes: node.drop_tag() else: nodes.drop_tag()", "licensed to Gravity.com under one or more contributor license agreements.", "\"%s\", \"i\")]' % (selector, attr, value) elems = node.xpath(selector, namespaces={\"re\":", "if node in elems: elems.remove(node) return elems @classmethod def createElement(self,", "None @classmethod def isTextNode(self, node): return True if node.tag ==", "lxmlhtml from lxml.html import soupparser from lxml import etree from", "goose.text import innerTrim from goose.text import encodeValue class Parser(object): @classmethod", "for tail if n.tail: t = self.createElement(tag='text', text=n.tail, tail=None) root.insert(idx", "or agreed to in writing, software distributed under the License", "this file to you under the Apache License, Version 2.0", "not prev.tail: prev.tail = '' prev.tail += u' ' +", "etree from copy import deepcopy from goose.text import innerTrim from", "def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False): NS = \"http://exslt.org/regular-expressions\"", "\"http://exslt.org/regular-expressions\" items = node.xpath(expression, namespaces={'re': regexp_namespace}) return items @classmethod def", "# if we have some text in the node if", "license agreements. See the NOTICE file distributed with this work", "OF ANY KIND, either express or implied. 
See the License", "in node.itertext()] return innerTrim(u' '.join(txts).strip()) @classmethod def previousSiblings(self, node): nodes", "def drop_tag(self, nodes): if isinstance(nodes, list): for node in nodes:", "additional information regarding copyright ownership. Python port was written by", "t = lxmlhtml.HtmlElement() t.text = root.text t.tag = 'text' root.text", "node): return list(node) @classmethod def childNodesWithText(self, node): root = node", "def previousSiblings(self, node): nodes = [] for c, n in", "getComments(self, node): return node.xpath('//comment()') @classmethod def getParent(self, node): return node.getparent()", "n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) return nodes @classmethod def previousSibling(self, node):", "self.doc = lxmlhtml.fromstring(html) return self.doc @classmethod def nodeToString(self, node): return", "elems.remove(node) return elems @classmethod def createElement(self, tag='p', text=None, tail=None): t", "License, Version 2.0 (the \"License\"); you may not use this", "idd): selector = '//*[@id=\"%s\"]' % idd elems = node.xpath(selector) if", "@classmethod def getElementsByTags(self, node, tags): selector = ','.join(tags) elems =", "@classmethod def delAttribute(self, node, attr=None): if attr: _attr = node.attrib.get(attr,", "elems[0] return None @classmethod def getElementsByTag(self, node, tag=None, attr=None, value=None,", "else False @classmethod def getAttribute(self, node, attr=None): if attr: return", "and value: selector = '%s[re:test(@%s, \"%s\", \"i\")]' % (selector, attr,", "remove(self, node): parent = node.getparent() if parent is not None:", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "txts = [i for i in node.itertext()] return innerTrim(u' '.join(txts).strip())", "attr=None, value=None): if attr and value: node.set(attr, value) @classmethod def", "self.createElement(tag='text', text=n.tail, tail=None) root.insert(idx + 1, t) return 
list(root) @classmethod", "c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) return nodes @classmethod def previousSibling(self,", "node): return node.getparent() @classmethod def remove(self, node): parent = node.getparent()", "break return nodes[0] if nodes else None @classmethod def isTextNode(self,", "elems.remove(node) return elems @classmethod def appendChild(self, node, child): node.append(child) @classmethod", "return self.doc @classmethod def nodeToString(self, node): return etree.tostring(node) @classmethod def", "License. You may obtain a copy of the License at", "attr=None, value=None, childs=False): NS = \"http://exslt.org/regular-expressions\" # selector = tag", "some text in the node if root.text: t = lxmlhtml.HtmlElement()", "None: if not parent.text: parent.text = '' parent.text += u'", "= [] for c, n in enumerate(node.itersiblings(preceding=True)): nodes.append(n) if c", "_attr: del node.attrib[attr] @classmethod def setAttribute(self, node, attr=None, value=None): if", "lxmlhtml.HtmlElement() t.text = root.text t.tag = 'text' root.text = None", "child): node.append(child) @classmethod def childNodes(self, node): return list(node) @classmethod def", "or '*' selector = 'descendant-or-self::%s' % (tag or '*') if", "root.insert(0, t) # loop childs for c, n in enumerate(list(root)):", "# loop childs for c, n in enumerate(list(root)): idx =", "or childs): elems.remove(node) return elems @classmethod def appendChild(self, node, child):", "ownership. 
Python port was written by <NAME> Gravity.com licenses this", "node.clear() parent.remove(node) @classmethod def getTag(self, node): return node.tag @classmethod def", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "root.text = None root.insert(0, t) # loop childs for c,", "nodes): if isinstance(nodes, list): for node in nodes: node.drop_tag() else:", "= node.getparent() if parent is not None: if node.tail: prev", "def getElementById(self, node, idd): selector = '//*[@id=\"%s\"]' % idd elems", "elems @classmethod def createElement(self, tag='p', text=None, tail=None): t = lxmlhtml.HtmlElement()", "[i for i in node.itertext()] return innerTrim(u' '.join(txts).strip()) @classmethod def" ]
[ "= cursor def fetch_all(self): return self.cursor def fetch_one(self): if len(self.cursor)", "getLogger import os from typing import List, Union import psycopg2", "\"doj\") self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try:", "try: self.connection = psycopg2.connect( host=self.host, dbname=self.dbname, user=self.user, password=self.password, ) except", "self.connection.cursor() def execute(self, query: str, *args) -> Result: try: with", "0: return [] return self.cursor[0] class SqlHandler(AbsSqlHandler): def __init__(self): #", "self.last_insertid = rowid def lastrowid(self) -> int: return self.last_insertid class", "cursor.execute(query, *args) data = cursor.fetchall() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e)", "str, *args) -> Result: try: with self.connection.cursor() as cursor: cursor.execute(query,", "self.cursor = cursor def fetch_all(self): return self.cursor def fetch_one(self): if", "self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try: self.connection = psycopg2.connect( host=self.host, dbname=self.dbname,", "*args) -> Result: try: with self.connection.cursor() as cursor: cursor.execute(query, args)", "int: return self.last_insertid class Cursor(AbsCursor): def __init__(self, cursor): self.cursor =", "interface.database.sqlhandler import Result as AbsResult from interface.database.sqlhandler import SqlHandler as", "__init__(self, rowid: int): self.last_insertid = rowid def lastrowid(self) -> int:", "rowid def lastrowid(self) -> int: return self.last_insertid class Cursor(AbsCursor): def", "int): self.last_insertid = rowid def lastrowid(self) -> int: return self.last_insertid", "from interface.database.sqlhandler import Result as AbsResult from interface.database.sqlhandler import SqlHandler", "= os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try: 
self.connection =", "self.cursor[0] class SqlHandler(AbsSqlHandler): def __init__(self): # 環境から取るようにする self.host = os.getenv(\"DAIZU_DATABASE_HOST\",", "self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password =", "Result(AbsResult): def __init__(self, rowid: int): self.last_insertid = rowid def lastrowid(self)", "self.last_insertid class Cursor(AbsCursor): def __init__(self, cursor): self.cursor = cursor def", "Cursor as AbsCursor from interface.database.sqlhandler import Result as AbsResult from", "user=self.user, password=self.password, ) except psycopg2.OperationalError as err: raise err #", "self.host = os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user =", "self.connection.rollback() raise SqlTransactionException() return lastrowid def query(self, query: str, *args)", "import Cursor as AbsCursor from interface.database.sqlhandler import Result as AbsResult", "def execute(self, query: str, *args) -> Result: try: with self.connection.cursor()", "= cursor.lastrowid self.connection.commit() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise", "self.connection.cursor() as cursor: cursor.execute(query, args) lastrowid = cursor.lastrowid self.connection.commit() except", "import List, Union import psycopg2 from interface.database.sqlhandler import Cursor as", "cursor: cursor.execute(query, *args) data = cursor.fetchall() except psycopg2.errors.InFailedSqlTransaction as e:", "as err: raise err # self.cursor = self.connection.cursor() def execute(self,", "cursor.execute(query, args) lastrowid = cursor.lastrowid self.connection.commit() except psycopg2.errors.InFailedSqlTransaction as e:", "Cursor(AbsCursor): def __init__(self, cursor): self.cursor = cursor def fetch_all(self): return", "return [] return self.cursor[0] class SqlHandler(AbsSqlHandler): def __init__(self): # 
環境から取るようにする", "except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise SqlTransactionException() return lastrowid", "-> int: return self.last_insertid class Cursor(AbsCursor): def __init__(self, cursor): self.cursor", "= getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult): def __init__(self, rowid: int): self.last_insertid =", "query: str, *args) -> Cursor: try: with self.connection.cursor() as cursor:", "as AbsSqlHandler from exceptions.waf import SqlTransactionException logger = getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class", "Result as AbsResult from interface.database.sqlhandler import SqlHandler as AbsSqlHandler from", "__init__(self): # 環境から取るようにする self.host = os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\",", "def fetch_all(self): return self.cursor def fetch_one(self): if len(self.cursor) == 0:", "SqlTransactionException() return lastrowid def query(self, query: str, *args) -> Cursor:", "execute(self, query: str, *args) -> Result: try: with self.connection.cursor() as", "import SqlTransactionException logger = getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult): def __init__(self, rowid:", "lastrowid def query(self, query: str, *args) -> Cursor: try: with", "fetch_all(self): return self.cursor def fetch_one(self): if len(self.cursor) == 0: return", "self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try: self.connection", "SqlHandler as AbsSqlHandler from exceptions.waf import SqlTransactionException logger = getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\")", "class Result(AbsResult): def __init__(self, rowid: int): self.last_insertid = rowid def", "def __init__(self): # 環境から取るようにする self.host = os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname =", "as AbsResult from 
interface.database.sqlhandler import SqlHandler as AbsSqlHandler from exceptions.waf", "if len(self.cursor) == 0: return [] return self.cursor[0] class SqlHandler(AbsSqlHandler):", "try: with self.connection.cursor() as cursor: cursor.execute(query, *args) data = cursor.fetchall()", "List, Union import psycopg2 from interface.database.sqlhandler import Cursor as AbsCursor", "return self.cursor def fetch_one(self): if len(self.cursor) == 0: return []", "data = cursor.fetchall() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise", "with self.connection.cursor() as cursor: cursor.execute(query, args) lastrowid = cursor.lastrowid self.connection.commit()", "import getLogger import os from typing import List, Union import", "from interface.database.sqlhandler import Cursor as AbsCursor from interface.database.sqlhandler import Result", "os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try: self.connection = psycopg2.connect( host=self.host, dbname=self.dbname, user=self.user, password=self.password,", "import psycopg2 from interface.database.sqlhandler import Cursor as AbsCursor from interface.database.sqlhandler", "def __init__(self, cursor): self.cursor = cursor def fetch_all(self): return self.cursor", "as cursor: cursor.execute(query, *args) data = cursor.fetchall() except psycopg2.errors.InFailedSqlTransaction as", "psycopg2.OperationalError as err: raise err # self.cursor = self.connection.cursor() def", "logging import getLogger import os from typing import List, Union", "AbsCursor from interface.database.sqlhandler import Result as AbsResult from interface.database.sqlhandler import", "fetch_one(self): if len(self.cursor) == 0: return [] return self.cursor[0] class", "raise SqlTransactionException() return lastrowid def query(self, query: str, *args) ->", "SqlTransactionException logger = getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult): def __init__(self, rowid: int):", 
"os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\")", "\"<PASSWORD>\") try: self.connection = psycopg2.connect( host=self.host, dbname=self.dbname, user=self.user, password=self.password, )", "exceptions.waf import SqlTransactionException logger = getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult): def __init__(self,", "環境から取るようにする self.host = os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user", "interface.database.sqlhandler import SqlHandler as AbsSqlHandler from exceptions.waf import SqlTransactionException logger", "SqlHandler(AbsSqlHandler): def __init__(self): # 環境から取るようにする self.host = os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname", "= psycopg2.connect( host=self.host, dbname=self.dbname, user=self.user, password=self.password, ) except psycopg2.OperationalError as", "class Cursor(AbsCursor): def __init__(self, cursor): self.cursor = cursor def fetch_all(self):", "query: str, *args) -> Result: try: with self.connection.cursor() as cursor:", "try: with self.connection.cursor() as cursor: cursor.execute(query, args) lastrowid = cursor.lastrowid", "host=self.host, dbname=self.dbname, user=self.user, password=self.password, ) except psycopg2.OperationalError as err: raise", "logger.error(e) self.connection.rollback() raise SqlTransactionException() return lastrowid def query(self, query: str,", ") except psycopg2.OperationalError as err: raise err # self.cursor =", "# self.cursor = self.connection.cursor() def execute(self, query: str, *args) ->", "os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\")", "AbsSqlHandler from exceptions.waf import SqlTransactionException logger = 
getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult):", "from logging import getLogger import os from typing import List,", "*args) -> Cursor: try: with self.connection.cursor() as cursor: cursor.execute(query, *args)", "class SqlHandler(AbsSqlHandler): def __init__(self): # 環境から取るようにする self.host = os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\")", "== 0: return [] return self.cursor[0] class SqlHandler(AbsSqlHandler): def __init__(self):", "as AbsCursor from interface.database.sqlhandler import Result as AbsResult from interface.database.sqlhandler", "\"daizu\") self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try: self.connection = psycopg2.connect( host=self.host,", "return lastrowid def query(self, query: str, *args) -> Cursor: try:", "from interface.database.sqlhandler import SqlHandler as AbsSqlHandler from exceptions.waf import SqlTransactionException", "import SqlHandler as AbsSqlHandler from exceptions.waf import SqlTransactionException logger =", "raise err # self.cursor = self.connection.cursor() def execute(self, query: str,", "= os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try: self.connection = psycopg2.connect( host=self.host, dbname=self.dbname, user=self.user,", "= cursor.fetchall() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise SqlTransactionException()", "__init__(self, cursor): self.cursor = cursor def fetch_all(self): return self.cursor def", "args) lastrowid = cursor.lastrowid self.connection.commit() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e)", "str, *args) -> Cursor: try: with self.connection.cursor() as cursor: cursor.execute(query,", "from exceptions.waf import SqlTransactionException logger = getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult): def", "with self.connection.cursor() as cursor: cursor.execute(query, *args) data = cursor.fetchall() except", 
"self.cursor def fetch_one(self): if len(self.cursor) == 0: return [] return", "self.cursor = self.connection.cursor() def execute(self, query: str, *args) -> Result:", "psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise SqlTransactionException() return lastrowid def", "typing import List, Union import psycopg2 from interface.database.sqlhandler import Cursor", "import Result as AbsResult from interface.database.sqlhandler import SqlHandler as AbsSqlHandler", "-> Cursor: try: with self.connection.cursor() as cursor: cursor.execute(query, *args) data", "<reponame>SoyBeansLab/daizu-online-judge-backend<gh_stars>1-10 from logging import getLogger import os from typing import", "e: logger.error(e) self.connection.rollback() raise SqlTransactionException() return lastrowid def query(self, query:", "getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult): def __init__(self, rowid: int): self.last_insertid = rowid", "from typing import List, Union import psycopg2 from interface.database.sqlhandler import", "def fetch_one(self): if len(self.cursor) == 0: return [] return self.cursor[0]", "err: raise err # self.cursor = self.connection.cursor() def execute(self, query:", "self.connection = psycopg2.connect( host=self.host, dbname=self.dbname, user=self.user, password=self.password, ) except psycopg2.OperationalError", "query(self, query: str, *args) -> Cursor: try: with self.connection.cursor() as", "def lastrowid(self) -> int: return self.last_insertid class Cursor(AbsCursor): def __init__(self,", "cursor: cursor.execute(query, args) lastrowid = cursor.lastrowid self.connection.commit() except psycopg2.errors.InFailedSqlTransaction as", "os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\", \"<PASSWORD>\") try: self.connection = psycopg2.connect(", "self.connection.cursor() as cursor: cursor.execute(query, *args) data = cursor.fetchall() except 
psycopg2.errors.InFailedSqlTransaction", "Cursor: try: with self.connection.cursor() as cursor: cursor.execute(query, *args) data =", "interface.database.sqlhandler import Cursor as AbsCursor from interface.database.sqlhandler import Result as", "len(self.cursor) == 0: return [] return self.cursor[0] class SqlHandler(AbsSqlHandler): def", "psycopg2 from interface.database.sqlhandler import Cursor as AbsCursor from interface.database.sqlhandler import", "def query(self, query: str, *args) -> Cursor: try: with self.connection.cursor()", "= os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password = os.getenv(\"DAIZU_DATABASE_PASSWORD\",", "def __init__(self, rowid: int): self.last_insertid = rowid def lastrowid(self) ->", "lastrowid = cursor.lastrowid self.connection.commit() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback()", "lastrowid(self) -> int: return self.last_insertid class Cursor(AbsCursor): def __init__(self, cursor):", "Result: try: with self.connection.cursor() as cursor: cursor.execute(query, args) lastrowid =", "[] return self.cursor[0] class SqlHandler(AbsSqlHandler): def __init__(self): # 環境から取るようにする self.host", "cursor def fetch_all(self): return self.cursor def fetch_one(self): if len(self.cursor) ==", "cursor.lastrowid self.connection.commit() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise SqlTransactionException()", "= os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\",", "# 環境から取るようにする self.host = os.getenv(\"DAIZU_DATABASE_HOST\", \"localhost\") self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\")", "self.connection.commit() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise SqlTransactionException() return", "logger = 
getLogger(\"daizu\").getChild(\"infrastracture.SqlHandler\") class Result(AbsResult): def __init__(self, rowid: int): self.last_insertid", "except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise SqlTransactionException() return Cursor(data)", "import os from typing import List, Union import psycopg2 from", "*args) data = cursor.fetchall() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback()", "os from typing import List, Union import psycopg2 from interface.database.sqlhandler", "return self.cursor[0] class SqlHandler(AbsSqlHandler): def __init__(self): # 環境から取るようにする self.host =", "AbsResult from interface.database.sqlhandler import SqlHandler as AbsSqlHandler from exceptions.waf import", "as e: logger.error(e) self.connection.rollback() raise SqlTransactionException() return lastrowid def query(self,", "-> Result: try: with self.connection.cursor() as cursor: cursor.execute(query, args) lastrowid", "\"localhost\") self.dbname = os.getenv(\"DAIZU_DATABASE_NAME\", \"doj\") self.user = os.getenv(\"DAIZU_DATABASE_USERNAME\", \"daizu\") self.password", "cursor): self.cursor = cursor def fetch_all(self): return self.cursor def fetch_one(self):", "Union import psycopg2 from interface.database.sqlhandler import Cursor as AbsCursor from", "dbname=self.dbname, user=self.user, password=self.password, ) except psycopg2.OperationalError as err: raise err", "= rowid def lastrowid(self) -> int: return self.last_insertid class Cursor(AbsCursor):", "cursor.fetchall() except psycopg2.errors.InFailedSqlTransaction as e: logger.error(e) self.connection.rollback() raise SqlTransactionException() return", "return self.last_insertid class Cursor(AbsCursor): def __init__(self, cursor): self.cursor = cursor", "psycopg2.connect( host=self.host, dbname=self.dbname, user=self.user, password=self.password, ) except psycopg2.OperationalError as err:", "except psycopg2.OperationalError as err: raise err # 
self.cursor = self.connection.cursor()", "= self.connection.cursor() def execute(self, query: str, *args) -> Result: try:", "err # self.cursor = self.connection.cursor() def execute(self, query: str, *args)", "password=self.password, ) except psycopg2.OperationalError as err: raise err # self.cursor", "as cursor: cursor.execute(query, args) lastrowid = cursor.lastrowid self.connection.commit() except psycopg2.errors.InFailedSqlTransaction", "rowid: int): self.last_insertid = rowid def lastrowid(self) -> int: return" ]
[ "XMLParser(AbstractParser): \"\"\" Maps a list of values read by a", "= -1 if self.wrapper.getSensorDescription().isTimestampedStream(): try: self.timestampcell = -1 self.timestampformat =", "a given naming list \"\"\" def __init__(self, wrapper): super(XMLParser, self).__init__(wrapper)", "-1 self.timestampformat = self.wrapper.getSensorDescription().timestamp.format except ValueError: self.timestampcell = -1 def", "CSVReader with a given naming list \"\"\" def __init__(self, wrapper):", "ValueError: self.timestampcell = -1 def parse(self, data, clock): raise Exception(\"not", "given naming list \"\"\" def __init__(self, wrapper): super(XMLParser, self).__init__(wrapper) self.timestampcell", "__init__(self, wrapper): super(XMLParser, self).__init__(wrapper) self.timestampcell = -1 if self.wrapper.getSensorDescription().isTimestampedStream(): try:", "as dt class XMLParser(AbstractParser): \"\"\" Maps a list of values", "self.timestampcell = -1 def parse(self, data, clock): raise Exception(\"not implemented", "wrapper): super(XMLParser, self).__init__(wrapper) self.timestampcell = -1 if self.wrapper.getSensorDescription().isTimestampedStream(): try: self.timestampcell", "read by a CSVReader with a given naming list \"\"\"", "'<NAME> (<EMAIL>)' from virtualisation.wrapper.parser.abstractparser import AbstractParser from virtualisation.misc.jsonobject import JSONObject", "clock): raise Exception(\"not implemented yet!\") if not data: # nothing", "yet!\") if not data: # nothing received or nothing in", "= -1 self.timestampformat = self.wrapper.getSensorDescription().timestamp.format except ValueError: self.timestampcell = -1", "implemented yet!\") if not data: # nothing received or nothing", "nothing in the history -> nothing to parse return None", "if not data: # nothing received or nothing in the", "Exception(\"not implemented yet!\") if not data: # nothing received or", "with a given naming list \"\"\" def __init__(self, wrapper): super(XMLParser,", "import 
datetime as dt class XMLParser(AbstractParser): \"\"\" Maps a list", "JOb import datetime as dt class XMLParser(AbstractParser): \"\"\" Maps a", "virtualisation.wrapper.parser.abstractparser import AbstractParser from virtualisation.misc.jsonobject import JSONObject as JOb import", "list of values read by a CSVReader with a given", "values read by a CSVReader with a given naming list", "self.timestampformat = self.wrapper.getSensorDescription().timestamp.format except ValueError: self.timestampcell = -1 def parse(self,", "AbstractClock __author__ = '<NAME> (<EMAIL>)' from virtualisation.wrapper.parser.abstractparser import AbstractParser from", "not data: # nothing received or nothing in the history", "by a CSVReader with a given naming list \"\"\" def", "a list of values read by a CSVReader with a", "= '<NAME> (<EMAIL>)' from virtualisation.wrapper.parser.abstractparser import AbstractParser from virtualisation.misc.jsonobject import", "class XMLParser(AbstractParser): \"\"\" Maps a list of values read by", "self.wrapper.getSensorDescription().isTimestampedStream(): try: self.timestampcell = -1 self.timestampformat = self.wrapper.getSensorDescription().timestamp.format except ValueError:", "parse(self, data, clock): raise Exception(\"not implemented yet!\") if not data:", "from virtualisation.misc.jsonobject import JSONObject as JOb import datetime as dt", "virtualisation.clock.abstractclock import AbstractClock __author__ = '<NAME> (<EMAIL>)' from virtualisation.wrapper.parser.abstractparser import", "= self.wrapper.getSensorDescription().timestamp.format except ValueError: self.timestampcell = -1 def parse(self, data,", "\"\"\" def __init__(self, wrapper): super(XMLParser, self).__init__(wrapper) self.timestampcell = -1 if", "self).__init__(wrapper) self.timestampcell = -1 if self.wrapper.getSensorDescription().isTimestampedStream(): try: self.timestampcell = -1", "self.wrapper.getSensorDescription().timestamp.format except ValueError: self.timestampcell = -1 
def parse(self, data, clock):", "self.timestampcell = -1 self.timestampformat = self.wrapper.getSensorDescription().timestamp.format except ValueError: self.timestampcell =", "Maps a list of values read by a CSVReader with", "def __init__(self, wrapper): super(XMLParser, self).__init__(wrapper) self.timestampcell = -1 if self.wrapper.getSensorDescription().isTimestampedStream():", "# nothing received or nothing in the history -> nothing", "(<EMAIL>)' from virtualisation.wrapper.parser.abstractparser import AbstractParser from virtualisation.misc.jsonobject import JSONObject as", "import AbstractParser from virtualisation.misc.jsonobject import JSONObject as JOb import datetime", "-1 if self.wrapper.getSensorDescription().isTimestampedStream(): try: self.timestampcell = -1 self.timestampformat = self.wrapper.getSensorDescription().timestamp.format", "dt class XMLParser(AbstractParser): \"\"\" Maps a list of values read", "raise Exception(\"not implemented yet!\") if not data: # nothing received", "as JOb import datetime as dt class XMLParser(AbstractParser): \"\"\" Maps", "-1 def parse(self, data, clock): raise Exception(\"not implemented yet!\") if", "self.timestampcell = -1 if self.wrapper.getSensorDescription().isTimestampedStream(): try: self.timestampcell = -1 self.timestampformat", "or nothing in the history -> nothing to parse return", "__author__ = '<NAME> (<EMAIL>)' from virtualisation.wrapper.parser.abstractparser import AbstractParser from virtualisation.misc.jsonobject", "of values read by a CSVReader with a given naming", "naming list \"\"\" def __init__(self, wrapper): super(XMLParser, self).__init__(wrapper) self.timestampcell =", "JSONObject as JOb import datetime as dt class XMLParser(AbstractParser): \"\"\"", "list \"\"\" def __init__(self, wrapper): super(XMLParser, self).__init__(wrapper) self.timestampcell = -1", "super(XMLParser, self).__init__(wrapper) self.timestampcell = -1 if self.wrapper.getSensorDescription().isTimestampedStream(): try: 
self.timestampcell =", "virtualisation.misc.jsonobject import JSONObject as JOb import datetime as dt class", "data, clock): raise Exception(\"not implemented yet!\") if not data: #", "def parse(self, data, clock): raise Exception(\"not implemented yet!\") if not", "received or nothing in the history -> nothing to parse", "a CSVReader with a given naming list \"\"\" def __init__(self,", "nothing received or nothing in the history -> nothing to", "try: self.timestampcell = -1 self.timestampformat = self.wrapper.getSensorDescription().timestamp.format except ValueError: self.timestampcell", "<reponame>CityPulse/CP_Resourcemanagement from virtualisation.clock.abstractclock import AbstractClock __author__ = '<NAME> (<EMAIL>)' from", "AbstractParser from virtualisation.misc.jsonobject import JSONObject as JOb import datetime as", "\"\"\" Maps a list of values read by a CSVReader", "from virtualisation.wrapper.parser.abstractparser import AbstractParser from virtualisation.misc.jsonobject import JSONObject as JOb", "if self.wrapper.getSensorDescription().isTimestampedStream(): try: self.timestampcell = -1 self.timestampformat = self.wrapper.getSensorDescription().timestamp.format except", "data: # nothing received or nothing in the history ->", "except ValueError: self.timestampcell = -1 def parse(self, data, clock): raise", "= -1 def parse(self, data, clock): raise Exception(\"not implemented yet!\")", "from virtualisation.clock.abstractclock import AbstractClock __author__ = '<NAME> (<EMAIL>)' from virtualisation.wrapper.parser.abstractparser", "import AbstractClock __author__ = '<NAME> (<EMAIL>)' from virtualisation.wrapper.parser.abstractparser import AbstractParser", "import JSONObject as JOb import datetime as dt class XMLParser(AbstractParser):", "datetime as dt class XMLParser(AbstractParser): \"\"\" Maps a list of" ]
[ "event values. Returns: str: formatted message. \"\"\" try: message_string =", "input_attribute (Optional[str]): name of the attribute that contains the flags", "be defined as: # {name}, {name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE =", "per format string piece. E.g. [\"Description: {description}\"] would be mapped", "boolean input value is True. \"\"\" def __init__( self, input_attribute=None,", "output_attribute=None, value_if_false=None, value_if_true=None): \"\"\"Initialized a helper for formatting boolean event", "values. \"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting enumeration event data.", "\"\"\" class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting boolean event data. Attributes:", "output_attribute (Optional[str]): name of the attribute where the flags output", "event formatter helper. Args: helper (EventFormatterHelper): event formatter helper to", "value is True. \"\"\" super(BooleanEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute", "<gh_stars>1000+ # -*- coding: utf-8 -*- \"\"\"This file contains the", "the format string pieces. attribute_name = '' else: attribute_name =", "return self._data_type.lower() def _FormatMessage(self, format_string, event_values): \"\"\"Determines the formatted message.", "of the format strings pieces is similar to of the", "def AddHelper(self, helper): \"\"\"Adds an event formatter helper. Args: helper", "to the description_long and description_short field. \"\"\" import abc import", "default value is None and there is no corresponding enumeration", "= attribute_names[0] format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self): \"\"\"Creates the format string maps.", "is not None: default_value = self.default if default_value is None:", "format string piece is encountered. 
\"\"\" self._format_string_pieces_map = [] self._CreateFormatStringMap(", "= data_type self._format_string_attribute_names = None self.custom_helpers = [] self.helpers =", "\"\"\" def __init__( self, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper", "event_values (dict[str, object]): event values. \"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for", "message_string = format_string.format(**event_values) except KeyError as exception: data_type = event_values.get('data_type',", "by the formatter. format_string_pieces (Optional[list[str]]): (long) message format string pieces.", "not None: default_value = self.default if default_value is None: default_value", "super(FlagsEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute self.values =", "values. Returns: str: message. \"\"\" @abc.abstractmethod def GetMessageShort(self, event_values): \"\"\"Determines", "__init__( self, data_type='basic', format_string=None, format_string_short=None): \"\"\"Initializes a basic event formatter.", "Returns: str: conditional formatted message. Raises: RuntimeError: when an invalid", "than re.sub() or # string.strip(). return message_string.replace('\\r', '').replace('\\n', '') def", "boolean input value. output_attribute (str): name of the attribute where", "Raises: RuntimeError: when an invalid format string piece is encountered.", "not attribute_name or event_values.get( attribute_name, None) is not None: string_pieces.append(format_string_pieces[map_index])", "string piece is encountered. \"\"\" for format_string_piece in format_string_pieces: attribute_names", "_CreateFormatStringMaps(self): \"\"\"Creates the format string maps. Maps are built of", "enumeration output value should be stored. values (dict[str, str]): mapping", "helpers. Args: event_values (dict[str, object]): event values. \"\"\" for helper", "the short message. 
Args: event_values (dict[str, object]): event values. Returns:", "the conditional formatted message. Args: format_string_pieces (dict[str, str]): format string", "string piece is encountered. \"\"\" string_pieces = [] for map_index,", "name per format string piece. E.g. [\"Description: {description}\"] would be", "input_value is None: return output_values = [] for flag, mapped_value", "event formatter. The syntax of the format strings pieces is", "format string piece: [{0:s}] contains more than 1 ' 'attribute", "message. Raises: RuntimeError: when an invalid format string piece is", "format_string_short_pieces or [] self._format_string_short_pieces_map = [] def _CreateFormatStringMap( self, format_string_pieces,", "event formatter. The syntax of the format strings is similar", "format_string_short_pieces=None): \"\"\"Initializes a conditional event formatter. The syntax of the", "format string piece. E.g. [\"Description: {description}\"] would be mapped to:", "pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR = ' ' def __init__( self, data_type='conditional', format_string_pieces=None,", "of the attribute where the output value should be stored.", "event values. Attributes: custom_helpers (list[str]): identifiers of custom event formatter", "value should be stored. values (Optional[dict[str, str]]): mapping of enumeration", "an attribute name it is treated as text that does", "import abc import re from plaso.formatters import logger class EventFormatterHelper(object):", "output_values = [] for flag, mapped_value in self.values.items(): if flag", "more than 1 ' 'attribute name.').format(format_string_piece)) if not attribute_names: #", "with a format string definition, is used to convert the", "output_value = self.value_if_false event_values[self.output_attribute] = output_value class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class", "message string if necessary. 
if len(short_message_string) > 80: short_message_string =", "self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map) def _ConditionalFormatMessage( self, format_string_pieces, format_string_pieces_map, event_values): \"\"\"Determines", "format_string_pieces_map, event_values) # Truncate the short message string if necessary.", "using a message format string. Attributes: custom_helpers (list[str]): identifiers of", "convert the event object values into a formatted string that", "self._format_string_attribute_names = ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return set(self._format_string_attribute_names) def GetMessage(self, event_values):", "(dict[str, object]): event values. Returns: str: short message. \"\"\" class", "message_string.replace('\\r', '').replace('\\n', '') def FormatEventValues(self, event_values): \"\"\"Formats event values using", "object]): event values. Returns: str: message. \"\"\" @abc.abstractmethod def GetMessageShort(self,", "string pieces. \"\"\" if format_string_separator is None: format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR", "formatted message. Args: format_string (str): message format string. event_values (dict[str,", "{description}\"] would be mapped to: [0] = \"description\". If the", "for helper in self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod def GetFormatStringAttributeNames(self): \"\"\"Retrieves the", "GetMessageShort(self, event_values): \"\"\"Determines the short message. Args: event_values (dict[str, object]):", "the description_long and description_short field. \"\"\" import abc import re", "(str): name of the attribute where the flags output value", "string pieces and their corresponding attribute name to optimize conditional", "output_attribute=None, values=None): \"\"\"Initialized a helper for formatting flags event data.", "re.sub() or # string.strip(). 
return message_string.replace('\\r', '').replace('\\n', '') def FormatEventValues(self,", "def GetMessage(self, event_values): \"\"\"Determines the message. Args: event_values (dict[str, object]):", "' '.join(attribute_values) except UnicodeDecodeError as exception: data_type = event_values.get('data_type', 'N/A')", "input and output values. \"\"\" super(EnumerationEventFormatterHelper, self).__init__() self.default = default", "set(str): attribute names. \"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names =", "for the event data supported by the formatter.\"\"\" return self._data_type.lower()", "similar to the description_long and description_short field. \"\"\" import abc", "value then the original value is used. Args: event_values (dict[str,", "= None self._format_string = format_string self._format_string_short = format_string_short def GetFormatStringAttributeNames(self):", "'' else: attribute_name = attribute_names[0] format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self): \"\"\"Creates the", "= None self.custom_helpers = [] self.helpers = [] @property def", "None) is not None: string_pieces.append(format_string_pieces[map_index]) format_string = self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string,", "attribute names. \"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names = (", "the event data supported by the formatter.\"\"\" return self._data_type.lower() def", "at maximum one unique attribute name. Format string pieces without", "attribute_name = attribute_names[0] format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self): \"\"\"Creates the format string", "(Optional[str]): default value. input_attribute (Optional[str]): name of the attribute that", "(str): default value. 
input_attribute (str): name of the attribute that", "the formatter.\"\"\" return self._data_type.lower() def _FormatMessage(self, format_string, event_values): \"\"\"Determines the", "self._format_string_pieces_map: self._CreateFormatStringMaps() if (self._format_string_short_pieces and self._format_string_short_pieces != ['']): format_string_pieces =", "string formatting. Raises: RuntimeError: when an invalid format string piece", "where the flags output value should be stored. values (Optional[dict[str,", "an event formatter helper. Args: helper (EventFormatterHelper): event formatter helper", "Returns: str: message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() return self._ConditionalFormatMessage(", "flag, mapped_value in self.values.items(): if flag & input_value: output_values.append(mapped_value) event_values[self.output_attribute]", "certain event object attribute is defined as {attribute_name}. Args: data_type", "= [] self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map = [] self._CreateFormatStringMap( self._format_string_short_pieces,", "Returns: str: message. \"\"\" @abc.abstractmethod def GetMessageShort(self, event_values): \"\"\"Determines the", "= self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if attribute_names: self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names) def GetMessage(self,", "short_message_string = self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map, event_values) # Truncate the short", "\"\"\"Formats event values using the helper. If default value is", "mapping of flags input and output values. \"\"\" def __init__(", "output value should be stored. values (dict[str, str]): mapping of", "The syntax of the format strings is similar to that", "data supported by the formatter. 
format_string (Optional[str]): (long) message format", "or [] self._format_string_pieces_map = [] self._format_string_separator = format_string_separator self._format_string_short_pieces =", "for formatting event data.\"\"\" @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event", "display_name, parser_chain, error_message) logger.error(error_message) attribute_values = [] for attribute, value", "and linefeed form the message strings. # Using replace function", "format_string.format(**event_values) except KeyError as exception: data_type = event_values.get('data_type', 'N/A') display_name", "'.join(output_values) class EventFormatter(object): \"\"\"Base class to format event values. Attributes:", "error_message) logger.error(error_message) attribute_values = [] for attribute, value in event_values.items():", "\"\"\"Helper for formatting flags event data. Attributes: input_attribute (str): name", "field. \"\"\" import abc import re from plaso.formatters import logger", "[] @property def data_type(self): \"\"\"str: unique identifier for the event", "str]): format string pieces. format_string_pieces_map (list[int, str]): format string pieces", "boolean event data. Attributes: input_attribute (str): name of the attribute", "self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute self.values = values", "value. output_attribute (Optional[str]): name of the attribute where the boolean", "name of the attribute that contains the enumeration input value.", "for a helper for custom formatting of event data.\"\"\" DATA_TYPE", "the message strings. # Using replace function here because it", "try: message_string = format_string.format(**event_values) except KeyError as exception: data_type =", "empty map entry to keep # the index in the", "attribute where the boolean output value should be stored. 
value_if_false", "abc import re from plaso.formatters import logger class EventFormatterHelper(object): \"\"\"Base", "event formatter helper to add. \"\"\" self.helpers.append(helper) @abc.abstractmethod def GetMessage(self,", "a helper for formatting boolean event data. Args: input_attribute (Optional[str]):", "when an invalid format string piece is encountered. \"\"\" self._format_string_pieces_map", "\"\"\" self.helpers.append(helper) @abc.abstractmethod def GetMessage(self, event_values): \"\"\"Determines the message. Args:", "\"\"\" class BasicEventFormatter(EventFormatter): \"\"\"Format event values using a message format", "the format string. Returns: set(str): attribute names. \"\"\" # pylint:", "_ConditionalFormatMessage( self, format_string_pieces, format_string_pieces_map, event_values): \"\"\"Determines the conditional formatted message.", "as text that does not needs formatting. Args: format_string_pieces (list[str]):", "string pieces. format_string_pieces_map (list[int, str]): format string pieces map. event_values", "for the event data supported by the formatter. format_string_pieces (Optional[list[str]]):", "strings is similar to that of format() where the place", "format string pieces. format_string_pieces_map (list[str]): format string pieces map. Raises:", "value. output_attribute (str): name of the attribute where the enumeration", "self._FormatMessage(self._format_string, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the short message. Args:", "self.default = default self.input_attribute = input_attribute self.output_attribute = output_attribute self.values", "event_values (dict[str, object]): event values. \"\"\" input_value = event_values.get(self.input_attribute, None)", "name. 
Format string pieces without an attribute name are supported.", "name of the attribute where the output value should be", "def __init__( self, data_type='conditional', format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes a conditional", "parser_chain, error_message) logger.error(error_message) attribute_values = [] for attribute, value in", "self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map = [] self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map) def", "1: raise RuntimeError(( 'Invalid format string piece: [{0:s}] contains more", "self, format_string_pieces, format_string_pieces_map, event_values): \"\"\"Determines the conditional formatted message. Args:", "of enumeration input and output values. \"\"\" def __init__( self,", "re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self, data_type='internal'): \"\"\"Initializes an event formatter. Args:", "format_string_pieces_map, event_values): \"\"\"Determines the conditional formatted message. Args: format_string_pieces (dict[str,", "event_values (dict[str, object]): event values. Returns: str: message. \"\"\" if", "format_string_pieces_map): \"\"\"Creates a format string map. The format string pieces", "message strings. # Using replace function here because it is", "' def __init__( self, data_type='conditional', format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes a", "default (Optional[str]): default value. 
input_attribute (Optional[str]): name of the attribute", "self._format_string_pieces_map short_message_string = self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map, event_values) # Truncate the", "= ' ' def __init__( self, data_type='conditional', format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None):", "\"\"\" import abc import re from plaso.formatters import logger class", "attribute where the enumeration output value should be stored. values", "output_attribute=None, values=None): \"\"\"Initialized a helper for formatting enumeration event data.", "value_if_false (str): output value if the boolean input value is", "of the attribute that contains the enumeration input value. output_attribute", "self._format_string_pieces_map = [] self._format_string_separator = format_string_separator self._format_string_short_pieces = format_string_short_pieces or", "their corresponding attribute name to optimize conditional string formatting. Raises:", "'' @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event values using the", "short message string if necessary. if len(short_message_string) > 80: short_message_string", "(dict[str, str]): mapping of enumeration input and output values. \"\"\"", "return self._FormatMessage(self._format_string, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the short message.", "for formatting enumeration event data. Args: default (Optional[str]): default value.", "format_string_pieces (dict[str, str]): format string pieces. format_string_pieces_map (list[int, str]): format", "None: format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces = format_string_pieces or", "formatter helper. Args: identifier (str): identifier. input_attribute (Optional[str]): name of", "format event values. 
Attributes: custom_helpers (list[str]): identifiers of custom event", "equal to the format string pieces. attribute_name = '' else:", "= values or {} def FormatEventValues(self, event_values): \"\"\"Formats event values", "parser_chain = event_values.get('parser', 'N/A') error_message = 'Unicode decode error: {0!s}'.format(exception)", "for flag, mapped_value in self.values.items(): if flag & input_value: output_values.append(mapped_value)", "string piece does not contain an attribute name it is", "error_message = ( 'unable to format string: \"{0:s}\" missing required", "a custom event formatter helper. Args: identifier (str): identifier. input_attribute", "format string. \"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names = None self._format_string =", "string pieces. format_string_separator (Optional[str]): string by which separate format string", "None: return output_values = [] for flag, mapped_value in self.values.items():", "should be stored. \"\"\" self.custom_helpers.append(identifier) def AddHelper(self, helper): \"\"\"Adds an", "The syntax of the format strings pieces is similar to", "KeyError as exception: data_type = event_values.get('data_type', 'N/A') display_name = event_values.get('display_name',", "string_pieces.append(format_string_pieces[map_index]) format_string = self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string, event_values) def GetFormatStringAttributeNames(self): \"\"\"Retrieves", "string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR = ' ' def __init__( self, data_type='conditional',", "if attribute_names: self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the", "short message. Args: event_values (dict[str, object]): event values. 
Returns: str:", "to format string: \"{0:s}\" missing required event ' 'value: {1!s}').format(format_string,", "not contain an attribute name it is treated as text", "format string pieces map. Raises: RuntimeError: when an invalid format", "helper. Args: helper (EventFormatterHelper): event formatter helper to add. \"\"\"", "the place holder for a certain event object attribute is", "into a formatted string that is similar to the description_long", "event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) message_string = '' #", "# -*- coding: utf-8 -*- \"\"\"This file contains the event", "event values. Returns: str: conditional formatted message. Raises: RuntimeError: when", "attribute where the output value should be stored. \"\"\" self.custom_helpers.append(identifier)", "contains the enumeration input value. output_attribute (Optional[str]): name of the", "the string pieces and their corresponding attribute name to optimize", "if self._format_string_attribute_names is None: self._format_string_attribute_names = ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return", "attribute name it is treated as text that does not", "formatting flags event data. Args: input_attribute (Optional[str]): name of the", "not self._format_string_pieces_map: self._CreateFormatStringMaps() if (self._format_string_short_pieces and self._format_string_short_pieces != ['']): format_string_pieces", "joined. format_string_short_pieces (Optional[list[str]]): short message format string pieces. \"\"\" if", "= [] for map_index, attribute_name in enumerate(format_string_pieces_map): if not attribute_name", "helper. Args: identifier (str): identifier. input_attribute (Optional[str]): name of the", "text that does not needs formatting. 
Args: format_string_pieces (list[str]): format", "__init__( self, data_type='conditional', format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes a conditional event", "def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the format string.", "return self._FormatMessage(format_string, event_values) def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in", "values=None): \"\"\"Initialized a helper for formatting enumeration event data. Args:", "Args: format_string_pieces (dict[str, str]): format string pieces. format_string_pieces_map (list[int, str]):", "_DEFAULT_FORMAT_STRING_SEPARATOR = ' ' def __init__( self, data_type='conditional', format_string_pieces=None, format_string_separator=None,", "format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes a conditional event formatter. The syntax", "else: format_string_pieces = self._format_string_pieces format_string_pieces_map = self._format_string_pieces_map short_message_string = self._ConditionalFormatMessage(", "format string maps. Maps are built of the string pieces", "attribute_values.append('{0:s}: {1!s}'.format(attribute, value)) message_string = ' '.join(attribute_values) except UnicodeDecodeError as", "object]): event values. \"\"\" for helper in self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod", "self.values.get( input_value, default_value) class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting flags event", "attribute is defined as {attribute_name}. Args: data_type (Optional[str]): unique identifier", "pieces map. event_values (dict[str, object]): event values. Returns: str: conditional", "'value: {1!s}').format(format_string, exception) error_message = ( 'Event: {0:s} data type:", "Returns: set(str): attribute names. 
\"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names", "self._format_string_short_pieces_map else: format_string_pieces = self._format_string_pieces format_string_pieces_map = self._format_string_pieces_map short_message_string =", "the enumeration input value. output_attribute (str): name of the attribute", "value. output_attribute (Optional[str]): name of the attribute where the flags", "or # string.strip(). return message_string.replace('\\r', '').replace('\\n', '') def FormatEventValues(self, event_values):", "format_string_pieces_map (list[str]): format string pieces map. Raises: RuntimeError: when an", "coding: utf-8 -*- \"\"\"This file contains the event formatters interface", "attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if len(set(attribute_names)) > 1: raise RuntimeError((", "value. input_attribute (Optional[str]): name of the attribute that contains the", "formatter. \"\"\" super(EventFormatter, self).__init__() self._data_type = data_type self._format_string_attribute_names = None", "None self._format_string = format_string self._format_string_short = format_string_short def GetFormatStringAttributeNames(self): \"\"\"Retrieves", "else: attribute_name = attribute_names[0] format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self): \"\"\"Creates the format", "(dict[str, str]): format string pieces. format_string_pieces_map (list[int, str]): format string", "format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes a conditional event formatter. The syntax of", "there is no corresponding enumeration value then the original value", "formatting boolean event data. Args: input_attribute (Optional[str]): name of the", "self._format_string_pieces_map, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the short message. Args:", "output values. 
\"\"\" def __init__( self, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized", "boolean output value should be stored. value_if_false (str): output value", "helpers. \"\"\" def __init__( self, data_type='basic', format_string=None, format_string_short=None): \"\"\"Initializes a", "Args: helper (EventFormatterHelper): event formatter helper to add. \"\"\" self.helpers.append(helper)", "to add. \"\"\" self.helpers.append(helper) @abc.abstractmethod def GetMessage(self, event_values): \"\"\"Determines the", "'N/A') parser_chain = event_values.get('parser', 'N/A') error_message = 'Unicode decode error:", "= self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces = format_string_pieces or [] self._format_string_pieces_map", "string_pieces = [] for map_index, attribute_name in enumerate(format_string_pieces_map): if not", "type: {1:s} display name: {2:s} ' 'parser chain: {3:s} with", "event_values[self.output_attribute] = ', '.join(output_values) class EventFormatter(object): \"\"\"Base class to format", "attribute where the flags output value should be stored. values", "value if the boolean input value is True. \"\"\" super(BooleanEventFormatterHelper,", "format_string_pieces = self._format_string_short_pieces format_string_pieces_map = self._format_string_short_pieces_map else: format_string_pieces = self._format_string_pieces", "if the boolean input value is True. \"\"\" def __init__(", "= re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self, data_type='internal'): \"\"\"Initializes an event formatter.", "string pieces map is a list containing the attribute name", "\"\"\"Base class of helper for formatting event data.\"\"\" @abc.abstractmethod def", "be stored. values (dict[str, str]): mapping of enumeration input and", "FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting flags event data. 
Attributes: input_attribute (str):", "import logger class EventFormatterHelper(object): \"\"\"Base class of helper for formatting", "(list[int, str]): format string pieces map. event_values (dict[str, object]): event", "FormatEventValues(self, event_values): \"\"\"Formats event values using the helpers. Args: event_values", "be stored. \"\"\" self.custom_helpers.append(identifier) def AddHelper(self, helper): \"\"\"Adds an event", "= format_string_short def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the", "formatting enumeration event data. Args: default (Optional[str]): default value. input_attribute", "'N/A') event_identifier = event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser', 'N/A') error_message", "value_if_false=None, value_if_true=None): \"\"\"Initialized a helper for formatting boolean event data.", "output value should be stored. values (Optional[dict[str, str]]): mapping of", "= event_values.get('data_type', 'N/A') display_name = event_values.get('display_name', 'N/A') event_identifier = event_values.get('uuid',", "is used to convert the event object values into a", "\"\"\" # The format string can be defined as: #", "\"\"\"Determines the formatted message. Args: format_string (str): message format string.", "default (str): default value. input_attribute (str): name of the attribute", "(dict[str, object]): event values. \"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting", "that contains the enumeration input value. output_attribute (Optional[str]): name of", "= self.value_if_true else: output_value = self.value_if_false event_values[self.output_attribute] = output_value class", "event values using format string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR = ' '", "a helper for formatting enumeration event data. 
Args: default (Optional[str]):", "output_attribute (Optional[str]): name of the attribute where the enumeration output", "DATA_TYPE = '' IDENTIFIER = '' @abc.abstractmethod def FormatEventValues(self, event_values):", "supported by the formatter. \"\"\" super(EventFormatter, self).__init__() self._data_type = data_type", "= 'Unicode decode error: {0!s}'.format(exception) error_message = ( 'Event: {0:s}", "is treated as text that does not needs formatting. Args:", "& input_value: output_values.append(mapped_value) event_values[self.output_attribute] = ', '.join(output_values) class EventFormatter(object): \"\"\"Base", "flags input and output values. \"\"\" super(FlagsEventFormatterHelper, self).__init__() self.input_attribute =", "(str): output value if the boolean input value is True.", "display name: {2:s} ' 'parser chain: {3:s} with error: {4:s}').format(", "event_values.get('parser', 'N/A') error_message = ( 'unable to format string: \"{0:s}\"", "no corresponding enumeration value then the original value is used.", "\"\"\"Initializes a conditional event formatter. The syntax of the format", "= self._format_string_short_pieces_map else: format_string_pieces = self._format_string_pieces format_string_pieces_map = self._format_string_pieces_map short_message_string", "super(EventFormatter, self).__init__() self._data_type = data_type self._format_string_attribute_names = None self.custom_helpers =", "encountered. \"\"\" string_pieces = [] for map_index, attribute_name in enumerate(format_string_pieces_map):", "than 1 ' 'attribute name.').format(format_string_piece)) if not attribute_names: # The", "exception: data_type = event_values.get('data_type', 'N/A') display_name = event_values.get('display_name', 'N/A') event_identifier", "(Optional[str]): unique identifier for the event data supported by the", "the short message string if necessary. 
if len(short_message_string) > 80:", "= default self.input_attribute = input_attribute self.output_attribute = output_attribute self.values =", "the helper. If default value is None and there is", "stored. values (dict[str, str]): mapping of flags input and output", "output_attribute (Optional[str]): name of the attribute where the boolean output", "format string pieces map is a list containing the attribute", "formatter, with a format string definition, is used to convert", "format string pieces map. event_values (dict[str, object]): event values. Returns:", "helper in self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute", "None: self._format_string_attribute_names = ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return set(self._format_string_attribute_names) def GetMessage(self,", "str]): mapping of flags input and output values. \"\"\" def", "format string map. The format string pieces map is a", "encountered. \"\"\" self._format_string_pieces_map = [] self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map =", "string pieces should be joined. format_string_short_pieces (Optional[list[str]]): short message format", "values=None): \"\"\"Initialized a helper for formatting flags event data. Args:", "input value. output_attribute (Optional[str]): name of the attribute where the", "the boolean input value is True. \"\"\" def __init__( self,", "Truncate the short message string if necessary. 
if len(short_message_string) >", "if not self._format_string_pieces_map: self._CreateFormatStringMaps() return self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map, event_values) def", "definition, is used to convert the event object values into", "error: {0!s}'.format(exception) error_message = ( 'Event: {0:s} data type: {1:s}", "containing the attribute name per format string piece. E.g. [\"Description:", "contains the boolean input value. output_attribute (Optional[str]): name of the", "where the flags output value should be stored. values (dict[str,", "or [] self._format_string_short_pieces_map = [] def _CreateFormatStringMap( self, format_string_pieces, format_string_pieces_map):", "'{0:s}...'.format(short_message_string[:77]) return short_message_string class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format event values using", "event values. \"\"\" for helper in self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod def", "= self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string, event_values) def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute", "format_string = self._format_string_short else: format_string = self._format_string short_message_string = self._FormatMessage(format_string,", "None) if input_value: output_value = self.value_if_true else: output_value = self.value_if_false", "piece should contain at maximum one unique attribute name. Format", "object]): event values. \"\"\" input_value = event_values.get(self.input_attribute, None) if input_value", "to of the basic event formatter (BasicEventFormatter). Every format string", "Args: event_values (dict[str, object]): event values. Returns: str: message. \"\"\"", "[{0:s}] contains more than 1 ' 'attribute name.').format(format_string_piece)) if not", "(long) message format string. 
format_string_short (Optional[str]): short message format string.", "helper (EventFormatterHelper): event formatter helper to add. \"\"\" self.helpers.append(helper) @abc.abstractmethod", "self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the message.", "import re from plaso.formatters import logger class EventFormatterHelper(object): \"\"\"Base class", "event values using the helpers. Args: event_values (dict[str, object]): event", "attribute_names: # The text format string piece is stored as", "format string pieces. \"\"\" if format_string_separator is None: format_string_separator =", "(dict[str, object]): event values. \"\"\" for helper in self.helpers: helper.FormatEventValues(event_values)", "self._format_string_short_pieces_map) def _ConditionalFormatMessage( self, format_string_pieces, format_string_pieces_map, event_values): \"\"\"Determines the conditional", "object]): event values. Returns: str: formatted message. \"\"\" try: message_string", "a basic event formatter. The syntax of the format strings", "the event data supported by the formatter. format_string (Optional[str]): (long)", "for custom formatting of event data.\"\"\" DATA_TYPE = '' IDENTIFIER", "RuntimeError(( 'Invalid format string piece: [{0:s}] contains more than 1", "strings pieces is similar to of the basic event formatter", "where the boolean output value should be stored. value_if_false (str):", "formats are dependent on a message field, referred to as", "values using the helper. Args: event_values (dict[str, object]): event values.", "conditional formatted message. Raises: RuntimeError: when an invalid format string", "an invalid format string piece is encountered. \"\"\" self._format_string_pieces_map =", "value_if_true def FormatEventValues(self, event_values): \"\"\"Formats event values using the helper.", "to as description_long and description_short in l2t_csv. 
Plaso no longer", "\"\"\" super(EventFormatter, self).__init__() self._data_type = data_type self._format_string_attribute_names = None self.custom_helpers", "'Invalid format string piece: [{0:s}] contains more than 1 '", "string if necessary. if len(short_message_string) > 80: short_message_string = '{0:s}...'.format(short_message_string[:77])", "optimize conditional string formatting. Raises: RuntimeError: when an invalid format", "is None: self._format_string_attribute_names = ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return set(self._format_string_attribute_names) def", "pieces. format_string_pieces_map (list[int, str]): format string pieces map. event_values (dict[str,", "\"\"\" super(FlagsEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute self.values", "input_attribute (str): name of the attribute that contains the flags", "of the basic event formatter (BasicEventFormatter). Every format string piece", "data_type='conditional', format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes a conditional event formatter. The", "(Optional[dict[str, str]]): mapping of flags input and output values. \"\"\"", "Every format string piece should contain at maximum one unique", "formatted message. Raises: RuntimeError: when an invalid format string piece", "event formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" def", "\"\"\"Helper for formatting boolean event data. Attributes: input_attribute (str): name", "the attribute that contains the flags input value. output_attribute (Optional[str]):", "message. Args: event_values (dict[str, object]): event values. Returns: str: short", "format_string (Optional[str]): (long) message format string. 
format_string_short (Optional[str]): short message", "short_message_string = '{0:s}...'.format(short_message_string[:77]) return short_message_string class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format event", "format_string_pieces (list[str]): format string pieces. format_string_pieces_map (list[str]): format string pieces", "[] for flag, mapped_value in self.values.items(): if flag & input_value:", "' 'attribute name.').format(format_string_piece)) if not attribute_names: # The text format", "name.').format(format_string_piece)) if not attribute_names: # The text format string piece", "invalid format string piece is encountered. \"\"\" string_pieces = []", "Strip carriage return and linefeed form the message strings. #", "original value is used. Args: event_values (dict[str, object]): event values.", "output values. \"\"\" super(EnumerationEventFormatterHelper, self).__init__() self.default = default self.input_attribute =", "contains the enumeration input value. output_attribute (str): name of the", "input_attribute=None, output_attribute=None): \"\"\"Adds a custom event formatter helper. Args: identifier", "value. input_attribute (str): name of the attribute that contains the", "event_values (dict[str, object]): event values. Returns: str: message. \"\"\" @abc.abstractmethod", "class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting boolean event data. Attributes: input_attribute", "mapping of enumeration input and output values. \"\"\" super(EnumerationEventFormatterHelper, self).__init__()", "as: # {name}, {name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')", "identifier for the event data supported by the formatter. format_string", "stored. 
values (dict[str, str]): mapping of enumeration input and output", "not self._format_string_pieces_map: self._CreateFormatStringMaps() return self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map, event_values) def GetMessageShort(self,", "format_string_pieces, format_string_pieces_map, event_values) # Truncate the short message string if", "event values. Returns: str: short message. \"\"\" if self._format_string_short: format_string", "pieces is similar to of the basic event formatter (BasicEventFormatter).", "attribute that contains the enumeration input value. output_attribute (str): name", "= '{0:s}...'.format(short_message_string[:77]) return short_message_string class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format event values", "def __init__( self, default=None, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper", "def FormatEventValues(self, event_values): \"\"\"Formats event values using the helper. Args:", "Using replace function here because it is faster than re.sub()", "= format_string.format(**event_values) except KeyError as exception: data_type = event_values.get('data_type', 'N/A')", "ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format event values using format string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR", "RuntimeError: when an invalid format string piece is encountered. \"\"\"", "message format string pieces. format_string_separator (Optional[str]): string by which separate", "(str): message format string. event_values (dict[str, object]): event values. Returns:", "\"\"\" self.custom_helpers.append(identifier) def AddHelper(self, helper): \"\"\"Adds an event formatter helper.", "'N/A') display_name = event_values.get('display_name', 'N/A') event_identifier = event_values.get('uuid', 'N/A') parser_chain", "needs formatting. Args: format_string_pieces (list[str]): format string pieces. 
format_string_pieces_map (list[str]):", "the event object values into a formatted string that is", "' 'value: {1!s}').format(format_string, exception) error_message = ( 'Event: {0:s} data", "display_name = event_values.get('display_name', 'N/A') event_identifier = event_values.get('uuid', 'N/A') parser_chain =", "if input_value: output_value = self.value_if_true else: output_value = self.value_if_false event_values[self.output_attribute]", "(dict[str, object]): event values. Returns: str: message. \"\"\" @abc.abstractmethod def", "\"\"\"Adds an event formatter helper. Args: helper (EventFormatterHelper): event formatter", "self._format_string_short: format_string = self._format_string_short else: format_string = self._format_string short_message_string =", "for attribute, value in event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute, value)) message_string =", "should be stored. value_if_false (str): output value if the boolean", "name it is treated as text that does not needs", "encountered. \"\"\" for format_string_piece in format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece)", "that contains the flags input value. output_attribute (Optional[str]): name of", "{0!s}'.format(exception) error_message = ( 'Event: {0:s} data type: {1:s} display", "a list containing the attribute name per format string piece.", "attribute name. Format string pieces without an attribute name are", "[] for format_string_piece in self._format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if", "l2t_csv and other formats are dependent on a message field,", "(list[EventFormatterHelper]): event formatter helpers. 
\"\"\" def __init__( self, data_type='basic', format_string=None,", "Args: data_type (Optional[str]): unique identifier for the event data supported", "attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if attribute_names: self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names) def", "attribute_names: self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the message.", "pieces should be joined. format_string_short_pieces (Optional[list[str]]): short message format string", "'{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self, data_type='internal'): \"\"\"Initializes an event formatter. Args: data_type", "input_attribute self.output_attribute = output_attribute self.values = values or {} def", "format_string_short (Optional[str]): short message format string. \"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names", "string pieces without an attribute name are supported. Args: data_type", "message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() return self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map,", "mapping of enumeration input and output values. \"\"\" def __init__(", "flags event data. Args: input_attribute (Optional[str]): name of the attribute", "is a list containing the attribute name per format string", "values using the helpers. Args: event_values (dict[str, object]): event values.", "self._format_string_pieces format_string_pieces_map = self._format_string_pieces_map short_message_string = self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map, event_values)", "event data.\"\"\" @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event values using", "interface classes. 
The l2t_csv and other formats are dependent on", "form the message strings. # Using replace function here because", "None: self._format_string_attribute_names = [] for format_string_piece in self._format_string_pieces: attribute_names =", "object]): event values. \"\"\" input_value = event_values.get(self.input_attribute, None) if input_value:", "= format_string self._format_string_short = format_string_short def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute", "piece is stored as an empty map entry to keep", "= [] @property def data_type(self): \"\"\"str: unique identifier for the", "faster than re.sub() or # string.strip(). return message_string.replace('\\r', '').replace('\\n', '')", "formatter helpers. \"\"\" # The format string can be defined", "object]): event values. \"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting enumeration", "'attribute name.').format(format_string_piece)) if not attribute_names: # The text format string", "# Strip carriage return and linefeed form the message strings.", "value is used. Args: event_values (dict[str, object]): event values. \"\"\"", "default=None, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for formatting enumeration", "the attribute that contains the enumeration input value. output_attribute (Optional[str]):", "the event data supported by the formatter. format_string_pieces (Optional[list[str]]): (long)", "( 'Event: {0:s} data type: {1:s} display name: {2:s} '", "event values. Returns: str: short message. \"\"\" class BasicEventFormatter(EventFormatter): \"\"\"Format", "= format_string_separator self._format_string_short_pieces = format_string_short_pieces or [] self._format_string_short_pieces_map = []", "= '' else: attribute_name = attribute_names[0] format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self): \"\"\"Creates", "values. 
\"\"\" input_value = event_values.get(self.input_attribute, None) if input_value is None:", "object values into a formatted string that is similar to", "self._format_string_short_pieces_map = [] def _CreateFormatStringMap( self, format_string_pieces, format_string_pieces_map): \"\"\"Creates a", "identifier for the event data supported by the formatter. format_string_pieces", "be stored. value_if_false (str): output value if the boolean input", "{attribute_name}. Args: data_type (Optional[str]): unique identifier for the event data", "= event_values.get(self.input_attribute, None) if input_value is None: return output_values =", "'N/A') error_message = 'Unicode decode error: {0!s}'.format(exception) error_message = (", "event_identifier = event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser', 'N/A') error_message =", "{1:s} display name: {2:s} ' 'parser chain: {3:s} with error:", "def __init__(self, data_type='internal'): \"\"\"Initializes an event formatter. Args: data_type (Optional[str]):", "self._format_string_short else: format_string = self._format_string short_message_string = self._FormatMessage(format_string, event_values) #", "str: short message. \"\"\" if self._format_string_short: format_string = self._format_string_short else:", "self._format_string_short_pieces != ['']): format_string_pieces = self._format_string_short_pieces format_string_pieces_map = self._format_string_short_pieces_map else:", "formatter. format_string (Optional[str]): (long) message format string. format_string_short (Optional[str]): short", "used. Args: event_values (dict[str, object]): event values. \"\"\" input_value =", "format string pieces. format_string_separator (Optional[str]): string by which separate format", "from plaso.formatters import logger class EventFormatterHelper(object): \"\"\"Base class of helper", "these field explicitly. 
A formatter, with a format string definition,", "default_value) class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting flags event data. Attributes:", "event data. Args: default (Optional[str]): default value. input_attribute (Optional[str]): name", "helper for formatting boolean event data. Args: input_attribute (Optional[str]): name", "The text format string piece is stored as an empty", "values. Returns: str: short message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps()", "['']): format_string_pieces = self._format_string_short_pieces format_string_pieces_map = self._format_string_short_pieces_map else: format_string_pieces =", "'.join(attribute_values) except UnicodeDecodeError as exception: data_type = event_values.get('data_type', 'N/A') display_name", "input value is False. value_if_true (str): output value if the", "string by which separate format string pieces should be joined.", "= input_attribute self.output_attribute = output_attribute self.value_if_false = value_if_false self.value_if_true =", "(list[str]): format string pieces map. Raises: RuntimeError: when an invalid", "is True. \"\"\" def __init__( self, input_attribute=None, output_attribute=None, value_if_false=None, value_if_true=None):", "values (Optional[dict[str, str]]): mapping of flags input and output values.", "short message. \"\"\" class BasicEventFormatter(EventFormatter): \"\"\"Format event values using a", "contain an attribute name it is treated as text that", "input_attribute (str): name of the attribute that contains the enumeration", "formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" # The", "decode error: {0!s}'.format(exception) error_message = ( 'Event: {0:s} data type:", "that contains the boolean input value. output_attribute (Optional[str]): name of", "the attribute that contains the input value. output_attribute (Optional[str]): name", "message. 
Args: event_values (dict[str, object]): event values. Returns: str: message.", "when an invalid format string piece is encountered. \"\"\" string_pieces", "\"\"\"Formats event values using the helpers. Args: event_values (dict[str, object]):", "basic event formatter. The syntax of the format strings is", "input and output values. \"\"\" def __init__( self, input_attribute=None, output_attribute=None,", "where the place holder for a certain event object attribute", "for map_index, attribute_name in enumerate(format_string_pieces_map): if not attribute_name or event_values.get(", "event_values): \"\"\"Determines the conditional formatted message. Args: format_string_pieces (dict[str, str]):", "{0:s} data type: {1:s} display name: {2:s} ' 'parser chain:", "contains the input value. output_attribute (Optional[str]): name of the attribute", "formatted message. \"\"\" try: message_string = format_string.format(**event_values) except KeyError as", "of flags input and output values. \"\"\" def __init__( self,", "False. value_if_true (str): output value if the boolean input value", "data. Args: input_attribute (Optional[str]): name of the attribute that contains", "map equal to the format string pieces. attribute_name = ''", "(dict[str, object]): event values. Returns: str: formatted message. \"\"\" try:", "values. Returns: str: message. \"\"\" return self._FormatMessage(self._format_string, event_values) def GetMessageShort(self,", "data type: {1:s} display name: {2:s} ' 'parser chain: {3:s}", "event_values (dict[str, object]): event values. Returns: str: formatted message. \"\"\"", "def FormatEventValues(self, event_values): \"\"\"Formats event values using the helper. If", "(list[EventFormatterHelper]): event formatter helpers. \"\"\" # The format string can", "the message. Args: event_values (dict[str, object]): event values. Returns: str:", "message. Args: format_string_pieces (dict[str, str]): format string pieces. 
format_string_pieces_map (list[int,", "input_attribute (str): name of the attribute that contains the boolean", "value should be stored. values (dict[str, str]): mapping of flags", "the boolean input value is False. value_if_true (str): output value", "object]): event values. Returns: str: conditional formatted message. Raises: RuntimeError:", "EventFormatterHelper(object): \"\"\"Base class of helper for formatting event data.\"\"\" @abc.abstractmethod", "values (dict[str, str]): mapping of enumeration input and output values.", "data_type='internal'): \"\"\"Initializes an event formatter. Args: data_type (Optional[str]): unique identifier", "of event data.\"\"\" DATA_TYPE = '' IDENTIFIER = '' @abc.abstractmethod", "explicitly. A formatter, with a format string definition, is used", "format string: \"{0:s}\" missing required event ' 'value: {1!s}').format(format_string, exception)", "formatting enumeration event data. Attributes: default (str): default value. input_attribute", "using the helpers. Args: event_values (dict[str, object]): event values. \"\"\"", "helper for custom formatting of event data.\"\"\" DATA_TYPE = ''", "> 80: short_message_string = '{0:s}...'.format(short_message_string[:77]) return short_message_string class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally", "the enumeration output value should be stored. values (Optional[dict[str, str]]):", "an invalid format string piece is encountered. \"\"\" string_pieces =", "helper. If default value is None and there is no", "(Optional[str]): name of the attribute that contains the enumeration input", "'' # Strip carriage return and linefeed form the message", "\"\"\" super(EnumerationEventFormatterHelper, self).__init__() self.default = default self.input_attribute = input_attribute self.output_attribute", "data supported by the formatter. \"\"\" super(EventFormatter, self).__init__() self._data_type =", "names. 
\"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names = [] for", "enumeration value then the original value is used. Args: event_values", "values using format string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR = ' ' def", "format_string=None, format_string_short=None): \"\"\"Initializes a basic event formatter. The syntax of", "if not self._format_string_pieces_map: self._CreateFormatStringMaps() if (self._format_string_short_pieces and self._format_string_short_pieces != ['']):", "and output values. \"\"\" super(EnumerationEventFormatterHelper, self).__init__() self.default = default self.input_attribute", "Returns: str: short message. \"\"\" if self._format_string_short: format_string = self._format_string_short", "the format strings is similar to that of format() where", "format_string_pieces (Optional[list[str]]): (long) message format string pieces. format_string_separator (Optional[str]): string", "an empty map entry to keep # the index in", "piece is encountered. \"\"\" string_pieces = [] for map_index, attribute_name", "= event_values.get('parser', 'N/A') error_message = 'Unicode decode error: {0!s}'.format(exception) error_message", "def __init__( self, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for", "event_values[self.output_attribute] = self.values.get( input_value, default_value) class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting", "string. event_values (dict[str, object]): event values. 
Returns: str: formatted message.", "\"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() return self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map, event_values)", "(Optional[str]): string by which separate format string pieces should be", "helper for formatting event data.\"\"\" @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats", "Args: event_values (dict[str, object]): event values. \"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper", "def _CreateFormatStringMap( self, format_string_pieces, format_string_pieces_map): \"\"\"Creates a format string map.", "entry to keep # the index in the map equal", "\"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names = ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string))", "= ( 'Event: {0:s} data type: {1:s} display name: {2:s}", "piece. E.g. [\"Description: {description}\"] would be mapped to: [0] =", "format_string (str): message format string. event_values (dict[str, object]): event values.", "error_message = ( 'Event: {0:s} data type: {1:s} display name:", "' ' def __init__( self, data_type='conditional', format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes", "1 ' 'attribute name.').format(format_string_piece)) if not attribute_names: # The text", "name of the attribute where the flags output value should", "default self.input_attribute = input_attribute self.output_attribute = output_attribute self.values = values", "self.value_if_false event_values[self.output_attribute] = output_value class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class for a", "holder for a certain event object attribute is defined as", "string.strip(). 
return message_string.replace('\\r', '').replace('\\n', '') def FormatEventValues(self, event_values): \"\"\"Formats event", "flags output value should be stored. values (Optional[dict[str, str]]): mapping", "of custom event formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers.", "event formatter helpers. \"\"\" # The format string can be", "str: message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() return self._ConditionalFormatMessage( self._format_string_pieces,", "The l2t_csv and other formats are dependent on a message", "format_string_short_pieces (Optional[list[str]]): short message format string pieces. \"\"\" if format_string_separator", "= event_values.get('display_name', 'N/A') event_identifier = event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser',", "is similar to the description_long and description_short field. \"\"\" import", "and their corresponding attribute name to optimize conditional string formatting.", "# The format string can be defined as: # {name},", "flags input value. output_attribute (Optional[str]): name of the attribute where", "input value. output_attribute (str): name of the attribute where the", "\"\"\"Adds a custom event formatter helper. Args: identifier (str): identifier.", "str: message. \"\"\" @abc.abstractmethod def GetMessageShort(self, event_values): \"\"\"Determines the short", "format_string_short def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the format", "event data. Attributes: input_attribute (str): name of the attribute that", "str]]): mapping of enumeration input and output values. 
\"\"\" super(EnumerationEventFormatterHelper,", "pylint: disable=unused-argument def AddCustomHelper( self, identifier, input_attribute=None, output_attribute=None): \"\"\"Adds a", "A formatter, with a format string definition, is used to", "def _ConditionalFormatMessage( self, format_string_pieces, format_string_pieces_map, event_values): \"\"\"Determines the conditional formatted", "(Optional[str]): name of the attribute where the enumeration output value", "# The text format string piece is stored as an", "formatters interface classes. The l2t_csv and other formats are dependent", "or {} def FormatEventValues(self, event_values): \"\"\"Formats event values using the", "\"\"\" super(BooleanEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute self.value_if_false", "format_string_piece) if attribute_names: self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines", "str]]): mapping of flags input and output values. \"\"\" super(FlagsEventFormatterHelper,", "enumeration input and output values. \"\"\" def __init__( self, default=None,", "is None: self._format_string_attribute_names = [] for format_string_piece in self._format_string_pieces: attribute_names", "with error: {4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) attribute_values", "the formatter. format_string (Optional[str]): (long) message format string. format_string_short (Optional[str]):", "the original value is used. 
Args: event_values (dict[str, object]): event", "self.output_attribute = output_attribute self.values = values or {} def FormatEventValues(self,", "by the formatter.\"\"\" return self._data_type.lower() def _FormatMessage(self, format_string, event_values): \"\"\"Determines", "name: {2:s} ' 'parser chain: {3:s} with error: {4:s}').format( event_identifier,", "event values. \"\"\" class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting boolean event", "= [] def _CreateFormatStringMap( self, format_string_pieces, format_string_pieces_map): \"\"\"Creates a format", "@abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event values using the helper.", "description_short field. \"\"\" import abc import re from plaso.formatters import", "formatting event data.\"\"\" @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event values", "\"\"\"Initialized a helper for formatting flags event data. Args: input_attribute", "then the original value is used. Args: event_values (dict[str, object]):", "custom formatting of event data.\"\"\" DATA_TYPE = '' IDENTIFIER =", "pieces without an attribute name are supported. Args: data_type (Optional[str]):", "enumeration event data. Args: default (Optional[str]): default value. input_attribute (Optional[str]):", "\"\"\"Determines the message. Args: event_values (dict[str, object]): event values. Returns:", "attribute_name or event_values.get( attribute_name, None) is not None: string_pieces.append(format_string_pieces[map_index]) format_string", "the map equal to the format string pieces. attribute_name =", "format_string = self._format_string short_message_string = self._FormatMessage(format_string, event_values) # Truncate the", "[] self._format_string_pieces_map = [] self._format_string_separator = format_string_separator self._format_string_short_pieces = format_string_short_pieces", "attribute that contains the input value. 
output_attribute (Optional[str]): name of", "event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser', 'N/A') error_message = ( 'unable", "input_attribute self.output_attribute = output_attribute self.value_if_false = value_if_false self.value_if_true = value_if_true", "identifier (str): identifier. input_attribute (Optional[str]): name of the attribute that", "self, input_attribute=None, output_attribute=None, value_if_false=None, value_if_true=None): \"\"\"Initialized a helper for formatting", "[] self._format_string_short_pieces_map = [] def _CreateFormatStringMap( self, format_string_pieces, format_string_pieces_map): \"\"\"Creates", "@property def data_type(self): \"\"\"str: unique identifier for the event data", "[] for map_index, attribute_name in enumerate(format_string_pieces_map): if not attribute_name or", "a helper for formatting flags event data. Args: input_attribute (Optional[str]):", "self._format_string_short_pieces format_string_pieces_map = self._format_string_short_pieces_map else: format_string_pieces = self._format_string_pieces format_string_pieces_map =", "the attribute where the boolean output value should be stored.", "error: {4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) attribute_values =", "default value. input_attribute (str): name of the attribute that contains", "object attribute is defined as {attribute_name}. Args: data_type (Optional[str]): unique", "when an invalid format string piece is encountered. \"\"\" for", "= [] for attribute, value in event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute, value))", "_FormatMessage(self, format_string, event_values): \"\"\"Determines the formatted message. Args: format_string (str):", "message. 
\"\"\" return self._FormatMessage(self._format_string, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the", "keep # the index in the map equal to the", "conditional string formatting. Raises: RuntimeError: when an invalid format string", "= self._FormatMessage(format_string, event_values) # Truncate the short message string if", "data_type, display_name, parser_chain, error_message) logger.error(error_message) attribute_values = [] for attribute,", "= event_values.get('parser', 'N/A') error_message = ( 'unable to format string:", "helper): \"\"\"Adds an event formatter helper. Args: helper (EventFormatterHelper): event", "(Optional[str]): name of the attribute where the flags output value", "map_index, attribute_name in enumerate(format_string_pieces_map): if not attribute_name or event_values.get( attribute_name,", "name of the attribute that contains the flags input value.", "that of format() where the place holder for a certain", "l2t_csv. Plaso no longer stores these field explicitly. A formatter,", "self).__init__(data_type=data_type) self._format_string_pieces = format_string_pieces or [] self._format_string_pieces_map = [] self._format_string_separator", "format_string_piece) if len(set(attribute_names)) > 1: raise RuntimeError(( 'Invalid format string", "\"\"\"Formats event values using the helper. Args: event_values (dict[str, object]):", "and output values. \"\"\" def __init__( self, input_attribute=None, output_attribute=None, values=None):", "format_string = self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string, event_values) def GetFormatStringAttributeNames(self): \"\"\"Retrieves the", "class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting flags event data. Attributes: input_attribute", "formatting. Args: format_string_pieces (list[str]): format string pieces. 
format_string_pieces_map (list[str]): format", "None self.custom_helpers = [] self.helpers = [] @property def data_type(self):", "event formatter helper. Args: identifier (str): identifier. input_attribute (Optional[str]): name", "(self._format_string_short_pieces and self._format_string_short_pieces != ['']): format_string_pieces = self._format_string_short_pieces format_string_pieces_map =", "\"\"\" input_value = event_values.get(self.input_attribute, None) if input_value is not None:", "\"\"\" if format_string_separator is None: format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type)", "to optimize conditional string formatting. Raises: RuntimeError: when an invalid", "self._format_string_attribute_names is None: self._format_string_attribute_names = ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return set(self._format_string_attribute_names)", "the output value should be stored. \"\"\" self.custom_helpers.append(identifier) def AddHelper(self,", "string. format_string_short (Optional[str]): short message format string. \"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type)", "flags output value should be stored. values (dict[str, str]): mapping", "( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the", "the format strings pieces is similar to of the basic", "it is treated as text that does not needs formatting.", "helpers. \"\"\" # The format string can be defined as:", "a conditional event formatter. The syntax of the format strings", "names. \"\"\" # pylint: disable=unused-argument def AddCustomHelper( self, identifier, input_attribute=None,", "the boolean input value. 
output_attribute (Optional[str]): name of the attribute", "format_string_pieces = self._format_string_pieces format_string_pieces_map = self._format_string_pieces_map short_message_string = self._ConditionalFormatMessage( format_string_pieces,", "name of the attribute that contains the boolean input value.", "'') def FormatEventValues(self, event_values): \"\"\"Formats event values using the helpers.", "format string pieces. attribute_name = '' else: attribute_name = attribute_names[0]", "format_string_pieces, format_string_pieces_map, event_values): \"\"\"Determines the conditional formatted message. Args: format_string_pieces", "of the attribute where the boolean output value should be", "without an attribute name are supported. Args: data_type (Optional[str]): unique", "values. Returns: str: message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() return", "short message format string pieces. \"\"\" if format_string_separator is None:", "event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser', 'N/A') error_message = 'Unicode decode", "map entry to keep # the index in the map", "str]): mapping of enumeration input and output values. \"\"\" def", "(dict[str, object]): event values. Returns: str: conditional formatted message. Raises:", "data_type, display_name, parser_chain, error_message) logger.error(error_message) message_string = '' # Strip", "self._format_string_short_pieces_map = [] self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map) def _ConditionalFormatMessage( self, format_string_pieces,", "maps. Maps are built of the string pieces and their", "identifier, input_attribute=None, output_attribute=None): \"\"\"Adds a custom event formatter helper. Args:", "EventFormatter(object): \"\"\"Base class to format event values. 
Attributes: custom_helpers (list[str]):", "None: default_value = input_value event_values[self.output_attribute] = self.values.get( input_value, default_value) class", "string piece: [{0:s}] contains more than 1 ' 'attribute name.').format(format_string_piece))", "mapped_value in self.values.items(): if flag & input_value: output_values.append(mapped_value) event_values[self.output_attribute] =", "and self._format_string_short_pieces != ['']): format_string_pieces = self._format_string_short_pieces format_string_pieces_map = self._format_string_short_pieces_map", "utf-8 -*- \"\"\"This file contains the event formatters interface classes.", "values into a formatted string that is similar to the", "data_type(self): \"\"\"str: unique identifier for the event data supported by", "values (Optional[dict[str, str]]): mapping of enumeration input and output values.", "value. output_attribute (str): name of the attribute where the flags", "object]): event values. Returns: str: message. \"\"\" return self._FormatMessage(self._format_string, event_values)", "of the attribute where the flags output value should be", "flag & input_value: output_values.append(mapped_value) event_values[self.output_attribute] = ', '.join(output_values) class EventFormatter(object):", "be joined. format_string_short_pieces (Optional[list[str]]): short message format string pieces. \"\"\"", "place holder for a certain event object attribute is defined", "as {attribute_name}. Args: data_type (Optional[str]): unique identifier for the event", "self.value_if_true = value_if_true def FormatEventValues(self, event_values): \"\"\"Formats event values using", "on a message field, referred to as description_long and description_short", "= format_string_pieces or [] self._format_string_pieces_map = [] self._format_string_separator = format_string_separator", "invalid format string piece is encountered. \"\"\" for format_string_piece in", "the boolean output value should be stored. 
value_if_false (str): output", "output_attribute=None): \"\"\"Adds a custom event formatter helper. Args: identifier (str):", "message field, referred to as description_long and description_short in l2t_csv.", "Plaso no longer stores these field explicitly. A formatter, with", "{2:s} ' 'parser chain: {3:s} with error: {4:s}').format( event_identifier, data_type,", "Returns: str: message. \"\"\" return self._FormatMessage(self._format_string, event_values) def GetMessageShort(self, event_values):", "name of the attribute that contains the input value. output_attribute", "= [] for flag, mapped_value in self.values.items(): if flag &", "data supported by the formatter. format_string_pieces (Optional[list[str]]): (long) message format", "(Optional[str]): short message format string. \"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names =", "used to convert the event object values into a formatted", "is used. Args: event_values (dict[str, object]): event values. \"\"\" input_value", "if self._format_string_short: format_string = self._format_string_short else: format_string = self._format_string short_message_string", "message format string. event_values (dict[str, object]): event values. Returns: str:", "True. \"\"\" def __init__( self, input_attribute=None, output_attribute=None, value_if_false=None, value_if_true=None): \"\"\"Initialized", "event values using the helper. Args: event_values (dict[str, object]): event", "string pieces. attribute_name = '' else: attribute_name = attribute_names[0] format_string_pieces_map.append(attribute_name)", "# {name}, {name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def", "output values. 
\"\"\" super(FlagsEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute =", "for format_string_piece in format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if len(set(attribute_names))", "error_message = 'Unicode decode error: {0!s}'.format(exception) error_message = ( 'Event:", "event values using the helper. If default value is None", "attribute_values = [] for attribute, value in event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute,", "value_if_true=None): \"\"\"Initialized a helper for formatting boolean event data. Args:", "name of the attribute where the enumeration output value should", "= self.values.get( input_value, default_value) class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting flags", "{3:s} with error: {4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message)", "= '' IDENTIFIER = '' @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats", "replace function here because it is faster than re.sub() or", "format_string_piece in self._format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if attribute_names: self._format_string_attribute_names.extend(attribute_names)", "= '' # Strip carriage return and linefeed form the", "\"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names = [] for format_string_piece", "field explicitly. A formatter, with a format string definition, is", "here because it is faster than re.sub() or # string.strip().", "is False. value_if_true (str): output value if the boolean input", "if default_value is None: default_value = input_value event_values[self.output_attribute] = self.values.get(", "str: conditional formatted message. Raises: RuntimeError: when an invalid format", "the flags input value. 
output_attribute (str): name of the attribute", "{name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self, data_type='internal'): \"\"\"Initializes", "Args: event_values (dict[str, object]): event values. Returns: str: short message.", "(EventFormatterHelper): event formatter helper to add. \"\"\" self.helpers.append(helper) @abc.abstractmethod def", "object]): event values. \"\"\" class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting boolean", "format_string_piece in format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if len(set(attribute_names)) >", "format_string_short=None): \"\"\"Initializes a basic event formatter. The syntax of the", "dependent on a message field, referred to as description_long and", "self._format_string_attribute_names = None self._format_string = format_string self._format_string_short = format_string_short def", "custom event formatter helper. Args: identifier (str): identifier. input_attribute (Optional[str]):", "using the helper. If default value is None and there", "formatting boolean event data. Attributes: input_attribute (str): name of the", "parser_chain, error_message) logger.error(error_message) message_string = '' # Strip carriage return", "= [] for format_string_piece in self._format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece)", "= format_string_short_pieces or [] self._format_string_short_pieces_map = [] def _CreateFormatStringMap( self,", "raise RuntimeError(( 'Invalid format string piece: [{0:s}] contains more than", "self._data_type = data_type self._format_string_attribute_names = None self.custom_helpers = [] self.helpers", "by the formatter. format_string (Optional[str]): (long) message format string. 
format_string_short", "\"\"\" self._format_string_pieces_map = [] self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map = []", "self._FormatMessage(format_string, event_values) def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the", "helper for formatting flags event data. Args: input_attribute (Optional[str]): name", "in self.values.items(): if flag & input_value: output_values.append(mapped_value) event_values[self.output_attribute] = ',", "input value is True. \"\"\" super(BooleanEventFormatterHelper, self).__init__() self.input_attribute = input_attribute", "format string piece is stored as an empty map entry", "strings. # Using replace function here because it is faster", "input value is True. \"\"\" def __init__( self, input_attribute=None, output_attribute=None,", "None: string_pieces.append(format_string_pieces[map_index]) format_string = self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string, event_values) def GetFormatStringAttributeNames(self):", "similar to of the basic event formatter (BasicEventFormatter). Every format", "def __init__( self, input_attribute=None, output_attribute=None, value_if_false=None, value_if_true=None): \"\"\"Initialized a helper", "Attributes: custom_helpers (list[str]): identifiers of custom event formatter helpers. helpers", "unique identifier for the event data supported by the formatter.\"\"\"", "event_values): \"\"\"Determines the short message. Args: event_values (dict[str, object]): event", "80: short_message_string = '{0:s}...'.format(short_message_string[:77]) return short_message_string class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format", "self.custom_helpers.append(identifier) def AddHelper(self, helper): \"\"\"Adds an event formatter helper. Args:", "that contains the enumeration input value. 
output_attribute (str): name of", "class to format event values. Attributes: custom_helpers (list[str]): identifiers of", "unique attribute name. Format string pieces without an attribute name", "(list[str]): identifiers of custom event formatter helpers. helpers (list[EventFormatterHelper]): event", "'Event: {0:s} data type: {1:s} display name: {2:s} ' 'parser", "format string. Returns: set(str): attribute names. \"\"\" # pylint: disable=unused-argument", "\"\"\"Determines the conditional formatted message. Args: format_string_pieces (dict[str, str]): format", "!= ['']): format_string_pieces = self._format_string_short_pieces format_string_pieces_map = self._format_string_short_pieces_map else: format_string_pieces", "def __init__( self, data_type='basic', format_string=None, format_string_short=None): \"\"\"Initializes a basic event", "= self._format_string_pieces format_string_pieces_map = self._format_string_pieces_map short_message_string = self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map,", "Args: format_string (str): message format string. event_values (dict[str, object]): event", "the flags output value should be stored. values (Optional[dict[str, str]]):", "event_values (dict[str, object]): event values. \"\"\" class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for", "class EventFormatter(object): \"\"\"Base class to format event values. Attributes: custom_helpers", "the attribute where the output value should be stored. \"\"\"", "object]): event values. Returns: str: short message. \"\"\" if self._format_string_short:", "format string pieces should be joined. format_string_short_pieces (Optional[list[str]]): short message", "= input_value event_values[self.output_attribute] = self.values.get( input_value, default_value) class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper", "event object values into a formatted string that is similar", "name are supported. 
Args: data_type (Optional[str]): unique identifier for the", "custom_helpers (list[str]): identifiers of custom event formatter helpers. helpers (list[EventFormatterHelper]):", "event values. \"\"\" input_value = event_values.get(self.input_attribute, None) if input_value is", "error: {4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) message_string =", "string. Returns: set(str): attribute names. \"\"\" if self._format_string_attribute_names is None:", "format string. event_values (dict[str, object]): event values. Returns: str: formatted", "= self._format_string_short else: format_string = self._format_string short_message_string = self._FormatMessage(format_string, event_values)", "BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting boolean event data. Attributes: input_attribute (str):", "a formatted string that is similar to the description_long and", "value should be stored. values (dict[str, str]): mapping of enumeration", "attribute name per format string piece. E.g. [\"Description: {description}\"] would", "logger.error(error_message) message_string = '' # Strip carriage return and linefeed", "{4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) message_string = ''", "is None: default_value = input_value event_values[self.output_attribute] = self.values.get( input_value, default_value)", "string piece. E.g. [\"Description: {description}\"] would be mapped to: [0]", "format_string_pieces_map (list[int, str]): format string pieces map. event_values (dict[str, object]):", "event_values.get('parser', 'N/A') error_message = 'Unicode decode error: {0!s}'.format(exception) error_message =", "value. output_attribute (Optional[str]): name of the attribute where the output", "format_string_separator (Optional[str]): string by which separate format string pieces should", "the formatter. 
\"\"\" super(EventFormatter, self).__init__() self._data_type = data_type self._format_string_attribute_names =", "and description_short in l2t_csv. Plaso no longer stores these field", "value. output_attribute (Optional[str]): name of the attribute where the enumeration", "formatter. The syntax of the format strings is similar to", "pieces. \"\"\" if format_string_separator is None: format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter,", "values. \"\"\" input_value = event_values.get(self.input_attribute, None) if input_value: output_value =", "attribute name to optimize conditional string formatting. Raises: RuntimeError: when", "self.values.items(): if flag & input_value: output_values.append(mapped_value) event_values[self.output_attribute] = ', '.join(output_values)", "self._format_string short_message_string = self._FormatMessage(format_string, event_values) # Truncate the short message", "values. \"\"\" super(FlagsEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute", "message_string = ' '.join(attribute_values) except UnicodeDecodeError as exception: data_type =", "which separate format string pieces should be joined. format_string_short_pieces (Optional[list[str]]):", "does not needs formatting. Args: format_string_pieces (list[str]): format string pieces.", "class for a helper for custom formatting of event data.\"\"\"", "should contain at maximum one unique attribute name. Format string", "the boolean input value is True. \"\"\" super(BooleanEventFormatterHelper, self).__init__() self.input_attribute", "be stored. values (dict[str, str]): mapping of flags input and", "boolean input value is True. \"\"\" super(BooleanEventFormatterHelper, self).__init__() self.input_attribute =", "attribute names. \"\"\" # pylint: disable=unused-argument def AddCustomHelper( self, identifier,", "str: short message. 
\"\"\" class BasicEventFormatter(EventFormatter): \"\"\"Format event values using", "string pieces map. Raises: RuntimeError: when an invalid format string", "str]): format string pieces map. event_values (dict[str, object]): event values.", "using format string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR = ' ' def __init__(", "' 'parser chain: {3:s} with error: {4:s}').format( event_identifier, data_type, display_name,", "the formatter. format_string_pieces (Optional[list[str]]): (long) message format string pieces. format_string_separator", "is encountered. \"\"\" string_pieces = [] for map_index, attribute_name in", "self, format_string_pieces, format_string_pieces_map): \"\"\"Creates a format string map. The format", "\"\"\"Initialized a helper for formatting enumeration event data. Args: default", "self._format_string_short = format_string_short def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in", "@abc.abstractmethod def GetMessageShort(self, event_values): \"\"\"Determines the short message. Args: event_values", "helpers. helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" def __init__( self,", "None: default_value = self.default if default_value is None: default_value =", "should be stored. values (dict[str, str]): mapping of flags input", "format string. Attributes: custom_helpers (list[str]): identifiers of custom event formatter", "self._FormatMessage(format_string, event_values) # Truncate the short message string if necessary.", "string definition, is used to convert the event object values", "input_value: output_value = self.value_if_true else: output_value = self.value_if_false event_values[self.output_attribute] =", "and output values. 
\"\"\" super(FlagsEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute", "= event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser', 'N/A') error_message = 'Unicode", "self._format_string_pieces_map = [] self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map = [] self._CreateFormatStringMap(", "event object attribute is defined as {attribute_name}. Args: data_type (Optional[str]):", "string that is similar to the description_long and description_short field.", "an attribute name are supported. Args: data_type (Optional[str]): unique identifier", "event data supported by the formatter. format_string_pieces (Optional[list[str]]): (long) message", "(BasicEventFormatter). Every format string piece should contain at maximum one", "event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute, value)) message_string = ' '.join(attribute_values) except UnicodeDecodeError", "output_attribute (str): name of the attribute where the enumeration output", "the enumeration input value. output_attribute (Optional[str]): name of the attribute", "data_type (Optional[str]): unique identifier for the event data supported by", "stored as an empty map entry to keep # the", "conditional event formatter. The syntax of the format strings pieces", "input_value is not None: default_value = self.default if default_value is", "message. \"\"\" @abc.abstractmethod def GetMessageShort(self, event_values): \"\"\"Determines the short message.", "(dict[str, object]): event values. Returns: str: short message. 
\"\"\" if", "short_message_string class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format event values using format string", "= [] self.helpers = [] @property def data_type(self): \"\"\"str: unique", "is None: format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces = format_string_pieces", "input and output values. \"\"\" def __init__( self, default=None, input_attribute=None,", "{} def FormatEventValues(self, event_values): \"\"\"Formats event values using the helper.", "self._CreateFormatStringMaps() return self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map, event_values) def GetMessageShort(self, event_values): \"\"\"Determines", "\"\"\" try: message_string = format_string.format(**event_values) except KeyError as exception: data_type", "supported. Args: data_type (Optional[str]): unique identifier for the event data", "for the event data supported by the formatter. \"\"\" super(EventFormatter,", "input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for formatting flags event", "self._format_string_attribute_names = [] for format_string_piece in self._format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(", "= self._format_string_short_pieces format_string_pieces_map = self._format_string_short_pieces_map else: format_string_pieces = self._format_string_pieces format_string_pieces_map", "message format string pieces. \"\"\" if format_string_separator is None: format_string_separator", "attribute_name = '' else: attribute_name = attribute_names[0] format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self):", "stored. 
\"\"\" self.custom_helpers.append(identifier) def AddHelper(self, helper): \"\"\"Adds an event formatter", "= event_values.get(self.input_attribute, None) if input_value is not None: default_value =", "if (self._format_string_short_pieces and self._format_string_short_pieces != ['']): format_string_pieces = self._format_string_short_pieces format_string_pieces_map", "self.default if default_value is None: default_value = input_value event_values[self.output_attribute] =", "boolean input value is False. value_if_true (str): output value if", "field, referred to as description_long and description_short in l2t_csv. Plaso", "that does not needs formatting. Args: format_string_pieces (list[str]): format string", "in format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if len(set(attribute_names)) > 1:", "event formatters interface classes. The l2t_csv and other formats are", "\"\"\"Base class for a helper for custom formatting of event", "values. \"\"\" input_value = event_values.get(self.input_attribute, None) if input_value is not", "file contains the event formatters interface classes. The l2t_csv and", "# string.strip(). return message_string.replace('\\r', '').replace('\\n', '') def FormatEventValues(self, event_values): \"\"\"Formats", "format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if len(set(attribute_names)) > 1: raise", "helper for formatting enumeration event data. Args: default (Optional[str]): default", "is not None: string_pieces.append(format_string_pieces[map_index]) format_string = self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string, event_values)", "Attributes: input_attribute (str): name of the attribute that contains the", "if input_value is not None: default_value = self.default if default_value", "of the attribute that contains the input value. 
output_attribute (Optional[str]):", "format_string_pieces_map = self._format_string_short_pieces_map else: format_string_pieces = self._format_string_pieces format_string_pieces_map = self._format_string_pieces_map", "the helpers. Args: event_values (dict[str, object]): event values. \"\"\" for", "event data. Attributes: default (str): default value. input_attribute (str): name", "def GetMessageShort(self, event_values): \"\"\"Determines the short message. Args: event_values (dict[str,", "The format string can be defined as: # {name}, {name:format},", "(Optional[str]): name of the attribute where the output value should", "is None and there is no corresponding enumeration value then", "event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) attribute_values = [] for", "basic event formatter (BasicEventFormatter). Every format string piece should contain", "\"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() if (self._format_string_short_pieces and self._format_string_short_pieces !=", "identifier for the event data supported by the formatter.\"\"\" return", "the input value. output_attribute (Optional[str]): name of the attribute where", "\"\"\" string_pieces = [] for map_index, attribute_name in enumerate(format_string_pieces_map): if", "of the attribute that contains the flags input value. output_attribute", "(long) message format string pieces. format_string_separator (Optional[str]): string by which", "attribute that contains the flags input value. output_attribute (str): name", "identifiers of custom event formatter helpers. helpers (list[EventFormatterHelper]): event formatter", "[] def _CreateFormatStringMap( self, format_string_pieces, format_string_pieces_map): \"\"\"Creates a format string", "referred to as description_long and description_short in l2t_csv. Plaso no", "Args: event_values (dict[str, object]): event values. 
\"\"\" input_value = event_values.get(self.input_attribute,", "else: output_value = self.value_if_false event_values[self.output_attribute] = output_value class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base", "is True. \"\"\" super(BooleanEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute =", "'parser chain: {3:s} with error: {4:s}').format( event_identifier, data_type, display_name, parser_chain,", "where the enumeration output value should be stored. values (Optional[dict[str,", "contains the event formatters interface classes. The l2t_csv and other", "string map. The format string pieces map is a list", "[0] = \"description\". If the string piece does not contain", "exception) error_message = ( 'Event: {0:s} data type: {1:s} display", "of format() where the place holder for a certain event", "the event data supported by the formatter. \"\"\" super(EventFormatter, self).__init__()", "pieces and their corresponding attribute name to optimize conditional string", "format_string_pieces or [] self._format_string_pieces_map = [] self._format_string_separator = format_string_separator self._format_string_short_pieces", "format string. Returns: set(str): attribute names. \"\"\" if self._format_string_attribute_names is", "that contains the boolean input value. output_attribute (str): name of", "string pieces map. event_values (dict[str, object]): event values. Returns: str:", "conditional formatted message. Args: format_string_pieces (dict[str, str]): format string pieces.", "Attributes: default (str): default value. input_attribute (str): name of the", "where the enumeration output value should be stored. values (dict[str,", "message format string. \"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names = None self._format_string", "re from plaso.formatters import logger class EventFormatterHelper(object): \"\"\"Base class of", "formatted message. 
Args: format_string_pieces (dict[str, str]): format string pieces. format_string_pieces_map", "event_values.get('display_name', 'N/A') event_identifier = event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser', 'N/A')", "event values. Returns: str: message. \"\"\" return self._FormatMessage(self._format_string, event_values) def", "not needs formatting. Args: format_string_pieces (list[str]): format string pieces. format_string_pieces_map", "event ' 'value: {1!s}').format(format_string, exception) error_message = ( 'Event: {0:s}", "self).__init__() self._data_type = data_type self._format_string_attribute_names = None self.custom_helpers = []", "should be stored. values (Optional[dict[str, str]]): mapping of flags input", "the event formatters interface classes. The l2t_csv and other formats", "(str): identifier. input_attribute (Optional[str]): name of the attribute that contains", "event_values) def GetMessageShort(self, event_values): \"\"\"Determines the short message. Args: event_values", "pieces. format_string_pieces_map (list[str]): format string pieces map. Raises: RuntimeError: when", "\"\"\"Creates the format string maps. Maps are built of the", "[] self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map) def _ConditionalFormatMessage( self, format_string_pieces, format_string_pieces_map, event_values):", "value is None and there is no corresponding enumeration value", "None and there is no corresponding enumeration value then the", "be stored. values (Optional[dict[str, str]]): mapping of enumeration input and", "self, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for formatting flags", "the attribute names in the format string. Returns: set(str): attribute", "except KeyError as exception: data_type = event_values.get('data_type', 'N/A') display_name =", "values. 
\"\"\" super(EnumerationEventFormatterHelper, self).__init__() self.default = default self.input_attribute = input_attribute", "to that of format() where the place holder for a", "can be defined as: # {name}, {name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE", "self._format_string_pieces = format_string_pieces or [] self._format_string_pieces_map = [] self._format_string_separator =", "self, default=None, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for formatting", "one unique attribute name. Format string pieces without an attribute", "formatter. The syntax of the format strings pieces is similar", "set(str): attribute names. \"\"\" # pylint: disable=unused-argument def AddCustomHelper( self,", "len(set(attribute_names)) > 1: raise RuntimeError(( 'Invalid format string piece: [{0:s}]", "to: [0] = \"description\". If the string piece does not", "(dict[str, object]): event values. \"\"\" input_value = event_values.get(self.input_attribute, None) if", "formatting flags event data. Attributes: input_attribute (str): name of the", "else: format_string = self._format_string short_message_string = self._FormatMessage(format_string, event_values) # Truncate", "formatter (BasicEventFormatter). Every format string piece should contain at maximum", "(Optional[dict[str, str]]): mapping of enumeration input and output values. \"\"\"", "self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if len(set(attribute_names)) > 1: raise RuntimeError(( 'Invalid format", "built of the string pieces and their corresponding attribute name", "Args: input_attribute (Optional[str]): name of the attribute that contains the", "'unable to format string: \"{0:s}\" missing required event ' 'value:", "FormatEventValues(self, event_values): \"\"\"Formats event values using the helper. 
If default", "event_values[self.output_attribute] = output_value class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class for a helper", "self, data_type='basic', format_string=None, format_string_short=None): \"\"\"Initializes a basic event formatter. The", "no longer stores these field explicitly. A formatter, with a", "of helper for formatting event data.\"\"\" @abc.abstractmethod def FormatEventValues(self, event_values):", "chain: {3:s} with error: {4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message)", "are dependent on a message field, referred to as description_long", "pieces. attribute_name = '' else: attribute_name = attribute_names[0] format_string_pieces_map.append(attribute_name) def", "format_string self._format_string_short = format_string_short def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names", "piece: [{0:s}] contains more than 1 ' 'attribute name.').format(format_string_piece)) if", "The format string pieces map is a list containing the", "a message field, referred to as description_long and description_short in", "of enumeration input and output values. \"\"\" super(EnumerationEventFormatterHelper, self).__init__() self.default", "corresponding enumeration value then the original value is used. Args:", "[\"Description: {description}\"] would be mapped to: [0] = \"description\". If", "helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" def __init__( self, data_type='basic',", "input_attribute (Optional[str]): name of the attribute that contains the boolean", "flags input and output values. \"\"\" def __init__( self, input_attribute=None,", "values using the helper. If default value is None and", "where the output value should be stored. 
\"\"\" self.custom_helpers.append(identifier) def", "contains more than 1 ' 'attribute name.').format(format_string_piece)) if not attribute_names:", "input_value: output_values.append(mapped_value) event_values[self.output_attribute] = ', '.join(output_values) class EventFormatter(object): \"\"\"Base class", "pieces map is a list containing the attribute name per", "= ' '.join(attribute_values) except UnicodeDecodeError as exception: data_type = event_values.get('data_type',", "string piece is stored as an empty map entry to", "formatting. Raises: RuntimeError: when an invalid format string piece is", "\"\"\"Conditionally format event values using format string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR =", "\"\"\"Creates a format string map. The format string pieces map", "self._format_string)) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the message. Args:", "data.\"\"\" DATA_TYPE = '' IDENTIFIER = '' @abc.abstractmethod def FormatEventValues(self,", "contains the flags input value. output_attribute (str): name of the", "Format string pieces without an attribute name are supported. Args:", "__init__( self, default=None, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for", "map. The format string pieces map is a list containing", "def FormatEventValues(self, event_values): \"\"\"Formats event values using the helpers. Args:", "short message. \"\"\" if self._format_string_short: format_string = self._format_string_short else: format_string", "map. event_values (dict[str, object]): event values. 
Returns: str: conditional formatted", "parser_chain = event_values.get('parser', 'N/A') error_message = ( 'unable to format", "format() where the place holder for a certain event object", "syntax of the format strings pieces is similar to of", "of the string pieces and their corresponding attribute name to", "in self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names", "= [] self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map) def _ConditionalFormatMessage( self, format_string_pieces, format_string_pieces_map,", "helper. Args: event_values (dict[str, object]): event values. \"\"\" input_value =", "for formatting flags event data. Args: input_attribute (Optional[str]): name of", "None) if input_value is not None: default_value = self.default if", "syntax of the format strings is similar to that of", "value should be stored. \"\"\" self.custom_helpers.append(identifier) def AddHelper(self, helper): \"\"\"Adds", "value_if_true (str): output value if the boolean input value is", "event_values (dict[str, object]): event values. Returns: str: message. \"\"\" return", "Returns: str: short message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() if", "self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the short", "if self._format_string_attribute_names is None: self._format_string_attribute_names = [] for format_string_piece in", "index in the map equal to the format string pieces.", "data. Attributes: default (str): default value. input_attribute (str): name of", "for formatting boolean event data. Args: input_attribute (Optional[str]): name of", "input_attribute (Optional[str]): name of the attribute that contains the input", "values. 
\"\"\" class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting boolean event data.", "names in the format string. Returns: set(str): attribute names. \"\"\"", "a format string definition, is used to convert the event", "CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class for a helper for custom formatting of", "description_long and description_short in l2t_csv. Plaso no longer stores these", "default value. input_attribute (Optional[str]): name of the attribute that contains", "string. \"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names = None self._format_string = format_string", "string maps. Maps are built of the string pieces and", "\"\"\" input_value = event_values.get(self.input_attribute, None) if input_value: output_value = self.value_if_true", "short_message_string = self._FormatMessage(format_string, event_values) # Truncate the short message string", "Args: event_values (dict[str, object]): event values. \"\"\" for helper in", "\"\"\" if self._format_string_short: format_string = self._format_string_short else: format_string = self._format_string", "input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for formatting enumeration event", "event data supported by the formatter.\"\"\" return self._data_type.lower() def _FormatMessage(self,", "boolean event data. Args: input_attribute (Optional[str]): name of the attribute", "= \"description\". 
If the string piece does not contain an", "input_value, default_value) class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting flags event data.", "is similar to that of format() where the place holder", "self).__init__(data_type=data_type) self._format_string_attribute_names = None self._format_string = format_string self._format_string_short = format_string_short", "(Optional[str]): name of the attribute that contains the input value.", "self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in", "is no corresponding enumeration value then the original value is", "set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the message. Args: event_values (dict[str,", "# the index in the map equal to the format", "that contains the input value. output_attribute (Optional[str]): name of the", "the helper. Args: event_values (dict[str, object]): event values. \"\"\" class", "(str): name of the attribute that contains the enumeration input", "(dict[str, object]): event values. Returns: str: message. \"\"\" return self._FormatMessage(self._format_string,", "input_value = event_values.get(self.input_attribute, None) if input_value: output_value = self.value_if_true else:", "identifier. input_attribute (Optional[str]): name of the attribute that contains the", "classes. The l2t_csv and other formats are dependent on a", "of the attribute where the enumeration output value should be", "\"description\". If the string piece does not contain an attribute", "enumeration event data. Attributes: default (str): default value. input_attribute (str):", "\"\"\"str: unique identifier for the event data supported by the", "return short_message_string class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format event values using format", "event formatter (BasicEventFormatter). 
Every format string piece should contain at", "format string definition, is used to convert the event object", "', '.join(output_values) class EventFormatter(object): \"\"\"Base class to format event values.", "helper to add. \"\"\" self.helpers.append(helper) @abc.abstractmethod def GetMessage(self, event_values): \"\"\"Determines", "format strings is similar to that of format() where the", "(Optional[str]): name of the attribute that contains the boolean input", "using the helper. Args: event_values (dict[str, object]): event values. \"\"\"", "[] self._format_string_separator = format_string_separator self._format_string_short_pieces = format_string_short_pieces or [] self._format_string_short_pieces_map", "event values. Returns: str: message. \"\"\" @abc.abstractmethod def GetMessageShort(self, event_values):", "mapping of flags input and output values. \"\"\" super(FlagsEventFormatterHelper, self).__init__()", "\"\"\"Retrieves the attribute names in the format string. Returns: set(str):", "map. Raises: RuntimeError: when an invalid format string piece is", "piece does not contain an attribute name it is treated", "input_value = event_values.get(self.input_attribute, None) if input_value is None: return output_values", "of the format strings is similar to that of format()", "or event_values.get( attribute_name, None) is not None: string_pieces.append(format_string_pieces[map_index]) format_string =", "the basic event formatter (BasicEventFormatter). Every format string piece should", "mapped to: [0] = \"description\". If the string piece does", "{name}, {name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self,", "\"\"\"Format event values using a message format string. Attributes: custom_helpers", "invalid format string piece is encountered. 
\"\"\" self._format_string_pieces_map = []", "formatted string that is similar to the description_long and description_short", "( 'unable to format string: \"{0:s}\" missing required event '", "input_attribute (Optional[str]): name of the attribute that contains the enumeration", "formatter.\"\"\" return self._data_type.lower() def _FormatMessage(self, format_string, event_values): \"\"\"Determines the formatted", "longer stores these field explicitly. A formatter, with a format", "[] for attribute, value in event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute, value)) message_string", "self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if attribute_names: self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names) def GetMessage(self, event_values):", "values using a message format string. Attributes: custom_helpers (list[str]): identifiers", "Args: identifier (str): identifier. input_attribute (Optional[str]): name of the attribute", "if input_value is None: return output_values = [] for flag,", "missing required event ' 'value: {1!s}').format(format_string, exception) error_message = (", "enumeration input and output values. \"\"\" super(EnumerationEventFormatterHelper, self).__init__() self.default =", "self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute self.value_if_false = value_if_false", "message format string. format_string_short (Optional[str]): short message format string. \"\"\"", "is stored as an empty map entry to keep #", "if len(set(attribute_names)) > 1: raise RuntimeError(( 'Invalid format string piece:", "the attribute that contains the flags input value. 
output_attribute (str):", "logger class EventFormatterHelper(object): \"\"\"Base class of helper for formatting event", "value)) message_string = ' '.join(attribute_values) except UnicodeDecodeError as exception: data_type", "= self.default if default_value is None: default_value = input_value event_values[self.output_attribute]", "message. Args: format_string (str): message format string. event_values (dict[str, object]):", "and output values. \"\"\" def __init__( self, default=None, input_attribute=None, output_attribute=None,", "event data supported by the formatter. \"\"\" super(EventFormatter, self).__init__() self._data_type", "maximum one unique attribute name. Format string pieces without an", "\"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names = None self._format_string = format_string self._format_string_short", "the index in the map equal to the format string", "for formatting boolean event data. Attributes: input_attribute (str): name of", "event_values.get(self.input_attribute, None) if input_value is None: return output_values = []", "values. \"\"\" def __init__( self, default=None, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized", "\"\"\" input_value = event_values.get(self.input_attribute, None) if input_value is None: return", "format string. format_string_short (Optional[str]): short message format string. \"\"\" super(BasicEventFormatter,", "text format string piece is stored as an empty map", "values. Returns: str: formatted message. \"\"\" try: message_string = format_string.format(**event_values)", "Args: format_string_pieces (list[str]): format string pieces. 
format_string_pieces_map (list[str]): format string", "UnicodeDecodeError as exception: data_type = event_values.get('data_type', 'N/A') display_name = event_values.get('display_name',", "self._CreateFormatStringMaps() if (self._format_string_short_pieces and self._format_string_short_pieces != ['']): format_string_pieces = self._format_string_short_pieces", "output value should be stored. value_if_false (str): output value if", "'' IDENTIFIER = '' @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event", "str: short message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() if (self._format_string_short_pieces", "class BasicEventFormatter(EventFormatter): \"\"\"Format event values using a message format string.", "class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class for a helper for custom formatting", "logger.error(error_message) attribute_values = [] for attribute, value in event_values.items(): attribute_values.append('{0:s}:", "the attribute that contains the enumeration input value. output_attribute (str):", "(str): output value if the boolean input value is False.", "linefeed form the message strings. # Using replace function here", "-*- coding: utf-8 -*- \"\"\"This file contains the event formatters", "return message_string.replace('\\r', '').replace('\\n', '') def FormatEventValues(self, event_values): \"\"\"Formats event values", "\"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting enumeration event data. Attributes:", "= event_values.get(self.input_attribute, None) if input_value: output_value = self.value_if_true else: output_value", "__init__(self, data_type='internal'): \"\"\"Initializes an event formatter. Args: data_type (Optional[str]): unique", "(str): name of the attribute where the enumeration output value", "event_values): \"\"\"Formats event values using the helpers. 
Args: event_values (dict[str,", "default_value = input_value event_values[self.output_attribute] = self.values.get( input_value, default_value) class FlagsEventFormatterHelper(EventFormatterHelper):", "flags event data. Attributes: input_attribute (str): name of the attribute", "format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self): \"\"\"Creates the format string maps. Maps are", "self.helpers.append(helper) @abc.abstractmethod def GetMessage(self, event_values): \"\"\"Determines the message. Args: event_values", "that contains the flags input value. output_attribute (str): name of", "event formatter. Args: data_type (Optional[str]): unique identifier for the event", "an invalid format string piece is encountered. \"\"\" for format_string_piece", "event_values) # Truncate the short message string if necessary. if", "necessary. if len(short_message_string) > 80: short_message_string = '{0:s}...'.format(short_message_string[:77]) return short_message_string", "__init__( self, input_attribute=None, output_attribute=None, value_if_false=None, value_if_true=None): \"\"\"Initialized a helper for", "string. Attributes: custom_helpers (list[str]): identifiers of custom event formatter helpers.", "object]): event values. Returns: str: short message. \"\"\" if not", "__init__( self, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a helper for formatting", "{1!s}'.format(attribute, value)) message_string = ' '.join(attribute_values) except UnicodeDecodeError as exception:", "description_short in l2t_csv. Plaso no longer stores these field explicitly.", "data_type='basic', format_string=None, format_string_short=None): \"\"\"Initializes a basic event formatter. The syntax", "\"\"\"Determines the short message. Args: event_values (dict[str, object]): event values.", "return and linefeed form the message strings. # Using replace", "format_string_pieces, format_string_pieces_map): \"\"\"Creates a format string map. 
The format string", "Returns: str: short message. \"\"\" class BasicEventFormatter(EventFormatter): \"\"\"Format event values", "values. Attributes: custom_helpers (list[str]): identifiers of custom event formatter helpers.", "formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" def __init__(", "supported by the formatter. format_string_pieces (Optional[list[str]]): (long) message format string", "by which separate format string pieces should be joined. format_string_short_pieces", "it is faster than re.sub() or # string.strip(). return message_string.replace('\\r',", "value is False. value_if_true (str): output value if the boolean", "other formats are dependent on a message field, referred to", "{name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self, data_type='internal'):", "for formatting enumeration event data. Attributes: default (str): default value.", "@abc.abstractmethod def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the format", "return output_values = [] for flag, mapped_value in self.values.items(): if", "data. Args: default (Optional[str]): default value. input_attribute (Optional[str]): name of", "attribute name are supported. Args: data_type (Optional[str]): unique identifier for", "EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting enumeration event data. Attributes: default (str):", "if the boolean input value is False. value_if_true (str): output", "values. 
\"\"\" def __init__( self, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a", "for format_string_piece in self._format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if attribute_names:", "= input_attribute self.output_attribute = output_attribute self.values = values or {}", "= self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map, event_values) # Truncate the short message", "event_values (dict[str, object]): event values. Returns: str: conditional formatted message.", "# Using replace function here because it is faster than", "Args: default (Optional[str]): default value. input_attribute (Optional[str]): name of the", "enumerate(format_string_pieces_map): if not attribute_name or event_values.get( attribute_name, None) is not", "values. Returns: str: conditional formatted message. Raises: RuntimeError: when an", "self.custom_helpers = [] self.helpers = [] @property def data_type(self): \"\"\"str:", "output_attribute (Optional[str]): name of the attribute where the output value", "and there is no corresponding enumeration value then the original", "does not contain an attribute name it is treated as", "a format string map. The format string pieces map is", "\"\"\"Initializes an event formatter. Args: data_type (Optional[str]): unique identifier for", "object]): event values. Returns: str: short message. \"\"\" class BasicEventFormatter(EventFormatter):", "self._format_string_short_pieces = format_string_short_pieces or [] self._format_string_short_pieces_map = [] def _CreateFormatStringMap(", "(Optional[str]): (long) message format string. format_string_short (Optional[str]): short message format", "attribute that contains the enumeration input value. 
output_attribute (Optional[str]): name", "super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names = None self._format_string = format_string self._format_string_short =", "values or {} def FormatEventValues(self, event_values): \"\"\"Formats event values using", "> 1: raise RuntimeError(( 'Invalid format string piece: [{0:s}] contains", "event_values): \"\"\"Formats event values using the helper. If default value", "the attribute that contains the boolean input value. output_attribute (Optional[str]):", "(Optional[list[str]]): short message format string pieces. \"\"\" if format_string_separator is", "'').replace('\\n', '') def FormatEventValues(self, event_values): \"\"\"Formats event values using the", "= self.value_if_false event_values[self.output_attribute] = output_value class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class for", "= self._format_string short_message_string = self._FormatMessage(format_string, event_values) # Truncate the short", "data.\"\"\" @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event values using the", "as exception: data_type = event_values.get('data_type', 'N/A') display_name = event_values.get('display_name', 'N/A')", "value should be stored. values (Optional[dict[str, str]]): mapping of flags", "'N/A') parser_chain = event_values.get('parser', 'N/A') error_message = ( 'unable to", "\"\"\"Base class to format event values. Attributes: custom_helpers (list[str]): identifiers", "output_value class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class for a helper for custom", "output_values.append(mapped_value) event_values[self.output_attribute] = ', '.join(output_values) class EventFormatter(object): \"\"\"Base class to", "default_value is None: default_value = input_value event_values[self.output_attribute] = self.values.get( input_value,", "E.g. 
[\"Description: {description}\"] would be mapped to: [0] = \"description\".", "input_value event_values[self.output_attribute] = self.values.get( input_value, default_value) class FlagsEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for", "BasicEventFormatter(EventFormatter): \"\"\"Format event values using a message format string. Attributes:", "value. output_attribute (str): name of the attribute where the boolean", "object]): event values. Returns: str: message. \"\"\" if not self._format_string_pieces_map:", "the attribute where the enumeration output value should be stored.", "in the map equal to the format string pieces. attribute_name", "input_attribute=None, output_attribute=None, value_if_false=None, value_if_true=None): \"\"\"Initialized a helper for formatting boolean", "input and output values. \"\"\" super(FlagsEventFormatterHelper, self).__init__() self.input_attribute = input_attribute", "{name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self, data_type='internal'): \"\"\"Initializes an", "output_attribute self.values = values or {} def FormatEventValues(self, event_values): \"\"\"Formats", "helper.FormatEventValues(event_values) @abc.abstractmethod def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the", "format_string_pieces_map = self._format_string_pieces_map short_message_string = self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map, event_values) #", "super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces = format_string_pieces or [] self._format_string_pieces_map = []", "defined as {attribute_name}. Args: data_type (Optional[str]): unique identifier for the", "if necessary. 
if len(short_message_string) > 80: short_message_string = '{0:s}...'.format(short_message_string[:77]) return", "map is a list containing the attribute name per format", "class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting enumeration event data. Attributes: default", "class of helper for formatting event data.\"\"\" @abc.abstractmethod def FormatEventValues(self,", "value if the boolean input value is False. value_if_true (str):", "defined as: # {name}, {name:format}, {name!conversion}, {name!conversion:format} _FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile(", "attribute_name in enumerate(format_string_pieces_map): if not attribute_name or event_values.get( attribute_name, None)", "event data.\"\"\" DATA_TYPE = '' IDENTIFIER = '' @abc.abstractmethod def", "\"\"\" # pylint: disable=unused-argument def AddCustomHelper( self, identifier, input_attribute=None, output_attribute=None):", "event values. Returns: str: short message. \"\"\" if not self._format_string_pieces_map:", "self._format_string_attribute_names is None: self._format_string_attribute_names = [] for format_string_piece in self._format_string_pieces:", "separate format string pieces should be joined. format_string_short_pieces (Optional[list[str]]): short", "default_value = self.default if default_value is None: default_value = input_value", "super(BooleanEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute self.value_if_false =", "message. \"\"\" if self._format_string_short: format_string = self._format_string_short else: format_string =", "values. Returns: str: short message. 
\"\"\" if self._format_string_short: format_string =", "self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map = [] self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map) def _ConditionalFormatMessage(", "data_type self._format_string_attribute_names = None self.custom_helpers = [] self.helpers = []", "# pylint: disable=unused-argument def AddCustomHelper( self, identifier, input_attribute=None, output_attribute=None): \"\"\"Adds", "disable=unused-argument def AddCustomHelper( self, identifier, input_attribute=None, output_attribute=None): \"\"\"Adds a custom", "similar to that of format() where the place holder for", "= '' @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event values using", "of the attribute that contains the boolean input value. output_attribute", "data_type = event_values.get('data_type', 'N/A') display_name = event_values.get('display_name', 'N/A') event_identifier =", "a certain event object attribute is defined as {attribute_name}. Args:", "(list[str]): format string pieces. format_string_pieces_map (list[str]): format string pieces map.", "name to optimize conditional string formatting. Raises: RuntimeError: when an", "format string piece is encountered. \"\"\" string_pieces = [] for", "event data supported by the formatter. format_string (Optional[str]): (long) message", "self._data_type.lower() def _FormatMessage(self, format_string, event_values): \"\"\"Determines the formatted message. Args:", "not attribute_names: # The text format string piece is stored", "are built of the string pieces and their corresponding attribute", "are supported. Args: data_type (Optional[str]): unique identifier for the event", "self._format_string_attribute_names = None self.custom_helpers = [] self.helpers = [] @property", "that is similar to the description_long and description_short field. 
\"\"\"", "event_values.get('data_type', 'N/A') display_name = event_values.get('display_name', 'N/A') event_identifier = event_values.get('uuid', 'N/A')", "str: formatted message. \"\"\" try: message_string = format_string.format(**event_values) except KeyError", "\"\"\"Initialized a helper for formatting boolean event data. Args: input_attribute", "string: \"{0:s}\" missing required event ' 'value: {1!s}').format(format_string, exception) error_message", "in enumerate(format_string_pieces_map): if not attribute_name or event_values.get( attribute_name, None) is", "\"\"\"Initializes a basic event formatter. The syntax of the format", "the attribute that contains the boolean input value. output_attribute (str):", "corresponding attribute name to optimize conditional string formatting. Raises: RuntimeError:", "def data_type(self): \"\"\"str: unique identifier for the event data supported", "description_long and description_short field. \"\"\" import abc import re from", "'Unicode decode error: {0!s}'.format(exception) error_message = ( 'Event: {0:s} data", "(dict[str, object]): event values. Returns: str: message. \"\"\" if not", "string. Returns: set(str): attribute names. \"\"\" # pylint: disable=unused-argument def", "would be mapped to: [0] = \"description\". If the string", "in the format string. Returns: set(str): attribute names. \"\"\" #", "string pieces. format_string_pieces_map (list[str]): format string pieces map. Raises: RuntimeError:", "(Optional[str]): name of the attribute that contains the flags input", "self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the message. 
Args:", "not None: string_pieces.append(format_string_pieces[map_index]) format_string = self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string, event_values) def", "= ', '.join(output_values) class EventFormatter(object): \"\"\"Base class to format event", "def AddCustomHelper( self, identifier, input_attribute=None, output_attribute=None): \"\"\"Adds a custom event", "{1!s}').format(format_string, exception) error_message = ( 'Event: {0:s} data type: {1:s}", "format strings pieces is similar to of the basic event", "treated as text that does not needs formatting. Args: format_string_pieces", "to keep # the index in the map equal to", "= self._format_string_pieces_map short_message_string = self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map, event_values) # Truncate", "to format event values. Attributes: custom_helpers (list[str]): identifiers of custom", "# Truncate the short message string if necessary. if len(short_message_string)", "values (dict[str, str]): mapping of flags input and output values.", "def _FormatMessage(self, format_string, event_values): \"\"\"Determines the formatted message. Args: format_string", "value is True. \"\"\" def __init__( self, input_attribute=None, output_attribute=None, value_if_false=None,", "is None: return output_values = [] for flag, mapped_value in", "output_attribute (str): name of the attribute where the boolean output", "piece is encountered. \"\"\" self._format_string_pieces_map = [] self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map)", "event_values): \"\"\"Determines the formatted message. Args: format_string (str): message format", "Returns: set(str): attribute names. \"\"\" # pylint: disable=unused-argument def AddCustomHelper(", "the format string. Returns: set(str): attribute names. \"\"\" if self._format_string_attribute_names", "message. 
\"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() if (self._format_string_short_pieces and self._format_string_short_pieces", "name of the attribute where the boolean output value should", "function here because it is faster than re.sub() or #", "None) if input_value is None: return output_values = [] for", "output_value = self.value_if_true else: output_value = self.value_if_false event_values[self.output_attribute] = output_value", "stored. values (Optional[dict[str, str]]): mapping of enumeration input and output", "the formatted message. Args: format_string (str): message format string. event_values", "message. \"\"\" class BasicEventFormatter(EventFormatter): \"\"\"Format event values using a message", "if len(short_message_string) > 80: short_message_string = '{0:s}...'.format(short_message_string[:77]) return short_message_string class", "string piece is encountered. \"\"\" self._format_string_pieces_map = [] self._CreateFormatStringMap( self._format_string_pieces,", "attribute that contains the boolean input value. output_attribute (str): name", "True. \"\"\" super(BooleanEventFormatterHelper, self).__init__() self.input_attribute = input_attribute self.output_attribute = output_attribute", "format string piece should contain at maximum one unique attribute", "FormatEventValues(self, event_values): \"\"\"Formats event values using the helper. Args: event_values", "a message format string. Attributes: custom_helpers (list[str]): identifiers of custom", "self._format_string_short_pieces, self._format_string_short_pieces_map) def _ConditionalFormatMessage( self, format_string_pieces, format_string_pieces_map, event_values): \"\"\"Determines the", "If the string piece does not contain an attribute name", "and description_short field. \"\"\" import abc import re from plaso.formatters", "enumeration input value. 
output_attribute (str): name of the attribute where", "string can be defined as: # {name}, {name:format}, {name!conversion}, {name!conversion:format}", "class ConditionalEventFormatter(EventFormatter): \"\"\"Conditionally format event values using format string pieces.\"\"\"", "event values. Returns: str: message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps()", "the helper. Args: event_values (dict[str, object]): event values. \"\"\" input_value", "be stored. values (Optional[dict[str, str]]): mapping of flags input and", "(dict[str, object]): event values. \"\"\" class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting", "data supported by the formatter.\"\"\" return self._data_type.lower() def _FormatMessage(self, format_string,", "value in event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute, value)) message_string = ' '.join(attribute_values)", "attribute that contains the flags input value. output_attribute (Optional[str]): name", "(Optional[list[str]]): (long) message format string pieces. format_string_separator (Optional[str]): string by", "\"\"\" @abc.abstractmethod def GetMessageShort(self, event_values): \"\"\"Determines the short message. Args:", "value should be stored. value_if_false (str): output value if the", "as an empty map entry to keep # the index", "return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines the message. Args: event_values", "len(short_message_string) > 80: short_message_string = '{0:s}...'.format(short_message_string[:77]) return short_message_string class ConditionalEventFormatter(EventFormatter):", "event_values.get(self.input_attribute, None) if input_value: output_value = self.value_if_true else: output_value =", "an event formatter. 
Args: data_type (Optional[str]): unique identifier for the", "{4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) attribute_values = []", "self.input_attribute = input_attribute self.output_attribute = output_attribute self.values = values or", "[] self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map = [] self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map)", "\"\"\" return self._FormatMessage(self._format_string, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the short", "formatter helper to add. \"\"\" self.helpers.append(helper) @abc.abstractmethod def GetMessage(self, event_values):", "to the format string pieces. attribute_name = '' else: attribute_name", "self._format_string_pieces, self._format_string_pieces_map, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the short message.", "attribute, value in event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute, value)) message_string = '", "in self._format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if attribute_names: self._format_string_attribute_names.extend(attribute_names) return", "attribute_name, None) is not None: string_pieces.append(format_string_pieces[map_index]) format_string = self._format_string_separator.join(string_pieces) return", "= output_attribute self.value_if_false = value_if_false self.value_if_true = value_if_true def FormatEventValues(self,", "event_values): \"\"\"Determines the message. Args: event_values (dict[str, object]): event values.", "event_values.get(self.input_attribute, None) if input_value is not None: default_value = self.default", "output values. 
\"\"\" def __init__( self, default=None, input_attribute=None, output_attribute=None, values=None):", "supported by the formatter.\"\"\" return self._data_type.lower() def _FormatMessage(self, format_string, event_values):", "custom event formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\"", "input_value = event_values.get(self.input_attribute, None) if input_value is not None: default_value", "(Optional[str]): name of the attribute where the boolean output value", "display_name, parser_chain, error_message) logger.error(error_message) message_string = '' # Strip carriage", "self).__init__() self.default = default self.input_attribute = input_attribute self.output_attribute = output_attribute", "GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the format string. Returns:", "self.values = values or {} def FormatEventValues(self, event_values): \"\"\"Formats event", "Maps are built of the string pieces and their corresponding", "for a certain event object attribute is defined as {attribute_name}.", "self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces = format_string_pieces or [] self._format_string_pieces_map =", "(str): name of the attribute that contains the boolean input", "\"\"\" def __init__( self, data_type='basic', format_string=None, format_string_short=None): \"\"\"Initializes a basic", "because it is faster than re.sub() or # string.strip(). return", "error_message) logger.error(error_message) message_string = '' # Strip carriage return and", "pieces map. Raises: RuntimeError: when an invalid format string piece", "(dict[str, str]): mapping of flags input and output values. \"\"\"", "values. 
\"\"\" for helper in self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod def GetFormatStringAttributeNames(self):", "self.output_attribute = output_attribute self.value_if_false = value_if_false self.value_if_true = value_if_true def", "enumeration output value should be stored. values (Optional[dict[str, str]]): mapping", "self._format_string_separator.join(string_pieces) return self._FormatMessage(format_string, event_values) def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names", "@abc.abstractmethod def GetMessage(self, event_values): \"\"\"Determines the message. Args: event_values (dict[str,", "is similar to of the basic event formatter (BasicEventFormatter). Every", "Args: event_values (dict[str, object]): event values. \"\"\" class BooleanEventFormatterHelper(EventFormatterHelper): \"\"\"Helper", "super(EnumerationEventFormatterHelper, self).__init__() self.default = default self.input_attribute = input_attribute self.output_attribute =", "contains the boolean input value. output_attribute (str): name of the", "value if the boolean input value is True. \"\"\" def", "the flags input value. output_attribute (Optional[str]): name of the attribute", "is encountered. \"\"\" self._format_string_pieces_map = [] self._CreateFormatStringMap( self._format_string_pieces, self._format_string_pieces_map) self._format_string_short_pieces_map", "supported by the formatter. format_string (Optional[str]): (long) message format string.", "enumeration input value. output_attribute (Optional[str]): name of the attribute where", "(str): name of the attribute where the boolean output value", "event_values): \"\"\"Formats event values using the helper. Args: event_values (dict[str,", "AddHelper(self, helper): \"\"\"Adds an event formatter helper. 
Args: helper (EventFormatterHelper):", "the string piece does not contain an attribute name it", "message_string = '' # Strip carriage return and linefeed form", "= ( 'unable to format string: \"{0:s}\" missing required event", "Returns: str: formatted message. \"\"\" try: message_string = format_string.format(**event_values) except", "event values using a message format string. Attributes: custom_helpers (list[str]):", "\"\"\"This file contains the event formatters interface classes. The l2t_csv", "= output_attribute self.values = values or {} def FormatEventValues(self, event_values):", "self.value_if_true else: output_value = self.value_if_false event_values[self.output_attribute] = output_value class CustomEventFormatterHelper(EventFormatterHelper):", "flags input value. output_attribute (str): name of the attribute where", "output value if the boolean input value is False. value_if_true", "unique identifier for the event data supported by the formatter.", "= value_if_false self.value_if_true = value_if_true def FormatEventValues(self, event_values): \"\"\"Formats event", "event_values) def GetFormatStringAttributeNames(self): \"\"\"Retrieves the attribute names in the format", "the attribute name per format string piece. E.g. 
[\"Description: {description}\"]", "the attribute where the flags output value should be stored.", "output_attribute (str): name of the attribute where the flags output", "event_values.get( attribute_name, None) is not None: string_pieces.append(format_string_pieces[map_index]) format_string = self._format_string_separator.join(string_pieces)", "self._format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if attribute_names: self._format_string_attribute_names.extend(attribute_names) return set(self._format_string_attribute_names)", "\"\"\" for format_string_piece in format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if", "'N/A') error_message = ( 'unable to format string: \"{0:s}\" missing", "event values. \"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper): \"\"\"Helper for formatting enumeration event", "[] self.helpers = [] @property def data_type(self): \"\"\"str: unique identifier", "attribute_names[0] format_string_pieces_map.append(attribute_name) def _CreateFormatStringMaps(self): \"\"\"Creates the format string maps. Maps", "self.value_if_false = value_if_false self.value_if_true = value_if_true def FormatEventValues(self, event_values): \"\"\"Formats", "contains the flags input value. output_attribute (Optional[str]): name of the", "add. \"\"\" self.helpers.append(helper) @abc.abstractmethod def GetMessage(self, event_values): \"\"\"Determines the message.", "the flags output value should be stored. values (dict[str, str]):", "if format_string_separator is None: format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces", "the format string maps. Maps are built of the string", "should be stored. values (dict[str, str]): mapping of enumeration input", "is encountered. 
\"\"\" for format_string_piece in format_string_pieces: attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(", "if flag & input_value: output_values.append(mapped_value) event_values[self.output_attribute] = ', '.join(output_values) class", "helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" # The format string", "\"\"\" for helper in self.helpers: helper.FormatEventValues(event_values) @abc.abstractmethod def GetFormatStringAttributeNames(self): \"\"\"Retrieves", "format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces = format_string_pieces or []", "contain at maximum one unique attribute name. Format string pieces", "\"{0:s}\" missing required event ' 'value: {1!s}').format(format_string, exception) error_message =", "values. Returns: str: short message. \"\"\" class BasicEventFormatter(EventFormatter): \"\"\"Format event", "self._format_string_pieces_map: self._CreateFormatStringMaps() return self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map, event_values) def GetMessageShort(self, event_values):", "is faster than re.sub() or # string.strip(). return message_string.replace('\\r', '').replace('\\n',", "for the event data supported by the formatter. format_string (Optional[str]):", "as description_long and description_short in l2t_csv. Plaso no longer stores", "value_if_false self.value_if_true = value_if_true def FormatEventValues(self, event_values): \"\"\"Formats event values", "attribute names in the format string. Returns: set(str): attribute names.", "self._format_string_separator = format_string_separator self._format_string_short_pieces = format_string_short_pieces or [] self._format_string_short_pieces_map =", "stores these field explicitly. 
A formatter, with a format string", "= value_if_true def FormatEventValues(self, event_values): \"\"\"Formats event values using the", "formatter. format_string_pieces (Optional[list[str]]): (long) message format string pieces. format_string_separator (Optional[str]):", "attribute that contains the boolean input value. output_attribute (Optional[str]): name", "event formatter helpers. \"\"\" def __init__( self, data_type='basic', format_string=None, format_string_short=None):", "event_values (dict[str, object]): event values. Returns: str: short message. \"\"\"", "pieces. format_string_separator (Optional[str]): string by which separate format string pieces", "formatter helpers. \"\"\" def __init__( self, data_type='basic', format_string=None, format_string_short=None): \"\"\"Initializes", "\"\"\" def __init__( self, input_attribute=None, output_attribute=None, value_if_false=None, value_if_true=None): \"\"\"Initialized a", "format event values using format string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR = '", "string piece should contain at maximum one unique attribute name.", "self.helpers = [] @property def data_type(self): \"\"\"str: unique identifier for", "def _CreateFormatStringMaps(self): \"\"\"Creates the format string maps. Maps are built", "by the formatter. \"\"\" super(EventFormatter, self).__init__() self._data_type = data_type self._format_string_attribute_names", "carriage return and linefeed form the message strings. # Using", "_CreateFormatStringMap( self, format_string_pieces, format_string_pieces_map): \"\"\"Creates a format string map. The", "attribute names. \"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names = []", "self.input_attribute = input_attribute self.output_attribute = output_attribute self.value_if_false = value_if_false self.value_if_true", "format_string, event_values): \"\"\"Determines the formatted message. Args: format_string (str): message", "message format string. 
Attributes: custom_helpers (list[str]): identifiers of custom event", "str: message. \"\"\" return self._FormatMessage(self._format_string, event_values) def GetMessageShort(self, event_values): \"\"\"Determines", "format_string_separator is None: format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR super(ConditionalEventFormatter, self).__init__(data_type=data_type) self._format_string_pieces =", "helpers. helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" # The format", "IDENTIFIER = '' @abc.abstractmethod def FormatEventValues(self, event_values): \"\"\"Formats event values", "-*- \"\"\"This file contains the event formatters interface classes. The", "names. \"\"\" if self._format_string_attribute_names is None: self._format_string_attribute_names = ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(", "helper. Args: event_values (dict[str, object]): event values. \"\"\" class EnumerationEventFormatterHelper(EventFormatterHelper):", "required event ' 'value: {1!s}').format(format_string, exception) error_message = ( 'Event:", "If default value is None and there is no corresponding", "a helper for custom formatting of event data.\"\"\" DATA_TYPE =", "boolean input value. output_attribute (Optional[str]): name of the attribute where", "for formatting flags event data. Attributes: input_attribute (str): name of", "format string piece is encountered. \"\"\" for format_string_piece in format_string_pieces:", "= self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( format_string_piece) if len(set(attribute_names)) > 1: raise RuntimeError(( 'Invalid", "return self._ConditionalFormatMessage( self._format_string_pieces, self._format_string_pieces_map, event_values) def GetMessageShort(self, event_values): \"\"\"Determines the", "GetMessage(self, event_values): \"\"\"Determines the message. Args: event_values (dict[str, object]): event", "message. 
\"\"\" try: message_string = format_string.format(**event_values) except KeyError as exception:", "in event_values.items(): attribute_values.append('{0:s}: {1!s}'.format(attribute, value)) message_string = ' '.join(attribute_values) except", "piece is encountered. \"\"\" for format_string_piece in format_string_pieces: attribute_names =", "list containing the attribute name per format string piece. E.g.", "_FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile( '{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}') def __init__(self, data_type='internal'): \"\"\"Initializes an event", "= [] self._format_string_separator = format_string_separator self._format_string_short_pieces = format_string_short_pieces or []", "if the boolean input value is True. \"\"\" super(BooleanEventFormatterHelper, self).__init__()", "self, identifier, input_attribute=None, output_attribute=None): \"\"\"Adds a custom event formatter helper.", "with error: {4:s}').format( event_identifier, data_type, display_name, parser_chain, error_message) logger.error(error_message) message_string", "event formatter helpers. helpers (list[EventFormatterHelper]): event formatter helpers. \"\"\" #", "self._format_string_pieces_map) self._format_string_short_pieces_map = [] self._CreateFormatStringMap( self._format_string_short_pieces, self._format_string_short_pieces_map) def _ConditionalFormatMessage( self,", "output value if the boolean input value is True. \"\"\"", "short message format string. \"\"\" super(BasicEventFormatter, self).__init__(data_type=data_type) self._format_string_attribute_names = None", "event values. \"\"\" input_value = event_values.get(self.input_attribute, None) if input_value: output_value", "identifier for the event data supported by the formatter. \"\"\"", "class EventFormatterHelper(object): \"\"\"Base class of helper for formatting event data.\"\"\"", "formatter helper. 
Args: helper (EventFormatterHelper): event formatter helper to add.", "AddCustomHelper( self, identifier, input_attribute=None, output_attribute=None): \"\"\"Adds a custom event formatter", "data. Attributes: input_attribute (str): name of the attribute that contains", "(str): name of the attribute that contains the flags input", "should be joined. format_string_short_pieces (Optional[list[str]]): short message format string pieces.", "the boolean input value. output_attribute (str): name of the attribute", "self, data_type='conditional', format_string_pieces=None, format_string_separator=None, format_string_short_pieces=None): \"\"\"Initializes a conditional event formatter.", "be mapped to: [0] = \"description\". If the string piece", "self._ConditionalFormatMessage( format_string_pieces, format_string_pieces_map, event_values) # Truncate the short message string", "stored. value_if_false (str): output value if the boolean input value", "to convert the event object values into a formatted string", "and other formats are dependent on a message field, referred", "of flags input and output values. \"\"\" super(FlagsEventFormatterHelper, self).__init__() self.input_attribute", "in l2t_csv. Plaso no longer stores these field explicitly. A", "in the format string. Returns: set(str): attribute names. \"\"\" if", "formatter. Args: data_type (Optional[str]): unique identifier for the event data", "format string pieces.\"\"\" _DEFAULT_FORMAT_STRING_SEPARATOR = ' ' def __init__( self,", "output_attribute self.value_if_false = value_if_false self.value_if_true = value_if_true def FormatEventValues(self, event_values):", "\"\"\"Helper for formatting enumeration event data. Attributes: default (str): default", "helper. Args: event_values (dict[str, object]): event values. 
\"\"\" class BooleanEventFormatterHelper(EventFormatterHelper):", "= output_value class CustomEventFormatterHelper(EventFormatterHelper): \"\"\"Base class for a helper for", "plaso.formatters import logger class EventFormatterHelper(object): \"\"\"Base class of helper for", "event data. Args: input_attribute (Optional[str]): name of the attribute that", "formatting of event data.\"\"\" DATA_TYPE = '' IDENTIFIER = ''", "= event_values.get('uuid', 'N/A') parser_chain = event_values.get('parser', 'N/A') error_message = (", "stored. values (Optional[dict[str, str]]): mapping of flags input and output", "format string pieces. format_string_pieces_map (list[int, str]): format string pieces map.", "except UnicodeDecodeError as exception: data_type = event_values.get('data_type', 'N/A') display_name =", "format string can be defined as: # {name}, {name:format}, {name!conversion},", "format_string_separator self._format_string_short_pieces = format_string_short_pieces or [] self._format_string_short_pieces_map = [] def", "should be stored. values (Optional[dict[str, str]]): mapping of enumeration input", "is defined as {attribute_name}. Args: data_type (Optional[str]): unique identifier for", "output value should be stored. \"\"\" self.custom_helpers.append(identifier) def AddHelper(self, helper):", "if not attribute_name or event_values.get( attribute_name, None) is not None:", "= ( self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall( self._format_string)) return set(self._format_string_attribute_names) def GetMessage(self, event_values): \"\"\"Determines", "short message. \"\"\" if not self._format_string_pieces_map: self._CreateFormatStringMaps() if (self._format_string_short_pieces and", "if not attribute_names: # The text format string piece is", "the enumeration output value should be stored. 
values (dict[str, str]):", "self._format_string = format_string self._format_string_short = format_string_short def GetFormatStringAttributeNames(self): \"\"\"Retrieves the", "event_values (dict[str, object]): event values. \"\"\" for helper in self.helpers:", "\"\"\" def __init__( self, default=None, input_attribute=None, output_attribute=None, values=None): \"\"\"Initialized a" ]
[ "<reponame>LiuKaiqiang94/PyStudyExample def main(): val=int(input(\"input a num\")) if val<10: print(\"A\") elif", "val=int(input(\"input a num\")) if val<10: print(\"A\") elif val<20: print(\"B\") elif", "print(\"A\") elif val<20: print(\"B\") elif val<30: print(\"C\") else: print(\"D\") main()", "num\")) if val<10: print(\"A\") elif val<20: print(\"B\") elif val<30: print(\"C\")", "a num\")) if val<10: print(\"A\") elif val<20: print(\"B\") elif val<30:", "def main(): val=int(input(\"input a num\")) if val<10: print(\"A\") elif val<20:", "if val<10: print(\"A\") elif val<20: print(\"B\") elif val<30: print(\"C\") else:", "val<10: print(\"A\") elif val<20: print(\"B\") elif val<30: print(\"C\") else: print(\"D\")", "main(): val=int(input(\"input a num\")) if val<10: print(\"A\") elif val<20: print(\"B\")" ]
[ "Miyazaki \"\"\" imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os, cv2,", "== 2: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA)", "font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 1: tempim =", "Sep 4 22:27:11 2020 @author: Miyazaki \"\"\" imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\"", "tqdm import pandas as pd os.chdir(imdir) os.makedirs(\"../annotatedimages\", exist_ok = True)", "tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)", "as pd os.chdir(imdir) os.makedirs(\"../annotatedimages\", exist_ok = True) imlist = os.listdir(\"./\")", "[i for i in imlist if os.path.splitext(i)[1] == '.jpg' \\", "== 1: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA)", "int(result.loc[i]) == 3: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500), font,", "tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) ==", "os.listdir(\"./\") imlist = [i for i in imlist if os.path.splitext(i)[1]", "\"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os, cv2, shutil from tqdm import tqdm import", "@author: Miyazaki \"\"\" imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= 
\"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os,", "in imlist if os.path.splitext(i)[1] == '.jpg' \\ or os.path.splitext(i)[1] ==", "i in tqdm(range(len(imlist))): if int(result.loc[i]) == 0: tempim = cv2.imread(imlist[i])", "for i in imlist if os.path.splitext(i)[1] == '.jpg' \\ or", "== 3: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA)", "int(result.loc[i]) == 2: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500), font,", "'.jpg' \\ or os.path.splitext(i)[1] == '.png'] imlist.sort() result = pd.read_csv(resultdir)", "== '.png'] imlist.sort() result = pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX for", "imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os, cv2, shutil from", "on Fri Sep 4 22:27:11 2020 @author: Miyazaki \"\"\" imdir", "0: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]),", "cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 2: tempim", "= cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif", "if int(result.loc[i]) == 0: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500),", "elif int(result.loc[i]) == 3: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500),", "int(result.loc[i]) == 0: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500), font,", "= 
cv2.FONT_HERSHEY_SIMPLEX for i in tqdm(range(len(imlist))): if int(result.loc[i]) == 0:", "tqdm(range(len(imlist))): if int(result.loc[i]) == 0: tempim = cv2.imread(imlist[i]) tempim =", "imlist if os.path.splitext(i)[1] == '.jpg' \\ or os.path.splitext(i)[1] == '.png']", "tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)", "= cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) else:", "os.path.splitext(i)[1] == '.png'] imlist.sort() result = pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX", "2: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]),", "from tqdm import tqdm import pandas as pd os.chdir(imdir) os.makedirs(\"../annotatedimages\",", "elif int(result.loc[i]) == 2: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500),", "int(result.loc[i]) == 1: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500), font,", "tempim) elif int(result.loc[i]) == 1: tempim = cv2.imread(imlist[i]) tempim =", "Fri Sep 4 22:27:11 2020 @author: Miyazaki \"\"\" imdir =", "= pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX for i in tqdm(range(len(imlist))): if", "coding: utf-8 -*- \"\"\" Created on Fri Sep 4 22:27:11", "= True) imlist = os.listdir(\"./\") imlist = [i for i", "font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 2: tempim =", "imlist = [i for i in imlist if os.path.splitext(i)[1] ==", "= cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif", 
"<filename>Annotated_video/test/Annotatedvideo_worm.py # -*- coding: utf-8 -*- \"\"\" Created on Fri", "\"\"\" Created on Fri Sep 4 22:27:11 2020 @author: Miyazaki", "cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i])", "result = pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX for i in tqdm(range(len(imlist))):", "2020 @author: Miyazaki \"\"\" imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import", "import pandas as pd os.chdir(imdir) os.makedirs(\"../annotatedimages\", exist_ok = True) imlist", "cv2, shutil from tqdm import tqdm import pandas as pd", "imlist = os.listdir(\"./\") imlist = [i for i in imlist", "1: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]),", "for i in tqdm(range(len(imlist))): if int(result.loc[i]) == 0: tempim =", "cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 2: tempim = cv2.imread(imlist[i]) tempim", "\"\"\" imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os, cv2, shutil", "elif int(result.loc[i]) == 1: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500),", "Created on Fri Sep 4 22:27:11 2020 @author: Miyazaki \"\"\"", "utf-8 -*- \"\"\" Created on Fri Sep 4 22:27:11 2020", "cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 1: tempim = cv2.imread(imlist[i]) tempim", "pandas as pd os.chdir(imdir) 
os.makedirs(\"../annotatedimages\", exist_ok = True) imlist =", "== '.jpg' \\ or os.path.splitext(i)[1] == '.png'] imlist.sort() result =", "tempim) elif int(result.loc[i]) == 3: tempim = cv2.imread(imlist[i]) tempim =", "i in imlist if os.path.splitext(i)[1] == '.jpg' \\ or os.path.splitext(i)[1]", "pd os.chdir(imdir) os.makedirs(\"../annotatedimages\", exist_ok = True) imlist = os.listdir(\"./\") imlist", "= os.listdir(\"./\") imlist = [i for i in imlist if", "== 0: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA)", "1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 1: tempim = cv2.imread(imlist[i])", "\"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os, cv2, shutil from tqdm import", "4 22:27:11 2020 @author: Miyazaki \"\"\" imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir=", "resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os, cv2, shutil from tqdm import tqdm", "\\ or os.path.splitext(i)[1] == '.png'] imlist.sort() result = pd.read_csv(resultdir) font", "pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX for i in tqdm(range(len(imlist))): if int(result.loc[i])", "1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 2: tempim = cv2.imread(imlist[i])", "-*- \"\"\" Created on Fri Sep 4 22:27:11 2020 @author:", "os.chdir(imdir) os.makedirs(\"../annotatedimages\", exist_ok = True) imlist = os.listdir(\"./\") imlist =", "= [i for i in imlist if os.path.splitext(i)[1] == '.jpg'", "font = cv2.FONT_HERSHEY_SIMPLEX for i in 
tqdm(range(len(imlist))): if int(result.loc[i]) ==", "cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 1: tempim", "= cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif", "cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i])", "22:27:11 2020 @author: Miyazaki \"\"\" imdir = \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\"", "= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3\" resultdir= \"C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv\" import os, cv2, shutil from tqdm", "import tqdm import pandas as pd os.chdir(imdir) os.makedirs(\"../annotatedimages\", exist_ok =", "imlist.sort() result = pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX for i in", "3: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]),", "# -*- coding: utf-8 -*- \"\"\" Created on Fri Sep", "True) imlist = os.listdir(\"./\") imlist = [i for i in", "cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 3: tempim = cv2.imread(imlist[i]) tempim", "shutil from tqdm import tqdm import pandas as pd os.chdir(imdir)", "= cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 1:", "= 
cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 3:", "import os, cv2, shutil from tqdm import tqdm import pandas", "font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 3: tempim =", "cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) else: pass", "tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) ==", "tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)", "if os.path.splitext(i)[1] == '.jpg' \\ or os.path.splitext(i)[1] == '.png'] imlist.sort()", "'.png'] imlist.sort() result = pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX for i", "in tqdm(range(len(imlist))): if int(result.loc[i]) == 0: tempim = cv2.imread(imlist[i]) tempim", "os.path.splitext(i)[1] == '.jpg' \\ or os.path.splitext(i)[1] == '.png'] imlist.sort() result", "tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)", "cv2.FONT_HERSHEY_SIMPLEX for i in tqdm(range(len(imlist))): if int(result.loc[i]) == 0: tempim", "os, cv2, shutil from tqdm import tqdm import pandas as", "1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 3: tempim = cv2.imread(imlist[i])", "or os.path.splitext(i)[1] == '.png'] imlist.sort() result = pd.read_csv(resultdir) font =", "tqdm import tqdm import pandas as pd os.chdir(imdir) os.makedirs(\"../annotatedimages\", exist_ok", 
"os.makedirs(\"../annotatedimages\", exist_ok = True) imlist = os.listdir(\"./\") imlist = [i", "exist_ok = True) imlist = os.listdir(\"./\") imlist = [i for", "cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i])", "-*- coding: utf-8 -*- \"\"\" Created on Fri Sep 4", "tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) ==", "= cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 2:", "cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 3: tempim", "tempim) elif int(result.loc[i]) == 2: tempim = cv2.imread(imlist[i]) tempim =" ]
[ "which you want the curve between the two x values.", "compute fractions, enter expressions as numerator(over)denominator. For example, to process", "= DisableAbleCommandHandler(\"integrate\", integrate, pass_args=True) ZEROES_HANDLER = DisableAbleCommandHandler(\"zeroes\", zeroes, pass_args=True) TANGENT_HANDLER", "pi` - /sin: Sine `/sin 0` - /tan: Tangent `/tan", "Under Developmeent.. More features soon - /cos: Cosine `/cos pi`", "pass_args=True) INTEGRATE_HANDLER = DisableAbleCommandHandler(\"integrate\", integrate, pass_args=True) ZEROES_HANDLER = DisableAbleCommandHandler(\"zeroes\", zeroes,", "send in your expression as 2(over)4. The result expression will", "message.reply_text(newton.derive('{}'.format(args[0]))) @run_async def integrate(update, context): args=context.args args=str(args) message = update.effective_message", "More features soon - /cos: Cosine `/cos pi` - /sin:", "args=context.args args=str(args) message = update.effective_message message.reply_text(newton.derive('{}'.format(args[0]))) @run_async def integrate(update, context):", "DisableAbleCommandHandler(\"arccos\", arccos, pass_args=True) ARCSIN_HANDLER = DisableAbleCommandHandler(\"arcsin\", arcsin, pass_args=True) ARCTAN_HANDLER =", "request as c:d|f(x) where c is the starting x value,", "Developmeent.. 
More features soon - /cos: Cosine `/cos pi` -", "DisableAbleCommandHandler(\"derive\", derive, pass_args=True) INTEGRATE_HANDLER = DisableAbleCommandHandler(\"integrate\", integrate, pass_args=True) ZEROES_HANDLER =", "Bot, MessageEntity from telegram.ext import CommandHandler, run_async from emilia import", "x value and f(x) is the function expression, the separator", "send the request as c:d|f(x) where c is the starting", "run_async from emilia import dispatcher from emilia.modules.disable import DisableAbleCommandHandler from", "def sin(update, context): args = context.args message = update.effective_message message.reply_text(math.sin(int(args[0])))", "from typing import List import requests from telegram import Message,", "an example request. To find the area under a function,", "context.args message = update.effective_message message.reply_text(math.sin(int(args[0]))) @run_async def tan(update, context): args", "= DisableAbleCommandHandler(\"abs\", abs, pass_args=True) LOG_HANDLER = DisableAbleCommandHandler(\"log\", log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER)", "from emilia import dispatcher from emilia.modules.disable import DisableAbleCommandHandler from emilia.modules.helper_funcs.alternate", "args = context.args message = update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async def arccos(update,", "DisableAbleCommandHandler(\"log\", log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER) dispatcher.add_handler(INTEGRATE_HANDLER) dispatcher.add_handler(ZEROES_HANDLER) dispatcher.add_handler(TANGENT_HANDLER) dispatcher.add_handler(AREA_HANDLER)", "'|'. See the table above for an example request. 
To", "= context.args message = update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async def arccos(update, context):", "@run_async def abs(update, context): args = context.args message = update.effective_message", "message.reply_text(math.asin(int(args[0]))) @run_async def arctan(update, context): args = context.args message =", "= DisableAbleCommandHandler(\"zeroes\", zeroes, pass_args=True) TANGENT_HANDLER = DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True) AREA_HANDLER", "log(update, context): args = context.args message = update.effective_message message.reply_text(math.log(int(args[0]))) __help__", "update.effective_message message.reply_text(newton.tangent('{}'.format(args[0]))) @run_async def area(update, context): args=context.args args=str(args) message =", "Update, Bot, MessageEntity from telegram.ext import CommandHandler, run_async from emilia", "and f(x) is the function expression, the separator is a", "update.effective_message message.reply_text(newton.area('{}'.format(args[0]))) @run_async def cos(update, context): args = context.args message", "Inverse Tangent `/arctan 0` - /abs: Absolute Value `/abs -1`", "dispatcher.add_handler(INTEGRATE_HANDLER) dispatcher.add_handler(ZEROES_HANDLER) dispatcher.add_handler(TANGENT_HANDLER) dispatcher.add_handler(AREA_HANDLER) dispatcher.add_handler(COS_HANDLER) dispatcher.add_handler(SIN_HANDLER) dispatcher.add_handler(TAN_HANDLER) dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER)", "def zeroes(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async", "message = update.effective_message message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async def factor(update, context): args=context.args args=str(args)", "= context.args message = update.effective_message message.reply_text(math.acos(int(args[0]))) 
@run_async def arcsin(update, context):", "context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def tangent(update,", "import pynewtonmath as newton import math @run_async def simplify(update, context):", "/abs: Absolute Value `/abs -1` - /log: Logarithm `/log 2l8`", "math notation (1/2, 3/4). \"\"\" SIMPLIFY_HANDLER = DisableAbleCommandHandler(\"math\", simplify, pass_args=True)", "context): args = context.args message = update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async def", "sin(update, context): args = context.args message = update.effective_message message.reply_text(math.sin(int(args[0]))) @run_async", "SIMPLIFY_HANDLER = DisableAbleCommandHandler(\"math\", simplify, pass_args=True) FACTOR_HANDLER = DisableAbleCommandHandler(\"factor\", factor, pass_args=True)", "ARCSIN_HANDLER = DisableAbleCommandHandler(\"arcsin\", arcsin, pass_args=True) ARCTAN_HANDLER = DisableAbleCommandHandler(\"arctan\", arctan, pass_args=True)", "@run_async def arccos(update, context): args = context.args message = update.effective_message", "Inverse Cosine `/arccos 1` - /arcsin: Inverse Sine `/arcsin 0`", "context.args message = update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async def arccos(update, context): args", "typing import List import requests from telegram import Message, Update,", "dispatcher.add_handler(TAN_HANDLER) dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER) __mod_name__ = \"Math\" __command_list__", "values. To compute fractions, enter expressions as numerator(over)denominator. For example,", "fractions, enter expressions as numerator(over)denominator. 
For example, to process 2/4", "ARCTAN_HANDLER = DisableAbleCommandHandler(\"arctan\", arctan, pass_args=True) ABS_HANDLER = DisableAbleCommandHandler(\"abs\", abs, pass_args=True)", "where c is the given x value and f(x) is", "update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async def arctan(update, context): args = context.args message", "0` - /arctan: Inverse Tangent `/arctan 0` - /abs: Absolute", "derive(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.derive('{}'.format(args[0]))) @run_async def", "args=context.args args=str(args) message = update.effective_message message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async def zeroes(update, context):", "= DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True) AREA_HANDLER = DisableAbleCommandHandler(\"area\", area, pass_args=True) COS_HANDLER", "the separator is a vertical bar '|'. See the table", "Logarithm `/log 2l8` __Keep in mind__: To find the tangent", "dispatcher.add_handler(LOG_HANDLER) __mod_name__ = \"Math\" __command_list__ = [\"math\",\"factor\",\"derive\",\"integrate\",\"zeroes\",\"tangent\",\"area\",\"cos\",\"sin\",\"tan\",\"arccos\",\"arcsin\",\"arctan\",\"abs\",\"log\"] __handlers__ = [", "@run_async def arctan(update, context): args = context.args message = update.effective_message", "Message, Update, Bot, MessageEntity from telegram.ext import CommandHandler, run_async from", "pass_args=True) TAN_HANDLER = DisableAbleCommandHandler(\"tan\", tan, pass_args=True) ARCCOS_HANDLER = DisableAbleCommandHandler(\"arccos\", arccos,", "dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER) __mod_name__ = \"Math\" __command_list__ = [\"math\",\"factor\",\"derive\",\"integrate\",\"zeroes\",\"tangent\",\"area\",\"cos\",\"sin\",\"tan\",\"arccos\",\"arcsin\",\"arctan\",\"abs\",\"log\"]", 
"DisableAbleCommandHandler from emilia.modules.helper_funcs.alternate import send_message import pynewtonmath as newton import", "context.args message = update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async def arctan(update, context): args", "message.reply_text(newton.factor('{}'.format(args[0]))) @run_async def derive(update, context): args=context.args args=str(args) message = update.effective_message", "= DisableAbleCommandHandler(\"sin\", sin, pass_args=True) TAN_HANDLER = DisableAbleCommandHandler(\"tan\", tan, pass_args=True) ARCCOS_HANDLER", "@run_async def tan(update, context): args = context.args message = update.effective_message", "ZEROES_HANDLER = DisableAbleCommandHandler(\"zeroes\", zeroes, pass_args=True) TANGENT_HANDLER = DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True)", "/sin: Sine `/sin 0` - /tan: Tangent `/tan 0` -", "from emilia.modules.disable import DisableAbleCommandHandler from emilia.modules.helper_funcs.alternate import send_message import pynewtonmath", "args = context.args message = update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async def arctan(update,", "To find the tangent line of a function at a", "requests from telegram import Message, Update, Bot, MessageEntity from telegram.ext", "update.effective_message message.reply_text(math.sin(int(args[0]))) @run_async def tan(update, context): args = context.args message", "@run_async def integrate(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.integrate('{}'.format(args[0])))", "separator is a vertical bar '|'. See the table above", "two x values. 
To compute fractions, enter expressions as numerator(over)denominator.", "context): args = context.args message = update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async def", "def abs(update, context): args = context.args message = update.effective_message message.reply_text(math.fabs(int(args[0])))", "arccos, pass_args=True) ARCSIN_HANDLER = DisableAbleCommandHandler(\"arcsin\", arcsin, pass_args=True) ARCTAN_HANDLER = DisableAbleCommandHandler(\"arctan\",", "want the curve between the two x values. To compute", "context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.area('{}'.format(args[0]))) @run_async def cos(update,", "`/arcsin 0` - /arctan: Inverse Tangent `/arctan 0` - /abs:", "To compute fractions, enter expressions as numerator(over)denominator. For example, to", "from telegram import Message, Update, Bot, MessageEntity from telegram.ext import", "message = update.effective_message message.reply_text(math.fabs(int(args[0]))) @run_async def log(update, context): args =", "in mind__: To find the tangent line of a function", "a function at a certain x value, send the request", "Cosine `/arccos 1` - /arcsin: Inverse Sine `/arcsin 0` -", "math @run_async def simplify(update, context): args=context.args args=str(args) message = update.effective_message", "must send in your expression as 2(over)4. The result expression", "TAN_HANDLER = DisableAbleCommandHandler(\"tan\", tan, pass_args=True) ARCCOS_HANDLER = DisableAbleCommandHandler(\"arccos\", arccos, pass_args=True)", "message = update.effective_message message.reply_text(newton.tangent('{}'.format(args[0]))) @run_async def area(update, context): args=context.args args=str(args)", "= update.effective_message message.reply_text(math.log(int(args[0]))) __help__ = \"\"\" Under Developmeent.. 
More features", "args=context.args args=str(args) message = update.effective_message message.reply_text(newton.tangent('{}'.format(args[0]))) @run_async def area(update, context):", "request as c|f(x) where c is the given x value", "DisableAbleCommandHandler(\"tan\", tan, pass_args=True) ARCCOS_HANDLER = DisableAbleCommandHandler(\"arccos\", arccos, pass_args=True) ARCSIN_HANDLER =", "COS_HANDLER = DisableAbleCommandHandler(\"cos\", cos, pass_args=True) SIN_HANDLER = DisableAbleCommandHandler(\"sin\", sin, pass_args=True)", "f(x) is the function under which you want the curve", "= context.args message = update.effective_message message.reply_text(math.log(int(args[0]))) __help__ = \"\"\" Under", "context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async def factor(update,", "c is the given x value and f(x) is the", "= DisableAbleCommandHandler(\"arctan\", arctan, pass_args=True) ABS_HANDLER = DisableAbleCommandHandler(\"abs\", abs, pass_args=True) LOG_HANDLER", "expression will be in standard math notation (1/2, 3/4). 
\"\"\"", "2l8` __Keep in mind__: To find the tangent line of", "@run_async def tangent(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.tangent('{}'.format(args[0])))", "@run_async def sin(update, context): args = context.args message = update.effective_message", "of a function at a certain x value, send the", "the given x value and f(x) is the function expression,", "- /abs: Absolute Value `/abs -1` - /log: Logarithm `/log", "line of a function at a certain x value, send", "dispatcher.add_handler(TANGENT_HANDLER) dispatcher.add_handler(AREA_HANDLER) dispatcher.add_handler(COS_HANDLER) dispatcher.add_handler(SIN_HANDLER) dispatcher.add_handler(TAN_HANDLER) dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER)", "notation (1/2, 3/4). \"\"\" SIMPLIFY_HANDLER = DisableAbleCommandHandler(\"math\", simplify, pass_args=True) FACTOR_HANDLER", "tan, pass_args=True) ARCCOS_HANDLER = DisableAbleCommandHandler(\"arccos\", arccos, pass_args=True) ARCSIN_HANDLER = DisableAbleCommandHandler(\"arcsin\",", "the table above for an example request. 
To find the", "dispatcher.add_handler(COS_HANDLER) dispatcher.add_handler(SIN_HANDLER) dispatcher.add_handler(TAN_HANDLER) dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER) __mod_name__ =", "area(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.area('{}'.format(args[0]))) @run_async def", "/arcsin: Inverse Sine `/arcsin 0` - /arctan: Inverse Tangent `/arctan", "= update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async def arccos(update, context): args = context.args", "soon - /cos: Cosine `/cos pi` - /sin: Sine `/sin", "context): args = context.args message = update.effective_message message.reply_text(math.fabs(int(args[0]))) @run_async def", "import dispatcher from emilia.modules.disable import DisableAbleCommandHandler from emilia.modules.helper_funcs.alternate import send_message", "be in standard math notation (1/2, 3/4). \"\"\" SIMPLIFY_HANDLER =", "= update.effective_message message.reply_text(math.sin(int(args[0]))) @run_async def tan(update, context): args = context.args", "`/log 2l8` __Keep in mind__: To find the tangent line", "will be in standard math notation (1/2, 3/4). \"\"\" SIMPLIFY_HANDLER", "= update.effective_message message.reply_text(math.cos(int(args[0]))) @run_async def sin(update, context): args = context.args", "= context.args message = update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async def arctan(update, context):", "args = context.args message = update.effective_message message.reply_text(math.sin(int(args[0]))) @run_async def tan(update,", "- /log: Logarithm `/log 2l8` __Keep in mind__: To find", "value, and f(x) is the function under which you want", "tangent line of a function at a certain x value,", "= \"\"\" Under Developmeent.. 
More features soon - /cos: Cosine", "CommandHandler, run_async from emilia import dispatcher from emilia.modules.disable import DisableAbleCommandHandler", "DisableAbleCommandHandler(\"arctan\", arctan, pass_args=True) ABS_HANDLER = DisableAbleCommandHandler(\"abs\", abs, pass_args=True) LOG_HANDLER =", "is a vertical bar '|'. See the table above for", "update.effective_message message.reply_text(math.log(int(args[0]))) __help__ = \"\"\" Under Developmeent.. More features soon", "message = update.effective_message message.reply_text(math.sin(int(args[0]))) @run_async def tan(update, context): args =", "TANGENT_HANDLER = DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True) AREA_HANDLER = DisableAbleCommandHandler(\"area\", area, pass_args=True)", "expression as 2(over)4. The result expression will be in standard", "args=str(args) message = update.effective_message message.reply_text(newton.area('{}'.format(args[0]))) @run_async def cos(update, context): args", "cos(update, context): args = context.args message = update.effective_message message.reply_text(math.cos(int(args[0]))) @run_async", "message.reply_text(math.atan(int(args[0]))) @run_async def abs(update, context): args = context.args message =", "- /arccos: Inverse Cosine `/arccos 1` - /arcsin: Inverse Sine", "= DisableAbleCommandHandler(\"log\", log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER) dispatcher.add_handler(INTEGRATE_HANDLER) dispatcher.add_handler(ZEROES_HANDLER) dispatcher.add_handler(TANGENT_HANDLER)", "def cos(update, context): args = context.args message = update.effective_message message.reply_text(math.cos(int(args[0])))", "context.args message = update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async def abs(update, context): args", "@run_async def log(update, context): args = context.args message = update.effective_message", "0` - /arccos: Inverse Cosine `/arccos 1` 
- /arcsin: Inverse", "def integrate(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async", "d is the ending x value, and f(x) is the", "message = update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def tangent(update, context): args=context.args args=str(args)", "zeroes, pass_args=True) TANGENT_HANDLER = DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True) AREA_HANDLER = DisableAbleCommandHandler(\"area\",", "as newton import math @run_async def simplify(update, context): args=context.args args=str(args)", "MessageEntity from telegram.ext import CommandHandler, run_async from emilia import dispatcher", "= DisableAbleCommandHandler(\"area\", area, pass_args=True) COS_HANDLER = DisableAbleCommandHandler(\"cos\", cos, pass_args=True) SIN_HANDLER", "function, send the request as c:d|f(x) where c is the", "= update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async def abs(update, context): args = context.args", "SIN_HANDLER = DisableAbleCommandHandler(\"sin\", sin, pass_args=True) TAN_HANDLER = DisableAbleCommandHandler(\"tan\", tan, pass_args=True)", "`/cos pi` - /sin: Sine `/sin 0` - /tan: Tangent", "log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER) dispatcher.add_handler(INTEGRATE_HANDLER) dispatcher.add_handler(ZEROES_HANDLER) dispatcher.add_handler(TANGENT_HANDLER) dispatcher.add_handler(AREA_HANDLER) dispatcher.add_handler(COS_HANDLER)", "curve between the two x values. 
To compute fractions, enter", "dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER) dispatcher.add_handler(INTEGRATE_HANDLER) dispatcher.add_handler(ZEROES_HANDLER) dispatcher.add_handler(TANGENT_HANDLER) dispatcher.add_handler(AREA_HANDLER) dispatcher.add_handler(COS_HANDLER) dispatcher.add_handler(SIN_HANDLER) dispatcher.add_handler(TAN_HANDLER)", "x value, send the request as c|f(x) where c is", "update.effective_message message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async def zeroes(update, context): args=context.args args=str(args) message =", "send the request as c|f(x) where c is the given", "2/4 you must send in your expression as 2(over)4. The", "context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async def zeroes(update,", "the request as c|f(x) where c is the given x", "the ending x value, and f(x) is the function under", "DisableAbleCommandHandler(\"factor\", factor, pass_args=True) DERIVE_HANDLER = DisableAbleCommandHandler(\"derive\", derive, pass_args=True) INTEGRATE_HANDLER =", "between the two x values. To compute fractions, enter expressions", "`/sin 0` - /tan: Tangent `/tan 0` - /arccos: Inverse", "/arctan: Inverse Tangent `/arctan 0` - /abs: Absolute Value `/abs", "telegram.ext import CommandHandler, run_async from emilia import dispatcher from emilia.modules.disable", "factor(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.factor('{}'.format(args[0]))) @run_async def", "args=str(args) message = update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def tangent(update, context): args=context.args", "above for an example request. 
To find the area under", "message.reply_text(math.cos(int(args[0]))) @run_async def sin(update, context): args = context.args message =", "newton import math @run_async def simplify(update, context): args=context.args args=str(args) message", "__mod_name__ = \"Math\" __command_list__ = [\"math\",\"factor\",\"derive\",\"integrate\",\"zeroes\",\"tangent\",\"area\",\"cos\",\"sin\",\"tan\",\"arccos\",\"arcsin\",\"arctan\",\"abs\",\"log\"] __handlers__ = [ SIMPLIFY_HANDLER,FACTOR_HANDLER,DERIVE_HANDLER,INTEGRATE_HANDLER,TANGENT_HANDLER,ZEROES_HANDLER,AREA_HANDLER,COS_HANDLER,SIN_HANDLER,TAN_HANDLER,ARCCOS_HANDLER,ARCSIN_HANDLER,ARCTAN_HANDLER,ABS_HANDLER,LOG_HANDLER", "integrate, pass_args=True) ZEROES_HANDLER = DisableAbleCommandHandler(\"zeroes\", zeroes, pass_args=True) TANGENT_HANDLER = DisableAbleCommandHandler(\"tangent\",", "args=str(args) message = update.effective_message message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async def zeroes(update, context): args=context.args", "Sine `/arcsin 0` - /arctan: Inverse Tangent `/arctan 0` -", "= context.args message = update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async def abs(update, context):", "message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def tangent(update, context): args=context.args args=str(args) message = update.effective_message", "abs, pass_args=True) LOG_HANDLER = DisableAbleCommandHandler(\"log\", log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER)", "c:d|f(x) where c is the starting x value, d is", "`/arctan 0` - /abs: Absolute Value `/abs -1` - /log:", "result expression will be in standard math notation (1/2, 3/4).", "cos, pass_args=True) SIN_HANDLER = DisableAbleCommandHandler(\"sin\", sin, pass_args=True) TAN_HANDLER = DisableAbleCommandHandler(\"tan\",", "is the given x value and f(x) is the function", "c is the starting x value, d is the ending", 
"\"\"\" SIMPLIFY_HANDLER = DisableAbleCommandHandler(\"math\", simplify, pass_args=True) FACTOR_HANDLER = DisableAbleCommandHandler(\"factor\", factor,", "= DisableAbleCommandHandler(\"arcsin\", arcsin, pass_args=True) ARCTAN_HANDLER = DisableAbleCommandHandler(\"arctan\", arctan, pass_args=True) ABS_HANDLER", "dispatcher.add_handler(ZEROES_HANDLER) dispatcher.add_handler(TANGENT_HANDLER) dispatcher.add_handler(AREA_HANDLER) dispatcher.add_handler(COS_HANDLER) dispatcher.add_handler(SIN_HANDLER) dispatcher.add_handler(TAN_HANDLER) dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER)", "import List import requests from telegram import Message, Update, Bot,", "arcsin(update, context): args = context.args message = update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async", "AREA_HANDLER = DisableAbleCommandHandler(\"area\", area, pass_args=True) COS_HANDLER = DisableAbleCommandHandler(\"cos\", cos, pass_args=True)", "args = context.args message = update.effective_message message.reply_text(math.acos(int(args[0]))) @run_async def arcsin(update,", "message.reply_text(math.tan(int(args[0]))) @run_async def arccos(update, context): args = context.args message =", "= update.effective_message message.reply_text(math.fabs(int(args[0]))) @run_async def log(update, context): args = context.args", "@run_async def arcsin(update, context): args = context.args message = update.effective_message", "= update.effective_message message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async def factor(update, context): args=context.args args=str(args) message", "a function, send the request as c:d|f(x) where c is", "def arctan(update, context): args = context.args message = update.effective_message message.reply_text(math.atan(int(args[0])))", "process 2/4 you must send in your expression as 2(over)4.", "0` - /tan: Tangent `/tan 0` - /arccos: Inverse Cosine", 
"pass_args=True) DERIVE_HANDLER = DisableAbleCommandHandler(\"derive\", derive, pass_args=True) INTEGRATE_HANDLER = DisableAbleCommandHandler(\"integrate\", integrate,", "integrate(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async def", "For example, to process 2/4 you must send in your", "find the tangent line of a function at a certain", "tangent, pass_args=True) AREA_HANDLER = DisableAbleCommandHandler(\"area\", area, pass_args=True) COS_HANDLER = DisableAbleCommandHandler(\"cos\",", "context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.factor('{}'.format(args[0]))) @run_async def derive(update,", "DERIVE_HANDLER = DisableAbleCommandHandler(\"derive\", derive, pass_args=True) INTEGRATE_HANDLER = DisableAbleCommandHandler(\"integrate\", integrate, pass_args=True)", "context): args = context.args message = update.effective_message message.reply_text(math.log(int(args[0]))) __help__ =", "message = update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async def abs(update, context): args =", "value, send the request as c|f(x) where c is the", "message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async def factor(update, context): args=context.args args=str(args) message = update.effective_message", "message = update.effective_message message.reply_text(newton.area('{}'.format(args[0]))) @run_async def cos(update, context): args =", "Cosine `/cos pi` - /sin: Sine `/sin 0` - /tan:", "context): args = context.args message = update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async def", "context.args message = update.effective_message message.reply_text(math.log(int(args[0]))) __help__ = \"\"\" Under Developmeent..", "pass_args=True) ARCTAN_HANDLER = DisableAbleCommandHandler(\"arctan\", arctan, pass_args=True) ABS_HANDLER = DisableAbleCommandHandler(\"abs\", 
abs,", "features soon - /cos: Cosine `/cos pi` - /sin: Sine", "emilia.modules.disable import DisableAbleCommandHandler from emilia.modules.helper_funcs.alternate import send_message import pynewtonmath as", "import send_message import pynewtonmath as newton import math @run_async def", "DisableAbleCommandHandler(\"cos\", cos, pass_args=True) SIN_HANDLER = DisableAbleCommandHandler(\"sin\", sin, pass_args=True) TAN_HANDLER =", "expression, the separator is a vertical bar '|'. See the", "context): args = context.args message = update.effective_message message.reply_text(math.acos(int(args[0]))) @run_async def", "request. To find the area under a function, send the", "you must send in your expression as 2(over)4. The result", "Tangent `/tan 0` - /arccos: Inverse Cosine `/arccos 1` -", "= update.effective_message message.reply_text(newton.tangent('{}'.format(args[0]))) @run_async def area(update, context): args=context.args args=str(args) message", "the starting x value, d is the ending x value,", "the function under which you want the curve between the", "message = update.effective_message message.reply_text(math.cos(int(args[0]))) @run_async def sin(update, context): args =", "message.reply_text(newton.tangent('{}'.format(args[0]))) @run_async def area(update, context): args=context.args args=str(args) message = update.effective_message", "args = context.args message = update.effective_message message.reply_text(math.log(int(args[0]))) __help__ = \"\"\"", "example request. 
To find the area under a function, send", "def derive(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.derive('{}'.format(args[0]))) @run_async", "@run_async def factor(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.factor('{}'.format(args[0])))", "starting x value, d is the ending x value, and", "LOG_HANDLER = DisableAbleCommandHandler(\"log\", log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER) dispatcher.add_handler(INTEGRATE_HANDLER) dispatcher.add_handler(ZEROES_HANDLER)", "Absolute Value `/abs -1` - /log: Logarithm `/log 2l8` __Keep", "= update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def tangent(update, context): args=context.args args=str(args) message", "is the starting x value, d is the ending x", "= DisableAbleCommandHandler(\"tan\", tan, pass_args=True) ARCCOS_HANDLER = DisableAbleCommandHandler(\"arccos\", arccos, pass_args=True) ARCSIN_HANDLER", "telegram import Message, Update, Bot, MessageEntity from telegram.ext import CommandHandler,", "message = update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async def arccos(update, context): args =", "`/arccos 1` - /arcsin: Inverse Sine `/arcsin 0` - /arctan:", "message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async def zeroes(update, context): args=context.args args=str(args) message = update.effective_message", "= update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async def arctan(update, context): args = context.args", "dispatcher from emilia.modules.disable import DisableAbleCommandHandler from emilia.modules.helper_funcs.alternate import send_message import", "@run_async def cos(update, context): args = context.args message = update.effective_message", "import math @run_async def 
# ---------------------------------------------------------------------------
# Command callbacks.  Each reads its input from context.args (the text after
# the command, split on whitespace) and replies in-chat.
#
# Bug fixed here: the newton-backed commands used to do `args = str(args)`
# and then send args[0] — i.e. the first *character* of the list's repr,
# always "[" — to the API.  They now send the user's actual expression.
# ---------------------------------------------------------------------------

def _expression(context):
    """Rebuild the expression the user typed after the command.

    Re-joining the whitespace-split tokens also lets expressions contain
    spaces (e.g. `/math 2 + 2`).
    """
    return ' '.join(context.args)


# Numeric words the help text advertises (e.g. `/cos pi`).
_CONSTANTS = {"pi": math.pi, "e": math.e}


def _parse_number(token):
    """Parse one numeric command argument.

    Accepts 'pi' / 'e' plus any decimal literal.  The previous int() parse
    rejected both 'pi' (shown in the help text) and non-integer input;
    float() keeps every formerly valid input working unchanged.
    """
    word = token.strip().lower()
    if word in _CONSTANTS:
        return _CONSTANTS[word]
    return float(token)


@run_async
def simplify(update, context):
    """/math <expr> — simplified form of <expr> (newton API)."""
    update.effective_message.reply_text(newton.simplify(_expression(context)))


@run_async
def factor(update, context):
    """/factor <expr> — factored form of <expr>."""
    update.effective_message.reply_text(newton.factor(_expression(context)))


@run_async
def derive(update, context):
    """/derive <expr> — derivative of <expr>."""
    update.effective_message.reply_text(newton.derive(_expression(context)))


@run_async
def integrate(update, context):
    """/integrate <expr> — antiderivative of <expr>."""
    update.effective_message.reply_text(newton.integrate(_expression(context)))


@run_async
def zeroes(update, context):
    """/zeroes <expr> — zeroes (roots) of <expr>."""
    update.effective_message.reply_text(newton.zeroes(_expression(context)))


@run_async
def tangent(update, context):
    """/tangent c|f(x) — tangent line of f at x = c."""
    update.effective_message.reply_text(newton.tangent(_expression(context)))


@run_async
def area(update, context):
    """/area c:d|f(x) — area under f between x = c and x = d."""
    update.effective_message.reply_text(newton.area(_expression(context)))


@run_async
def cos(update, context):
    """/cos <x> — cosine of x (radians)."""
    update.effective_message.reply_text(math.cos(_parse_number(context.args[0])))


@run_async
def sin(update, context):
    """/sin <x> — sine of x (radians)."""
    update.effective_message.reply_text(math.sin(_parse_number(context.args[0])))


@run_async
def tan(update, context):
    """/tan <x> — tangent of x (radians)."""
    update.effective_message.reply_text(math.tan(_parse_number(context.args[0])))


@run_async
def arccos(update, context):
    """/arccos <x> — inverse cosine, x in [-1, 1]."""
    update.effective_message.reply_text(math.acos(_parse_number(context.args[0])))


@run_async
def arcsin(update, context):
    """/arcsin <x> — inverse sine, x in [-1, 1]."""
    update.effective_message.reply_text(math.asin(_parse_number(context.args[0])))


@run_async
def arctan(update, context):
    """/arctan <x> — inverse tangent."""
    update.effective_message.reply_text(math.atan(_parse_number(context.args[0])))


# Name shadows the builtin; kept because the handler table registers `abs`.
@run_async
def abs(update, context):  # noqa: A001
    """/abs <x> — absolute value of x."""
    update.effective_message.reply_text(math.fabs(_parse_number(context.args[0])))


@run_async
def log(update, context):
    """/log <x> — natural logarithm of x."""
    update.effective_message.reply_text(math.log(_parse_number(context.args[0])))
For example, to process 2/4 you must", "Inverse Sine `/arcsin 0` - /arctan: Inverse Tangent `/arctan 0`", "simplify, pass_args=True) FACTOR_HANDLER = DisableAbleCommandHandler(\"factor\", factor, pass_args=True) DERIVE_HANDLER = DisableAbleCommandHandler(\"derive\",", "sin, pass_args=True) TAN_HANDLER = DisableAbleCommandHandler(\"tan\", tan, pass_args=True) ARCCOS_HANDLER = DisableAbleCommandHandler(\"arccos\",", "DisableAbleCommandHandler(\"zeroes\", zeroes, pass_args=True) TANGENT_HANDLER = DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True) AREA_HANDLER =", "dispatcher.add_handler(AREA_HANDLER) dispatcher.add_handler(COS_HANDLER) dispatcher.add_handler(SIN_HANDLER) dispatcher.add_handler(TAN_HANDLER) dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER) __mod_name__", "pass_args=True) ARCSIN_HANDLER = DisableAbleCommandHandler(\"arcsin\", arcsin, pass_args=True) ARCTAN_HANDLER = DisableAbleCommandHandler(\"arctan\", arctan,", "simplify(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async def", "ending x value, and f(x) is the function under which", "message = update.effective_message message.reply_text(math.asin(int(args[0]))) @run_async def arctan(update, context): args =", "function at a certain x value, send the request as", "DisableAbleCommandHandler(\"sin\", sin, pass_args=True) TAN_HANDLER = DisableAbleCommandHandler(\"tan\", tan, pass_args=True) ARCCOS_HANDLER =", "Tangent `/arctan 0` - /abs: Absolute Value `/abs -1` -", "from emilia.modules.helper_funcs.alternate import send_message import pynewtonmath as newton import math", "arcsin, pass_args=True) ARCTAN_HANDLER = DisableAbleCommandHandler(\"arctan\", arctan, pass_args=True) ABS_HANDLER = DisableAbleCommandHandler(\"abs\",", "See the table above for an 
example request. To find", "= update.effective_message message.reply_text(newton.factor('{}'.format(args[0]))) @run_async def derive(update, context): args=context.args args=str(args) message", "emilia.modules.helper_funcs.alternate import send_message import pynewtonmath as newton import math @run_async", "the area under a function, send the request as c:d|f(x)", "/cos: Cosine `/cos pi` - /sin: Sine `/sin 0` -", "/log: Logarithm `/log 2l8` __Keep in mind__: To find the", "Sine `/sin 0` - /tan: Tangent `/tan 0` - /arccos:", "function expression, the separator is a vertical bar '|'. See", "dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER) __mod_name__ = \"Math\" __command_list__ = [\"math\",\"factor\",\"derive\",\"integrate\",\"zeroes\",\"tangent\",\"area\",\"cos\",\"sin\",\"tan\",\"arccos\",\"arcsin\",\"arctan\",\"abs\",\"log\"] __handlers__", "example, to process 2/4 you must send in your expression", "tangent(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.tangent('{}'.format(args[0]))) @run_async def", "mind__: To find the tangent line of a function at", "__help__ = \"\"\" Under Developmeent.. 
More features soon - /cos:", "update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def tangent(update, context): args=context.args args=str(args) message =", "- /sin: Sine `/sin 0` - /tan: Tangent `/tan 0`", "def arcsin(update, context): args = context.args message = update.effective_message message.reply_text(math.asin(int(args[0])))", "import Message, Update, Bot, MessageEntity from telegram.ext import CommandHandler, run_async", "message.reply_text(newton.area('{}'.format(args[0]))) @run_async def cos(update, context): args = context.args message =", "pass_args=True) ABS_HANDLER = DisableAbleCommandHandler(\"abs\", abs, pass_args=True) LOG_HANDLER = DisableAbleCommandHandler(\"log\", log,", "update.effective_message message.reply_text(math.acos(int(args[0]))) @run_async def arcsin(update, context): args = context.args message", "your expression as 2(over)4. The result expression will be in", "import CommandHandler, run_async from emilia import dispatcher from emilia.modules.disable import", "= update.effective_message message.reply_text(newton.derive('{}'.format(args[0]))) @run_async def integrate(update, context): args=context.args args=str(args) message", "context.args message = update.effective_message message.reply_text(math.cos(int(args[0]))) @run_async def sin(update, context): args", "context.args message = update.effective_message message.reply_text(math.fabs(int(args[0]))) @run_async def log(update, context): args", "`/tan 0` - /arccos: Inverse Cosine `/arccos 1` - /arcsin:", "0` - /abs: Absolute Value `/abs -1` - /log: Logarithm", "certain x value, send the request as c|f(x) where c", "x values. To compute fractions, enter expressions as numerator(over)denominator. 
For", "context.args message = update.effective_message message.reply_text(math.acos(int(args[0]))) @run_async def arcsin(update, context): args", "= update.effective_message message.reply_text(math.acos(int(args[0]))) @run_async def arcsin(update, context): args = context.args", "f(x) is the function expression, the separator is a vertical", "in your expression as 2(over)4. The result expression will be", "at a certain x value, send the request as c|f(x)", "update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async def arccos(update, context): args = context.args message", "`/abs -1` - /log: Logarithm `/log 2l8` __Keep in mind__:", "args = context.args message = update.effective_message message.reply_text(math.cos(int(args[0]))) @run_async def sin(update,", "bar '|'. See the table above for an example request.", "args=str(args) message = update.effective_message message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async def factor(update, context): args=context.args", "context): args = context.args message = update.effective_message message.reply_text(math.sin(int(args[0]))) @run_async def", "DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True) AREA_HANDLER = DisableAbleCommandHandler(\"area\", area, pass_args=True) COS_HANDLER =", "message = update.effective_message message.reply_text(math.acos(int(args[0]))) @run_async def arcsin(update, context): args =", "@run_async def derive(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.derive('{}'.format(args[0])))", "args = context.args message = update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async def abs(update,", "factor, pass_args=True) DERIVE_HANDLER = DisableAbleCommandHandler(\"derive\", derive, pass_args=True) INTEGRATE_HANDLER = DisableAbleCommandHandler(\"integrate\",", "2(over)4. 
The result expression will be in standard math notation", "as c:d|f(x) where c is the starting x value, d", "find the area under a function, send the request as", "x value, d is the ending x value, and f(x)", "derive, pass_args=True) INTEGRATE_HANDLER = DisableAbleCommandHandler(\"integrate\", integrate, pass_args=True) ZEROES_HANDLER = DisableAbleCommandHandler(\"zeroes\",", "is the function expression, the separator is a vertical bar", "@run_async def area(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.area('{}'.format(args[0])))", "@run_async def simplify(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.simplify('{}'.format(args[0])))", "arctan(update, context): args = context.args message = update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async", "- /arcsin: Inverse Sine `/arcsin 0` - /arctan: Inverse Tangent", "pass_args=True) TANGENT_HANDLER = DisableAbleCommandHandler(\"tangent\", tangent, pass_args=True) AREA_HANDLER = DisableAbleCommandHandler(\"area\", area,", "as c|f(x) where c is the given x value and", "def simplify(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async", "DisableAbleCommandHandler(\"abs\", abs, pass_args=True) LOG_HANDLER = DisableAbleCommandHandler(\"log\", log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER)", "message.reply_text(math.acos(int(args[0]))) @run_async def arcsin(update, context): args = context.args message =", "message.reply_text(math.log(int(args[0]))) __help__ = \"\"\" Under Developmeent.. More features soon -", "as numerator(over)denominator. For example, to process 2/4 you must send", "vertical bar '|'. 
See the table above for an example", "message = update.effective_message message.reply_text(newton.factor('{}'.format(args[0]))) @run_async def derive(update, context): args=context.args args=str(args)", "under a function, send the request as c:d|f(x) where c", "you want the curve between the two x values. To", "pass_args=True) LOG_HANDLER = DisableAbleCommandHandler(\"log\", log, pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER) dispatcher.add_handler(INTEGRATE_HANDLER)", "arctan, pass_args=True) ABS_HANDLER = DisableAbleCommandHandler(\"abs\", abs, pass_args=True) LOG_HANDLER = DisableAbleCommandHandler(\"log\",", "args=context.args args=str(args) message = update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def tangent(update, context):", "context): args = context.args message = update.effective_message message.reply_text(math.cos(int(args[0]))) @run_async def", "update.effective_message message.reply_text(newton.simplify('{}'.format(args[0]))) @run_async def factor(update, context): args=context.args args=str(args) message =", "Value `/abs -1` - /log: Logarithm `/log 2l8` __Keep in", "under which you want the curve between the two x", "the curve between the two x values. To compute fractions,", "pynewtonmath as newton import math @run_async def simplify(update, context): args=context.args", "dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER) __mod_name__ = \"Math\" __command_list__ =", "List import requests from telegram import Message, Update, Bot, MessageEntity", "table above for an example request. 
To find the area", "dispatcher.add_handler(SIN_HANDLER) dispatcher.add_handler(TAN_HANDLER) dispatcher.add_handler(ARCCOS_HANDLER) dispatcher.add_handler(ARCSIN_HANDLER) dispatcher.add_handler(ARCTAN_HANDLER) dispatcher.add_handler(ABS_HANDLER) dispatcher.add_handler(LOG_HANDLER) __mod_name__ = \"Math\"", "= DisableAbleCommandHandler(\"factor\", factor, pass_args=True) DERIVE_HANDLER = DisableAbleCommandHandler(\"derive\", derive, pass_args=True) INTEGRATE_HANDLER", "standard math notation (1/2, 3/4). \"\"\" SIMPLIFY_HANDLER = DisableAbleCommandHandler(\"math\", simplify,", "= \"Math\" __command_list__ = [\"math\",\"factor\",\"derive\",\"integrate\",\"zeroes\",\"tangent\",\"area\",\"cos\",\"sin\",\"tan\",\"arccos\",\"arcsin\",\"arctan\",\"abs\",\"log\"] __handlers__ = [ SIMPLIFY_HANDLER,FACTOR_HANDLER,DERIVE_HANDLER,INTEGRATE_HANDLER,TANGENT_HANDLER,ZEROES_HANDLER,AREA_HANDLER,COS_HANDLER,SIN_HANDLER,TAN_HANDLER,ARCCOS_HANDLER,ARCSIN_HANDLER,ARCTAN_HANDLER,ABS_HANDLER,LOG_HANDLER ]", "message = update.effective_message message.reply_text(newton.integrate('{}'.format(args[0]))) @run_async def zeroes(update, context): args=context.args args=str(args)", "- /arctan: Inverse Tangent `/arctan 0` - /abs: Absolute Value", "def area(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.area('{}'.format(args[0]))) @run_async", "tan(update, context): args = context.args message = update.effective_message message.reply_text(math.tan(int(args[0]))) @run_async", "/arccos: Inverse Cosine `/arccos 1` - /arcsin: Inverse Sine `/arcsin", "args = context.args message = update.effective_message message.reply_text(math.fabs(int(args[0]))) @run_async def log(update,", "as 2(over)4. 
The result expression will be in standard math", "from telegram.ext import CommandHandler, run_async from emilia import dispatcher from", "pass_args=True) COS_HANDLER = DisableAbleCommandHandler(\"cos\", cos, pass_args=True) SIN_HANDLER = DisableAbleCommandHandler(\"sin\", sin,", "import requests from telegram import Message, Update, Bot, MessageEntity from", "the request as c:d|f(x) where c is the starting x", "The result expression will be in standard math notation (1/2,", "given x value and f(x) is the function expression, the", "To find the area under a function, send the request", "def log(update, context): args = context.args message = update.effective_message message.reply_text(math.log(int(args[0])))", "3/4). \"\"\" SIMPLIFY_HANDLER = DisableAbleCommandHandler(\"math\", simplify, pass_args=True) FACTOR_HANDLER = DisableAbleCommandHandler(\"factor\",", "update.effective_message message.reply_text(math.atan(int(args[0]))) @run_async def abs(update, context): args = context.args message", "to process 2/4 you must send in your expression as", "value, d is the ending x value, and f(x) is", "def factor(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.factor('{}'.format(args[0]))) @run_async", "is the ending x value, and f(x) is the function", "DisableAbleCommandHandler(\"area\", area, pass_args=True) COS_HANDLER = DisableAbleCommandHandler(\"cos\", cos, pass_args=True) SIN_HANDLER =", "pass_args=True) dispatcher.add_handler(SIMPLIFY_HANDLER) dispatcher.add_handler(FACTOR_HANDLER) dispatcher.add_handler(DERIVE_HANDLER) dispatcher.add_handler(INTEGRATE_HANDLER) dispatcher.add_handler(ZEROES_HANDLER) dispatcher.add_handler(TANGENT_HANDLER) dispatcher.add_handler(AREA_HANDLER) dispatcher.add_handler(COS_HANDLER) dispatcher.add_handler(SIN_HANDLER)", "the tangent line of a function at a certain x", "-1` - /log: Logarithm `/log 2l8` __Keep in mind__: To", "is the function under which you want the curve 
between", "emilia import dispatcher from emilia.modules.disable import DisableAbleCommandHandler from emilia.modules.helper_funcs.alternate import", "zeroes(update, context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.zeroes('{}'.format(args[0]))) @run_async def", "args=context.args args=str(args) message = update.effective_message message.reply_text(newton.area('{}'.format(args[0]))) @run_async def cos(update, context):", "ABS_HANDLER = DisableAbleCommandHandler(\"abs\", abs, pass_args=True) LOG_HANDLER = DisableAbleCommandHandler(\"log\", log, pass_args=True)", "where c is the starting x value, d is the", "context): args=context.args args=str(args) message = update.effective_message message.reply_text(newton.tangent('{}'.format(args[0]))) @run_async def area(update,", "enter expressions as numerator(over)denominator. For example, to process 2/4 you", "numerator(over)denominator. For example, to process 2/4 you must send in", "ARCCOS_HANDLER = DisableAbleCommandHandler(\"arccos\", arccos, pass_args=True) ARCSIN_HANDLER = DisableAbleCommandHandler(\"arcsin\", arcsin, pass_args=True)" ]
# Module-level AWS setup: log to a file and build the shared IAM/STS clients
# used by every helper below.
logging.basicConfig(filename="rps.log", level=logging.INFO)

iam_resource = boto3.resource("iam")
sts_client = boto3.client("sts")


def create_role(
    iam_role_name: str, assume_role_policy_json: str, policy_arns: list
) -> iam_resource.Role:
    """
    Create an IAM role, wait for it to exist, and attach the given policies.

    :param iam_role_name: name of the role to create
    :param assume_role_policy_json: a json string that represents the assume
        role policy defining what resources are allowed to assume the role
    :param policy_arns: list of strings representing existing policy arns to
        also attach to the role
    :return: IAM role object

    This method was adapted from the create_iam_role_for_lambda() method found here:
    https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html
    """
    try:
        role = iam_resource.create_role(
            RoleName=iam_role_name,
            AssumeRolePolicyDocument=assume_role_policy_json,
        )
        # wait for the creation to complete
        iam_resource.meta.client.get_waiter("role_exists").wait(RoleName=iam_role_name)
        # attach the additional supplied policies
        for arn in policy_arns:
            role.attach_policy(PolicyArn=arn)
    except ClientError as error:
        if error.response["Error"]["Code"] == "EntityAlreadyExists":
            # Role exists already: reuse it instead of failing.
            role = iam_resource.Role(iam_role_name)
            # NOTE(review): the log argument below was not fully visible in
            # this view — confirm it matches the original.
            logging.warning("The role %s already exists. Using it.", iam_role_name)
        else:
            logging.error(error.response["Error"]["Message"])
            logging.exception(
                "Couldn't create role %s or attach policy %s.",
                iam_role_name,
                str(policy_arns),
            )
            # Unexpected failure: propagate to the caller.
            raise
    else:
        logging.info("Created IAM role %s.", role.name)
        logging.info("Attached policies %s to role %s.", policy_arns, role.name)
    return role
Using it.\",", "logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't create role %s or attach policy %s.\",", "policy_json: just be a valid policy json string :return: IAM", "policy = iam_resource.Policy(policy_arn) return policy def delete_role(iam_role) -> dict: \"\"\"", "%s.\", iam_role_name, str(policy_arns), ) raise else: logging.info(\"Created IAM role %s.\",", "this parameter is an IAM role object, such as returned", "represents the assume role policy defining what resources are allowed", "= f\"arn:aws:iam::{account_id}:policy/{policy_name}\" # policies are created in the Python SDK", "= iam_role.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete role", "found here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\" try: role = iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json,", "permissions in AWS and can be associated with IAM roles.", "Policy object \"\"\" # sts provides the account number of", "policy_arns: list ) -> iam_resource.Role: \"\"\" Create an IAM role", "= iam_resource.Policy(policy_arn) return policy def delete_role(iam_role) -> dict: \"\"\" Delete", "json string that represents the assume role policy defining what", "attach to the role :return: IAM role object This method", "\"\"\" Get an existing policy by name. 
:return: IAM Policy", "attach policy %s.\", iam_role_name, str(policy_arns), ) raise else: logging.info(\"Created IAM", "return policy def get_policy_by_name(policy_name: str) -> iam_resource.Policy: \"\"\" Get an", "the creation to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach the additional supplied", "as returned by create_role() \"\"\" try: # remove all policies", "by create_role() \"\"\" try: # remove all policies before deleting", "%s\", iam_policy.arn) else: logging.info(\"Deleted policy '%s'\", iam_policy.arn) return response if", "= iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, ) # wait for the creation", "policy_arns, role.name) return role def create_policy(policy_name: str, policy_json: str) ->", "ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": policy = get_policy_by_name(policy_name)", "also attach to the role :return: IAM role object This", "account number of the current credentials account_id = sts_client.get_caller_identity()[\"Account\"] #", "iam_resource.Policy(policy_arn) return policy def delete_role(iam_role) -> dict: \"\"\" Delete a", "except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete role %s\", iam_role.name)", "such as returned by create_policy() \"\"\" try: response = iam_policy.delete()", "roles. :param policy_json: just be a valid policy json string", "policy = create_policy(policy_name, policy_json) print(\"new policy arn: \", policy.arn) policy.delete()", "of strings representing existing policy arns to also attach to", "as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete role %s\", iam_role.name) else: logging.info(\"Deleted", "a role. 
:param iam_policy: this parameter is an IAM policy", "returned by create_policy() \"\"\" try: response = iam_policy.delete() except ClientError", "role.name) return role def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy:", "can be associated with IAM roles. :param policy_json: just be", "iam_resource.create_policy( PolicyName=policy_name, PolicyDocument=policy_json ) except ClientError as error: if error.response[\"Error\"][\"Code\"]", "role '%s'\", iam_role.name) return response def delete_policy(iam_policy) -> dict: \"\"\"", "error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete policy %s\", iam_policy.arn) else: logging.info(\"Deleted policy", "what resources are allowed to assume the role. :param policy_arns:", "policies are created in the Python SDK via their arn", "iam_role.name) return response def delete_policy(iam_policy) -> dict: \"\"\" Delete a", "the additional supplied policies for arn in policy_arns: role.attach_policy(PolicyArn=arn) except", "iam_resource.Policy: \"\"\" Create an IAM policy of given name and", "return response if __name__ == \"__main__\": # brief functionality test", "associated with IAM roles. :param policy_json: just be a valid", "logging.warning(\"The policy %s already exists. Using it.\", policy.arn) return policy", "policy defining what resources are allowed to assume the role.", "\"\"\" try: # remove all policies before deleting role for", "with IAM roles. :param policy_json: just be a valid policy", "an IAM policy of given name and json description. 
Policies", "policy_json_file = \"./policy/lambda_policy.json\" with open(policy_json_file) as file: policy_json = file.read()", "\"Couldn't create role %s or attach policy %s.\", iam_role_name, str(policy_arns),", "on Thu Apr 22 2021 # <NAME> # import boto3", "== \"EntityAlreadyExists\": role = iam_resource.Role(iam_role_name) logging.warning(\"The role %s already exists.", "created in the Python SDK via their arn policy =", "role policy defining what resources are allowed to assume the", "this parameter is an IAM policy object, such as returned", "as returned by create_policy() \"\"\" try: response = iam_policy.delete() except", "Python SDK via their arn policy = iam_resource.Policy(policy_arn) return policy", "import ClientError import logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO) iam_resource = boto3.resource(\"iam\") sts_client", "def delete_policy(iam_policy) -> dict: \"\"\" Delete a role. :param iam_policy:", "== \"EntityAlreadyExists\": policy = get_policy_by_name(policy_name) logging.warning(\"The policy %s already exists.", "delete_policy(iam_policy) -> dict: \"\"\" Delete a role. :param iam_policy: this", "assume role policy defining what resources are allowed to assume", "iam_resource = boto3.resource(\"iam\") sts_client = boto3.client(\"sts\") def create_role( iam_role_name: str,", "Delete a role. :param iam_role: this parameter is an IAM", "def delete_role(iam_role) -> dict: \"\"\" Delete a role. :param iam_role:", "is an IAM role object, such as returned by create_role()", "create_role( iam_role_name: str, assume_role_policy_json: str, policy_arns: list ) -> iam_resource.Role:", "by create_policy() \"\"\" try: response = iam_policy.delete() except ClientError as", "role %s already exists. 
Using it.\", iam_role_name) return role else:", "in policy_arns: role.attach_policy(PolicyArn=arn) except ClientError as error: if error.response[\"Error\"][\"Code\"] ==", "return response def delete_policy(iam_policy) -> dict: \"\"\" Delete a role.", "# # Created on Thu Apr 22 2021 # <NAME>", "policies %s to role %s.\", policy_arns, role.name) return role def", "iam_policy: this parameter is an IAM policy object, such as", "except ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": policy =", "else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create policy %s\", policy_name) raise else: logging.info(\"Created", "with a given policy. :param assume_role_policy_json: A json string that", "\"__main__\": # brief functionality test with delete() cleanup at end", "\"./policy/lambda_policy.json\" with open(policy_json_file) as file: policy_json = file.read() policy_name =", "role.name) logging.info(\"Attached policies %s to role %s.\", policy_arns, role.name) return", "name and json description. Policies define permissions in AWS and", "PolicyDocument=policy_json ) except ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\":", "be a valid policy json string :return: IAM Policy object", "via their arn policy = iam_resource.Policy(policy_arn) return policy def delete_role(iam_role)", "<NAME> # import boto3 from botocore.exceptions import ClientError import logging", "policy in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response = iam_role.delete() except ClientError as", "if __name__ == \"__main__\": # brief functionality test with delete()", "# brief functionality test with delete() cleanup at end policy_json_file", "policy object, such as returned by create_policy() \"\"\" try: response", "iam_resource.Policy: \"\"\" Get an existing policy by name. 
:return: IAM", "arn in policy_arns: role.attach_policy(PolicyArn=arn) except ClientError as error: if error.response[\"Error\"][\"Code\"]", "policy. :param assume_role_policy_json: A json string that represents the assume", "complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach the additional supplied policies for arn", "def create_role( iam_role_name: str, assume_role_policy_json: str, policy_arns: list ) ->", "policy_name = \"test_policy\" policy = create_policy(policy_name, policy_json) print(\"new policy arn:", "else: logging.info(\"Deleted role '%s'\", iam_role.name) return response def delete_policy(iam_policy) ->", "iam_resource.Role: \"\"\" Create an IAM role with a given policy.", "str) -> iam_resource.Policy: \"\"\" Create an IAM policy of given", "Delete a role. :param iam_policy: this parameter is an IAM", "a given policy. :param assume_role_policy_json: A json string that represents", "'%s'\", iam_policy.arn) return response if __name__ == \"__main__\": # brief", "to the role :return: IAM role object This method was", "iam_role_name, str(policy_arns), ) raise else: logging.info(\"Created IAM role %s.\", role.name)", ") # wait for the creation to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) #", "logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete role %s\", iam_role.name) else: logging.info(\"Deleted role '%s'\",", "policy json string :return: IAM Policy object \"\"\" try: policy", "a role. :param iam_role: this parameter is an IAM role", "as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete policy %s\", iam_policy.arn) else: logging.info(\"Deleted", "role. :param policy_arns: a list of strings representing existing policy", "delete_role(iam_role) -> dict: \"\"\" Delete a role. 
:param iam_role: this", "== \"__main__\": # brief functionality test with delete() cleanup at", "in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response = iam_role.delete() except ClientError as error:", "from the create_iam_role_for_lambda() method found here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\" try: role", "object \"\"\" try: policy = iam_resource.create_policy( PolicyName=policy_name, PolicyDocument=policy_json ) except", "policy arns consist of an account id and policy name", "def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy: \"\"\" Create an", "role %s.\", role.name) logging.info(\"Attached policies %s to role %s.\", policy_arns,", "are allowed to assume the role. :param policy_arns: a list", "consist of an account id and policy name policy_arn =", "policy of given name and json description. Policies define permissions", "policies before deleting role for policy in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response", "a valid policy json string :return: IAM Policy object \"\"\"", "role. :param iam_policy: this parameter is an IAM policy object,", "cleanup at end policy_json_file = \"./policy/lambda_policy.json\" with open(policy_json_file) as file:", "ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": role = iam_resource.Role(iam_role_name)", "policy_name) return policy def get_policy_by_name(policy_name: str) -> iam_resource.Policy: \"\"\" Get", "sts provides the account number of the current credentials account_id", "%s already exists. Using it.\", policy.arn) return policy else: logging.error(error.response[\"Error\"][\"Message\"])", "\"EntityAlreadyExists\": policy = get_policy_by_name(policy_name) logging.warning(\"The policy %s already exists. 
Using", ") -> iam_resource.Role: \"\"\" Create an IAM role with a", "the current credentials account_id = sts_client.get_caller_identity()[\"Account\"] # policy arns consist", "role else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't create role %s or attach", "additional supplied policies for arn in policy_arns: role.attach_policy(PolicyArn=arn) except ClientError", "end policy_json_file = \"./policy/lambda_policy.json\" with open(policy_json_file) as file: policy_json =", "create role %s or attach policy %s.\", iam_role_name, str(policy_arns), )", "logging.info(\"Attached policies %s to role %s.\", policy_arns, role.name) return role", "-> iam_resource.Role: \"\"\" Create an IAM role with a given", "given policy. :param assume_role_policy_json: A json string that represents the", "iam_role.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete role %s\",", "IAM role with a given policy. :param assume_role_policy_json: A json", "= get_policy_by_name(policy_name) logging.warning(\"The policy %s already exists. Using it.\", policy.arn)", "in AWS and can be associated with IAM roles. :param", "and policy name policy_arn = f\"arn:aws:iam::{account_id}:policy/{policy_name}\" # policies are created", "file: policy_json = file.read() policy_name = \"test_policy\" policy = create_policy(policy_name,", "policy %s already exists. Using it.\", policy.arn) return policy else:", "account id and policy name policy_arn = f\"arn:aws:iam::{account_id}:policy/{policy_name}\" # policies", "as file: policy_json = file.read() policy_name = \"test_policy\" policy =", "policy %s.\", iam_role_name, str(policy_arns), ) raise else: logging.info(\"Created IAM role", "name. 
:return: IAM Policy object \"\"\" # sts provides the", "name policy_arn = f\"arn:aws:iam::{account_id}:policy/{policy_name}\" # policies are created in the", "json string :return: IAM Policy object \"\"\" try: policy =", "AWS and can be associated with IAM roles. :param policy_json:", "= iam_resource.Role(iam_role_name) logging.warning(\"The role %s already exists. Using it.\", iam_role_name)", "Create an IAM role with a given policy. :param assume_role_policy_json:", "create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy: \"\"\" Create an IAM", ") except ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": policy", ") raise else: logging.info(\"Created IAM role %s.\", role.name) logging.info(\"Attached policies", "IAM roles. :param policy_json: just be a valid policy json", "logging.info(\"Deleted role '%s'\", iam_role.name) return response def delete_policy(iam_policy) -> dict:", "str(policy_arns), ) raise else: logging.info(\"Created IAM role %s.\", role.name) logging.info(\"Attached", "policy_arns: a list of strings representing existing policy arns to", "level=logging.INFO) iam_resource = boto3.resource(\"iam\") sts_client = boto3.client(\"sts\") def create_role( iam_role_name:", "are created in the Python SDK via their arn policy", "strings representing existing policy arns to also attach to the", "= iam_resource.create_policy( PolicyName=policy_name, PolicyDocument=policy_json ) except ClientError as error: if", "credentials account_id = sts_client.get_caller_identity()[\"Account\"] # policy arns consist of an", "brief functionality test with delete() cleanup at end policy_json_file =", "creation to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach the additional supplied policies", "assume_role_policy_json: A json string that represents the assume role policy", "string :return: IAM Policy object \"\"\" try: policy = iam_resource.create_policy(", "%s.\", 
policy_arns, role.name) return role def create_policy(policy_name: str, policy_json: str)", "'%s'\", iam_role.name) return response def delete_policy(iam_policy) -> dict: \"\"\" Delete", "-> iam_resource.Policy: \"\"\" Get an existing policy by name. :return:", "from botocore.exceptions import ClientError import logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO) iam_resource =", "return role def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy: \"\"\"", "IAM policy of given name and json description. Policies define", "policy_json = file.read() policy_name = \"test_policy\" policy = create_policy(policy_name, policy_json)", ":return: IAM Policy object \"\"\" try: policy = iam_resource.create_policy( PolicyName=policy_name,", "as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": role = iam_resource.Role(iam_role_name) logging.warning(\"The", "-> iam_resource.Policy: \"\"\" Create an IAM policy of given name", "attach the additional supplied policies for arn in policy_arns: role.attach_policy(PolicyArn=arn)", "Created on Thu Apr 22 2021 # <NAME> # import", "Get an existing policy by name. :return: IAM Policy object", "role :return: IAM role object This method was adapted from", "by name. :return: IAM Policy object \"\"\" # sts provides", "ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete role %s\", iam_role.name) else:", "IAM role %s.\", role.name) logging.info(\"Attached policies %s to role %s.\",", "# sts provides the account number of the current credentials", "# wait for the creation to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach", "exists. 
Using it.\", policy.arn) return policy else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create", "= sts_client.get_caller_identity()[\"Account\"] # policy arns consist of an account id", "%s\", iam_role.name) else: logging.info(\"Deleted role '%s'\", iam_role.name) return response def", "the Python SDK via their arn policy = iam_resource.Policy(policy_arn) return", "# attach the additional supplied policies for arn in policy_arns:", "role def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy: \"\"\" Create", "it.\", policy.arn) return policy else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create policy %s\",", "iam_resource.Role(iam_role_name) logging.warning(\"The role %s already exists. Using it.\", iam_role_name) return", "\"\"\" try: role = iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, ) # wait", "or attach policy %s.\", iam_role_name, str(policy_arns), ) raise else: logging.info(\"Created", "else: logging.info(\"Created IAM role %s.\", role.name) logging.info(\"Attached policies %s to", "role = iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, ) # wait for the", "policy def get_policy_by_name(policy_name: str) -> iam_resource.Policy: \"\"\" Get an existing", "%s or attach policy %s.\", iam_role_name, str(policy_arns), ) raise else:", "%s.\", role.name) logging.info(\"Attached policies %s to role %s.\", policy_arns, role.name)", "exists. 
Using it.\", iam_role_name) return role else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't", "# import boto3 from botocore.exceptions import ClientError import logging logging.basicConfig(filename=\"rps.log\",", ":param iam_role: this parameter is an IAM role object, such", "Thu Apr 22 2021 # <NAME> # import boto3 from", ":param policy_arns: a list of strings representing existing policy arns", "list of strings representing existing policy arns to also attach", "delete policy %s\", iam_policy.arn) else: logging.info(\"Deleted policy '%s'\", iam_policy.arn) return", "json description. Policies define permissions in AWS and can be", "to also attach to the role :return: IAM role object", "get_policy_by_name(policy_name: str) -> iam_resource.Policy: \"\"\" Get an existing policy by", "such as returned by create_role() \"\"\" try: # remove all", "ClientError import logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO) iam_resource = boto3.resource(\"iam\") sts_client =", "with delete() cleanup at end policy_json_file = \"./policy/lambda_policy.json\" with open(policy_json_file)", "policy arns to also attach to the role :return: IAM", "an IAM role with a given policy. :param assume_role_policy_json: A", "before deleting role for policy in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response =", "error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete role %s\", iam_role.name) else: logging.info(\"Deleted role", "def get_policy_by_name(policy_name: str) -> iam_resource.Policy: \"\"\" Get an existing policy", "-> dict: \"\"\" Delete a role. :param iam_role: this parameter", "dict: \"\"\" Delete a role. :param iam_role: this parameter is", "id and policy name policy_arn = f\"arn:aws:iam::{account_id}:policy/{policy_name}\" # policies are", "%s already exists. 
Using it.\", iam_role_name) return role else: logging.error(error.response[\"Error\"][\"Message\"])", "string that represents the assume role policy defining what resources", "iam_role: this parameter is an IAM role object, such as", "assume the role. :param policy_arns: a list of strings representing", "create_role() \"\"\" try: # remove all policies before deleting role", "role %s.\", policy_arns, role.name) return role def create_policy(policy_name: str, policy_json:", "policy '%s'\", iam_policy.arn) return response if __name__ == \"__main__\": #", "This method was adapted from the create_iam_role_for_lambda() method found here:", ":param policy_json: just be a valid policy json string :return:", "sts_client.get_caller_identity()[\"Account\"] # policy arns consist of an account id and", "= iam_policy.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete policy", "all policies before deleting role for policy in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name)", "with open(policy_json_file) as file: policy_json = file.read() policy_name = \"test_policy\"", "\"\"\" Create an IAM role with a given policy. :param", "IAM role object, such as returned by create_role() \"\"\" try:", "policy def delete_role(iam_role) -> dict: \"\"\" Delete a role. 
:param", "Policy '%s'\", policy_name) return policy def get_policy_by_name(policy_name: str) -> iam_resource.Policy:", "define permissions in AWS and can be associated with IAM", "boto3.resource(\"iam\") sts_client = boto3.client(\"sts\") def create_role( iam_role_name: str, assume_role_policy_json: str,", "try: response = iam_policy.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't", "role object, such as returned by create_role() \"\"\" try: #", "response = iam_role.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete", "role = iam_resource.Role(iam_role_name) logging.warning(\"The role %s already exists. Using it.\",", "error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": role = iam_resource.Role(iam_role_name) logging.warning(\"The role %s already", "already exists. Using it.\", iam_role_name) return role else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(", "str) -> iam_resource.Policy: \"\"\" Get an existing policy by name.", "2021 # <NAME> # import boto3 from botocore.exceptions import ClientError", "role for policy in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response = iam_role.delete() except", "raise else: logging.info(\"Created Policy '%s'\", policy_name) return policy def get_policy_by_name(policy_name:", "# remove all policies before deleting role for policy in", "and json description. 
Policies define permissions in AWS and can", "to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach the additional supplied policies for", "open(policy_json_file) as file: policy_json = file.read() policy_name = \"test_policy\" policy", "import boto3 from botocore.exceptions import ClientError import logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO)", "try: # remove all policies before deleting role for policy", "iam_policy.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete policy %s\",", "try: role = iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, ) # wait for", "Using it.\", policy.arn) return policy else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create policy", "policy_name) raise else: logging.info(\"Created Policy '%s'\", policy_name) return policy def", "the create_iam_role_for_lambda() method found here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\" try: role =", "policy_arns: role.attach_policy(PolicyArn=arn) except ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\":", "of an account id and policy name policy_arn = f\"arn:aws:iam::{account_id}:policy/{policy_name}\"", "SDK via their arn policy = iam_resource.Policy(policy_arn) return policy def", "Policies define permissions in AWS and can be associated with", "RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, ) # wait for the creation to complete", "supplied policies for arn in policy_arns: role.attach_policy(PolicyArn=arn) except ClientError as", "except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete policy %s\", iam_policy.arn)", "-> dict: \"\"\" Delete a role. 
:param iam_policy: this parameter", "\"test_policy\" policy = create_policy(policy_name, policy_json) print(\"new policy arn: \", policy.arn)", "provides the account number of the current credentials account_id =", "A json string that represents the assume role policy defining", "existing policy arns to also attach to the role :return:", "import logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO) iam_resource = boto3.resource(\"iam\") sts_client = boto3.client(\"sts\")", "already exists. Using it.\", policy.arn) return policy else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't", "object \"\"\" # sts provides the account number of the", "ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete policy %s\", iam_policy.arn) else:", "boto3 from botocore.exceptions import ClientError import logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO) iam_resource", "str, policy_json: str) -> iam_resource.Policy: \"\"\" Create an IAM policy", "response if __name__ == \"__main__\": # brief functionality test with", "%s to role %s.\", policy_arns, role.name) return role def create_policy(policy_name:", "logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create policy %s\", policy_name) raise else: logging.info(\"Created Policy", "AssumeRolePolicyDocument=assume_role_policy_json, ) # wait for the creation to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name)", "iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach the additional supplied policies for arn in", "= file.read() policy_name = \"test_policy\" policy = create_policy(policy_name, policy_json) print(\"new", "# Created on Thu Apr 22 2021 # <NAME> #", "policy.detach_role(RoleName=iam_role.name) response = iam_role.delete() except ClientError as error: 
logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't", "str, assume_role_policy_json: str, policy_arns: list ) -> iam_resource.Role: \"\"\" Create", "# policies are created in the Python SDK via their", "is an IAM policy object, such as returned by create_policy()", "= \"test_policy\" policy = create_policy(policy_name, policy_json) print(\"new policy arn: \",", "= boto3.resource(\"iam\") sts_client = boto3.client(\"sts\") def create_role( iam_role_name: str, assume_role_policy_json:", "Apr 22 2021 # <NAME> # import boto3 from botocore.exceptions", "logging.info(\"Created IAM role %s.\", role.name) logging.info(\"Attached policies %s to role", "= \"./policy/lambda_policy.json\" with open(policy_json_file) as file: policy_json = file.read() policy_name", "policy = iam_resource.create_policy( PolicyName=policy_name, PolicyDocument=policy_json ) except ClientError as error:", "logging.info(\"Created Policy '%s'\", policy_name) return policy def get_policy_by_name(policy_name: str) ->", "policy %s\", policy_name) raise else: logging.info(\"Created Policy '%s'\", policy_name) return", "and can be associated with IAM roles. :param policy_json: just", "policies for arn in policy_arns: role.attach_policy(PolicyArn=arn) except ClientError as error:", "defining what resources are allowed to assume the role. :param", "else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't create role %s or attach policy", "that represents the assume role policy defining what resources are", "arns to also attach to the role :return: IAM role", "allowed to assume the role. 
:param policy_arns: a list of", "else: logging.info(\"Deleted policy '%s'\", iam_policy.arn) return response if __name__ ==", "valid policy json string :return: IAM Policy object \"\"\" try:", "Policy object \"\"\" try: policy = iam_resource.create_policy( PolicyName=policy_name, PolicyDocument=policy_json )", "parameter is an IAM policy object, such as returned by", "IAM Policy object \"\"\" try: policy = iam_resource.create_policy( PolicyName=policy_name, PolicyDocument=policy_json", "logging.exception( \"Couldn't create role %s or attach policy %s.\", iam_role_name,", "delete() cleanup at end policy_json_file = \"./policy/lambda_policy.json\" with open(policy_json_file) as", "parameter is an IAM role object, such as returned by", "botocore.exceptions import ClientError import logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO) iam_resource = boto3.resource(\"iam\")", "to assume the role. :param policy_arns: a list of strings", "be associated with IAM roles. :param policy_json: just be a", "file.read() policy_name = \"test_policy\" policy = create_policy(policy_name, policy_json) print(\"new policy", "%s\", policy_name) raise else: logging.info(\"Created Policy '%s'\", policy_name) return policy", "return role else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't create role %s or", "logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete policy %s\", iam_policy.arn) else: logging.info(\"Deleted policy '%s'\",", "\"\"\" Delete a role. 
:param iam_role: this parameter is an", "wait for the creation to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach the", "the role :return: IAM role object This method was adapted", "IAM Policy object \"\"\" # sts provides the account number", "policy_json: str) -> iam_resource.Policy: \"\"\" Create an IAM policy of", "response = iam_policy.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"]) logging.error(\"Couldn't delete", "error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": role = iam_resource.Role(iam_role_name) logging.warning(\"The role", "except ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": role =", "for the creation to complete iam_resource.meta.client.get_waiter(\"role_exists\").wait(RoleName=iam_role_name) # attach the additional", "an account id and policy name policy_arn = f\"arn:aws:iam::{account_id}:policy/{policy_name}\" #", "iam_role.name) else: logging.info(\"Deleted role '%s'\", iam_role.name) return response def delete_policy(iam_policy)", "the role. :param policy_arns: a list of strings representing existing", "policy name policy_arn = f\"arn:aws:iam::{account_id}:policy/{policy_name}\" # policies are created in", "get_policy_by_name(policy_name) logging.warning(\"The policy %s already exists. Using it.\", policy.arn) return", "if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": policy = get_policy_by_name(policy_name) logging.warning(\"The policy %s", "\"\"\" Delete a role. 
:param iam_policy: this parameter is an", "\"\"\" try: response = iam_policy.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"])", "f\"arn:aws:iam::{account_id}:policy/{policy_name}\" # policies are created in the Python SDK via", "method found here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\" try: role = iam_resource.create_role( RoleName=iam_role_name,", "role %s\", iam_role.name) else: logging.info(\"Deleted role '%s'\", iam_role.name) return response", "object, such as returned by create_policy() \"\"\" try: response =", "sts_client = boto3.client(\"sts\") def create_role( iam_role_name: str, assume_role_policy_json: str, policy_arns:", "create policy %s\", policy_name) raise else: logging.info(\"Created Policy '%s'\", policy_name)", "create_policy() \"\"\" try: response = iam_policy.delete() except ClientError as error:", "policy %s\", iam_policy.arn) else: logging.info(\"Deleted policy '%s'\", iam_policy.arn) return response", "return policy def delete_role(iam_role) -> dict: \"\"\" Delete a role.", "list ) -> iam_resource.Role: \"\"\" Create an IAM role with", "error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": policy = get_policy_by_name(policy_name) logging.warning(\"The policy %s already", "account_id = sts_client.get_caller_identity()[\"Account\"] # policy arns consist of an account", "assume_role_policy_json: str, policy_arns: list ) -> iam_resource.Role: \"\"\" Create an", "iam_role_name) return role else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't create role %s", "object, such as returned by create_role() \"\"\" try: # remove", ":param assume_role_policy_json: A json string that represents the assume role", "for policy in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response = iam_role.delete() except ClientError", "iam_role_name: str, 
assume_role_policy_json: str, policy_arns: list ) -> iam_resource.Role: \"\"\"", "__name__ == \"__main__\": # brief functionality test with delete() cleanup", "at end policy_json_file = \"./policy/lambda_policy.json\" with open(policy_json_file) as file: policy_json", "test with delete() cleanup at end policy_json_file = \"./policy/lambda_policy.json\" with", "delete role %s\", iam_role.name) else: logging.info(\"Deleted role '%s'\", iam_role.name) return", "return policy else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create policy %s\", policy_name) raise", "role. :param iam_role: this parameter is an IAM role object,", "IAM policy object, such as returned by create_policy() \"\"\" try:", "an IAM policy object, such as returned by create_policy() \"\"\"", "iam_policy.arn) return response if __name__ == \"__main__\": # brief functionality", "the assume role policy defining what resources are allowed to", "logging logging.basicConfig(filename=\"rps.log\", level=logging.INFO) iam_resource = boto3.resource(\"iam\") sts_client = boto3.client(\"sts\") def", "\"\"\" # sts provides the account number of the current", "given name and json description. Policies define permissions in AWS", "remove all policies before deleting role for policy in iam_role.attached_policies.all():", "PolicyName=policy_name, PolicyDocument=policy_json ) except ClientError as error: if error.response[\"Error\"][\"Code\"] ==", "role %s or attach policy %s.\", iam_role_name, str(policy_arns), ) raise", "https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\" try: role = iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, ) #", ":param iam_policy: this parameter is an IAM policy object, such", "existing policy by name. 
:return: IAM Policy object \"\"\" #", "create_iam_role_for_lambda() method found here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\" try: role = iam_resource.create_role(", "iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response = iam_role.delete() except ClientError as error: logging.error(error.response[\"Error\"][\"Message\"])", "'%s'\", policy_name) return policy def get_policy_by_name(policy_name: str) -> iam_resource.Policy: \"\"\"", "was adapted from the create_iam_role_for_lambda() method found here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\"", "try: policy = iam_resource.create_policy( PolicyName=policy_name, PolicyDocument=policy_json ) except ClientError as", "object This method was adapted from the create_iam_role_for_lambda() method found", "an IAM role object, such as returned by create_role() \"\"\"", "response def delete_policy(iam_policy) -> dict: \"\"\" Delete a role. :param", "policy else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create policy %s\", policy_name) raise else:", "to role %s.\", policy_arns, role.name) return role def create_policy(policy_name: str,", "description. Policies define permissions in AWS and can be associated", "else: logging.info(\"Created Policy '%s'\", policy_name) return policy def get_policy_by_name(policy_name: str)", "# policy arns consist of an account id and policy", "policy by name. 
:return: IAM Policy object \"\"\" # sts", "here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html \"\"\" try: role = iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, )", "str, policy_arns: list ) -> iam_resource.Role: \"\"\" Create an IAM", "their arn policy = iam_resource.Policy(policy_arn) return policy def delete_role(iam_role) ->", "as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": policy = get_policy_by_name(policy_name) logging.warning(\"The", "method was adapted from the create_iam_role_for_lambda() method found here: https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html", "iam_resource.create_role( RoleName=iam_role_name, AssumeRolePolicyDocument=assume_role_policy_json, ) # wait for the creation to", "resources are allowed to assume the role. :param policy_arns: a", "role.attach_policy(PolicyArn=arn) except ClientError as error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": role", "arn policy = iam_resource.Policy(policy_arn) return policy def delete_role(iam_role) -> dict:", "representing existing policy arns to also attach to the role", "it.\", iam_role_name) return role else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't create role", "# <NAME> # import boto3 from botocore.exceptions import ClientError import", ":return: IAM role object This method was adapted from the", "iam_policy.arn) else: logging.info(\"Deleted policy '%s'\", iam_policy.arn) return response if __name__", "the account number of the current credentials account_id = sts_client.get_caller_identity()[\"Account\"]", "in the Python SDK via their arn policy = iam_resource.Policy(policy_arn)", "current credentials account_id = sts_client.get_caller_identity()[\"Account\"] # policy arns consist of", "if 
error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": role = iam_resource.Role(iam_role_name) logging.warning(\"The role %s", "logging.warning(\"The role %s already exists. Using it.\", iam_role_name) return role", "arns consist of an account id and policy name policy_arn", "IAM role object This method was adapted from the create_iam_role_for_lambda()", "for arn in policy_arns: role.attach_policy(PolicyArn=arn) except ClientError as error: if", "error: if error.response[\"Error\"][\"Code\"] == \"EntityAlreadyExists\": policy = get_policy_by_name(policy_name) logging.warning(\"The policy", "logging.error(\"Couldn't delete policy %s\", iam_policy.arn) else: logging.info(\"Deleted policy '%s'\", iam_policy.arn)", "logging.error(\"Couldn't delete role %s\", iam_role.name) else: logging.info(\"Deleted role '%s'\", iam_role.name)", "boto3.client(\"sts\") def create_role( iam_role_name: str, assume_role_policy_json: str, policy_arns: list )", "logging.exception(\"Couldn't create policy %s\", policy_name) raise else: logging.info(\"Created Policy '%s'\",", "a list of strings representing existing policy arns to also", "logging.info(\"Deleted policy '%s'\", iam_policy.arn) return response if __name__ == \"__main__\":", "Create an IAM policy of given name and json description.", "dict: \"\"\" Delete a role. :param iam_policy: this parameter is", "role with a given policy. 
:param assume_role_policy_json: A json string", "= boto3.client(\"sts\") def create_role( iam_role_name: str, assume_role_policy_json: str, policy_arns: list", "\"\"\" Create an IAM policy of given name and json", "deleting role for policy in iam_role.attached_policies.all(): policy.detach_role(RoleName=iam_role.name) response = iam_role.delete()", "Using it.\", iam_role_name) return role else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception( \"Couldn't create", ":return: IAM Policy object \"\"\" # sts provides the account", "22 2021 # <NAME> # import boto3 from botocore.exceptions import", "returned by create_role() \"\"\" try: # remove all policies before", "raise else: logging.info(\"Created IAM role %s.\", role.name) logging.info(\"Attached policies %s", "just be a valid policy json string :return: IAM Policy", "functionality test with delete() cleanup at end policy_json_file = \"./policy/lambda_policy.json\"", "policy.arn) return policy else: logging.error(error.response[\"Error\"][\"Message\"]) logging.exception(\"Couldn't create policy %s\", policy_name)" ]
[ "enum import auto, Enum class RunStatus(Enum): SUCCESS = auto() CALLED_PROCESS_ERROR", "= auto() FILE_NOT_FOUND = auto() PROCESS_LOOKUP_ERROR = auto() TIMEOUT_EXPIRED =", "<reponame>babatana/stograde from enum import auto, Enum class RunStatus(Enum): SUCCESS =", "RunStatus(Enum): SUCCESS = auto() CALLED_PROCESS_ERROR = auto() FILE_NOT_FOUND = auto()", "= auto() CALLED_PROCESS_ERROR = auto() FILE_NOT_FOUND = auto() PROCESS_LOOKUP_ERROR =", "import auto, Enum class RunStatus(Enum): SUCCESS = auto() CALLED_PROCESS_ERROR =", "class RunStatus(Enum): SUCCESS = auto() CALLED_PROCESS_ERROR = auto() FILE_NOT_FOUND =", "auto, Enum class RunStatus(Enum): SUCCESS = auto() CALLED_PROCESS_ERROR = auto()", "SUCCESS = auto() CALLED_PROCESS_ERROR = auto() FILE_NOT_FOUND = auto() PROCESS_LOOKUP_ERROR", "Enum class RunStatus(Enum): SUCCESS = auto() CALLED_PROCESS_ERROR = auto() FILE_NOT_FOUND", "CALLED_PROCESS_ERROR = auto() FILE_NOT_FOUND = auto() PROCESS_LOOKUP_ERROR = auto() TIMEOUT_EXPIRED", "auto() CALLED_PROCESS_ERROR = auto() FILE_NOT_FOUND = auto() PROCESS_LOOKUP_ERROR = auto()", "from enum import auto, Enum class RunStatus(Enum): SUCCESS = auto()", "auto() FILE_NOT_FOUND = auto() PROCESS_LOOKUP_ERROR = auto() TIMEOUT_EXPIRED = auto()" ]
[]
[ "\"\"\" Add a list of labels to the end of", "value is a LabelList containing only labels with the same", "Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels [", "intervals = self.label_tree.envelop(start, end) else: intervals = self.label_tree.overlap(start, end) return", "not to the beginning of the original label-list. overlap(float): Amount", "iv in intervals] def ranges(self, yield_ranges_without_labels=False, include_labels=None): \"\"\" Generate all", "interval.end + threshold direct_overlaps = all_intervals.overlap(range_start, range_end) all_overlaps = [interval]", "default start/end values of 0 and ``inf``. Args: values(list): List", "sorted(all_labels) def label_count(self): \"\"\" Return for each label the number", "start=7.0, end=10.2), >>> Label('b', start=10.3, end=14.0) >>> ]) >>> s", "overlap=0.0): \"\"\" Split the label-list into x parts and return", "g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a',", "the labels. Example: >>> label_list = LabelList(idx='transcription', labels=[ >>> Label('this',", "every label-list but the first. >>> ll = LabelList(labels=[ >>>", "len(cutting_points) + 1``). The result is a list of label-lists", "shifted in splitted label-lists. So the start is relative to", "delimiter=' ', overlap_threshold=0.1): \"\"\" Return a ordered list of tokens", "used to split labels into tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`:", "as keys. Every value is a LabelList containing only labels", "# Extract labels from intervals with updated times for iv", "... ]) >>> def shift_labels(label): ... label.start += 1.0 ...", "start=3.95, end=6.0), >>> Label('c', start=7.0, end=10.2), >>> Label('d', start=10.3, end=14.0)", "Add a list of labels to the end of the", "\"\"\" __slots__ = ['idx', 'label_tree', 'utterance'] def __init__(self, idx='default', labels=None):", "3.0) ... ]) >>> def shift_labels(label): ... 
label.start += 1.0", "max(0, iv_start - overlap, label.start) label.end = min(iv_end + overlap,", "in self.labels: fn(label) def merge_overlaps(self, threshold=0.0): \"\"\" Merge overlapping labels", "raise ValueError('Labels overlap, not able to define the correct order')", "interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps # For every remaining interval #", "cutting point and not to the beginning of the original", "5, 8), >>> Label('a', 8, 10), >>> Label('b', 10, 14),", "LabelList( idx=self.idx, labels=copy.deepcopy([iv.data for iv in self.label_tree], memo) ) @property", "10, 14), >>> Label('a', 15, 18.5) >>> ]) >>> ll.label_total_duration()", "label-list. Example: >>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters') >>>", "Args: threshold (float): Maximal distance between two labels to be", ">>> Label('timmy', 4, 8) >>> ]) \"\"\" __slots__ = ['idx',", "label. If the overlapping between two labels is greater than", "x is defined by the number of cutting-points (``x ==", "[Label('c', 12.0, 15.0)] If ``shift_times = True``, the times are", "= cutting_points[i] else: iv_end = float('inf') # get all intervals", "for one utterance. labels (list): The list containing the :py:class:`audiomate.annotations.Label`.", "tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set of distinct tokens.", "= LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>> Label('b', start=3.95,", "the label-list. Returns: (LabelList): New label-list. Example: >>> ll =", "label.end = min(iv_end + overlap, label.end) if shift_times: orig_start =", ">>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a',", "i < len(cutting_points): iv_end = cutting_points[i] else: iv_end = float('inf')", "label.end, label)) self.label_tree.update(ivs) def apply(self, fn): \"\"\" Apply the given", "dictionary with distinct label-values as keys. 
Every value is a", "(key) the number of occurrences (value). Example: >>> ll =", "labels is greater than ``overlap_threshold``, an Exception is thrown. Args:", "def create_single(cls, value, idx='default'): \"\"\" Create a label-list with a", "sorted_by_start: if last_label_end is None or (last_label_end - label.start <", ">>> ]) >>> ll.label_total_duration() {'a': 7.5 'b': 7.0} \"\"\" durations", "= LabelList(labels=[ >>> Label('a', 3, 5), >>> Label('b', 5, 8),", "self.label_tree) data_other = (other.idx, other.label_tree) return data_this == data_other def", "of a weak ref return LabelList( idx=self.idx, labels=copy.deepcopy([iv.data for iv", "in self.labels: separated_lls[label.value].add(label) for ll in separated_lls.values(): ll.idx = self.idx", "splits.append(cp_splits) iv_start = iv_end return splits # # Convenience Constructors", "to be considered as overlapping. (default: 0.0) Example: >>> ll", "tokens based on all labels. Joins all token from all", "receives the current label which can then be edited in", "tokens = [] last_label_end = None for label in sorted_by_start:", ">>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>>", "by the start of the label. If the overlapping between", "also overlapping ones are returned. (default ``False``) Returns: list: List", ">>> s['b'].labels [Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)] \"\"\" separated_lls", "in self.label_tree: yield interval.data def __len__(self): return self.label_tree.__len__() def __copy__(self):", "durations def label_values(self): \"\"\" Return a list of all occuring", "labels in the range. Example: >>> ll = LabelList(labels=[ >>>", "separated(self): \"\"\" Create a separate Label-List for every distinct label-value.", "a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ])", "Label('a', 0, 5), >>> Label('b', 5, 10), >>>]) >>> >>>", "14), >>> Label('a', 15, 18.5) >>> ]) >>> ll.label_total_duration() {'a':", "to the end of the list. 
Args: labels (list): Labels", "start=7.0, end=10.2), >>> Label('d', start=10.3, end=14.0) >>> ]) >>> ll.join('", "= self.label_tree.copy() # recursivly find a group of overlapping labels", "= next_interval.data.value for overlap in overlapping: ov_start = min(ov_start, overlap.begin)", "import copy import intervaltree from .label import Label class LabelList:", ">>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c',", ">>> Label('b', start=3.95, end=6.0), >>> Label('a', start=7.0, end=10.2), >>> Label('b',", "List of values(str) that should be created and appended to", "== 0: raise ValueError('At least one cutting-point is needed!') #", "within the list. Returns: dict: A dictionary containing for every", "Maximum overlap between two consecutive labels. Returns: str: A string", "we have to loop in sorted order cutting_points = sorted(cutting_points)", "label.end, label) def addl(self, value, start=0.0, end=float('inf')): \"\"\" Shortcut for", "range is defined as a part of the label-list for", "< threshold``. Args: threshold (float): Maximal distance between two labels", "self self.label_tree.addi(label.start, label.end, label) def addl(self, value, start=0.0, end=float('inf')): \"\"\"", "= collections.defaultdict(int) for label in self: occurrences[label.value] += 1 return", "class LabelList: \"\"\" Represents a list of labels which describe", "all labels concatenated together. The order of the labels is", "add(self, label): \"\"\" Add a label to the end of", "start=start, end=end)) def update(self, labels): \"\"\" Add a list of", "ll.idx = self.idx return separated_lls def labels_in_range(self, start, end, fully_included=False):", "corpus for one utterance. 
labels (list): The list containing the", "in self} return sorted(all_labels) def label_count(self): \"\"\" Return for each", "for label in self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens #", "self.idx = idx self.utterance = None self.label_tree = intervaltree.IntervalTree() if", "label.end -= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end return splits", "... Label('a_label', 1.5, 2.7), ... Label('b_label', 1.0, 2.0), ... ])", "number of occurrences (value). Example: >>> ll = LabelList(labels=[ >>>", "threshold range_end = interval.end + threshold direct_overlaps = all_intervals.overlap(range_start, range_end)", "last_label_end is None or (last_label_end - label.start < overlap_threshold and", "the points in seconds, where the label-list is splitted. shift_times(bool):", "which can then be edited in place. Args: fn (func):", "7.0} \"\"\" durations = collections.defaultdict(float) for label in self: durations[label.value]", "= {l.value for l in self} return sorted(all_labels) def label_count(self):", "is thrown. Args: delimiter (str): A string to join two", "list: List of labels in the range. Example: >>> ll", "weak ref return LabelList( idx=self.idx, labels=[iv.data for iv in self.label_tree]", "= idx self.utterance = None self.label_tree = intervaltree.IntervalTree() if labels", "the label-list within a corpus for one utterance. labels (list):", "__copy__(self): # utterance is ignored intentionally, # since it is", "ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)] \"\"\" if", "``overlap_threshold``, an Exception is thrown. Args: delimiter (str): The delimiter", "Label('b', start=3.95, end=6.0), >>> Label('a', start=7.0, end=10.2), >>> Label('b', start=10.3,", "seconds (value). 
Example: >>> ll = LabelList(labels=[ >>> Label('a', 3,", "['idx', 'label_tree', 'utterance'] def __init__(self, idx='default', labels=None): self.idx = idx", ">>> ll.merge_overlapping_labels() >>> ll.labels [ Label('a_label', 1.0, 2.7), Label('b_label', 1.0,", "yields one range (tuple start/end/list-of-labels) at a time. Example: >>>", "from all labels (``label.tokenized()```). If the overlapping between two labels", "labels with the same value def recursive_overlaps(interval): range_start = interval.begin", "+ 1): if i < len(cutting_points): iv_end = cutting_points[i] else:", "range_end = interval.end + threshold direct_overlaps = all_intervals.overlap(range_start, range_end) all_overlaps", "between two consecutive labels. Returns: str: A string with all", "labels concatenated together. The order of the labels is defined", "of tokens based on all labels. Joins all token from", "# utterance is ignored intentionally, # since it is kind", "the given values. All labels will have default start/end values", "label in self.labels: separated_lls[label.value].add(label) for ll in separated_lls.values(): ll.idx =", "no labels are defined. include_labels(list): If not empty, only the", "And so on. Args: cutting_points(list): List of floats defining the", "intervals] def ranges(self, yield_ranges_without_labels=False, include_labels=None): \"\"\" Generate all ranges of", "in the range. Example: >>> ll = LabelList(labels=[ >>> Label('a',", "15.0)] If ``shift_times = True``, the times are adjusted to", "of 0 and ``inf``. Args: values(list): List of values(str) that", "in self.label_tree], memo) ) @property def labels(self): \"\"\" Return list", "overlapping and merge equal ranges to a list of labels", "= self self.label_tree.addi(label.start, label.end, label) def addl(self, value, start=0.0, end=float('inf')):", "\"\"\" Return start of the earliest starting label (lower bound).", "are returned. Otherwise also overlapping ones are returned. 
(default ``False``)", "is defined as a part of the label-list for which", "last_label_end = label.end else: raise ValueError('Labels overlap, not able to", "See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set of distinct tokens. \"\"\"", "equal ranges to a list of labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[])", "returned. (default ``False``) Returns: list: List of labels in the", "label to add. \"\"\" label.label_list = self self.label_tree.addi(label.start, label.end, label)", "- ') 'a - b - c - d' \"\"\"", "the earliest starting label (lower bound). \"\"\" return self.label_tree.begin() @property", "and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else: raise", "'): \"\"\" Return a list of all tokens occurring in", "within the given range. Also labels that only overlap are", "list(self) @property def start(self): \"\"\" Return start of the earliest", "label-value (key) the total duration in seconds (value). Example: >>>", "able to define the correct order') return tokens # #", "If ``shift_times = True``, the times are adjusted to be", "['a', 'b', 'c', 'd'] \"\"\" all_labels = {l.value for l", "orig_start cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end return splits # #", "10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)]", "yield interval.data def __len__(self): return self.label_tree.__len__() def __copy__(self): # utterance", "label.label_list = self ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs) def apply(self, fn):", "fn): \"\"\" Apply the given function `fn` to every label", "the label-list. 
Args: delimiter (str): The delimiter used to split", "]) >>> ll.label_total_duration() {'a': 7.5 'b': 7.0} \"\"\" durations =", "end=end)) def update(self, labels): \"\"\" Add a list of labels", "tree_copy.remove(iv) def reduce(x, y): x.append(y) return x # Split labels", "sorted(tree_copy) last_end = intervals[0].begin # yield range by range for", "start=10.3, end=14.0)] \"\"\" separated_lls = collections.defaultdict(LabelList) for label in self.labels:", "concatenated together. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a',", "appended to the label-list. idx(str): The idx of the label-list.", "Label('a d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>>", "idx(str): The idx of the label-list. Returns: (LabelList): New label-list.", "# - Remove them # - Create a concatenated new", "overlap in overlapping: ov_start = min(ov_start, overlap.begin) ov_end = max(ov_end,", "distance between two labels to be considered as overlapping. (default:", "# Restructuring # def separated(self): \"\"\" Create a separate Label-List", "together. The order of the labels is defined by the", "of: class: `audiomate.annotations.LabelList`. Example: >>> ll = LabelList(labels=[ >>> Label('a',", ")) # Replace the old labels with the updated ones", "start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd',", "2.7), ... Label('b_label', 1.0, 2.0), ... 
]) >>> ll.merge_overlapping_labels() >>>", "all_intervals.discard(interval) for overlap in direct_overlaps: if overlap.data.value == interval.data.value: all_overlaps.extend(recursive_overlaps(overlap))", "times for iv in intervals: label = copy.deepcopy(iv.data) label.start =", ">>> Label('d', start=10.3, end=14.0) >>> ]) >>> ll.join(' - ')", "idx self.utterance = None self.label_tree = intervaltree.IntervalTree() if labels is", "(other.idx, other.label_tree) return data_this == data_other def __iter__(self): for interval", ">>> ll.label_values() ['a', 'b', 'c', 'd'] \"\"\" all_labels = {l.value", "The result is a list of label-lists corresponding to each", "> ]) >>> next(ranges) (4.5, 5.1, []) >>> next(ranges) (5.1,", "labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args: cutting_points(list):", "list of labels. \"\"\" return list(self) @property def start(self): \"\"\"", "3.0), Label(another_label, 3.0, 4.0)] \"\"\" for label in self.labels: fn(label)", ") cp_splits = LabelList(idx=self.idx) # Extract labels from intervals with", "total_length(self): \"\"\" Return the cumulative length of all labels (Number", "concat_values.append(label.value) last_label_end = label.end else: raise ValueError('Labels overlap, not able", "given value. \"\"\" return LabelList(idx=idx, labels=[ Label(value=value) ]) @classmethod def", "res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ] \"\"\"", "3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5),", "of the original label-list. overlap(float): Amount of overlap in seconds.", "Start-time in seconds. end(float): End-time in seconds. fully_included(bool): If ``True``,", "\"\"\" if fully_included: intervals = self.label_tree.envelop(start, end) else: intervals =", "same value. 
Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a',", "[Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)] \"\"\" for label in", "'b': 2} \"\"\" occurrences = collections.defaultdict(int) for label in self:", "with a single label containing the given value. \"\"\" return", "if labels is not None: self.update(labels) def __eq__(self, other): data_this", "apply to every label Example: >>> ll = LabelList(labels=[ ...", ">>> ll.join(' - ') 'a - b - c -", "that are within the given range. Also labels that only", "0, inf), ] \"\"\" ll = LabelList(idx=idx) for label_value in", "Return a ordered list of tokens based on all labels.", "Label('b_label', 1.0, 2.0), ] \"\"\" updated_labels = [] all_intervals =", "start/end values of 0 and ``inf``. Args: values(list): List of", "Two labels are considered overlapping, if ``l2.start - l1.end <", "split labels into tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set", "< overlap_threshold and last_label_end > 0): concat_values.append(label.value) last_label_end = label.end", "[] last_label_end = None for label in sorted_by_start: if last_label_end", "if len(cutting_points) == 0: raise ValueError('At least one cutting-point is", "audiomate.annotations.Label at 0x1090527c8 > ]) >>> next(ranges) (4.5, 5.1, [])", "= intervaltree.IntervalTree() if labels is not None: self.update(labels) def __eq__(self,", "Label('b', 0.4, 5.4) ] \"\"\" if len(cutting_points) == 0: raise", "q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a',", "= interval.begin - threshold range_end = interval.end + threshold direct_overlaps", "in the list will be considered. Returns: generator: A generator", "list. Args: labels (list): Labels to add. 
\"\"\" ivs =", "== interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps # For every remaining interval", "containing for every label-value (key) the number of occurrences (value).", "ll.split([4.6]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.6)] >>>", "in sorted_by_start: if last_label_end is None or (last_label_end - label.start", "+= 1.0 ... label.end += 1.0 ... >>> ll.apply(shift_labels) >>>", "an utterance. An utterance can have multiple label-lists. Args: idx", "1): if i < len(cutting_points): iv_end = cutting_points[i] else: iv_end", "(Label): The label to add. \"\"\" label.label_list = self self.label_tree.addi(label.start,", "fn (func): Function to apply to every label Example: >>>", "0 and ``inf``. Args: values(list): List of values(str) that should", "values. Returns: list: Lexicographically sorted list (str) of label values.", "If True, start and end-time are shifted in splitted label-lists.", ">>> Label('b', 5, 10), >>> Label('c', 11, 15), >>>]) >>>", "label in self: durations[label.value] += label.duration return durations def label_values(self):", "in this label list. `fn` is a function of one", "labels are considered overlapping, if ``l2.start - l1.end < threshold``.", ">>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c',", "0x1090484c8 > ]) \"\"\" tree_copy = self.label_tree.copy() # Remove labels", "else: iv_end = float('inf') # get all intervals intersecting range", ">>> res = ll.split([4.6]) >>> len(res) 4 >>> res[0].labels [Label('a',", "old labels with the updated ones self.label_tree.clear() self.update(updated_labels) # #", "]) >>> ll.merge_overlapping_labels() >>> ll.labels [ Label('a_label', 1.0, 2.7), Label('b_label',", "ov_end = max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start, ov_end ))", "def labels(self): \"\"\" Return list of labels. 
\"\"\" return list(self)", "7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>> ranges =", "(str): An unique identifier for the label-list within a corpus", "end-time are shifted in splitted label-lists. So the start is", "a time. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2,", "Label('b', 5, 10), >>> Label('c', 11, 15), >>>]) >>> >>>", "= ll.separate() >>> s['a'].labels [Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]", "are adjusted to be relative to the cutting-points for every", "`audiomate.annotations.LabelList`. Example: >>> ll = LabelList(labels=[ >>> Label('a', 0, 5),", "``0`` and ``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]`` and", "'f', 'g'] \"\"\" sorted_by_start = sorted(self.labels) tokens = [] last_label_end", "Generate all ranges of the label-list. A range is defined", "an Exception is thrown. Args: delimiter (str): A string to", "will be considered. Returns: generator: A generator which yields one", "for which the same labels are defined. Args: yield_ranges_without_labels(bool): If", "where the label-list is splitted. shift_times(bool): If True, start and", "threshold``. Args: threshold (float): Maximal distance between two labels to", "considered as overlapping. (default: 0.0) Example: >>> ll = LabelList(labels=[", ">>> Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ',", "14), >>> Label('d', 15, 18) >>> ]) >>> ll.label_values() ['a',", "next_interval.data.value for overlap in overlapping: ov_start = min(ov_start, overlap.begin) ov_end", "> 0): concat_values.append(label.value) last_label_end = label.end else: raise ValueError('Labels overlap,", "or (last_label_end - label.start < overlap_threshold and last_label_end > 0):", "order. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a d", "range if necessary if yield_ranges_without_labels and iv.begin > last_end: yield", "Label('a_label', 1.0, 2.0), ... Label('another_label', 2.0, 3.0) ... 
]) >>>", "a string with all labels concatenated together. The order of", "end))``. \"\"\" self.add(Label(value, start=start, end=end)) def update(self, labels): \"\"\" Add", ") def __deepcopy__(self, memo): # utterance is ignored intentionally, #", "else: raise ValueError('Labels overlap, not able to define the correct", "[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)] \"\"\" separated_lls = collections.defaultdict(LabelList)", "is defined by the number of cutting-points (``x == len(cutting_points)", "5.1, []) >>> next(ranges) (5.1, 7.2, [ < audiomate.annotations.label.Label at", "Exception is thrown. Args: delimiter (str): A string to join", ">>> Label('d', 15, 18) >>> ]) >>> ll.label_values() ['a', 'b',", "ov_start = min(ov_start, overlap.begin) ov_end = max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label(", "12.0]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>>", "collections.defaultdict(float) for label in self: durations[label.value] += label.duration return durations", "labels when overlapping and merge equal ranges to a list", "Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('a', start=7.0,", "Example: >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>>", "created and appended to the label-list. idx(str): The idx of", "start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0,", "self.label_tree.clear() self.update(updated_labels) # # Statistics # def label_total_duration(self): \"\"\" Return", ">>> Label('a', 0, 5), >>> Label('b', 5, 10), >>>]) >>>", "\"\"\" return self.label_tree.end() @property def total_length(self): \"\"\" Return the cumulative", "overlap.data.value == interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps # For every remaining", "labels to be considered as overlapping. (default: 0.0) Example: >>>", "occurring in the label-list. 
Args: delimiter (str): The delimiter used", "Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c', start=7.0,", "A dictionary containing for every label-value (key) the number of", "ll.labels [ Label('a_label', 1.0, 2.7), Label('b_label', 1.0, 2.0), ] \"\"\"", "... >>> ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0, 3.0), Label(another_label, 3.0,", "\"\"\" separated_lls = collections.defaultdict(LabelList) for label in self.labels: separated_lls[label.value].add(label) for", "points in seconds, where the label-list is splitted. shift_times(bool): If", "Label-list 0 contains labels between ``0`` and ``cutting_points[0]``. Label-list 1", "return LabelList( idx=self.idx, labels=[iv.data for iv in self.label_tree] ) def", "the number of occurrences within the list. Returns: dict: A", "# since it is kind of a weak ref return", "data_this == data_other def __iter__(self): for interval in self.label_tree: yield", "'d', 'q', 'b', 'c', 'a', 'f', 'g'] \"\"\" sorted_by_start =", "for overlap in overlapping: ov_start = min(ov_start, overlap.begin) ov_end =", "range (tuple start/end/list-of-labels) at a time. Example: >>> ll =", "s['b'].labels [Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)] \"\"\" separated_lls =", "= LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('another_label', 2.0, 3.0)", "An unique identifier for the label-list within a corpus for", "not included if include_labels is not None: for iv in", "= float('inf') # get all intervals intersecting range intervals =", "label-lists corresponding to each part. Label-list 0 contains labels between", "ones self.label_tree.clear() self.update(updated_labels) # # Statistics # def label_total_duration(self): \"\"\"", "') 'a - b - c - d' \"\"\" sorted_by_start", "4.5, [ < audiomate.annotations.Label at 0x1090527c8 > ]) >>> next(ranges)", "sorted list (str) of label values. 
Example: >>> ll =", "dictionary containing for every label-value (key) the total duration in", "= copy.deepcopy(iv.data) label.start = max(0, iv_start - overlap, label.start) label.end", "inf), Label('z', 0, inf), ] \"\"\" ll = LabelList(idx=idx) for", "label-lists. Args: idx (str): An unique identifier for the label-list", "end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter='", "ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0,", "seconds, where the label-list is splitted. shift_times(bool): If True, start", "\"\"\" Return for each label the number of occurrences within", ">>> Label('b', start=10.3, end=14.0) >>> ]) >>> s = ll.separate()", "with the updated ones self.label_tree.clear() self.update(updated_labels) # # Statistics #", "Label('d', 10.5, 14) >>>]) >>> ranges = ll.ranges() >>> next(ranges)", "durations[label.value] += label.duration return durations def label_values(self): \"\"\" Return a", "cutting-point is needed!') # we have to loop in sorted", "labels not included if include_labels is not None: for iv", ">>> Label('a d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0),", "Returns: (LabelList): New label-list. Example: >>> ll = LabelList.with_label_values(['a', 'x',", "(default: 0.0) Example: >>> ll = LabelList(labels=[ ... 
Label('a_label', 1.0,", "Returns: str: A list containing tokens of all labels ordered", "Split labels when overlapping and merge equal ranges to a", "- threshold range_end = interval.end + threshold direct_overlaps = all_intervals.overlap(range_start,", "Label('b', 10, 14), >>> Label('a', 15, 18.5) >>> ]) >>>", "Label('a', 0, inf), Label('x', 0, inf), Label('z', 0, inf), ]", "labels=[iv.data for iv in self.label_tree] ) def __deepcopy__(self, memo): #", "= max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start, ov_end )) #", "utterance is ignored intentionally, # since it is kind of", "greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str):", "this label list. `fn` is a function of one argument", "5.4) ] \"\"\" if len(cutting_points) == 0: raise ValueError('At least", "the number of occurrences (value). Example: >>> ll = LabelList(labels=[", "token from all labels (``label.tokenized()```). If the overlapping between two", "(list): Labels to add. \"\"\" ivs = [] for label", "into tokens. (default: space) overlap_threshold (float): Maximum overlap between two", "with the same value def recursive_overlaps(interval): range_start = interval.begin -", "start=7.0, end=10.2)] >>> s['b'].labels [Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]", "end=6.0), >>> Label('c', start=7.0, end=10.2), >>> Label('d', start=10.3, end=14.0) >>>", "(4.5, 5.1, []) >>> next(ranges) (5.1, 7.2, [ < audiomate.annotations.label.Label", "all occuring label values. 
Returns: list: Lexicographically sorted list (str)", "tokens = set() for label in self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))", "interval in self.label_tree: yield interval.data def __len__(self): return self.label_tree.__len__() def", "tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals = sorted(tree_copy) last_end = intervals[0].begin # yield", "overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str:", "which the same labels are defined. Args: yield_ranges_without_labels(bool): If True", "next(ranges) (4.5, 5.1, []) >>> next(ranges) (5.1, 7.2, [ <", "result is a list of label-lists corresponding to each part.", "is a list of label-lists corresponding to each part. Label-list", ">>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d',", "11, 15), >>>]) >>> >>> res = ll.split([4.1, 8.9, 12.0])", "cp_splits = LabelList(idx=self.idx) # Extract labels from intervals with updated", "= ll.split([4.6]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.6)]", "value. \"\"\" return LabelList(idx=idx, labels=[ Label(value=value) ]) @classmethod def with_label_values(cls,", "\"\"\" Return list of labels. \"\"\" return list(self) @property def", "label.label_list = self self.label_tree.addi(label.start, label.end, label) def addl(self, value, start=0.0,", "a new label-list containing labels with the given values. All", "self.labels) # # Alteration # def add(self, label): \"\"\" Add", "def labels_in_range(self, start, end, fully_included=False): \"\"\" Return a list of", "within a corpus for one utterance. 
labels (list): The list", "self.add(Label(value, start=start, end=end)) def update(self, labels): \"\"\" Add a list", "self ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs) def apply(self, fn): \"\"\" Apply", "return separated_lls def labels_in_range(self, start, end, fully_included=False): \"\"\" Return a", "new label-list containing labels with the given values. All labels", "overlap are included. Args: start(float): Start-time in seconds. end(float): End-time", "0.4), Label('b', 0.4, 5.4) ] \"\"\" if len(cutting_points) == 0:", "new label-lists. x is defined by the number of cutting-points", ">>> label_list = LabelList(idx='transcription', labels=[ >>> Label('this', 0, 2), >>>", "# # Query Label Values # def join(self, delimiter=' ',", "so on. Args: cutting_points(list): List of floats defining the points", "7.2, 10.5)] \"\"\" if fully_included: intervals = self.label_tree.envelop(start, end) else:", "Returns: dict: A dictionary with distinct label-values as keys. Every", "with distinct label-values as keys. Every value is a LabelList", "tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens # # Query Label Values # def", "end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q',", "def __iter__(self): for interval in self.label_tree: yield interval.data def __len__(self):", "(3.2, 4.5, [ < audiomate.annotations.Label at 0x1090527c8 > ]) >>>", "at 0x1090527c8 > ]) >>> next(ranges) (4.5, 5.1, []) >>>", "them # - Create a concatenated new label while not", "res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ] >>>", "start is relative to the cutting point and not to", "the cumulative length of all labels (Number of characters). \"\"\"", "start, end, fully_included=False): \"\"\" Return a list of labels, that", "Return a list of all occuring label values. 
Returns: list:", "for overlap in direct_overlaps: if overlap.data.value == interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return", ">>> res = ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>>", "- label.start < overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end", "Add a label to the end of the list. Args:", "in splitted label-lists. So the start is relative to the", "\"\"\" tokens = set() for label in self: tokens =", "duration in seconds (value). Example: >>> ll = LabelList(labels=[ >>>", "{'a': 7.5 'b': 7.0} \"\"\" durations = collections.defaultdict(float) for label", "the updated ones self.label_tree.clear() self.update(updated_labels) # # Statistics # def", "and last_label_end > 0): concat_values.append(label.value) last_label_end = label.end else: raise", "Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)", "d' \"\"\" sorted_by_start = sorted(self.labels) concat_values = [] last_label_end =", "y): x.append(y) return x # Split labels when overlapping and", "Label('b', 5.0, 8.9) ] >>> res[2].labels [ Label('b', 8.9, 10.0),", "a list of all occuring label values. Returns: list: Lexicographically", "data_initializer=[]) intervals = sorted(tree_copy) last_end = intervals[0].begin # yield range", "that should be created and appended to the label-list. idx(str):", "3 'b': 2} \"\"\" occurrences = collections.defaultdict(int) for label in", "(``label.tokenized()```). If the overlapping between two labels is greater than", "if include_labels is not None: for iv in list(tree_copy): if", "yield range by range for iv in intervals: # yield", "[ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels", "concat_values = [] last_label_end = None for label in sorted_by_start:", "of labels in the range. 
Example: >>> ll = LabelList(labels=[", "overlap ) cp_splits = LabelList(idx=self.idx) # Extract labels from intervals", ">>> Label('d', 10.5, 14) >>>]) >>> ranges = ll.ranges() >>>", "dictionary containing for every label-value (key) the number of occurrences", "3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('a', 7.2, 10.5),", "labels. overlap_threshold (float): Maximum overlap between two consecutive labels. Returns:", "LabelList(labels=[ >>> Label('a', 3, 5), >>> Label('b', 5, 8), >>>", "idx (str): An unique identifier for the label-list within a", "into tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set of distinct", "label-list. overlap(float): Amount of overlap in seconds. This amount is", "Label-List for every distinct label-value. Returns: dict: A dictionary with", "value. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0,", "range by range for iv in intervals: # yield an", "= iv_end return splits # # Convenience Constructors # @classmethod", "\"\"\" return list(self) @property def start(self): \"\"\" Return start of", "Shortcut for ``add(Label(value, start, end))``. \"\"\" self.add(Label(value, start=start, end=end)) def", "end=14.0) >>> ]) >>> ll.join(' - ') 'a - b", "15, 18.5) >>> ]) >>> ll.label_total_duration() {'a': 7.5 'b': 7.0}", ">>> ll = LabelList(idx='some', labels=[ >>> Label('a d q', start=0,", "x # Split labels when overlapping and merge equal ranges", "self: occurrences[label.value] += 1 return occurrences def all_tokens(self, delimiter=' '):", "given range. Also labels that only overlap are included. 
Args:", "to apply to every label Example: >>> ll = LabelList(labels=[", "return all_overlaps # For every remaining interval # - Find", "= self ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs) def apply(self, fn): \"\"\"", "recursively # - Remove them # - Create a concatenated", "Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2,", "ov_value = next_interval.data.value for overlap in overlapping: ov_start = min(ov_start,", "ov_start = float('inf') ov_end = 0.0 ov_value = next_interval.data.value for", "= LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10),", ">>> Label('c', 11, 15), >>>]) >>> >>> res = ll.split([4.1,", "def tokenized(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a ordered list", "of occurrences within the list. Returns: dict: A dictionary containing", "label value the total duration of all occurrences. Returns: dict:", "overlap) label.start -= orig_start label.end -= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start", "intervals recursively # - Remove them # - Create a", "self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens # # Query Label", "all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start, ov_end )) # Replace the old", "thrown. Args: delimiter (str): The delimiter used to split labels", "def __deepcopy__(self, memo): # utterance is ignored intentionally, # since", "5.1, 8.9), >>> Label('a', 7.2, 10.5), >>> Label('b', 10.5, 14),", "# # Statistics # def label_total_duration(self): \"\"\" Return for each", "]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c',", "s['a'].labels [Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)] >>> s['b'].labels [Label('b',", "5), >>> Label('b', 5, 10), >>> Label('c', 11, 15), >>>])", "... Label('another_label', 2.0, 3.0) ... 
]) >>> def shift_labels(label): ...", "Create a concatenated new label while not all_intervals.is_empty(): next_interval =", "if shift_times: orig_start = max(0, iv_start - overlap) label.start -=", "def label_values(self): \"\"\" Return a list of all occuring label", "the :py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance): The utterance this label-list is", "8) >>> ]) \"\"\" __slots__ = ['idx', 'label_tree', 'utterance'] def", "shift_labels(label): ... label.start += 1.0 ... label.end += 1.0 ...", "intervals intersecting range intervals = self.label_tree.overlap( iv_start - overlap, iv_end", "end of the list. Args: labels (list): Labels to add.", "of characters). \"\"\" return sum(label.length for label in self.labels) #", "iv_start - overlap, label.start) label.end = min(iv_end + overlap, label.end)", "'c', 'd'] \"\"\" all_labels = {l.value for l in self}", "yield (last_end, iv.begin, []) yield (iv.begin, iv.end, iv.data) last_end =", "a ordered list of tokens based on all labels. Joins", "\"\"\" Return a list of all tokens occurring in the", "tokens # # Query Label Values # def join(self, delimiter='", "]) \"\"\" tree_copy = self.label_tree.copy() # Remove labels not included", "= None self.label_tree = intervaltree.IntervalTree() if labels is not None:", "(lower bound). \"\"\" return self.label_tree.begin() @property def end(self): \"\"\" Return", "end=6.0), Label('b', start=10.3, end=14.0)] \"\"\" separated_lls = collections.defaultdict(LabelList) for label", "def with_label_values(cls, values, idx='default'): \"\"\" Create a new label-list containing", "if necessary if yield_ranges_without_labels and iv.begin > last_end: yield (last_end,", "included if include_labels is not None: for iv in list(tree_copy):", "def shift_labels(label): ... label.start += 1.0 ... 
label.end += 1.0", ">>>]) >>> >>> res = ll.split([4.6]) >>> len(res) 4 >>>", "end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c', start=7.0, end=10.2), >>>", "of label-lists corresponding to each part. Label-list 0 contains labels", "string with all labels concatenated together. Example: >>> ll =", ">>> ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)]", "number of cutting-points (``x == len(cutting_points) + 1``). The result", "of the list. Args: labels (list): Labels to add. \"\"\"", "in self.labels) # # Alteration # def add(self, label): \"\"\"", "cutting_points[i] else: iv_end = float('inf') # get all intervals intersecting", "ll.join(' - ') 'a - b - c - d'", ":py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance): The utterance this label-list is belonging", "5, 10), >>> Label('c', 11, 15), >>>]) >>> >>> res", "\"\"\" Return end of the lastly ending label (upper bound).", "interval # - Find overlapping intervals recursively # - Remove", ">>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14), >>> Label('d',", "of labels. \"\"\" return list(self) @property def start(self): \"\"\" Return", "max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start, ov_end )) # Replace", "and end-time are shifted in splitted label-lists. So the start", "label in self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens # #", "iv in list(tree_copy): if iv.data.value not in include_labels: tree_copy.remove(iv) def", "to be relative to the cutting-points for every label-list but", "+= 1 return occurrences def all_tokens(self, delimiter=' '): \"\"\" Return", "with the same value. 
Example: >>> ll = LabelList(idx='some', labels=[", "iv.data.value not in include_labels: tree_copy.remove(iv) def reduce(x, y): x.append(y) return", "group of overlapping labels with the same value def recursive_overlaps(interval):", "Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14), >>> Label('d', 15,", "> 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else: raise ValueError('Labels overlap,", "same value. Two labels are considered overlapping, if ``l2.start -", "start=0, end=4), Label('a', start=7.0, end=10.2)] >>> s['b'].labels [Label('b', start=3.95, end=6.0),", "iv_end = float('inf') # get all intervals intersecting range intervals", "# # Alteration # def add(self, label): \"\"\" Add a", "containing the :py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance): The utterance this label-list", "labels with the same value. Example: >>> ll = LabelList(idx='some',", "labels will have default start/end values of 0 and ``inf``.", "based on all labels. Joins all token from all labels", "label-list containing labels with the given values. All labels will", "threshold=0.0): \"\"\" Merge overlapping labels with the same value. Two", "for each distinct label value the total duration of all", "1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so on.", "Args: values(list): List of values(str) that should be created and", ">>> ll.labels [Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)] \"\"\" for", "self.label_tree = intervaltree.IntervalTree() if labels is not None: self.update(labels) def", "\"\"\" tree_copy = self.label_tree.copy() # Remove labels not included if", "== len(cutting_points) + 1``). The result is a list of", "\"\"\" Create a new label-list containing labels with the given", "defined. 
Args: yield_ranges_without_labels(bool): If True also yields ranges for which", "overlap, label.start) label.end = min(iv_end + overlap, label.end) if shift_times:", ">>> Label('b', 10, 14), >>> Label('a', 15, 18.5) >>> ])", "overlap, not able to define the correct order') return delimiter.join(concat_values)", "original label-list. overlap(float): Amount of overlap in seconds. This amount", "to loop in sorted order cutting_points = sorted(cutting_points) splits =", "range for iv in intervals: # yield an empty range", "of all labels (Number of characters). \"\"\" return sum(label.length for", "threshold direct_overlaps = all_intervals.overlap(range_start, range_end) all_overlaps = [interval] all_intervals.discard(interval) for", "Label('a', start=7.0, end=10.2), >>> Label('b', start=10.3, end=14.0) >>> ]) >>>", "separated_lls = collections.defaultdict(LabelList) for label in self.labels: separated_lls[label.value].add(label) for ll", "# @classmethod def create_single(cls, value, idx='default'): \"\"\" Create a label-list", "given values. All labels will have default start/end values of", "7.2, 10.5), >>> Label('d', 10.5, 14), >>> Label('d', 15, 18)", "(str): A string to join two consecutive labels. overlap_threshold (float):", "``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args: cutting_points(list): List of", "Label('c', 7.2, 10.5)] \"\"\" if fully_included: intervals = self.label_tree.envelop(start, end)", "Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>> ll.labels_in_range(6.2,", "overlap_threshold=0.1): \"\"\" Return a ordered list of tokens based on", "utterance (Utterance): The utterance this label-list is belonging to. 
label_tree", "self.update(updated_labels) # # Statistics # def label_total_duration(self): \"\"\" Return for", "If ``True``, only labels fully included in the range are", "# def join(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a string", "s = ll.separate() >>> s['a'].labels [Label('a', start=0, end=4), Label('a', start=7.0,", "values. All labels will have default start/end values of 0", "tokens # # Restructuring # def separated(self): \"\"\" Create a", "labels to the end of the list. Args: labels (list):", "iv in intervals: # yield an empty range if necessary", "Returns: list: A list of of: class: `audiomate.annotations.LabelList`. Example: >>>", "delimiter.join(concat_values) def tokenized(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a ordered", "defined by the number of cutting-points (``x == len(cutting_points) +", "str: A list containing tokens of all labels ordered according", "separated_lls[label.value].add(label) for ll in separated_lls.values(): ll.idx = self.idx return separated_lls", "8.9, 10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0,", "- l1.end < threshold``. Args: threshold (float): Maximal distance between", "occurrences[label.value] += 1 return occurrences def all_tokens(self, delimiter=' '): \"\"\"", "label (upper bound). \"\"\" return self.label_tree.end() @property def total_length(self): \"\"\"", "interval-tree storing the labels. Example: >>> label_list = LabelList(idx='transcription', labels=[", "intervals = self.label_tree.overlap(start, end) return [iv.data for iv in intervals]", "return LabelList( idx=self.idx, labels=copy.deepcopy([iv.data for iv in self.label_tree], memo) )", "Amount of overlap in seconds. This amount is subtracted from", "def __copy__(self): # utterance is ignored intentionally, # since it", "- overlap) label.start -= orig_start label.end -= orig_start cp_splits.add(label) splits.append(cp_splits)", "of of: class: `audiomate.annotations.LabelList`. 
Example: >>> ll = LabelList(labels=[ >>>", "only the label values in the list will be considered.", "splitted. shift_times(bool): If True, start and end-time are shifted in", "(LabelList): New label-list. Example: >>> ll = LabelList.with_label_values(['a', 'x', 'z'],", "import Label class LabelList: \"\"\" Represents a list of labels", "start-cutting-point, and added to a end-cutting-point. Returns: list: A list", ">>> next(ranges) (4.5, 5.1, []) >>> next(ranges) (5.1, 7.2, [", "range(len(cutting_points) + 1): if i < len(cutting_points): iv_end = cutting_points[i]", "then be edited in place. Args: fn (func): Function to", "the cutting-points for every label-list but the first. >>> ll", ">>> ]) >>> ll.label_count() {'a': 3 'b': 2} \"\"\" occurrences", "inf), ] \"\"\" ll = LabelList(idx=idx) for label_value in values:", "recursivly find a group of overlapping labels with the same", "Label('b', 5, 8), >>> Label('a', 8, 10), >>> Label('b', 10,", "list of of: class: `audiomate.annotations.LabelList`. Example: >>> ll = LabelList(labels=[", "label-list is belonging to. label_tree (IntervalTree): The interval-tree storing the", "delimiter (str): The delimiter used to split labels into tokens.", "if overlap.data.value == interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps # For every", "return LabelList(idx=idx, labels=[ Label(value=value) ]) @classmethod def with_label_values(cls, values, idx='default'):", "... ]) >>> ll.merge_overlapping_labels() >>> ll.labels [ Label('a_label', 1.0, 2.7),", "= True``, the times are adjusted to be relative to", "l1.end < threshold``. Args: threshold (float): Maximal distance between two", "join two consecutive labels. overlap_threshold (float): Maximum overlap between two", ">>> ranges = ll.ranges() >>> next(ranges) (3.2, 4.5, [ <", "occurrences def all_tokens(self, delimiter=' '): \"\"\" Return a list of", "label-list. idx(str): The idx of the label-list. 
Returns: (LabelList): New", ">>> Label('a', 8, 10), >>> Label('b', 10, 14), >>> Label('a',", "Label('d', 15, 18) >>> ]) >>> ll.label_values() ['a', 'b', 'c',", "'a - b - c - d' \"\"\" sorted_by_start =", "and iv.begin > last_end: yield (last_end, iv.begin, []) yield (iv.begin,", "find a group of overlapping labels with the same value", "for ll in separated_lls.values(): ll.idx = self.idx return separated_lls def", "are shifted in splitted label-lists. So the start is relative", "Label('x', 0, inf), Label('z', 0, inf), ] \"\"\" ll =", "for iv in self.label_tree], memo) ) @property def labels(self): \"\"\"", "iv in self.label_tree], memo) ) @property def labels(self): \"\"\" Return", ">>> res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ]", "2.0, 3.0) ... ]) >>> def shift_labels(label): ... label.start +=", "10), >>> Label('b', 10, 14), >>> Label('a', 15, 18.5) >>>", "end) else: intervals = self.label_tree.overlap(start, end) return [iv.data for iv", "considered overlapping, if ``l2.start - l1.end < threshold``. Args: threshold", ">>> Label('b', 10.5, 14), >>> Label('a', 15, 18) >>> ])", "Otherwise also overlapping ones are returned. (default ``False``) Returns: list:", "labels concatenated together. Example: >>> ll = LabelList(idx='some', labels=[ >>>", "if i < len(cutting_points): iv_end = cutting_points[i] else: iv_end =", "function `fn` to every label in this label list. `fn`", "label.end) if shift_times: orig_start = max(0, iv_start - overlap) label.start", "label.end += 1.0 ... >>> ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0,", "'letters' >>> ll.labels [ Label('a', 0, inf), Label('x', 0, inf),", "in intervals: label = copy.deepcopy(iv.data) label.start = max(0, iv_start -", "in seconds. end(float): End-time in seconds. fully_included(bool): If ``True``, only", "concatenated together. The order of the labels is defined by", ":class:`set`: A set of distinct tokens. \"\"\" tokens = set()", "label (Label): The label to add. 
\"\"\" label.label_list = self", "to define the correct order') return delimiter.join(concat_values) def tokenized(self, delimiter='", "ll.idx 'letters' >>> ll.labels [ Label('a', 0, inf), Label('x', 0,", "1.0, 2.0), ... Label('another_label', 2.0, 3.0) ... ]) >>> def", "distinct label-values as keys. Every value is a LabelList containing", "i in range(len(cutting_points) + 1): if i < len(cutting_points): iv_end", "labels (``label.tokenized()```). If the overlapping between two labels is greater", "# def label_total_duration(self): \"\"\" Return for each distinct label value", "> last_end: yield (last_end, iv.begin, []) yield (iv.begin, iv.end, iv.data)", "be considered. Returns: generator: A generator which yields one range", "iv_start - overlap, iv_end + overlap ) cp_splits = LabelList(idx=self.idx)", "label-list. A range is defined as a part of the", "ref return LabelList( idx=self.idx, labels=[iv.data for iv in self.label_tree] )", "is thrown. Args: delimiter (str): The delimiter used to split", "Return start of the earliest starting label (lower bound). \"\"\"", "\"\"\" Merge overlapping labels with the same value. Two labels", "two labels is greater than ``overlap_threshold``, an Exception is thrown.", "A string to join two consecutive labels. overlap_threshold (float): Maximum", "to every label in this label list. `fn` is a", "for every distinct label-value. Returns: dict: A dictionary with distinct", "4, 8) >>> ]) \"\"\" __slots__ = ['idx', 'label_tree', 'utterance']", "def addl(self, value, start=0.0, end=float('inf')): \"\"\" Shortcut for ``add(Label(value, start,", "start(float): Start-time in seconds. end(float): End-time in seconds. 
fully_included(bool): If", ">>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters') >>> ll.idx 'letters'", "intervals with updated times for iv in intervals: label =", "updated_labels = [] all_intervals = self.label_tree.copy() # recursivly find a", "of labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals = sorted(tree_copy) last_end =", "Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so", "to a list of labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals =", "14), >>> Label('a', 15, 18) >>> ]) >>> ll.label_count() {'a':", "all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps # For every remaining interval # -", "= sorted(cutting_points) splits = [] iv_start = 0.0 for i", "- Create a concatenated new label while not all_intervals.is_empty(): next_interval", "= None for label in sorted_by_start: if last_label_end is None", "5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14),", "def end(self): \"\"\" Return end of the lastly ending label", "2.0), ] \"\"\" updated_labels = [] all_intervals = self.label_tree.copy() #", "= self.label_tree.envelop(start, end) else: intervals = self.label_tree.overlap(start, end) return [iv.data", "describe an utterance. An utterance can have multiple label-lists. Args:", "order') return tokens # # Restructuring # def separated(self): \"\"\"", "# Statistics # def label_total_duration(self): \"\"\" Return for each distinct", "intersecting range intervals = self.label_tree.overlap( iv_start - overlap, iv_end +", "a end-cutting-point. Returns: list: A list of of: class: `audiomate.annotations.LabelList`.", "value def recursive_overlaps(interval): range_start = interval.begin - threshold range_end =", "for label in self: occurrences[label.value] += 1 return occurrences def", "for every label-value (key) the number of occurrences (value). 
Example:", "Label(another_label, 3.0, 4.0)] \"\"\" for label in self.labels: fn(label) def", "2.0), ... ]) >>> ll.merge_overlapping_labels() >>> ll.labels [ Label('a_label', 1.0,", "'b', 'c', 'a', 'f', 'g'] \"\"\" sorted_by_start = sorted(self.labels) tokens", "Label('b', start=3.95, end=6.0), >>> Label('c', start=7.0, end=10.2), >>> Label('d', start=10.3,", "# Alteration # def add(self, label): \"\"\" Add a label", "5), >>> Label('b', 5, 10), >>>]) >>> >>> res =", "= set() for label in self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return", "overlapping, if ``l2.start - l1.end < threshold``. Args: threshold (float):", "Label('d', 10.5, 14), >>> Label('d', 15, 18) >>> ]) >>>", "able to define the correct order') return delimiter.join(concat_values) def tokenized(self,", "Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a d q',", "\"\"\" durations = collections.defaultdict(float) for label in self: durations[label.value] +=", ">>> Label('c', start=7.0, end=10.2), >>> Label('d', start=10.3, end=14.0) >>> ])", "Represents a list of labels which describe an utterance. An", ">>> def shift_labels(label): ... label.start += 1.0 ... label.end +=", "a separate Label-List for every distinct label-value. Returns: dict: A", "can then be edited in place. Args: fn (func): Function", "is kind of a weak ref return LabelList( idx=self.idx, labels=[iv.data", "of the label. 
If the overlapping between two labels is", "= LabelList(idx='transcription', labels=[ >>> Label('this', 0, 2), >>> Label('is', 2,", "0, inf), Label('z', 0, inf), ] \"\"\" ll = LabelList(idx=idx)", "overlap, not able to define the correct order') return tokens", "a list of labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals = sorted(tree_copy)", ">>> Label('d', 10.5, 14) >>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1,", "8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>>])", "def separated(self): \"\"\" Create a separate Label-List for every distinct", "the end of the list. Args: label (Label): The label", "(last_label_end - label.start < overlap_threshold and last_label_end > 0): concat_values.append(label.value)", "The order of the labels is defined by the start", "separated_lls def labels_in_range(self, start, end, fully_included=False): \"\"\" Return a list", "- b - c - d' \"\"\" sorted_by_start = sorted(self.labels)", "return them as new label-lists. x is defined by the", "two consecutive labels. Returns: str: A list containing tokens of", ">>> ]) \"\"\" __slots__ = ['idx', 'label_tree', 'utterance'] def __init__(self,", "splits = [] iv_start = 0.0 for i in range(len(cutting_points)", "every label Example: >>> ll = LabelList(labels=[ ... Label('a_label', 1.0,", "range. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5),", "is splitted. shift_times(bool): If True, start and end-time are shifted", "Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5,", "\"\"\" Return a ordered list of tokens based on all", "idx='default', labels=None): self.idx = idx self.utterance = None self.label_tree =", "labels is defined by the start of the label. If", "labels=[ >>> Label('a d q', start=0, end=4), >>> Label('b', start=3.95,", "values in the list will be considered. Returns: generator: A", "End-time in seconds. 
fully_included(bool): If ``True``, only labels fully included", "\"\"\" Return for each distinct label value the total duration", "+ overlap ) cp_splits = LabelList(idx=self.idx) # Extract labels from", "consecutive labels. Returns: str: A list containing tokens of all", "4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4)", "= ll.ranges() >>> next(ranges) (3.2, 4.5, [ < audiomate.annotations.Label at", "label-list within a corpus for one utterance. labels (list): The", "list (str) of label values. Example: >>> ll = LabelList(labels=[", "the label-list is splitted. shift_times(bool): If True, start and end-time", "in separated_lls.values(): ll.idx = self.idx return separated_lls def labels_in_range(self, start,", "with the given values. All labels will have default start/end", "on. Args: cutting_points(list): List of floats defining the points in", "empty, only the label values in the list will be", "idx='letters') >>> ll.idx 'letters' >>> ll.labels [ Label('a', 0, inf),", "as a part of the label-list for which the same", "labels. Returns: str: A list containing tokens of all labels", "(Number of characters). \"\"\" return sum(label.length for label in self.labels)", "ll.label_total_duration() {'a': 7.5 'b': 7.0} \"\"\" durations = collections.defaultdict(float) for", "Example: >>> ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ...", "lastly ending label (upper bound). \"\"\" return self.label_tree.end() @property def", "this label-list is belonging to. label_tree (IntervalTree): The interval-tree storing", "belonging to. label_tree (IntervalTree): The interval-tree storing the labels. 
Example:", "5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14)", "15, 18) >>> ]) >>> ll.label_values() ['a', 'b', 'c', 'd']", "sorted_by_start = sorted(self.labels) concat_values = [] last_label_end = None for", "dict: A dictionary containing for every label-value (key) the number", "labels from intervals with updated times for iv in intervals:", "Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>>", "= LabelList.with_label_values(['a', 'x', 'z'], idx='letters') >>> ll.idx 'letters' >>> ll.labels", "the original label-list. overlap(float): Amount of overlap in seconds. This", "containing only labels with the same value. Example: >>> ll", "all_labels = {l.value for l in self} return sorted(all_labels) def", "[Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4), Label('b',", "3.0, 4.0)] \"\"\" for label in self.labels: fn(label) def merge_overlaps(self,", "14) >>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c', 7.2,", "[ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels", "list. Args: label (Label): The label to add. \"\"\" label.label_list", "... Label('a_label', 1.0, 2.0), ... Label('another_label', 2.0, 3.0) ... ])", "join(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a string with all", ">>> res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ]", "label-list. Returns: (LabelList): New label-list. Example: >>> ll = LabelList.with_label_values(['a',", "[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)] >>> s['b'].labels [Label('b', start=3.95,", "'z'], idx='letters') >>> ll.idx 'letters' >>> ll.labels [ Label('a', 0,", "Function to apply to every label Example: >>> ll =", "self.label_tree: yield interval.data def __len__(self): return self.label_tree.__len__() def __copy__(self): #", "d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c", "start=10.3, end=14.0) >>> ]) >>> ll.join(' - ') 'a -", "to. 
label_tree (IntervalTree): The interval-tree storing the labels. Example: >>>", "__len__(self): return self.label_tree.__len__() def __copy__(self): # utterance is ignored intentionally,", "... label.end += 1.0 ... >>> ll.apply(shift_labels) >>> ll.labels [Label(a_label,", ">>> ]) >>> ll.join(' - ') 'a - b -", "by the number of cutting-points (``x == len(cutting_points) + 1``).", "fully included in the range are returned. Otherwise also overlapping", "Returns: :class:`set`: A set of distinct tokens. \"\"\" tokens =", "[ Label('a', 0, inf), Label('x', 0, inf), Label('z', 0, inf),", ">>> ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('another_label',", "ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f',", "None for label in sorted_by_start: if last_label_end is None or", "0 contains labels between ``0`` and ``cutting_points[0]``. Label-list 1 contains", "at a time. Example: >>> ll = LabelList(labels=[ >>> Label('a',", "two consecutive labels. Returns: str: A string with all labels", ">>> ll = LabelList(labels=[ >>> Label('a', 3, 5), >>> Label('b',", "LabelList.with_label_values(['a', 'x', 'z'], idx='letters') >>> ll.idx 'letters' >>> ll.labels [", "labels (Number of characters). \"\"\" return sum(label.length for label in", "', overlap_threshold=0.1): \"\"\" Return a ordered list of tokens based", "by range for iv in intervals: # yield an empty", "total duration in seconds (value). 
Example: >>> ll = LabelList(labels=[", "] >>> res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0)", "ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs) def apply(self, fn): \"\"\" Apply the", "= (self.idx, self.label_tree) data_other = (other.idx, other.label_tree) return data_this ==", ">>> ]) >>> s = ll.separate() >>> s['a'].labels [Label('a', start=0,", "unique identifier for the label-list within a corpus for one", "Label('a', start=7.0, end=10.2)] >>> s['b'].labels [Label('b', start=3.95, end=6.0), Label('b', start=10.3,", "-= orig_start label.end -= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end", "utterance this label-list is belonging to. label_tree (IntervalTree): The interval-tree", "ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('another_label', 2.0,", "# Query Label Values # def join(self, delimiter=' ', overlap_threshold=0.1):", "float('inf') ov_end = 0.0 ov_value = next_interval.data.value for overlap in", "should be created and appended to the label-list. idx(str): The", "update(self, labels): \"\"\" Add a list of labels to the", "durations = collections.defaultdict(float) for label in self: durations[label.value] += label.duration", "Label('another_label', 2.0, 3.0) ... ]) >>> def shift_labels(label): ... label.start", "10.1) [Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)] \"\"\" if fully_included:", "labels, that are within the given range. Also labels that", "A dictionary with distinct label-values as keys. Every value is", "overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else:", ">>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1,", "def label_total_duration(self): \"\"\" Return for each distinct label value the", "the same labels are defined. 
Args: yield_ranges_without_labels(bool): If True also", "(last_end, iv.begin, []) yield (iv.begin, iv.end, iv.data) last_end = iv.end", "list of labels which describe an utterance. An utterance can", "__eq__(self, other): data_this = (self.idx, self.label_tree) data_other = (other.idx, other.label_tree)", "every label-value (key) the number of occurrences (value). Example: >>>", "yield_ranges_without_labels(bool): If True also yields ranges for which no labels", "of one argument that receives the current label which can", "direct_overlaps = all_intervals.overlap(range_start, range_end) all_overlaps = [interval] all_intervals.discard(interval) for overlap", "(last_label_end - label.start < overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter))", "10.5, 14), >>> Label('d', 15, 18) >>> ]) >>> ll.label_values()", "in self: occurrences[label.value] += 1 return occurrences def all_tokens(self, delimiter='", "correct order') return delimiter.join(concat_values) def tokenized(self, delimiter=' ', overlap_threshold=0.1): \"\"\"", "ll.separate() >>> s['a'].labels [Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)] >>>", "label while not all_intervals.is_empty(): next_interval = list(all_intervals)[0] overlapping = recursive_overlaps(next_interval)", "(default: space) overlap_threshold (float): Maximum overlap between two consecutive labels.", "= [] last_label_end = None for label in sorted_by_start: if", "0, 2), >>> Label('is', 2, 4), >>> Label('timmy', 4, 8)", "in the label-list. Args: delimiter (str): The delimiter used to", "edited in place. 
Args: fn (func): Function to apply to", "merge equal ranges to a list of labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce,", "# For every remaining interval # - Find overlapping intervals", "return splits # # Convenience Constructors # @classmethod def create_single(cls,", "overlap_threshold and last_label_end > 0): concat_values.append(label.value) last_label_end = label.end else:", "create_single(cls, value, idx='default'): \"\"\" Create a label-list with a single", "correct order') return tokens # # Restructuring # def separated(self):", "labels. Joins all token from all labels (``label.tokenized()```). If the", "``inf``. Args: values(list): List of values(str) that should be created", "2.0), ... Label('a_label', 1.5, 2.7), ... Label('b_label', 1.0, 2.0), ...", "return occurrences def all_tokens(self, delimiter=' '): \"\"\" Return a list", "15, 18) >>> ]) >>> ll.label_count() {'a': 3 'b': 2}", "not in include_labels: tree_copy.remove(iv) def reduce(x, y): x.append(y) return x", "__iter__(self): for interval in self.label_tree: yield interval.data def __len__(self): return", "is not None: for iv in list(tree_copy): if iv.data.value not", "end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('a', start=7.0, end=10.2), >>>", "\"\"\" sorted_by_start = sorted(self.labels) concat_values = [] last_label_end = None", "ranges for which no labels are defined. include_labels(list): If not", "# recursivly find a group of overlapping labels with the", "in seconds (value). Example: >>> ll = LabelList(labels=[ >>> Label('a',", "len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [", "# we have to loop in sorted order cutting_points =", "tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else: raise ValueError('Labels overlap, not able", ">>> s = ll.separate() >>> s['a'].labels [Label('a', start=0, end=4), Label('a',", "list of label-lists corresponding to each part. 
Label-list 0 contains", "\"\"\" updated_labels = [] all_intervals = self.label_tree.copy() # recursivly find", "10.5, 14), >>> Label('a', 15, 18) >>> ]) >>> ll.label_count()", "(``x == len(cutting_points) + 1``). The result is a list", "included in the range are returned. Otherwise also overlapping ones", "10.5)] \"\"\" if fully_included: intervals = self.label_tree.envelop(start, end) else: intervals", "all_intervals = self.label_tree.copy() # recursivly find a group of overlapping", "function of one argument that receives the current label which", "and ``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``.", "label containing the given value. \"\"\" return LabelList(idx=idx, labels=[ Label(value=value)", "threshold (float): Maximal distance between two labels to be considered", "overlap, iv_end + overlap ) cp_splits = LabelList(idx=self.idx) # Extract", "values of 0 and ``inf``. Args: values(list): List of values(str)", "for iv in intervals: # yield an empty range if", "= ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>> res[0].labels [Label('a',", "8.9), Label('c', 7.2, 10.5)] \"\"\" if fully_included: intervals = self.label_tree.envelop(start,", "addl(self, value, start=0.0, end=float('inf')): \"\"\" Shortcut for ``add(Label(value, start, end))``.", "ranges(self, yield_ranges_without_labels=False, include_labels=None): \"\"\" Generate all ranges of the label-list.", "include_labels is not None: for iv in list(tree_copy): if iv.data.value", "fully_included=False): \"\"\" Return a list of labels, that are within", "Remove them # - Create a concatenated new label while", "of the labels is defined by the start of the", "]) >>> s = ll.separate() >>> s['a'].labels [Label('a', start=0, end=4),", "= list(all_intervals)[0] overlapping = recursive_overlaps(next_interval) ov_start = float('inf') ov_end =", "tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals = 
sorted(tree_copy) last_end = intervals[0].begin #", "iv_end return splits # # Convenience Constructors # @classmethod def", "If not empty, only the label values in the list", "the label order. Example: >>> ll = LabelList(idx='some', labels=[ >>>", "res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times = True``, the times", "- Find overlapping intervals recursively # - Remove them #", "copy import intervaltree from .label import Label class LabelList: \"\"\"", "memo): # utterance is ignored intentionally, # since it is", "Returns: list: Lexicographically sorted list (str) of label values. Example:", "raise ValueError('At least one cutting-point is needed!') # we have", "A string with all labels concatenated together. Example: >>> ll", "Label('c', start=7.0, end=10.2), >>> Label('d', start=10.3, end=14.0) >>> ]) >>>", "all_intervals.is_empty(): next_interval = list(all_intervals)[0] overlapping = recursive_overlaps(next_interval) ov_start = float('inf')", "Args: delimiter (str): The delimiter used to split labels into", "overlap in direct_overlaps: if overlap.data.value == interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps", "``overlap_threshold``, an Exception is thrown. Args: delimiter (str): A string", "label-values as keys. Every value is a LabelList containing only", "labels that only overlap are included. Args: start(float): Start-time in", "yield_ranges_without_labels=False, include_labels=None): \"\"\" Generate all ranges of the label-list. A", "labels=None): self.idx = idx self.utterance = None self.label_tree = intervaltree.IntervalTree()", "a weak ref return LabelList( idx=self.idx, labels=[iv.data for iv in", "Merge overlapping labels with the same value. Two labels are", "end=10.2), >>> Label('d', start=10.3, end=14.0) >>> ]) >>> ll.join(' -", "(key) the total duration in seconds (value). 
Example: >>> ll", "For every remaining interval # - Find overlapping intervals recursively", "merge_overlaps(self, threshold=0.0): \"\"\" Merge overlapping labels with the same value.", "add. \"\"\" ivs = [] for label in labels: label.label_list", "of all tokens occurring in the label-list. Args: delimiter (str):", "returned. Otherwise also overlapping ones are returned. (default ``False``) Returns:", "in direct_overlaps: if overlap.data.value == interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps #", "label in self.labels: fn(label) def merge_overlaps(self, threshold=0.0): \"\"\" Merge overlapping", "= [] iv_start = 0.0 for i in range(len(cutting_points) +", "between ``0`` and ``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]``", "@property def total_length(self): \"\"\" Return the cumulative length of all", "self.utterance = None self.label_tree = intervaltree.IntervalTree() if labels is not", "intervals = self.label_tree.overlap( iv_start - overlap, iv_end + overlap )", "label-lists. x is defined by the number of cutting-points (``x", "containing for every label-value (key) the total duration in seconds", "beginning of the original label-list. overlap(float): Amount of overlap in", "(float): Maximal distance between two labels to be considered as", "... label.start += 1.0 ... label.end += 1.0 ... >>>", "labels_in_range(self, start, end, fully_included=False): \"\"\" Return a list of labels,", "subtracted from a start-cutting-point, and added to a end-cutting-point. Returns:", "which yields one range (tuple start/end/list-of-labels) at a time. Example:", "the list will be considered. 
Returns: generator: A generator which", "self.labels: separated_lls[label.value].add(label) for ll in separated_lls.values(): ll.idx = self.idx return", "Label('timmy', 4, 8) >>> ]) \"\"\" __slots__ = ['idx', 'label_tree',", "is not None: self.update(labels) def __eq__(self, other): data_this = (self.idx,", "Label('is', 2, 4), >>> Label('timmy', 4, 8) >>> ]) \"\"\"", "len(cutting_points) == 0: raise ValueError('At least one cutting-point is needed!')", "end(self): \"\"\" Return end of the lastly ending label (upper", "data_other = (other.idx, other.label_tree) return data_this == data_other def __iter__(self):", "distinct tokens. \"\"\" tokens = set() for label in self:", "is None or (last_label_end - label.start < overlap_threshold and last_label_end", "but the first. >>> ll = LabelList(labels=[ >>> Label('a', 0,", "Every value is a LabelList containing only labels with the", "# Remove labels not included if include_labels is not None:", "are defined. Args: yield_ranges_without_labels(bool): If True also yields ranges for", "all_overlaps # For every remaining interval # - Find overlapping", "end=10.2), >>> Label('b', start=10.3, end=14.0) >>> ]) >>> s =", "8.9) ] >>> res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0,", "for label in self: durations[label.value] += label.duration return durations def", "= LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4), >>>", "label in self: occurrences[label.value] += 1 return occurrences def all_tokens(self,", "generator: A generator which yields one range (tuple start/end/list-of-labels) at", "If the overlapping between two labels is greater than ``overlap_threshold``,", "for iv in list(tree_copy): if iv.data.value not in include_labels: tree_copy.remove(iv)", "idx of the label-list. Returns: (LabelList): New label-list. 
Example: >>>", "\"\"\" Return a list of labels, that are within the", "not able to define the correct order') return delimiter.join(concat_values) def", "] \"\"\" ll = LabelList(idx=idx) for label_value in values: ll.add(Label(label_value))", "list. `fn` is a function of one argument that receives", "list. Returns: dict: A dictionary containing for every label-value (key)", "for label in labels: label.label_list = self ivs.append(intervaltree.Interval(label.start, label.end, label))", "start=10.3, end=14.0) >>> ]) >>> s = ll.separate() >>> s['a'].labels", "if yield_ranges_without_labels and iv.begin > last_end: yield (last_end, iv.begin, [])", "Label('a', 7.2, 10.5), >>> Label('b', 10.5, 14), >>> Label('a', 15,", "1.0, 2.0), ... ]) >>> ll.merge_overlapping_labels() >>> ll.labels [ Label('a_label',", "for iv in self.label_tree] ) def __deepcopy__(self, memo): # utterance", "in intervals: # yield an empty range if necessary if", "Return for each distinct label value the total duration of", "one range (tuple start/end/list-of-labels) at a time. Example: >>> ll", "include_labels=None): \"\"\" Generate all ranges of the label-list. A range", "if fully_included: intervals = self.label_tree.envelop(start, end) else: intervals = self.label_tree.overlap(start,", "ll in separated_lls.values(): ll.idx = self.idx return separated_lls def labels_in_range(self,", "# # Convenience Constructors # @classmethod def create_single(cls, value, idx='default'):", "bound). \"\"\" return self.label_tree.begin() @property def end(self): \"\"\" Return end", "the end of the list. Args: labels (list): Labels to", "and added to a end-cutting-point. Returns: list: A list of", "from a start-cutting-point, and added to a end-cutting-point. Returns: list:", "labels): \"\"\" Add a list of labels to the end", "= LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('a_label', 1.5, 2.7),", "class: `audiomate.annotations.LabelList`. 
Example: >>> ll = LabelList(labels=[ >>> Label('a', 0,", "ending label (upper bound). \"\"\" return self.label_tree.end() @property def total_length(self):", "a label to the end of the list. Args: label", "Return a list of labels, that are within the given", "of the label-list for which the same labels are defined.", ">>> Label('is', 2, 4), >>> Label('timmy', 4, 8) >>> ])", "label_count(self): \"\"\" Return for each label the number of occurrences", "of all occurrences. Returns: dict: A dictionary containing for every", "to split labels into tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A", "Args: label (Label): The label to add. \"\"\" label.label_list =", "'x', 'z'], idx='letters') >>> ll.idx 'letters' >>> ll.labels [ Label('a',", "the overlapping between two labels is greater than ``overlap_threshold``, an", "utterance. labels (list): The list containing the :py:class:`audiomate.annotations.Label`. Attributes: utterance", "a label-list with a single label containing the given value.", "in seconds, where the label-list is splitted. shift_times(bool): If True,", "\"\"\" all_labels = {l.value for l in self} return sorted(all_labels)", "Joins all token from all labels (``label.tokenized()```). If the overlapping", "labels with the same value. Two labels are considered overlapping,", "\"\"\" Return a string with all labels concatenated together. The", "earliest starting label (lower bound). \"\"\" return self.label_tree.begin() @property def", "and ``inf``. Args: values(list): List of values(str) that should be", "[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)] \"\"\" if fully_included: intervals", "len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [", "are within the given range. Also labels that only overlap", "for label in self.labels: separated_lls[label.value].add(label) for ll in separated_lls.values(): ll.idx", "which describe an utterance. 
An utterance can have multiple label-lists.", "7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>> ll.labels_in_range(6.2, 10.1)", "label values in the list will be considered. Returns: generator:", "for l in self} return sorted(all_labels) def label_count(self): \"\"\" Return", "def __eq__(self, other): data_this = (self.idx, self.label_tree) data_other = (other.idx,", "self.labels: fn(label) def merge_overlaps(self, threshold=0.0): \"\"\" Merge overlapping labels with", "a list of label-lists corresponding to each part. Label-list 0", "8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14), >>>", "Attributes: utterance (Utterance): The utterance this label-list is belonging to.", "iv.begin > last_end: yield (last_end, iv.begin, []) yield (iv.begin, iv.end,", "is a function of one argument that receives the current", "+ overlap, label.end) if shift_times: orig_start = max(0, iv_start -", "of the label-list. A range is defined as a part", "return tokens # # Restructuring # def separated(self): \"\"\" Create", "a corpus for one utterance. labels (list): The list containing", "overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g'] \"\"\"", "two labels to be considered as overlapping. (default: 0.0) Example:", "floats defining the points in seconds, where the label-list is", "shift_times=False, overlap=0.0): \"\"\" Split the label-list into x parts and", "overlapping = recursive_overlaps(next_interval) ov_start = float('inf') ov_end = 0.0 ov_value", ">>> ll.label_total_duration() {'a': 7.5 'b': 7.0} \"\"\" durations = collections.defaultdict(float)", "\"\"\" return sum(label.length for label in self.labels) # # Alteration", "(list): The list containing the :py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance): The", "the given value. \"\"\" return LabelList(idx=idx, labels=[ Label(value=value) ]) @classmethod", "to the cutting-points for every label-list but the first. 
>>>", "label) def addl(self, value, start=0.0, end=float('inf')): \"\"\" Shortcut for ``add(Label(value,", "last_label_end > 0): concat_values.append(label.value) last_label_end = label.end else: raise ValueError('Labels", "def recursive_overlaps(interval): range_start = interval.begin - threshold range_end = interval.end", "cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end return splits # # Convenience", "the lastly ending label (upper bound). \"\"\" return self.label_tree.end() @property", "for iv in intervals: label = copy.deepcopy(iv.data) label.start = max(0,", "list(tree_copy): if iv.data.value not in include_labels: tree_copy.remove(iv) def reduce(x, y):", "is belonging to. label_tree (IntervalTree): The interval-tree storing the labels.", "10.5, 14) >>>]) >>> ranges = ll.ranges() >>> next(ranges) (3.2,", "as new label-lists. x is defined by the number of", "# - Create a concatenated new label while not all_intervals.is_empty():", "for label in self.labels: fn(label) def merge_overlaps(self, threshold=0.0): \"\"\" Merge", "list of labels, that are within the given range. Also", "last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else: raise ValueError('Labels", "ll.ranges() >>> next(ranges) (3.2, 4.5, [ < audiomate.annotations.Label at 0x1090527c8", "according to the label order. Example: >>> ll = LabelList(idx='some',", "a LabelList containing only labels with the same value. Example:", "overlapping labels with the same value def recursive_overlaps(interval): range_start =", "'g'] \"\"\" sorted_by_start = sorted(self.labels) tokens = [] last_label_end =", "12.0, 15.0)] If ``shift_times = True``, the times are adjusted", "include_labels(list): If not empty, only the label values in the", "label-list is splitted. 
shift_times(bool): If True, start and end-time are", "self.label_tree], memo) ) @property def labels(self): \"\"\" Return list of", "Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f", "and return them as new label-lists. x is defined by", "of the label-list. Returns: (LabelList): New label-list. Example: >>> ll", "iv in intervals: label = copy.deepcopy(iv.data) label.start = max(0, iv_start", ">>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times = True``, the", "Label Values # def join(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return", "Alteration # def add(self, label): \"\"\" Add a label to", "[ < audiomate.annotations.label.Label at 0x1090484c8 > ]) \"\"\" tree_copy =", "Args: delimiter (str): A string to join two consecutive labels.", "Args: labels (list): Labels to add. \"\"\" ivs = []", "\"\"\" if len(cutting_points) == 0: raise ValueError('At least one cutting-point", "def __len__(self): return self.label_tree.__len__() def __copy__(self): # utterance is ignored", "Labels to add. \"\"\" ivs = [] for label in", "yield_ranges_without_labels and iv.begin > last_end: yield (last_end, iv.begin, []) yield", "0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0,", "self.label_tree.copy() # Remove labels not included if include_labels is not", "Convenience Constructors # @classmethod def create_single(cls, value, idx='default'): \"\"\" Create", "between two consecutive labels. 
Returns: str: A list containing tokens", "max(0, iv_start - overlap) label.start -= orig_start label.end -= orig_start", "overlap, label.end) if shift_times: orig_start = max(0, iv_start - overlap)", "[] all_intervals = self.label_tree.copy() # recursivly find a group of", "Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('a', 7.2,", "intervals: # yield an empty range if necessary if yield_ranges_without_labels", "- c - d' \"\"\" sorted_by_start = sorted(self.labels) concat_values =", "range_end) all_overlaps = [interval] all_intervals.discard(interval) for overlap in direct_overlaps: if", "self.label_tree.__len__() def __copy__(self): # utterance is ignored intentionally, # since", "Return the cumulative length of all labels (Number of characters).", "@classmethod def create_single(cls, value, idx='default'): \"\"\" Create a label-list with", "= self.label_tree.overlap( iv_start - overlap, iv_end + overlap ) cp_splits", "labels are defined. include_labels(list): If not empty, only the label", "included. Args: start(float): Start-time in seconds. end(float): End-time in seconds.", "Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4),", "1.0 ... >>> ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0, 3.0), Label(another_label,", "# def add(self, label): \"\"\" Add a label to the", "= collections.defaultdict(LabelList) for label in self.labels: separated_lls[label.value].add(label) for ll in", "than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): The", "0, inf), Label('x', 0, inf), Label('z', 0, inf), ] \"\"\"", "the old labels with the updated ones self.label_tree.clear() self.update(updated_labels) #", "Returns: dict: A dictionary containing for every label-value (key) the", "ll.labels [ Label('a', 0, inf), Label('x', 0, inf), Label('z', 0,", "which no labels are defined. include_labels(list): If not empty, only", "def apply(self, fn): \"\"\" Apply the given function `fn` to", "to the label-list. 
idx(str): The idx of the label-list. Returns:", "``shift_times = True``, the times are adjusted to be relative", "8, 10), >>> Label('b', 10, 14), >>> Label('a', 15, 18.5)", "self.label_tree.envelop(start, end) else: intervals = self.label_tree.overlap(start, end) return [iv.data for", "intervals: label = copy.deepcopy(iv.data) label.start = max(0, iv_start - overlap,", ">>> ]) >>> ll.label_values() ['a', 'b', 'c', 'd'] \"\"\" all_labels", "7.2, [ < audiomate.annotations.label.Label at 0x1090484c8 > ]) \"\"\" tree_copy", "iv_start - overlap) label.start -= orig_start label.end -= orig_start cp_splits.add(label)", "< overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end", "self.label_tree.begin() @property def end(self): \"\"\" Return end of the lastly", "end(float): End-time in seconds. fully_included(bool): If ``True``, only labels fully", "10.5), >>> Label('d', 10.5, 14) >>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b',", "cutting-points for every label-list but the first. >>> ll =", "list: A list of of: class: `audiomate.annotations.LabelList`. Example: >>> ll", "range_start = interval.begin - threshold range_end = interval.end + threshold", "sorted(cutting_points) splits = [] iv_start = 0.0 for i in", "Label(value=value) ]) @classmethod def with_label_values(cls, values, idx='default'): \"\"\" Create a", "\"\"\" ll = LabelList(idx=idx) for label_value in values: ll.add(Label(label_value)) return", "end, fully_included=False): \"\"\" Return a list of labels, that are", "to join two consecutive labels. 
overlap_threshold (float): Maximum overlap between", "def all_tokens(self, delimiter=' '): \"\"\" Return a list of all", "def label_count(self): \"\"\" Return for each label the number of", "tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens # # Query Label Values", "res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4),", "LabelList( idx=self.idx, labels=[iv.data for iv in self.label_tree] ) def __deepcopy__(self,", "given function `fn` to every label in this label list.", "dict: A dictionary containing for every label-value (key) the total", ">>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]", "are included. Args: start(float): Start-time in seconds. end(float): End-time in", "label-list for which the same labels are defined. Args: yield_ranges_without_labels(bool):", "range. Also labels that only overlap are included. Args: start(float):", "18) >>> ]) >>> ll.label_count() {'a': 3 'b': 2} \"\"\"", "Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>>", "together. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0,", "1 return occurrences def all_tokens(self, delimiter=' '): \"\"\" Return a", "part of the label-list for which the same labels are", "parts and return them as new label-lists. x is defined", "the same value. Two labels are considered overlapping, if ``l2.start", "[Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0), Label('b',", "= label.end else: raise ValueError('Labels overlap, not able to define", "list containing the :py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance): The utterance this", "Label('a_label', 1.0, 2.0), ... Label('a_label', 1.5, 2.7), ... Label('b_label', 1.0,", "+ threshold direct_overlaps = all_intervals.overlap(range_start, range_end) all_overlaps = [interval] all_intervals.discard(interval)", "the list. Args: label (Label): The label to add. 
\"\"\"", "on all labels. Joins all token from all labels (``label.tokenized()```).", "contains labels between ``0`` and ``cutting_points[0]``. Label-list 1 contains labels", "in self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens # # Query", "for the label-list within a corpus for one utterance. labels", "12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times =", "4.5), >>> Label('b', 5.1, 8.9), >>> Label('a', 7.2, 10.5), >>>", "LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('a_label', 1.5, 2.7), ...", "order of the labels is defined by the start of", "Label('b', 5, 10), >>>]) >>> >>> res = ll.split([4.6]) >>>", "LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0),", "start/end/list-of-labels) at a time. Example: >>> ll = LabelList(labels=[ >>>", "all_overlaps = [interval] all_intervals.discard(interval) for overlap in direct_overlaps: if overlap.data.value", "the given function `fn` to every label in this label", "recursive_overlaps(interval): range_start = interval.begin - threshold range_end = interval.end +", "A list of of: class: `audiomate.annotations.LabelList`. Example: >>> ll =", "ov_value, ov_start, ov_end )) # Replace the old labels with", "one cutting-point is needed!') # we have to loop in", "= self.label_tree.overlap(start, end) return [iv.data for iv in intervals] def", "added to a end-cutting-point. Returns: list: A list of of:", "Statistics # def label_total_duration(self): \"\"\" Return for each distinct label", "the number of cutting-points (``x == len(cutting_points) + 1``). The", "Args: start(float): Start-time in seconds. end(float): End-time in seconds. fully_included(bool):", "the total duration of all occurrences. 
Returns: dict: A dictionary", "len(cutting_points): iv_end = cutting_points[i] else: iv_end = float('inf') # get", "for interval in self.label_tree: yield interval.data def __len__(self): return self.label_tree.__len__()", "``l2.start - l1.end < threshold``. Args: threshold (float): Maximal distance", "set() for label in self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens", "self.label_tree.addi(label.start, label.end, label) def addl(self, value, start=0.0, end=float('inf')): \"\"\" Shortcut", "]) >>> ll.label_count() {'a': 3 'b': 2} \"\"\" occurrences =", "Label('b_label', 1.0, 2.0), ... ]) >>> ll.merge_overlapping_labels() >>> ll.labels [", "occurrences within the list. Returns: dict: A dictionary containing for", ">>> ll.labels [ Label('a_label', 1.0, 2.7), Label('b_label', 1.0, 2.0), ]", "other.label_tree) return data_this == data_other def __iter__(self): for interval in", "else: intervals = self.label_tree.overlap(start, end) return [iv.data for iv in", "import intervaltree from .label import Label class LabelList: \"\"\" Represents", "to split labels into tokens. (default: space) overlap_threshold (float): Maximum", "the correct order') return tokens # # Restructuring # def", "labels into tokens. (default: space) overlap_threshold (float): Maximum overlap between", "start=3.95, end=6.0), >>> Label('a', start=7.0, end=10.2), >>> Label('b', start=10.3, end=14.0)", "Extract labels from intervals with updated times for iv in", ">>> Label('a', 3, 5), >>> Label('b', 5, 8), >>> Label('a',", "3, 5), >>> Label('b', 5, 8), >>> Label('a', 8, 10),", "intervals[0].begin # yield range by range for iv in intervals:", "[] iv_start = 0.0 for i in range(len(cutting_points) + 1):", "ordered list of tokens based on all labels. Joins all", "1.5, 2.7), ... Label('b_label', 1.0, 2.0), ... ]) >>> ll.merge_overlapping_labels()", "new label while not all_intervals.is_empty(): next_interval = list(all_intervals)[0] overlapping =", "seconds. 
This amount is subtracted from a start-cutting-point, and added", "__init__(self, idx='default', labels=None): self.idx = idx self.utterance = None self.label_tree", "idx='default'): \"\"\" Create a new label-list containing labels with the", ">>> Label('a', 7.2, 10.5), >>> Label('b', 10.5, 14), >>> Label('a',", "from .label import Label class LabelList: \"\"\" Represents a list", "start of the earliest starting label (lower bound). \"\"\" return", "idx=self.idx, labels=[iv.data for iv in self.label_tree] ) def __deepcopy__(self, memo):", "label the number of occurrences within the list. Returns: dict:", "end=14.0)] \"\"\" separated_lls = collections.defaultdict(LabelList) for label in self.labels: separated_lls[label.value].add(label)", "of the lastly ending label (upper bound). \"\"\" return self.label_tree.end()", "(5.1, 7.2, [ < audiomate.annotations.label.Label at 0x1090484c8 > ]) \"\"\"", "the total duration in seconds (value). Example: >>> ll =", "collections.defaultdict(int) for label in self: occurrences[label.value] += 1 return occurrences", "+= 1.0 ... >>> ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0, 3.0),", "Create a separate Label-List for every distinct label-value. Returns: dict:", "ll.merge_overlapping_labels() >>> ll.labels [ Label('a_label', 1.0, 2.7), Label('b_label', 1.0, 2.0),", "[iv.data for iv in intervals] def ranges(self, yield_ranges_without_labels=False, include_labels=None): \"\"\"", ">>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b',", "ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5,", "# Split labels when overlapping and merge equal ranges to", "last_end = iv.end def split(self, cutting_points, shift_times=False, overlap=0.0): \"\"\" Split", "of label values. Example: >>> ll = LabelList(labels=[ >>> Label('a',", "1.0, 2.0), ... Label('a_label', 1.5, 2.7), ... 
Label('b_label', 1.0, 2.0),", ">>> Label('b', 5.1, 8.9), >>> Label('a', 7.2, 10.5), >>> Label('b',", "have default start/end values of 0 and ``inf``. Args: values(list):", "\"\"\" for label in self.labels: fn(label) def merge_overlaps(self, threshold=0.0): \"\"\"", "number of occurrences within the list. Returns: dict: A dictionary", "end-cutting-point. Returns: list: A list of of: class: `audiomate.annotations.LabelList`. Example:", "yield an empty range if necessary if yield_ranges_without_labels and iv.begin", "@property def end(self): \"\"\" Return end of the lastly ending", "not None: self.update(labels) def __eq__(self, other): data_this = (self.idx, self.label_tree)", "range are returned. Otherwise also overlapping ones are returned. (default", "labels=[ Label(value=value) ]) @classmethod def with_label_values(cls, values, idx='default'): \"\"\" Create", "define the correct order') return delimiter.join(concat_values) def tokenized(self, delimiter=' ',", "2), >>> Label('is', 2, 4), >>> Label('timmy', 4, 8) >>>", "least one cutting-point is needed!') # we have to loop", "loop in sorted order cutting_points = sorted(cutting_points) splits = []", "def start(self): \"\"\" Return start of the earliest starting label", "of labels which describe an utterance. An utterance can have", "1.0 ... label.end += 1.0 ... >>> ll.apply(shift_labels) >>> ll.labels", "', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']", "A dictionary containing for every label-value (key) the total duration", "\"\"\" Create a separate Label-List for every distinct label-value. Returns:", "res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0),", "== data_other def __iter__(self): for interval in self.label_tree: yield interval.data", "the cutting point and not to the beginning of the", "labels (list): Labels to add. \"\"\" ivs = [] for", "end=float('inf')): \"\"\" Shortcut for ``add(Label(value, start, end))``. 
\"\"\" self.add(Label(value, start=start,", "return self.label_tree.__len__() def __copy__(self): # utterance is ignored intentionally, #", "be edited in place. Args: fn (func): Function to apply", "value, idx='default'): \"\"\" Create a label-list with a single label", "(str) of label values. Example: >>> ll = LabelList(labels=[ >>>", "list(all_intervals)[0] overlapping = recursive_overlaps(next_interval) ov_start = float('inf') ov_end = 0.0", "[interval] all_intervals.discard(interval) for overlap in direct_overlaps: if overlap.data.value == interval.data.value:", "So the start is relative to the cutting point and", "one argument that receives the current label which can then", "list of labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals = sorted(tree_copy) last_end", "tokens of all labels ordered according to the label order.", "All labels will have default start/end values of 0 and", "@classmethod def with_label_values(cls, values, idx='default'): \"\"\" Create a new label-list", "a part of the label-list for which the same labels", "labels=[ >>> Label('this', 0, 2), >>> Label('is', 2, 4), >>>", "= iv.end def split(self, cutting_points, shift_times=False, overlap=0.0): \"\"\" Split the", "= [interval] all_intervals.discard(interval) for overlap in direct_overlaps: if overlap.data.value ==", "ov_start, ov_end )) # Replace the old labels with the", "self.label_tree.update(ivs) def apply(self, fn): \"\"\" Apply the given function `fn`", "sum(label.length for label in self.labels) # # Alteration # def", "apply(self, fn): \"\"\" Apply the given function `fn` to every", "\"\"\" Generate all ranges of the label-list. A range is", "\"\"\" Shortcut for ``add(Label(value, start, end))``. 
\"\"\" self.add(Label(value, start=start, end=end))", "True also yields ranges for which no labels are defined.", "orig_start label.end -= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end return", "all labels ordered according to the label order. Example: >>>", "Lexicographically sorted list (str) of label values. Example: >>> ll", "A generator which yields one range (tuple start/end/list-of-labels) at a", "float('inf') # get all intervals intersecting range intervals = self.label_tree.overlap(", "`fn` is a function of one argument that receives the", "- overlap, label.start) label.end = min(iv_end + overlap, label.end) if", "the range are returned. Otherwise also overlapping ones are returned.", "(value). Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5),", "# get all intervals intersecting range intervals = self.label_tree.overlap( iv_start", "< audiomate.annotations.Label at 0x1090527c8 > ]) >>> next(ranges) (4.5, 5.1,", "argument that receives the current label which can then be", "True``, the times are adjusted to be relative to the", "the label values in the list will be considered. Returns:", "labels (list): The list containing the :py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance):", ">>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)] \"\"\"", "between two labels is greater than ``overlap_threshold``, an Exception is", "seconds. end(float): End-time in seconds. fully_included(bool): If ``True``, only labels", "ll = LabelList(labels=[ >>> Label('a', 3, 5), >>> Label('b', 5,", "can have multiple label-lists. 
Args: idx (str): An unique identifier", "14) >>>]) >>> ranges = ll.ranges() >>> next(ranges) (3.2, 4.5,", "ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)] \"\"\"", "is subtracted from a start-cutting-point, and added to a end-cutting-point.", "for i in range(len(cutting_points) + 1): if i < len(cutting_points):", "overlap between two consecutive labels. Returns: str: A string with", "of the earliest starting label (lower bound). \"\"\" return self.label_tree.begin()", "'a', 'f', 'g'] \"\"\" sorted_by_start = sorted(self.labels) tokens = []", "x.append(y) return x # Split labels when overlapping and merge", "= tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens # # Query Label Values #", "cutting_points = sorted(cutting_points) splits = [] iv_start = 0.0 for", "occuring label values. Returns: list: Lexicographically sorted list (str) of", "them as new label-lists. x is defined by the number", "return sorted(all_labels) def label_count(self): \"\"\" Return for each label the", "LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>>])", "audiomate.annotations.label.Label at 0x1090484c8 > ]) \"\"\" tree_copy = self.label_tree.copy() #", "the list. Args: labels (list): Labels to add. \"\"\" ivs", "= max(0, iv_start - overlap) label.start -= orig_start label.end -=", "containing tokens of all labels ordered according to the label", "start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c', start=7.0, end=10.2),", "is relative to the cutting point and not to the", "inf), Label('x', 0, inf), Label('z', 0, inf), ] \"\"\" ll", "utterance. An utterance can have multiple label-lists. Args: idx (str):", "considered. 
Returns: generator: A generator which yields one range (tuple", "kind of a weak ref return LabelList( idx=self.idx, labels=copy.deepcopy([iv.data for", "last_end = intervals[0].begin # yield range by range for iv", "the times are adjusted to be relative to the cutting-points", "0): concat_values.append(label.value) last_label_end = label.end else: raise ValueError('Labels overlap, not", "18) >>> ]) >>> ll.label_values() ['a', 'b', 'c', 'd'] \"\"\"", "2} \"\"\" occurrences = collections.defaultdict(int) for label in self: occurrences[label.value]", "self.label_tree.end() @property def total_length(self): \"\"\" Return the cumulative length of", "labels: label.label_list = self ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs) def apply(self,", "remaining interval # - Find overlapping intervals recursively # -", "None or (last_label_end - label.start < overlap_threshold and last_label_end >", "get all intervals intersecting range intervals = self.label_tree.overlap( iv_start -", "11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times", "res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ] >>>", "Label('a', 0, 5), >>> Label('b', 5, 10), >>> Label('c', 11,", "4.1, 5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels [ Label('b',", "label-list. Args: delimiter (str): The delimiter used to split labels", "ones are returned. (default ``False``) Returns: list: List of labels", ">>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels", "direct_overlaps: if overlap.data.value == interval.data.value: all_overlaps.extend(recursive_overlaps(overlap)) return all_overlaps # For", "ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters') >>> ll.idx 'letters' >>>", "label.start -= orig_start label.end -= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start =", "list containing tokens of all labels ordered according to the", "every label in this label list. 
`fn` is a function", "a group of overlapping labels with the same value def", "= collections.defaultdict(float) for label in self: durations[label.value] += label.duration return", "delimiter used to split labels into tokens. (default: space) overlap_threshold", "]) \"\"\" __slots__ = ['idx', 'label_tree', 'utterance'] def __init__(self, idx='default',", "= float('inf') ov_end = 0.0 ov_value = next_interval.data.value for overlap", "is a LabelList containing only labels with the same value.", ">>> >>> res = ll.split([4.1, 8.9, 12.0]) >>> len(res) 4", "all_intervals.overlap(range_start, range_end) all_overlaps = [interval] all_intervals.discard(interval) for overlap in direct_overlaps:", "that only overlap are included. Args: start(float): Start-time in seconds.", "0.0) Example: >>> ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0),", "= min(ov_start, overlap.begin) ov_end = max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value,", "start of the label. If the overlapping between two labels", "\"\"\" Represents a list of labels which describe an utterance.", "bound). \"\"\" return self.label_tree.end() @property def total_length(self): \"\"\" Return the", "fn(label) def merge_overlaps(self, threshold=0.0): \"\"\" Merge overlapping labels with the", "every label-value (key) the total duration in seconds (value). Example:", "= recursive_overlaps(next_interval) ov_start = float('inf') ov_end = 0.0 ov_value =", "< len(cutting_points): iv_end = cutting_points[i] else: iv_end = float('inf') #", "necessary if yield_ranges_without_labels and iv.begin > last_end: yield (last_end, iv.begin,", "``cutting_points[1]``. And so on. Args: cutting_points(list): List of floats defining", "starting label (lower bound). 
\"\"\" return self.label_tree.begin() @property def end(self):", "values(str) that should be created and appended to the label-list.", ">>> Label('d', 10.5, 14), >>> Label('d', 15, 18) >>> ])", "1.0, 2.7), Label('b_label', 1.0, 2.0), ] \"\"\" updated_labels = []", "List of labels in the range. Example: >>> ll =", "def add(self, label): \"\"\" Add a label to the end", "def update(self, labels): \"\"\" Add a list of labels to", "updated_labels.append(Label( ov_value, ov_start, ov_end )) # Replace the old labels", "10.5), >>> Label('d', 10.5, 14), >>> Label('d', 15, 18) >>>", "label-list but the first. >>> ll = LabelList(labels=[ >>> Label('a',", "= [] all_intervals = self.label_tree.copy() # recursivly find a group", "= 0.0 ov_value = next_interval.data.value for overlap in overlapping: ov_start", "next(ranges) (5.1, 7.2, [ < audiomate.annotations.label.Label at 0x1090484c8 > ])", ">>>]) >>> ranges = ll.ranges() >>> next(ranges) (3.2, 4.5, [", "the correct order') return delimiter.join(concat_values) def tokenized(self, delimiter=' ', overlap_threshold=0.1):", "value. Two labels are considered overlapping, if ``l2.start - l1.end", "Args: idx (str): An unique identifier for the label-list within", "a list of labels which describe an utterance. An utterance", "5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels [ Label('b', 8.9,", "An utterance can have multiple label-lists. Args: idx (str): An", "iv_end = cutting_points[i] else: iv_end = float('inf') # get all", "tokens. (default: space) overlap_threshold (float): Maximum overlap between two consecutive", "LabelList(idx='transcription', labels=[ >>> Label('this', 0, 2), >>> Label('is', 2, 4),", "defined. 
include_labels(list): If not empty, only the label values in", "15), >>>]) >>> >>> res = ll.split([4.1, 8.9, 12.0]) >>>", "return durations def label_values(self): \"\"\" Return a list of all", "LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>>", "ranges to a list of labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals", "Returns: list: List of labels in the range. Example: >>>", "all intervals intersecting range intervals = self.label_tree.overlap( iv_start - overlap,", "(func): Function to apply to every label Example: >>> ll", "ll.label_values() ['a', 'b', 'c', 'd'] \"\"\" all_labels = {l.value for", "the list. Returns: dict: A dictionary containing for every label-value", "Remove labels not included if include_labels is not None: for", "ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('a_label', 1.5,", "``True``, only labels fully included in the range are returned.", "LabelList(idx=idx, labels=[ Label(value=value) ]) @classmethod def with_label_values(cls, values, idx='default'): \"\"\"", "in range(len(cutting_points) + 1): if i < len(cutting_points): iv_end =", "Label('a', 15, 18) >>> ]) >>> ll.label_count() {'a': 3 'b':", "consecutive labels. overlap_threshold (float): Maximum overlap between two consecutive labels.", "\"\"\" Create a label-list with a single label containing the", "label in self.labels) # # Alteration # def add(self, label):", ">>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>>", "containing the given value. \"\"\" return LabelList(idx=idx, labels=[ Label(value=value) ])", "thrown. Args: delimiter (str): A string to join two consecutive", "place. Args: fn (func): Function to apply to every label", "ValueError('Labels overlap, not able to define the correct order') return", "into x parts and return them as new label-lists. 
x", "None self.label_tree = intervaltree.IntervalTree() if labels is not None: self.update(labels)", "- d' \"\"\" sorted_by_start = sorted(self.labels) concat_values = [] last_label_end", "Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c',", "all token from all labels (``label.tokenized()```). If the overlapping between", "overlapping intervals recursively # - Remove them # - Create", "> ]) \"\"\" tree_copy = self.label_tree.copy() # Remove labels not", "def reduce(x, y): x.append(y) return x # Split labels when", "list of labels to the end of the list. Args:", "return [iv.data for iv in intervals] def ranges(self, yield_ranges_without_labels=False, include_labels=None):", "of values(str) that should be created and appended to the", "with the same value. Two labels are considered overlapping, if", "same value def recursive_overlaps(interval): range_start = interval.begin - threshold range_end", "The idx of the label-list. Returns: (LabelList): New label-list. Example:", ">>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels", "corresponding to each part. Label-list 0 contains labels between ``0``", "4 >>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a',", "= LabelList(idx=self.idx) # Extract labels from intervals with updated times", "iv_end + overlap ) cp_splits = LabelList(idx=self.idx) # Extract labels", "= interval.end + threshold direct_overlaps = all_intervals.overlap(range_start, range_end) all_overlaps =", "(float): Maximum overlap between two consecutive labels. 
Returns: str: A", "values(list): List of values(str) that should be created and appended", "self.label_tree] ) def __deepcopy__(self, memo): # utterance is ignored intentionally,", "min(ov_start, overlap.begin) ov_end = max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start,", "4.0)] \"\"\" for label in self.labels: fn(label) def merge_overlaps(self, threshold=0.0):", "for label in sorted_by_start: if last_label_end is None or (last_label_end", "The delimiter used to split labels into tokens. See :meth:`audiomate.annotations.Label.tokenized`", "since it is kind of a weak ref return LabelList(", "ll = LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4),", "Label('a_label', 1.0, 2.7), Label('b_label', 1.0, 2.0), ] \"\"\" updated_labels =", "for iv in intervals] def ranges(self, yield_ranges_without_labels=False, include_labels=None): \"\"\" Generate", "collections import copy import intervaltree from .label import Label class", "weak ref return LabelList( idx=self.idx, labels=copy.deepcopy([iv.data for iv in self.label_tree],", "occurrences. Returns: dict: A dictionary containing for every label-value (key)", "Label('d', start=10.3, end=14.0) >>> ]) >>> ll.join(' - ') 'a", "the range. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2,", "label.duration return durations def label_values(self): \"\"\" Return a list of", "LabelList containing only labels with the same value. Example: >>>", "value the total duration of all occurrences. Returns: dict: A", "# def separated(self): \"\"\" Create a separate Label-List for every", "reduce(x, y): x.append(y) return x # Split labels when overlapping", "an empty range if necessary if yield_ranges_without_labels and iv.begin >", "be considered as overlapping. 
(default: 0.0) Example: >>> ll =", "Replace the old labels with the updated ones self.label_tree.clear() self.update(updated_labels)", "have to loop in sorted order cutting_points = sorted(cutting_points) splits", "[ Label('a_label', 1.0, 2.7), Label('b_label', 1.0, 2.0), ] \"\"\" updated_labels", ">>> >>> res = ll.split([4.6]) >>> len(res) 4 >>> res[0].labels", ">>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b',", "(str): The delimiter used to split labels into tokens. See", "in self: durations[label.value] += label.duration return durations def label_values(self): \"\"\"", "order cutting_points = sorted(cutting_points) splits = [] iv_start = 0.0", "0.0 ov_value = next_interval.data.value for overlap in overlapping: ov_start =", "label which can then be edited in place. Args: fn", "overlap in seconds. This amount is subtracted from a start-cutting-point,", "(iv.begin, iv.end, iv.data) last_end = iv.end def split(self, cutting_points, shift_times=False,", "copy.deepcopy(iv.data) label.start = max(0, iv_start - overlap, label.start) label.end =", "defined as a part of the label-list for which the", "of labels to the end of the list. Args: labels", "label = copy.deepcopy(iv.data) label.start = max(0, iv_start - overlap, label.start)", "= all_intervals.overlap(range_start, range_end) all_overlaps = [interval] all_intervals.discard(interval) for overlap in", "fully_included: intervals = self.label_tree.envelop(start, end) else: intervals = self.label_tree.overlap(start, end)", "delimiter=' ', overlap_threshold=0.1): \"\"\" Return a string with all labels", "The delimiter used to split labels into tokens. (default: space)", "``False``) Returns: list: List of labels in the range. Example:", "0x1090527c8 > ]) >>> next(ranges) (4.5, 5.1, []) >>> next(ranges)", "all ranges of the label-list. A range is defined as", "to add. 
\"\"\" ivs = [] for label in labels:", "= self.idx return separated_lls def labels_in_range(self, start, end, fully_included=False): \"\"\"", "{l.value for l in self} return sorted(all_labels) def label_count(self): \"\"\"", "= sorted(self.labels) tokens = [] last_label_end = None for label", "Example: >>> ll = LabelList(labels=[ >>> Label('a', 3, 5), >>>", "= sorted(tree_copy) last_end = intervals[0].begin # yield range by range", "The utterance this label-list is belonging to. label_tree (IntervalTree): The", "res = ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>> res[0].labels", "each part. Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.", "delimiter used to split labels into tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns:", "Example: >>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters') >>> ll.idx", "... Label('a_label', 1.0, 2.0), ... Label('a_label', 1.5, 2.7), ... Label('b_label',", "same labels are defined. Args: yield_ranges_without_labels(bool): If True also yields", "in sorted order cutting_points = sorted(cutting_points) splits = [] iv_start", "of all labels ordered according to the label order. Example:", "Query Label Values # def join(self, delimiter=' ', overlap_threshold=0.1): \"\"\"", "@property def labels(self): \"\"\" Return list of labels. \"\"\" return", "if ``l2.start - l1.end < threshold``. Args: threshold (float): Maximal", "return self.label_tree.end() @property def total_length(self): \"\"\" Return the cumulative length", "current label which can then be edited in place. Args:", "shift_times: orig_start = max(0, iv_start - overlap) label.start -= orig_start", "all_tokens(self, delimiter=' '): \"\"\" Return a list of all tokens", "updated times for iv in intervals: label = copy.deepcopy(iv.data) label.start", "LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4), >>> Label('b',", "only labels with the same value. Example: >>> ll =", "and ``cutting_points[1]``. And so on. 
Args: cutting_points(list): List of floats", "utterance can have multiple label-lists. Args: idx (str): An unique", "all labels (``label.tokenized()```). If the overlapping between two labels is", "This amount is subtracted from a start-cutting-point, and added to", "= ['idx', 'label_tree', 'utterance'] def __init__(self, idx='default', labels=None): self.idx =", "'utterance'] def __init__(self, idx='default', labels=None): self.idx = idx self.utterance =", "string to join two consecutive labels. overlap_threshold (float): Maximum overlap", "None: for iv in list(tree_copy): if iv.data.value not in include_labels:", "5, 10), >>>]) >>> >>> res = ll.split([4.6]) >>> len(res)", "labels(self): \"\"\" Return list of labels. \"\"\" return list(self) @property", "def __init__(self, idx='default', labels=None): self.idx = idx self.utterance = None", "list of all occuring label values. Returns: list: Lexicographically sorted", "0, 5), >>> Label('b', 5, 10), >>> Label('c', 11, 15),", "a single label containing the given value. \"\"\" return LabelList(idx=idx,", "end=6.0), >>> Label('a', start=7.0, end=10.2), >>> Label('b', start=10.3, end=14.0) >>>", "def ranges(self, yield_ranges_without_labels=False, include_labels=None): \"\"\" Generate all ranges of the", "cutting_points(list): List of floats defining the points in seconds, where", "Label('a', 15, 18.5) >>> ]) >>> ll.label_total_duration() {'a': 7.5 'b':", "overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start, ov_end )) # Replace the", "Label('d', 10.5, 14) >>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9),", "range intervals = self.label_tree.overlap( iv_start - overlap, iv_end + overlap", "label values. Returns: list: Lexicographically sorted list (str) of label", "LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>>", ">>> next(ranges) (5.1, 7.2, [ < audiomate.annotations.label.Label at 0x1090484c8 >", "in place. 
Args: fn (func): Function to apply to every", "sorted(self.labels) concat_values = [] last_label_end = None for label in", "Also labels that only overlap are included. Args: start(float): Start-time", "distinct label-value. Returns: dict: A dictionary with distinct label-values as", "ignored intentionally, # since it is kind of a weak", "overlapping labels with the same value. Two labels are considered", "set of distinct tokens. \"\"\" tokens = set() for label", "of cutting-points (``x == len(cutting_points) + 1``). The result is", "ValueError('At least one cutting-point is needed!') # we have to", "end of the lastly ending label (upper bound). \"\"\" return", "0.0 for i in range(len(cutting_points) + 1): if i <", ">>> res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ]", "define the correct order') return tokens # # Restructuring #", "end) return [iv.data for iv in intervals] def ranges(self, yield_ranges_without_labels=False,", "the label-list for which the same labels are defined. Args:", "overlap_threshold=0.1): \"\"\" Return a string with all labels concatenated together.", "\"\"\" Apply the given function `fn` to every label in", "4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>>", "data_this = (self.idx, self.label_tree) data_other = (other.idx, other.label_tree) return data_this", "return sum(label.length for label in self.labels) # # Alteration #", "(str): The delimiter used to split labels into tokens. (default:", "intentionally, # since it is kind of a weak ref", "def join(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a string with", "Restructuring # def separated(self): \"\"\" Create a separate Label-List for", "of overlap in seconds. This amount is subtracted from a", "length of all labels (Number of characters). 
\"\"\" return sum(label.length", "]) >>> ll.label_values() ['a', 'b', 'c', 'd'] \"\"\" all_labels =", "iv.data) last_end = iv.end def split(self, cutting_points, shift_times=False, overlap=0.0): \"\"\"", "cutting-points (``x == len(cutting_points) + 1``). The result is a", "yields ranges for which no labels are defined. include_labels(list): If", "all occurrences. Returns: dict: A dictionary containing for every label-value", "is defined by the start of the label. If the", "# Replace the old labels with the updated ones self.label_tree.clear()", "with_label_values(cls, values, idx='default'): \"\"\" Create a new label-list containing labels", "4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a',", "iv_start = 0.0 for i in range(len(cutting_points) + 1): if", "two consecutive labels. overlap_threshold (float): Maximum overlap between two consecutive", "the same value. Example: >>> ll = LabelList(idx='some', labels=[ >>>", "interval.begin - threshold range_end = interval.end + threshold direct_overlaps =", "to the label order. Example: >>> ll = LabelList(idx='some', labels=[", "]) >>> ll.join(' - ') 'a - b - c", "Return a list of all tokens occurring in the label-list.", "Split the label-list into x parts and return them as", "Label('c', 11, 15), >>>]) >>> >>> res = ll.split([4.1, 8.9,", "18.5) >>> ]) >>> ll.label_total_duration() {'a': 7.5 'b': 7.0} \"\"\"", ">>>]) >>> >>> res = ll.split([4.1, 8.9, 12.0]) >>> len(res)", "are considered overlapping, if ``l2.start - l1.end < threshold``. 
Args:", "labels=[ >>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>>", "a concatenated new label while not all_intervals.is_empty(): next_interval = list(all_intervals)[0]", "Constructors # @classmethod def create_single(cls, value, idx='default'): \"\"\" Create a", "def merge_overlaps(self, threshold=0.0): \"\"\" Merge overlapping labels with the same", "l in self} return sorted(all_labels) def label_count(self): \"\"\" Return for", "list of tokens based on all labels. Joins all token", "that receives the current label which can then be edited", "order') return delimiter.join(concat_values) def tokenized(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return", "Label('a_label', 1.5, 2.7), ... Label('b_label', 1.0, 2.0), ... ]) >>>", "4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9)", "splits # # Convenience Constructors # @classmethod def create_single(cls, value,", "= max(0, iv_start - overlap, label.start) label.end = min(iv_end +", "also yields ranges for which no labels are defined. include_labels(list):", "= [] for label in labels: label.label_list = self ivs.append(intervaltree.Interval(label.start,", "return list(self) @property def start(self): \"\"\" Return start of the", "7.5 'b': 7.0} \"\"\" durations = collections.defaultdict(float) for label in", "Label('b', 5.1, 8.9), >>> Label('a', 7.2, 10.5), >>> Label('b', 10.5,", "label-list with a single label containing the given value. \"\"\"", "10.5), >>> Label('b', 10.5, 14), >>> Label('a', 15, 18) >>>", "return x # Split labels when overlapping and merge equal", "to a end-cutting-point. Returns: list: A list of of: class:", "label-value (key) the number of occurrences (value). Example: >>> ll", "A range is defined as a part of the label-list", "to the beginning of the original label-list. overlap(float): Amount of", "label to the end of the list. Args: label (Label):", "labels into tokens. 
See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set of", "are returned. (default ``False``) Returns: list: List of labels in", "recursive_overlaps(next_interval) ov_start = float('inf') ov_end = 0.0 ov_value = next_interval.data.value", "label (lower bound). \"\"\" return self.label_tree.begin() @property def end(self): \"\"\"", "include_labels: tree_copy.remove(iv) def reduce(x, y): x.append(y) return x # Split", "def split(self, cutting_points, shift_times=False, overlap=0.0): \"\"\" Split the label-list into", "not able to define the correct order') return tokens #", "= (other.idx, other.label_tree) return data_this == data_other def __iter__(self): for", "2, 4), >>> Label('timmy', 4, 8) >>> ]) \"\"\" __slots__", "the start is relative to the cutting point and not", "ivs = [] for label in labels: label.label_list = self", "labels between ``0`` and ``cutting_points[0]``. Label-list 1 contains labels between", "]) >>> next(ranges) (4.5, 5.1, []) >>> next(ranges) (5.1, 7.2,", "end=10.2)] >>> s['b'].labels [Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)] \"\"\"", "Create a new label-list containing labels with the given values.", ">>> Label('this', 0, 2), >>> Label('is', 2, 4), >>> Label('timmy',", "in include_labels: tree_copy.remove(iv) def reduce(x, y): x.append(y) return x #", "ll.label_count() {'a': 3 'b': 2} \"\"\" occurrences = collections.defaultdict(int) for", "of overlapping labels with the same value def recursive_overlaps(interval): range_start", ">>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('a',", "start, end))``. \"\"\" self.add(Label(value, start=start, end=end)) def update(self, labels): \"\"\"", "A set of distinct tokens. \"\"\" tokens = set() for", "Create a label-list with a single label containing the given", "ll.labels [Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)] \"\"\" for label", "for every label-list but the first. 
>>> ll = LabelList(labels=[", "sorted order cutting_points = sorted(cutting_points) splits = [] iv_start =", "at 0x1090484c8 > ]) \"\"\" tree_copy = self.label_tree.copy() # Remove", "LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('another_label', 2.0, 3.0) ...", "fully_included(bool): If ``True``, only labels fully included in the range", "If True also yields ranges for which no labels are", "The label to add. \"\"\" label.label_list = self self.label_tree.addi(label.start, label.end,", "ranges = ll.ranges() >>> next(ranges) (3.2, 4.5, [ < audiomate.annotations.Label", "list: Lexicographically sorted list (str) of label values. Example: >>>", "separated_lls.values(): ll.idx = self.idx return separated_lls def labels_in_range(self, start, end,", "idx='default'): \"\"\" Create a label-list with a single label containing", "if iv.data.value not in include_labels: tree_copy.remove(iv) def reduce(x, y): x.append(y)", "not empty, only the label values in the list will", "label_tree (IntervalTree): The interval-tree storing the labels. Example: >>> label_list", "self.label_tree.overlap( iv_start - overlap, iv_end + overlap ) cp_splits =", "all labels concatenated together. Example: >>> ll = LabelList(idx='some', labels=[", "0.4, 5.4) ] \"\"\" if len(cutting_points) == 0: raise ValueError('At", "the label-list into x parts and return them as new", "iv.begin, []) yield (iv.begin, iv.end, iv.data) last_end = iv.end def", "occurrences (value). Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2,", "be created and appended to the label-list. idx(str): The idx", "ll = LabelList(idx=idx) for label_value in values: ll.add(Label(label_value)) return ll", "label_list = LabelList(idx='transcription', labels=[ >>> Label('this', 0, 2), >>> Label('is',", "10.5, 14) >>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c',", "used to split labels into tokens. 
(default: space) overlap_threshold (float):", ".label import Label class LabelList: \"\"\" Represents a list of", "= self.label_tree.copy() # Remove labels not included if include_labels is", "self: durations[label.value] += label.duration return durations def label_values(self): \"\"\" Return", "5), >>> Label('b', 5, 8), >>> Label('a', 8, 10), >>>", ">>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b',", "end=14.0) >>> ]) >>> s = ll.separate() >>> s['a'].labels [Label('a',", "8.9, 12.0]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)]", "b - c - d' \"\"\" sorted_by_start = sorted(self.labels) concat_values", "ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1,", "label.start < overlap_threshold and last_label_end > 0): concat_values.append(label.value) last_label_end =", "not None: for iv in list(tree_copy): if iv.data.value not in", "containing labels with the given values. All labels will have", "the start of the label. If the overlapping between two", "self} return sorted(all_labels) def label_count(self): \"\"\" Return for each label", "label.start += 1.0 ... label.end += 1.0 ... >>> ll.apply(shift_labels)", "adjusted to be relative to the cutting-points for every label-list", "< audiomate.annotations.label.Label at 0x1090484c8 > ]) \"\"\" tree_copy = self.label_tree.copy()", "import collections import copy import intervaltree from .label import Label", "an Exception is thrown. Args: delimiter (str): The delimiter used", "interval.data def __len__(self): return self.label_tree.__len__() def __copy__(self): # utterance is", "tokens occurring in the label-list. Args: delimiter (str): The delimiter", "be relative to the cutting-points for every label-list but the", "\"\"\" sorted_by_start = sorted(self.labels) tokens = [] last_label_end = None", "separate Label-List for every distinct label-value. Returns: dict: A dictionary", "only overlap are included. Args: start(float): Start-time in seconds. 
end(float):", "label.start = max(0, iv_start - overlap, label.start) label.end = min(iv_end", "True, start and end-time are shifted in splitted label-lists. So", "Apply the given function `fn` to every label in this", "Returns: str: A string with all labels concatenated together. Example:", "orig_start = max(0, iv_start - overlap) label.start -= orig_start label.end", "2.0), ... Label('another_label', 2.0, 3.0) ... ]) >>> def shift_labels(label):", "characters). \"\"\" return sum(label.length for label in self.labels) # #", "None: self.update(labels) def __eq__(self, other): data_this = (self.idx, self.label_tree) data_other", "for each label the number of occurrences within the list.", "label.end else: raise ValueError('Labels overlap, not able to define the", "will have default start/end values of 0 and ``inf``. Args:", "label): \"\"\" Add a label to the end of the", "duration of all occurrences. Returns: dict: A dictionary containing for", "for ``add(Label(value, start, end))``. \"\"\" self.add(Label(value, start=start, end=end)) def update(self,", "a function of one argument that receives the current label", ">>> Label('b', start=3.95, end=6.0), >>> Label('c', start=7.0, end=10.2), >>> Label('d',", ">>> next(ranges) (3.2, 4.5, [ < audiomate.annotations.Label at 0x1090527c8 >", "are defined. include_labels(list): If not empty, only the label values", "sorted(self.labels) tokens = [] last_label_end = None for label in", "labels tree_copy.split_overlaps() tree_copy.merge_equals(data_reducer=reduce, data_initializer=[]) intervals = sorted(tree_copy) last_end = intervals[0].begin", "``add(Label(value, start, end))``. \"\"\" self.add(Label(value, start=start, end=end)) def update(self, labels):", "10), >>> Label('c', 11, 15), >>>]) >>> >>> res =", "Label('this', 0, 2), >>> Label('is', 2, 4), >>> Label('timmy', 4,", "['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g'] \"\"\" sorted_by_start", "The interval-tree storing the labels. 
Example: >>> label_list = LabelList(idx='transcription',", "overlapping: ov_start = min(ov_start, overlap.begin) ov_end = max(ov_end, overlap.end) all_intervals.discard(overlap)", "[ < audiomate.annotations.Label at 0x1090527c8 > ]) >>> next(ranges) (4.5,", ">>> Label('a', 0, 5), >>> Label('b', 5, 10), >>> Label('c',", "8.9), >>> Label('a', 7.2, 10.5), >>> Label('b', 10.5, 14), >>>", "LabelList: \"\"\" Represents a list of labels which describe an", "- overlap, iv_end + overlap ) cp_splits = LabelList(idx=self.idx) #", "a list of all tokens occurring in the label-list. Args:", ">>> ll.label_count() {'a': 3 'b': 2} \"\"\" occurrences = collections.defaultdict(int)", "all labels (Number of characters). \"\"\" return sum(label.length for label", "Args: cutting_points(list): List of floats defining the points in seconds,", "for every label-value (key) the total duration in seconds (value).", ">>> Label('a', 15, 18) >>> ]) >>> ll.label_count() {'a': 3", "labels ordered according to the label order. Example: >>> ll", "return data_this == data_other def __iter__(self): for interval in self.label_tree:", "label-lists. So the start is relative to the cutting point", "min(iv_end + overlap, label.end) if shift_times: orig_start = max(0, iv_start", "Return list of labels. \"\"\" return list(self) @property def start(self):", "not all_intervals.is_empty(): next_interval = list(all_intervals)[0] overlapping = recursive_overlaps(next_interval) ov_start =", ">>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('a',", "iv_start = iv_end return splits # # Convenience Constructors #", "in intervals] def ranges(self, yield_ranges_without_labels=False, include_labels=None): \"\"\" Generate all ranges", "label Example: >>> ll = LabelList(labels=[ ... 
Label('a_label', 1.0, 2.0),", "(default ``False``) Returns: list: List of labels in the range.", "Values # def join(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a", "it is kind of a weak ref return LabelList( idx=self.idx,", "\"\"\" ivs = [] for label in labels: label.label_list =", "intervals = sorted(tree_copy) last_end = intervals[0].begin # yield range by", "\"\"\" self.add(Label(value, start=start, end=end)) def update(self, labels): \"\"\" Add a", "labels are defined. Args: yield_ranges_without_labels(bool): If True also yields ranges", "4), >>> Label('timmy', 4, 8) >>> ]) \"\"\" __slots__ =", "to the cutting point and not to the beginning of", "in list(tree_copy): if iv.data.value not in include_labels: tree_copy.remove(iv) def reduce(x,", "labels. Returns: str: A string with all labels concatenated together.", "Return for each label the number of occurrences within the", "for label in self.labels) # # Alteration # def add(self,", "label_total_duration(self): \"\"\" Return for each distinct label value the total", "data_other def __iter__(self): for interval in self.label_tree: yield interval.data def", "of the list. Args: label (Label): The label to add.", "contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args:", "defining the points in seconds, where the label-list is splitted.", "(self.idx, self.label_tree) data_other = (other.idx, other.label_tree) return data_this == data_other", "splitted label-lists. So the start is relative to the cutting", "Label class LabelList: \"\"\" Represents a list of labels which", "', overlap_threshold=0.1): \"\"\" Return a string with all labels concatenated", "and appended to the label-list. idx(str): The idx of the", "(upper bound). \"\"\" return self.label_tree.end() @property def total_length(self): \"\"\" Return", ":meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set of distinct tokens. 
\"\"\" tokens", "Maximal distance between two labels to be considered as overlapping.", "Label('b', start=10.3, end=14.0)] \"\"\" separated_lls = collections.defaultdict(LabelList) for label in", "0, 5), >>> Label('b', 5, 10), >>>]) >>> >>> res", "2.7), Label('b_label', 1.0, 2.0), ] \"\"\" updated_labels = [] all_intervals", "multiple label-lists. Args: idx (str): An unique identifier for the", "identifier for the label-list within a corpus for one utterance.", "labels. Example: >>> label_list = LabelList(idx='transcription', labels=[ >>> Label('this', 0,", "\"\"\" Return a list of all occuring label values. Returns:", "label.start < overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end =", "ref return LabelList( idx=self.idx, labels=copy.deepcopy([iv.data for iv in self.label_tree], memo)", "{'a': 3 'b': 2} \"\"\" occurrences = collections.defaultdict(int) for label", "label values. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2,", "value, start=0.0, end=float('inf')): \"\"\" Shortcut for ``add(Label(value, start, end))``. \"\"\"", "end=4), Label('a', start=7.0, end=10.2)] >>> s['b'].labels [Label('b', start=3.95, end=6.0), Label('b',", "last_end: yield (last_end, iv.begin, []) yield (iv.begin, iv.end, iv.data) last_end", "overlapping. (default: 0.0) Example: >>> ll = LabelList(labels=[ ... Label('a_label',", "self.label_tree.overlap(start, end) return [iv.data for iv in intervals] def ranges(self,", ">>> ll.idx 'letters' >>> ll.labels [ Label('a', 0, inf), Label('x',", "total duration of all occurrences. Returns: dict: A dictionary containing", "c - d' \"\"\" sorted_by_start = sorted(self.labels) concat_values = []", "New label-list. Example: >>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters')", "a start-cutting-point, and added to a end-cutting-point. Returns: list: A", "the current label which can then be edited in place.", "label-value. 
Returns: dict: A dictionary with distinct label-values as keys.", "string with all labels concatenated together. The order of the", "labels. \"\"\" return list(self) @property def start(self): \"\"\" Return start", "yield (iv.begin, iv.end, iv.data) last_end = iv.end def split(self, cutting_points,", "kind of a weak ref return LabelList( idx=self.idx, labels=[iv.data for", "concatenated new label while not all_intervals.is_empty(): next_interval = list(all_intervals)[0] overlapping", "ov_end = 0.0 ov_value = next_interval.data.value for overlap in overlapping:", "return delimiter.join(concat_values) def tokenized(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a", "0.0, 0.4), Label('b', 0.4, 5.4) ] \"\"\" if len(cutting_points) ==", ">>> Label('b', 5, 10), >>>]) >>> >>> res = ll.split([4.6])", "@property def start(self): \"\"\" Return start of the earliest starting", "ov_end )) # Replace the old labels with the updated", "defined by the start of the label. If the overlapping", "# yield an empty range if necessary if yield_ranges_without_labels and", "- label.start < overlap_threshold and last_label_end > 0): concat_values.append(label.value) last_label_end", "values, idx='default'): \"\"\" Create a new label-list containing labels with", "label in sorted_by_start: if last_label_end is None or (last_label_end -", "The list containing the :py:class:`audiomate.annotations.Label`. Attributes: utterance (Utterance): The utterance", "labels which describe an utterance. An utterance can have multiple", "in labels: label.label_list = self ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs) def", "+ 1``). The result is a list of label-lists corresponding", ">>> ll.labels [ Label('a', 0, inf), Label('x', 0, inf), Label('z',", "with updated times for iv in intervals: label = copy.deepcopy(iv.data)", "than ``overlap_threshold``, an Exception is thrown. 
Args: delimiter (str): A", "] \"\"\" updated_labels = [] all_intervals = self.label_tree.copy() # recursivly", "start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f g',", "= 0.0 for i in range(len(cutting_points) + 1): if i", "]) @classmethod def with_label_values(cls, values, idx='default'): \"\"\" Create a new", "updated ones self.label_tree.clear() self.update(updated_labels) # # Statistics # def label_total_duration(self):", "the beginning of the original label-list. overlap(float): Amount of overlap", "intervaltree.IntervalTree() if labels is not None: self.update(labels) def __eq__(self, other):", "delimiter=' '): \"\"\" Return a list of all tokens occurring", "have multiple label-lists. Args: idx (str): An unique identifier for", "Returns: generator: A generator which yields one range (tuple start/end/list-of-labels)", "Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)] If", "return self.label_tree.begin() @property def end(self): \"\"\" Return end of the", "cumulative length of all labels (Number of characters). \"\"\" return", "if last_label_end is None or (last_label_end - label.start < overlap_threshold", "when overlapping and merge equal ranges to a list of", "each label the number of occurrences within the list. Returns:", "Maximum overlap between two consecutive labels. Returns: str: A list", "\"\"\" label.label_list = self self.label_tree.addi(label.start, label.end, label) def addl(self, value,", "in seconds. This amount is subtracted from a start-cutting-point, and", "the first. >>> ll = LabelList(labels=[ >>> Label('a', 0, 5),", "the same value def recursive_overlaps(interval): range_start = interval.begin - threshold", "all tokens occurring in the label-list. Args: delimiter (str): The", "ll = LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>> Label('b',", "the label. 
If the overlapping between two labels is greater", "label)) self.label_tree.update(ivs) def apply(self, fn): \"\"\" Apply the given function", "in the range are returned. Otherwise also overlapping ones are", "the given range. Also labels that only overlap are included.", "space) overlap_threshold (float): Maximum overlap between two consecutive labels. Returns:", "of a weak ref return LabelList( idx=self.idx, labels=[iv.data for iv", "with all labels concatenated together. Example: >>> ll = LabelList(idx='some',", "overlap(float): Amount of overlap in seconds. This amount is subtracted", "delimiter (str): A string to join two consecutive labels. overlap_threshold", "cutting_points, shift_times=False, overlap=0.0): \"\"\" Split the label-list into x parts", "\"\"\" Return the cumulative length of all labels (Number of", "first. >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>>", "[] for label in labels: label.label_list = self ivs.append(intervaltree.Interval(label.start, label.end,", "with all labels concatenated together. The order of the labels", "label list. `fn` is a function of one argument that", "label.start) label.end = min(iv_end + overlap, label.end) if shift_times: orig_start", "Label('b', start=10.3, end=14.0) >>> ]) >>> s = ll.separate() >>>", "# yield range by range for iv in intervals: #", "] \"\"\" if len(cutting_points) == 0: raise ValueError('At least one", "def total_length(self): \"\"\" Return the cumulative length of all labels", "1.0, 2.0), ] \"\"\" updated_labels = [] all_intervals = self.label_tree.copy()", "(Utterance): The utterance this label-list is belonging to. label_tree (IntervalTree):", "label in labels: label.label_list = self ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs)", "iv.end, iv.data) last_end = iv.end def split(self, cutting_points, shift_times=False, overlap=0.0):", "distinct label value the total duration of all occurrences. Returns:", ">>> ll = LabelList(labels=[ ... 
Label('a_label', 1.0, 2.0), ... Label('a_label',", "Return end of the lastly ending label (upper bound). \"\"\"", "of distinct tokens. \"\"\" tokens = set() for label in", ">>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0,", "as overlapping. (default: 0.0) Example: >>> ll = LabelList(labels=[ ...", "\"\"\" return self.label_tree.begin() @property def end(self): \"\"\" Return end of", "self.update(labels) def __eq__(self, other): data_this = (self.idx, self.label_tree) data_other =", "[ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ] \"\"\" if", "+= label.duration return durations def label_values(self): \"\"\" Return a list", "tree_copy = self.label_tree.copy() # Remove labels not included if include_labels", "Label('a', 8, 10), >>> Label('b', 10, 14), >>> Label('a', 15,", "[]) >>> next(ranges) (5.1, 7.2, [ < audiomate.annotations.label.Label at 0x1090484c8", "of floats defining the points in seconds, where the label-list", "start=3.95, end=6.0), Label('b', start=10.3, end=14.0)] \"\"\" separated_lls = collections.defaultdict(LabelList) for", "sorted_by_start = sorted(self.labels) tokens = [] last_label_end = None for", "tokens. \"\"\" tokens = set() for label in self: tokens", "10), >>>]) >>> >>> res = ll.split([4.6]) >>> len(res) 4", "label_values(self): \"\"\" Return a list of all occuring label values.", "labels is not None: self.update(labels) def __eq__(self, other): data_this =", "values. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5),", "'d'] \"\"\" all_labels = {l.value for l in self} return", "overlapping ones are returned. (default ``False``) Returns: list: List of", "occurrences = collections.defaultdict(int) for label in self: occurrences[label.value] += 1", "# # Restructuring # def separated(self): \"\"\" Create a separate", "label order. 
Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a", ">>> s['a'].labels [Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)] >>> s['b'].labels", "the labels is defined by the start of the label.", "seconds. fully_included(bool): If ``True``, only labels fully included in the", "= LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9),", "(IntervalTree): The interval-tree storing the labels. Example: >>> label_list =", "overlapping between two labels is greater than ``overlap_threshold``, an Exception", "point and not to the beginning of the original label-list.", "between ``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args: cutting_points(list): List", "add. \"\"\" label.label_list = self self.label_tree.addi(label.start, label.end, label) def addl(self,", "iv in self.label_tree] ) def __deepcopy__(self, memo): # utterance is", "0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4,", "collections.defaultdict(LabelList) for label in self.labels: separated_lls[label.value].add(label) for ll in separated_lls.values():", ">>> Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0)", "split(self, cutting_points, shift_times=False, overlap=0.0): \"\"\" Split the label-list into x", "\"\"\" occurrences = collections.defaultdict(int) for label in self: occurrences[label.value] +=", "other): data_this = (self.idx, self.label_tree) data_other = (other.idx, other.label_tree) return", "labels with the updated ones self.label_tree.clear() self.update(updated_labels) # # Statistics", "between two labels to be considered as overlapping. (default: 0.0)", "'b': 7.0} \"\"\" durations = collections.defaultdict(float) for label in self:", "str: A string with all labels concatenated together. Example: >>>", "\"\"\" Split the label-list into x parts and return them", "(tuple start/end/list-of-labels) at a time. Example: >>> ll = LabelList(labels=[", "end of the list. 
Args: label (Label): The label to", "is needed!') # we have to loop in sorted order", "Label('b', 10.5, 14), >>> Label('a', 15, 18) >>> ]) >>>", "needed!') # we have to loop in sorted order cutting_points", "iv.end def split(self, cutting_points, shift_times=False, overlap=0.0): \"\"\" Split the label-list", "shift_times(bool): If True, start and end-time are shifted in splitted", "of labels, that are within the given range. Also labels", "\"\"\" return LabelList(idx=idx, labels=[ Label(value=value) ]) @classmethod def with_label_values(cls, values,", "labels with the given values. All labels will have default", "5.0, 8.9) ] >>> res[2].labels [ Label('b', 8.9, 10.0), Label('c',", "'b', 'c', 'd'] \"\"\" all_labels = {l.value for l in", "__slots__ = ['idx', 'label_tree', 'utterance'] def __init__(self, idx='default', labels=None): self.idx", ">>> Label('a', start=7.0, end=10.2), >>> Label('b', start=10.3, end=14.0) >>> ])", "idx=self.idx, labels=copy.deepcopy([iv.data for iv in self.label_tree], memo) ) @property def", "start=0.0, end=float('inf')): \"\"\" Shortcut for ``add(Label(value, start, end))``. \"\"\" self.add(Label(value,", "``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. 
And", "Args: yield_ranges_without_labels(bool): If True also yields ranges for which no", "Example: >>> label_list = LabelList(idx='transcription', labels=[ >>> Label('this', 0, 2),", "a list of labels to the end of the list.", "\"\"\" Add a label to the end of the list.", "res = ll.split([4.6]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0,", "is kind of a weak ref return LabelList( idx=self.idx, labels=copy.deepcopy([iv.data", "start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('a', start=7.0, end=10.2),", "in overlapping: ov_start = min(ov_start, overlap.begin) ov_end = max(ov_end, overlap.end)", ">>> Label('a', 15, 18.5) >>> ]) >>> ll.label_total_duration() {'a': 7.5", "start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ]) >>>", "self.label_tree.copy() # recursivly find a group of overlapping labels with", "generator which yields one range (tuple start/end/list-of-labels) at a time.", "to add. \"\"\" label.label_list = self self.label_tree.addi(label.start, label.end, label) def", "'c', 'a', 'f', 'g'] \"\"\" sorted_by_start = sorted(self.labels) tokens =", ") @property def labels(self): \"\"\" Return list of labels. \"\"\"", "-= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end return splits #", "memo) ) @property def labels(self): \"\"\" Return list of labels.", "is ignored intentionally, # since it is kind of a", "ranges of the label-list. A range is defined as a", "list of all tokens occurring in the label-list. Args: delimiter", "and not to the beginning of the original label-list. overlap(float):", "overlap between two consecutive labels. Returns: str: A list containing", "... Label('b_label', 1.0, 2.0), ... ]) >>> ll.merge_overlapping_labels() >>> ll.labels", "end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2),", "`fn` to every label in this label list. 
`fn` is", "= min(iv_end + overlap, label.end) if shift_times: orig_start = max(0,", "# - Find overlapping intervals recursively # - Remove them", "label-list into x parts and return them as new label-lists.", "next_interval = list(all_intervals)[0] overlapping = recursive_overlaps(next_interval) ov_start = float('inf') ov_end", "relative to the cutting point and not to the beginning", "a list of labels, that are within the given range.", "end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3,", "label in this label list. `fn` is a function of", "split labels into tokens. (default: space) overlap_threshold (float): Maximum overlap", "Exception is thrown. Args: delimiter (str): The delimiter used to", "8), >>> Label('a', 8, 10), >>> Label('b', 10, 14), >>>", "empty range if necessary if yield_ranges_without_labels and iv.begin > last_end:", "keys. Every value is a LabelList containing only labels with", "consecutive labels. Returns: str: A string with all labels concatenated", "x parts and return them as new label-lists. x is", "start(self): \"\"\" Return start of the earliest starting label (lower", "ordered according to the label order. Example: >>> ll =", "is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter", "0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else: raise ValueError('Labels overlap, not", "last_label_end = None for label in sorted_by_start: if last_label_end is", "0: raise ValueError('At least one cutting-point is needed!') # we", "# Convenience Constructors # @classmethod def create_single(cls, value, idx='default'): \"\"\"", "LabelList(idx=self.idx) # Extract labels from intervals with updated times for", "10.5), >>> Label('d', 10.5, 14) >>>]) >>> ranges = ll.ranges()", "one utterance. labels (list): The list containing the :py:class:`audiomate.annotations.Label`. Attributes:", "the label-list. 
A range is defined as a part of", "while not all_intervals.is_empty(): next_interval = list(all_intervals)[0] overlapping = recursive_overlaps(next_interval) ov_start", "2.0, 3.0), Label(another_label, 3.0, 4.0)] \"\"\" for label in self.labels:", "Label('z', 0, inf), ] \"\"\" ll = LabelList(idx=idx) for label_value", "to every label Example: >>> ll = LabelList(labels=[ ... Label('a_label',", "of all occuring label values. Returns: list: Lexicographically sorted list", "]) >>> def shift_labels(label): ... label.start += 1.0 ... label.end", "Label('a', 3, 5), >>> Label('b', 5, 8), >>> Label('a', 8,", "to each part. Label-list 0 contains labels between ``0`` and", "- Remove them # - Create a concatenated new label", "of occurrences (value). Example: >>> ll = LabelList(labels=[ >>> Label('a',", "return tokens # # Query Label Values # def join(self,", "] >>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times = True``,", "and merge equal ranges to a list of labels tree_copy.split_overlaps()", "times are adjusted to be relative to the cutting-points for", "time. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5),", "for which no labels are defined. include_labels(list): If not empty,", "start and end-time are shifted in splitted label-lists. So the", "to the end of the list. Args: label (Label): The", "a weak ref return LabelList( idx=self.idx, labels=copy.deepcopy([iv.data for iv in", "Find overlapping intervals recursively # - Remove them # -", "1``). The result is a list of label-lists corresponding to", "= intervals[0].begin # yield range by range for iv in", "__deepcopy__(self, memo): # utterance is ignored intentionally, # since it", ">>> Label('b', 5, 8), >>> Label('a', 8, 10), >>> Label('b',", "next(ranges) (3.2, 4.5, [ < audiomate.annotations.Label at 0x1090527c8 > ])", "7.2, 10.5), >>> Label('b', 10.5, 14), >>> Label('a', 15, 18)", "'q', 'b', 'c', 'a', 'f', 'g'] \"\"\" sorted_by_start = sorted(self.labels)", "list will be considered. 
Returns: generator: A generator which yields", "Args: fn (func): Function to apply to every label Example:", "each distinct label value the total duration of all occurrences.", "every distinct label-value. Returns: dict: A dictionary with distinct label-values", "the label-list. idx(str): The idx of the label-list. Returns: (LabelList):", "every remaining interval # - Find overlapping intervals recursively #", "in self.label_tree] ) def __deepcopy__(self, memo): # utterance is ignored", "single label containing the given value. \"\"\" return LabelList(idx=idx, labels=[", "tokenized(self, delimiter=' ', overlap_threshold=0.1): \"\"\" Return a ordered list of", "in seconds. fully_included(bool): If ``True``, only labels fully included in", "[]) yield (iv.begin, iv.end, iv.data) last_end = iv.end def split(self,", "all labels. Joins all token from all labels (``label.tokenized()```). If", "'label_tree', 'utterance'] def __init__(self, idx='default', labels=None): self.idx = idx self.utterance", "storing the labels. Example: >>> label_list = LabelList(idx='transcription', labels=[ >>>", "= sorted(self.labels) concat_values = [] last_label_end = None for label", ">>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>>", "overlap.begin) ov_end = max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start, ov_end", "intervaltree from .label import Label class LabelList: \"\"\" Represents a", "A list containing tokens of all labels ordered according to", "dict: A dictionary with distinct label-values as keys. Every value", "List of floats defining the points in seconds, where the", "labels fully included in the range are returned. Otherwise also", "(value). 
Example: >>> ll = LabelList(labels=[ >>> Label('a', 3, 5),", "5.1, 8.9), Label('c', 7.2, 10.5)] \"\"\" if fully_included: intervals =", "amount is subtracted from a start-cutting-point, and added to a", "labels=copy.deepcopy([iv.data for iv in self.label_tree], memo) ) @property def labels(self):", "to define the correct order') return tokens # # Restructuring", "from intervals with updated times for iv in intervals: label", "self.idx return separated_lls def labels_in_range(self, start, end, fully_included=False): \"\"\" Return", "only labels fully included in the range are returned. Otherwise", "Return a string with all labels concatenated together. The order", "relative to the cutting-points for every label-list but the first.", "Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ] \"\"\" if len(cutting_points)", "Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>> ranges", "part. Label-list 0 contains labels between ``0`` and ``cutting_points[0]``. Label-list" ]
[ "[] errors = [] if DataKey.AGES not in spot.data[config]: #", "ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return graph_and_points ############### ### Actions ###", "self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return graph_and_points ############### ### Actions ### ###############", "config and current_spot: self.plot_cps_graph(current_spot, config) def plot_cps_graph(self, spot, config): axis", "layout.addWidget(self._create_age_graph_and_point_selection()) return layout def _create_age_graph_and_point_selection(self): graph_and_points = QWidget() layout =", "axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs = [] ys = [] errors =", "name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return layout def _create_age_graph_and_point_selection(self): graph_and_points = QWidget() layout", "x = i + 1 y, dy = age xs.append(x)", "Qt from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel", "if isinstance(age, str): continue x = i + 1 y,", "= ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return graph_and_points ############### ### Actions", "class AgeResultsWidget(QWidget): def __init__(self, results_dialog): QWidget.__init__(self) self.results_dialog = results_dialog layout", "= QVBoxLayout() fig = plt.figure() self.axes = plt.axes() graph_widget, self.canvas", "xs.append(x) if y is None: ys.append(0) errors.append(0) else: ys.append(y) errors.append(dy)", "plot words on graph return ages = spot.data[config][DataKey.AGES] if len(ages)", "ages = spot.data[config][DataKey.AGES] if len(ages) != 0: for i, age", "marker='o') axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment=\"center\") axis.set_xlabel(\"Scan number\") axis.set_ylabel(\"Age (ka)\")", "def _create_age_graph_and_point_selection(self): graph_and_points = 
QWidget() layout = QVBoxLayout() fig =", "matplotlib from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QHBoxLayout, QDialog,", "self.results_dialog.sample_tree.current_spot() config = self.results_dialog.configuration_widget.current_config if config and current_spot: self.plot_cps_graph(current_spot, config)", "fig = plt.figure() self.axes = plt.axes() graph_widget, self.canvas = ui_utils.create_figure_widget(fig,", "= [] errors = [] if DataKey.AGES not in spot.data[config]:", "= i + 1 y, dy = age xs.append(x) if", "= plt.figure() self.axes = plt.axes() graph_widget, self.canvas = ui_utils.create_figure_widget(fig, self)", "config = self.results_dialog.configuration_widget.current_config if config and current_spot: self.plot_cps_graph(current_spot, config) def", "errors = [] if DataKey.AGES not in spot.data[config]: # TODO", "and spot name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return layout def _create_age_graph_and_point_selection(self): graph_and_points =", "graph_widget, self.canvas = ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return graph_and_points ###############", "QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel matplotlib.use('QT5Agg') import matplotlib.pyplot as", "current_spot = self.results_dialog.sample_tree.current_spot() config = self.results_dialog.configuration_widget.current_config if config and current_spot:", "weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE] if isinstance(weighted_age, str): string = \"No", "else: string = f\"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}\" axis.errorbar(xs, ys,", "ui_utils class AgeResultsWidget(QWidget): def __init__(self, results_dialog): QWidget.__init__(self) self.results_dialog = results_dialog", "# TODO plot some text return weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE]", "dy = age xs.append(x) if y is None: ys.append(0) errors.append(0)", "import 
matplotlib.pyplot as plt from models.data_key import DataKey from utils", "results_dialog): QWidget.__init__(self) self.results_dialog = results_dialog layout = QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout)", "QPushButton, QWidget, QVBoxLayout, QLabel matplotlib.use('QT5Agg') import matplotlib.pyplot as plt from", "spot, config): axis = self.axes axis.clear() if spot is None:", "import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel matplotlib.use('QT5Agg') import matplotlib.pyplot", "y, dy = age xs.append(x) if y is None: ys.append(0)", "= [] ys = [] errors = [] if DataKey.AGES", "return layout def _create_age_graph_and_point_selection(self): graph_and_points = QWidget() layout = QVBoxLayout()", "plot some text return weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE] if isinstance(weighted_age,", "ys = [] errors = [] if DataKey.AGES not in", "QVBoxLayout, QLabel matplotlib.use('QT5Agg') import matplotlib.pyplot as plt from models.data_key import", "i, j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self): layout = QVBoxLayout() layout.addWidget(QLabel(\"Sample", "### ############### def replot_graph(self): current_spot = self.results_dialog.sample_tree.current_spot() config = self.results_dialog.configuration_widget.current_config", "= plt.axes() graph_widget, self.canvas = ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return", "i + 1 y, dy = age xs.append(x) if y", "1σ: {age_st_dev:.0f}\" axis.errorbar(xs, ys, yerr=errors, linestyle=\"none\", marker='o') axis.text(0.5, 1, string,", "TODO plot some text return weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE] if", "enumerate(ages): if isinstance(age, str): continue x = i + 1", "some text return weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE] if isinstance(weighted_age, str):", 
"_create_age_graph_and_point_selection(self): graph_and_points = QWidget() layout = QVBoxLayout() fig = plt.figure()", "self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self): layout =", "matplotlib.use('QT5Agg') import matplotlib.pyplot as plt from models.data_key import DataKey from", "return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs = [] ys = [] errors", "return ages = spot.data[config][DataKey.AGES] if len(ages) != 0: for i,", "layout def _create_age_graph_and_point_selection(self): graph_and_points = QWidget() layout = QVBoxLayout() fig", "import Qt from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout,", "age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE] if isinstance(weighted_age, str): string = \"No weighted", "from models.data_key import DataKey from utils import ui_utils class AgeResultsWidget(QWidget):", "ys, yerr=errors, linestyle=\"none\", marker='o') axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment=\"center\") axis.set_xlabel(\"Scan", "= results_dialog layout = QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j:", "def replot_graph(self): current_spot = self.results_dialog.sample_tree.current_spot() config = self.results_dialog.configuration_widget.current_config if config", "axis = self.axes axis.clear() if spot is None: return axis.spines['top'].set_visible(False)", "f\"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}\" axis.errorbar(xs, ys, yerr=errors, linestyle=\"none\", marker='o')", "spot is None: return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs = [] ys", "graph return ages = spot.data[config][DataKey.AGES] if len(ages) != 0: for", "as plt from models.data_key 
import DataKey from utils import ui_utils", "utils import ui_utils class AgeResultsWidget(QWidget): def __init__(self, results_dialog): QWidget.__init__(self) self.results_dialog", "QVBoxLayout() layout.addWidget(QLabel(\"Sample and spot name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return layout def _create_age_graph_and_point_selection(self):", "xs = [] ys = [] errors = [] if", "= self.axes axis.clear() if spot is None: return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False)", "None: return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs = [] ys = []", "in enumerate(ages): if isinstance(age, str): continue x = i +", "from utils import ui_utils class AgeResultsWidget(QWidget): def __init__(self, results_dialog): QWidget.__init__(self)", "on graph return ages = spot.data[config][DataKey.AGES] if len(ages) != 0:", "continue x = i + 1 y, dy = age", "QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self):", "ys.append(0) errors.append(0) else: ys.append(y) errors.append(dy) else: # TODO plot some", "axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment=\"center\") axis.set_xlabel(\"Scan number\") axis.set_ylabel(\"Age (ka)\") self.canvas.draw()", "not in spot.data[config]: # TODO plot words on graph return", "\"No weighted age\" else: string = f\"Weighted age: {weighted_age:.0f}, 1σ:", "from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton,", "def _create_widget(self): layout = QVBoxLayout() layout.addWidget(QLabel(\"Sample and spot name\")) layout.addWidget(self._create_age_graph_and_point_selection())", "is None: return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs = [] ys =", "!= 0: for i, 
age in enumerate(ages): if isinstance(age, str):", "<filename>src/views/age_results_widget.py import matplotlib from PyQt5.QtCore import Qt from PyQt5.QtWidgets import", "return graph_and_points ############### ### Actions ### ############### def replot_graph(self): current_spot", "layout.addWidget(QLabel(\"Sample and spot name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return layout def _create_age_graph_and_point_selection(self): graph_and_points", "= self.results_dialog.configuration_widget.current_config if config and current_spot: self.plot_cps_graph(current_spot, config) def plot_cps_graph(self,", "[] if DataKey.AGES not in spot.data[config]: # TODO plot words", "def __init__(self, results_dialog): QWidget.__init__(self) self.results_dialog = results_dialog layout = QHBoxLayout()", "else: # TODO plot some text return weighted_age, age_st_dev =", "results_dialog layout = QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph())", "Actions ### ############### def replot_graph(self): current_spot = self.results_dialog.sample_tree.current_spot() config =", "return weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE] if isinstance(weighted_age, str): string =", "DataKey from utils import ui_utils class AgeResultsWidget(QWidget): def __init__(self, results_dialog):", "spot.data[config]: # TODO plot words on graph return ages =", "current_spot: self.plot_cps_graph(current_spot, config) def plot_cps_graph(self, spot, config): axis = self.axes", "spot.data[config][DataKey.WEIGHTED_AGE] if isinstance(weighted_age, str): string = \"No weighted age\" else:", "layout = QVBoxLayout() layout.addWidget(QLabel(\"Sample and spot name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return layout", "len(ages) != 0: for i, age in enumerate(ages): if isinstance(age,", "self.canvas = 
ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return graph_and_points ############### ###", "graph_and_points.setLayout(layout) return graph_and_points ############### ### Actions ### ############### def replot_graph(self):", "AgeResultsWidget(QWidget): def __init__(self, results_dialog): QWidget.__init__(self) self.results_dialog = results_dialog layout =", "= spot.data[config][DataKey.AGES] if len(ages) != 0: for i, age in", "str): string = \"No weighted age\" else: string = f\"Weighted", "models.data_key import DataKey from utils import ui_utils class AgeResultsWidget(QWidget): def", "= age xs.append(x) if y is None: ys.append(0) errors.append(0) else:", "j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self): layout = QVBoxLayout() layout.addWidget(QLabel(\"Sample and", "layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self): layout", "config) def plot_cps_graph(self, spot, config): axis = self.axes axis.clear() if", "# TODO plot words on graph return ages = spot.data[config][DataKey.AGES]", "axis.errorbar(xs, ys, yerr=errors, linestyle=\"none\", marker='o') axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment=\"center\")", "PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel matplotlib.use('QT5Agg') import", "QDialog, QPushButton, QWidget, QVBoxLayout, QLabel matplotlib.use('QT5Agg') import matplotlib.pyplot as plt", "__init__(self, results_dialog): QWidget.__init__(self) self.results_dialog = results_dialog layout = QHBoxLayout() layout.addLayout(self._create_widget())", "1 y, dy = age xs.append(x) if y is None:", "text return weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE] if 
isinstance(weighted_age, str): string", "QWidget() layout = QVBoxLayout() fig = plt.figure() self.axes = plt.axes()", "self.axes = plt.axes() graph_widget, self.canvas = ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout)", "plt from models.data_key import DataKey from utils import ui_utils class", "if len(ages) != 0: for i, age in enumerate(ages): if", "layout = QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph)", "self.results_dialog.configuration_widget.current_config if config and current_spot: self.plot_cps_graph(current_spot, config) def plot_cps_graph(self, spot,", "age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}\" axis.errorbar(xs, ys, yerr=errors, linestyle=\"none\", marker='o') axis.text(0.5,", "ys.append(y) errors.append(dy) else: # TODO plot some text return weighted_age,", "[] ys = [] errors = [] if DataKey.AGES not", "axis.clear() if spot is None: return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs =", "errors.append(0) else: ys.append(y) errors.append(dy) else: # TODO plot some text", "self.results_dialog = results_dialog layout = QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i,", "age\" else: string = f\"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}\" axis.errorbar(xs,", "QLabel matplotlib.use('QT5Agg') import matplotlib.pyplot as plt from models.data_key import DataKey", "spot.data[config][DataKey.AGES] if len(ages) != 0: for i, age in enumerate(ages):", "str): continue x = i + 1 y, dy =", "import ui_utils class AgeResultsWidget(QWidget): def __init__(self, results_dialog): QWidget.__init__(self) self.results_dialog =", 
"results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self): layout = QVBoxLayout() layout.addWidget(QLabel(\"Sample and spot name\"))", "None: ys.append(0) errors.append(0) else: ys.append(y) errors.append(dy) else: # TODO plot", "graph_and_points = QWidget() layout = QVBoxLayout() fig = plt.figure() self.axes", "graph_and_points ############### ### Actions ### ############### def replot_graph(self): current_spot =", "matplotlib.pyplot as plt from models.data_key import DataKey from utils import", "TODO plot words on graph return ages = spot.data[config][DataKey.AGES] if", "from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel matplotlib.use('QT5Agg')", "if DataKey.AGES not in spot.data[config]: # TODO plot words on", "import matplotlib from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QHBoxLayout,", "axis.spines['right'].set_visible(False) xs = [] ys = [] errors = []", "in spot.data[config]: # TODO plot words on graph return ages", "age in enumerate(ages): if isinstance(age, str): continue x = i", "age xs.append(x) if y is None: ys.append(0) errors.append(0) else: ys.append(y)", "self.plot_cps_graph(current_spot, config) def plot_cps_graph(self, spot, config): axis = self.axes axis.clear()", "if y is None: ys.append(0) errors.append(0) else: ys.append(y) errors.append(dy) else:", "y is None: ys.append(0) errors.append(0) else: ys.append(y) errors.append(dy) else: #", "spot name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return layout def _create_age_graph_and_point_selection(self): graph_and_points = QWidget()", "0: for i, age in enumerate(ages): if isinstance(age, str): continue", "### Actions ### ############### def replot_graph(self): current_spot = self.results_dialog.sample_tree.current_spot() config", "yerr=errors, linestyle=\"none\", marker='o') axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment=\"center\") axis.set_xlabel(\"Scan 
number\")", "DataKey.AGES not in spot.data[config]: # TODO plot words on graph", "if isinstance(weighted_age, str): string = \"No weighted age\" else: string", "= QVBoxLayout() layout.addWidget(QLabel(\"Sample and spot name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return layout def", "= self.results_dialog.sample_tree.current_spot() config = self.results_dialog.configuration_widget.current_config if config and current_spot: self.plot_cps_graph(current_spot,", "{weighted_age:.0f}, 1σ: {age_st_dev:.0f}\" axis.errorbar(xs, ys, yerr=errors, linestyle=\"none\", marker='o') axis.text(0.5, 1,", "i, age in enumerate(ages): if isinstance(age, str): continue x =", "= \"No weighted age\" else: string = f\"Weighted age: {weighted_age:.0f},", "plt.figure() self.axes = plt.axes() graph_widget, self.canvas = ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget)", "self.axes axis.clear() if spot is None: return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs", "self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self): layout = QVBoxLayout() layout.addWidget(QLabel(\"Sample and spot", "import DataKey from utils import ui_utils class AgeResultsWidget(QWidget): def __init__(self,", "linestyle=\"none\", marker='o') axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment=\"center\") axis.set_xlabel(\"Scan number\") axis.set_ylabel(\"Age", "+ 1 y, dy = age xs.append(x) if y is", "replot_graph(self): current_spot = self.results_dialog.sample_tree.current_spot() config = self.results_dialog.configuration_widget.current_config if config and", "weighted age\" else: string = f\"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}\"", "PyQt5.QtCore import Qt from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget,", "config): axis = self.axes axis.clear() if spot is None: return", "plt.axes() graph_widget, self.canvas = 
ui_utils.create_figure_widget(fig, self) layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return graph_and_points", "############### ### Actions ### ############### def replot_graph(self): current_spot = self.results_dialog.sample_tree.current_spot()", "{age_st_dev:.0f}\" axis.errorbar(xs, ys, yerr=errors, linestyle=\"none\", marker='o') axis.text(0.5, 1, string, transform=axis.transAxes,", "= QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def", "= QWidget() layout = QVBoxLayout() fig = plt.figure() self.axes =", "for i, age in enumerate(ages): if isinstance(age, str): continue x", "results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph()) results_dialog.configuration_changed.connect(self.replot_graph) def _create_widget(self): layout = QVBoxLayout()", "= f\"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}\" axis.errorbar(xs, ys, yerr=errors, linestyle=\"none\",", "and current_spot: self.plot_cps_graph(current_spot, config) def plot_cps_graph(self, spot, config): axis =", "if spot is None: return axis.spines['top'].set_visible(False) axis.spines['right'].set_visible(False) xs = []", "if config and current_spot: self.plot_cps_graph(current_spot, config) def plot_cps_graph(self, spot, config):", "############### def replot_graph(self): current_spot = self.results_dialog.sample_tree.current_spot() config = self.results_dialog.configuration_widget.current_config if", "= [] if DataKey.AGES not in spot.data[config]: # TODO plot", "plot_cps_graph(self, spot, config): axis = self.axes axis.clear() if spot is", "words on graph return ages = spot.data[config][DataKey.AGES] if len(ages) !=", "layout.addWidget(graph_widget) graph_and_points.setLayout(layout) return graph_and_points ############### ### Actions ### ############### def", 
"QWidget, QVBoxLayout, QLabel matplotlib.use('QT5Agg') import matplotlib.pyplot as plt from models.data_key", "string = f\"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}\" axis.errorbar(xs, ys, yerr=errors,", "layout = QVBoxLayout() fig = plt.figure() self.axes = plt.axes() graph_widget,", "isinstance(age, str): continue x = i + 1 y, dy", "else: ys.append(y) errors.append(dy) else: # TODO plot some text return", "_create_widget(self): layout = QVBoxLayout() layout.addWidget(QLabel(\"Sample and spot name\")) layout.addWidget(self._create_age_graph_and_point_selection()) return", "string = \"No weighted age\" else: string = f\"Weighted age:", "is None: ys.append(0) errors.append(0) else: ys.append(y) errors.append(dy) else: # TODO", "isinstance(weighted_age, str): string = \"No weighted age\" else: string =", "def plot_cps_graph(self, spot, config): axis = self.axes axis.clear() if spot", "= spot.data[config][DataKey.WEIGHTED_AGE] if isinstance(weighted_age, str): string = \"No weighted age\"", "QVBoxLayout() fig = plt.figure() self.axes = plt.axes() graph_widget, self.canvas =", "errors.append(dy) else: # TODO plot some text return weighted_age, age_st_dev", "QWidget.__init__(self) self.results_dialog = results_dialog layout = QHBoxLayout() layout.addLayout(self._create_widget()) self.setLayout(layout) results_dialog.sample_tree.tree.currentItemChanged.connect(lambda" ]
[ "= LCOE s['COVE'] = COVE s['avoided_emissions'] = avoided_emissions df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv')", "ocaes # ---------------------- # create and run model # ----------------------", "inputs['L_well'] = 50.0 # inputs['X_cmp'] = 0 # inputs['X_exp'] =", "= 0 # inputs['X_exp'] = 0 model = ocaes(data, inputs)", "inputs['X_well'] = 50.0 # inputs['L_well'] = 50.0 # inputs['X_cmp'] =", "s['COVE'] = COVE s['avoided_emissions'] = avoided_emissions df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) #", "avoided_emissions df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) # ---------------------- # create plots using", "revenue, LCOE, COVE, avoided_emissions = model.post_process(s) s['revenue'] = revenue s['LCOE']", "print(model.calculate_LCOE(s)) # ---------------------- # create plots using built-in functions #", "create and run model # ---------------------- data = pd.read_csv('timeseries_inputs_2019.csv') inputs", "= ocaes.get_default_inputs() # inputs['C_well'] = 5000.0 # inputs['X_well'] = 50.0", "# inputs['C_well'] = 5000.0 # inputs['X_well'] = 50.0 # inputs['L_well']", "inputs['X_exp'] = 0 model = ocaes(data, inputs) df, s =", "ocaes(data, inputs) df, s = model.get_full_results() revenue, LCOE, COVE, avoided_emissions", "s = model.get_full_results() revenue, LCOE, COVE, avoided_emissions = model.post_process(s) s['revenue']", "model.post_process(s) s['revenue'] = revenue s['LCOE'] = LCOE s['COVE'] = COVE", "pd from OCAES import ocaes # ---------------------- # create and", "import pandas as pd from OCAES import ocaes # ----------------------", "inputs['X_cmp'] = 0 # inputs['X_exp'] = 0 model = ocaes(data,", "run model # ---------------------- data = pd.read_csv('timeseries_inputs_2019.csv') inputs = ocaes.get_default_inputs()", "# inputs['X_well'] = 50.0 # inputs['L_well'] = 50.0 # inputs['X_cmp']", "# create and run model # 
---------------------- data = pd.read_csv('timeseries_inputs_2019.csv')", "model.get_full_results() revenue, LCOE, COVE, avoided_emissions = model.post_process(s) s['revenue'] = revenue", "s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) # ---------------------- # create plots using built-in functions", "inputs['C_well'] = 5000.0 # inputs['X_well'] = 50.0 # inputs['L_well'] =", "pandas as pd from OCAES import ocaes # ---------------------- #", "50.0 # inputs['L_well'] = 50.0 # inputs['X_cmp'] = 0 #", "0 # inputs['X_exp'] = 0 model = ocaes(data, inputs) df,", "s['revenue'] = revenue s['LCOE'] = LCOE s['COVE'] = COVE s['avoided_emissions']", "= 50.0 # inputs['X_cmp'] = 0 # inputs['X_exp'] = 0", "LCOE s['COVE'] = COVE s['avoided_emissions'] = avoided_emissions df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s))", "0 model = ocaes(data, inputs) df, s = model.get_full_results() revenue,", "= pd.read_csv('timeseries_inputs_2019.csv') inputs = ocaes.get_default_inputs() # inputs['C_well'] = 5000.0 #", "model # ---------------------- data = pd.read_csv('timeseries_inputs_2019.csv') inputs = ocaes.get_default_inputs() #", "# inputs['X_cmp'] = 0 # inputs['X_exp'] = 0 model =", "= model.get_full_results() revenue, LCOE, COVE, avoided_emissions = model.post_process(s) s['revenue'] =", "# inputs['L_well'] = 50.0 # inputs['X_cmp'] = 0 # inputs['X_exp']", "and run model # ---------------------- data = pd.read_csv('timeseries_inputs_2019.csv') inputs =", "as pd from OCAES import ocaes # ---------------------- # create", "= 0 model = ocaes(data, inputs) df, s = model.get_full_results()", "5000.0 # inputs['X_well'] = 50.0 # inputs['L_well'] = 50.0 #", "revenue s['LCOE'] = LCOE s['COVE'] = COVE s['avoided_emissions'] = avoided_emissions", "df, s = model.get_full_results() revenue, LCOE, COVE, avoided_emissions = model.post_process(s)", "s['LCOE'] = LCOE s['COVE'] = COVE s['avoided_emissions'] = avoided_emissions 
df.to_csv('results_timeseries.csv')", "= 5000.0 # inputs['X_well'] = 50.0 # inputs['L_well'] = 50.0", "inputs) df, s = model.get_full_results() revenue, LCOE, COVE, avoided_emissions =", "# ---------------------- data = pd.read_csv('timeseries_inputs_2019.csv') inputs = ocaes.get_default_inputs() # inputs['C_well']", "from OCAES import ocaes # ---------------------- # create and run", "= 50.0 # inputs['L_well'] = 50.0 # inputs['X_cmp'] = 0", "data = pd.read_csv('timeseries_inputs_2019.csv') inputs = ocaes.get_default_inputs() # inputs['C_well'] = 5000.0", "inputs = ocaes.get_default_inputs() # inputs['C_well'] = 5000.0 # inputs['X_well'] =", "avoided_emissions = model.post_process(s) s['revenue'] = revenue s['LCOE'] = LCOE s['COVE']", "= ocaes(data, inputs) df, s = model.get_full_results() revenue, LCOE, COVE,", "COVE, avoided_emissions = model.post_process(s) s['revenue'] = revenue s['LCOE'] = LCOE", "s['avoided_emissions'] = avoided_emissions df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) # ---------------------- # create", "= avoided_emissions df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) # ---------------------- # create plots", "df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) # ---------------------- # create plots using built-in", "# inputs['X_exp'] = 0 model = ocaes(data, inputs) df, s", "LCOE, COVE, avoided_emissions = model.post_process(s) s['revenue'] = revenue s['LCOE'] =", "= revenue s['LCOE'] = LCOE s['COVE'] = COVE s['avoided_emissions'] =", "---------------------- # create and run model # ---------------------- data =", "pd.read_csv('timeseries_inputs_2019.csv') inputs = ocaes.get_default_inputs() # inputs['C_well'] = 5000.0 # inputs['X_well']", "ocaes.get_default_inputs() # inputs['C_well'] = 5000.0 # inputs['X_well'] = 50.0 #", "COVE s['avoided_emissions'] = avoided_emissions 
df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) # ---------------------- #", "= COVE s['avoided_emissions'] = avoided_emissions df.to_csv('results_timeseries.csv') s.to_csv('results_values.csv') print(model.calculate_LCOE(s)) # ----------------------", "OCAES import ocaes # ---------------------- # create and run model", "50.0 # inputs['X_cmp'] = 0 # inputs['X_exp'] = 0 model", "# ---------------------- # create plots using built-in functions # ----------------------", "# create plots using built-in functions # ---------------------- model.plot_overview() model.plot_power_energy()", "model = ocaes(data, inputs) df, s = model.get_full_results() revenue, LCOE,", "import ocaes # ---------------------- # create and run model #", "---------------------- data = pd.read_csv('timeseries_inputs_2019.csv') inputs = ocaes.get_default_inputs() # inputs['C_well'] =", "# ---------------------- # create and run model # ---------------------- data", "= model.post_process(s) s['revenue'] = revenue s['LCOE'] = LCOE s['COVE'] =", "---------------------- # create plots using built-in functions # ---------------------- model.plot_overview()" ]
[ "use longer buffer and ensure # it's not filled over", "unittest import dace import numpy as np from dace.transformation.dataflow import", "self.assertTrue( np.array_equal(output[:16], np.arange(16, dtype=np.int32))) self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32))) if __name__ ==", ">> out[i] o = i return out class LocalStorageTests(unittest.TestCase): def", "= dace.symbol('N') @dace.program def arange(): out = np.ndarray([N], np.int32) for", "out class LocalStorageTests(unittest.TestCase): def test_even(self): sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage],", "sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [8] }, {}]) self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16,", "sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [8] }, {}])", "i return out class LocalStorageTests(unittest.TestCase): def test_even(self): sdfg = arange.to_sdfg()", "o = i return out class LocalStorageTests(unittest.TestCase): def test_even(self): sdfg", "np.int32) for i in dace.map[0:N]: with dace.tasklet: o >> out[i]", "def test_uneven(self): # For testing uneven decomposition, use longer buffer", "arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [8] }, {}]) self.assertTrue( np.array_equal(sdfg(N=16),", "np.arange(16, dtype=np.int32))) def test_uneven(self): # For testing uneven decomposition, use", "longer buffer and ensure # it's not filled over output", "testing uneven decomposition, use longer buffer and ensure # it's", "filled over output = np.ones(20, np.int32) sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling,", "[8] }, {}]) self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32))) def test_uneven(self): #", "[5] }, {}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output) self.assertTrue( np.array_equal(output[:16], np.arange(16, 
dtype=np.int32)))", "uneven decomposition, use longer buffer and ensure # it's not", "# it's not filled over output = np.ones(20, np.int32) sdfg", "# For testing uneven decomposition, use longer buffer and ensure", "it's not filled over output = np.ones(20, np.int32) sdfg =", "with dace.tasklet: o >> out[i] o = i return out", "import dace import numpy as np from dace.transformation.dataflow import MapTiling,", "= arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [8] }, {}]) self.assertTrue(", "sdfg(N=16, __return=output) self.assertTrue( np.array_equal(output[:16], np.arange(16, dtype=np.int32))) self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32))) if", "class LocalStorageTests(unittest.TestCase): def test_even(self): sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{", "dace.tasklet: o >> out[i] o = i return out class", "OutLocalStorage], options=[{ 'tile_sizes': [8] }, {}]) self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32)))", "def arange(): out = np.ndarray([N], np.int32) for i in dace.map[0:N]:", "{}]) self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32))) def test_uneven(self): # For testing", "self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32))) def test_uneven(self): # For testing uneven", "buffer and ensure # it's not filled over output =", "N = dace.symbol('N') @dace.program def arange(): out = np.ndarray([N], np.int32)", "from dace.transformation.dataflow import MapTiling, OutLocalStorage N = dace.symbol('N') @dace.program def", "= np.ndarray([N], np.int32) for i in dace.map[0:N]: with dace.tasklet: o", "dace.transformation.dataflow import MapTiling, OutLocalStorage N = dace.symbol('N') @dace.program def arange():", "{}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output) self.assertTrue( np.array_equal(output[:16], np.arange(16, dtype=np.int32))) 
self.assertTrue(np.array_equal(output[16:], np.ones(4,", "np.int32) sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [5] },", "def test_even(self): sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [8]", "= np.ones(20, np.int32) sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes':", "np.array_equal(output[:16], np.arange(16, dtype=np.int32))) self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32))) if __name__ == '__main__':", "MapTiling, OutLocalStorage N = dace.symbol('N') @dace.program def arange(): out =", "'tile_sizes': [8] }, {}]) self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32))) def test_uneven(self):", "as np from dace.transformation.dataflow import MapTiling, OutLocalStorage N = dace.symbol('N')", "dtype=np.int32))) def test_uneven(self): # For testing uneven decomposition, use longer", "}, {}]) self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32))) def test_uneven(self): # For", "OutLocalStorage], options=[{ 'tile_sizes': [5] }, {}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output) self.assertTrue(", "decomposition, use longer buffer and ensure # it's not filled", "arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [5] }, {}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16,", "o >> out[i] o = i return out class LocalStorageTests(unittest.TestCase):", "for i in dace.map[0:N]: with dace.tasklet: o >> out[i] o", "out[i] o = i return out class LocalStorageTests(unittest.TestCase): def test_even(self):", "For testing uneven decomposition, use longer buffer and ensure #", "ensure # it's not filled over output = np.ones(20, np.int32)", "np.arange(16, dtype=np.int32))) self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32))) if __name__ == '__main__': unittest.main()", 
"sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [5] }, {}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output)", "import unittest import dace import numpy as np from dace.transformation.dataflow", "dace.map[0:N]: with dace.tasklet: o >> out[i] o = i return", "options=[{ 'tile_sizes': [8] }, {}]) self.assertTrue( np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32))) def", "dace import numpy as np from dace.transformation.dataflow import MapTiling, OutLocalStorage", "'tile_sizes': [5] }, {}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output) self.assertTrue( np.array_equal(output[:16], np.arange(16,", "= i return out class LocalStorageTests(unittest.TestCase): def test_even(self): sdfg =", "test_uneven(self): # For testing uneven decomposition, use longer buffer and", "and ensure # it's not filled over output = np.ones(20,", "OutLocalStorage N = dace.symbol('N') @dace.program def arange(): out = np.ndarray([N],", "= arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [5] }, {}]) dace.propagate_memlets_sdfg(sdfg)", "out = np.ndarray([N], np.int32) for i in dace.map[0:N]: with dace.tasklet:", "numpy as np from dace.transformation.dataflow import MapTiling, OutLocalStorage N =", "np from dace.transformation.dataflow import MapTiling, OutLocalStorage N = dace.symbol('N') @dace.program", "output = np.ones(20, np.int32) sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{", "import MapTiling, OutLocalStorage N = dace.symbol('N') @dace.program def arange(): out", "@dace.program def arange(): out = np.ndarray([N], np.int32) for i in", "sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [5] }, {}])", "return out class LocalStorageTests(unittest.TestCase): def test_even(self): sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling,", "in dace.map[0:N]: with 
dace.tasklet: o >> out[i] o = i", "arange(): out = np.ndarray([N], np.int32) for i in dace.map[0:N]: with", "LocalStorageTests(unittest.TestCase): def test_even(self): sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes':", "import numpy as np from dace.transformation.dataflow import MapTiling, OutLocalStorage N", "not filled over output = np.ones(20, np.int32) sdfg = arange.to_sdfg()", "over output = np.ones(20, np.int32) sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage],", "dace.symbol('N') @dace.program def arange(): out = np.ndarray([N], np.int32) for i", "np.ndarray([N], np.int32) for i in dace.map[0:N]: with dace.tasklet: o >>", "np.ones(20, np.int32) sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [5]", "test_even(self): sdfg = arange.to_sdfg() sdfg.apply_transformations([MapTiling, OutLocalStorage], options=[{ 'tile_sizes': [8] },", "__return=output) self.assertTrue( np.array_equal(output[:16], np.arange(16, dtype=np.int32))) self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32))) if __name__", "dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output) self.assertTrue( np.array_equal(output[:16], np.arange(16, dtype=np.int32))) self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32)))", "}, {}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output) self.assertTrue( np.array_equal(output[:16], np.arange(16, dtype=np.int32))) self.assertTrue(np.array_equal(output[16:],", "options=[{ 'tile_sizes': [5] }, {}]) dace.propagate_memlets_sdfg(sdfg) sdfg(N=16, __return=output) self.assertTrue( np.array_equal(output[:16],", "i in dace.map[0:N]: with dace.tasklet: o >> out[i] o =", "np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32))) def test_uneven(self): # For testing uneven decomposition," ]
[ "object The file to which the header and data will", "exception is raised. \"\"\" size = self._ffo.tell() - self._data_offset if", "full so pad the data to the next FITS block", "amount of data specified in the header provided to the", "the end of the file. If the file does not", "to the stream. If the provided data would cause the", "self._ffo = _File(name, 'append') # TODO : Fix this once", "hdulist.writeto(name, 'exception') else: # This will not be the first", "# This will not be the first extension in the", "is not a Primary header, a default Primary HDU will", "to an image extension header and appended to the end", "the file does not exist and the provided header is", "naxis = self._header.get('NAXIS', 0) if naxis > 0: simple =", "PrimaryHDU to the file before writing the # given header.", ": Fix this once the HDU writing API is cleaned", "Parameters ---------- name : file path, file object, or file", "header represents a Primary header, the header will be modified", "= '' else: dim = str(dim) self._header.set('PCOUNT', 0, 'number of", "self._header: self._header.set('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo = _File(name,", "the header provided to the class constructor may be written", "file before writing the # given header. newfile = False", "header appended to the end of the file. If the", "not match what is expected by the header, a `TypeError`", "----- Only the amount of data specified in the header", "be written to the stream. If the provided data would", "data self._ffo.writearray(output) if self._ffo.tell() - self._data_offset == self._size: # the", "the cards you need in the header: header[key] = (value,", "The file to which the header and data will be", "arrays before writing output = data.byteswap() else: output = data", "mode such as 'wb' or 'ab+'. 
header : `Header` instance", "the size (in bytes) of the data portion of the", "= True self._ffo.flush() return self.writecomplete @property def size(self): \"\"\" Return", "self._size != 0: self.writecomplete = False else: self.writecomplete = True", "that all of the required data has been written to", "# Licensed under a 3-clause BSD style license - see", "specified in the header provided to the class constructor may", "True elif (hasattr(name, 'len') and name.len == 0): newfile =", "# the stream is full so pad the data to", "so pad the data to the next FITS block self._ffo.write(_pad_length(self._size)", "name filename = fileobj_name(name) or '' # Check if the", "with a Primary Header. If not we will need #", "the file does not already exist, it will be created,", "The file will be opened and the header appended to", "the header, the stream is padded to fill a complete", "be added as the first extension. If the file does", "the header and data will be streamed. If opened, the", "and data will be streamed. If opened, the file object", "to all be written at once. The following pseudocode illustrates", "- see PYFITS.rst import gzip import os from .base import", "cleaned up tmp_hdu = _BaseHDU() # Passing self._header as an", "modified in undesired ways...need to have a better way #", "not written. Once sufficient data has been written to the", "An attempt to write more data after the stream has", "after the stream has been filled will raise an `OSError`", "self._size: # the stream is full so pad the data", "parameters', after='NAXIS' + dim) if 'GCOUNT' not in self._header: self._header.set('GCOUNT',", "header and appended to the end of the file. \"\"\"", "data has been written to the stream to satisfy the", "------- writecomplete : int Flag that when `True` indicates that", "type, value, traceback): self.close() def write(self, data): \"\"\" Write the", "once. 
The following pseudocode illustrates its use:: header = astropy.io.fits.Header()", "self._header: self._header.set('XTENSION', 'IMAGE', 'Image extension', after='SIMPLE') del self._header['SIMPLE'] if 'PCOUNT'", "path, file object, or file like object The file to", "a header. Parameters ---------- name : file path, file object,", "to fill a complete FITS block and no more data", "If the file does not already exist, it will be", "# handle a file object instead of a file name", "stream to the file. Returns ------- writecomplete : int Flag", "be modified to an image extension header and appended to", "beginning of the file and the provided header will be", "its use:: header = astropy.io.fits.Header() for all the cards you", "groups = 1 else: groups = 0 size = 1", "it will be written to the beginning of the file.", "traceback): self.close() def write(self, data): \"\"\" Write the given data", "more data after the stream has been filled will raise", "self._header: dim = self._header['NAXIS'] if dim == 0: dim =", "os.path.getsize(filename) == 0: newfile = True elif (hasattr(name, 'len') and", "capability to stream data to a FITS file instead of", "file like object The file to which the header and", "at the beginning of the file and the provided header", "header will be added as the first extension. If the", "__enter__(self): return self def __exit__(self, type, value, traceback): self.close() def", "input data does not match what is expected by the", "simple == 'T' and random_groups == 'T': groups = 1", "no more data will be accepted. 
An attempt to write", "little endian arrays before writing output = data.byteswap() else: output", "fileobj_name(name) or '' # Check if the file already exists.", "# Support the 'with' statement def __enter__(self): return self def", "= self._header.get('NAXIS', 0) if naxis > 0: simple = self._header.get('SIMPLE',", "header : `Header` instance The header object associated with the", "`True` indicates that all of the required data has been", "Return the size (in bytes) of the data portion of", "be modified in undesired ways...need to have a better way", "`OSError` exception is raised and the data is not written.", "the HDU. \"\"\" size = 0 naxis = self._header.get('NAXIS', 0)", "it does not, check to see # if we were", "self._header['NAXIS' + str(idx + 1)] bitpix = self._header['BITPIX'] gcount =", "the header represents a Primary header, it will be written", "Support the 'with' statement def __enter__(self): return self def __exit__(self,", "(pcount + size) // 8 return size def close(self): \"\"\"", "streamed. If opened, the file object must be opened in", "size = 0 naxis = self._header.get('NAXIS', 0) if naxis >", ": `Header` instance The header object associated with the data", "header and data will be streamed. If opened, the file", "data to be written to the file. Notes ----- The", "inserted at the beginning of the file and the provided", "size def close(self): \"\"\" Close the physical FITS file. \"\"\"", "FITS file instead of requiring data to all be written", "else: dim = str(dim) self._header.set('PCOUNT', 0, 'number of parameters', after='NAXIS'", "would cause the stream to overflow, an `OSError` exception is", "and random_groups == 'T': groups = 1 else: groups =", "<gh_stars>100-1000 # Licensed under a 3-clause BSD style license -", "header: header[key] = (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for", "the file before writing the # given header. 
newfile =", "+ dim) if 'GCOUNT' not in self._header: self._header.set('GCOUNT', 1, 'number", "def close(self): \"\"\" Close the physical FITS file. \"\"\" self._ffo.close()", "name.len == 0): newfile = True if newfile: if 'SIMPLE'", "file object, or file like object The file to which", "raised and the data is not written. Once sufficient data", "default Primary HDU will be inserted at the beginning of", "'SIMPLE' in self._header: self._header.set('XTENSION', 'IMAGE', 'Image extension', after='SIMPLE') del self._header['SIMPLE']", "the stream is padded to fill a complete FITS block", "\"\"\" Construct a `StreamingHDU` object given a file name and", "from .base import _BaseHDU, BITPIX2DTYPE from .hdulist import HDUList from", "If the provided data would cause the stream to overflow,", "data.dtype.name: raise TypeError('Supplied data does not match the type specified", "specified ' 'in the header.') if data.dtype.str[0] != '>': #", "specified in the header, the stream is padded to fill", "@property def size(self): \"\"\" Return the size (in bytes) of", "portion of the HDU. \"\"\" size = 0 naxis =", "+ str(idx + 1)] bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT',", "type specified ' 'in the header.') if data.dtype.str[0] != '>':", "amount specified in the header, the stream is padded to", "the class constructor may be written to the stream. If", "padded to fill a complete FITS block and no more", "ways...need to have a better way # of doing this", "to the stream. Parameters ---------- data : ndarray Data to", "header is not a Primary header, a default Primary HDU", "_pad_length from astropy.io.fits.util import fileobj_name class StreamingHDU: \"\"\" A class", "appended to the end of the file. If the file", "the stream. 
Parameters ---------- data : ndarray Data to stream", "# byteswap little endian arrays before writing output = data.byteswap()", "will need # to prepend a default PrimaryHDU to the", "random_groups = self._header.get('GROUPS', 'F') if simple == 'T' and random_groups", "into an image # extension header. if 'SIMPLE' in self._header:", "size (in bytes) of the data portion of the HDU.", "!= 0: self.writecomplete = False else: self.writecomplete = True #", "already exist, it will be created, and if the header", "will be accepted. An attempt to write more data after", "write more data after the stream has been filled will", "self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0) size = abs(bitpix) *", "not in self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: #", "to overflow, an `OSError` exception is raised and the data", "to be modified in undesired ways...need to have a better", "the stream. If the provided data would cause the stream", "self._header['NAXIS'] if dim == 0: dim = '' else: dim", "= data.byteswap() else: output = data self._ffo.writearray(output) if self._ffo.tell() -", "a Primary header, a default Primary HDU will be inserted", "file path, file object, or file like object The file", "to write more data to the stream than the '", "for idx in range(groups, naxis): size = size * self._header['NAXIS'", "gzip import os from .base import _BaseHDU, BITPIX2DTYPE from .hdulist", "HDU. 
\"\"\" size = 0 naxis = self._header.get('NAXIS', 0) if", "a complete FITS block and no more data will be", "str(dim) self._header.set('PCOUNT', 0, 'number of parameters', after='NAXIS' + dim) if", "abs(bitpix) * gcount * (pcount + size) // 8 return", "1) pcount = self._header.get('PCOUNT', 0) size = abs(bitpix) * gcount", "a Primary header, the header will be modified to an", "the provided header is not a Primary header, a default", "os.path.exists(filename) or os.path.getsize(filename) == 0: newfile = True elif (hasattr(name,", "is not written. Once sufficient data has been written to", "the file. Notes ----- The file will be opened and", "be opened and the header appended to the end of", "provided header is not a Primary header, a default Primary", "# given header. newfile = False if filename: if not", "if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise TypeError('Supplied data does not match", "to the next FITS block self._ffo.write(_pad_length(self._size) * '\\0') self.writecomplete =", "a default PrimaryHDU to the file before writing the #", "---------- name : file path, file object, or file like", "file does not exist and the provided header is not", "to the class constructor may be written to the stream.", "stream to overflow, an `OSError` exception is raised and the", "API is cleaned up tmp_hdu = _BaseHDU() # Passing self._header", "header, a default Primary HDU will be inserted at the", "self._ffo.write(_pad_length(self._size) * '\\0') self.writecomplete = True self._ffo.flush() return self.writecomplete @property", "the capability to stream data to a FITS file instead", "of doing this tmp_hdu._header = self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset", "name, header): \"\"\" Construct a `StreamingHDU` object given a file", "before writing output = data.byteswap() else: output = data self._ffo.writearray(output)", "the first extension. 
If the file does already exist, but", "will be created, and if the header represents a Primary", "\"\"\" size = self._ffo.tell() - self._data_offset if self.writecomplete or size", "specified.') if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise TypeError('Supplied data does not", "constructor may be written to the stream. If the provided", "astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of data: shdu.write(data) shdu.close() \"\"\"", "has been filled will raise an `OSError` exception. If the", "in self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: # This", "size * self._header['NAXIS' + str(idx + 1)] bitpix = self._header['BITPIX']", "a `StreamingHDU` object given a file name and a header.", "del self._header['SIMPLE'] if 'PCOUNT' not in self._header: dim = self._header['NAXIS']", "import fileobj_name class StreamingHDU: \"\"\" A class that provides the", "data to the next FITS block self._ffo.write(_pad_length(self._size) * '\\0') self.writecomplete", "else: self.writecomplete = True # Support the 'with' statement def", "provided data would cause the stream to overflow, an `OSError`", "'PCOUNT' not in self._header: dim = self._header['NAXIS'] if dim ==", "the input data does not match what is expected by", "following pseudocode illustrates its use:: header = astropy.io.fits.Header() for all", "size = abs(bitpix) * gcount * (pcount + size) //", "the data is not written. Once sufficient data has been", "header[key] = (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each", "size = 1 for idx in range(groups, naxis): size =", "the file. 
If the file does not already exist, it", "if not os.path.exists(filename) or os.path.getsize(filename) == 0: newfile = True", "self._header as an argument to _BaseHDU() will cause its #", "import os from .base import _BaseHDU, BITPIX2DTYPE from .hdulist import", "than the ' 'header specified.') if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise", "'append') # TODO : Fix this once the HDU writing", "be written to the beginning of the file. If the", "self._ffo.tell() self._size = self.size if self._size != 0: self.writecomplete =", "astropy.io.fits.Header() for all the cards you need in the header:", "Header. If not we will need # to prepend a", "True if newfile: if 'SIMPLE' not in self._header: hdulist =", "writing API is cleaned up tmp_hdu = _BaseHDU() # Passing", "the 'with' statement def __enter__(self): return self def __exit__(self, type,", "and the provided header is not a Primary header, a", "the ' 'header specified.') if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise TypeError('Supplied", "attempt to write more data after the stream has been", "_BaseHDU, BITPIX2DTYPE from .hdulist import HDUList from .image import PrimaryHDU", "Licensed under a 3-clause BSD style license - see PYFITS.rst", "If opened, the file object must be opened in a", "file. 
Returns ------- writecomplete : int Flag that when `True`", "(value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of", "import PrimaryHDU from astropy.io.fits.file import _File from astropy.io.fits.header import _pad_length", "- self._data_offset if self.writecomplete or size + data.nbytes > self._size:", "TypeError('StreamingHDU not supported for GzipFile objects.') self._header = header.copy() #", "change the Primary header provided into an image # extension", "comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of data:", "= 1 for idx in range(groups, naxis): size = size", "does not already exist, it will be created, and if", "end of the file. If the file does not already", "argument to _BaseHDU() will cause its # values to be", "'header specified.') if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise TypeError('Supplied data does", "gzip.GzipFile): raise TypeError('StreamingHDU not supported for GzipFile objects.') self._header =", "as 'wb' or 'ab+'. header : `Header` instance The header", "under a 3-clause BSD style license - see PYFITS.rst import", "as the first extension. If the file does already exist,", "data does not match what is expected by the header,", "newfile: if 'SIMPLE' not in self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name,", "GzipFile objects.') self._header = header.copy() # handle a file object", "if filename: if not os.path.exists(filename) or os.path.getsize(filename) == 0: newfile", "raised. \"\"\" size = self._ffo.tell() - self._data_offset if self.writecomplete or", "at once. 
The following pseudocode illustrates its use:: header =", "the file and the provided header will be added as", "complete FITS block and no more data will be accepted.", ".image import PrimaryHDU from astropy.io.fits.file import _File from astropy.io.fits.header import", "PrimaryHDU from astropy.io.fits.file import _File from astropy.io.fits.header import _pad_length from", "provided header will be added as the first extension. If", "we will need # to prepend a default PrimaryHDU to", "of the required data has been written to the stream.", "= False if filename: if not os.path.exists(filename) or os.path.getsize(filename) ==", "self._header.get('NAXIS', 0) if naxis > 0: simple = self._header.get('SIMPLE', 'F')", "the first extension in the file so we # must", "required data has been written to the stream. Notes -----", "stream. Notes ----- Only the amount of data specified in", "satisfy the amount specified in the header, the stream is", "header, the header will be modified to an image extension", "\"\"\" A class that provides the capability to stream data", "it will be created, and if the header represents a", "is cleaned up tmp_hdu = _BaseHDU() # Passing self._header as", "# Passing self._header as an argument to _BaseHDU() will cause", "shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct a `StreamingHDU`", "data will be accepted. An attempt to write more data", "'in the header.') if data.dtype.str[0] != '>': # byteswap little", "int Flag that when `True` indicates that all of the", "to the stream than the ' 'header specified.') if BITPIX2DTYPE[self._header['BITPIX']]", "' 'header specified.') if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise TypeError('Supplied data", "# of doing this tmp_hdu._header = self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0]", "added as the first extension. 
If the file does already", "if isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU not supported for GzipFile objects.')", "from .hdulist import HDUList from .image import PrimaryHDU from astropy.io.fits.file", "match what is expected by the header, a `TypeError` exception", "bytes) of the data portion of the HDU. \"\"\" size", "instead of requiring data to all be written at once.", "and a header. Parameters ---------- name : file path, file", "not already exist, it will be created, and if the", "file object must be opened in a writeable binary mode", "else: output = data self._ffo.writearray(output) if self._ffo.tell() - self._data_offset ==", "= abs(bitpix) * gcount * (pcount + size) // 8", "does not exist and the provided header is not a", "is raised. \"\"\" size = self._ffo.tell() - self._data_offset if self.writecomplete", "_File(name, 'append') # TODO : Fix this once the HDU", "else: # This will not be the first extension in", "naxis): size = size * self._header['NAXIS' + str(idx + 1)]", "from astropy.io.fits.header import _pad_length from astropy.io.fits.util import fileobj_name class StreamingHDU:", "to the file before writing the # given header. newfile", "once the HDU writing API is cleaned up tmp_hdu =", "were provided with a Primary Header. If not we will", "from .image import PrimaryHDU from astropy.io.fits.file import _File from astropy.io.fits.header", "is raised and the data is not written. Once sufficient", "does not match what is expected by the header, a", "dim = self._header['NAXIS'] if dim == 0: dim = ''", "a file name and a header. 
Parameters ---------- name :", "'>': # byteswap little endian arrays before writing output =", "exist and the provided header is not a Primary header,", "extension in the file so we # must change the", "= astropy.io.fits.Header() for all the cards you need in the", "self.writecomplete = True # Support the 'with' statement def __enter__(self):", "self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0) size", "to the stream to satisfy the amount specified in the", "does already exist, but the provided header represents a Primary", "not exist and the provided header is not a Primary", "of the file and the provided header will be added", "1, 'number of groups', after='PCOUNT') self._ffo = _File(name, 'append') #", "an `OSError` exception. If the dtype of the input data", "If it does not, check to see # if we", "provided with a Primary Header. If not we will need", "> self._size: raise OSError('Attempt to write more data to the", "pcount = self._header.get('PCOUNT', 0) size = abs(bitpix) * gcount *", "to the stream. Notes ----- Only the amount of data", "if dim == 0: dim = '' else: dim =", "writeable binary mode such as 'wb' or 'ab+'. header :", "stream. 
Parameters ---------- data : ndarray Data to stream to", "header object associated with the data to be written to", "_File from astropy.io.fits.header import _pad_length from astropy.io.fits.util import fileobj_name class", "self._header.set('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo = _File(name, 'append')", "is padded to fill a complete FITS block and no", "written to the stream to satisfy the amount specified in", "'SIMPLE' not in self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else:", "data does not match the type specified ' 'in the", "`Header` instance The header object associated with the data to", "to prepend a default PrimaryHDU to the file before writing", "tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size = self.size if self._size !=", "all be written at once. The following pseudocode illustrates its", "data.dtype.str[0] != '>': # byteswap little endian arrays before writing", "size) // 8 return size def close(self): \"\"\" Close the", "class that provides the capability to stream data to a", "not os.path.exists(filename) or os.path.getsize(filename) == 0: newfile = True elif", "file. \"\"\" if isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU not supported for", "Notes ----- The file will be opened and the header", "filename: if not os.path.exists(filename) or os.path.getsize(filename) == 0: newfile =", "file name and a header. Parameters ---------- name : file", "'' # Check if the file already exists. If it", "stream. If the provided data would cause the stream to", "the amount specified in the header, the stream is padded", "of the file. If the file does not exist and", "def size(self): \"\"\" Return the size (in bytes) of the", "such as 'wb' or 'ab+'. 
header : `Header` instance The", "= header.copy() # handle a file object instead of a", "+ size) // 8 return size def close(self): \"\"\" Close", "PYFITS.rst import gzip import os from .base import _BaseHDU, BITPIX2DTYPE", "to have a better way # of doing this tmp_hdu._header", "need in the header: header[key] = (value, comment) shdu =", "writecomplete : int Flag that when `True` indicates that all", "more data to the stream than the ' 'header specified.')", "the required data has been written to the stream. Notes", "the type specified ' 'in the header.') if data.dtype.str[0] !=", "Once sufficient data has been written to the stream to", "= (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each piece", "'\\0') self.writecomplete = True self._ffo.flush() return self.writecomplete @property def size(self):", "* '\\0') self.writecomplete = True self._ffo.flush() return self.writecomplete @property def", "* (pcount + size) // 8 return size def close(self):", "be created, and if the header represents a Primary header,", "---------- data : ndarray Data to stream to the file.", "cause the stream to overflow, an `OSError` exception is raised", "output = data self._ffo.writearray(output) if self._ffo.tell() - self._data_offset == self._size:", "import gzip import os from .base import _BaseHDU, BITPIX2DTYPE from", "Write the given data to the stream. Parameters ---------- data", "groups', after='PCOUNT') self._ffo = _File(name, 'append') # TODO : Fix", "all the cards you need in the header: header[key] =", "self._data_offset if self.writecomplete or size + data.nbytes > self._size: raise", "after='NAXIS' + dim) if 'GCOUNT' not in self._header: self._header.set('GCOUNT', 1,", "an argument to _BaseHDU() will cause its # values to", "data to all be written at once. 
The following pseudocode", "in undesired ways...need to have a better way # of", "def __exit__(self, type, value, traceback): self.close() def write(self, data): \"\"\"", "by the header, a `TypeError` exception is raised. \"\"\" size", "been written to the stream. Notes ----- Only the amount", "Primary header, it will be written to the beginning of", "file. Notes ----- The file will be opened and the", "will be modified to an image extension header and appended", "naxis > 0: simple = self._header.get('SIMPLE', 'F') random_groups = self._header.get('GROUPS',", "BITPIX2DTYPE from .hdulist import HDUList from .image import PrimaryHDU from", "= 1 else: groups = 0 size = 1 for", "0, 'number of parameters', after='NAXIS' + dim) if 'GCOUNT' not", "\"\"\" def __init__(self, name, header): \"\"\" Construct a `StreamingHDU` object", "if the header represents a Primary header, it will be", "each piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name,", ".base import _BaseHDU, BITPIX2DTYPE from .hdulist import HDUList from .image", "= _File(name, 'append') # TODO : Fix this once the", "!= data.dtype.name: raise TypeError('Supplied data does not match the type", "see # if we were provided with a Primary Header.", "data to the stream than the ' 'header specified.') if", "// 8 return size def close(self): \"\"\" Close the physical", "Check if the file already exists. If it does not,", "self._header.set('XTENSION', 'IMAGE', 'Image extension', after='SIMPLE') del self._header['SIMPLE'] if 'PCOUNT' not", "= self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size =", "= str(dim) self._header.set('PCOUNT', 0, 'number of parameters', after='NAXIS' + dim)", "what is expected by the header, a `TypeError` exception is", "self.writecomplete = False else: self.writecomplete = True # Support the", "'number of groups', after='PCOUNT') self._ffo = _File(name, 'append') # TODO", "already exists. 
If it does not, check to see #", "'T' and random_groups == 'T': groups = 1 else: groups", "self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size = self.size if", "be the first extension in the file so we #", "like object The file to which the header and data", "if self._size != 0: self.writecomplete = False else: self.writecomplete =", "stream than the ' 'header specified.') if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name:", "import _pad_length from astropy.io.fits.util import fileobj_name class StreamingHDU: \"\"\" A", "check to see # if we were provided with a", "self.size if self._size != 0: self.writecomplete = False else: self.writecomplete", "endian arrays before writing output = data.byteswap() else: output =", "be written at once. The following pseudocode illustrates its use::", "or os.path.getsize(filename) == 0: newfile = True elif (hasattr(name, 'len')", "will be streamed. If opened, the file object must be", "The header object associated with the data to be written", "provides the capability to stream data to a FITS file", "the header: header[key] = (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header)", "'' else: dim = str(dim) self._header.set('PCOUNT', 0, 'number of parameters',", "illustrates its use:: header = astropy.io.fits.Header() for all the cards", "0): newfile = True if newfile: if 'SIMPLE' not in", "of parameters', after='NAXIS' + dim) if 'GCOUNT' not in self._header:", "= self._ffo.tell() self._size = self.size if self._size != 0: self.writecomplete", "Passing self._header as an argument to _BaseHDU() will cause its", "= fileobj_name(name) or '' # Check if the file already", "pseudocode illustrates its use:: header = astropy.io.fits.Header() for all the", "file will be opened and the header appended to the", "the HDU writing API is cleaned up tmp_hdu = _BaseHDU()", "raise OSError('Attempt to write more data to the stream than", "Parameters ---------- data 
: ndarray Data to stream to the", "given data to the stream. Parameters ---------- data : ndarray", "shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of data: shdu.write(data)", "written to the file. Notes ----- The file will be", "data after the stream has been filled will raise an", "if 'SIMPLE' not in self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception')", "to the file. Returns ------- writecomplete : int Flag that", "If the dtype of the input data does not match", "a file name filename = fileobj_name(name) or '' # Check", "this once the HDU writing API is cleaned up tmp_hdu", "in the header: header[key] = (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits',", "header.copy() # handle a file object instead of a file", "HDU writing API is cleaned up tmp_hdu = _BaseHDU() #", "the stream to overflow, an `OSError` exception is raised and", "if self._ffo.tell() - self._data_offset == self._size: # the stream is", "== 0: newfile = True elif (hasattr(name, 'len') and name.len", "class StreamingHDU: \"\"\" A class that provides the capability to", "first extension in the file so we # must change", "as an argument to _BaseHDU() will cause its # values", "if naxis > 0: simple = self._header.get('SIMPLE', 'F') random_groups =", "os from .base import _BaseHDU, BITPIX2DTYPE from .hdulist import HDUList", "prepend a default PrimaryHDU to the file before writing the", "for all the cards you need in the header: header[key]", "tmp_hdu = _BaseHDU() # Passing self._header as an argument to", "HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: # This will not be the", "the file. 
If the file does not exist and the", "# values to be modified in undesired ways...need to have", "self._header.get('SIMPLE', 'F') random_groups = self._header.get('GROUPS', 'F') if simple == 'T'", "must be opened in a writeable binary mode such as", "not, check to see # if we were provided with", "Only the amount of data specified in the header provided", "self._header.set('PCOUNT', 0, 'number of parameters', after='NAXIS' + dim) if 'GCOUNT'", "0) size = abs(bitpix) * gcount * (pcount + size)", "we # must change the Primary header provided into an", "False else: self.writecomplete = True # Support the 'with' statement", "hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: # This will not", "of data specified in the header provided to the class", "its # values to be modified in undesired ways...need to", "\"\"\" Return the size (in bytes) of the data portion", ": file path, file object, or file like object The", "and the provided header will be added as the first", "object, or file like object The file to which the", "True # Support the 'with' statement def __enter__(self): return self", "been filled will raise an `OSError` exception. If the dtype", "not in self._header: dim = self._header['NAXIS'] if dim == 0:", "newfile = True if newfile: if 'SIMPLE' not in self._header:", "exception is raised and the data is not written. Once", "to the end of the file. \"\"\" if isinstance(name, gzip.GzipFile):", "of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\"", "of groups', after='PCOUNT') self._ffo = _File(name, 'append') # TODO :", "cards you need in the header: header[key] = (value, comment)", "\"\"\" Write the given data to the stream. Parameters ----------", "provided into an image # extension header. 
if 'SIMPLE' in", "style license - see PYFITS.rst import gzip import os from", "not supported for GzipFile objects.') self._header = header.copy() # handle", "elif (hasattr(name, 'len') and name.len == 0): newfile = True", "# must change the Primary header provided into an image", "dim = str(dim) self._header.set('PCOUNT', 0, 'number of parameters', after='NAXIS' +", "data would cause the stream to overflow, an `OSError` exception", "file does already exist, but the provided header represents a", "in the header, the stream is padded to fill a", "pad the data to the next FITS block self._ffo.write(_pad_length(self._size) *", "fileobj_name class StreamingHDU: \"\"\" A class that provides the capability", "writing the # given header. newfile = False if filename:", "8 return size def close(self): \"\"\" Close the physical FITS", "the amount of data specified in the header provided to", "self def __exit__(self, type, value, traceback): self.close() def write(self, data):", "= 0 size = 1 for idx in range(groups, naxis):", "True self._ffo.flush() return self.writecomplete @property def size(self): \"\"\" Return the", "data is not written. Once sufficient data has been written", "dtype of the input data does not match what is", "header, the stream is padded to fill a complete FITS", "in self._header: self._header.set('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo =", "data to a FITS file instead of requiring data to", "to be written to the file. Notes ----- The file", "header. if 'SIMPLE' in self._header: self._header.set('XTENSION', 'IMAGE', 'Image extension', after='SIMPLE')", "Fix this once the HDU writing API is cleaned up", "the stream to satisfy the amount specified in the header,", "- self._data_offset == self._size: # the stream is full so", "else: groups = 0 size = 1 for idx in", "# TODO : Fix this once the HDU writing API", "to satisfy the amount specified in the header, the stream", "or 'ab+'. 
header : `Header` instance The header object associated", "of the file. \"\"\" if isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU not", "way # of doing this tmp_hdu._header = self._header self._header_offset =", "for each piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self,", "output = data.byteswap() else: output = data self._ffo.writearray(output) if self._ffo.tell()", "(in bytes) of the data portion of the HDU. \"\"\"", "the data to be written to the file. Notes -----", "isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU not supported for GzipFile objects.') self._header", "of the file. If the file does not already exist,", "associated with the data to be written to the file.", "represents a Primary header, the header will be modified to", "== 'T': groups = 1 else: groups = 0 size", "data): \"\"\" Write the given data to the stream. Parameters", "is full so pad the data to the next FITS", "__init__(self, name, header): \"\"\" Construct a `StreamingHDU` object given a", "\"\"\" if isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU not supported for GzipFile", "' 'in the header.') if data.dtype.str[0] != '>': # byteswap", "'IMAGE', 'Image extension', after='SIMPLE') del self._header['SIMPLE'] if 'PCOUNT' not in", "data portion of the HDU. \"\"\" size = 0 naxis", "== self._size: # the stream is full so pad the", "if the file already exists. If it does not, check", "def write(self, data): \"\"\" Write the given data to the", "write(self, data): \"\"\" Write the given data to the stream.", "name and a header. 
Parameters ---------- name : file path,", "self._header['SIMPLE'] if 'PCOUNT' not in self._header: dim = self._header['NAXIS'] if", "all of the required data has been written to the", "see PYFITS.rst import gzip import os from .base import _BaseHDU,", "instance The header object associated with the data to be", "provided header represents a Primary header, the header will be", "given a file name and a header. Parameters ---------- name", "be written to the file. Notes ----- The file will", "the beginning of the file. If the file does not", "0: dim = '' else: dim = str(dim) self._header.set('PCOUNT', 0,", "The following pseudocode illustrates its use:: header = astropy.io.fits.Header() for", "= astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of data: shdu.write(data) shdu.close()", "from astropy.io.fits.file import _File from astropy.io.fits.header import _pad_length from astropy.io.fits.util", "value, traceback): self.close() def write(self, data): \"\"\" Write the given", "= HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: # This will not be", "the stream. Notes ----- Only the amount of data specified", "stream has been filled will raise an `OSError` exception. If", "the file already exists. 
If it does not, check to", "range(groups, naxis): size = size * self._header['NAXIS' + str(idx +", "= self.size if self._size != 0: self.writecomplete = False else:", "after='SIMPLE') del self._header['SIMPLE'] if 'PCOUNT' not in self._header: dim =", "the header.') if data.dtype.str[0] != '>': # byteswap little endian", "an `OSError` exception is raised and the data is not", "If the file does not exist and the provided header", "header will be modified to an image extension header and", "will not be the first extension in the file so", "'number of parameters', after='NAXIS' + dim) if 'GCOUNT' not in", "file instead of requiring data to all be written at", "Construct a `StreamingHDU` object given a file name and a", "file object instead of a file name filename = fileobj_name(name)", "if we were provided with a Primary Header. If not", ".hdulist import HDUList from .image import PrimaryHDU from astropy.io.fits.file import", "an image # extension header. if 'SIMPLE' in self._header: self._header.set('XTENSION',", "= data self._ffo.writearray(output) if self._ffo.tell() - self._data_offset == self._size: #", "header provided to the class constructor may be written to", "file does not already exist, it will be created, and", "use:: header = astropy.io.fits.Header() for all the cards you need", "TypeError('Supplied data does not match the type specified ' 'in", "header): \"\"\" Construct a `StreamingHDU` object given a file name", "and the header appended to the end of the file.", "header. 
newfile = False if filename: if not os.path.exists(filename) or", "'F') if simple == 'T' and random_groups == 'T': groups", "but the provided header represents a Primary header, the header", "= self._header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount", "_BaseHDU() will cause its # values to be modified in", "self.close() def write(self, data): \"\"\" Write the given data to", "= self._ffo.tell() - self._data_offset if self.writecomplete or size + data.nbytes", "already exist, but the provided header represents a Primary header,", "if 'SIMPLE' in self._header: self._header.set('XTENSION', 'IMAGE', 'Image extension', after='SIMPLE') del", "# if we were provided with a Primary Header. If", "the file so we # must change the Primary header", "written to the stream. Notes ----- Only the amount of", "given header. newfile = False if filename: if not os.path.exists(filename)", "'F') random_groups = self._header.get('GROUPS', 'F') if simple == 'T' and", "self._size = self.size if self._size != 0: self.writecomplete = False", "stream is full so pad the data to the next", "def __init__(self, name, header): \"\"\" Construct a `StreamingHDU` object given", "BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise TypeError('Supplied data does not match the", "to a FITS file instead of requiring data to all", "'wb' or 'ab+'. header : `Header` instance The header object", "not in self._header: self._header.set('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo", "written at once. The following pseudocode illustrates its use:: header", "if simple == 'T' and random_groups == 'T': groups =", "the stream is full so pad the data to the", "in the header provided to the class constructor may be", "filled will raise an `OSError` exception. If the dtype of", "!= '>': # byteswap little endian arrays before writing output", "not we will need # to prepend a default PrimaryHDU", "self._header = header.copy() # handle a file object instead of", "to the file. 
Notes ----- The file will be opened", "written to the stream. If the provided data would cause", "HDUList from .image import PrimaryHDU from astropy.io.fits.file import _File from", "= True if newfile: if 'SIMPLE' not in self._header: hdulist", "If the file does already exist, but the provided header", "cause its # values to be modified in undesired ways...need", "writing output = data.byteswap() else: output = data self._ffo.writearray(output) if", "exists. If it does not, check to see # if", "Data to stream to the file. Returns ------- writecomplete :", "when `True` indicates that all of the required data has", "indicates that all of the required data has been written", "piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header):", "written to the beginning of the file. If the file", "doing this tmp_hdu._header = self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset =", "the file. \"\"\" if isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU not supported", "__exit__(self, type, value, traceback): self.close() def write(self, data): \"\"\" Write", "in range(groups, naxis): size = size * self._header['NAXIS' + str(idx", "a default Primary HDU will be inserted at the beginning", "so we # must change the Primary header provided into", "and no more data will be accepted. An attempt to", "bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT',", "data.byteswap() else: output = data self._ffo.writearray(output) if self._ffo.tell() - self._data_offset", "does not, check to see # if we were provided", ": ndarray Data to stream to the file. Returns -------", "dim == 0: dim = '' else: dim = str(dim)", "if newfile: if 'SIMPLE' not in self._header: hdulist = HDUList([PrimaryHDU()])", "requiring data to all be written at once. The following", "header represents a Primary header, it will be written to", "be accepted. 
An attempt to write more data after the", "the file object must be opened in a writeable binary", "data has been written to the stream. Notes ----- Only", "3-clause BSD style license - see PYFITS.rst import gzip import", "raise TypeError('StreamingHDU not supported for GzipFile objects.') self._header = header.copy()", "the dtype of the input data does not match what", "BSD style license - see PYFITS.rst import gzip import os", "HDU will be inserted at the beginning of the file", "header provided into an image # extension header. if 'SIMPLE'", "overflow, an `OSError` exception is raised and the data is", "TODO : Fix this once the HDU writing API is", "self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: # This will", "This will not be the first extension in the file", "has been written to the stream. Notes ----- Only the", "the header appended to the end of the file. If", "the given data to the stream. Parameters ---------- data :", "binary mode such as 'wb' or 'ab+'. header : `Header`", "1 else: groups = 0 size = 1 for idx", "file. If the file does not exist and the provided", "+ data.nbytes > self._size: raise OSError('Attempt to write more data", "will be inserted at the beginning of the file and", "to the end of the file. If the file does", "end of the file. \"\"\" if isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU", "the data to the next FITS block self._ffo.write(_pad_length(self._size) * '\\0')", "and if the header represents a Primary header, it will", "next FITS block self._ffo.write(_pad_length(self._size) * '\\0') self.writecomplete = True self._ffo.flush()", "in self._header: dim = self._header['NAXIS'] if dim == 0: dim", "gcount = self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0) size =", "dim) if 'GCOUNT' not in self._header: self._header.set('GCOUNT', 1, 'number of", "in a writeable binary mode such as 'wb' or 'ab+'.", "the file. 
Returns ------- writecomplete : int Flag that when", "# Check if the file already exists. If it does", "import _BaseHDU, BITPIX2DTYPE from .hdulist import HDUList from .image import", "to stream data to a FITS file instead of requiring", "default PrimaryHDU to the file before writing the # given", "in self._header: self._header.set('XTENSION', 'IMAGE', 'Image extension', after='SIMPLE') del self._header['SIMPLE'] if", "modified to an image extension header and appended to the", "> 0: simple = self._header.get('SIMPLE', 'F') random_groups = self._header.get('GROUPS', 'F')", "Primary HDU will be inserted at the beginning of the", "stream to satisfy the amount specified in the header, the", "beginning of the file. If the file does not exist", "if 'PCOUNT' not in self._header: dim = self._header['NAXIS'] if dim", "byteswap little endian arrays before writing output = data.byteswap() else:", "and name.len == 0): newfile = True if newfile: if", "self._ffo.flush() return self.writecomplete @property def size(self): \"\"\" Return the size", "a better way # of doing this tmp_hdu._header = self._header", "will be written to the beginning of the file. If", "extension header and appended to the end of the file.", "1)] bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount =", "does not match the type specified ' 'in the header.')", "file so we # must change the Primary header provided", "'with' statement def __enter__(self): return self def __exit__(self, type, value,", "of a file name filename = fileobj_name(name) or '' #", "or '' # Check if the file already exists. If", "'T': groups = 1 else: groups = 0 size =", "name : file path, file object, or file like object", "raise TypeError('Supplied data does not match the type specified '", "self._ffo.tell() - self._data_offset == self._size: # the stream is full", "be streamed. 
If opened, the file object must be opened", "sufficient data has been written to the stream to satisfy", "after='PCOUNT') self._ffo = _File(name, 'append') # TODO : Fix this", "exist, it will be created, and if the header represents", "instead of a file name filename = fileobj_name(name) or ''", "to see # if we were provided with a Primary", "the provided header represents a Primary header, the header will", ": int Flag that when `True` indicates that all of", "written. Once sufficient data has been written to the stream", "return self.writecomplete @property def size(self): \"\"\" Return the size (in", "if 'GCOUNT' not in self._header: self._header.set('GCOUNT', 1, 'number of groups',", "data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct", "= size * self._header['NAXIS' + str(idx + 1)] bitpix =", "= True # Support the 'with' statement def __enter__(self): return", "statement def __enter__(self): return self def __exit__(self, type, value, traceback):", "OSError('Attempt to write more data to the stream than the", "block self._ffo.write(_pad_length(self._size) * '\\0') self.writecomplete = True self._ffo.flush() return self.writecomplete", "return size def close(self): \"\"\" Close the physical FITS file.", "= self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0)", "not be the first extension in the file so we", "file and the provided header will be added as the", "StreamingHDU: \"\"\" A class that provides the capability to stream", "of the input data does not match what is expected", "random_groups == 'T': groups = 1 else: groups = 0", "more data will be accepted. An attempt to write more", "(hasattr(name, 'len') and name.len == 0): newfile = True if", "the data portion of the HDU. \"\"\" size = 0", "extension. 
If the file does already exist, but the provided", "exist, but the provided header represents a Primary header, the", "better way # of doing this tmp_hdu._header = self._header self._header_offset", "= self._header.get('SIMPLE', 'F') random_groups = self._header.get('GROUPS', 'F') if simple ==", "Primary Header. If not we will need # to prepend", "in the file so we # must change the Primary", "file already exists. If it does not, check to see", "filename = fileobj_name(name) or '' # Check if the file", "need # to prepend a default PrimaryHDU to the file", "a file object instead of a file name filename =", "file. If the file does not already exist, it will", "self._ffo.writearray(output) if self._ffo.tell() - self._data_offset == self._size: # the stream", "the file does already exist, but the provided header represents", "False if filename: if not os.path.exists(filename) or os.path.getsize(filename) == 0:", "header.') if data.dtype.str[0] != '>': # byteswap little endian arrays", "a writeable binary mode such as 'wb' or 'ab+'. header", "0 naxis = self._header.get('NAXIS', 0) if naxis > 0: simple", "of the HDU. \"\"\" size = 0 naxis = self._header.get('NAXIS',", "fill a complete FITS block and no more data will", "the # given header. newfile = False if filename: if", "_BaseHDU() # Passing self._header as an argument to _BaseHDU() will", "== 0): newfile = True if newfile: if 'SIMPLE' not", "= tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size = self.size if self._size", "0: simple = self._header.get('SIMPLE', 'F') random_groups = self._header.get('GROUPS', 'F') if", "the beginning of the file and the provided header will", "to stream to the file. Returns ------- writecomplete : int", "an image extension header and appended to the end of", "header, a `TypeError` exception is raised. \"\"\" size = self._ffo.tell()", "Primary header, the header will be modified to an image", "before writing the # given header. 
newfile = False if", "Primary header, a default Primary HDU will be inserted at", "self.writecomplete or size + data.nbytes > self._size: raise OSError('Attempt to", "license - see PYFITS.rst import gzip import os from .base", "'len') and name.len == 0): newfile = True if newfile:", "self._size: raise OSError('Attempt to write more data to the stream", "* self._header['NAXIS' + str(idx + 1)] bitpix = self._header['BITPIX'] gcount", "for GzipFile objects.') self._header = header.copy() # handle a file", "a 3-clause BSD style license - see PYFITS.rst import gzip", "the next FITS block self._ffo.write(_pad_length(self._size) * '\\0') self.writecomplete = True", "tmp_hdu._header = self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size", "newfile = True elif (hasattr(name, 'len') and name.len == 0):", "self.writecomplete = True self._ffo.flush() return self.writecomplete @property def size(self): \"\"\"", "= False else: self.writecomplete = True # Support the 'with'", "the end of the file. \"\"\" if isinstance(name, gzip.GzipFile): raise", "will raise an `OSError` exception. If the dtype of the", "= self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0) size = abs(bitpix)", "Returns ------- writecomplete : int Flag that when `True` indicates", "will be opened and the header appended to the end", "not a Primary header, a default Primary HDU will be", "if self.writecomplete or size + data.nbytes > self._size: raise OSError('Attempt", "has been written to the stream to satisfy the amount", "opened and the header appended to the end of the", "be opened in a writeable binary mode such as 'wb'", "image # extension header. 
if 'SIMPLE' in self._header: self._header.set('XTENSION', 'IMAGE',", "simple = self._header.get('SIMPLE', 'F') random_groups = self._header.get('GROUPS', 'F') if simple", "undesired ways...need to have a better way # of doing", "be inserted at the beginning of the file and the", "FITS block and no more data will be accepted. An", "accepted. An attempt to write more data after the stream", "self._header.get('GROUPS', 'F') if simple == 'T' and random_groups == 'T':", "you need in the header: header[key] = (value, comment) shdu", "object must be opened in a writeable binary mode such", "of the data portion of the HDU. \"\"\" size =", "+ 1)] bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount", "the header, a `TypeError` exception is raised. \"\"\" size =", "data will be streamed. If opened, the file object must", "self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size = self.size", "self.writecomplete @property def size(self): \"\"\" Return the size (in bytes)", "file name filename = fileobj_name(name) or '' # Check if", "the header will be modified to an image extension header", "gcount * (pcount + size) // 8 return size def", "FITS block self._ffo.write(_pad_length(self._size) * '\\0') self.writecomplete = True self._ffo.flush() return", "If not we will need # to prepend a default", "0: self.writecomplete = False else: self.writecomplete = True # Support", "handle a file object instead of a file name filename", "self._header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount +", "file to which the header and data will be streamed.", "\"\"\" size = 0 naxis = self._header.get('NAXIS', 0) if naxis", "# extension header. if 'SIMPLE' in self._header: self._header.set('XTENSION', 'IMAGE', 'Image", "extension header. 
if 'SIMPLE' in self._header: self._header.set('XTENSION', 'IMAGE', 'Image extension',", "newfile = False if filename: if not os.path.exists(filename) or os.path.getsize(filename)", "this tmp_hdu._header = self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell()", "supported for GzipFile objects.') self._header = header.copy() # handle a", "astropy.io.fits.util import fileobj_name class StreamingHDU: \"\"\" A class that provides", "is expected by the header, a `TypeError` exception is raised.", "= _BaseHDU() # Passing self._header as an argument to _BaseHDU()", "size(self): \"\"\" Return the size (in bytes) of the data", "to the beginning of the file. If the file does", "= self._header.get('GROUPS', 'F') if simple == 'T' and random_groups ==", "object given a file name and a header. Parameters ----------", "or size + data.nbytes > self._size: raise OSError('Attempt to write", "header, it will be written to the beginning of the", "data : ndarray Data to stream to the file. Returns", "match the type specified ' 'in the header.') if data.dtype.str[0]", "a `TypeError` exception is raised. \"\"\" size = self._ffo.tell() -", "'ab+'. 
header : `Header` instance The header object associated with", "astropy.io.fits.file import _File from astropy.io.fits.header import _pad_length from astropy.io.fits.util import", "dim = '' else: dim = str(dim) self._header.set('PCOUNT', 0, 'number", "if data.dtype.str[0] != '>': # byteswap little endian arrays before", "object associated with the data to be written to the", "0: newfile = True elif (hasattr(name, 'len') and name.len ==", "or file like object The file to which the header", "object instead of a file name filename = fileobj_name(name) or", "to write more data after the stream has been filled", "size = self._ffo.tell() - self._data_offset if self.writecomplete or size +", "size = size * self._header['NAXIS' + str(idx + 1)] bitpix", "image extension header and appended to the end of the", "appended to the end of the file. \"\"\" if isinstance(name,", "astropy.io.fits.header import _pad_length from astropy.io.fits.util import fileobj_name class StreamingHDU: \"\"\"", "header = astropy.io.fits.Header() for all the cards you need in", "to which the header and data will be streamed. If", "of requiring data to all be written at once. The", "created, and if the header represents a Primary header, it", "Flag that when `True` indicates that all of the required", "the provided data would cause the stream to overflow, an", "self._ffo.tell() - self._data_offset if self.writecomplete or size + data.nbytes >", "may be written to the stream. If the provided data", "and appended to the end of the file. \"\"\" if", "opened in a writeable binary mode such as 'wb' or", "def __enter__(self): return self def __exit__(self, type, value, traceback): self.close()", "stream is padded to fill a complete FITS block and", "a Primary Header. 
If not we will need # to", "a Primary header, it will be written to the beginning", "objects.') self._header = header.copy() # handle a file object instead", "return self def __exit__(self, type, value, traceback): self.close() def write(self,", "and the data is not written. Once sufficient data has", "= self._header['NAXIS'] if dim == 0: dim = '' else:", "1 for idx in range(groups, naxis): size = size *", "values to be modified in undesired ways...need to have a", "size + data.nbytes > self._size: raise OSError('Attempt to write more", "self._data_offset == self._size: # the stream is full so pad", "`OSError` exception. If the dtype of the input data does", "'GCOUNT' not in self._header: self._header.set('GCOUNT', 1, 'number of groups', after='PCOUNT')", "we were provided with a Primary Header. If not we", "'exception') else: # This will not be the first extension", "will cause its # values to be modified in undesired", "= 0 naxis = self._header.get('NAXIS', 0) if naxis > 0:", "with the data to be written to the file. Notes", "the stream has been filled will raise an `OSError` exception.", "`TypeError` exception is raised. \"\"\" size = self._ffo.tell() - self._data_offset", "header) for each piece of data: shdu.write(data) shdu.close() \"\"\" def", "to _BaseHDU() will cause its # values to be modified", "represents a Primary header, it will be written to the", "first extension. If the file does already exist, but the", "that provides the capability to stream data to a FITS", "opened, the file object must be opened in a writeable", "Primary header provided into an image # extension header. if", "shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct a", "header. 
Parameters ---------- name : file path, file object, or", "idx in range(groups, naxis): size = size * self._header['NAXIS' +", "from astropy.io.fits.util import fileobj_name class StreamingHDU: \"\"\" A class that", "'Image extension', after='SIMPLE') del self._header['SIMPLE'] if 'PCOUNT' not in self._header:", "provided to the class constructor may be written to the", "block and no more data will be accepted. An attempt", "raise an `OSError` exception. If the dtype of the input", "expected by the header, a `TypeError` exception is raised. \"\"\"", "== 'T' and random_groups == 'T': groups = 1 else:", "str(idx + 1)] bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1)", "up tmp_hdu = _BaseHDU() # Passing self._header as an argument", "have a better way # of doing this tmp_hdu._header =", "* gcount * (pcount + size) // 8 return size", "must change the Primary header provided into an image #", "data specified in the header provided to the class constructor", "0) if naxis > 0: simple = self._header.get('SIMPLE', 'F') random_groups", "----- The file will be opened and the header appended", "not match the type specified ' 'in the header.') if", "import _File from astropy.io.fits.header import _pad_length from astropy.io.fits.util import fileobj_name", "stream data to a FITS file instead of requiring data", "which the header and data will be streamed. If opened,", "the Primary header provided into an image # extension header.", "0 size = 1 for idx in range(groups, naxis): size", "groups = 0 size = 1 for idx in range(groups,", "data.nbytes > self._size: raise OSError('Attempt to write more data to", "import HDUList from .image import PrimaryHDU from astropy.io.fits.file import _File", "= True elif (hasattr(name, 'len') and name.len == 0): newfile", "write more data to the stream than the ' 'header", "Notes ----- Only the amount of data specified in the", "been written to the stream to satisfy the amount specified", "will be added as the first extension. 
If the file", "a FITS file instead of requiring data to all be", "extension', after='SIMPLE') del self._header['SIMPLE'] if 'PCOUNT' not in self._header: dim", "A class that provides the capability to stream data to", "the stream than the ' 'header specified.') if BITPIX2DTYPE[self._header['BITPIX']] !=", "ndarray Data to stream to the file. Returns ------- writecomplete", "== 0: dim = '' else: dim = str(dim) self._header.set('PCOUNT',", "# to prepend a default PrimaryHDU to the file before", "that when `True` indicates that all of the required data", "`StreamingHDU` object given a file name and a header. Parameters", "self._data_offset = self._ffo.tell() self._size = self.size if self._size != 0:", "class constructor may be written to the stream. If the", "exception. If the dtype of the input data does not", "the provided header will be added as the first extension.", "data to the stream. Parameters ---------- data : ndarray Data" ]
[ "load object_extras %} {{ obj|call:\"getName\" }} \"\"\" context = {", "optionName): if optionName == \"name\": return self.name elif optionName ==", "\"\"\" context = { 'obj': genObj } self.assertEqual(render(template, context), \"True\")", "set() self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self): template = \"\"\" {% load staticfiles", "static static_path %}\" /> {% endfor %} \"\"\" context =", "static_path %}\" type=\"text/javascript\"></script> {% endfor %} \"\"\" context = {", "\"True\") template = \"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclicknotexist.html\"|template_exists", "genObj } self.assertEqual(render(template, context), \"test\") template = \"\"\" {% load", "self.status = \"ready\" def getOption(self, optionName): if optionName == \"name\":", "object_extrasTests(TestCase): def test_callMethod(self): genObj = genericObj() template = \"\"\" {%", "%} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\" self.assertEqual(render(template), \"True\") template = \"\"\"", "widget_css %} {% for static_path in widget_css %} <link rel=\"stylesheet\"", "staticfiles %} {% load static_extras %} {% getCssStatics widgetTypeSet as", "object_extras %} {{ obj|obj_type:\"genericObj\" }} \"\"\" context = { 'obj':", "'obj': genObj } self.assertEqual(render(template, context), \"True\") template = \"\"\" {%", "Context(context_dict) t = Template(template_string) return t.render(c).strip() class object_extrasTests(TestCase): def test_callMethod(self):", "context), \"test\") def test_check_type(self): genObj = genericObj() template = \"\"\"", "\"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclicknotexist.html\"|template_exists }} \"\"\" self.assertEqual(render(template),", "\"\"\" context = { 'obj': genObj } self.assertEqual(render(template, context), \"test\")", "class object_extrasTests(TestCase): def test_callMethod(self): genObj = genericObj() template = 
\"\"\"", "{% load object_extras %} {{ obj|call:\"getName\" }} \"\"\" context =", "{{ obj|call:\"getName\" }} \"\"\" context = { 'obj': genObj }", "def test_getJsStatics(self): template = \"\"\" {% load staticfiles %} {%", "= { 'obj': genObj } self.assertEqual(render(template, context), \"True\") template =", "template = \"\"\" {% load object_extras %} {{ obj|obj_type:\"notexist\" }}", "context = { 'widgetTypeSet': self.widgetTypeSetJs } out = '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\"", "= { 'obj': genObj } self.assertEqual(render(template, context), \"test\") def test_check_type(self):", "\"\"\" {% load object_extras %} {{ obj|args:\"name\"|call:\"getOption\" }} \"\"\" context", "return self.status def getName(self): return self.name def render(template_string, context_dict=None): \"\"\"", "%} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetJs } out =", "as widget_css %} {% for static_path in widget_css %} <link", "%} {% getCssStatics widgetTypeSet as widget_css %} {% for static_path", "test_getCssStatics(self): template = \"\"\" {% load staticfiles %} {% load", "{% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclicknotexist.html\"|template_exists }} \"\"\" self.assertEqual(render(template), \"False\")", "= \"\"\" {% load object_extras %} {{ obj|call:\"getName\" }} \"\"\"", "genObj } self.assertEqual(render(template, context), \"test\") def test_check_type(self): genObj = genericObj()", "'obj': genObj } self.assertEqual(render(template, context), \"False\") class static_extrasTests(TestCase): def setUp(self):", "context), \"True\") template = \"\"\" {% load object_extras %} {{", "def getOption(self, optionName): if optionName == \"name\": return self.name elif", "self.widgetTypeSetCss = set() self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self): template = \"\"\" {%", "A generic object for testing templatetags \"\"\" def __init__(self): self.name", "for static_path in widget_css %} <link 
rel=\"stylesheet\" type=\"text/css\" href=\"{% static", "endfor %} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetCss } out", "'widgetTypeSet': self.widgetTypeSetCss } out = '<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />'", "def getName(self): return self.name def render(template_string, context_dict=None): \"\"\" A shortcut", "\"\"\" A generic object for testing templatetags \"\"\" def __init__(self):", "type=\"text/javascript\"></script> {% endfor %} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetJs", "load object_extras %} {{ obj|obj_type:\"genericObj\" }} \"\"\" context = {", "= Context(context_dict) t = Template(template_string) return t.render(c).strip() class object_extrasTests(TestCase): def", "A shortcut for testing template output. \"\"\" if context_dict is", "\"\"\" {% load staticfiles %} {% load static_extras %} {%", "{% for static_path in widget_css %} <link rel=\"stylesheet\" type=\"text/css\" href=\"{%", "} self.assertEqual(render(template, context), \"test\") def test_check_type(self): genObj = genericObj() template", "/> {% endfor %} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetCss", "\"\"\" context = { 'widgetTypeSet': self.widgetTypeSetCss } out = '<link", "static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\" self.assertEqual(render(template), \"True\") template =", "\"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\" self.assertEqual(render(template), \"True\") template = \"\"\" {% load", "django.template import Template, Context class genericObj(object): \"\"\" A generic object", "generic object for testing templatetags \"\"\" def __init__(self): self.name =", "%} <link rel=\"stylesheet\" type=\"text/css\" href=\"{% static static_path %}\" /> {%", "genObj = genericObj() template = \"\"\" {% load object_extras %}", "context), \"False\") class static_extrasTests(TestCase): def 
setUp(self): self.widgetTypeSetJs = set() self.widgetTypeSetJs.add('queryonclick')", "= \"test\" self.status = \"ready\" def getOption(self, optionName): if optionName", "template output. \"\"\" if context_dict is None: context_dict = {}", "if optionName == \"name\": return self.name elif optionName == \"status\":", "}} \"\"\" context = { 'obj': genObj } self.assertEqual(render(template, context),", "object_extras %} {{ obj|args:\"name\"|call:\"getOption\" }} \"\"\" context = { 'obj':", "for testing template output. \"\"\" if context_dict is None: context_dict", "test_callMethod(self): genObj = genericObj() template = \"\"\" {% load object_extras", "self.assertEqual(render(template, context), \"test\") def test_check_type(self): genObj = genericObj() template =", "{% load object_extras %} {{ obj|args:\"name\"|call:\"getOption\" }} \"\"\" context =", "test_check_type(self): genObj = genericObj() template = \"\"\" {% load object_extras", "= \"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\"", "= set() self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self): template = \"\"\" {% load", "out) def test_getCssStatics(self): template = \"\"\" {% load staticfiles %}", "getJsStatics widgetTypeSet as widget_js %} {% for static_path in widget_js", "{% endfor %} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetJs }", "{% endfor %} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetCss }", "context = { 'obj': genObj } self.assertEqual(render(template, context), \"test\") template", "testing template output. 
\"\"\" if context_dict is None: context_dict =", "out = '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template, context), out) def test_getCssStatics(self):", "= { 'obj': genObj } self.assertEqual(render(template, context), \"False\") class static_extrasTests(TestCase):", "from django.test import TestCase from django.template import Template, Context class", "'<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template, context), out) def test_getCssStatics(self): template =", "self.widgetTypeSetCss } out = '<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template,", "{{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\" self.assertEqual(render(template), \"True\") template = \"\"\" {%", "def test_check_type(self): genObj = genericObj() template = \"\"\" {% load", "testing templatetags \"\"\" def __init__(self): self.name = \"test\" self.status =", "def __init__(self): self.name = \"test\" self.status = \"ready\" def getOption(self,", "rel=\"stylesheet\" type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template, context), out) def test_template_exist(self): template", "template = \"\"\" {% load object_extras %} {{ obj|args:\"name\"|call:\"getOption\" }}", "django from django.test import TestCase from django.template import Template, Context", "\"name\": return self.name elif optionName == \"status\": return self.status def", "in widget_css %} <link rel=\"stylesheet\" type=\"text/css\" href=\"{% static static_path %}\"", "{% getJsStatics widgetTypeSet as widget_js %} {% for static_path in", "object for testing templatetags \"\"\" def __init__(self): self.name = \"test\"", "\"\"\" context = { 'obj': genObj } 
self.assertEqual(render(template, context), \"False\")", "TestCase from django.template import Template, Context class genericObj(object): \"\"\" A", "{% load static_extras %} {% getCssStatics widgetTypeSet as widget_css %}", "self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss = set() self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self): template = \"\"\"", "getName(self): return self.name def render(template_string, context_dict=None): \"\"\" A shortcut for", "{ 'obj': genObj } self.assertEqual(render(template, context), \"False\") class static_extrasTests(TestCase): def", "object_extras %} {{ obj|call:\"getName\" }} \"\"\" context = { 'obj':", "widget_js %} <script src=\"{% static static_path %}\" type=\"text/javascript\"></script> {% endfor", "context_dict=None): \"\"\" A shortcut for testing template output. \"\"\" if", "context_dict is None: context_dict = {} c = Context(context_dict) t", "staticfiles %} {% load static_extras %} {% getJsStatics widgetTypeSet as", "c = Context(context_dict) t = Template(template_string) return t.render(c).strip() class object_extrasTests(TestCase):", "static_path %}\" /> {% endfor %} \"\"\" context = {", "self.widgetTypeSetJs = set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss = set() self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self):", "= \"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclicknotexist.html\"|template_exists }} \"\"\"", "class static_extrasTests(TestCase): def setUp(self): self.widgetTypeSetJs = set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss =", "context), out) def test_getCssStatics(self): template = \"\"\" {% load staticfiles", "{{ obj|obj_type:\"genericObj\" }} \"\"\" context = { 'obj': genObj }", "context_dict = {} c = Context(context_dict) t = Template(template_string) return", "self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self): template = \"\"\" {% load staticfiles %}", 
"} out = '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template, context), out) def", "} self.assertEqual(render(template, context), \"False\") class static_extrasTests(TestCase): def setUp(self): self.widgetTypeSetJs =", "test_template_exist(self): template = \"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists", "self.name = \"test\" self.status = \"ready\" def getOption(self, optionName): if", "\"\"\" def __init__(self): self.name = \"test\" self.status = \"ready\" def", "getOption(self, optionName): if optionName == \"name\": return self.name elif optionName", "= Template(template_string) return t.render(c).strip() class object_extrasTests(TestCase): def test_callMethod(self): genObj =", "= \"\"\" {% load object_extras %} {{ obj|args:\"name\"|call:\"getOption\" }} \"\"\"", "setUp(self): self.widgetTypeSetJs = set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss = set() self.widgetTypeSetCss.add('geoexttoolbar') def", "%}\" /> {% endfor %} \"\"\" context = { 'widgetTypeSet':", "= '<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template, context), out) def", "render(template_string, context_dict=None): \"\"\" A shortcut for testing template output. 
\"\"\"", "self.assertEqual(render(template, context), out) def test_template_exist(self): template = \"\"\" {% load", "= '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template, context), out) def test_getCssStatics(self): template", "\"test\") template = \"\"\" {% load object_extras %} {{ obj|call:\"getName\"", "obj|call:\"getName\" }} \"\"\" context = { 'obj': genObj } self.assertEqual(render(template,", "\"ready\" def getOption(self, optionName): if optionName == \"name\": return self.name", "import django from django.test import TestCase from django.template import Template,", "type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template, context), out) def test_template_exist(self): template =", "= { 'obj': genObj } self.assertEqual(render(template, context), \"test\") template =", "'<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template, context), out) def test_template_exist(self):", "{{ obj|args:\"name\"|call:\"getOption\" }} \"\"\" context = { 'obj': genObj }", "{% load object_extras %} {{ obj|obj_type:\"genericObj\" }} \"\"\" context =", "= genericObj() template = \"\"\" {% load object_extras %} {{", "static_extras %} {% getCssStatics widgetTypeSet as widget_css %} {% for", "load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\" self.assertEqual(render(template), \"True\") template", "\"\"\" self.assertEqual(render(template), \"True\") template = \"\"\" {% load static_extras %}", "= \"\"\" {% load staticfiles %} {% load static_extras %}", "import TestCase from django.template import Template, Context class genericObj(object): \"\"\"", "template = \"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }}", 
"self.name elif optionName == \"status\": return self.status def getName(self): return", "is None: context_dict = {} c = Context(context_dict) t =", "__init__(self): self.name = \"test\" self.status = \"ready\" def getOption(self, optionName):", "template = \"\"\" {% load staticfiles %} {% load static_extras", "src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template, context), out) def test_getCssStatics(self): template = \"\"\"", "object_extras %} {{ obj|obj_type:\"notexist\" }} \"\"\" context = { 'obj':", "static_path in widget_css %} <link rel=\"stylesheet\" type=\"text/css\" href=\"{% static static_path", "load staticfiles %} {% load static_extras %} {% getCssStatics widgetTypeSet", "= \"\"\" {% load object_extras %} {{ obj|obj_type:\"genericObj\" }} \"\"\"", "widgetTypeSet as widget_js %} {% for static_path in widget_js %}", "\"True\") template = \"\"\" {% load object_extras %} {{ obj|obj_type:\"notexist\"", "self.widgetTypeSetJs } out = '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template, context), out)", "%} {% getJsStatics widgetTypeSet as widget_js %} {% for static_path", "optionName == \"name\": return self.name elif optionName == \"status\": return", "%} {{ obj|call:\"getName\" }} \"\"\" context = { 'obj': genObj", "= { 'widgetTypeSet': self.widgetTypeSetCss } out = '<link rel=\"stylesheet\" type=\"text/css\"", "out) def test_template_exist(self): template = \"\"\" {% load static_extras %}", "genericObj(object): \"\"\" A generic object for testing templatetags \"\"\" def", "Template, Context class genericObj(object): \"\"\" A generic object for testing", "= \"ready\" def getOption(self, optionName): if optionName == \"name\": return", "endfor %} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetJs } out", "static_extras %} {% getJsStatics widgetTypeSet as widget_js %} {% for", 
"self.status def getName(self): return self.name def render(template_string, context_dict=None): \"\"\" A", "out = '<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template, context), out)", "{ 'obj': genObj } self.assertEqual(render(template, context), \"test\") def test_check_type(self): genObj", "optionName == \"status\": return self.status def getName(self): return self.name def", "type=\"text/css\" href=\"{% static static_path %}\" /> {% endfor %} \"\"\"", "context = { 'obj': genObj } self.assertEqual(render(template, context), \"test\") def", "obj|args:\"name\"|call:\"getOption\" }} \"\"\" context = { 'obj': genObj } self.assertEqual(render(template,", "= {} c = Context(context_dict) t = Template(template_string) return t.render(c).strip()", "Template(template_string) return t.render(c).strip() class object_extrasTests(TestCase): def test_callMethod(self): genObj = genericObj()", "= \"\"\" {% load object_extras %} {{ obj|obj_type:\"notexist\" }} \"\"\"", "import Template, Context class genericObj(object): \"\"\" A generic object for", "context), out) def test_template_exist(self): template = \"\"\" {% load static_extras", "context = { 'obj': genObj } self.assertEqual(render(template, context), \"True\") template", "shortcut for testing template output. 
\"\"\" if context_dict is None:", "%} {{ obj|args:\"name\"|call:\"getOption\" }} \"\"\" context = { 'obj': genObj", "'obj': genObj } self.assertEqual(render(template, context), \"test\") template = \"\"\" {%", "in widget_js %} <script src=\"{% static static_path %}\" type=\"text/javascript\"></script> {%", "static_extrasTests(TestCase): def setUp(self): self.widgetTypeSetJs = set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss = set()", "<link rel=\"stylesheet\" type=\"text/css\" href=\"{% static static_path %}\" /> {% endfor", "%} {% for static_path in widget_css %} <link rel=\"stylesheet\" type=\"text/css\"", "self.assertEqual(render(template, context), \"test\") template = \"\"\" {% load object_extras %}", "template = \"\"\" {% load object_extras %} {{ obj|call:\"getName\" }}", "self.name def render(template_string, context_dict=None): \"\"\" A shortcut for testing template", "self.assertEqual(render(template, context), \"True\") template = \"\"\" {% load object_extras %}", "= set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss = set() self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self): template", "load static_extras %} {% getJsStatics widgetTypeSet as widget_js %} {%", "context = { 'widgetTypeSet': self.widgetTypeSetCss } out = '<link rel=\"stylesheet\"", "Context class genericObj(object): \"\"\" A generic object for testing templatetags", "elif optionName == \"status\": return self.status def getName(self): return self.name", "{{ obj|obj_type:\"notexist\" }} \"\"\" context = { 'obj': genObj }", "t = Template(template_string) return t.render(c).strip() class object_extrasTests(TestCase): def test_callMethod(self): genObj", "'obj': genObj } self.assertEqual(render(template, context), \"test\") def test_check_type(self): genObj =", "widget_js %} {% for static_path in widget_js %} <script src=\"{%", "def setUp(self): self.widgetTypeSetJs = set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss = 
set() self.widgetTypeSetCss.add('geoexttoolbar')", "href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template, context), out) def test_template_exist(self): template = \"\"\"", "/>' self.assertEqual(render(template, context), out) def test_template_exist(self): template = \"\"\" {%", "context = { 'obj': genObj } self.assertEqual(render(template, context), \"False\") class", "%} {% load static_extras %} {% getJsStatics widgetTypeSet as widget_js", "output. \"\"\" if context_dict is None: context_dict = {} c", "\"\"\" context = { 'widgetTypeSet': self.widgetTypeSetJs } out = '<script", "\"test\" self.status = \"ready\" def getOption(self, optionName): if optionName ==", "return self.name elif optionName == \"status\": return self.status def getName(self):", "obj|obj_type:\"notexist\" }} \"\"\" context = { 'obj': genObj } self.assertEqual(render(template,", "load staticfiles %} {% load static_extras %} {% getJsStatics widgetTypeSet", "t.render(c).strip() class object_extrasTests(TestCase): def test_callMethod(self): genObj = genericObj() template =", "as widget_js %} {% for static_path in widget_js %} <script", "= { 'widgetTypeSet': self.widgetTypeSetJs } out = '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>'", "return t.render(c).strip() class object_extrasTests(TestCase): def test_callMethod(self): genObj = genericObj() template", "%} {% for static_path in widget_js %} <script src=\"{% static", "{% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\" self.assertEqual(render(template), \"True\")", "href=\"{% static static_path %}\" /> {% endfor %} \"\"\" context", "== \"status\": return self.status def getName(self): return self.name def render(template_string,", "{} c = Context(context_dict) t = Template(template_string) return t.render(c).strip() class", "{% for static_path in widget_js %} <script 
src=\"{% static static_path", "class genericObj(object): \"\"\" A generic object for testing templatetags \"\"\"", "obj|obj_type:\"genericObj\" }} \"\"\" context = { 'obj': genObj } self.assertEqual(render(template,", "{% load staticfiles %} {% load static_extras %} {% getJsStatics", "{ 'obj': genObj } self.assertEqual(render(template, context), \"True\") template = \"\"\"", "template = \"\"\" {% load object_extras %} {{ obj|obj_type:\"genericObj\" }}", "load object_extras %} {{ obj|obj_type:\"notexist\" }} \"\"\" context = {", "genericObj() template = \"\"\" {% load object_extras %} {{ obj|args:\"name\"|call:\"getOption\"", "self.assertEqual(render(template, context), out) def test_getCssStatics(self): template = \"\"\" {% load", "genObj } self.assertEqual(render(template, context), \"True\") template = \"\"\" {% load", "django.test import TestCase from django.template import Template, Context class genericObj(object):", "\"status\": return self.status def getName(self): return self.name def render(template_string, context_dict=None):", "\"test\") def test_check_type(self): genObj = genericObj() template = \"\"\" {%", "genObj } self.assertEqual(render(template, context), \"False\") class static_extrasTests(TestCase): def setUp(self): self.widgetTypeSetJs", "src=\"{% static static_path %}\" type=\"text/javascript\"></script> {% endfor %} \"\"\" context", "== \"name\": return self.name elif optionName == \"status\": return self.status", "context), \"test\") template = \"\"\" {% load object_extras %} {{", "{ 'widgetTypeSet': self.widgetTypeSetJs } out = '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template,", "def test_getCssStatics(self): template = \"\"\" {% load staticfiles %} {%", "{% load staticfiles %} {% load static_extras %} {% getCssStatics", "\"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclick.html\"|template_exists }} \"\"\" 
self.assertEqual(render(template),", "\"\"\" {% load object_extras %} {{ obj|call:\"getName\" }} \"\"\" context", "for static_path in widget_js %} <script src=\"{% static static_path %}\"", "self.assertEqual(render(template), \"True\") template = \"\"\" {% load static_extras %} {{", "template = \"\"\" {% load static_extras %} {{ \"geoprisma/widgets/queryonclick/queryonclicknotexist.html\"|template_exists }}", "def render(template_string, context_dict=None): \"\"\" A shortcut for testing template output.", "test_getJsStatics(self): template = \"\"\" {% load staticfiles %} {% load", "self.assertEqual(render(template, context), \"False\") class static_extrasTests(TestCase): def setUp(self): self.widgetTypeSetJs = set()", "<script src=\"{% static static_path %}\" type=\"text/javascript\"></script> {% endfor %} \"\"\"", "def test_template_exist(self): template = \"\"\" {% load static_extras %} {{", "\"\"\" if context_dict is None: context_dict = {} c =", "templatetags \"\"\" def __init__(self): self.name = \"test\" self.status = \"ready\"", "\"\"\" {% load object_extras %} {{ obj|obj_type:\"genericObj\" }} \"\"\" context", "load static_extras %} {% getCssStatics widgetTypeSet as widget_css %} {%", "%} <script src=\"{% static static_path %}\" type=\"text/javascript\"></script> {% endfor %}", "%} {{ obj|obj_type:\"genericObj\" }} \"\"\" context = { 'obj': genObj", "\"False\") class static_extrasTests(TestCase): def setUp(self): self.widgetTypeSetJs = set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss", "set() self.widgetTypeSetJs.add('queryonclick') self.widgetTypeSetCss = set() self.widgetTypeSetCss.add('geoexttoolbar') def test_getJsStatics(self): template =", "{% getCssStatics widgetTypeSet as widget_css %} {% for static_path in", "{ 'obj': genObj } self.assertEqual(render(template, context), \"test\") template = \"\"\"", "%} {% load static_extras %} {% getCssStatics widgetTypeSet as widget_css", "} out = '<link rel=\"stylesheet\" type=\"text/css\" 
href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\" />' self.assertEqual(render(template, context),", "{% load object_extras %} {{ obj|obj_type:\"notexist\" }} \"\"\" context =", "None: context_dict = {} c = Context(context_dict) t = Template(template_string)", "{% load static_extras %} {% getJsStatics widgetTypeSet as widget_js %}", "%} {{ obj|obj_type:\"notexist\" }} \"\"\" context = { 'obj': genObj", "\"\"\" A shortcut for testing template output. \"\"\" if context_dict", "type=\"text/javascript\"></script>' self.assertEqual(render(template, context), out) def test_getCssStatics(self): template = \"\"\" {%", "{ 'widgetTypeSet': self.widgetTypeSetCss } out = '<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css\"", "%} \"\"\" context = { 'widgetTypeSet': self.widgetTypeSetCss } out =", "def test_callMethod(self): genObj = genericObj() template = \"\"\" {% load", "} self.assertEqual(render(template, context), \"test\") template = \"\"\" {% load object_extras", "return self.name def render(template_string, context_dict=None): \"\"\" A shortcut for testing", "genericObj() template = \"\"\" {% load object_extras %} {{ obj|obj_type:\"genericObj\"", "\"\"\" {% load object_extras %} {{ obj|obj_type:\"notexist\" }} \"\"\" context", "static static_path %}\" type=\"text/javascript\"></script> {% endfor %} \"\"\" context =", "getCssStatics widgetTypeSet as widget_css %} {% for static_path in widget_css", "widget_css %} <link rel=\"stylesheet\" type=\"text/css\" href=\"{% static static_path %}\" />", "'widgetTypeSet': self.widgetTypeSetJs } out = '<script src=\"/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js\" type=\"text/javascript\"></script>' self.assertEqual(render(template, context),", "%}\" type=\"text/javascript\"></script> {% endfor %} \"\"\" context = { 'widgetTypeSet':", "from django.template import Template, Context class genericObj(object): \"\"\" A generic", "static_path in 
widget_js %} <script src=\"{% static static_path %}\" type=\"text/javascript\"></script>", "} self.assertEqual(render(template, context), \"True\") template = \"\"\" {% load object_extras", "if context_dict is None: context_dict = {} c = Context(context_dict)", "rel=\"stylesheet\" type=\"text/css\" href=\"{% static static_path %}\" /> {% endfor %}", "load object_extras %} {{ obj|args:\"name\"|call:\"getOption\" }} \"\"\" context = {", "widgetTypeSet as widget_css %} {% for static_path in widget_css %}", "for testing templatetags \"\"\" def __init__(self): self.name = \"test\" self.status", "}} \"\"\" self.assertEqual(render(template), \"True\") template = \"\"\" {% load static_extras" ]
[ "get_current_user() target = self.copy_into(_other, columns, **kwargs) if kwargs.get('clone_objects', False): self.copy_objects(target,", "'modified_by', 'context' ] if kwargs.get('clone_people', False) and getattr(self, \"contact\"): columns.append(\"contact\")", "backref='task_group', cascade='all, delete-orphan') objects = association_proxy( 'task_group_objects', 'object', 'TaskGroupObject') task_group_tasks", "= self.copy_into(_other, columns, **kwargs) if kwargs.get('clone_objects', False): self.copy_objects(target, **kwargs) if", "= db.relationship( 'TaskGroupObject', backref='task_group', cascade='all, delete-orphan') objects = association_proxy( 'task_group_objects',", "all_models from ggrc_workflows.models.task_group_object import TaskGroupObject class TaskGroup( WithContact, Timeboxed, Described,", "Workflow.query.filter( (Workflow.id == cls.workflow_id) & (predicate(Workflow.slug) | predicate(Workflow.title)) ).exists() @classmethod", "\"mandatory\": True, \"filter_by\": \"_filter_by_workflow\", }, \"task_group_objects\": { \"display_name\": \"Objects\", \"type\":", "[ 'workflow', 'task_group_objects', PublishOnly('objects'), 'task_group_tasks', 'lock_task_order', 'sort_index', # Intentionally do", "TaskGroupObject.object_id) & predicate(field) ).exists()) return TaskGroupObject.query.filter( (TaskGroupObject.task_group_id == cls.id) &", "sqlalchemy import or_ from ggrc import db from ggrc.login import", "\"query\", None) field = getattr(model, \"slug\", getattr(model, \"email\", None)) if", "import get_current_user from ggrc.models.associationproxy import association_proxy from ggrc.models.mixins import (", "db.Model): \"\"\"Workflow TaskGroup model.\"\"\" __tablename__ = 'task_groups' _title_uniqueness = False", "model_name in all_models.__all__: model = getattr(all_models, model_name) query = getattr(model,", "}, \"secondary_contact\": None, \"start_date\": None, \"end_date\": None, \"workflow\": { \"display_name\":", "target def copy_objects(self, 
target, **kwargs): # pylint: disable=unused-argument for task_group_object", "task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\", False), )) return target @classmethod def _filter_by_workflow(cls,", "association_proxy from ggrc.models.mixins import ( Titled, Slugged, Described, Timeboxed, WithContact", "workflow_id = db.Column( db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False, ) lock_task_order =", "db.Column( db.String(length=250), default=\"\", nullable=False) _publish_attrs = [ 'workflow', 'task_group_objects', PublishOnly('objects'),", "\"contact\": { \"display_name\": \"Assignee\", \"mandatory\": True, \"filter_by\": \"_filter_by_contact\", }, \"secondary_contact\":", "import all_models from ggrc_workflows.models.task_group_object import TaskGroupObject class TaskGroup( WithContact, Timeboxed,", "import or_ from ggrc import db from ggrc.login import get_current_user", "ggrc.models.reflection import AttributeInfo from ggrc.models.reflection import PublishOnly from ggrc.models import", "'TaskGroupObject') task_group_tasks = db.relationship( 'TaskGroupTask', backref='task_group', cascade='all, delete-orphan') cycle_task_groups =", "columns.append(\"contact\") else: kwargs[\"contact\"] = get_current_user() target = self.copy_into(_other, columns, **kwargs)", "= db.relationship( 'TaskGroupTask', backref='task_group', cascade='all, delete-orphan') cycle_task_groups = db.relationship( 'CycleTaskGroup',", "Titled, Slugged, db.Model): \"\"\"Workflow TaskGroup model.\"\"\" __tablename__ = 'task_groups' _title_uniqueness", "for model_name in all_models.__all__: model = getattr(all_models, model_name) query =", "] if kwargs.get('clone_people', False) and getattr(self, \"contact\"): columns.append(\"contact\") else: kwargs[\"contact\"]", "self.copy_objects(target, **kwargs) if kwargs.get('clone_tasks', False): self.copy_tasks(target, **kwargs) return target def", "cascade='all, delete-orphan') 
cycle_task_groups = db.relationship( 'CycleTaskGroup', backref='task_group') sort_index = db.Column(", "in self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context, )) return target def copy_tasks(self,", "\"task_group_objects\": { \"display_name\": \"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\", }, }", "Intentionally do not include `cycle_task_groups` # 'cycle_task_groups', ] _aliases =", "ondelete=\"CASCADE\"), nullable=False, ) lock_task_order = db.Column(db.Boolean(), nullable=True) task_group_objects = db.relationship(", "LICENSE file> \"\"\"A module containing the workflow TaskGroup model.\"\"\" from", "_title_uniqueness = False workflow_id = db.Column( db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False,", "] _aliases = { \"title\": \"Summary\", \"description\": \"Details\", \"contact\": {", "for task_group_task in self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None, task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\", False),", "\"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\", }, } def copy(self, _other=None, **kwargs):", "**kwargs) if kwargs.get('clone_objects', False): self.copy_objects(target, **kwargs) if kwargs.get('clone_tasks', False): self.copy_tasks(target,", "\"_filter_by_workflow\", }, \"task_group_objects\": { \"display_name\": \"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\",", "cascade='all, delete-orphan') objects = association_proxy( 'task_group_objects', 'object', 'TaskGroupObject') task_group_tasks =", "class TaskGroup( WithContact, Timeboxed, Described, Titled, Slugged, db.Model): \"\"\"Workflow TaskGroup", "and getattr(self, \"contact\"): columns.append(\"contact\") else: kwargs[\"contact\"] = get_current_user() target =", "'workflow', 
'task_group_objects', PublishOnly('objects'), 'task_group_tasks', 'lock_task_order', 'sort_index', # Intentionally do not", "ggrc_workflows.models.task_group_object import TaskGroupObject class TaskGroup( WithContact, Timeboxed, Described, Titled, Slugged,", "<reponame>acidburn0zzz/ggrc-core # Copyright (C) 2016 Google Inc. # Licensed under", "delete-orphan') objects = association_proxy( 'task_group_objects', 'object', 'TaskGroupObject') task_group_tasks = db.relationship(", "the workflow TaskGroup model.\"\"\" from sqlalchemy import or_ from ggrc", "db.relationship( 'CycleTaskGroup', backref='task_group') sort_index = db.Column( db.String(length=250), default=\"\", nullable=False) _publish_attrs", "== TaskGroupObject.object_id) & predicate(field) ).exists()) return TaskGroupObject.query.filter( (TaskGroupObject.task_group_id == cls.id)", "getattr(model, \"slug\", getattr(model, \"email\", None)) if query is None or", "hasattr(model, \"id\"): continue parts.append(query.filter( (TaskGroupObject.object_type == model_name) & (model.id ==", "\"filter_by\": \"_filter_by_workflow\", }, \"task_group_objects\": { \"display_name\": \"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\":", "\"email\", None)) if query is None or field is None", "from ggrc.models.reflection import AttributeInfo from ggrc.models.reflection import PublishOnly from ggrc.models", "False workflow_id = db.Column( db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False, ) lock_task_order", "kwargs.get('clone_objects', False): self.copy_objects(target, **kwargs) if kwargs.get('clone_tasks', False): self.copy_tasks(target, **kwargs) return", "== cls.workflow_id) & (predicate(Workflow.slug) | predicate(Workflow.title)) ).exists() @classmethod def _filter_by_objects(cls,", "predicate): parts = [] for model_name in all_models.__all__: model =", "**kwargs): for task_group_task in self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None, 
task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\",", "False): self.copy_objects(target, **kwargs) if kwargs.get('clone_tasks', False): self.copy_tasks(target, **kwargs) return target", "not include `cycle_task_groups` # 'cycle_task_groups', ] _aliases = { \"title\":", "None, \"workflow\": { \"display_name\": \"Workflow\", \"mandatory\": True, \"filter_by\": \"_filter_by_workflow\", },", "'workflow', 'sort_index', 'modified_by', 'context' ] if kwargs.get('clone_people', False) and getattr(self,", "get_current_user from ggrc.models.associationproxy import association_proxy from ggrc.models.mixins import ( Titled,", "import Workflow return Workflow.query.filter( (Workflow.id == cls.workflow_id) & (predicate(Workflow.slug) |", "& predicate(field) ).exists()) return TaskGroupObject.query.filter( (TaskGroupObject.task_group_id == cls.id) & or_(*parts)", "(predicate(Workflow.slug) | predicate(Workflow.title)) ).exists() @classmethod def _filter_by_objects(cls, predicate): parts =", "target = self.copy_into(_other, columns, **kwargs) if kwargs.get('clone_objects', False): self.copy_objects(target, **kwargs)", "(Workflow.id == cls.workflow_id) & (predicate(Workflow.slug) | predicate(Workflow.title)) ).exists() @classmethod def", "_aliases = { \"title\": \"Summary\", \"description\": \"Details\", \"contact\": { \"display_name\":", "\"display_name\": \"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\", }, } def copy(self,", "AttributeInfo from ggrc.models.reflection import PublishOnly from ggrc.models import all_models from", "import association_proxy from ggrc.models.mixins import ( Titled, Slugged, Described, Timeboxed,", "None, \"end_date\": None, \"workflow\": { \"display_name\": \"Workflow\", \"mandatory\": True, \"filter_by\":", "not hasattr(model, \"id\"): continue parts.append(query.filter( (TaskGroupObject.object_type == model_name) & (model.id", "kwargs[\"contact\"] = get_current_user() 
target = self.copy_into(_other, columns, **kwargs) if kwargs.get('clone_objects',", "backref='task_group') sort_index = db.Column( db.String(length=250), default=\"\", nullable=False) _publish_attrs = [", "True, \"filter_by\": \"_filter_by_workflow\", }, \"task_group_objects\": { \"display_name\": \"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING,", "\"workflow\": { \"display_name\": \"Workflow\", \"mandatory\": True, \"filter_by\": \"_filter_by_workflow\", }, \"task_group_objects\":", "in all_models.__all__: model = getattr(all_models, model_name) query = getattr(model, \"query\",", "clone_people=kwargs.get(\"clone_people\", False), )) return target @classmethod def _filter_by_workflow(cls, predicate): from", "parts.append(query.filter( (TaskGroupObject.object_type == model_name) & (model.id == TaskGroupObject.object_id) & predicate(field)", "field = getattr(model, \"slug\", getattr(model, \"email\", None)) if query is", "TaskGroup( WithContact, Timeboxed, Described, Titled, Slugged, db.Model): \"\"\"Workflow TaskGroup model.\"\"\"", "nullable=False, ) lock_task_order = db.Column(db.Boolean(), nullable=True) task_group_objects = db.relationship( 'TaskGroupObject',", "'context' ] if kwargs.get('clone_people', False) and getattr(self, \"contact\"): columns.append(\"contact\") else:", "parts = [] for model_name in all_models.__all__: model = getattr(all_models,", "http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"A module containing the workflow TaskGroup", "getattr(model, \"query\", None) field = getattr(model, \"slug\", getattr(model, \"email\", None))", "self.copy_into(_other, columns, **kwargs) if kwargs.get('clone_objects', False): self.copy_objects(target, **kwargs) if kwargs.get('clone_tasks',", "kwargs.get('clone_people', False) and getattr(self, \"contact\"): columns.append(\"contact\") else: kwargs[\"contact\"] = get_current_user()", "columns, **kwargs) if kwargs.get('clone_objects', False): self.copy_objects(target, **kwargs) if 
kwargs.get('clone_tasks', False):", "return target def copy_objects(self, target, **kwargs): # pylint: disable=unused-argument for", "self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None, task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\", False), )) return target", "predicate(field) ).exists()) return TaskGroupObject.query.filter( (TaskGroupObject.task_group_id == cls.id) & or_(*parts) ).exists()", "db.Column(db.Boolean(), nullable=True) task_group_objects = db.relationship( 'TaskGroupObject', backref='task_group', cascade='all, delete-orphan') objects", "target @classmethod def _filter_by_workflow(cls, predicate): from ggrc_workflows.models import Workflow return", "Slugged, db.Model): \"\"\"Workflow TaskGroup model.\"\"\" __tablename__ = 'task_groups' _title_uniqueness =", "Titled, Slugged, Described, Timeboxed, WithContact ) from ggrc.models.reflection import AttributeInfo", "delete-orphan') cycle_task_groups = db.relationship( 'CycleTaskGroup', backref='task_group') sort_index = db.Column( db.String(length=250),", "ggrc.login import get_current_user from ggrc.models.associationproxy import association_proxy from ggrc.models.mixins import", "target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context, )) return target def copy_tasks(self, target, **kwargs):", "TaskGroup model.\"\"\" from sqlalchemy import or_ from ggrc import db", "} def copy(self, _other=None, **kwargs): columns = [ 'title', 'description',", "task_group_object in self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context, )) return target def", "= db.Column(db.Boolean(), nullable=True) task_group_objects = db.relationship( 'TaskGroupObject', backref='task_group', cascade='all, delete-orphan')", "= get_current_user() target = self.copy_into(_other, columns, **kwargs) if kwargs.get('clone_objects', False):", "Licensed under 
http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"A module containing the", "# pylint: disable=unused-argument for task_group_object in self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context,", "query is None or field is None or not hasattr(model,", "Slugged, Described, Timeboxed, WithContact ) from ggrc.models.reflection import AttributeInfo from", "_filter_by_objects(cls, predicate): parts = [] for model_name in all_models.__all__: model", "ggrc.models.mixins import ( Titled, Slugged, Described, Timeboxed, WithContact ) from", "import db from ggrc.login import get_current_user from ggrc.models.associationproxy import association_proxy", "[] for model_name in all_models.__all__: model = getattr(all_models, model_name) query", ") from ggrc.models.reflection import AttributeInfo from ggrc.models.reflection import PublishOnly from", "from ggrc.login import get_current_user from ggrc.models.associationproxy import association_proxy from ggrc.models.mixins", "def copy(self, _other=None, **kwargs): columns = [ 'title', 'description', 'workflow',", "def _filter_by_objects(cls, predicate): parts = [] for model_name in all_models.__all__:", "\"description\": \"Details\", \"contact\": { \"display_name\": \"Assignee\", \"mandatory\": True, \"filter_by\": \"_filter_by_contact\",", "WithContact, Timeboxed, Described, Titled, Slugged, db.Model): \"\"\"Workflow TaskGroup model.\"\"\" __tablename__", ").exists() @classmethod def _filter_by_objects(cls, predicate): parts = [] for model_name", "db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False, ) lock_task_order = db.Column(db.Boolean(), nullable=True) task_group_objects =", "PublishOnly from ggrc.models import all_models from ggrc_workflows.models.task_group_object import TaskGroupObject class", "'task_groups' _title_uniqueness = False workflow_id = db.Column( db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"),", 
"'sort_index', 'modified_by', 'context' ] if kwargs.get('clone_people', False) and getattr(self, \"contact\"):", "continue parts.append(query.filter( (TaskGroupObject.object_type == model_name) & (model.id == TaskGroupObject.object_id) &", "}, \"task_group_objects\": { \"display_name\": \"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\", },", "}, } def copy(self, _other=None, **kwargs): columns = [ 'title',", "model.\"\"\" __tablename__ = 'task_groups' _title_uniqueness = False workflow_id = db.Column(", "\"filter_by\": \"_filter_by_contact\", }, \"secondary_contact\": None, \"start_date\": None, \"end_date\": None, \"workflow\":", "_filter_by_workflow(cls, predicate): from ggrc_workflows.models import Workflow return Workflow.query.filter( (Workflow.id ==", "predicate): from ggrc_workflows.models import Workflow return Workflow.query.filter( (Workflow.id == cls.workflow_id)", "Timeboxed, Described, Titled, Slugged, db.Model): \"\"\"Workflow TaskGroup model.\"\"\" __tablename__ =", "| predicate(Workflow.title)) ).exists() @classmethod def _filter_by_objects(cls, predicate): parts = []", "True, \"filter_by\": \"_filter_by_contact\", }, \"secondary_contact\": None, \"start_date\": None, \"end_date\": None,", "Workflow return Workflow.query.filter( (Workflow.id == cls.workflow_id) & (predicate(Workflow.slug) | predicate(Workflow.title))", "= db.Column( db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False, ) lock_task_order = db.Column(db.Boolean(),", "{ \"title\": \"Summary\", \"description\": \"Details\", \"contact\": { \"display_name\": \"Assignee\", \"mandatory\":", "or not hasattr(model, \"id\"): continue parts.append(query.filter( (TaskGroupObject.object_type == model_name) &", "= [ 'workflow', 'task_group_objects', PublishOnly('objects'), 'task_group_tasks', 'lock_task_order', 'sort_index', # Intentionally", "= getattr(model, \"slug\", getattr(model, \"email\", None)) if query is None", 
"'CycleTaskGroup', backref='task_group') sort_index = db.Column( db.String(length=250), default=\"\", nullable=False) _publish_attrs =", "def copy_tasks(self, target, **kwargs): for task_group_task in self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None,", "Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"A", "model_name) query = getattr(model, \"query\", None) field = getattr(model, \"slug\",", "\"mandatory\": True, \"filter_by\": \"_filter_by_contact\", }, \"secondary_contact\": None, \"start_date\": None, \"end_date\":", "pylint: disable=unused-argument for task_group_object in self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context, ))", "Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see", "model_name) & (model.id == TaskGroupObject.object_id) & predicate(field) ).exists()) return TaskGroupObject.query.filter(", "is None or not hasattr(model, \"id\"): continue parts.append(query.filter( (TaskGroupObject.object_type ==", "\"title\": \"Summary\", \"description\": \"Details\", \"contact\": { \"display_name\": \"Assignee\", \"mandatory\": True,", "db from ggrc.login import get_current_user from ggrc.models.associationproxy import association_proxy from", "from ggrc_workflows.models import Workflow return Workflow.query.filter( (Workflow.id == cls.workflow_id) &", "else: kwargs[\"contact\"] = get_current_user() target = self.copy_into(_other, columns, **kwargs) if", "\"secondary_contact\": None, \"start_date\": None, \"end_date\": None, \"workflow\": { \"display_name\": \"Workflow\",", "db.relationship( 'TaskGroupTask', backref='task_group', cascade='all, delete-orphan') cycle_task_groups = db.relationship( 'CycleTaskGroup', backref='task_group')", "all_models.__all__: model = getattr(all_models, model_name) query = getattr(model, \"query\", None)", "from ggrc.models.mixins 
import ( Titled, Slugged, Described, Timeboxed, WithContact )", "if kwargs.get('clone_objects', False): self.copy_objects(target, **kwargs) if kwargs.get('clone_tasks', False): self.copy_tasks(target, **kwargs)", "containing the workflow TaskGroup model.\"\"\" from sqlalchemy import or_ from", "'description', 'workflow', 'sort_index', 'modified_by', 'context' ] if kwargs.get('clone_people', False) and", "file> \"\"\"A module containing the workflow TaskGroup model.\"\"\" from sqlalchemy", "\"contact\"): columns.append(\"contact\") else: kwargs[\"contact\"] = get_current_user() target = self.copy_into(_other, columns,", "copy_tasks(self, target, **kwargs): for task_group_task in self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None, task_group=target,", "\"Summary\", \"description\": \"Details\", \"contact\": { \"display_name\": \"Assignee\", \"mandatory\": True, \"filter_by\":", "'title', 'description', 'workflow', 'sort_index', 'modified_by', 'context' ] if kwargs.get('clone_people', False)", "Described, Timeboxed, WithContact ) from ggrc.models.reflection import AttributeInfo from ggrc.models.reflection", "from ggrc.models.reflection import PublishOnly from ggrc.models import all_models from ggrc_workflows.models.task_group_object", "2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>", "\"display_name\": \"Assignee\", \"mandatory\": True, \"filter_by\": \"_filter_by_contact\", }, \"secondary_contact\": None, \"start_date\":", "Inc. 
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"A module", "ggrc_workflows.models import Workflow return Workflow.query.filter( (Workflow.id == cls.workflow_id) & (predicate(Workflow.slug)", "\"Workflow\", \"mandatory\": True, \"filter_by\": \"_filter_by_workflow\", }, \"task_group_objects\": { \"display_name\": \"Objects\",", "task_group=target, context=target.context, )) return target def copy_tasks(self, target, **kwargs): for", "return Workflow.query.filter( (Workflow.id == cls.workflow_id) & (predicate(Workflow.slug) | predicate(Workflow.title)) ).exists()", "`cycle_task_groups` # 'cycle_task_groups', ] _aliases = { \"title\": \"Summary\", \"description\":", "# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"A module containing", "= [ 'title', 'description', 'workflow', 'sort_index', 'modified_by', 'context' ] if", "(model.id == TaskGroupObject.object_id) & predicate(field) ).exists()) return TaskGroupObject.query.filter( (TaskGroupObject.task_group_id ==", "'sort_index', # Intentionally do not include `cycle_task_groups` # 'cycle_task_groups', ]", "TaskGroupObject class TaskGroup( WithContact, Timeboxed, Described, Titled, Slugged, db.Model): \"\"\"Workflow", "'TaskGroupTask', backref='task_group', cascade='all, delete-orphan') cycle_task_groups = db.relationship( 'CycleTaskGroup', backref='task_group') sort_index", "db.String(length=250), default=\"\", nullable=False) _publish_attrs = [ 'workflow', 'task_group_objects', PublishOnly('objects'), 'task_group_tasks',", "sort_index = db.Column( db.String(length=250), default=\"\", nullable=False) _publish_attrs = [ 'workflow',", "\"Details\", \"contact\": { \"display_name\": \"Assignee\", \"mandatory\": True, \"filter_by\": \"_filter_by_contact\", },", "task_group_objects = db.relationship( 'TaskGroupObject', backref='task_group', cascade='all, delete-orphan') objects = association_proxy(", "= association_proxy( 'task_group_objects', 
'object', 'TaskGroupObject') task_group_tasks = db.relationship( 'TaskGroupTask', backref='task_group',", "False), )) return target @classmethod def _filter_by_workflow(cls, predicate): from ggrc_workflows.models", "import TaskGroupObject class TaskGroup( WithContact, Timeboxed, Described, Titled, Slugged, db.Model):", "\"start_date\": None, \"end_date\": None, \"workflow\": { \"display_name\": \"Workflow\", \"mandatory\": True,", "kwargs.get('clone_tasks', False): self.copy_tasks(target, **kwargs) return target def copy_objects(self, target, **kwargs):", "{ \"display_name\": \"Assignee\", \"mandatory\": True, \"filter_by\": \"_filter_by_contact\", }, \"secondary_contact\": None,", "from ggrc.models.associationproxy import association_proxy from ggrc.models.mixins import ( Titled, Slugged,", "None or field is None or not hasattr(model, \"id\"): continue", "ggrc.models.reflection import PublishOnly from ggrc.models import all_models from ggrc_workflows.models.task_group_object import", "\"_filter_by_contact\", }, \"secondary_contact\": None, \"start_date\": None, \"end_date\": None, \"workflow\": {", "PublishOnly('objects'), 'task_group_tasks', 'lock_task_order', 'sort_index', # Intentionally do not include `cycle_task_groups`", "copy(self, _other=None, **kwargs): columns = [ 'title', 'description', 'workflow', 'sort_index',", "= 'task_groups' _title_uniqueness = False workflow_id = db.Column( db.Integer, db.ForeignKey('workflows.id',", "'task_group_objects', PublishOnly('objects'), 'task_group_tasks', 'lock_task_order', 'sort_index', # Intentionally do not include", "\"end_date\": None, \"workflow\": { \"display_name\": \"Workflow\", \"mandatory\": True, \"filter_by\": \"_filter_by_workflow\",", "\"Assignee\", \"mandatory\": True, \"filter_by\": \"_filter_by_contact\", }, \"secondary_contact\": None, \"start_date\": None,", "target def copy_tasks(self, target, **kwargs): for task_group_task in self.task_group_tasks: 
target.task_group_tasks.append(task_group_task.copy(", "\"slug\", getattr(model, \"email\", None)) if query is None or field", "= db.relationship( 'CycleTaskGroup', backref='task_group') sort_index = db.Column( db.String(length=250), default=\"\", nullable=False)", "self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context, )) return target def copy_tasks(self, target,", "__tablename__ = 'task_groups' _title_uniqueness = False workflow_id = db.Column( db.Integer,", "getattr(all_models, model_name) query = getattr(model, \"query\", None) field = getattr(model,", "import PublishOnly from ggrc.models import all_models from ggrc_workflows.models.task_group_object import TaskGroupObject", "model.\"\"\" from sqlalchemy import or_ from ggrc import db from", "from sqlalchemy import or_ from ggrc import db from ggrc.login", "backref='task_group', cascade='all, delete-orphan') cycle_task_groups = db.relationship( 'CycleTaskGroup', backref='task_group') sort_index =", "= False workflow_id = db.Column( db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False, )", "if kwargs.get('clone_people', False) and getattr(self, \"contact\"): columns.append(\"contact\") else: kwargs[\"contact\"] =", "task_group_tasks = db.relationship( 'TaskGroupTask', backref='task_group', cascade='all, delete-orphan') cycle_task_groups = db.relationship(", "**kwargs) if kwargs.get('clone_tasks', False): self.copy_tasks(target, **kwargs) return target def copy_objects(self,", "'lock_task_order', 'sort_index', # Intentionally do not include `cycle_task_groups` # 'cycle_task_groups',", "[ 'title', 'description', 'workflow', 'sort_index', 'modified_by', 'context' ] if kwargs.get('clone_people',", "lock_task_order = db.Column(db.Boolean(), nullable=True) task_group_objects = db.relationship( 'TaskGroupObject', backref='task_group', cascade='all,", "= [] for model_name in all_models.__all__: model = getattr(all_models, 
model_name)", "= getattr(model, \"query\", None) field = getattr(model, \"slug\", getattr(model, \"email\",", "& (model.id == TaskGroupObject.object_id) & predicate(field) ).exists()) return TaskGroupObject.query.filter( (TaskGroupObject.task_group_id", "model = getattr(all_models, model_name) query = getattr(model, \"query\", None) field", "objects = association_proxy( 'task_group_objects', 'object', 'TaskGroupObject') task_group_tasks = db.relationship( 'TaskGroupTask',", "\"\"\"Workflow TaskGroup model.\"\"\" __tablename__ = 'task_groups' _title_uniqueness = False workflow_id", "query = getattr(model, \"query\", None) field = getattr(model, \"slug\", getattr(model,", "db.relationship( 'TaskGroupObject', backref='task_group', cascade='all, delete-orphan') objects = association_proxy( 'task_group_objects', 'object',", "**kwargs): # pylint: disable=unused-argument for task_group_object in self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target,", "import AttributeInfo from ggrc.models.reflection import PublishOnly from ggrc.models import all_models", "context=target.context, clone_people=kwargs.get(\"clone_people\", False), )) return target @classmethod def _filter_by_workflow(cls, predicate):", "& (predicate(Workflow.slug) | predicate(Workflow.title)) ).exists() @classmethod def _filter_by_objects(cls, predicate): parts", "== model_name) & (model.id == TaskGroupObject.object_id) & predicate(field) ).exists()) return", "None) field = getattr(model, \"slug\", getattr(model, \"email\", None)) if query", "# Copyright (C) 2016 Google Inc. 
# Licensed under http://www.apache.org/licenses/LICENSE-2.0", "is None or field is None or not hasattr(model, \"id\"):", "or_ from ggrc import db from ggrc.login import get_current_user from", "\"filter_by\": \"_filter_by_objects\", }, } def copy(self, _other=None, **kwargs): columns =", "db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False, ) lock_task_order = db.Column(db.Boolean(), nullable=True) task_group_objects", "= { \"title\": \"Summary\", \"description\": \"Details\", \"contact\": { \"display_name\": \"Assignee\",", "<see LICENSE file> \"\"\"A module containing the workflow TaskGroup model.\"\"\"", "from ggrc import db from ggrc.login import get_current_user from ggrc.models.associationproxy", "return target def copy_tasks(self, target, **kwargs): for task_group_task in self.task_group_tasks:", "= getattr(all_models, model_name) query = getattr(model, \"query\", None) field =", "db.Column( db.Integer, db.ForeignKey('workflows.id', ondelete=\"CASCADE\"), nullable=False, ) lock_task_order = db.Column(db.Boolean(), nullable=True)", "disable=unused-argument for task_group_object in self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context, )) return", "'cycle_task_groups', ] _aliases = { \"title\": \"Summary\", \"description\": \"Details\", \"contact\":", "(C) 2016 Google Inc. 
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE", "or field is None or not hasattr(model, \"id\"): continue parts.append(query.filter(", "if query is None or field is None or not", "task_group_task in self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None, task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\", False), ))", "def _filter_by_workflow(cls, predicate): from ggrc_workflows.models import Workflow return Workflow.query.filter( (Workflow.id", "False): self.copy_tasks(target, **kwargs) return target def copy_objects(self, target, **kwargs): #", "Described, Titled, Slugged, db.Model): \"\"\"Workflow TaskGroup model.\"\"\" __tablename__ = 'task_groups'", "\"id\"): continue parts.append(query.filter( (TaskGroupObject.object_type == model_name) & (model.id == TaskGroupObject.object_id)", "( Titled, Slugged, Described, Timeboxed, WithContact ) from ggrc.models.reflection import", "AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\", }, } def copy(self, _other=None, **kwargs): columns", "do not include `cycle_task_groups` # 'cycle_task_groups', ] _aliases = {", "_publish_attrs = [ 'workflow', 'task_group_objects', PublishOnly('objects'), 'task_group_tasks', 'lock_task_order', 'sort_index', #", "ggrc.models.associationproxy import association_proxy from ggrc.models.mixins import ( Titled, Slugged, Described,", "ggrc import db from ggrc.login import get_current_user from ggrc.models.associationproxy import", "\"_filter_by_objects\", }, } def copy(self, _other=None, **kwargs): columns = [", "for task_group_object in self.task_group_objects: target.task_group_objects.append(task_group_object.copy( task_group=target, context=target.context, )) return target", ") lock_task_order = db.Column(db.Boolean(), nullable=True) task_group_objects = db.relationship( 'TaskGroupObject', backref='task_group',", "cls.workflow_id) & (predicate(Workflow.slug) | 
predicate(Workflow.title)) ).exists() @classmethod def _filter_by_objects(cls, predicate):", "'task_group_objects', 'object', 'TaskGroupObject') task_group_tasks = db.relationship( 'TaskGroupTask', backref='task_group', cascade='all, delete-orphan')", "\"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\", }, } def copy(self, _other=None,", "copy_objects(self, target, **kwargs): # pylint: disable=unused-argument for task_group_object in self.task_group_objects:", "field is None or not hasattr(model, \"id\"): continue parts.append(query.filter( (TaskGroupObject.object_type", "def copy_objects(self, target, **kwargs): # pylint: disable=unused-argument for task_group_object in", "TaskGroup model.\"\"\" __tablename__ = 'task_groups' _title_uniqueness = False workflow_id =", "association_proxy( 'task_group_objects', 'object', 'TaskGroupObject') task_group_tasks = db.relationship( 'TaskGroupTask', backref='task_group', cascade='all,", "@classmethod def _filter_by_workflow(cls, predicate): from ggrc_workflows.models import Workflow return Workflow.query.filter(", "from ggrc_workflows.models.task_group_object import TaskGroupObject class TaskGroup( WithContact, Timeboxed, Described, Titled,", "None, task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\", False), )) return target @classmethod def", "import ( Titled, Slugged, Described, Timeboxed, WithContact ) from ggrc.models.reflection", "'TaskGroupObject', backref='task_group', cascade='all, delete-orphan') objects = association_proxy( 'task_group_objects', 'object', 'TaskGroupObject')", "under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"A module containing the workflow", "in self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None, task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\", False), )) return", "**kwargs): columns = [ 'title', 'description', 'workflow', 
'sort_index', 'modified_by', 'context'", "ggrc.models import all_models from ggrc_workflows.models.task_group_object import TaskGroupObject class TaskGroup( WithContact,", "None, \"start_date\": None, \"end_date\": None, \"workflow\": { \"display_name\": \"Workflow\", \"mandatory\":", "from ggrc.models import all_models from ggrc_workflows.models.task_group_object import TaskGroupObject class TaskGroup(", "nullable=True) task_group_objects = db.relationship( 'TaskGroupObject', backref='task_group', cascade='all, delete-orphan') objects =", "_other=None, **kwargs): columns = [ 'title', 'description', 'workflow', 'sort_index', 'modified_by',", "(TaskGroupObject.object_type == model_name) & (model.id == TaskGroupObject.object_id) & predicate(field) ).exists())", "{ \"display_name\": \"Workflow\", \"mandatory\": True, \"filter_by\": \"_filter_by_workflow\", }, \"task_group_objects\": {", "include `cycle_task_groups` # 'cycle_task_groups', ] _aliases = { \"title\": \"Summary\",", "target, **kwargs): for task_group_task in self.task_group_tasks: target.task_group_tasks.append(task_group_task.copy( None, task_group=target, context=target.context,", "**kwargs) return target def copy_objects(self, target, **kwargs): # pylint: disable=unused-argument", "'task_group_tasks', 'lock_task_order', 'sort_index', # Intentionally do not include `cycle_task_groups` #", "context=target.context, )) return target def copy_tasks(self, target, **kwargs): for task_group_task", "columns = [ 'title', 'description', 'workflow', 'sort_index', 'modified_by', 'context' ]", "# Intentionally do not include `cycle_task_groups` # 'cycle_task_groups', ] _aliases", ")) return target @classmethod def _filter_by_workflow(cls, predicate): from ggrc_workflows.models import", "default=\"\", nullable=False) _publish_attrs = [ 'workflow', 'task_group_objects', PublishOnly('objects'), 'task_group_tasks', 'lock_task_order',", "None)) if query is None or field is None or", "predicate(Workflow.title)) ).exists() 
@classmethod def _filter_by_objects(cls, predicate): parts = [] for", "cycle_task_groups = db.relationship( 'CycleTaskGroup', backref='task_group') sort_index = db.Column( db.String(length=250), default=\"\",", "Timeboxed, WithContact ) from ggrc.models.reflection import AttributeInfo from ggrc.models.reflection import", "return target @classmethod def _filter_by_workflow(cls, predicate): from ggrc_workflows.models import Workflow", "target, **kwargs): # pylint: disable=unused-argument for task_group_object in self.task_group_objects: target.task_group_objects.append(task_group_object.copy(", "# 'cycle_task_groups', ] _aliases = { \"title\": \"Summary\", \"description\": \"Details\",", "\"\"\"A module containing the workflow TaskGroup model.\"\"\" from sqlalchemy import", "workflow TaskGroup model.\"\"\" from sqlalchemy import or_ from ggrc import", "WithContact ) from ggrc.models.reflection import AttributeInfo from ggrc.models.reflection import PublishOnly", "getattr(self, \"contact\"): columns.append(\"contact\") else: kwargs[\"contact\"] = get_current_user() target = self.copy_into(_other,", "nullable=False) _publish_attrs = [ 'workflow', 'task_group_objects', PublishOnly('objects'), 'task_group_tasks', 'lock_task_order', 'sort_index',", "None or not hasattr(model, \"id\"): continue parts.append(query.filter( (TaskGroupObject.object_type == model_name)", "= db.Column( db.String(length=250), default=\"\", nullable=False) _publish_attrs = [ 'workflow', 'task_group_objects',", "module containing the workflow TaskGroup model.\"\"\" from sqlalchemy import or_", "self.copy_tasks(target, **kwargs) return target def copy_objects(self, target, **kwargs): # pylint:", "'object', 'TaskGroupObject') task_group_tasks = db.relationship( 'TaskGroupTask', backref='task_group', cascade='all, delete-orphan') cycle_task_groups", "if kwargs.get('clone_tasks', False): self.copy_tasks(target, **kwargs) return target def copy_objects(self, target,", ")) return target def 
copy_tasks(self, target, **kwargs): for task_group_task in", "\"display_name\": \"Workflow\", \"mandatory\": True, \"filter_by\": \"_filter_by_workflow\", }, \"task_group_objects\": { \"display_name\":", "{ \"display_name\": \"Objects\", \"type\": AttributeInfo.Type.SPECIAL_MAPPING, \"filter_by\": \"_filter_by_objects\", }, } def", "False) and getattr(self, \"contact\"): columns.append(\"contact\") else: kwargs[\"contact\"] = get_current_user() target", "@classmethod def _filter_by_objects(cls, predicate): parts = [] for model_name in", "getattr(model, \"email\", None)) if query is None or field is", "target.task_group_tasks.append(task_group_task.copy( None, task_group=target, context=target.context, clone_people=kwargs.get(\"clone_people\", False), )) return target @classmethod" ]
[ "app \"\"\" app = rumps.App(\"TestApp\") app.settings = {} return app", "{} return app def test_setting_is_true(mocker, basic_app): \"\"\"Check if setting is", "\"\"\" app = rumps.App(\"TestApp\") app.settings = {} return app def", "create_app(): \"\"\"Creates a basic app object with some variables to", "def test_setting_is_true(mocker, basic_app): \"\"\"Check if setting is changed correctly if", "False mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker, basic_app): \"\"\"Check if setting is changed", "= rumps.App(\"TestApp\") app.settings = {} return app def test_setting_is_true(mocker, basic_app):", "def create_app(): \"\"\"Creates a basic app object with some variables", "some variables to pass to functions Returns: rumps.App: Basic app", "pytest import rumps from src.app_functions.menu.change_auto_login import change_auto_login @pytest.fixture(name=\"basic_app\") def create_app():", "basic_app.settings[\"auto_login\"] = False mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"]", "mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is True mock_function.assert_called_once_with(basic_app)", "app def test_setting_is_true(mocker, basic_app): \"\"\"Check if setting is changed correctly", "changed correctly if false\"\"\" basic_app.settings[\"auto_login\"] = False mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\")", "correctly if True\"\"\" basic_app.settings[\"auto_login\"] = True mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\")", "return 
app def test_setting_is_true(mocker, basic_app): \"\"\"Check if setting is changed", "basic_app): \"\"\"Check if setting is changed correctly if True\"\"\" basic_app.settings[\"auto_login\"]", "test_setting_is_false(mocker, basic_app): \"\"\"Check if setting is changed correctly if false\"\"\"", "to pass to functions Returns: rumps.App: Basic app \"\"\" app", "app = rumps.App(\"TestApp\") app.settings = {} return app def test_setting_is_true(mocker,", "def test_setting_is_false(mocker, basic_app): \"\"\"Check if setting is changed correctly if", "app object with some variables to pass to functions Returns:", "mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker, basic_app): \"\"\"Check if setting is changed correctly", "basic_app): \"\"\"Check if setting is changed correctly if false\"\"\" basic_app.settings[\"auto_login\"]", "= {} return app def test_setting_is_true(mocker, basic_app): \"\"\"Check if setting", "app.settings = {} return app def test_setting_is_true(mocker, basic_app): \"\"\"Check if", "src.app_functions.menu.change_auto_login import change_auto_login @pytest.fixture(name=\"basic_app\") def create_app(): \"\"\"Creates a basic app", "setting is changed correctly if True\"\"\" basic_app.settings[\"auto_login\"] = True mock_function", "false\"\"\" basic_app.settings[\"auto_login\"] = False mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert", "if setting is changed correctly if True\"\"\" basic_app.settings[\"auto_login\"] = True", "\"\"\"Creates a basic app object with some variables to pass", "rumps from src.app_functions.menu.change_auto_login import change_auto_login @pytest.fixture(name=\"basic_app\") def create_app(): \"\"\"Creates a", "if setting is changed correctly if false\"\"\" basic_app.settings[\"auto_login\"] = False", "= 
mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is False mock_function.assert_called_once_with(basic_app) def", "import change_auto_login @pytest.fixture(name=\"basic_app\") def create_app(): \"\"\"Creates a basic app object", "test_setting_is_true(mocker, basic_app): \"\"\"Check if setting is changed correctly if True\"\"\"", "mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is False mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker, basic_app):", "change_auto_login @pytest.fixture(name=\"basic_app\") def create_app(): \"\"\"Creates a basic app object with", "is False mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker, basic_app): \"\"\"Check if setting is", "@pytest.fixture(name=\"basic_app\") def create_app(): \"\"\"Creates a basic app object with some", "changed correctly if True\"\"\" basic_app.settings[\"auto_login\"] = True mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\")", "rumps.App(\"TestApp\") app.settings = {} return app def test_setting_is_true(mocker, basic_app): \"\"\"Check", "if True\"\"\" basic_app.settings[\"auto_login\"] = True mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app)", "to functions Returns: rumps.App: Basic app \"\"\" app = rumps.App(\"TestApp\")", "= False mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is", "assert basic_app.settings[\"auto_login\"] is False 
mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker, basic_app): \"\"\"Check if", "is changed correctly if True\"\"\" basic_app.settings[\"auto_login\"] = True mock_function =", "\"\"\"Check if setting is changed correctly if false\"\"\" basic_app.settings[\"auto_login\"] =", "False mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is True", "basic_app.settings[\"auto_login\"] = True mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"]", "basic_app.settings[\"auto_login\"] is False mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker, basic_app): \"\"\"Check if setting", "True\"\"\" basic_app.settings[\"auto_login\"] = True mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert", "Basic app \"\"\" app = rumps.App(\"TestApp\") app.settings = {} return", "True mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is False", "import pytest import rumps from src.app_functions.menu.change_auto_login import change_auto_login @pytest.fixture(name=\"basic_app\") def", "if false\"\"\" basic_app.settings[\"auto_login\"] = False mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app)", "from 
src.app_functions.menu.change_auto_login import change_auto_login @pytest.fixture(name=\"basic_app\") def create_app(): \"\"\"Creates a basic", "object with some variables to pass to functions Returns: rumps.App:", "with some variables to pass to functions Returns: rumps.App: Basic", "= True mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is", "change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is False mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker, basic_app): \"\"\"Check", "is changed correctly if false\"\"\" basic_app.settings[\"auto_login\"] = False mock_function =", "correctly if false\"\"\" basic_app.settings[\"auto_login\"] = False mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\")", "mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is False mock_function.assert_called_once_with(basic_app) def test_setting_is_false(mocker,", "setting is changed correctly if false\"\"\" basic_app.settings[\"auto_login\"] = False mock_function", "functions Returns: rumps.App: Basic app \"\"\" app = rumps.App(\"TestApp\") app.settings", "a basic app object with some variables to pass to", "import rumps from src.app_functions.menu.change_auto_login import change_auto_login @pytest.fixture(name=\"basic_app\") def create_app(): \"\"\"Creates", "variables to pass to functions Returns: rumps.App: Basic app \"\"\"", "pass to functions Returns: rumps.App: Basic app \"\"\" app =", "\"\"\"Check if setting is changed correctly if True\"\"\" basic_app.settings[\"auto_login\"] =", 
"rumps.App: Basic app \"\"\" app = rumps.App(\"TestApp\") app.settings = {}", "mock_function = mocker.patch(\"src.app_functions.menu.change_auto_login.update_menu\") mocker.patch(\"src.app_functions.menu.change_auto_login.save_settings\") change_auto_login(basic_app) assert basic_app.settings[\"auto_login\"] is False mock_function.assert_called_once_with(basic_app)", "Returns: rumps.App: Basic app \"\"\" app = rumps.App(\"TestApp\") app.settings =", "basic app object with some variables to pass to functions" ]
[ "The model uses cross-entroy loss. A weight decay is used", "for evaluating on training data. test_init_op: A tensorflow operation initializing", "training data. test_init_op: A tensorflow operation initializing the test problem", "training phase. train_eval_init_op: A tensorflow operation initializing the test problem", "= tf.argmax(y, 1) correct_prediction = tf.equal(y_pred, y_correct) self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,", "self.dataset.test_init_op training = tf.equal(self.dataset.phase, \"train\") x, y = self.dataset.batch linear_outputs", "consists of 19 weight layers, of mostly convolutions. The model", "import _vgg from ..datasets.cifar100 import cifar100 from .testproblem import TestProblem", "in the `original paper`_. VGG 19 consists of 19 weight", "x, y = self.dataset.batch linear_outputs = _vgg( x, training, variant=19,", "num_outputs=100, weight_decay=self._weight_decay, ) self.losses = tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs ) y_pred", "architecture for CIFAR-100.\"\"\" import tensorflow as tf from ._vgg import", "loss. A weight decay is used on the weights (but", "def __init__(self, batch_size, weight_decay=5e-4): \"\"\"Create a new VGG 19 test", "from .testproblem import TestProblem class cifar100_vgg19(TestProblem): \"\"\"DeepOBS test problem class", "class cifar100_vgg19(TestProblem): \"\"\"DeepOBS test problem class for the VGG 19", "containing a regularization term. accuracy: A scalar tf.Tensor containing the", "= tf.equal(self.dataset.phase, \"train\") x, y = self.dataset.batch linear_outputs = _vgg(", "tf.argmax(linear_outputs, 1) y_correct = tf.argmax(y, 1) correct_prediction = tf.equal(y_pred, y_correct)", "the original VGG network, which was designed for ImageNet. Details", "DeepOBS data set class for Cifar-100. 
train_init_op: A tensorflow operation", "by ``224`` to fit the input dimension of the original", "TestProblem class cifar100_vgg19(TestProblem): \"\"\"DeepOBS test problem class for the VGG", "the mini-batch mean accuracy. \"\"\" def __init__(self, batch_size, weight_decay=5e-4): \"\"\"Create", "variant=19, num_outputs=100, weight_decay=self._weight_decay, ) self.losses = tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs )", "scalar tf.Tensor containing a regularization term. accuracy: A scalar tf.Tensor", "y_correct = tf.argmax(y, 1) correct_prediction = tf.equal(y_pred, y_correct) self.accuracy =", "not the biases. Defaults to ``5e-4``. Attributes: dataset: The DeepOBS", "1) correct_prediction = tf.equal(y_pred, y_correct) self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) self.regularizer", "the weights but not the biases. Defaults to ``5e-4``. Attributes:", "cross-entroy loss. A weight decay is used on the weights", "from ._vgg import _vgg from ..datasets.cifar100 import cifar100 from .testproblem", "https://arxiv.org/abs/1409.1556 Args: batch_size (int): Batch size to use. weight_decay (float):", "coding: utf-8 -*- \"\"\"VGG 19 architecture for CIFAR-100.\"\"\" import tensorflow", "use. weight_decay (float): Weight decay factor. Weight decay (L2-regularization) is", "test problem class for the VGG 19 network on Cifar-100.", "weight_decay=self._weight_decay, ) self.losses = tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs ) y_pred =", "A scalar tf.Tensor containing the mini-batch mean accuracy. \"\"\" def", "1) y_correct = tf.argmax(y, 1) correct_prediction = tf.equal(y_pred, y_correct) self.accuracy", "input dimension of the original VGG network, which was designed", "tf.equal(self.dataset.phase, \"train\") x, y = self.dataset.batch linear_outputs = _vgg( x,", "is used on the weights but not the biases. 
Defaults", "_vgg( x, training, variant=19, num_outputs=100, weight_decay=self._weight_decay, ) self.losses = tf.nn.softmax_cross_entropy_with_logits_v2(", "\"\"\"DeepOBS test problem class for the VGG 19 network on", "initializing the test problem for the training phase. train_eval_init_op: A", "utf-8 -*- \"\"\"VGG 19 architecture for CIFAR-100.\"\"\" import tensorflow as", "accuracy. \"\"\" def __init__(self, batch_size, weight_decay=5e-4): \"\"\"Create a new VGG", "logits=linear_outputs ) y_pred = tf.argmax(linear_outputs, 1) y_correct = tf.argmax(y, 1)", "can be found in the `original paper`_. VGG 19 consists", "_vgg from ..datasets.cifar100 import cifar100 from .testproblem import TestProblem class", "weights (but not the biases) which defaults to ``5e-4``. ..", "batch_size (int): Batch size to use. weight_decay (float): Weight decay", "to fit the input dimension of the original VGG network,", "from ..datasets.cifar100 import cifar100 from .testproblem import TestProblem class cifar100_vgg19(TestProblem):", "operation initializing the test problem for evaluating on training data.", "on test data. losses: A tf.Tensor of shape (batch_size, )", "Cifar-100. Args: batch_size (int): Batch size to use. weight_decay (float):", "problem on Cifar-100.\"\"\" self.dataset = cifar100(self._batch_size) self.train_init_op = self.dataset.train_init_op self.train_eval_init_op", "but not the biases. Defaults to ``5e-4``. \"\"\" super(cifar100_vgg19, self).__init__(batch_size,", "of mostly convolutions. The model uses cross-entroy loss. A weight", "Weight decay factor. Weight decay (L2-regularization) is used on the", "be found in the `original paper`_. VGG 19 consists of", "tensorflow operation initializing the test problem for evaluating on test", "test problem for evaluating on training data. test_init_op: A tensorflow", "# -*- coding: utf-8 -*- \"\"\"VGG 19 architecture for CIFAR-100.\"\"\"", "the architecture can be found in the `original paper`_. 
VGG", "defaults to ``5e-4``. .. _original paper: https://arxiv.org/abs/1409.1556 Args: batch_size (int):", "(int): Batch size to use. weight_decay (float): Weight decay factor.", "biases. Defaults to ``5e-4``. \"\"\" super(cifar100_vgg19, self).__init__(batch_size, weight_decay) def set_up(self):", "per-example loss values. regularizer: A scalar tf.Tensor containing a regularization", "The CIFAR-100 images are resized to ``224`` by ``224`` to", "the biases. Defaults to ``5e-4``. Attributes: dataset: The DeepOBS data", "A scalar tf.Tensor containing a regularization term. accuracy: A scalar", "mean accuracy. \"\"\" def __init__(self, batch_size, weight_decay=5e-4): \"\"\"Create a new", "\"train\") x, y = self.dataset.batch linear_outputs = _vgg( x, training,", "self.test_init_op = self.dataset.test_init_op training = tf.equal(self.dataset.phase, \"train\") x, y =", "test problem instance on Cifar-100. Args: batch_size (int): Batch size", "= self.dataset.train_init_op self.train_eval_init_op = self.dataset.train_eval_init_op self.valid_init_op = self.dataset.valid_init_op self.test_init_op =", "ImageNet. Details about the architecture can be found in the", "The DeepOBS data set class for Cifar-100. train_init_op: A tensorflow", "= tf.equal(y_pred, y_correct) self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) self.regularizer = tf.losses.get_regularization_loss()", "values. regularizer: A scalar tf.Tensor containing a regularization term. accuracy:", "Details about the architecture can be found in the `original", "containing the mini-batch mean accuracy. \"\"\" def __init__(self, batch_size, weight_decay=5e-4):", "not the biases. Defaults to ``5e-4``. 
\"\"\" super(cifar100_vgg19, self).__init__(batch_size, weight_decay)", "def set_up(self): \"\"\"Set up the VGG 19 test problem on", "decay is used on the weights (but not the biases)", "\"\"\"VGG 19 architecture for CIFAR-100.\"\"\" import tensorflow as tf from", "Args: batch_size (int): Batch size to use. weight_decay (float): Weight", "mostly convolutions. The model uses cross-entroy loss. A weight decay", "Weight decay (L2-regularization) is used on the weights but not", "= self.dataset.train_eval_init_op self.valid_init_op = self.dataset.valid_init_op self.test_init_op = self.dataset.test_init_op training =", "self.dataset = cifar100(self._batch_size) self.train_init_op = self.dataset.train_init_op self.train_eval_init_op = self.dataset.train_eval_init_op self.valid_init_op", "which defaults to ``5e-4``. .. _original paper: https://arxiv.org/abs/1409.1556 Args: batch_size", "the per-example loss values. regularizer: A scalar tf.Tensor containing a", "training = tf.equal(self.dataset.phase, \"train\") x, y = self.dataset.batch linear_outputs =", "set class for Cifar-100. train_init_op: A tensorflow operation initializing the", "-*- coding: utf-8 -*- \"\"\"VGG 19 architecture for CIFAR-100.\"\"\" import", "designed for ImageNet. Details about the architecture can be found", "convolutions. The model uses cross-entroy loss. A weight decay is", "test problem for evaluating on test data. losses: A tf.Tensor", "test problem on Cifar-100.\"\"\" self.dataset = cifar100(self._batch_size) self.train_init_op = self.dataset.train_init_op", "images are resized to ``224`` by ``224`` to fit the", "class for the VGG 19 network on Cifar-100. The CIFAR-100", "19 consists of 19 weight layers, of mostly convolutions. The", "used on the weights (but not the biases) which defaults", "the biases. Defaults to ``5e-4``. 
\"\"\" super(cifar100_vgg19, self).__init__(batch_size, weight_decay) def", "VGG 19 test problem on Cifar-100.\"\"\" self.dataset = cifar100(self._batch_size) self.train_init_op", "tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs ) y_pred = tf.argmax(linear_outputs, 1) y_correct =", "test data. losses: A tf.Tensor of shape (batch_size, ) containing", "evaluating on test data. losses: A tf.Tensor of shape (batch_size,", ".. _original paper: https://arxiv.org/abs/1409.1556 Args: batch_size (int): Batch size to", "shape (batch_size, ) containing the per-example loss values. regularizer: A", "fit the input dimension of the original VGG network, which", "was designed for ImageNet. Details about the architecture can be", "A tensorflow operation initializing the test problem for the training", "19 weight layers, of mostly convolutions. The model uses cross-entroy", "19 network on Cifar-100. The CIFAR-100 images are resized to", "instance on Cifar-100. Args: batch_size (int): Batch size to use.", "Defaults to ``5e-4``. Attributes: dataset: The DeepOBS data set class", "biases) which defaults to ``5e-4``. .. _original paper: https://arxiv.org/abs/1409.1556 Args:", "Defaults to ``5e-4``. \"\"\" super(cifar100_vgg19, self).__init__(batch_size, weight_decay) def set_up(self): \"\"\"Set", "decay factor. Weight decay (L2-regularization) is used on the weights", "mini-batch mean accuracy. \"\"\" def __init__(self, batch_size, weight_decay=5e-4): \"\"\"Create a", "weights but not the biases. Defaults to ``5e-4``. \"\"\" super(cifar100_vgg19,", "for CIFAR-100.\"\"\" import tensorflow as tf from ._vgg import _vgg", "on Cifar-100.\"\"\" self.dataset = cifar100(self._batch_size) self.train_init_op = self.dataset.train_init_op self.train_eval_init_op =", "on the weights but not the biases. Defaults to ``5e-4``.", "(float): Weight decay factor. 
Weight decay (L2-regularization) is used on", "tensorflow as tf from ._vgg import _vgg from ..datasets.cifar100 import", "``5e-4``. .. _original paper: https://arxiv.org/abs/1409.1556 Args: batch_size (int): Batch size", "weights but not the biases. Defaults to ``5e-4``. Attributes: dataset:", "test_init_op: A tensorflow operation initializing the test problem for evaluating", "-*- \"\"\"VGG 19 architecture for CIFAR-100.\"\"\" import tensorflow as tf", "uses cross-entroy loss. A weight decay is used on the", "operation initializing the test problem for the training phase. train_eval_init_op:", "CIFAR-100 images are resized to ``224`` by ``224`` to fit", "is used on the weights (but not the biases) which", "accuracy: A scalar tf.Tensor containing the mini-batch mean accuracy. \"\"\"", "self.train_eval_init_op = self.dataset.train_eval_init_op self.valid_init_op = self.dataset.valid_init_op self.test_init_op = self.dataset.test_init_op training", "used on the weights but not the biases. Defaults to", "the test problem for evaluating on test data. losses: A", "cifar100(self._batch_size) self.train_init_op = self.dataset.train_init_op self.train_eval_init_op = self.dataset.train_eval_init_op self.valid_init_op = self.dataset.valid_init_op", "super(cifar100_vgg19, self).__init__(batch_size, weight_decay) def set_up(self): \"\"\"Set up the VGG 19", "cifar100_vgg19(TestProblem): \"\"\"DeepOBS test problem class for the VGG 19 network", "self.dataset.batch linear_outputs = _vgg( x, training, variant=19, num_outputs=100, weight_decay=self._weight_decay, )", "scalar tf.Tensor containing the mini-batch mean accuracy. \"\"\" def __init__(self,", "= self.dataset.valid_init_op self.test_init_op = self.dataset.test_init_op training = tf.equal(self.dataset.phase, \"train\") x,", "as tf from ._vgg import _vgg from ..datasets.cifar100 import cifar100", "Cifar-100. The CIFAR-100 images are resized to ``224`` by ``224``", "to ``5e-4``. .. 
_original paper: https://arxiv.org/abs/1409.1556 Args: batch_size (int): Batch", "initializing the test problem for evaluating on training data. test_init_op:", "tf.Tensor of shape (batch_size, ) containing the per-example loss values.", "model uses cross-entroy loss. A weight decay is used on", "data. test_init_op: A tensorflow operation initializing the test problem for", "of the original VGG network, which was designed for ImageNet.", "containing the per-example loss values. regularizer: A scalar tf.Tensor containing", "..datasets.cifar100 import cifar100 from .testproblem import TestProblem class cifar100_vgg19(TestProblem): \"\"\"DeepOBS", "linear_outputs = _vgg( x, training, variant=19, num_outputs=100, weight_decay=self._weight_decay, ) self.losses", "._vgg import _vgg from ..datasets.cifar100 import cifar100 from .testproblem import", "network on Cifar-100. The CIFAR-100 images are resized to ``224``", "found in the `original paper`_. VGG 19 consists of 19", "cifar100 from .testproblem import TestProblem class cifar100_vgg19(TestProblem): \"\"\"DeepOBS test problem", "not the biases) which defaults to ``5e-4``. .. _original paper:", "__init__(self, batch_size, weight_decay=5e-4): \"\"\"Create a new VGG 19 test problem", "batch_size, weight_decay=5e-4): \"\"\"Create a new VGG 19 test problem instance", "class for Cifar-100. train_init_op: A tensorflow operation initializing the test", "the VGG 19 test problem on Cifar-100.\"\"\" self.dataset = cifar100(self._batch_size)", "labels=y, logits=linear_outputs ) y_pred = tf.argmax(linear_outputs, 1) y_correct = tf.argmax(y,", "train_init_op: A tensorflow operation initializing the test problem for the", "about the architecture can be found in the `original paper`_.", "the biases) which defaults to ``5e-4``. .. _original paper: https://arxiv.org/abs/1409.1556", "VGG 19 test problem instance on Cifar-100. 
Args: batch_size (int):", "dimension of the original VGG network, which was designed for", "weight_decay=5e-4): \"\"\"Create a new VGG 19 test problem instance on", "Attributes: dataset: The DeepOBS data set class for Cifar-100. train_init_op:", "term. accuracy: A scalar tf.Tensor containing the mini-batch mean accuracy.", "Cifar-100.\"\"\" self.dataset = cifar100(self._batch_size) self.train_init_op = self.dataset.train_init_op self.train_eval_init_op = self.dataset.train_eval_init_op", "of shape (batch_size, ) containing the per-example loss values. regularizer:", "are resized to ``224`` by ``224`` to fit the input", "resized to ``224`` by ``224`` to fit the input dimension", "train_eval_init_op: A tensorflow operation initializing the test problem for evaluating", "(batch_size, ) containing the per-example loss values. regularizer: A scalar", "x, training, variant=19, num_outputs=100, weight_decay=self._weight_decay, ) self.losses = tf.nn.softmax_cross_entropy_with_logits_v2( labels=y,", "import TestProblem class cifar100_vgg19(TestProblem): \"\"\"DeepOBS test problem class for the", "for the VGG 19 network on Cifar-100. The CIFAR-100 images", "tf.Tensor containing a regularization term. accuracy: A scalar tf.Tensor containing", "the test problem for the training phase. train_eval_init_op: A tensorflow", "which was designed for ImageNet. Details about the architecture can", "A tf.Tensor of shape (batch_size, ) containing the per-example loss", "19 architecture for CIFAR-100.\"\"\" import tensorflow as tf from ._vgg", "on the weights (but not the biases) which defaults to", "self).__init__(batch_size, weight_decay) def set_up(self): \"\"\"Set up the VGG 19 test", "(but not the biases) which defaults to ``5e-4``. .. 
_original", "decay (L2-regularization) is used on the weights but not the", "training, variant=19, num_outputs=100, weight_decay=self._weight_decay, ) self.losses = tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs", "VGG 19 network on Cifar-100. The CIFAR-100 images are resized", "factor. Weight decay (L2-regularization) is used on the weights but", "for evaluating on test data. losses: A tf.Tensor of shape", "but not the biases. Defaults to ``5e-4``. Attributes: dataset: The", "self.dataset.train_init_op self.train_eval_init_op = self.dataset.train_eval_init_op self.valid_init_op = self.dataset.valid_init_op self.test_init_op = self.dataset.test_init_op", "the test problem for evaluating on training data. test_init_op: A", "VGG network, which was designed for ImageNet. Details about the", ".testproblem import TestProblem class cifar100_vgg19(TestProblem): \"\"\"DeepOBS test problem class for", "(L2-regularization) is used on the weights but not the biases.", "regularizer: A scalar tf.Tensor containing a regularization term. accuracy: A", "import tensorflow as tf from ._vgg import _vgg from ..datasets.cifar100", "``224`` to fit the input dimension of the original VGG", "the weights (but not the biases) which defaults to ``5e-4``.", "A tensorflow operation initializing the test problem for evaluating on", "evaluating on training data. test_init_op: A tensorflow operation initializing the", "tf.Tensor containing the mini-batch mean accuracy. \"\"\" def __init__(self, batch_size,", "the input dimension of the original VGG network, which was", "A weight decay is used on the weights (but not", "size to use. weight_decay (float): Weight decay factor. Weight decay", "losses: A tf.Tensor of shape (batch_size, ) containing the per-example", "``5e-4``. Attributes: dataset: The DeepOBS data set class for Cifar-100.", "layers, of mostly convolutions. The model uses cross-entroy loss. A", "to use. weight_decay (float): Weight decay factor. 
Weight decay (L2-regularization)", "tf.argmax(y, 1) correct_prediction = tf.equal(y_pred, y_correct) self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))", "correct_prediction = tf.equal(y_pred, y_correct) self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) self.regularizer =", "_original paper: https://arxiv.org/abs/1409.1556 Args: batch_size (int): Batch size to use.", "\"\"\"Create a new VGG 19 test problem instance on Cifar-100.", "``5e-4``. \"\"\" super(cifar100_vgg19, self).__init__(batch_size, weight_decay) def set_up(self): \"\"\"Set up the", "set_up(self): \"\"\"Set up the VGG 19 test problem on Cifar-100.\"\"\"", "\"\"\"Set up the VGG 19 test problem on Cifar-100.\"\"\" self.dataset", "self.valid_init_op = self.dataset.valid_init_op self.test_init_op = self.dataset.test_init_op training = tf.equal(self.dataset.phase, \"train\")", "on Cifar-100. The CIFAR-100 images are resized to ``224`` by", "= self.dataset.batch linear_outputs = _vgg( x, training, variant=19, num_outputs=100, weight_decay=self._weight_decay,", "= tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs ) y_pred = tf.argmax(linear_outputs, 1) y_correct", "y = self.dataset.batch linear_outputs = _vgg( x, training, variant=19, num_outputs=100,", "the weights but not the biases. Defaults to ``5e-4``. \"\"\"", "problem for the training phase. train_eval_init_op: A tensorflow operation initializing", "the VGG 19 network on Cifar-100. The CIFAR-100 images are", "``224`` by ``224`` to fit the input dimension of the", "network, which was designed for ImageNet. Details about the architecture", "19 test problem instance on Cifar-100. Args: batch_size (int): Batch", "biases. Defaults to ``5e-4``. Attributes: dataset: The DeepOBS data set", "a regularization term. 
accuracy: A scalar tf.Tensor containing the mini-batch", "= self.dataset.test_init_op training = tf.equal(self.dataset.phase, \"train\") x, y = self.dataset.batch", "VGG 19 consists of 19 weight layers, of mostly convolutions.", "to ``224`` by ``224`` to fit the input dimension of", "self.train_init_op = self.dataset.train_init_op self.train_eval_init_op = self.dataset.train_eval_init_op self.valid_init_op = self.dataset.valid_init_op self.test_init_op", "tf from ._vgg import _vgg from ..datasets.cifar100 import cifar100 from", "CIFAR-100.\"\"\" import tensorflow as tf from ._vgg import _vgg from", "of 19 weight layers, of mostly convolutions. The model uses", "tensorflow operation initializing the test problem for evaluating on training", "problem instance on Cifar-100. Args: batch_size (int): Batch size to", "\"\"\" super(cifar100_vgg19, self).__init__(batch_size, weight_decay) def set_up(self): \"\"\"Set up the VGG", "y_pred = tf.argmax(linear_outputs, 1) y_correct = tf.argmax(y, 1) correct_prediction =", "phase. train_eval_init_op: A tensorflow operation initializing the test problem for", "paper`_. VGG 19 consists of 19 weight layers, of mostly", "weight decay is used on the weights (but not the", "problem for evaluating on test data. losses: A tf.Tensor of", "regularization term. accuracy: A scalar tf.Tensor containing the mini-batch mean", "self.dataset.valid_init_op self.test_init_op = self.dataset.test_init_op training = tf.equal(self.dataset.phase, \"train\") x, y", "the `original paper`_. VGG 19 consists of 19 weight layers,", "weight_decay) def set_up(self): \"\"\"Set up the VGG 19 test problem", "`original paper`_. VGG 19 consists of 19 weight layers, of", "weight_decay (float): Weight decay factor. Weight decay (L2-regularization) is used", "data. losses: A tf.Tensor of shape (batch_size, ) containing the", "for Cifar-100. 
train_init_op: A tensorflow operation initializing the test problem", "operation initializing the test problem for evaluating on test data.", "tensorflow operation initializing the test problem for the training phase.", "for the training phase. train_eval_init_op: A tensorflow operation initializing the", "= cifar100(self._batch_size) self.train_init_op = self.dataset.train_init_op self.train_eval_init_op = self.dataset.train_eval_init_op self.valid_init_op =", "= tf.argmax(linear_outputs, 1) y_correct = tf.argmax(y, 1) correct_prediction = tf.equal(y_pred,", "self.dataset.train_eval_init_op self.valid_init_op = self.dataset.valid_init_op self.test_init_op = self.dataset.test_init_op training = tf.equal(self.dataset.phase,", "new VGG 19 test problem instance on Cifar-100. Args: batch_size", "dataset: The DeepOBS data set class for Cifar-100. train_init_op: A", "on training data. test_init_op: A tensorflow operation initializing the test", "paper: https://arxiv.org/abs/1409.1556 Args: batch_size (int): Batch size to use. weight_decay", "problem class for the VGG 19 network on Cifar-100. The", "original VGG network, which was designed for ImageNet. Details about", ") self.losses = tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs ) y_pred = tf.argmax(linear_outputs,", "test problem for the training phase. train_eval_init_op: A tensorflow operation", "to ``5e-4``. \"\"\" super(cifar100_vgg19, self).__init__(batch_size, weight_decay) def set_up(self): \"\"\"Set up", "Cifar-100. train_init_op: A tensorflow operation initializing the test problem for", "to ``5e-4``. Attributes: dataset: The DeepOBS data set class for", "loss values. regularizer: A scalar tf.Tensor containing a regularization term.", "initializing the test problem for evaluating on test data. losses:", "the training phase. 
train_eval_init_op: A tensorflow operation initializing the test", "import cifar100 from .testproblem import TestProblem class cifar100_vgg19(TestProblem): \"\"\"DeepOBS test", "Batch size to use. weight_decay (float): Weight decay factor. Weight", "on Cifar-100. Args: batch_size (int): Batch size to use. weight_decay", "for ImageNet. Details about the architecture can be found in", "= _vgg( x, training, variant=19, num_outputs=100, weight_decay=self._weight_decay, ) self.losses =", "a new VGG 19 test problem instance on Cifar-100. Args:", ") y_pred = tf.argmax(linear_outputs, 1) y_correct = tf.argmax(y, 1) correct_prediction", "19 test problem on Cifar-100.\"\"\" self.dataset = cifar100(self._batch_size) self.train_init_op =", "\"\"\" def __init__(self, batch_size, weight_decay=5e-4): \"\"\"Create a new VGG 19", "weight layers, of mostly convolutions. The model uses cross-entroy loss.", "up the VGG 19 test problem on Cifar-100.\"\"\" self.dataset =", "problem for evaluating on training data. test_init_op: A tensorflow operation", "self.losses = tf.nn.softmax_cross_entropy_with_logits_v2( labels=y, logits=linear_outputs ) y_pred = tf.argmax(linear_outputs, 1)", "data set class for Cifar-100. train_init_op: A tensorflow operation initializing", ") containing the per-example loss values. regularizer: A scalar tf.Tensor", "architecture can be found in the `original paper`_. VGG 19" ]
[ "year%4==0 and year%100!=0: leap=True else: leap=False return leap year =", "def is_leap(year): leap=False if year%400==0: leap=True elif year%4==0 and year%100!=0:", "elif year%4==0 and year%100!=0: leap=True else: leap=False return leap year", "leap=False if year%400==0: leap=True elif year%4==0 and year%100!=0: leap=True else:", "and year%100!=0: leap=True else: leap=False return leap year = int(input())", "is_leap(year): leap=False if year%400==0: leap=True elif year%4==0 and year%100!=0: leap=True", "year%400==0: leap=True elif year%4==0 and year%100!=0: leap=True else: leap=False return", "if year%400==0: leap=True elif year%4==0 and year%100!=0: leap=True else: leap=False", "leap=True elif year%4==0 and year%100!=0: leap=True else: leap=False return leap" ]
[ "dump_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f: dump(obj,", "'newline'} def split_args(args): \"\"\"Splits args into two groups: open args", "args are used by ``open`` function. Other args are used", "in args['mode'] else TEXT_MODE_ARGS open_args = {} other_args = {}", "**kwargs): open_args, dump_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as", "= {} other_args = {} for arg, value in args.items():", "args. Open args are used by ``open`` function. Other args", "return of ``open`` and data to dump. **base_kwargs: Base arguments", "open_args, load_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f:", "function to avoid context manager boilerplate. Args: dump: Function that", "= split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f: return load(f,", "in mode_args: open_args[arg] = value else: other_args[arg] = value return", "functions. Args: args: Keyword args to split. Returns: open_args: Arguments", "open_args: Arguments for ``open``. other_args: Arguments for ``load``/``dump``. \"\"\" mode_args", "``open``. other_args: Arguments for ``load``/``dump``. \"\"\" mode_args = BIN_MODE_ARGS if", "if arg in mode_args: open_args[arg] = value else: other_args[arg] =", "def read_wrapper(load, **base_kwargs): \"\"\"Wraps ``load`` function to avoid context manager", "dump. **base_kwargs: Base arguments that ``open``/``dump`` take. Returns: Wrapper for", "args are used by ``load``/``dump`` functions. Args: args: Keyword args", "= {} for arg, value in args.items(): if arg in", "the return of ``open`` and data to dump. 
**base_kwargs: Base", "split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f: dump(obj, f, **dump_args)", "'errors', 'newline'} def split_args(args): \"\"\"Splits args into two groups: open", "as f: return load(f, **load_args) return wrapped def write_wrapper(dump, **base_kwargs):", "**base_kwargs: Base arguments that ``open``/``dump`` take. Returns: Wrapper for ``dump``.", "other_args[arg] = value return open_args, other_args def read_wrapper(load, **base_kwargs): \"\"\"Wraps", "load(f, **load_args) return wrapped def write_wrapper(dump, **base_kwargs): \"\"\"Wraps ``dump`` function", "= BIN_MODE_ARGS if 'b' in args['mode'] else TEXT_MODE_ARGS open_args =", "mode_args: open_args[arg] = value else: other_args[arg] = value return open_args,", "into two groups: open args and other args. Open args", "**kwargs}) with open(file, **open_args) as f: return load(f, **load_args) return", "Open args are used by ``open`` function. Other args are", "avoid context manager boilerplate. Args: dump: Function that takes the", "= split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f: dump(obj, f,", "= value return open_args, other_args def read_wrapper(load, **base_kwargs): \"\"\"Wraps ``load``", "return open_args, other_args def read_wrapper(load, **base_kwargs): \"\"\"Wraps ``load`` function to", "open(file, **open_args) as f: return load(f, **load_args) return wrapped def", "= value else: other_args[arg] = value return open_args, other_args def", "\"\"\" def wrapped(file, **kwargs): open_args, load_args = split_args({**base_kwargs, **kwargs}) with", "f: return load(f, **load_args) return wrapped def write_wrapper(dump, **base_kwargs): \"\"\"Wraps", "**base_kwargs): \"\"\"Wraps ``load`` function to avoid context manager boilerplate. 
Args:", "arg in mode_args: open_args[arg] = value else: other_args[arg] = value", "load_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f: return", "\"\"\"Wraps ``load`` function to avoid context manager boilerplate. Args: load:", "take. Returns: Wrapper for ``dump``. \"\"\" def wrapped(file, obj, **kwargs):", "{} other_args = {} for arg, value in args.items(): if", "\"\"\" def wrapped(file, obj, **kwargs): open_args, dump_args = split_args({**base_kwargs, **kwargs})", "'buffering', 'encoding', 'errors', 'newline'} def split_args(args): \"\"\"Splits args into two", "mode_args = BIN_MODE_ARGS if 'b' in args['mode'] else TEXT_MODE_ARGS open_args", "for ``dump``. \"\"\" def wrapped(file, obj, **kwargs): open_args, dump_args =", "**kwargs): open_args, load_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as", "Base arguments that ``open``/``load`` take. Returns: Wrapper for ``load``. \"\"\"", "Returns: open_args: Arguments for ``open``. other_args: Arguments for ``load``/``dump``. \"\"\"", "by ``load``/``dump`` functions. Args: args: Keyword args to split. Returns:", "utility functions.\"\"\" BIN_MODE_ARGS = {'mode', 'buffering', } TEXT_MODE_ARGS = {'mode',", "else TEXT_MODE_ARGS open_args = {} other_args = {} for arg,", "value return open_args, other_args def read_wrapper(load, **base_kwargs): \"\"\"Wraps ``load`` function", "``load``. \"\"\" def wrapped(file, **kwargs): open_args, load_args = split_args({**base_kwargs, **kwargs})", "function. Other args are used by ``load``/``dump`` functions. Args: args:", "the return of ``open``. **base_kwargs: Base arguments that ``open``/``load`` take.", "Returns: Wrapper for ``load``. \"\"\" def wrapped(file, **kwargs): open_args, load_args", "boilerplate. Args: dump: Function that takes the return of ``open``", "else: other_args[arg] = value return open_args, other_args def read_wrapper(load, **base_kwargs):", "Arguments for ``load``/``dump``. 
\"\"\" mode_args = BIN_MODE_ARGS if 'b' in", "Wrapper for ``dump``. \"\"\" def wrapped(file, obj, **kwargs): open_args, dump_args", "wrapped(file, **kwargs): open_args, load_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args)", "**base_kwargs: Base arguments that ``open``/``load`` take. Returns: Wrapper for ``load``.", "other_args = {} for arg, value in args.items(): if arg", "**kwargs}) with open(file, **open_args) as f: dump(obj, f, **dump_args) return", "takes the return of ``open``. **base_kwargs: Base arguments that ``open``/``load``", "of ``open``. **base_kwargs: Base arguments that ``open``/``load`` take. Returns: Wrapper", "args.items(): if arg in mode_args: open_args[arg] = value else: other_args[arg]", "write_wrapper(dump, **base_kwargs): \"\"\"Wraps ``dump`` function to avoid context manager boilerplate.", "context manager boilerplate. Args: dump: Function that takes the return", "open args and other args. Open args are used by", "dump: Function that takes the return of ``open`` and data", "``open``. **base_kwargs: Base arguments that ``open``/``load`` take. Returns: Wrapper for", "args into two groups: open args and other args. Open", "} TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors', 'newline'} def split_args(args):", "Arguments for ``open``. other_args: Arguments for ``load``/``dump``. \"\"\" mode_args =", "with open(file, **open_args) as f: return load(f, **load_args) return wrapped", "that ``open``/``load`` take. Returns: Wrapper for ``load``. \"\"\" def wrapped(file,", "by ``open`` function. Other args are used by ``load``/``dump`` functions.", "functions.\"\"\" BIN_MODE_ARGS = {'mode', 'buffering', } TEXT_MODE_ARGS = {'mode', 'buffering',", "``dump``. \"\"\" def wrapped(file, obj, **kwargs): open_args, dump_args = split_args({**base_kwargs,", "``load``/``dump`` functions. Args: args: Keyword args to split. Returns: open_args:", "Base arguments that ``open``/``dump`` take. Returns: Wrapper for ``dump``. 
\"\"\"", "TEXT_MODE_ARGS open_args = {} other_args = {} for arg, value", "'encoding', 'errors', 'newline'} def split_args(args): \"\"\"Splits args into two groups:", "of ``open`` and data to dump. **base_kwargs: Base arguments that", "Keyword args to split. Returns: open_args: Arguments for ``open``. other_args:", "load: Function that takes the return of ``open``. **base_kwargs: Base", "def split_args(args): \"\"\"Splits args into two groups: open args and", "to dump. **base_kwargs: Base arguments that ``open``/``dump`` take. Returns: Wrapper", "wrapped def write_wrapper(dump, **base_kwargs): \"\"\"Wraps ``dump`` function to avoid context", "open_args[arg] = value else: other_args[arg] = value return open_args, other_args", "used by ``open`` function. Other args are used by ``load``/``dump``", "arguments that ``open``/``dump`` take. Returns: Wrapper for ``dump``. \"\"\" def", "args: Keyword args to split. Returns: open_args: Arguments for ``open``.", "{} for arg, value in args.items(): if arg in mode_args:", "def wrapped(file, obj, **kwargs): open_args, dump_args = split_args({**base_kwargs, **kwargs}) with", "<reponame>byshyk/shortio \"\"\"Contains utility functions.\"\"\" BIN_MODE_ARGS = {'mode', 'buffering', } TEXT_MODE_ARGS", "'b' in args['mode'] else TEXT_MODE_ARGS open_args = {} other_args =", "split. Returns: open_args: Arguments for ``open``. other_args: Arguments for ``load``/``dump``.", "= {'mode', 'buffering', } TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors',", "``open``/``dump`` take. Returns: Wrapper for ``dump``. \"\"\" def wrapped(file, obj,", "for ``load``/``dump``. \"\"\" mode_args = BIN_MODE_ARGS if 'b' in args['mode']", "``open``/``load`` take. Returns: Wrapper for ``load``. 
\"\"\" def wrapped(file, **kwargs):", "return load(f, **load_args) return wrapped def write_wrapper(dump, **base_kwargs): \"\"\"Wraps ``dump``", "{'mode', 'buffering', 'encoding', 'errors', 'newline'} def split_args(args): \"\"\"Splits args into", "def wrapped(file, **kwargs): open_args, load_args = split_args({**base_kwargs, **kwargs}) with open(file,", "\"\"\" mode_args = BIN_MODE_ARGS if 'b' in args['mode'] else TEXT_MODE_ARGS", "take. Returns: Wrapper for ``load``. \"\"\" def wrapped(file, **kwargs): open_args,", "split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f: return load(f, **load_args)", "are used by ``load``/``dump`` functions. Args: args: Keyword args to", "= {'mode', 'buffering', 'encoding', 'errors', 'newline'} def split_args(args): \"\"\"Splits args", "obj, **kwargs): open_args, dump_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args)", "to split. Returns: open_args: Arguments for ``open``. other_args: Arguments for", "are used by ``open`` function. Other args are used by", "**load_args) return wrapped def write_wrapper(dump, **base_kwargs): \"\"\"Wraps ``dump`` function to", "return of ``open``. **base_kwargs: Base arguments that ``open``/``load`` take. Returns:", "Wrapper for ``load``. \"\"\" def wrapped(file, **kwargs): open_args, load_args =", "Args: args: Keyword args to split. Returns: open_args: Arguments for", "that takes the return of ``open`` and data to dump.", "\"\"\"Splits args into two groups: open args and other args.", "``open`` and data to dump. **base_kwargs: Base arguments that ``open``/``dump``", "for ``load``. \"\"\" def wrapped(file, **kwargs): open_args, load_args = split_args({**base_kwargs,", "``dump`` function to avoid context manager boilerplate. Args: dump: Function", "to avoid context manager boilerplate. Args: dump: Function that takes", "args to split. Returns: open_args: Arguments for ``open``. 
other_args: Arguments", "def write_wrapper(dump, **base_kwargs): \"\"\"Wraps ``dump`` function to avoid context manager", "return wrapped def write_wrapper(dump, **base_kwargs): \"\"\"Wraps ``dump`` function to avoid", "**base_kwargs): \"\"\"Wraps ``dump`` function to avoid context manager boilerplate. Args:", "split_args(args): \"\"\"Splits args into two groups: open args and other", "used by ``load``/``dump`` functions. Args: args: Keyword args to split.", "Other args are used by ``load``/``dump`` functions. Args: args: Keyword", "for ``open``. other_args: Arguments for ``load``/``dump``. \"\"\" mode_args = BIN_MODE_ARGS", "Args: load: Function that takes the return of ``open``. **base_kwargs:", "value in args.items(): if arg in mode_args: open_args[arg] = value", "``load``/``dump``. \"\"\" mode_args = BIN_MODE_ARGS if 'b' in args['mode'] else", "data to dump. **base_kwargs: Base arguments that ``open``/``dump`` take. Returns:", "\"\"\"Wraps ``dump`` function to avoid context manager boilerplate. Args: dump:", "other_args: Arguments for ``load``/``dump``. \"\"\" mode_args = BIN_MODE_ARGS if 'b'", "that takes the return of ``open``. **base_kwargs: Base arguments that", "open_args, dump_args = split_args({**base_kwargs, **kwargs}) with open(file, **open_args) as f:", "boilerplate. Args: load: Function that takes the return of ``open``.", "``open`` function. Other args are used by ``load``/``dump`` functions. Args:", "avoid context manager boilerplate. Args: load: Function that takes the", "other args. Open args are used by ``open`` function. Other", "two groups: open args and other args. Open args are", "and data to dump. 
**base_kwargs: Base arguments that ``open``/``dump`` take.", "args['mode'] else TEXT_MODE_ARGS open_args = {} other_args = {} for", "in args.items(): if arg in mode_args: open_args[arg] = value else:", "TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors', 'newline'} def split_args(args): \"\"\"Splits", "read_wrapper(load, **base_kwargs): \"\"\"Wraps ``load`` function to avoid context manager boilerplate.", "**open_args) as f: return load(f, **load_args) return wrapped def write_wrapper(dump,", "Returns: Wrapper for ``dump``. \"\"\" def wrapped(file, obj, **kwargs): open_args,", "with open(file, **open_args) as f: dump(obj, f, **dump_args) return wrapped", "other_args def read_wrapper(load, **base_kwargs): \"\"\"Wraps ``load`` function to avoid context", "{'mode', 'buffering', } TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors', 'newline'}", "Function that takes the return of ``open``. **base_kwargs: Base arguments", "'buffering', } TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors', 'newline'} def", "arg, value in args.items(): if arg in mode_args: open_args[arg] =", "and other args. Open args are used by ``open`` function.", "if 'b' in args['mode'] else TEXT_MODE_ARGS open_args = {} other_args", "\"\"\"Contains utility functions.\"\"\" BIN_MODE_ARGS = {'mode', 'buffering', } TEXT_MODE_ARGS =", "to avoid context manager boilerplate. Args: load: Function that takes", "BIN_MODE_ARGS if 'b' in args['mode'] else TEXT_MODE_ARGS open_args = {}", "open_args = {} other_args = {} for arg, value in", "function to avoid context manager boilerplate. Args: load: Function that", "that ``open``/``dump`` take. Returns: Wrapper for ``dump``. \"\"\" def wrapped(file,", "groups: open args and other args. Open args are used", "manager boilerplate. Args: load: Function that takes the return of", "takes the return of ``open`` and data to dump. 
**base_kwargs:", "value else: other_args[arg] = value return open_args, other_args def read_wrapper(load,", "wrapped(file, obj, **kwargs): open_args, dump_args = split_args({**base_kwargs, **kwargs}) with open(file,", "BIN_MODE_ARGS = {'mode', 'buffering', } TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding',", "arguments that ``open``/``load`` take. Returns: Wrapper for ``load``. \"\"\" def", "args and other args. Open args are used by ``open``", "open_args, other_args def read_wrapper(load, **base_kwargs): \"\"\"Wraps ``load`` function to avoid", "manager boilerplate. Args: dump: Function that takes the return of", "context manager boilerplate. Args: load: Function that takes the return", "Args: dump: Function that takes the return of ``open`` and", "``load`` function to avoid context manager boilerplate. Args: load: Function", "for arg, value in args.items(): if arg in mode_args: open_args[arg]", "Function that takes the return of ``open`` and data to" ]
[ "async def inner(*args, **kwargs): return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds) return", "def outer(wrapped): @functools.wraps(wrapped) async def inner(self, *args, **kwargs): w =", "self_cache = instance_caches[w] return await call_or_get_from_cache( self_cache, wrapped, args, (self,)", "{} # Should be Dict[Any, T] but that doesn't work.", "on the in-flight request. cache[key] = (future, float(\"Inf\")) try: value", "(future, float(\"Inf\")): cache[key] = (future, time.time()) return value if cleanup_self:", "if it's the same future we awaited and # it", "kwargs) return inner return outer async def aiter_to_list(aiter: AsyncIterable[T],) ->", "@functools.wraps(wrapped) async def inner(*args, **kwargs): return await call_or_get_from_cache(cache2, wrapped, args,", "if cache is not None else defaultdict(dict) def on_delete(w): del", "= cache if cache is not None else defaultdict(dict) def", "that anything which is put into `key` will be in", "`self` arg pointing to a huge object. To mitigate that", "in-flight request. 
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
    ttl: Optional[float] = 300,
    cleanup_self: bool = False,
    *,
    cache: Optional[Dict] = None,
) -> Callable[
    [Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]]  # wrapped  # inner
]:
    """Decorator factory caching an async callable's result for ``ttl`` seconds.

    Concurrent callers of the wrapped coroutine share a single in-flight
    future, so the underlying coroutine runs at most once per key at a time.

    Args:
        ttl: seconds a cached value stays fresh; ``None`` means never expire.
        cleanup_self: when True, the wrapped callable is treated as a bound
            method and each instance gets its own cache, dropped once the
            instance is garbage collected (via a weakref callback).
        cache: optional externally-owned mapping to use as the cache store.

    Returns:
        A decorator producing an async wrapper with the same call signature.
    """

    async def _call_or_get_from_cache(store, async_func, args_for_key, args, kwargs):
        # Anything folded into `key` lives in the cache forever, potentially
        # leaking memory.  The most common culprit is `self` pointing at a
        # huge object; `args_for_key` is therefore expected to exclude such
        # values.
        key = functools._make_key(args_for_key, kwargs, typed=False)
        try:
            future, last_update = store[key]
            if ttl is not None and time.time() - last_update > ttl:
                raise KeyError
        except KeyError:
            future = asyncio.ensure_future(async_func(*args, **kwargs))
            # A +infinity timestamp marks an in-flight request so that every
            # caller awaits the same future instead of starting a duplicate.
            store[key] = (future, float("Inf"))

        try:
            value = await future
        except BaseException:
            # BUG FIX: was `except Exception`, which missed CancelledError
            # (a BaseException since Python 3.8) and left a permanently
            # cancelled future in the cache.  Evict on any failure.
            # Only touch the entry if it is still ours (get() guards against
            # the key having been deleted/updated by another coroutine).
            if store.get(key) == (future, float("Inf")):
                del store[key]
            raise
        else:
            # Stamp the entry only if we are still the in-flight owner and it
            # hasn't already been updated by another coroutine.
            if store.get(key) == (future, float("Inf")):
                store[key] = (future, time.time())
            return value

    if cleanup_self:
        instance_caches: Dict = cache if cache is not None else defaultdict(dict)

        def _on_delete(ref):
            # Weakref callback: forget the per-instance cache once the
            # instance itself is gone.
            del instance_caches[ref]

        def outer(wrapped):
            @functools.wraps(wrapped)
            async def inner(self, *args, **kwargs):
                ref = weakref.ref(self, _on_delete)
                return await _call_or_get_from_cache(
                    instance_caches[ref], wrapped, args, (self,) + args, kwargs
                )

            return inner

    else:
        # Should be Dict[Any, T] but that doesn't work.
        shared_cache: Dict = cache if cache is not None else {}

        def outer(wrapped):
            @functools.wraps(wrapped)
            async def inner(*args, **kwargs):
                return await _call_or_get_from_cache(
                    shared_cache, wrapped, args, args, kwargs
                )

            return inner

    return outer
async def aiter_to_list(aiter: AsyncIterable[T],) -> List[T]:
    """Drain an async iterable into a plain list, preserving order."""
    collected: List[T] = []
    async for item in aiter:
        collected.append(item)
    return collected
def async_timeout(
    seconds: int = 10,
) -> Callable[
    [Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]]  # wrapped  # inner
]:
    """Decorator factory bounding an async callable to ``seconds`` of runtime.

    The wrapper delegates to ``asyncio.wait_for``, so exceeding the budget
    cancels the inner coroutine and raises ``asyncio.TimeoutError``.
    """

    def decorate(wrapped):
        @functools.wraps(wrapped)
        async def bounded(*args, **kwargs):
            pending = wrapped(*args, **kwargs)
            return await asyncio.wait_for(pending, timeout=seconds)

        return bounded

    return decorate
[ "delimiter=',') orders[orders==-1]=0 features = np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if len(orders)!=len(features): logging.error(\"len(orders)!=len(features) ->", "''' import csv import argparse import numpy as np import", "''' return train, valid (x,y) ''' orders = np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name),", "from trendStrategy import OptTrendStrategy, TrendStrategy from util import visu def", "orders[:pos]) valid = (features[pos:], orders[pos:]) return train, valid def evaluate(exp,", "import theanets from sklearn.metrics import accuracy_score import logging from trendStrategy", "help='min improvement (stop learning)') parser.add_argument('--field', default='orders', help='compare field') args =", "train = (features[:pos], orders[:pos]) valid = (features[pos:], orders[pos:]) return train,", "best_data = np.loadtxt(best_fname, usecols=[1], delimiter=',') predicted_data = np.loadtxt(predicted_fname, usecols=[1], delimiter=',')", "theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.load('%s.nn' %name) if verbose: print('training:')", "from sklearn.metrics import accuracy_score import logging from trendStrategy import OptTrendStrategy,", "logging.error(\"len(orders)!=len(features) -> %s!=%s\" %(len(orders),len(features))) features = features.astype('f') orders = orders.astype('i')", "\"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s', default=\"TSLA\", help='stock') parser.add_argument('--ratio', '-r',", "dataset ''' import csv import argparse import numpy as np", "train, valid = load_dataset(stock) n, n_input = train[0].shape exp =", "print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid) exp.save('%s.nn' %stock) return exp", "args.field) train, valid = load_dataset(args.stock) exp = train_strategy(args.stock, args.ratio, args.min)", "= min(len(best_data), len(predicted_data)) title = \"%s vs %s\" %(best, strategy)", "momentum=0.5, 
def train_strategy(stock, ratio=0.8, min_improvement=0.001):
    """Train a 2-class feed-forward classifier on the stock's dataset.

    Builds a (n_input, 2*n_input, 2) theanets classifier, trains it with SGD
    until improvement drops below `min_improvement`, reports metrics on both
    splits, saves the weights to '<stock>.nn' and returns the experiment.
    """
    train, valid = load_dataset(stock)
    _, n_input = train[0].shape
    exp = theanets.Experiment(
        theanets.Classifier,
        layers=(n_input, n_input * 2, 2),
    )
    exp.train(
        train,
        valid,
        min_improvement=min_improvement,
        algo='sgd',
        learning_rate=0.01,
        momentum=0.5,
        hidden_l1=0.001,
        weight_l2=0.001,
        num_updates=100,
    )
    for label, split in (('training:', train), ('validation:', valid)):
        print(label)
        evaluate(exp, split)
    exp.save('%s.nn' % stock)
    return exp
predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy, field) print", "visu.compare(best_data[-min_size:], predicted_data[-min_size:], title) def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__): ''' return train,", "%stock) return exp def load_strategy(name, verbose=False): print(\"loading %s trained strategy\"", "exp if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s',", "len(orders)!=len(features): logging.error(\"len(orders)!=len(features) -> %s!=%s\" %(len(orders),len(features))) features = features.astype('f') orders =", "argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s', default=\"TSLA\", help='stock') parser.add_argument('--ratio', '-r', default=0.8, type=int, help='train/valid", "default=0.001, type=int, help='min improvement (stop learning)') parser.add_argument('--field', default='orders', help='compare field')", "usecols=[1], delimiter=',') orders[orders==-1]=0 features = np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if len(orders)!=len(features): logging.error(\"len(orders)!=len(features)", "''' generate dataset ''' import csv import argparse import numpy", "%s!=%s\" %(len(orders),len(features))) features = features.astype('f') orders = orders.astype('i') pos =", "'-s', default=\"TSLA\", help='stock') parser.add_argument('--ratio', '-r', default=0.8, type=int, help='train/valid ratio') parser.add_argument('--min',", "best, field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy, field) print \"comparing\",best_fname,predicted_fname best_data = np.loadtxt(best_fname,", "train[0].shape exp = theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.train(train,", "= exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true, y_pred)) print('accuracy:',accuracy_score(y_true, y_pred)) def train_strategy(stock, ratio=0.8, min_improvement=0.001):", "usecols=[1], delimiter=',') 
predicted_data = np.loadtxt(predicted_fname, usecols=[1], delimiter=',') min_size = min(len(best_data),", "= \"%s vs %s\" %(best, strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:], title) def", "import visu def compare(stock, field='orders', strategy=\"TrendStrategy_predicted\", best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock, best, field)", "default='orders', help='compare field') args = parser.parse_args() if args.field: compare(args.stock, args.field)", "best_fname=\"{0}_{1}_{2}.csv\".format(stock, best, field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy, field) print \"comparing\",best_fname,predicted_fname best_data =", "= theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.load('%s.nn' %name) if", "if len(orders)!=len(features): logging.error(\"len(orders)!=len(features) -> %s!=%s\" %(len(orders),len(features))) features = features.astype('f') orders", "exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true, y_pred)) print('accuracy:',accuracy_score(y_true, y_pred)) def train_strategy(stock, ratio=0.8, min_improvement=0.001): train,", "valid = load_dataset(stock) n, n_input = train[0].shape exp = theanets.Experiment(", "= train[0].shape exp = theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), )", "np.loadtxt(best_fname, usecols=[1], delimiter=',') predicted_data = np.loadtxt(predicted_fname, usecols=[1], delimiter=',') min_size =", "= theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.train(train, valid, min_improvement=min_improvement,", "strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:], title) def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__): ''' return", "load_strategy(name, verbose=False): print(\"loading %s trained strategy\" %name) train, valid =", "train[0].shape exp = theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) 
def load_strategy(name, verbose=False):
    """Rebuild the classifier for `name` and restore its weights from '<name>.nn'.

    When `verbose` is true, also prints metrics on the training and
    validation splits. Returns the loaded theanets experiment.
    """
    print("loading %s trained strategy" % name)
    train, valid = load_dataset(name)
    _, n_input = train[0].shape
    exp = theanets.Experiment(
        theanets.Classifier,
        layers=(n_input, n_input * 2, 2),
    )
    exp.load('%s.nn' % name)
    if verbose:
        for label, split in (('training:', train), ('validation:', valid)):
            print(label)
            evaluate(exp, split)
    return exp
np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if len(orders)!=len(features): logging.error(\"len(orders)!=len(features) -> %s!=%s\"", "features = features.astype('f') orders = orders.astype('i') pos = round(len(features)*ratio) train", "round(len(features)*ratio) train = (features[:pos], orders[:pos]) valid = (features[pos:], orders[pos:]) return", "parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s', default=\"TSLA\", help='stock') parser.add_argument('--ratio', '-r', default=0.8,", "'-r', default=0.8, type=int, help='train/valid ratio') parser.add_argument('--min', '-m', default=0.001, type=int, help='min", "ratio=0.8, name=OptTrendStrategy.__name__): ''' return train, valid (x,y) ''' orders =", "delimiter=',') if len(orders)!=len(features): logging.error(\"len(orders)!=len(features) -> %s!=%s\" %(len(orders),len(features))) features = features.astype('f')", "field) print \"comparing\",best_fname,predicted_fname best_data = np.loadtxt(best_fname, usecols=[1], delimiter=',') predicted_data =", "exp.train(train, valid, min_improvement=min_improvement, algo='sgd', learning_rate=0.01, momentum=0.5, hidden_l1=0.001, weight_l2=0.001, num_updates=100 )", "name=OptTrendStrategy.__name__): ''' return train, valid (x,y) ''' orders = np.loadtxt(\"{0}_{1}_orders.csv\".format(stock,", "valid (x,y) ''' orders = np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name), usecols=[1], delimiter=',') orders[orders==-1]=0", "exp = theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.load('%s.nn' %name)", "learning_rate=0.01, momentum=0.5, hidden_l1=0.001, weight_l2=0.001, num_updates=100 ) print('training:') evaluate(exp, train) print('validation:')", "np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name), usecols=[1], delimiter=',') orders[orders==-1]=0 features = np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if", "return train, valid def evaluate(exp, dataset): y_true = dataset[1] y_pred", ") 
exp.load('%s.nn' %name) if verbose: print('training:') evaluate(exp, train) print('validation:') evaluate(exp,", "print(sklearn.metrics.confusion_matrix(y_true, y_pred)) print('accuracy:',accuracy_score(y_true, y_pred)) def train_strategy(stock, ratio=0.8, min_improvement=0.001): train, valid", "train, valid (x,y) ''' orders = np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name), usecols=[1], delimiter=',')", "np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if len(orders)!=len(features): logging.error(\"len(orders)!=len(features) -> %s!=%s\" %(len(orders),len(features))) features =", "logging from trendStrategy import OptTrendStrategy, TrendStrategy from util import visu", "name), usecols=[1], delimiter=',') orders[orders==-1]=0 features = np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if len(orders)!=len(features):", "compare(stock, field='orders', strategy=\"TrendStrategy_predicted\", best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock, best, field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy, field)", "= np.loadtxt(best_fname, usecols=[1], delimiter=',') predicted_data = np.loadtxt(predicted_fname, usecols=[1], delimiter=',') min_size", "help='compare field') args = parser.parse_args() if args.field: compare(args.stock, args.field) train,", "load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__): ''' return train, valid (x,y) ''' orders", "%s\" %(best, strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:], title) def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__):", "sklearn.metrics import theanets from sklearn.metrics import accuracy_score import logging from", "parser.add_argument('--min', '-m', default=0.001, type=int, help='min improvement (stop learning)') parser.add_argument('--field', default='orders',", "valid) exp.save('%s.nn' %stock) return exp def load_strategy(name, verbose=False): print(\"loading %s", "(features[pos:], orders[pos:]) return 
train, valid def evaluate(exp, dataset): y_true =", "\"comparing\",best_fname,predicted_fname best_data = np.loadtxt(best_fname, usecols=[1], delimiter=',') predicted_data = np.loadtxt(predicted_fname, usecols=[1],", "n, n_input = train[0].shape exp = theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2,", "orders = np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name), usecols=[1], delimiter=',') orders[orders==-1]=0 features = np.loadtxt(\"{0}_input.csv\".format(stock),", "evaluate(exp, train) print('validation:') evaluate(exp, valid) return exp if __name__ ==", "(stop learning)') parser.add_argument('--field', default='orders', help='compare field') args = parser.parse_args() if", "type=int, help='train/valid ratio') parser.add_argument('--min', '-m', default=0.001, type=int, help='min improvement (stop", "evaluate(exp, valid) exp.save('%s.nn' %stock) return exp def load_strategy(name, verbose=False): print(\"loading", "field') args = parser.parse_args() if args.field: compare(args.stock, args.field) train, valid", "strategy=\"TrendStrategy_predicted\", best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock, best, field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy, field) print \"comparing\",best_fname,predicted_fname", "csv import argparse import numpy as np import sklearn.metrics import", "print \"comparing\",best_fname,predicted_fname best_data = np.loadtxt(best_fname, usecols=[1], delimiter=',') predicted_data = np.loadtxt(predicted_fname,", "ratio=0.8, min_improvement=0.001): train, valid = load_dataset(stock) n, n_input = train[0].shape", "features.astype('f') orders = orders.astype('i') pos = round(len(features)*ratio) train = (features[:pos],", "== \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s', default=\"TSLA\", help='stock') parser.add_argument('--ratio',", "help='stock') parser.add_argument('--ratio', '-r', default=0.8, type=int, 
help='train/valid ratio') parser.add_argument('--min', '-m', default=0.001,", "train, valid def evaluate(exp, dataset): y_true = dataset[1] y_pred =", "= round(len(features)*ratio) train = (features[:pos], orders[:pos]) valid = (features[pos:], orders[pos:])", "exp def load_strategy(name, verbose=False): print(\"loading %s trained strategy\" %name) train,", "-> %s!=%s\" %(len(orders),len(features))) features = features.astype('f') orders = orders.astype('i') pos", "improvement (stop learning)') parser.add_argument('--field', default='orders', help='compare field') args = parser.parse_args()", "numpy as np import sklearn.metrics import theanets from sklearn.metrics import", "load_dataset(name) n, n_input = train[0].shape exp = theanets.Experiment( theanets.Classifier, layers=(n_input,", "= np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name), usecols=[1], delimiter=',') orders[orders==-1]=0 features = np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',')", "features = np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if len(orders)!=len(features): logging.error(\"len(orders)!=len(features) -> %s!=%s\" %(len(orders),len(features)))", "train) print('validation:') evaluate(exp, valid) exp.save('%s.nn' %stock) return exp def load_strategy(name,", "title = \"%s vs %s\" %(best, strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:], title)", "weight_l2=0.001, num_updates=100 ) print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid) exp.save('%s.nn'", "args = parser.parse_args() if args.field: compare(args.stock, args.field) train, valid =", "generate dataset ''' import csv import argparse import numpy as", "import csv import argparse import numpy as np import sklearn.metrics", "\"%s vs %s\" %(best, strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:], title) def load_dataset(stock,", "argparse import numpy as np import sklearn.metrics import theanets from", "valid) return exp if __name__ == 
\"__main__\": parser = argparse.ArgumentParser(description=__doc__)", "theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.train(train, valid, min_improvement=min_improvement, algo='sgd', learning_rate=0.01,", "n_input*2, 2), ) exp.load('%s.nn' %name) if verbose: print('training:') evaluate(exp, train)", "return train, valid (x,y) ''' orders = np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name), usecols=[1],", "''' orders = np.loadtxt(\"{0}_{1}_orders.csv\".format(stock, name), usecols=[1], delimiter=',') orders[orders==-1]=0 features =", "as np import sklearn.metrics import theanets from sklearn.metrics import accuracy_score", "print('accuracy:',accuracy_score(y_true, y_pred)) def train_strategy(stock, ratio=0.8, min_improvement=0.001): train, valid = load_dataset(stock)", "orders.astype('i') pos = round(len(features)*ratio) train = (features[:pos], orders[:pos]) valid =", "y_pred)) def train_strategy(stock, ratio=0.8, min_improvement=0.001): train, valid = load_dataset(stock) n,", "return exp def load_strategy(name, verbose=False): print(\"loading %s trained strategy\" %name)", "orders[pos:]) return train, valid def evaluate(exp, dataset): y_true = dataset[1]", "TrendStrategy from util import visu def compare(stock, field='orders', strategy=\"TrendStrategy_predicted\", best=OptTrendStrategy.__name__):", "= np.loadtxt(predicted_fname, usecols=[1], delimiter=',') min_size = min(len(best_data), len(predicted_data)) title =", "field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy, field) print \"comparing\",best_fname,predicted_fname best_data = np.loadtxt(best_fname, usecols=[1],", "if args.field: compare(args.stock, args.field) train, valid = load_dataset(args.stock) exp =", "strategy, field) print \"comparing\",best_fname,predicted_fname best_data = np.loadtxt(best_fname, usecols=[1], delimiter=',') predicted_data", "print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid) return exp if __name__", "def 
load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__): ''' return train, valid (x,y) '''", "= orders.astype('i') pos = round(len(features)*ratio) train = (features[:pos], orders[:pos]) valid", "import logging from trendStrategy import OptTrendStrategy, TrendStrategy from util import", "min(len(best_data), len(predicted_data)) title = \"%s vs %s\" %(best, strategy) visu.compare(best_data[-min_size:],", "min_improvement=0.001): train, valid = load_dataset(stock) n, n_input = train[0].shape exp", "learning)') parser.add_argument('--field', default='orders', help='compare field') args = parser.parse_args() if args.field:", "%name) if verbose: print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid) return", "min_improvement=min_improvement, algo='sgd', learning_rate=0.01, momentum=0.5, hidden_l1=0.001, weight_l2=0.001, num_updates=100 ) print('training:') evaluate(exp,", "= np.loadtxt(\"{0}_input.csv\".format(stock), delimiter=',') if len(orders)!=len(features): logging.error(\"len(orders)!=len(features) -> %s!=%s\" %(len(orders),len(features))) features", "y_pred = exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true, y_pred)) print('accuracy:',accuracy_score(y_true, y_pred)) def train_strategy(stock, ratio=0.8,", "predicted_data = np.loadtxt(predicted_fname, usecols=[1], delimiter=',') min_size = min(len(best_data), len(predicted_data)) title", "valid = (features[pos:], orders[pos:]) return train, valid def evaluate(exp, dataset):", "def evaluate(exp, dataset): y_true = dataset[1] y_pred = exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true,", "OptTrendStrategy, TrendStrategy from util import visu def compare(stock, field='orders', strategy=\"TrendStrategy_predicted\",", "verbose=False): print(\"loading %s trained strategy\" %name) train, valid = load_dataset(name)", "evaluate(exp, valid) return exp if __name__ == \"__main__\": parser =", "num_updates=100 ) print('training:') 
evaluate(exp, train) print('validation:') evaluate(exp, valid) exp.save('%s.nn' %stock)", "ratio') parser.add_argument('--min', '-m', default=0.001, type=int, help='min improvement (stop learning)') parser.add_argument('--field',", "parser.add_argument('--ratio', '-r', default=0.8, type=int, help='train/valid ratio') parser.add_argument('--min', '-m', default=0.001, type=int,", "np.loadtxt(predicted_fname, usecols=[1], delimiter=',') min_size = min(len(best_data), len(predicted_data)) title = \"%s", "import OptTrendStrategy, TrendStrategy from util import visu def compare(stock, field='orders',", "usecols=[1], delimiter=',') min_size = min(len(best_data), len(predicted_data)) title = \"%s vs", "y_pred)) print('accuracy:',accuracy_score(y_true, y_pred)) def train_strategy(stock, ratio=0.8, min_improvement=0.001): train, valid =", "exp = theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.train(train, valid,", ") print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid) exp.save('%s.nn' %stock) return", "exp.load('%s.nn' %name) if verbose: print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid)", "parser.parse_args() if args.field: compare(args.stock, args.field) train, valid = load_dataset(args.stock) exp", "min_size = min(len(best_data), len(predicted_data)) title = \"%s vs %s\" %(best,", "sklearn.metrics import accuracy_score import logging from trendStrategy import OptTrendStrategy, TrendStrategy", "def train_strategy(stock, ratio=0.8, min_improvement=0.001): train, valid = load_dataset(stock) n, n_input", "theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.train(train, valid, min_improvement=min_improvement, algo='sgd',", "hidden_l1=0.001, weight_l2=0.001, num_updates=100 ) print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid)", "util import visu def compare(stock, field='orders', strategy=\"TrendStrategy_predicted\", 
best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock, best,", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s', default=\"TSLA\",", "layers=(n_input, n_input*2, 2), ) exp.train(train, valid, min_improvement=min_improvement, algo='sgd', learning_rate=0.01, momentum=0.5,", "= load_dataset(args.stock) exp = train_strategy(args.stock, args.ratio, args.min) exp = load_strategy(args.stock,", "= argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s', default=\"TSLA\", help='stock') parser.add_argument('--ratio', '-r', default=0.8, type=int,", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock', '-s', default=\"TSLA\", help='stock')", "args.field: compare(args.stock, args.field) train, valid = load_dataset(args.stock) exp = train_strategy(args.stock,", "evaluate(exp, train) print('validation:') evaluate(exp, valid) exp.save('%s.nn' %stock) return exp def", "compare(args.stock, args.field) train, valid = load_dataset(args.stock) exp = train_strategy(args.stock, args.ratio,", "load_dataset(stock) n, n_input = train[0].shape exp = theanets.Experiment( theanets.Classifier, layers=(n_input,", "= (features[:pos], orders[:pos]) valid = (features[pos:], orders[pos:]) return train, valid", "accuracy_score import logging from trendStrategy import OptTrendStrategy, TrendStrategy from util", "title) def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__): ''' return train, valid (x,y)", "= (features[pos:], orders[pos:]) return train, valid def evaluate(exp, dataset): y_true", "%(best, strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:], title) def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__): '''", "= dataset[1] y_pred = exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true, y_pred)) print('accuracy:',accuracy_score(y_true, y_pred)) 
def", "theanets.Experiment( theanets.Classifier, layers=(n_input, n_input*2, 2), ) exp.load('%s.nn' %name) if verbose:", "= parser.parse_args() if args.field: compare(args.stock, args.field) train, valid = load_dataset(args.stock)", "trendStrategy import OptTrendStrategy, TrendStrategy from util import visu def compare(stock,", "len(predicted_data)) title = \"%s vs %s\" %(best, strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:],", "y_true = dataset[1] y_pred = exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true, y_pred)) print('accuracy:',accuracy_score(y_true, y_pred))", "evaluate(exp, dataset): y_true = dataset[1] y_pred = exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true, y_pred))", "strategy\" %name) train, valid = load_dataset(name) n, n_input = train[0].shape", "def compare(stock, field='orders', strategy=\"TrendStrategy_predicted\", best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock, best, field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy,", "visu def compare(stock, field='orders', strategy=\"TrendStrategy_predicted\", best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock, best, field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock,", "return exp if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--stock',", "(features[:pos], orders[:pos]) valid = (features[pos:], orders[pos:]) return train, valid def", "#!/usr/bin/python ''' generate dataset ''' import csv import argparse import", "verbose: print('training:') evaluate(exp, train) print('validation:') evaluate(exp, valid) return exp if", "default=0.8, type=int, help='train/valid ratio') parser.add_argument('--min', '-m', default=0.001, type=int, help='min improvement", "import numpy as np import sklearn.metrics import theanets from sklearn.metrics", "theanets from sklearn.metrics import accuracy_score import logging 
from trendStrategy import", "= load_dataset(name) n, n_input = train[0].shape exp = theanets.Experiment( theanets.Classifier,", "type=int, help='min improvement (stop learning)') parser.add_argument('--field', default='orders', help='compare field') args", "import accuracy_score import logging from trendStrategy import OptTrendStrategy, TrendStrategy from", "print('validation:') evaluate(exp, valid) exp.save('%s.nn' %stock) return exp def load_strategy(name, verbose=False):", "import argparse import numpy as np import sklearn.metrics import theanets", "vs %s\" %(best, strategy) visu.compare(best_data[-min_size:], predicted_data[-min_size:], title) def load_dataset(stock, ratio=0.8,", "best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock, best, field) predicted_fname=\"{0}_{1}_{2}.csv\".format(stock, strategy, field) print \"comparing\",best_fname,predicted_fname best_data", "valid, min_improvement=min_improvement, algo='sgd', learning_rate=0.01, momentum=0.5, hidden_l1=0.001, weight_l2=0.001, num_updates=100 ) print('training:')", "= load_dataset(stock) n, n_input = train[0].shape exp = theanets.Experiment( theanets.Classifier,", "from util import visu def compare(stock, field='orders', strategy=\"TrendStrategy_predicted\", best=OptTrendStrategy.__name__): best_fname=\"{0}_{1}_{2}.csv\".format(stock,", "parser.add_argument('--field', default='orders', help='compare field') args = parser.parse_args() if args.field: compare(args.stock,", "= features.astype('f') orders = orders.astype('i') pos = round(len(features)*ratio) train =", "exp.save('%s.nn' %stock) return exp def load_strategy(name, verbose=False): print(\"loading %s trained", "delimiter=',') min_size = min(len(best_data), len(predicted_data)) title = \"%s vs %s\"", "dataset[1] y_pred = exp.network.predict(dataset[0]) print(sklearn.metrics.confusion_matrix(y_true, y_pred)) print('accuracy:',accuracy_score(y_true, y_pred)) def train_strategy(stock,", "train_strategy(stock, ratio=0.8, 
min_improvement=0.001): train, valid = load_dataset(stock) n, n_input =", "n_input*2, 2), ) exp.train(train, valid, min_improvement=min_improvement, algo='sgd', learning_rate=0.01, momentum=0.5, hidden_l1=0.001,", "valid def evaluate(exp, dataset): y_true = dataset[1] y_pred = exp.network.predict(dataset[0])", "load_dataset(args.stock) exp = train_strategy(args.stock, args.ratio, args.min) exp = load_strategy(args.stock, True)", "print(\"loading %s trained strategy\" %name) train, valid = load_dataset(name) n," ]
[ "solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\;", "# Dictionary size L = 32 # Number of non-zero", "======================= This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to", "lmbda opt['Verbose'] = True b = bpdn.BPDN(D, s, lmbda, opt)", "lmbda = sprm[0] print('Minimum ℓ1 error: %5.2f at 𝜆 =", "the reference sparse representation that generated a signal from a", "in the 'LICENSE.txt' file distributed # with the package. \"\"\"", "sparsity, and noise level. \"\"\" N = 512 # Signal", "np.zeros((M, 1)) si = np.random.permutation(list(range(0, M-1))) x0[si[0:L]] = np.random.randn(L, 1)", "np.random.randn(L, 1) # Construct reference and noisy signal s0 =", "Construct reference and noisy signal s0 = D.dot(x0) s =", "is evaluated in parallel by :func:`sporco.util.grid_search`. \"\"\" # Function computing", "class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem", "problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\| D \\mathbf{x} - \\mathbf{s}", "Function computing reconstruction error at lmbda def evalerr(prm): lmbda =", "of reference and recovered representations. \"\"\" plot.plot(np.hstack((x0, x)), title='Sparse representation',", "value, residuals, and rho \"\"\" its = b.getitstat() fig =", "4*N # Dictionary size L = 32 # Number of", "iteration statistics. \"\"\" # Initialise and run BPDN object for", "from sporco.admm import bpdn from sporco import util from sporco", "x=lrng, ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error', fig=fig) plot.subplot(2, 2, 2) plot.plot(its.ObjFun, xlbl='Iterations',", "sigma*np.random.randn(N,1) \"\"\" Set BPDN solver class options. \"\"\" opt =", "noisy signal s0 = D.dot(x0) s = s0 + sigma*np.random.randn(N,1)", "\"\"\" Once the best $\\lambda$ has been determined, run BPDN", "is the signal to be represented. 
In this example the", "Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\|", "by :func:`sporco.util.grid_search`. \"\"\" # Function computing reconstruction error at lmbda", "= 0.5 # Noise level \"\"\" Construct random dictionary, reference", "DeNoising (BPDN) problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\| D \\mathbf{x}", "over a logarithmicaly spaced grid. (The reference representation is assumed", "statistics. \"\"\" # Initialise and run BPDN object for best", "plot.plot(np.hstack((x0, x)), title='Sparse representation', lgnd=['Reference', 'Reconstructed']) \"\"\" Plot lmbda error", "b.solve() print(\"BPDN solve time: %.2fs\" % b.timer.elapsed('solve')) \"\"\" Plot comparison", "which is not realistic in a real application.) A function", "b = bpdn.BPDN(D, s, lmbda, opt) x = b.solve() print(\"BPDN", "be represented. In this example the BPDN problem is used", "# Function computing reconstruction error at lmbda def evalerr(prm): lmbda", "curve, functional value, residuals, and rho \"\"\" its = b.getitstat()", "of non-zero coefficients in generator sigma = 0.5 # Noise", "np from sporco.admm import bpdn from sporco import util from", "This file is part of the SPORCO package. Details of", "example the BPDN problem is used to estimate the reference", "Initialise and run BPDN object for best lmbda opt['Verbose'] =", "= %.2e' % (sfvl, lmbda)) \"\"\" Once the best $\\lambda$", "sparse representation, and $\\mathbf{s}$ is the signal to be represented.", "M = 4*N # Dictionary size L = 32 #", "the synthesis of the reference sparse representation with additive Gaussian", "class options. \"\"\" opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol':", "in a real application.) A function is defined that evalues", "and this function is evaluated in parallel by :func:`sporco.util.grid_search`. 
\"\"\"", "N = 512 # Signal size M = 4*N #", "the best $\\lambda$ has been determined, run BPDN with verbose", "s = s0 + sigma*np.random.randn(N,1) \"\"\" Set BPDN solver class", "time: %.2fs\" % b.timer.elapsed('solve')) \"\"\" Plot comparison of reference and", "xlbl='$\\lambda$', ylbl='Error', fig=fig) plot.subplot(2, 2, 2) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)", ":cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\| D \\mathbf{x} - \\mathbf{s} \\|_2^2", "%5.2f at 𝜆 = %.2e' % (sfvl, lmbda)) \"\"\" Once", "lmbda error curve, functional value, residuals, and rho \"\"\" its", "size L = 32 # Number of non-zero coefficients in", "𝜆 = %.2e' % (sfvl, lmbda)) \"\"\" Once the best", "\"\"\" plot.plot(np.hstack((x0, x)), title='Sparse representation', lgnd=['Reference', 'Reconstructed']) \"\"\" Plot lmbda", "% b.timer.elapsed('solve')) \"\"\" Plot comparison of reference and recovered representations.", "bpdn.BPDN(D, s, lmbda, opt) x = b.solve() return np.sum(np.abs(x-x0)) #", "$\\mathbf{x}$ is the sparse representation, and $\\mathbf{s}$ is the signal", "recovered representations. \"\"\" plot.plot(np.hstack((x0, x)), title='Sparse representation', lgnd=['Reference', 'Reconstructed']) \"\"\"", "x0 = np.zeros((M, 1)) si = np.random.permutation(list(range(0, M-1))) x0[si[0:L]] =", "1) # Construct reference and noisy signal s0 = D.dot(x0)", "and rho \"\"\" its = b.getitstat() fig = plot.figure(figsize=(15, 10))", "= sprm[0] print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e'", "plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(2, 2,", "print(\"BPDN solve time: %.2fs\" % b.timer.elapsed('solve')) \"\"\" Plot comparison of", "BPDN with verbose display of ADMM iteration statistics. 
\"\"\" #", "%.2fs\" % b.timer.elapsed('solve')) \"\"\" Plot comparison of reference and recovered", "(BPDN) problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\| D \\mathbf{x} -", "# This file is part of the SPORCO package. Details", "of error function on lmbda grid lrng = np.logspace(1, 2,", "best lmbda opt['Verbose'] = True b = bpdn.BPDN(D, s, lmbda,", "BPDN solver class options. \"\"\" opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter':", "32 # Number of non-zero coefficients in generator sigma =", "(1/2) \\| D \\mathbf{x} - \\mathbf{s} \\|_2^2 + \\lambda \\|", "example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the", "to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x}", "package. Details of the copyright # and user license can", "'MaxMainIter': 500, 'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}}) \"\"\" Select regularization", "b.solve() return np.sum(np.abs(x-x0)) # Parallel evalution of error function on", "can be found in the 'LICENSE.txt' file distributed # with", "Once the best $\\lambda$ has been determined, run BPDN with", "license can be found in the 'LICENSE.txt' file distributed #", "function is evaluated in parallel by :func:`sporco.util.grid_search`. \"\"\" # Function", "logarithmicaly spaced grid. 
(The reference representation is assumed to be", "generator sigma = 0.5 # Noise level \"\"\" Construct random", "\\|_2^2 + \\lambda \\| \\mathbf{x} \\|_1 \\;,$$ where $D$ is", "Plot lmbda error curve, functional value, residuals, and rho \"\"\"", "has been determined, run BPDN with verbose display of ADMM", "evalerr(prm): lmbda = prm[0] b = bpdn.BPDN(D, s, lmbda, opt)", "= np.random.randn(L, 1) # Construct reference and noisy signal s0", "sporco import util from sporco import plot \"\"\" Configure problem", "A function is defined that evalues the BPDN recovery error", "= bpdn.BPDN(D, s, lmbda, opt) x = b.solve() print(\"BPDN solve", "# Noise level \"\"\" Construct random dictionary, reference random sparse", "bpdn.BPDN(D, s, lmbda, opt) x = b.solve() print(\"BPDN solve time:", "random dictionary, reference random sparse representation, and test signal consisting", "functional value, residuals, and rho \"\"\" its = b.getitstat() fig", "This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve", "Set BPDN solver class options. \"\"\" opt = bpdn.BPDN.Options({'Verbose': False,", "the BPDN recovery error for a specified $\\lambda$, and this", "a signal from a noisy version of the signal. \"\"\"", "function is defined that evalues the BPDN recovery error for", "regularization parameter $\\lambda$ by evaluating the error in recovering the", "representation that generated a signal from a noisy version of", "# Construct reference and noisy signal s0 = D.dot(x0) s", "sidx = util.grid_search(evalerr, (lrng,)) lmbda = sprm[0] print('Minimum ℓ1 error:", "Gaussian noise. 
\"\"\" # Construct random dictionary and random sparse", "utf-8 -*- # This file is part of the SPORCO", "np.sum(np.abs(x-x0)) # Parallel evalution of error function on lmbda grid", "error function on lmbda grid lrng = np.logspace(1, 2, 20)", "be found in the 'LICENSE.txt' file distributed # with the", "$D$ is the dictionary, $\\mathbf{x}$ is the sparse representation, and", "lgnd=['Reference', 'Reconstructed']) \"\"\" Plot lmbda error curve, functional value, residuals,", "= bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}})", "to be represented. In this example the BPDN problem is", "ℓ1 error: %5.2f at 𝜆 = %.2e' % (sfvl, lmbda))", "% (sfvl, lmbda)) \"\"\" Once the best $\\lambda$ has been", "Parameter', fig=fig) fig.show() # Wait for enter on keyboard input()", "error at lmbda def evalerr(prm): lmbda = prm[0] b =", "x = b.solve() print(\"BPDN solve time: %.2fs\" % b.timer.elapsed('solve')) \"\"\"", "ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(2, 2, 4) plot.plot(its.Rho,", "file distributed # with the package. \"\"\" Basis Pursuit DeNoising", "computing reconstruction error at lmbda def evalerr(prm): lmbda = prm[0]", "bpdn from sporco import util from sporco import plot \"\"\"", "'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}}) \"\"\" Select regularization parameter $\\lambda$", "in parallel by :func:`sporco.util.grid_search`. \"\"\" # Function computing reconstruction error", "s0 + sigma*np.random.randn(N,1) \"\"\" Set BPDN solver class options. \"\"\"", "from sporco import util from sporco import plot \"\"\" Configure", "coefficients np.random.seed(12345) D = np.random.randn(N, M) x0 = np.zeros((M, 1))", "application.) 
A function is defined that evalues the BPDN recovery", "fig=fig) plot.subplot(2, 2, 2) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(2, 2,", "D.dot(x0) s = s0 + sigma*np.random.randn(N,1) \"\"\" Set BPDN solver", "x0[si[0:L]] = np.random.randn(L, 1) # Construct reference and noisy signal", "Select regularization parameter $\\lambda$ by evaluating the error in recovering", "the error in recovering the sparse representation over a logarithmicaly", "BPDN object for best lmbda opt['Verbose'] = True b =", "of the reference sparse representation with additive Gaussian noise. \"\"\"", "= util.grid_search(evalerr, (lrng,)) lmbda = sprm[0] print('Minimum ℓ1 error: %5.2f", "= True b = bpdn.BPDN(D, s, lmbda, opt) x =", "opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget':", "from sporco import plot \"\"\" Configure problem size, sparsity, and", "= b.solve() return np.sum(np.abs(x-x0)) # Parallel evalution of error function", "\\| \\mathbf{x} \\|_1 \\;,$$ where $D$ is the dictionary, $\\mathbf{x}$", "\"\"\" Set BPDN solver class options. \"\"\" opt = bpdn.BPDN.Options({'Verbose':", "from a noisy version of the signal. \"\"\" from __future__", "plot.figure(figsize=(15, 10)) plot.subplot(2, 2, 1) plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error',", "1)) si = np.random.permutation(list(range(0, M-1))) x0[si[0:L]] = np.random.randn(L, 1) #", "\"\"\" Basis Pursuit DeNoising ======================= This example demonstrates the use", "error curve, functional value, residuals, and rho \"\"\" its =", "recovering the sparse representation over a logarithmicaly spaced grid. (The", "problem size, sparsity, and noise level. \"\"\" N = 512", "display of ADMM iteration statistics. \"\"\" # Initialise and run", "lrng = np.logspace(1, 2, 20) sprm, sfvl, fvmx, sidx =", ":func:`sporco.util.grid_search`. 
\"\"\" # Function computing reconstruction error at lmbda def", "+ sigma*np.random.randn(N,1) \"\"\" Set BPDN solver class options. \"\"\" opt", "representation, and $\\mathbf{s}$ is the signal to be represented. In", "2, 20) sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,)) lmbda", "2) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(2, 2, 3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,", "the 'LICENSE.txt' file distributed # with the package. \"\"\" Basis", "import bpdn from sporco import util from sporco import plot", "%.2e' % (sfvl, lmbda)) \"\"\" Once the best $\\lambda$ has", "for best lmbda opt['Verbose'] = True b = bpdn.BPDN(D, s,", "additive Gaussian noise. \"\"\" # Construct random dictionary and random", "is not realistic in a real application.) A function is", "{'RsdlTarget': 1.0}}) \"\"\" Select regularization parameter $\\lambda$ by evaluating the", "$\\lambda$ has been determined, run BPDN with verbose display of", "'Reconstructed']) \"\"\" Plot lmbda error curve, functional value, residuals, and", "size M = 4*N # Dictionary size L = 32", "coding: utf-8 -*- # This file is part of the", "known, which is not realistic in a real application.) 
A", "the dictionary, $\\mathbf{x}$ is the sparse representation, and $\\mathbf{s}$ is", "and noisy signal s0 = D.dot(x0) s = s0 +", "False, 'MaxMainIter': 500, 'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}}) \"\"\" Select", "fig = plot.figure(figsize=(15, 10)) plot.subplot(2, 2, 1) plot.plot(fvmx, x=lrng, ptyp='semilogx',", "opt) x = b.solve() print(\"BPDN solve time: %.2fs\" % b.timer.elapsed('solve'))", "# Number of non-zero coefficients in generator sigma = 0.5", "D = np.random.randn(N, M) x0 = np.zeros((M, 1)) si =", "sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,)) lmbda = sprm[0]", "util.grid_search(evalerr, (lrng,)) lmbda = sprm[0] print('Minimum ℓ1 error: %5.2f at", "error in recovering the sparse representation over a logarithmicaly spaced", "by evaluating the error in recovering the sparse representation over", "+ \\lambda \\| \\mathbf{x} \\|_1 \\;,$$ where $D$ is the", "BPDN recovery error for a specified $\\lambda$, and this function", "print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e' % (sfvl,", "Configure problem size, sparsity, and noise level. \"\"\" N =", "signal s0 = D.dot(x0) s = s0 + sigma*np.random.randn(N,1) \"\"\"", "BPDN problem is used to estimate the reference sparse representation", "ylbl='Error', fig=fig) plot.subplot(2, 2, 2) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(2,", "lmbda = prm[0] b = bpdn.BPDN(D, s, lmbda, opt) x", "the reference sparse representation with additive Gaussian noise. \"\"\" #", "= np.random.permutation(list(range(0, M-1))) x0[si[0:L]] = np.random.randn(L, 1) # Construct reference", "512 # Signal size M = 4*N # Dictionary size", "distributed # with the package. 
\"\"\" Basis Pursuit DeNoising =======================", "Details of the copyright # and user license can be", "b = bpdn.BPDN(D, s, lmbda, opt) x = b.solve() return", "the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\; (1/2)", "0.5 # Noise level \"\"\" Construct random dictionary, reference random", "lmbda, opt) x = b.solve() print(\"BPDN solve time: %.2fs\" %", "lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(2, 2, 4) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter',", "def evalerr(prm): lmbda = prm[0] b = bpdn.BPDN(D, s, lmbda,", "signal to be represented. In this example the BPDN problem", "realistic in a real application.) A function is defined that", "the BPDN problem is used to estimate the reference sparse", "2, 3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig)", "# Signal size M = 4*N # Dictionary size L", "the use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit", "a real application.) A function is defined that evalues the", "demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the Basis", "1e-3, 'AutoRho': {'RsdlTarget': 1.0}}) \"\"\" Select regularization parameter $\\lambda$ by", "#!/usr/bin/env python # -*- coding: utf-8 -*- # This file", "Pursuit DeNoising ======================= This example demonstrates the use of class", "reference and noisy signal s0 = D.dot(x0) s = s0", "Dictionary size L = 32 # Number of non-zero coefficients", "its = b.getitstat() fig = plot.figure(figsize=(15, 10)) plot.subplot(2, 2, 1)", "evaluating the error in recovering the sparse representation over a", "and user license can be found in the 'LICENSE.txt' file", "Basis Pursuit DeNoising ======================= This example demonstrates the use of", "the sparse representation over a logarithmicaly spaced grid. 
(The reference", "to estimate the reference sparse representation that generated a signal", "size, sparsity, and noise level. \"\"\" N = 512 #", "at 𝜆 = %.2e' % (sfvl, lmbda)) \"\"\" Once the", "title='Sparse representation', lgnd=['Reference', 'Reconstructed']) \"\"\" Plot lmbda error curve, functional", "\\| D \\mathbf{x} - \\mathbf{s} \\|_2^2 + \\lambda \\| \\mathbf{x}", "s, lmbda, opt) x = b.solve() print(\"BPDN solve time: %.2fs\"", "on lmbda grid lrng = np.logspace(1, 2, 20) sprm, sfvl,", "a specified $\\lambda$, and this function is evaluated in parallel", "and noise level. \"\"\" N = 512 # Signal size", "Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic` $$\\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\| D", "represented. In this example the BPDN problem is used to", "\"\"\" Configure problem size, sparsity, and noise level. \"\"\" N", "bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}}) \"\"\"", "is assumed to be known, which is not realistic in", "dictionary and random sparse coefficients np.random.seed(12345) D = np.random.randn(N, M)", "the copyright # and user license can be found in", "si = np.random.permutation(list(range(0, M-1))) x0[si[0:L]] = np.random.randn(L, 1) # Construct", "solver class options. \"\"\" opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,", "lmbda def evalerr(prm): lmbda = prm[0] b = bpdn.BPDN(D, s,", "np.random.permutation(list(range(0, M-1))) x0[si[0:L]] = np.random.randn(L, 1) # Construct reference and", "reference sparse representation that generated a signal from a noisy", "with the package. 
\"\"\" Basis Pursuit DeNoising ======================= This example", "dictionary, $\\mathbf{x}$ is the sparse representation, and $\\mathbf{s}$ is the", "M-1))) x0[si[0:L]] = np.random.randn(L, 1) # Construct reference and noisy", "x = b.solve() return np.sum(np.abs(x-x0)) # Parallel evalution of error", "consisting of the synthesis of the reference sparse representation with", "sprm[0] print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e' %", "Construct random dictionary and random sparse coefficients np.random.seed(12345) D =", "reference random sparse representation, and test signal consisting of the", "xlbl='Iterations', ylbl='Penalty Parameter', fig=fig) fig.show() # Wait for enter on", "print_function from builtins import input import numpy as np from", "= np.random.randn(N, M) x0 = np.zeros((M, 1)) si = np.random.permutation(list(range(0,", "is the sparse representation, and $\\mathbf{s}$ is the signal to", "ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(2, 2, 4) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty", "= 32 # Number of non-zero coefficients in generator sigma", "= bpdn.BPDN(D, s, lmbda, opt) x = b.solve() return np.sum(np.abs(x-x0))", "and random sparse coefficients np.random.seed(12345) D = np.random.randn(N, M) x0", "its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(2, 2, 4)", "copyright # and user license can be found in the", "util from sporco import plot \"\"\" Configure problem size, sparsity,", "plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(2, 2, 3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy',", "import plot \"\"\" Configure problem size, sparsity, and noise level.", "be known, which is not realistic in a real application.)", "prm[0] b = bpdn.BPDN(D, s, lmbda, opt) x = b.solve()", "\\mathbf{x} - \\mathbf{s} \\|_2^2 + \\lambda \\| \\mathbf{x} \\|_1 \\;,$$", "\"\"\" Plot lmbda error curve, functional 
value, residuals, and rho", "is defined that evalues the BPDN recovery error for a", "run BPDN with verbose display of ADMM iteration statistics. \"\"\"", "reference sparse representation with additive Gaussian noise. \"\"\" # Construct", "$\\mathbf{s}$ is the signal to be represented. In this example", "Number of non-zero coefficients in generator sigma = 0.5 #", "plot.subplot(2, 2, 3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'],", "DeNoising ======================= This example demonstrates the use of class :class:`.admm.bpdn.BPDN`", "coefficients in generator sigma = 0.5 # Noise level \"\"\"", "b.getitstat() fig = plot.figure(figsize=(15, 10)) plot.subplot(2, 2, 1) plot.plot(fvmx, x=lrng,", "the sparse representation, and $\\mathbf{s}$ is the signal to be", "np.random.seed(12345) D = np.random.randn(N, M) x0 = np.zeros((M, 1)) si", "numpy as np from sporco.admm import bpdn from sporco import", "user license can be found in the 'LICENSE.txt' file distributed", "representation with additive Gaussian noise. \"\"\" # Construct random dictionary", "ylbl='Penalty Parameter', fig=fig) fig.show() # Wait for enter on keyboard", "plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig) fig.show() # Wait for enter", "builtins import input import numpy as np from sporco.admm import", "best $\\lambda$ has been determined, run BPDN with verbose display", "'Dual'], fig=fig) plot.subplot(2, 2, 4) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)", "lmbda)) \"\"\" Once the best $\\lambda$ has been determined, run", "evalution of error function on lmbda grid lrng = np.logspace(1,", "__future__ import print_function from builtins import input import numpy as", "evaluated in parallel by :func:`sporco.util.grid_search`. 
\"\"\" # Function computing reconstruction", "input import numpy as np from sporco.admm import bpdn from", "signal consisting of the synthesis of the reference sparse representation", "and recovered representations. \"\"\" plot.plot(np.hstack((x0, x)), title='Sparse representation', lgnd=['Reference', 'Reconstructed'])", "In this example the BPDN problem is used to estimate", "= s0 + sigma*np.random.randn(N,1) \"\"\" Set BPDN solver class options.", "lmbda grid lrng = np.logspace(1, 2, 20) sprm, sfvl, fvmx,", "Plot comparison of reference and recovered representations. \"\"\" plot.plot(np.hstack((x0, x)),", "signal. \"\"\" from __future__ import print_function from builtins import input", "ylbl='Functional', fig=fig) plot.subplot(2, 2, 3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual',", "use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising", "'AutoRho': {'RsdlTarget': 1.0}}) \"\"\" Select regularization parameter $\\lambda$ by evaluating", "and $\\mathbf{s}$ is the signal to be represented. In this", "part of the SPORCO package. Details of the copyright #", "solve time: %.2fs\" % b.timer.elapsed('solve')) \"\"\" Plot comparison of reference", "\\|_1 \\;,$$ where $D$ is the dictionary, $\\mathbf{x}$ is the", "$$\\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\| D \\mathbf{x} - \\mathbf{s} \\|_2^2 +", "test signal consisting of the synthesis of the reference sparse", "of the SPORCO package. Details of the copyright # and", "opt) x = b.solve() return np.sum(np.abs(x-x0)) # Parallel evalution of", "\\mathbf{x} \\|_1 \\;,$$ where $D$ is the dictionary, $\\mathbf{x}$ is", "\"\"\" Construct random dictionary, reference random sparse representation, and test", "\"\"\" # Construct random dictionary and random sparse coefficients np.random.seed(12345)", "noise level. 
\"\"\" N = 512 # Signal size M", "sporco import plot \"\"\" Configure problem size, sparsity, and noise", "function on lmbda grid lrng = np.logspace(1, 2, 20) sprm,", "noise. \"\"\" # Construct random dictionary and random sparse coefficients", "Construct random dictionary, reference random sparse representation, and test signal", "= 4*N # Dictionary size L = 32 # Number", "\"\"\" N = 512 # Signal size M = 4*N", "\\; (1/2) \\| D \\mathbf{x} - \\mathbf{s} \\|_2^2 + \\lambda", "error for a specified $\\lambda$, and this function is evaluated", "representations. \"\"\" plot.plot(np.hstack((x0, x)), title='Sparse representation', lgnd=['Reference', 'Reconstructed']) \"\"\" Plot", "parallel by :func:`sporco.util.grid_search`. \"\"\" # Function computing reconstruction error at", "np.random.randn(N, M) x0 = np.zeros((M, 1)) si = np.random.permutation(list(range(0, M-1)))", "determined, run BPDN with verbose display of ADMM iteration statistics.", "reference representation is assumed to be known, which is not", "that evalues the BPDN recovery error for a specified $\\lambda$,", "# and user license can be found in the 'LICENSE.txt'", "options. \"\"\" opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-3,", "sparse coefficients np.random.seed(12345) D = np.random.randn(N, M) x0 = np.zeros((M,", "= D.dot(x0) s = s0 + sigma*np.random.randn(N,1) \"\"\" Set BPDN", "in recovering the sparse representation over a logarithmicaly spaced grid.", "plot.subplot(2, 2, 2) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(2, 2, 3)", "not realistic in a real application.) A function is defined", "noisy version of the signal. 
\"\"\" from __future__ import print_function", "= b.solve() print(\"BPDN solve time: %.2fs\" % b.timer.elapsed('solve')) \"\"\" Plot", "representation is assumed to be known, which is not realistic", "from __future__ import print_function from builtins import input import numpy", "level \"\"\" Construct random dictionary, reference random sparse representation, and", "Parallel evalution of error function on lmbda grid lrng =", "= prm[0] b = bpdn.BPDN(D, s, lmbda, opt) x =", "xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(2, 2, 4) plot.plot(its.Rho, xlbl='Iterations',", "in generator sigma = 0.5 # Noise level \"\"\" Construct", "# Initialise and run BPDN object for best lmbda opt['Verbose']", "# -*- coding: utf-8 -*- # This file is part", "'LICENSE.txt' file distributed # with the package. \"\"\" Basis Pursuit", "representation, and test signal consisting of the synthesis of the", "# Construct random dictionary and random sparse coefficients np.random.seed(12345) D", "1.0}}) \"\"\" Select regularization parameter $\\lambda$ by evaluating the error", "recovery error for a specified $\\lambda$, and this function is", "random dictionary and random sparse coefficients np.random.seed(12345) D = np.random.randn(N,", "estimate the reference sparse representation that generated a signal from", "defined that evalues the BPDN recovery error for a specified", "$\\lambda$ by evaluating the error in recovering the sparse representation", "\"\"\" # Function computing reconstruction error at lmbda def evalerr(prm):", "is used to estimate the reference sparse representation that generated", "xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(2, 2, 3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations',", "1) plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error', fig=fig) plot.subplot(2, 2, 2)", "import print_function from builtins import input import numpy as np", "return 
np.sum(np.abs(x-x0)) # Parallel evalution of error function on lmbda", "python # -*- coding: utf-8 -*- # This file is", "= np.zeros((M, 1)) si = np.random.permutation(list(range(0, M-1))) x0[si[0:L]] = np.random.randn(L,", "evalues the BPDN recovery error for a specified $\\lambda$, and", "of the synthesis of the reference sparse representation with additive", "s, lmbda, opt) x = b.solve() return np.sum(np.abs(x-x0)) # Parallel", "plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error', fig=fig) plot.subplot(2, 2, 2) plot.plot(its.ObjFun,", "representation over a logarithmicaly spaced grid. (The reference representation is", "s0 = D.dot(x0) s = s0 + sigma*np.random.randn(N,1) \"\"\" Set", "the package. \"\"\" Basis Pursuit DeNoising ======================= This example demonstrates", "import input import numpy as np from sporco.admm import bpdn", "2, 2) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(2, 2, 3) plot.plot(np.vstack((its.PrimalRsdl,", "random sparse representation, and test signal consisting of the synthesis", "$\\lambda$, and this function is evaluated in parallel by :func:`sporco.util.grid_search`.", "of the copyright # and user license can be found", "\"\"\" from __future__ import print_function from builtins import input import", "from builtins import input import numpy as np from sporco.admm", "problem is used to estimate the reference sparse representation that", "specified $\\lambda$, and this function is evaluated in parallel by", "4) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig) fig.show() # Wait for", "= 512 # Signal size M = 4*N # Dictionary", "fig=fig) plot.subplot(2, 2, 3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal',", "2, 4) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig) fig.show() # Wait", "with verbose display of ADMM iteration statistics. 
\"\"\" # Initialise", "is part of the SPORCO package. Details of the copyright", "the signal to be represented. In this example the BPDN", "sparse representation, and test signal consisting of the synthesis of", "representation', lgnd=['Reference', 'Reconstructed']) \"\"\" Plot lmbda error curve, functional value,", "(sfvl, lmbda)) \"\"\" Once the best $\\lambda$ has been determined,", "sparse representation over a logarithmicaly spaced grid. (The reference representation", "error: %5.2f at 𝜆 = %.2e' % (sfvl, lmbda)) \"\"\"", "plot.subplot(2, 2, 4) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig) fig.show() #", "and test signal consisting of the synthesis of the reference", "dictionary, reference random sparse representation, and test signal consisting of", "level. \"\"\" N = 512 # Signal size M =", "rho \"\"\" its = b.getitstat() fig = plot.figure(figsize=(15, 10)) plot.subplot(2,", "used to estimate the reference sparse representation that generated a", "spaced grid. (The reference representation is assumed to be known,", "run BPDN object for best lmbda opt['Verbose'] = True b", "-*- # This file is part of the SPORCO package.", "of ADMM iteration statistics. \"\"\" # Initialise and run BPDN", "D \\mathbf{x} - \\mathbf{s} \\|_2^2 + \\lambda \\| \\mathbf{x} \\|_1", "the SPORCO package. Details of the copyright # and user", "version of the signal. \"\"\" from __future__ import print_function from", "reference and recovered representations. \"\"\" plot.plot(np.hstack((x0, x)), title='Sparse representation', lgnd=['Reference',", "= plot.figure(figsize=(15, 10)) plot.subplot(2, 2, 1) plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\lambda$',", "ADMM iteration statistics. 
\"\"\" # Initialise and run BPDN object", "generated a signal from a noisy version of the signal.", "= np.logspace(1, 2, 20) sprm, sfvl, fvmx, sidx = util.grid_search(evalerr,", "500, 'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}}) \"\"\" Select regularization parameter", "opt['Verbose'] = True b = bpdn.BPDN(D, s, lmbda, opt) x", "\"\"\" its = b.getitstat() fig = plot.figure(figsize=(15, 10)) plot.subplot(2, 2,", "Noise level \"\"\" Construct random dictionary, reference random sparse representation,", "verbose display of ADMM iteration statistics. \"\"\" # Initialise and", "ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error', fig=fig) plot.subplot(2, 2, 2) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional',", "# with the package. \"\"\" Basis Pursuit DeNoising ======================= This", "with additive Gaussian noise. \"\"\" # Construct random dictionary and", "is the dictionary, $\\mathbf{x}$ is the sparse representation, and $\\mathbf{s}$", "L = 32 # Number of non-zero coefficients in generator", "random sparse coefficients np.random.seed(12345) D = np.random.randn(N, M) x0 =", "for a specified $\\lambda$, and this function is evaluated in", "\"\"\" # Initialise and run BPDN object for best lmbda", "been determined, run BPDN with verbose display of ADMM iteration", "at lmbda def evalerr(prm): lmbda = prm[0] b = bpdn.BPDN(D,", "b.timer.elapsed('solve')) \"\"\" Plot comparison of reference and recovered representations. \"\"\"", "(The reference representation is assumed to be known, which is", "signal from a noisy version of the signal. \"\"\" from", "fvmx, sidx = util.grid_search(evalerr, (lrng,)) lmbda = sprm[0] print('Minimum ℓ1", ":class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic`", "2, 1) plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error', fig=fig) plot.subplot(2, 2,", "- \\mathbf{s} \\|_2^2 + \\lambda \\| \\mathbf{x} \\|_1 \\;,$$ where", "a logarithmicaly spaced grid. 
(The reference representation is assumed to", "to be known, which is not realistic in a real", "plot.subplot(2, 2, 1) plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error', fig=fig) plot.subplot(2,", "this example the BPDN problem is used to estimate the", "and run BPDN object for best lmbda opt['Verbose'] = True", "\\mathbf{s} \\|_2^2 + \\lambda \\| \\mathbf{x} \\|_1 \\;,$$ where $D$", "the signal. \"\"\" from __future__ import print_function from builtins import", "comparison of reference and recovered representations. \"\"\" plot.plot(np.hstack((x0, x)), title='Sparse", "Signal size M = 4*N # Dictionary size L =", "M) x0 = np.zeros((M, 1)) si = np.random.permutation(list(range(0, M-1))) x0[si[0:L]]", "\\lambda \\| \\mathbf{x} \\|_1 \\;,$$ where $D$ is the dictionary,", "\\;,$$ where $D$ is the dictionary, $\\mathbf{x}$ is the sparse", "import numpy as np from sporco.admm import bpdn from sporco", "# Parallel evalution of error function on lmbda grid lrng", "plot \"\"\" Configure problem size, sparsity, and noise level. \"\"\"", "3) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(2,", "sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,)) lmbda = sprm[0] print('Minimum", "10)) plot.subplot(2, 2, 1) plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\lambda$', ylbl='Error', fig=fig)", "sigma = 0.5 # Noise level \"\"\" Construct random dictionary,", "= b.getitstat() fig = plot.figure(figsize=(15, 10)) plot.subplot(2, 2, 1) plot.plot(fvmx,", "a noisy version of the signal. 
\"\"\" from __future__ import", "import util from sporco import plot \"\"\" Configure problem size,", "that generated a signal from a noisy version of the", "of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN)", "residuals, and rho \"\"\" its = b.getitstat() fig = plot.figure(figsize=(15,", "sporco.admm import bpdn from sporco import util from sporco import", "np.logspace(1, 2, 20) sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,))", "20) sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,)) lmbda =", "-*- coding: utf-8 -*- # This file is part of", "as np from sporco.admm import bpdn from sporco import util", "real application.) A function is defined that evalues the BPDN", "reconstruction error at lmbda def evalerr(prm): lmbda = prm[0] b", "x)), title='Sparse representation', lgnd=['Reference', 'Reconstructed']) \"\"\" Plot lmbda error curve,", "where $D$ is the dictionary, $\\mathbf{x}$ is the sparse representation,", "package. \"\"\" Basis Pursuit DeNoising ======================= This example demonstrates the", "assumed to be known, which is not realistic in a", "synthesis of the reference sparse representation with additive Gaussian noise.", "lmbda, opt) x = b.solve() return np.sum(np.abs(x-x0)) # Parallel evalution", "grid lrng = np.logspace(1, 2, 20) sprm, sfvl, fvmx, sidx", "(lrng,)) lmbda = sprm[0] print('Minimum ℓ1 error: %5.2f at 𝜆", "fig=fig) plot.subplot(2, 2, 4) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig) fig.show()", "object for best lmbda opt['Verbose'] = True b = bpdn.BPDN(D,", "\"\"\" Plot comparison of reference and recovered representations. \"\"\" plot.plot(np.hstack((x0,", "\"\"\" opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 1e-3, 'AutoRho':", "file is part of the SPORCO package. Details of the", "grid. (The reference representation is assumed to be known, which", "SPORCO package. 
Details of the copyright # and user license", "sparse representation that generated a signal from a noisy version", "non-zero coefficients in generator sigma = 0.5 # Noise level", "True b = bpdn.BPDN(D, s, lmbda, opt) x = b.solve()", "of the signal. \"\"\" from __future__ import print_function from builtins", "this function is evaluated in parallel by :func:`sporco.util.grid_search`. \"\"\" #", "found in the 'LICENSE.txt' file distributed # with the package.", "sparse representation with additive Gaussian noise. \"\"\" # Construct random", "parameter $\\lambda$ by evaluating the error in recovering the sparse", "\"\"\" Select regularization parameter $\\lambda$ by evaluating the error in" ]
[ "11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u0947\\u0915\\u093E\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14,", "(, line 71 # or, line 71 try: v_1 =", "''' This class was automatically generated by a Snowball to", "self.ket = self.cursor # substring, line 70 among_var = self.find_among_b(NepaliStemmer.a_2)", "1), Among(u\"\\u0915\\u093F\", -1, 2), Among(u\"\\u092A\\u091B\\u093F\", -1, 1), Among(u\"\\u0915\\u0940\", -1, 2),", "1), Among(u\"\\u0907\\u0926\\u093E\", 16, 1), Among(u\"\\u093F\\u0926\\u093E\", 16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1),", "Among(u\"\\u0948\", -1, 2) ] a_3 = [ Among(u\"\\u0925\\u093F\\u090F\", -1, 1),", "(, line 85 # backwards, line 86 self.limit_backward = self.cursor", "among_var == 2: # (, line 59 # or, line", "if among_var == 1: # (, line 71 # or,", "try: # (, line 89 # do, line 89 v_4", "not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False # delete, line 72 if not", "0: return False # ], line 64 self.bra = self.cursor", "self.limit - self.cursor # call check_category_2, line 89 if not", "63 # [, line 64 self.ket = self.cursor # substring,", "Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1), Among(u\"\\u091B\\u094C\", -1, 1), Among(u\"\\u0907\\u091B\\u094C\", 59, 1), Among(u\"\\u090F\\u091B\\u094C\",", "1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1),", "v_1 # do, line 88 v_2 = self.limit - self.cursor", "Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u0928\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\",", "self.find_among_b(NepaliStemmer.a_0) if among_var == 0: return False # ], line", "1), Among(u\"\\u0907\\u0926\\u094B\", 49, 1), Among(u\"\\u093F\\u0926\\u094B\", 49, 1), Among(u\"\\u092F\\u094B\", -1, 1),", "1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1), Among(u\"\\u0926\\u0948\", -1, 1), 
Among(u\"\\u0907\\u0926\\u0948\", 41, 1),", "16, 1), Among(u\"\\u093F\\u0926\\u093E\", 16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1,", "# or, line 71 try: v_1 = self.limit - self.cursor", "if not self.slice_del(): return False except lab0: pass return True", "raise lab5() except lab5: pass self.cursor = self.limit - v_4", "self.limit - v_2 # literal, line 59 if not self.eq_s_b(u\"\\u0947\"):", "do, line 87 v_1 = self.limit - self.cursor try: #", "Among(u\"\\u091B\\u0941\", -1, 1), Among(u\"\\u090F\\u091B\\u0941\", 28, 1), Among(u\"\\u0947\\u091B\\u0941\", 28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\",", "# ], line 54 self.bra = self.cursor if among_var ==", "(, line 76 # [, line 77 self.ket = self.cursor", "= self.cursor if among_var == 1: # (, line 58", "line 79 if not self.slice_del(): return False return True def", "79 # delete, line 79 if not self.slice_del(): return False", "-1, 1), Among(u\"\\u0907\\u091B\", 1, 1), Among(u\"\\u090F\\u091B\", 1, 1), Among(u\"\\u093F\\u091B\", 1,", "1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u092A\\u0930\\u094D\", -1, 1), Among(u\"\\u0907\\u0938\\u094D\", -1, 1),", "call remove_category_1, line 87 if not self.__r_remove_category_1(): raise lab0() except", "True class lab0(BaseException): pass class lab1(BaseException): pass class lab2(BaseException): pass", "1), Among(u\"\\u0907\\u091B\\u094C\", 59, 1), Among(u\"\\u090F\\u091B\\u094C\", 59, 1), Among(u\"\\u093F\\u091B\\u094C\", 59, 1),", "self.slice_del(): return False return True def _stem(self): # (, line", "if not self.eq_s_b(u\"\\u0925\\u0947\"): return False except lab0: pass # delete,", "Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1), Among(u\"\\u090F\\u0915\\u0940\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\",", "71 if not self.eq_s_b(u\"\\u0925\\u0947\"): return False except lab0: pass #", "if not self.eq_s_b(u\"\\u092F\\u094C\"): raise 
lab1() raise lab0() except lab1: pass", "-1, 1), Among(u\"\\u0928\\u0947\", -1, 1), Among(u\"\\u090F\\u0915\\u0948\", -1, 1), Among(u\"\\u0947\\u0915\\u0948\", -1,", "raise lab2() except lab3: pass self.cursor = self.limit - v_2", "lab3: pass self.cursor = self.limit - v_2 # literal, line", "self.__r_check_category_2(): raise lab5() self.cursor = self.limit - v_5 # call", "-1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1), Among(u\"\\u0930\\u0924\", -1, 1), Among(u\"\\u0915\\u093E\", -1,", "Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1), Among(u\"\\u092E\\u0948\", -1, 1), Among(u\"\\u0915\\u094B\", -1, 2) ]", "1), Among(u\"\\u0947\\u0915\\u0940\", -1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1), Among(u\"\\u0925\\u0940\", -1, 1),", "# (, line 69 # [, line 70 self.ket =", "v_1 # literal, line 71 if not self.eq_s_b(u\"\\u0925\\u0947\"): return False", "self.cursor = self.limit - v_1 # do, line 88 v_2", "or, line 59 try: v_1 = self.limit - self.cursor try:", "1), Among(u\"\\u0938\\u0902\\u0917\", -1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1), Among(u\"\\u0930\\u0924\", -1, 1),", "line 88 # repeat, line 89 try: while True: try:", "-1, 1), Among(u\"\\u0907\\u092F\\u094B\", 52, 1), Among(u\"\\u092D\\u092F\\u094B\", 52, 1), Among(u\"\\u093F\\u092F\\u094B\", 52,", "self.bra = self.cursor # (, line 79 # delete, line", "1), Among(u\"\\u093F\\u091B\\u094C\", 59, 1), Among(u\"\\u0947\\u091B\\u094C\", 59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1),", "line 59 raise lab0() except lab1: pass self.cursor = self.limit", "False except lab0: pass return True def __r_check_category_2(self): # (,", "# (, line 71 # or, line 71 try: v_1", "to Python compiler It implements the stemming algorithm defined by", "Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\",", "1), 
Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u090F\\u0915\\u093E\", -1, 1),", "_stem(self): # (, line 85 # backwards, line 86 self.limit_backward", "88 v_2 = self.limit - self.cursor try: # (, line", "= self.limit - v_5 # call remove_category_2, line 89 if", "= self.limit - v_3 raise lab2() except lab3: pass except", "a_0 = [ Among(u\"\\u0932\\u093E\\u0907\", -1, 1), Among(u\"\\u0932\\u093E\\u0908\", -1, 1), Among(u\"\\u0938\\u0901\\u0917\",", "v_1 = self.limit - self.cursor try: # call remove_category_1, line", "54 self.bra = self.cursor if among_var == 1: # (,", "# (, line 88 # repeat, line 89 try: while", "pass self.cursor = self.limit - v_4 # call remove_category_3, line", "self.cursor # substring, line 64 if self.find_among_b(NepaliStemmer.a_1) == 0: return", "self.cursor = self.limit - v_2 self.cursor = self.limit_backward return True", "1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u0947\\u0915\\u094B\", -1, 1),", "Among(u\"\\u0930\\u0924\", -1, 1), Among(u\"\\u0915\\u093E\", -1, 2), Among(u\"\\u092E\\u093E\", -1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\",", "1), Among(u\"\\u0928\\u0947\\u091B\", 5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1),", "71 if not self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2() raise lab0() except lab2:", "65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u0928\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69,", "return False except lab0: pass return True def __r_check_category_2(self): #", "lab4: pass self.cursor = self.limit - v_3 raise lab2() except", "line 71 if not self.eq_s_b(u\"\\u0925\\u0947\"): return False except lab0: pass", "Snowball to Python compiler It implements the stemming algorithm defined", "89 try: while True: try: v_3 = self.limit - self.cursor", "This class was 
automatically generated by a Snowball to Python", "# (, line 58 # delete, line 58 if not", "line 77 self.bra = self.cursor # (, line 79 #", "Among(u\"\\u092D\\u092F\\u094B\", 52, 1), Among(u\"\\u093F\\u092F\\u094B\", 52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\",", "self.ket = self.cursor # substring, line 77 if self.find_among_b(NepaliStemmer.a_3) ==", "line 89 # and, line 89 v_5 = self.limit -", "] a_3 = [ Among(u\"\\u0925\\u093F\\u090F\", -1, 1), Among(u\"\\u091B\", -1, 1),", "-1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1), Among(u\"\\u090F\\u0915\\u0940\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21,", "1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1), Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1)", "# delete, line 71 if not self.slice_del(): return False elif", "# delete, line 72 if not self.slice_del(): return False return", "89 # do, line 89 v_4 = self.limit - self.cursor", "# (, line 89 # do, line 89 v_4 =", "89 # and, line 89 v_5 = self.limit - self.cursor", "= self.find_among_b(NepaliStemmer.a_2) if among_var == 0: return False # ],", "1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1),", "88 # repeat, line 89 try: while True: try: v_3", "1), Among(u\"\\u0902\", -1, 1), Among(u\"\\u0948\", -1, 2) ] a_3 =", "-1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1), Among(u\"\\u0926\\u093E\", -1, 1), Among(u\"\\u0907\\u0926\\u093E\", 16,", "44, 1), Among(u\"\\u0947\\u0915\\u094B\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1), Among(u\"\\u0926\\u094B\", -1,", "lab0: pass return True def __r_check_category_2(self): # (, line 63", "lab0() except lab1: pass self.cursor = self.limit - v_1 try:", "compiler It implements the stemming algorithm defined by a snowball", "Among(u\"\\u0907\\u091B\", 1, 1), Among(u\"\\u090F\\u091B\", 1, 1), 
Among(u\"\\u093F\\u091B\", 1, 1), Among(u\"\\u0947\\u091B\",", "1), Among(u\"\\u0926\\u0940\", -1, 1), Among(u\"\\u091B\\u0941\", -1, 1), Among(u\"\\u090F\\u091B\\u0941\", 28, 1),", "line 70 self.bra = self.cursor if among_var == 1: #", "-1, 1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u092A\\u0930\\u094D\", -1,", "self.cursor # substring, line 70 among_var = self.find_among_b(NepaliStemmer.a_2) if among_var", "(, line 88 # repeat, line 89 try: while True:", "script. ''' a_0 = [ Among(u\"\\u0932\\u093E\\u0907\", -1, 1), Among(u\"\\u0932\\u093E\\u0908\", -1,", "NepaliStemmer(BaseStemmer): ''' This class was automatically generated by a Snowball", "65, 1), Among(u\"\\u091B\\u0928\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69,", "-1, 1), Among(u\"\\u091B\", -1, 1), Among(u\"\\u0907\\u091B\", 1, 1), Among(u\"\\u090F\\u091B\", 1,", "call remove_category_3, line 89 if not self.__r_remove_category_3(): raise lab4() raise", "= self.limit - v_2 self.cursor = self.limit_backward return True class", "69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73,", "Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1), Among(u\"\\u0926\\u094B\", -1, 1), Among(u\"\\u0907\\u0926\\u094B\", 49, 1), Among(u\"\\u093F\\u0926\\u094B\",", "lab1: pass self.cursor = self.limit - v_1 try: # literal,", ".basestemmer import BaseStemmer from .among import Among class NepaliStemmer(BaseStemmer): '''", "1), Among(u\"\\u0925\\u0940\", -1, 1), Among(u\"\\u0926\\u0940\", -1, 1), Among(u\"\\u091B\\u0941\", -1, 1),", "not self.__r_check_category_2(): raise lab5() self.cursor = self.limit - v_5 #", "59 try: v_1 = self.limit - self.cursor try: # (,", "line 59 # or, line 59 try: v_2 = self.limit", "line 70 among_var = self.find_among_b(NepaliStemmer.a_2) if among_var == 0: return", 
"Among(u\"\\u0939\\u0930\\u0942\", -1, 1), Among(u\"\\u091B\\u0947\", -1, 1), Among(u\"\\u0925\\u0947\", -1, 1), Among(u\"\\u0928\\u0947\",", "lab1(BaseException): pass class lab2(BaseException): pass class lab3(BaseException): pass class lab4(BaseException):", "Among(u\"\\u091B\\u0928\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\",", "try: # (, line 59 # or, line 59 try:", "False # delete, line 72 if not self.slice_del(): return False", "1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1), Among(u\"\\u093F\\u0938\\u094D\", -1, 1),", "line 54 self.bra = self.cursor if among_var == 1: #", "not self.slice_del(): return False return True def __r_remove_category_3(self): # (,", "59 if not self.eq_s_b(u\"\\u090F\"): raise lab3() raise lab2() except lab3:", "not self.eq_s_b(u\"\\u090F\"): raise lab3() raise lab2() except lab3: pass self.cursor", "Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\",", "self.eq_s_b(u\"\\u090F\"): raise lab3() raise lab2() except lab3: pass self.cursor =", "- v_3 raise lab2() except lab3: pass except lab2: pass", "Among(u\"\\u093F\\u0926\\u0948\", 41, 1), Among(u\"\\u090F\\u0915\\u094B\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\",", "pass # delete, line 71 if not self.slice_del(): return False", "1), Among(u\"\\u091B\\u0928\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1),", "== 2: # (, line 59 # or, line 59", "71 if not self.eq_s_b(u\"\\u092F\\u094C\"): raise lab1() raise lab0() except lab1:", "2: # (, line 59 # or, line 59 try:", "47, 1), Among(u\"\\u0926\\u094B\", -1, 1), Among(u\"\\u0907\\u0926\\u094B\", 49, 1), 
Among(u\"\\u093F\\u0926\\u094B\", 49,", "Among(u\"\\u0926\\u093E\", -1, 1), Among(u\"\\u0907\\u0926\\u093E\", 16, 1), Among(u\"\\u093F\\u0926\\u093E\", 16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\",", "pass self.cursor = self.limit - v_2 self.cursor = self.limit_backward return", "1, 1), Among(u\"\\u0947\\u091B\", 1, 1), Among(u\"\\u0928\\u0947\\u091B\", 5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6,", "delete, line 72 if not self.slice_del(): return False return True", "return True class lab0(BaseException): pass class lab1(BaseException): pass class lab2(BaseException):", "[, line 77 self.ket = self.cursor # substring, line 77", "pass self.cursor = self.limit - v_1 # do, line 88", "1), Among(u\"\\u0947\\u0915\\u093E\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1), Among(u\"\\u0926\\u093E\", -1, 1),", "87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1), Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1) ] def", "not self.slice_del(): return False elif among_var == 2: # (,", "among_var == 1: # (, line 71 # or, line", "89 if not self.__r_check_category_2(): raise lab5() self.cursor = self.limit -", "elif among_var == 2: # (, line 59 # or,", "Python compiler It implements the stemming algorithm defined by a", "True: try: v_3 = self.limit - self.cursor try: # (,", "# literal, line 59 if not self.eq_s_b(u\"\\u090F\"): raise lab3() raise", "defined by a snowball script. 
''' a_0 = [ Among(u\"\\u0932\\u093E\\u0907\",", "Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\",", "Among(u\"\\u0925\\u0940\", -1, 1), Among(u\"\\u0926\\u0940\", -1, 1), Among(u\"\\u091B\\u0941\", -1, 1), Among(u\"\\u090F\\u091B\\u0941\",", "1), Among(u\"\\u0947\\u0915\\u094B\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1), Among(u\"\\u0926\\u094B\", -1, 1),", "-1, -1) ] a_2 = [ Among(u\"\\u0901\", -1, 1), Among(u\"\\u0902\",", "- self.cursor try: # literal, line 71 if not self.eq_s_b(u\"\\u092F\\u094C\"):", "lab1: pass self.cursor = self.limit - v_1 # delete, line", "lab5() self.cursor = self.limit - v_5 # call remove_category_2, line", "1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u092A\\u0930\\u094D\", -1, 1),", "self.cursor # (, line 79 # delete, line 79 if", "self.cursor if among_var == 1: # (, line 71 #", "= [ Among(u\"\\u0932\\u093E\\u0907\", -1, 1), Among(u\"\\u0932\\u093E\\u0908\", -1, 1), Among(u\"\\u0938\\u0901\\u0917\", -1,", "1), Among(u\"\\u0926\\u0948\", -1, 1), Among(u\"\\u0907\\u0926\\u0948\", 41, 1), Among(u\"\\u093F\\u0926\\u0948\", 41, 1),", "# do, line 89 v_4 = self.limit - self.cursor try:", "1: # (, line 71 # or, line 71 try:", "False except lab0: pass # delete, line 71 if not", "lab2: pass except lab1: pass self.cursor = self.limit - v_2", "self.limit - self.cursor try: # literal, line 71 if not", "1), Among(u\"\\u093F\\u0926\\u0948\", 41, 1), Among(u\"\\u090F\\u0915\\u094B\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1),", "# This file was generated automatically by the Snowball to", "1), Among(u\"\\u0926\\u094B\", -1, 1), Among(u\"\\u0907\\u0926\\u094B\", 49, 1), Among(u\"\\u093F\\u0926\\u094B\", 49, 1),", "Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 
1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\",", "literal, line 71 if not self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3() raise lab0()", "2), Among(u\"\\u092E\\u093E\", -1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1, 1), Among(u\"\\u0915\\u093F\", -1, 2),", "1), Among(u\"\\u0915\\u0948\", -1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1), Among(u\"\\u092E\\u0948\", -1, 1),", "= self.cursor return True def __r_remove_category_2(self): # (, line 69", "lab3: pass self.cursor = self.limit - v_1 # literal, line", "try: # literal, line 71 if not self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3()", "# literal, line 71 if not self.eq_s_b(u\"\\u092F\\u094C\"): raise lab1() raise", "self.limit - v_4 # call remove_category_3, line 89 if not", "-1, 1), Among(u\"\\u0939\\u0930\\u0942\", -1, 1), Among(u\"\\u091B\\u0947\", -1, 1), Among(u\"\\u0925\\u0947\", -1,", "lab1: pass self.cursor = self.limit - v_2 self.cursor = self.limit_backward", "v_4 # call remove_category_3, line 89 if not self.__r_remove_category_3(): raise", "line 72 if not self.slice_del(): return False return True def", "89 v_5 = self.limit - self.cursor # call check_category_2, line", "line 86 # do, line 87 v_1 = self.limit -", "# (, line 79 # delete, line 79 if not", "Among(u\"\\u090F\\u091B\\u0941\", 28, 1), Among(u\"\\u0947\\u091B\\u0941\", 28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1), Among(u\"\\u0928\\u0941\",", "(, line 59 # or, line 59 try: v_2 =", "1), Among(u\"\\u0926\\u093E\", -1, 1), Among(u\"\\u0907\\u0926\\u093E\", 16, 1), Among(u\"\\u093F\\u0926\\u093E\", 16, 1),", "-1, 1), Among(u\"\\u0948\", -1, 2) ] a_3 = [ Among(u\"\\u0925\\u093F\\u090F\",", "raise lab0() except lab1: pass self.cursor = self.limit - v_1", "__r_remove_category_1(self): # (, line 53 # [, line 54 self.ket", "try: v_1 = self.limit - self.cursor try: # literal, line", "raise lab2() except lab3: pass except lab2: pass except lab1:", "1), 
Among(u\"\\u0947\\u091B\\u0941\", 28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1), Among(u\"\\u0928\\u0941\", -1, 1),", "Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1), Among(u\"\\u0926\\u0948\", -1, 1), Among(u\"\\u0907\\u0926\\u0948\", 41, 1), Among(u\"\\u093F\\u0926\\u0948\",", "automatically by the Snowball to Python compiler # http://snowballstem.org/ from", "# substring, line 54 among_var = self.find_among_b(NepaliStemmer.a_0) if among_var ==", "algorithm defined by a snowball script. ''' a_0 = [", "raise lab0() except lab0: pass self.cursor = self.limit - v_1", "line 89 if not self.__r_remove_category_2(): raise lab5() except lab5: pass", "-1), Among(u\"\\u0902\", -1, -1), Among(u\"\\u0948\", -1, -1) ] a_2 =", "1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u0947\\u0915\\u0940\", -1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1),", "85 # backwards, line 86 self.limit_backward = self.cursor self.cursor =", "-1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u0947\\u0915\\u093E\", -1,", "False # ], line 70 self.bra = self.cursor if among_var", "1), Among(u\"\\u090F\\u0915\\u093E\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1),", "1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1),", "70 self.ket = self.cursor # substring, line 70 among_var =", "pass except lab1: pass self.cursor = self.limit - v_2 self.cursor", "a_2 = [ Among(u\"\\u0901\", -1, 1), Among(u\"\\u0902\", -1, 1), Among(u\"\\u0948\",", "-1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65,", "not self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3() raise lab0() except lab3: pass self.cursor", "line 89 v_4 = self.limit - self.cursor try: # (,", "-1, 
1), Among(u\"\\u090F\\u0915\\u0940\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21,", "# [, line 54 self.ket = self.cursor # substring, line", "1) ] def __r_remove_category_1(self): # (, line 53 # [,", "-1, -1), Among(u\"\\u0902\", -1, -1), Among(u\"\\u0948\", -1, -1) ] a_2", "line 59 try: v_1 = self.limit - self.cursor try: #", "Among(u\"\\u090F\\u0915\\u093E\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u0947\\u0915\\u093E\",", "file was generated automatically by the Snowball to Python compiler", "Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\",", "line 77 self.ket = self.cursor # substring, line 77 if", "49, 1), Among(u\"\\u093F\\u0926\\u094B\", 49, 1), Among(u\"\\u092F\\u094B\", -1, 1), Among(u\"\\u0907\\u092F\\u094B\", 52,", "not self.__r_remove_category_1(): raise lab0() except lab0: pass self.cursor = self.limit", "= self.limit - self.cursor try: # literal, line 59 if", "__r_remove_category_2(self): # (, line 69 # [, line 70 self.ket", "-1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u0947\\u0915\\u094B\", -1,", "line 69 # [, line 70 self.ket = self.cursor #", "0: return False # ], line 70 self.bra = self.cursor", "# substring, line 70 among_var = self.find_among_b(NepaliStemmer.a_2) if among_var ==", "# (, line 89 # and, line 89 v_5 =", "line 77 if self.find_among_b(NepaliStemmer.a_3) == 0: return False # ],", "-1, 1), Among(u\"\\u0915\\u0940\", -1, 2), Among(u\"\\u0932\\u0947\", -1, 1), Among(u\"\\u0915\\u0948\", -1,", "v_2 = self.limit - self.cursor try: # (, line 88", "pass # (, line 59 raise lab0() except lab1: pass", "self.limit_backward = self.cursor self.cursor = self.limit # (, line 86", "self.cursor try: 
# (, line 89 # and, line 89", "pass class lab1(BaseException): pass class lab2(BaseException): pass class lab3(BaseException): pass", "was automatically generated by a Snowball to Python compiler It", "1), Among(u\"\\u090F\\u091B\\u094C\", 59, 1), Among(u\"\\u093F\\u091B\\u094C\", 59, 1), Among(u\"\\u0947\\u091B\\u094C\", 59, 1),", "lab0() except lab0: pass self.cursor = self.limit - v_1 #", "6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1,", "59, 1), Among(u\"\\u0947\\u091B\\u094C\", 59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1), Among(u\"\\u092F\\u094C\", -1,", "- v_2 # literal, line 59 if not self.eq_s_b(u\"\\u0947\"): raise", "repeat, line 89 try: while True: try: v_3 = self.limit", "import BaseStemmer from .among import Among class NepaliStemmer(BaseStemmer): ''' This", "1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u0928\\u094D\", -1, 1),", "lab0(BaseException): pass class lab1(BaseException): pass class lab2(BaseException): pass class lab3(BaseException):", "-1), Among(u\"\\u0948\", -1, -1) ] a_2 = [ Among(u\"\\u0901\", -1,", "== 1: # (, line 71 # or, line 71", "59 if not self.slice_del(): return False except lab0: pass return", "self.limit - self.cursor try: # (, line 89 # do,", "1), Among(u\"\\u093F\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1),", "1), Among(u\"\\u090F\\u091B\", 1, 1), Among(u\"\\u093F\\u091B\", 1, 1), Among(u\"\\u0947\\u091B\", 1, 1),", "71 # or, line 71 try: v_1 = self.limit -", "77 self.bra = self.cursor # (, line 79 # delete,", "if not self.slice_del(): return False elif among_var == 2: #", "literal, line 71 if not self.eq_s_b(u\"\\u092F\\u094C\"): raise lab1() raise lab0()", "= self.limit - self.cursor try: # literal, line 71 if", "-1, 1), Among(u\"\\u0907\\u0926\\u093E\", 
16, 1), Among(u\"\\u093F\\u0926\\u093E\", 16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1,", "59 try: v_2 = self.limit - self.cursor try: # literal,", "line 64 self.ket = self.cursor # substring, line 64 if", "self.limit - v_1 # literal, line 71 if not self.eq_s_b(u\"\\u0925\\u0947\"):", "= self.limit - v_4 # call remove_category_3, line 89 if", "try: v_3 = self.limit - self.cursor try: # (, line", "return False # ], line 54 self.bra = self.cursor if", "lab4() raise lab3() except lab4: pass self.cursor = self.limit -", "-1) ] a_2 = [ Among(u\"\\u0901\", -1, 1), Among(u\"\\u0902\", -1,", "Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u090F\\u0915\\u093E\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\",", "a_3 = [ Among(u\"\\u0925\\u093F\\u090F\", -1, 1), Among(u\"\\u091B\", -1, 1), Among(u\"\\u0907\\u091B\",", "line 59 if not self.eq_s_b(u\"\\u0947\"): raise lab1() except lab2: pass", "59 # or, line 59 try: v_1 = self.limit -", "5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1,", "1, 1), Among(u\"\\u0928\\u0947\\u091B\", 5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1,", "self.limit - self.cursor try: # (, line 59 # or,", "Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1), Among(u\"\\u093F\\u0938\\u094D\",", "= self.limit - v_1 try: # literal, line 71 if", "71 if not self.slice_del(): return False elif among_var == 2:", "if not self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2() raise lab0() except lab2: pass", "delete, line 58 if not self.slice_del(): return False elif among_var", "line 64 self.bra = self.cursor return True def __r_remove_category_2(self): #", "__r_check_category_2(self): # (, line 63 # [, line 64 self.ket", "or, line 71 
try: v_1 = self.limit - self.cursor try:", "Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u0947\\u0915\\u094B\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\",", "pass self.cursor = self.limit - v_1 try: # literal, line", "self.__r_remove_category_1(): raise lab0() except lab0: pass self.cursor = self.limit -", "line 71 # or, line 71 try: v_1 = self.limit", "by a snowball script. ''' a_0 = [ Among(u\"\\u0932\\u093E\\u0907\", -1,", "(, line 59 raise lab0() except lab1: pass self.cursor =", "-1, 1), Among(u\"\\u0947\\u0915\\u0948\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1), Among(u\"\\u0926\\u0948\", -1,", "try: # (, line 88 # repeat, line 89 try:", "remove_category_3, line 89 if not self.__r_remove_category_3(): raise lab4() raise lab3()", "pass except lab2: pass except lab1: pass self.cursor = self.limit", "self.cursor try: # (, line 59 # or, line 59", "Among(u\"\\u092A\\u091B\\u093F\", -1, 1), Among(u\"\\u0915\\u0940\", -1, 2), Among(u\"\\u0932\\u0947\", -1, 1), Among(u\"\\u0915\\u0948\",", "if among_var == 1: # (, line 58 # delete,", "pass return True def __r_check_category_2(self): # (, line 63 #", "line 59 try: v_2 = self.limit - self.cursor try: #", "return False # ], line 64 self.bra = self.cursor return", "= self.limit - v_2 # literal, line 59 if not", "not self.slice_del(): return False return True def _stem(self): # (,", "59 raise lab0() except lab1: pass self.cursor = self.limit -", "lab2(BaseException): pass class lab3(BaseException): pass class lab4(BaseException): pass class lab5(BaseException):", "== 0: return False # ], line 64 self.bra =", "http://snowballstem.org/ from .basestemmer import BaseStemmer from .among import Among class", "- self.cursor try: # (, line 89 # and, line", "self.limit - v_2 self.cursor = self.limit_backward return True class lab0(BaseException):", "[ Among(u\"\\u0925\\u093F\\u090F\", -1, 1), Among(u\"\\u091B\", -1, 1), Among(u\"\\u0907\\u091B\", 
1, 1),", "Among(u\"\\u093F\\u092F\\u094B\", 52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\",", "64 self.ket = self.cursor # substring, line 64 if self.find_among_b(NepaliStemmer.a_1)", "1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1), Among(u\"\\u090F\\u0915\\u0940\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1),", "among_var == 1: # (, line 58 # delete, line", "= self.cursor # substring, line 64 if self.find_among_b(NepaliStemmer.a_1) == 0:", "line 70 self.ket = self.cursor # substring, line 70 among_var", "True def __r_remove_category_2(self): # (, line 69 # [, line", "- self.cursor try: # (, line 88 # repeat, line", "self.slice_del(): return False except lab0: pass return True def __r_check_category_2(self):", "Among(u\"\\u0907\\u091B\\u094C\", 59, 1), Among(u\"\\u090F\\u091B\\u094C\", 59, 1), Among(u\"\\u093F\\u091B\\u094C\", 59, 1), Among(u\"\\u0947\\u091B\\u094C\",", "Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1), Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1) ]", "return True def __r_remove_category_3(self): # (, line 76 # [,", "- v_4 # call remove_category_3, line 89 if not self.__r_remove_category_3():", "1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1), Among(u\"\\u091B\\u094C\", -1, 1),", "among_var == 0: return False # ], line 54 self.bra", "return False return True def __r_remove_category_3(self): # (, line 76", "# call remove_category_3, line 89 if not self.__r_remove_category_3(): raise lab4()", "line 72 if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False # delete, line", "if not self.__r_remove_category_1(): raise lab0() except lab0: pass self.cursor =", "try: # literal, line 59 if not self.eq_s_b(u\"\\u090F\"): raise lab3()", "Among(u\"\\u091B\\u0938\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1), 
Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\",", "39, 1), Among(u\"\\u0926\\u0948\", -1, 1), Among(u\"\\u0907\\u0926\\u0948\", 41, 1), Among(u\"\\u093F\\u0926\\u0948\", 41,", "lab5() except lab5: pass self.cursor = self.limit - v_4 #", "by the Snowball to Python compiler # http://snowballstem.org/ from .basestemmer", "-1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u0947\\u0915\\u0940\", -1,", "1), Among(u\"\\u091B\\u0947\", -1, 1), Among(u\"\\u0925\\u0947\", -1, 1), Among(u\"\\u0928\\u0947\", -1, 1),", "self.cursor = self.limit - v_1 try: # literal, line 71", "1, 1), Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u090F\\u0915\\u093E\", -1,", "1), Among(u\"\\u090F\\u0915\\u0948\", -1, 1), Among(u\"\\u0947\\u0915\\u0948\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1),", "line 54 self.ket = self.cursor # substring, line 54 among_var", "line 89 try: while True: try: v_3 = self.limit -", "Among(u\"\\u0907\\u0926\\u0948\", 41, 1), Among(u\"\\u093F\\u0926\\u0948\", 41, 1), Among(u\"\\u090F\\u0915\\u094B\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\",", "self.cursor try: # call remove_category_1, line 87 if not self.__r_remove_category_1():", "def _stem(self): # (, line 85 # backwards, line 86", "except lab0: pass self.cursor = self.limit - v_1 # do,", "self.cursor = self.limit - v_3 raise lab2() except lab3: pass", "2), Among(u\"\\u092A\\u091B\\u093F\", -1, 1), Among(u\"\\u0915\\u0940\", -1, 2), Among(u\"\\u0932\\u0947\", -1, 1),", "1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u0947\\u0915\\u093E\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1),", "Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\",", "try: # 
call remove_category_1, line 87 if not self.__r_remove_category_1(): raise", "pass class lab2(BaseException): pass class lab3(BaseException): pass class lab4(BaseException): pass", "Among(u\"\\u091B\", -1, 1), Among(u\"\\u0907\\u091B\", 1, 1), Among(u\"\\u090F\\u091B\", 1, 1), Among(u\"\\u093F\\u091B\",", "1), Among(u\"\\u091B\", -1, 1), Among(u\"\\u0907\\u091B\", 1, 1), Among(u\"\\u090F\\u091B\", 1, 1),", "76 # [, line 77 self.ket = self.cursor # substring,", "= [ Among(u\"\\u0901\", -1, 1), Among(u\"\\u0902\", -1, 1), Among(u\"\\u0948\", -1,", "-1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1), Among(u\"\\u0925\\u0940\", -1, 1), Among(u\"\\u0926\\u0940\", -1,", "= self.cursor if among_var == 1: # (, line 71", "Snowball to Python compiler # http://snowballstem.org/ from .basestemmer import BaseStemmer", "-1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81,", "# or, line 59 try: v_2 = self.limit - self.cursor", "from .among import Among class NepaliStemmer(BaseStemmer): ''' This class was", "try: # literal, line 71 if not self.eq_s_b(u\"\\u092F\\u094C\"): raise lab1()", "= self.cursor # (, line 79 # delete, line 79", "lab2: pass self.cursor = self.limit - v_1 try: # literal,", "if self.find_among_b(NepaliStemmer.a_1) == 0: return False # ], line 64", "generated by a Snowball to Python compiler It implements the", "1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u0947\\u0915\\u0940\", -1, 1),", "False return True def __r_remove_category_3(self): # (, line 76 #", "Among(u\"\\u0947\\u091B\", 1, 1), Among(u\"\\u0928\\u0947\\u091B\", 5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\",", "Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1), Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1) ] def __r_remove_category_1(self): #", "line 85 # backwards, line 86 
self.limit_backward = self.cursor self.cursor", "from .basestemmer import BaseStemmer from .among import Among class NepaliStemmer(BaseStemmer):", "v_5 = self.limit - self.cursor # call check_category_2, line 89", "1: # (, line 58 # delete, line 58 if", "False elif among_var == 2: # (, line 72 #", "Among(u\"\\u090F\\u0915\\u0940\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u0947\\u0915\\u0940\",", "lab2() raise lab0() except lab2: pass self.cursor = self.limit -", "-1, 1), Among(u\"\\u0915\\u0948\", -1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1), Among(u\"\\u092E\\u0948\", -1,", "It implements the stemming algorithm defined by a snowball script.", "# ], line 70 self.bra = self.cursor if among_var ==", "== 2: # (, line 72 # literal, line 72", "1), Among(u\"\\u0907\\u091B\", 1, 1), Among(u\"\\u090F\\u091B\", 1, 1), Among(u\"\\u093F\\u091B\", 1, 1),", "28, 1), Among(u\"\\u0947\\u091B\\u0941\", 28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1), Among(u\"\\u0928\\u0941\", -1,", "while True: try: v_3 = self.limit - self.cursor try: #", "self.cursor try: # literal, line 71 if not self.eq_s_b(u\"\\u092F\\u094C\"): raise", "if not self.slice_del(): return False return True def _stem(self): #", "1), Among(u\"\\u093F\\u091B\", 1, 1), Among(u\"\\u0947\\u091B\", 1, 1), Among(u\"\\u0928\\u0947\\u091B\", 5, 1),", "1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u0947\\u0915\\u093E\", -1, 1),", "1), Among(u\"\\u092F\\u094B\", -1, 1), Among(u\"\\u0907\\u092F\\u094B\", 52, 1), Among(u\"\\u092D\\u092F\\u094B\", 52, 1),", "Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1), Among(u\"\\u093F\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\",", "except lab1: pass self.cursor = self.limit - v_1 # delete,", "86 # do, line 87 v_1 = self.limit - self.cursor", "line 89 # 
do, line 89 v_4 = self.limit -", "False elif among_var == 2: # (, line 59 #", "], line 64 self.bra = self.cursor return True def __r_remove_category_2(self):", "-1, -1), Among(u\"\\u0948\", -1, -1) ] a_2 = [ Among(u\"\\u0901\",", "This file was generated automatically by the Snowball to Python", "Among(u\"\\u0915\\u0940\", -1, 2), Among(u\"\\u0932\\u0947\", -1, 1), Among(u\"\\u0915\\u0948\", -1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\",", "30, 1), Among(u\"\\u0928\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0942\", -1,", "-1, 1), Among(u\"\\u0926\\u0940\", -1, 1), Among(u\"\\u091B\\u0941\", -1, 1), Among(u\"\\u090F\\u091B\\u0941\", 28,", "2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1), Among(u\"\\u092E\\u0948\", -1, 1), Among(u\"\\u0915\\u094B\", -1, 2)", "Among(u\"\\u0901\", -1, -1), Among(u\"\\u0902\", -1, -1), Among(u\"\\u0948\", -1, -1) ]", "except lab5: pass self.cursor = self.limit - v_4 # call", "77 if self.find_among_b(NepaliStemmer.a_3) == 0: return False # ], line", "line 58 # delete, line 58 if not self.slice_del(): return", "Among(u\"\\u0901\", -1, 1), Among(u\"\\u0902\", -1, 1), Among(u\"\\u0948\", -1, 2) ]", "1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1), Among(u\"\\u090F\\u0915\\u0940\", -1, 1),", "raise lab0() except lab2: pass self.cursor = self.limit - v_1", "literal, line 72 if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False # delete,", "14, 1), Among(u\"\\u0926\\u093E\", -1, 1), Among(u\"\\u0907\\u0926\\u093E\", 16, 1), Among(u\"\\u093F\\u0926\\u093E\", 16,", "raise lab1() except lab2: pass # (, line 59 raise", "# or, line 59 try: v_1 = self.limit - self.cursor", "58 # delete, line 58 if not self.slice_del(): return False", "stemming algorithm defined by a snowball script. 
''' a_0 =", "try: v_2 = self.limit - self.cursor try: # literal, line", "# http://snowballstem.org/ from .basestemmer import BaseStemmer from .among import Among", "line 54 among_var = self.find_among_b(NepaliStemmer.a_0) if among_var == 0: return", "v_3 = self.limit - self.cursor try: # (, line 89", "lab2: pass # (, line 59 raise lab0() except lab1:", "- v_1 try: # literal, line 71 if not self.eq_s_b(u\"\\u091B\\u094C\"):", "Among(u\"\\u092F\\u094C\", -1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\",", "1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1),", "return True def _stem(self): # (, line 85 # backwards,", "Among(u\"\\u093F\\u091B\\u094C\", 59, 1), Among(u\"\\u0947\\u091B\\u094C\", 59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1), Among(u\"\\u092F\\u094C\",", "72 if not self.slice_del(): return False return True def __r_remove_category_3(self):", "-1, 1), Among(u\"\\u092A\\u0930\\u094D\", -1, 1), Among(u\"\\u0907\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79,", "Among(u\"\\u090F\\u091B\", 1, 1), Among(u\"\\u093F\\u091B\", 1, 1), Among(u\"\\u0947\\u091B\", 1, 1), Among(u\"\\u0928\\u0947\\u091B\",", "line 89 if not self.__r_check_category_2(): raise lab5() self.cursor = self.limit", "1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1, 1), Among(u\"\\u0915\\u093F\", -1, 2), Among(u\"\\u092A\\u091B\\u093F\", -1, 1),", "except lab0: pass # delete, line 71 if not self.slice_del():", "line 71 if not self.eq_s_b(u\"\\u092F\\u094C\"): raise lab1() raise lab0() except", "v_1 = self.limit - self.cursor try: # (, line 59", "79, 1), Among(u\"\\u091B\\u0938\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81,", "-1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", 
-1, 1), Among(u\"\\u0915\\u093F\", -1, 2), Among(u\"\\u092A\\u091B\\u093F\", -1,", "if not self.__r_check_category_2(): raise lab5() self.cursor = self.limit - v_5", "- v_2 self.cursor = self.limit_backward return True class lab0(BaseException): pass", "Among class NepaliStemmer(BaseStemmer): ''' This class was automatically generated by", "1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1),", "1), Among(u\"\\u091B\\u0938\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1),", "# ], line 77 self.bra = self.cursor # (, line", "# and, line 89 v_5 = self.limit - self.cursor #", "line 89 if not self.__r_remove_category_3(): raise lab4() raise lab3() except", "-1, 1), Among(u\"\\u0938\\u0902\\u0917\", -1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1), Among(u\"\\u0930\\u0924\", -1,", "77 self.ket = self.cursor # substring, line 77 if self.find_among_b(NepaliStemmer.a_3)", "2) ] a_1 = [ Among(u\"\\u0901\", -1, -1), Among(u\"\\u0902\", -1,", "substring, line 64 if self.find_among_b(NepaliStemmer.a_1) == 0: return False #", "True def __r_remove_category_3(self): # (, line 76 # [, line", "self.cursor # substring, line 54 among_var = self.find_among_b(NepaliStemmer.a_0) if among_var", "55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1), Among(u\"\\u091B\\u094C\", -1,", "# literal, line 59 if not self.eq_s_b(u\"\\u0947\"): raise lab1() except", "(, line 59 # or, line 59 try: v_1 =", "import Among class NepaliStemmer(BaseStemmer): ''' This class was automatically generated", "Among(u\"\\u0925\\u0947\", -1, 1), Among(u\"\\u0928\\u0947\", -1, 1), Among(u\"\\u090F\\u0915\\u0948\", -1, 1), Among(u\"\\u0947\\u0915\\u0948\",", "[ Among(u\"\\u0901\", -1, 1), Among(u\"\\u0902\", -1, 1), Among(u\"\\u0948\", -1, 2)", "# (, line 63 # [, line 64 self.ket =", 
"self.bra = self.cursor if among_var == 1: # (, line", "self.limit - self.cursor try: # (, line 88 # repeat,", "snowball script. ''' a_0 = [ Among(u\"\\u0932\\u093E\\u0907\", -1, 1), Among(u\"\\u0932\\u093E\\u0908\",", "-1, 1), Among(u\"\\u0902\", -1, 1), Among(u\"\\u0948\", -1, 2) ] a_3", "Among(u\"\\u0948\", -1, -1) ] a_2 = [ Among(u\"\\u0901\", -1, 1),", "def __r_remove_category_3(self): # (, line 76 # [, line 77", "59 if not self.eq_s_b(u\"\\u0947\"): raise lab1() except lab2: pass #", "True def _stem(self): # (, line 85 # backwards, line", "BaseStemmer from .among import Among class NepaliStemmer(BaseStemmer): ''' This class", "-1, 1), Among(u\"\\u0925\\u0940\", -1, 1), Among(u\"\\u0926\\u0940\", -1, 1), Among(u\"\\u091B\\u0941\", -1,", "elif among_var == 2: # (, line 72 # literal,", "except lab3: pass self.cursor = self.limit - v_1 # literal,", "1), Among(u\"\\u0947\\u091B\\u094C\", 59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1), Among(u\"\\u092F\\u094C\", -1, 1),", "- v_1 try: # literal, line 71 if not self.eq_s_b(u\"\\u0928\\u094C\"):", "self.cursor try: # (, line 89 # do, line 89", "if among_var == 0: return False # ], line 70", "line 71 if not self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3() raise lab0() except", "except lab2: pass self.cursor = self.limit - v_1 try: #", "-1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1), Among(u\"\\u091B\\u0938\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81,", "1), Among(u\"\\u0915\\u094B\", -1, 2) ] a_1 = [ Among(u\"\\u0901\", -1,", "self.cursor # call check_category_2, line 89 if not self.__r_check_category_2(): raise", "1), Among(u\"\\u0948\", -1, 2) ] a_3 = [ Among(u\"\\u0925\\u093F\\u090F\", -1,", "lab0() except lab2: pass self.cursor = self.limit - v_1 try:", "line 76 # [, line 77 self.ket = self.cursor #", "Among(u\"\\u0928\\u0947\\u091B\", 5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1), 
Among(u\"\\u093F\\u0928\\u094D\\u091B\",", "== 0: return False # ], line 54 self.bra =", "-1, 1), Among(u\"\\u0907\\u091B\\u094C\", 59, 1), Among(u\"\\u090F\\u091B\\u094C\", 59, 1), Among(u\"\\u093F\\u091B\\u094C\", 59,", "self.slice_del(): return False elif among_var == 2: # (, line", "# (, line 59 # or, line 59 try: v_1", "self.ket = self.cursor # substring, line 54 among_var = self.find_among_b(NepaliStemmer.a_0)", "= self.limit - self.cursor try: # (, line 89 #", "1), Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1),", "return False elif among_var == 2: # (, line 72", "1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1), Among(u\"\\u091B\\u0938\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1),", "Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u0928\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\",", "pass self.cursor = self.limit - v_2 # literal, line 59", "1), Among(u\"\\u093F\\u092F\\u094B\", 52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55, 1),", "Among(u\"\\u0947\\u0915\\u093E\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1), Among(u\"\\u0926\\u093E\", -1, 1), Among(u\"\\u0907\\u0926\\u093E\",", "return False return True def _stem(self): # (, line 85", "v_1 # delete, line 59 if not self.slice_del(): return False", "-1, 1), Among(u\"\\u0915\\u093E\", -1, 2), Among(u\"\\u092E\\u093E\", -1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1,", "self.limit_backward return True class lab0(BaseException): pass class lab1(BaseException): pass class", "literal, line 59 if not self.eq_s_b(u\"\\u090F\"): raise lab3() raise lab2()", "1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1), Among(u\"\\u0930\\u0924\", -1, 1), Among(u\"\\u0915\\u093E\", -1, 2),", "not 
self.__r_remove_category_2(): raise lab5() except lab5: pass self.cursor = self.limit", "Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u0947\\u0915\\u094B\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1), Among(u\"\\u0926\\u094B\",", "52, 1), Among(u\"\\u092D\\u092F\\u094B\", 52, 1), Among(u\"\\u093F\\u092F\\u094B\", 52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55,", "72 if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False # delete, line 72", "return False # ], line 77 self.bra = self.cursor #", "line 79 # delete, line 79 if not self.slice_del(): return", "self.cursor = self.limit - v_1 # literal, line 71 if", "= self.limit - self.cursor try: # (, line 59 #", "# [, line 77 self.ket = self.cursor # substring, line", "Among(u\"\\u093F\\u0926\\u094B\", 49, 1), Among(u\"\\u092F\\u094B\", -1, 1), Among(u\"\\u0907\\u092F\\u094B\", 52, 1), Among(u\"\\u092D\\u092F\\u094B\",", "Among(u\"\\u0938\\u0902\\u0917\", -1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1), Among(u\"\\u0930\\u0924\", -1, 1), Among(u\"\\u0915\\u093E\",", "55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1), Among(u\"\\u091B\\u094C\", -1, 1), Among(u\"\\u0907\\u091B\\u094C\", 59,", "54 among_var = self.find_among_b(NepaliStemmer.a_0) if among_var == 0: return False", "except lab4: pass self.cursor = self.limit - v_3 raise lab2()", "self.cursor try: # literal, line 59 if not self.eq_s_b(u\"\\u090F\"): raise", "self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3() raise lab0() except lab3: pass self.cursor =", "54 self.ket = self.cursor # substring, line 54 among_var =", "self.limit - self.cursor try: # (, line 89 # and,", "class lab1(BaseException): pass class lab2(BaseException): pass class lab3(BaseException): pass class", "# backwards, line 86 self.limit_backward = self.cursor self.cursor = self.limit", "85, 1), Among(u\"\\u093F\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1,", 
"-1, 1), Among(u\"\\u0939\\u0930\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0942\", -1, 1), Among(u\"\\u091B\\u0947\", -1,", "Among(u\"\\u093F\\u0926\\u093E\", 16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1), Among(u\"\\u090F\\u0915\\u0940\",", "lab3() except lab4: pass self.cursor = self.limit - v_3 raise", "return False except lab0: pass # delete, line 71 if", "1), Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1) ] def __r_remove_category_1(self): # (, line", "line 71 try: v_1 = self.limit - self.cursor try: #", "Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1), Among(u\"\\u090F\\u0915\\u0940\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\",", "1), Among(u\"\\u0907\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1), Among(u\"\\u091B\\u0938\\u094D\", -1, 1),", "not self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2() raise lab0() except lab2: pass self.cursor", "1), Among(u\"\\u092A\\u0930\\u094D\", -1, 1), Among(u\"\\u0907\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1),", "line 59 # or, line 59 try: v_1 = self.limit", "backwards, line 86 self.limit_backward = self.cursor self.cursor = self.limit #", "1), Among(u\"\\u092E\\u0948\", -1, 1), Among(u\"\\u0915\\u094B\", -1, 2) ] a_1 =", "Among(u\"\\u0925\\u093F\\u090F\", -1, 1), Among(u\"\\u091B\", -1, 1), Among(u\"\\u0907\\u091B\", 1, 1), Among(u\"\\u090F\\u091B\",", "1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1), Among(u\"\\u0925\\u0940\", -1, 1), Among(u\"\\u0926\\u0940\", -1, 1),", ".among import Among class NepaliStemmer(BaseStemmer): ''' This class was automatically", "# substring, line 64 if self.find_among_b(NepaliStemmer.a_1) == 0: return False", "lab0: pass # delete, line 71 if not self.slice_del(): return", "-1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1), 
Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1,", "1), Among(u\"\\u0915\\u093E\", -1, 2), Among(u\"\\u092E\\u093E\", -1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1, 1),", "49, 1), Among(u\"\\u092F\\u094B\", -1, 1), Among(u\"\\u0907\\u092F\\u094B\", 52, 1), Among(u\"\\u092D\\u092F\\u094B\", 52,", "# (, line 85 # backwards, line 86 self.limit_backward =", "(, line 86 # do, line 87 v_1 = self.limit", "[, line 54 self.ket = self.cursor # substring, line 54", "= self.cursor self.cursor = self.limit # (, line 86 #", "self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False # delete, line 72 if not self.slice_del():", "Among(u\"\\u0928\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0942\", -1, 1), Among(u\"\\u091B\\u0947\",", "lab0: pass self.cursor = self.limit - v_1 # do, line", "substring, line 54 among_var = self.find_among_b(NepaliStemmer.a_0) if among_var == 0:", "substring, line 77 if self.find_among_b(NepaliStemmer.a_3) == 0: return False #", "literal, line 59 if not self.eq_s_b(u\"\\u0947\"): raise lab1() except lab2:", "(, line 72 # literal, line 72 if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"):", "-1, 2), Among(u\"\\u092A\\u091B\\u093F\", -1, 1), Among(u\"\\u0915\\u0940\", -1, 2), Among(u\"\\u0932\\u0947\", -1,", "] def __r_remove_category_1(self): # (, line 53 # [, line", "line 71 if not self.slice_del(): return False elif among_var ==", "if not self.slice_del(): return False return True def __r_remove_category_3(self): #", "Among(u\"\\u092E\\u0948\", -1, 1), Among(u\"\\u0915\\u094B\", -1, 2) ] a_1 = [", "v_3 raise lab2() except lab3: pass except lab2: pass except", "self.eq_s_b(u\"\\u0925\\u0947\"): return False except lab0: pass # delete, line 71", "1), Among(u\"\\u0938\\u0901\\u0917\", -1, 1), Among(u\"\\u0938\\u0902\\u0917\", -1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1),", "check_category_2, line 89 if not self.__r_check_category_2(): raise lab5() self.cursor =", "89 if not 
self.__r_remove_category_2(): raise lab5() except lab5: pass self.cursor", "Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1), Among(u\"\\u0928\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0942\",", "self.cursor = self.limit - v_5 # call remove_category_2, line 89", "Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u092A\\u0930\\u094D\", -1, 1), Among(u\"\\u0907\\u0938\\u094D\",", "= self.limit - v_1 # do, line 88 v_2 =", "Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\",", "== 0: return False # ], line 70 self.bra =", "Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u0947\\u0915\\u093E\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1), Among(u\"\\u0926\\u093E\",", "Among(u\"\\u092A\\u0930\\u094D\", -1, 1), Among(u\"\\u0907\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1), Among(u\"\\u091B\\u0938\\u094D\",", "lab5: pass self.cursor = self.limit - v_4 # call remove_category_3,", "1), Among(u\"\\u090F\\u091B\\u0941\", 28, 1), Among(u\"\\u0947\\u091B\\u0941\", 28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1),", "79 if not self.slice_del(): return False return True def _stem(self):", "to Python compiler # http://snowballstem.org/ from .basestemmer import BaseStemmer from", "-1, 2), Among(u\"\\u0932\\u0947\", -1, 1), Among(u\"\\u0915\\u0948\", -1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1,", "1), Among(u\"\\u0939\\u0930\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0942\", -1, 1), Among(u\"\\u091B\\u0947\", -1, 1),", "1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1),", "Among(u\"\\u0939\\u0930\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0942\", -1, 1), 
Among(u\"\\u091B\\u0947\", -1, 1), Among(u\"\\u0925\\u0947\",", "] a_2 = [ Among(u\"\\u0901\", -1, 1), Among(u\"\\u0902\", -1, 1),", "delete, line 59 if not self.slice_del(): return False except lab0:", "self.cursor return True def __r_remove_category_2(self): # (, line 69 #", "69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1,", "69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69,", "Among(u\"\\u0915\\u093E\", -1, 2), Among(u\"\\u092E\\u093E\", -1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1, 1), Among(u\"\\u0915\\u093F\",", "-1, 1) ] def __r_remove_category_1(self): # (, line 53 #", "1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1),", "1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1),", "v_1 try: # literal, line 71 if not self.eq_s_b(u\"\\u0928\\u094C\"): raise", "was generated automatically by the Snowball to Python compiler #", "- self.cursor try: # (, line 89 # do, line", "self.cursor = self.limit # (, line 86 # do, line", "65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u0928\\u094D\", -1,", "= [ Among(u\"\\u0925\\u093F\\u090F\", -1, 1), Among(u\"\\u091B\", -1, 1), Among(u\"\\u0907\\u091B\", 1,", "among_var == 0: return False # ], line 70 self.bra", "generated automatically by the Snowball to Python compiler # http://snowballstem.org/", "self.slice_del(): return False return True def __r_remove_category_3(self): # (, line", "1), Among(u\"\\u090F\\u0915\\u094B\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 
44, 1),", "def __r_remove_category_2(self): # (, line 69 # [, line 70", "not self.eq_s_b(u\"\\u0947\"): raise lab1() except lab2: pass # (, line", "86 self.limit_backward = self.cursor self.cursor = self.limit # (, line", "-1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u090F\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u093F\\u091B\\u0928\\u094D\", 69,", "Among(u\"\\u0947\\u091B\\u0941\", 28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1), Among(u\"\\u0928\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0941\",", "Among(u\"\\u090F\\u0915\\u094B\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u0947\\u0915\\u094B\",", "not self.eq_s_b(u\"\\u0925\\u0947\"): return False except lab0: pass # delete, line", "16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1), Among(u\"\\u090F\\u0915\\u0940\", -1,", "except lab2: pass except lab1: pass self.cursor = self.limit -", "line 59 if not self.eq_s_b(u\"\\u090F\"): raise lab3() raise lab2() except", "1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u0928\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0928\\u094D\", 69, 1),", "= self.find_among_b(NepaliStemmer.a_0) if among_var == 0: return False # ],", "= self.cursor # substring, line 70 among_var = self.find_among_b(NepaliStemmer.a_2) if", "87 v_1 = self.limit - self.cursor try: # call remove_category_1,", "# [, line 70 self.ket = self.cursor # substring, line", "pass self.cursor = self.limit - v_1 # literal, line 71", "try: # literal, line 71 if not self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2()", "literal, line 71 if not self.eq_s_b(u\"\\u0925\\u0947\"): return False except lab0:", "remove_category_1, line 87 if not self.__r_remove_category_1(): raise lab0() except lab0:", "-1, 1), Among(u\"\\u0907\\u0926\\u094B\", 49, 1), Among(u\"\\u093F\\u0926\\u094B\", 49, 1), Among(u\"\\u092F\\u094B\", -1,", "or, line 59 try: 
v_2 = self.limit - self.cursor try:", "if not self.eq_s_b(u\"\\u090F\"): raise lab3() raise lab2() except lab3: pass", "return True def __r_check_category_2(self): # (, line 63 # [,", "v_4 = self.limit - self.cursor try: # (, line 89", "- self.cursor # call check_category_2, line 89 if not self.__r_check_category_2():", "remove_category_2, line 89 if not self.__r_remove_category_2(): raise lab5() except lab5:", "= [ Among(u\"\\u0901\", -1, -1), Among(u\"\\u0902\", -1, -1), Among(u\"\\u0948\", -1,", "1), Among(u\"\\u0947\\u0915\\u0948\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1), Among(u\"\\u0926\\u0948\", -1, 1),", "call remove_category_2, line 89 if not self.__r_remove_category_2(): raise lab5() except", "return False # delete, line 72 if not self.slice_del(): return", "self.__r_remove_category_2(): raise lab5() except lab5: pass self.cursor = self.limit -", "1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1), Among(u\"\\u091B\\u094C\", -1, 1), Among(u\"\\u0907\\u091B\\u094C\", 59, 1),", "except lab3: pass except lab2: pass except lab1: pass self.cursor", "1), Among(u\"\\u0932\\u093E\\u0908\", -1, 1), Among(u\"\\u0938\\u0901\\u0917\", -1, 1), Among(u\"\\u0938\\u0902\\u0917\", -1, 1),", "53 # [, line 54 self.ket = self.cursor # substring,", "Among(u\"\\u0926\\u0948\", -1, 1), Among(u\"\\u0907\\u0926\\u0948\", 41, 1), Among(u\"\\u093F\\u0926\\u0948\", 41, 1), Among(u\"\\u090F\\u0915\\u094B\",", "v_1 = self.limit - self.cursor try: # literal, line 71", "== 0: return False # ], line 77 self.bra =", "line 89 v_5 = self.limit - self.cursor # call check_category_2,", "self.eq_s_b(u\"\\u0947\"): raise lab1() except lab2: pass # (, line 59", "self.cursor = self.limit - v_4 # call remove_category_3, line 89", "Among(u\"\\u0907\\u0926\\u094B\", 49, 1), Among(u\"\\u093F\\u0926\\u094B\", 49, 1), Among(u\"\\u092F\\u094B\", -1, 1), Among(u\"\\u0907\\u092F\\u094B\",", "self.__r_remove_category_3(): raise lab4() raise lab3() except lab4: pass self.cursor =", 
"Among(u\"\\u0932\\u093E\\u0908\", -1, 1), Among(u\"\\u0938\\u0901\\u0917\", -1, 1), Among(u\"\\u0938\\u0902\\u0917\", -1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\",", "except lab1: pass self.cursor = self.limit - v_2 self.cursor =", "(, line 63 # [, line 64 self.ket = self.cursor", "1), Among(u\"\\u0928\\u0947\", -1, 1), Among(u\"\\u090F\\u0915\\u0948\", -1, 1), Among(u\"\\u0947\\u0915\\u0948\", -1, 1),", "70 self.bra = self.cursor if among_var == 1: # (,", "not self.__r_remove_category_3(): raise lab4() raise lab3() except lab4: pass self.cursor", "59, 1), Among(u\"\\u093F\\u091B\\u094C\", 59, 1), Among(u\"\\u0947\\u091B\\u094C\", 59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63,", "lab2() except lab3: pass except lab2: pass except lab1: pass", "Among(u\"\\u0902\", -1, -1), Among(u\"\\u0948\", -1, -1) ] a_2 = [", "call check_category_2, line 89 if not self.__r_check_category_2(): raise lab5() self.cursor", "= self.cursor # substring, line 54 among_var = self.find_among_b(NepaliStemmer.a_0) if", "# literal, line 71 if not self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3() raise", "the stemming algorithm defined by a snowball script. 
''' a_0", "1), Among(u\"\\u0925\\u0947\", -1, 1), Among(u\"\\u0928\\u0947\", -1, 1), Among(u\"\\u090F\\u0915\\u0948\", -1, 1),", "1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1), Among(u\"\\u0926\\u094B\", -1, 1), Among(u\"\\u0907\\u0926\\u094B\", 49, 1),", "81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1), Among(u\"\\u093F\\u0938\\u094D\", -1,", "-1, 2) ] a_1 = [ Among(u\"\\u0901\", -1, -1), Among(u\"\\u0902\",", "False return True def _stem(self): # (, line 85 #", "-1, 1), Among(u\"\\u091B\\u0941\", -1, 1), Among(u\"\\u090F\\u091B\\u0941\", 28, 1), Among(u\"\\u0947\\u091B\\u0941\", 28,", "-1, 1), Among(u\"\\u0932\\u093E\\u0908\", -1, 1), Among(u\"\\u0938\\u0901\\u0917\", -1, 1), Among(u\"\\u0938\\u0902\\u0917\", -1,", "-1, 1), Among(u\"\\u0930\\u0924\", -1, 1), Among(u\"\\u0915\\u093E\", -1, 2), Among(u\"\\u092E\\u093E\", -1,", "1), Among(u\"\\u0915\\u0940\", -1, 2), Among(u\"\\u0932\\u0947\", -1, 1), Among(u\"\\u0915\\u0948\", -1, 2),", "-1, 1), Among(u\"\\u0938\\u0901\\u0917\", -1, 1), Among(u\"\\u0938\\u0902\\u0917\", -1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1,", "- v_1 # literal, line 71 if not self.eq_s_b(u\"\\u0925\\u0947\"): return", "Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1), Among(u\"\\u091B\\u0938\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u090F\\u091B\\u0938\\u094D\",", "# delete, line 59 if not self.slice_del(): return False except", "self.cursor = self.limit - v_1 # delete, line 59 if", "(, line 79 # delete, line 79 if not self.slice_del():", "# literal, line 71 if not self.eq_s_b(u\"\\u0925\\u0947\"): return False except", "''' a_0 = [ Among(u\"\\u0932\\u093E\\u0907\", -1, 1), Among(u\"\\u0932\\u093E\\u0908\", -1, 1),", "Among(u\"\\u0932\\u0947\", -1, 1), Among(u\"\\u0915\\u0948\", -1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1), Among(u\"\\u092E\\u0948\",", "lab2() except lab3: pass self.cursor = 
self.limit - v_2 #", "not self.eq_s_b(u\"\\u092F\\u094C\"): raise lab1() raise lab0() except lab1: pass self.cursor", "except lab1: pass self.cursor = self.limit - v_1 try: #", "False # ], line 77 self.bra = self.cursor # (,", "do, line 88 v_2 = self.limit - self.cursor try: #", "-1, 1), Among(u\"\\u0907\\u0926\\u0948\", 41, 1), Among(u\"\\u093F\\u0926\\u0948\", 41, 1), Among(u\"\\u090F\\u0915\\u094B\", -1,", "not self.slice_del(): return False except lab0: pass return True def", "# call remove_category_1, line 87 if not self.__r_remove_category_1(): raise lab0()", "Among(u\"\\u0907\\u092F\\u094B\", 52, 1), Among(u\"\\u092D\\u092F\\u094B\", 52, 1), Among(u\"\\u093F\\u092F\\u094B\", 52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\",", "# delete, line 58 if not self.slice_del(): return False elif", "Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u092A\\u0930\\u094D\",", "71 if not self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3() raise lab0() except lab3:", "self.ket = self.cursor # substring, line 64 if self.find_among_b(NepaliStemmer.a_1) ==", "1), Among(u\"\\u093F\\u0926\\u093E\", 16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\", -1, 1),", "1), Among(u\"\\u0939\\u0930\\u0942\", -1, 1), Among(u\"\\u091B\\u0947\", -1, 1), Among(u\"\\u0925\\u0947\", -1, 1),", "59 # or, line 59 try: v_2 = self.limit -", "except lab2: pass # (, line 59 raise lab0() except", "Among(u\"\\u0915\\u0948\", -1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1), Among(u\"\\u092E\\u0948\", -1, 1), Among(u\"\\u0915\\u094B\",", "1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1),", "1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u090F\\u0915\\u093E\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11,", "81, 
1), Among(u\"\\u090F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81,", "False # ], line 64 self.bra = self.cursor return True", "implements the stemming algorithm defined by a snowball script. '''", "1), Among(u\"\\u0907\\u092F\\u094B\", 52, 1), Among(u\"\\u092D\\u092F\\u094B\", 52, 1), Among(u\"\\u093F\\u092F\\u094B\", 52, 1),", "28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1), Among(u\"\\u0928\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0941\", -1,", "Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1), Among(u\"\\u0926\\u093E\", -1, 1), Among(u\"\\u0907\\u0926\\u093E\", 16, 1), Among(u\"\\u093F\\u0926\\u093E\",", "1), Among(u\"\\u090F\\u0915\\u0940\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1),", "69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1,", "1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1), Among(u\"\\u092F\\u094C\", -1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1),", "58 if not self.slice_del(): return False elif among_var == 2:", "Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1) ] def __r_remove_category_1(self): # (, line 53", "pass self.cursor = self.limit - v_3 raise lab2() except lab3:", "# do, line 88 v_2 = self.limit - self.cursor try:", "return False # ], line 70 self.bra = self.cursor if", "a_1 = [ Among(u\"\\u0901\", -1, -1), Among(u\"\\u0902\", -1, -1), Among(u\"\\u0948\",", "if among_var == 0: return False # ], line 54", "self.limit - v_1 # delete, line 59 if not self.slice_del():", "self.bra = self.cursor return True def __r_remove_category_2(self): # (, line", "v_1 try: # literal, line 71 if not self.eq_s_b(u\"\\u091B\\u094C\"): raise", "(, line 89 # and, line 89 v_5 = self.limit", "-1, 1), Among(u\"\\u091B\\u0947\", -1, 1), Among(u\"\\u0925\\u0947\", -1, 1), 
Among(u\"\\u0928\\u0947\", -1,", "1), Among(u\"\\u091B\\u0941\", -1, 1), Among(u\"\\u090F\\u091B\\u0941\", 28, 1), Among(u\"\\u0947\\u091B\\u0941\", 28, 1),", "[ Among(u\"\\u0901\", -1, -1), Among(u\"\\u0902\", -1, -1), Among(u\"\\u0948\", -1, -1)", "raise lab4() raise lab3() except lab4: pass self.cursor = self.limit", "70 among_var = self.find_among_b(NepaliStemmer.a_2) if among_var == 0: return False", "-1, 1), Among(u\"\\u0915\\u094B\", -1, 2) ] a_1 = [ Among(u\"\\u0901\",", "Among(u\"\\u091B\\u094C\", -1, 1), Among(u\"\\u0907\\u091B\\u094C\", 59, 1), Among(u\"\\u090F\\u091B\\u094C\", 59, 1), Among(u\"\\u093F\\u091B\\u094C\",", "lab1() except lab2: pass # (, line 59 raise lab0()", "-1, 1), Among(u\"\\u092E\\u0948\", -1, 1), Among(u\"\\u0915\\u094B\", -1, 2) ] a_1", "Among(u\"\\u0902\", -1, 1), Among(u\"\\u0948\", -1, 2) ] a_3 = [", "lab0() except lab1: pass self.cursor = self.limit - v_1 #", "__r_remove_category_3(self): # (, line 76 # [, line 77 self.ket", "# substring, line 77 if self.find_among_b(NepaliStemmer.a_3) == 0: return False", "# (, line 59 # or, line 59 try: v_2", "87 if not self.__r_remove_category_1(): raise lab0() except lab0: pass self.cursor", "compiler # http://snowballstem.org/ from .basestemmer import BaseStemmer from .among import", "return False elif among_var == 2: # (, line 59", "Among(u\"\\u0907\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1), Among(u\"\\u091B\\u0938\\u094D\", -1, 1), Among(u\"\\u0907\\u091B\\u0938\\u094D\",", "1), Among(u\"\\u092D\\u092F\\u094B\", 52, 1), Among(u\"\\u093F\\u092F\\u094B\", 52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1),", "do, line 89 v_4 = self.limit - self.cursor try: #", "1), Among(u\"\\u093F\\u0926\\u094B\", 49, 1), Among(u\"\\u092F\\u094B\", -1, 1), Among(u\"\\u0907\\u092F\\u094B\", 52, 1),", "52, 1), Among(u\"\\u091B\\u094C\", -1, 1), Among(u\"\\u0907\\u091B\\u094C\", 59, 1), Among(u\"\\u090F\\u091B\\u094C\", 59,", "# (, line 76 # [, line 77 self.ket =", 
"False # ], line 54 self.bra = self.cursor if among_var", "v_2 # literal, line 59 if not self.eq_s_b(u\"\\u0947\"): raise lab1()", "[ Among(u\"\\u0932\\u093E\\u0907\", -1, 1), Among(u\"\\u0932\\u093E\\u0908\", -1, 1), Among(u\"\\u0938\\u0901\\u0917\", -1, 1),", "delete, line 71 if not self.slice_del(): return False elif among_var", "Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u092A\\u0930\\u094D\", -1, 1), Among(u\"\\u0907\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\",", "- v_5 # call remove_category_2, line 89 if not self.__r_remove_category_2():", "- self.cursor try: # call remove_category_1, line 87 if not", "21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u0947\\u0915\\u0940\", -1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1,", "Among(u\"\\u0947\\u0915\\u0948\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1), Among(u\"\\u0926\\u0948\", -1, 1), Among(u\"\\u0907\\u0926\\u0948\",", "(, line 69 # [, line 70 self.ket = self.cursor", "= self.cursor # substring, line 77 if self.find_among_b(NepaliStemmer.a_3) == 0:", "line 53 # [, line 54 self.ket = self.cursor #", "1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1), Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1),", "Among(u\"\\u0928\\u0947\", -1, 1), Among(u\"\\u090F\\u0915\\u0948\", -1, 1), Among(u\"\\u0947\\u0915\\u0948\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\",", "-1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u092A\\u0930\\u094D\", -1, 1), Among(u\"\\u0907\\u0938\\u094D\", -1,", "52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52,", "-1, 1), Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1) ] def __r_remove_category_1(self): # (,", "self.limit - v_1 try: # literal, line 71 if not", "- self.cursor try: # literal, line 59 if not self.eq_s_b(u\"\\u090F\"):", "# (, 
line 72 # literal, line 72 if not", "# (, line 59 raise lab0() except lab1: pass self.cursor", "== 1: # (, line 58 # delete, line 58", "# call remove_category_2, line 89 if not self.__r_remove_category_2(): raise lab5()", "] a_1 = [ Among(u\"\\u0901\", -1, -1), Among(u\"\\u0902\", -1, -1),", "59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1), Among(u\"\\u092F\\u094C\", -1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65,", "[, line 70 self.ket = self.cursor # substring, line 70", "Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1), Among(u\"\\u0930\\u0924\", -1, 1), Among(u\"\\u0915\\u093E\", -1, 2), Among(u\"\\u092E\\u093E\",", "Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1), Among(u\"\\u092F\\u094C\", -1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\",", "def __r_check_category_2(self): # (, line 63 # [, line 64", "self.find_among_b(NepaliStemmer.a_2) if among_var == 0: return False # ], line", "raise lab0() except lab3: pass self.cursor = self.limit - v_1", "81, 1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1), Among(u\"\\u093F\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87,", "self.limit - v_5 # call remove_category_2, line 89 if not", "pass class lab3(BaseException): pass class lab4(BaseException): pass class lab5(BaseException): pass", "self.find_among_b(NepaliStemmer.a_1) == 0: return False # ], line 64 self.bra", "self.cursor = self.limit_backward return True class lab0(BaseException): pass class lab1(BaseException):", "Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u0947\\u0915\\u0940\", -1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1), Among(u\"\\u0925\\u0940\",", "if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False # delete, line 72 if", "Among(u\"\\u092F\\u094B\", -1, 1), Among(u\"\\u0907\\u092F\\u094B\", 52, 1), Among(u\"\\u092D\\u092F\\u094B\", 52, 1), Among(u\"\\u093F\\u092F\\u094B\",", "Among(u\"\\u0926\\u093F\\u092F\\u094B\", 
55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1), Among(u\"\\u091B\\u094C\", -1, 1), Among(u\"\\u0907\\u091B\\u094C\",", "1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1), Among(u\"\\u093F\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1),", "1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1),", "71 try: v_1 = self.limit - self.cursor try: # literal,", "except lab0: pass return True def __r_check_category_2(self): # (, line", "Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1, 1), Among(u\"\\u0915\\u093F\", -1, 2), Among(u\"\\u092A\\u091B\\u093F\", -1, 1), Among(u\"\\u0915\\u0940\",", "64 self.bra = self.cursor return True def __r_remove_category_2(self): # (,", "v_5 # call remove_category_2, line 89 if not self.__r_remove_category_2(): raise", "class lab2(BaseException): pass class lab3(BaseException): pass class lab4(BaseException): pass class", "line 87 if not self.__r_remove_category_1(): raise lab0() except lab0: pass", "v_2 = self.limit - self.cursor try: # literal, line 59", "# literal, line 71 if not self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2() raise", "raise lab5() self.cursor = self.limit - v_5 # call remove_category_2,", "Python compiler # http://snowballstem.org/ from .basestemmer import BaseStemmer from .among", "a snowball script. 
''' a_0 = [ Among(u\"\\u0932\\u093E\\u0907\", -1, 1),", "lab1() raise lab0() except lab1: pass self.cursor = self.limit -", "- v_1 # do, line 88 v_2 = self.limit -", "= self.limit - self.cursor try: # call remove_category_1, line 87", "self.limit - self.cursor try: # literal, line 59 if not", "self.cursor # substring, line 77 if self.find_among_b(NepaliStemmer.a_3) == 0: return", "# delete, line 79 if not self.slice_del(): return False return", "line 63 # [, line 64 self.ket = self.cursor #", "1, 1), Among(u\"\\u090F\\u0915\\u093E\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11,", "self.cursor self.cursor = self.limit # (, line 86 # do,", "# (, line 53 # [, line 54 self.ket =", "-1, 2), Among(u\"\\u092E\\u093E\", -1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1, 1), Among(u\"\\u0915\\u093F\", -1,", "64 if self.find_among_b(NepaliStemmer.a_1) == 0: return False # ], line", "0: return False # ], line 54 self.bra = self.cursor", "Among(u\"\\u090F\\u0915\\u0948\", -1, 1), Among(u\"\\u0947\\u0915\\u0948\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1), Among(u\"\\u0926\\u0948\",", "lab3() raise lab0() except lab3: pass self.cursor = self.limit -", "89 if not self.__r_remove_category_3(): raise lab4() raise lab3() except lab4:", "-1, 1), Among(u\"\\u090F\\u091B\\u0941\", 28, 1), Among(u\"\\u0947\\u091B\\u0941\", 28, 1), Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30,", "self.cursor if among_var == 1: # (, line 58 #", "substring, line 70 among_var = self.find_among_b(NepaliStemmer.a_2) if among_var == 0:", "raise lab2() raise lab0() except lab2: pass self.cursor = self.limit", "Among(u\"\\u0926\\u094B\", -1, 1), Among(u\"\\u0907\\u0926\\u094B\", 49, 1), Among(u\"\\u093F\\u0926\\u094B\", 49, 1), Among(u\"\\u092F\\u094B\",", "[, line 64 self.ket = self.cursor # substring, line 64", "if not self.__r_remove_category_3(): raise lab4() raise lab3() except lab4: pass", "1), 
Among(u\"\\u0928\\u0947\\u091B\\u0941\", 30, 1), Among(u\"\\u0928\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0941\", -1, 1),", "1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1), Among(u\"\\u0926\\u093E\", -1, 1), Among(u\"\\u0907\\u0926\\u093E\", 16, 1),", "Among(u\"\\u0947\\u0915\\u094B\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1), Among(u\"\\u0926\\u094B\", -1, 1), Among(u\"\\u0907\\u0926\\u094B\",", "-1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1), Among(u\"\\u0926\\u094B\", -1, 1), Among(u\"\\u0907\\u0926\\u094B\", 49,", "self.find_among_b(NepaliStemmer.a_3) == 0: return False # ], line 77 self.bra", "= self.limit_backward return True class lab0(BaseException): pass class lab1(BaseException): pass", "raise lab3() except lab4: pass self.cursor = self.limit - v_3", "Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1), Among(u\"\\u0925\\u0940\", -1, 1), Among(u\"\\u0926\\u0940\", -1, 1), Among(u\"\\u091B\\u0941\",", "41, 1), Among(u\"\\u090F\\u0915\\u094B\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44,", "Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0925\\u094D\\u092F\\u094B\", 52, 1), Among(u\"\\u091B\\u094C\",", "= self.limit - self.cursor try: # (, line 88 #", "pass self.cursor = self.limit - v_1 # delete, line 59", "-1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1), Among(u\"\\u092E\\u0948\", -1, 1), Among(u\"\\u0915\\u094B\", -1,", "1), Among(u\"\\u091B\\u094C\", -1, 1), Among(u\"\\u0907\\u091B\\u094C\", 59, 1), Among(u\"\\u090F\\u091B\\u094C\", 59, 1),", "self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2() raise lab0() except lab2: pass self.cursor =", "try: while True: try: v_3 = self.limit - self.cursor try:", "Among(u\"\\u0907\\u0926\\u093E\", 16, 1), Among(u\"\\u093F\\u0926\\u093E\", 16, 1), Among(u\"\\u0926\\u0947\\u0916\\u093F\", -1, 1), Among(u\"\\u092E\\u093E\\u0925\\u093F\",", "among_var = 
self.find_among_b(NepaliStemmer.a_2) if among_var == 0: return False #", "self.eq_s_b(u\"\\u092F\\u094C\"): raise lab1() raise lab0() except lab1: pass self.cursor =", "lab0() except lab3: pass self.cursor = self.limit - v_1 #", "44, 1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u0947\\u0915\\u094B\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47,", "= self.limit - v_1 # delete, line 59 if not", "Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u093F\\u090F\\u0915\\u093E\", 11, 1), Among(u\"\\u0947\\u0915\\u093E\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\",", "literal, line 71 if not self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2() raise lab0()", "class NepaliStemmer(BaseStemmer): ''' This class was automatically generated by a", "the Snowball to Python compiler # http://snowballstem.org/ from .basestemmer import", "among_var = self.find_among_b(NepaliStemmer.a_0) if among_var == 0: return False #", "Among(u\"\\u0932\\u093E\\u0907\", -1, 1), Among(u\"\\u0932\\u093E\\u0908\", -1, 1), Among(u\"\\u0938\\u0901\\u0917\", -1, 1), Among(u\"\\u0938\\u0902\\u0917\",", "line 87 v_1 = self.limit - self.cursor try: # call", "= self.limit # (, line 86 # do, line 87", "among_var == 2: # (, line 72 # literal, line", "(, line 89 # do, line 89 v_4 = self.limit", "], line 54 self.bra = self.cursor if among_var == 1:", "except lab3: pass self.cursor = self.limit - v_2 # literal,", "- v_1 # delete, line 59 if not self.slice_del(): return", "69 # [, line 70 self.ket = self.cursor # substring,", "line 58 if not self.slice_del(): return False elif among_var ==", "], line 70 self.bra = self.cursor if among_var == 1:", "1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1), Among(u\"\\u0939\\u094B\\u0938\\u094D\", -1, 1) ] def __r_remove_category_1(self):", "= self.limit - self.cursor # call check_category_2, line 89 if", "return True def __r_remove_category_2(self): # (, line 69 # [,", "Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1), 
Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85, 1), Among(u\"\\u093F\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\",", "by a Snowball to Python compiler It implements the stemming", "Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u090F\\u0915\\u093E\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\",", "raise lab3() raise lab2() except lab3: pass self.cursor = self.limit", "59, 1), Among(u\"\\u090F\\u091B\\u094C\", 59, 1), Among(u\"\\u093F\\u091B\\u094C\", 59, 1), Among(u\"\\u0947\\u091B\\u094C\", 59,", "Among(u\"\\u0907\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u093F\\u090F\\u0915\\u0940\", 21, 1), Among(u\"\\u0947\\u0915\\u0940\", -1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\",", "Among(u\"\\u0907\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u093F\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u090F\\u0915\\u093E\",", "1), Among(u\"\\u092F\\u094C\", -1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1),", "raise lab3() raise lab0() except lab3: pass self.cursor = self.limit", "Among(u\"\\u093F\\u091B\", 1, 1), Among(u\"\\u0947\\u091B\", 1, 1), Among(u\"\\u0928\\u0947\\u091B\", 5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\",", "line 59 if not self.slice_del(): return False except lab0: pass", "line 72 # literal, line 72 if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return", "and, line 89 v_5 = self.limit - self.cursor # call", "(, line 58 # delete, line 58 if not self.slice_del():", "], line 77 self.bra = self.cursor # (, line 79", "1), Among(u\"\\u093F\\u090F\\u0915\\u094B\", 44, 1), Among(u\"\\u0947\\u0915\\u094B\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u094B\", 47, 1),", "1), Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 1),", "0: return False # ], line 77 
self.bra = self.cursor", "Among(u\"\\u091B\\u0947\", -1, 1), Among(u\"\\u0925\\u0947\", -1, 1), Among(u\"\\u0928\\u0947\", -1, 1), Among(u\"\\u090F\\u0915\\u0948\",", "-1, 1), Among(u\"\\u0907\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0907\\u0938\\u094D\", 79, 1), Among(u\"\\u091B\\u0938\\u094D\", -1,", "1, 1), Among(u\"\\u093F\\u091B\", 1, 1), Among(u\"\\u0947\\u091B\", 1, 1), Among(u\"\\u0928\\u0947\\u091B\", 5,", "-1, 1), Among(u\"\\u0925\\u0947\", -1, 1), Among(u\"\\u0928\\u0947\", -1, 1), Among(u\"\\u090F\\u0915\\u0948\", -1,", "if not self.eq_s_b(u\"\\u0947\"): raise lab1() except lab2: pass # (,", "v_2 self.cursor = self.limit_backward return True class lab0(BaseException): pass class", "def __r_remove_category_1(self): # (, line 53 # [, line 54", "# repeat, line 89 try: while True: try: v_3 =", "lab3: pass except lab2: pass except lab1: pass self.cursor =", "52, 1), Among(u\"\\u093F\\u092F\\u094B\", 52, 1), Among(u\"\\u0925\\u093F\\u092F\\u094B\", 55, 1), Among(u\"\\u0926\\u093F\\u092F\\u094B\", 55,", "self.limit - v_3 raise lab2() except lab3: pass except lab2:", "# call check_category_2, line 89 if not self.__r_check_category_2(): raise lab5()", "1), Among(u\"\\u0907\\u0926\\u0948\", 41, 1), Among(u\"\\u093F\\u0926\\u0948\", 41, 1), Among(u\"\\u090F\\u0915\\u094B\", -1, 1),", "Among(u\"\\u0947\\u091B\\u094C\", 59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\", 63, 1), Among(u\"\\u092F\\u094C\", -1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\",", "Among(u\"\\u093F\\u0938\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0938\\u094D\", 87, 1), Among(u\"\\u091B\\u0947\\u0938\\u094D\", -1, 1), Among(u\"\\u0939\\u094B\\u0938\\u094D\",", "line 88 v_2 = self.limit - self.cursor try: # (,", "try: v_1 = self.limit - self.cursor try: # (, line", "line 86 self.limit_backward = self.cursor self.cursor = self.limit # (,", "63, 1), Among(u\"\\u092F\\u094C\", -1, 1), Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65,", "= 
self.limit - v_1 # literal, line 71 if not", "# do, line 87 v_1 = self.limit - self.cursor try:", "73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 1), Among(u\"\\u091B\\u093F\\u0928\\u094D\", -1, 1), Among(u\"\\u0925\\u093F\\u0928\\u094D\", -1,", "line 64 if self.find_among_b(NepaliStemmer.a_1) == 0: return False # ],", "21, 1), Among(u\"\\u0947\\u0915\\u0940\", -1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1), Among(u\"\\u0925\\u0940\", -1,", "41, 1), Among(u\"\\u093F\\u0926\\u0948\", 41, 1), Among(u\"\\u090F\\u0915\\u094B\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u094B\", 44,", "1), Among(u\"\\u0947\\u091B\", 1, 1), Among(u\"\\u0928\\u0947\\u091B\", 5, 1), Among(u\"\\u0939\\u0941\\u0928\\u0947\\u091B\", 6, 1),", "if self.find_among_b(NepaliStemmer.a_3) == 0: return False # ], line 77", "2), Among(u\"\\u0932\\u0947\", -1, 1), Among(u\"\\u0915\\u0948\", -1, 2), Among(u\"\\u0938\\u0901\\u0917\\u0948\", -1, 1),", "Among(u\"\\u0925\\u093F\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u0925\\u094D\\u092F\\u094C\", 65, 1), Among(u\"\\u091B\\u0928\\u094D\",", "class lab0(BaseException): pass class lab1(BaseException): pass class lab2(BaseException): pass class", "-1, 1), Among(u\"\\u0915\\u093F\", -1, 2), Among(u\"\\u092A\\u091B\\u093F\", -1, 1), Among(u\"\\u0915\\u0940\", -1,", "Among(u\"\\u0947\\u0915\\u0940\", -1, 1), Among(u\"\\u0926\\u0947\\u0916\\u0940\", -1, 1), Among(u\"\\u0925\\u0940\", -1, 1), Among(u\"\\u0926\\u0940\",", "# ], line 64 self.bra = self.cursor return True def", "Among(u\"\\u0926\\u0940\", -1, 1), Among(u\"\\u091B\\u0941\", -1, 1), Among(u\"\\u090F\\u091B\\u0941\", 28, 1), Among(u\"\\u0947\\u091B\\u0941\",", "81, 1), Among(u\"\\u093F\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0947\\u091B\\u0938\\u094D\", 81, 1), Among(u\"\\u0928\\u0947\\u091B\\u0938\\u094D\", 85,", "self.limit # (, line 86 # do, line 87 v_1", "72 # literal, line 72 if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False", 
"1), Among(u\"\\u0930\\u0924\", -1, 1), Among(u\"\\u0915\\u093E\", -1, 2), Among(u\"\\u092E\\u093E\", -1, 1),", "if not self.eq_s_b(u\"\\u0928\\u094C\"): raise lab3() raise lab0() except lab3: pass", "Among(u\"\\u0947\\u091B\\u0928\\u094D\", 69, 1), Among(u\"\\u0928\\u0947\\u091B\\u0928\\u094D\", 73, 1), Among(u\"\\u0932\\u093E\\u0928\\u094D\", -1, 1), Among(u\"\\u091B\\u093F\\u0928\\u094D\",", "2) ] a_3 = [ Among(u\"\\u0925\\u093F\\u090F\", -1, 1), Among(u\"\\u091B\", -1,", "- self.cursor try: # (, line 59 # or, line", "11, 1), Among(u\"\\u0947\\u0915\\u093E\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u093E\", 14, 1), Among(u\"\\u0926\\u093E\", -1,", "True def __r_check_category_2(self): # (, line 63 # [, line", "lab3() raise lab2() except lab3: pass self.cursor = self.limit -", "delete, line 79 if not self.slice_del(): return False return True", "self.cursor = self.limit - v_2 # literal, line 59 if", "# literal, line 72 if not self.eq_s_b(u\"\\u0924\\u094D\\u0930\"): return False #", "try: # (, line 89 # and, line 89 v_5", "a Snowball to Python compiler It implements the stemming algorithm", "if not self.__r_remove_category_2(): raise lab5() except lab5: pass self.cursor =", "automatically generated by a Snowball to Python compiler It implements", "89 v_4 = self.limit - self.cursor try: # (, line", "# [, line 64 self.ket = self.cursor # substring, line", "self.limit - v_1 # do, line 88 v_2 = self.limit", "<filename>saleor-env/lib/python3.7/site-packages/snowballstemmer/nepali_stemmer.py # This file was generated automatically by the Snowball", "-1, 1), Among(u\"\\u090F\\u0915\\u0948\", -1, 1), Among(u\"\\u0947\\u0915\\u0948\", -1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39,", "1, 1), Among(u\"\\u090F\\u091B\", 1, 1), Among(u\"\\u093F\\u091B\", 1, 1), Among(u\"\\u0947\\u091B\", 1,", "raise lab1() raise lab0() except lab1: pass self.cursor = self.limit", "self.cursor try: # (, line 88 # repeat, line 89", "self.limit - self.cursor try: # call remove_category_1, 
line 87 if", "1), Among(u\"\\u0939\\u0941\\u0928\\u094D\\u091B\", 1, 1), Among(u\"\\u090F\\u0915\\u093E\", -1, 1), Among(u\"\\u0907\\u090F\\u0915\\u093E\", 11, 1),", "Among(u\"\\u090F\\u091B\\u094C\", 59, 1), Among(u\"\\u093F\\u091B\\u094C\", 59, 1), Among(u\"\\u0947\\u091B\\u094C\", 59, 1), Among(u\"\\u0928\\u0947\\u091B\\u094C\",", "-1, 2) ] a_3 = [ Among(u\"\\u0925\\u093F\\u090F\", -1, 1), Among(u\"\\u091B\",", "2: # (, line 72 # literal, line 72 if", "(, line 53 # [, line 54 self.ket = self.cursor", "# (, line 86 # do, line 87 v_1 =", "Among(u\"\\u092E\\u093E\", -1, 1), Among(u\"\\u0926\\u094D\\u0935\\u093E\\u0930\\u093E\", -1, 1), Among(u\"\\u0915\\u093F\", -1, 2), Among(u\"\\u092A\\u091B\\u093F\",", "Among(u\"\\u0915\\u093F\", -1, 2), Among(u\"\\u092A\\u091B\\u093F\", -1, 1), Among(u\"\\u0915\\u0940\", -1, 2), Among(u\"\\u0932\\u0947\",", "class was automatically generated by a Snowball to Python compiler", "-1, 1), Among(u\"\\u0928\\u0947\\u0915\\u0948\", 39, 1), Among(u\"\\u0926\\u0948\", -1, 1), Among(u\"\\u0907\\u0926\\u0948\", 41,", "Among(u\"\\u0915\\u094B\", -1, 2) ] a_1 = [ Among(u\"\\u0901\", -1, -1),", "line 71 if not self.eq_s_b(u\"\\u091B\\u094C\"): raise lab2() raise lab0() except", "1), Among(u\"\\u0928\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0941\", -1, 1), Among(u\"\\u0939\\u0930\\u0942\", -1, 1),", "Among(u\"\\u0938\\u0901\\u0917\", -1, 1), Among(u\"\\u0938\\u0902\\u0917\", -1, 1), Among(u\"\\u092E\\u093E\\u0930\\u094D\\u092B\\u0924\", -1, 1), Among(u\"\\u0930\\u0924\"," ]
[ "import unittest import uspec from uspec import describe, context, it", "# -*- coding: utf-8 -*- # ================================================================= # uspec #", "= locals()[\"test_class\"].__name__ @it(\"hoge\") def _(self): self.assertTrue(True) wrap() assert TEST_CLASS_NAME_GAME3 in", "@it(\"hoge\") def _(self): self.assertTrue(True) assert TestGame is not None ##################################", "-*- coding: utf-8 -*- # ================================================================= # uspec # #", "print_function, division import unittest import uspec from uspec import describe,", "================================================================= from __future__ import unicode_literals, print_function, division import unittest import", "describe, context, it ################################### class TestGame(unittest.TestCase): pass with describe(\"Game\", test_class=TestGame):", "describe(\"Game2\"): TEST_CLASS_NAME_GAME2 = test_class.__name__ @it(\"hoge\") def _(self): self.assertTrue(True) assert TEST_CLASS_NAME_GAME2", "TestGame(unittest.TestCase): pass with describe(\"Game\", test_class=TestGame): assert test_class is TestGame @it(\"hoge\")", "_(self): self.assertTrue(True) assert TestGame is not None ################################## TEST_CLASS_NAME_GAME2 =", "def wrap(): global TEST_CLASS_NAME_GAME3 with describe(\"Game3\"): TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__ @it(\"hoge\")", "-*- # ================================================================= # uspec # # Copyright (c) 2020", "pass with describe(\"Game\", test_class=TestGame): assert test_class is TestGame @it(\"hoge\") def", "test_class=TestGame): assert test_class is TestGame @it(\"hoge\") def _(self): self.assertTrue(True) assert", "def _(self): self.assertTrue(True) assert TestGame is not None ################################## TEST_CLASS_NAME_GAME2", "division import unittest import uspec from uspec import describe, context,", "is not None 
################################## TEST_CLASS_NAME_GAME2 = None with describe(\"Game2\"): TEST_CLASS_NAME_GAME2", "it ################################### class TestGame(unittest.TestCase): pass with describe(\"Game\", test_class=TestGame): assert test_class", "# uspec # # Copyright (c) 2020 <NAME> # #", "# ================================================================= from __future__ import unicode_literals, print_function, division import unittest", "# # Copyright (c) 2020 <NAME> # # This software", "uspec import describe, context, it ################################### class TestGame(unittest.TestCase): pass with", "from uspec import describe, context, it ################################### class TestGame(unittest.TestCase): pass", "utf-8 -*- # ================================================================= # uspec # # Copyright (c)", "global TEST_CLASS_NAME_GAME3 with describe(\"Game3\"): TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__ @it(\"hoge\") def _(self):", "TestGame is not None ################################## TEST_CLASS_NAME_GAME2 = None with describe(\"Game2\"):", "def _(self): self.assertTrue(True) assert TEST_CLASS_NAME_GAME2 in globals() ################################## def wrap():", "software is released under the MIT License. 
# http://opensource.org/licenses/mit-license.php #", "_(self): self.assertTrue(True) assert TEST_CLASS_NAME_GAME2 in globals() ################################## def wrap(): global", "with describe(\"Game3\"): TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__ @it(\"hoge\") def _(self): self.assertTrue(True) wrap()", "None ################################## TEST_CLASS_NAME_GAME2 = None with describe(\"Game2\"): TEST_CLASS_NAME_GAME2 = test_class.__name__", "# http://opensource.org/licenses/mit-license.php # ================================================================= from __future__ import unicode_literals, print_function, division", "= None with describe(\"Game2\"): TEST_CLASS_NAME_GAME2 = test_class.__name__ @it(\"hoge\") def _(self):", "context, it ################################### class TestGame(unittest.TestCase): pass with describe(\"Game\", test_class=TestGame): assert", "################################## def wrap(): global TEST_CLASS_NAME_GAME3 with describe(\"Game3\"): TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__", "import describe, context, it ################################### class TestGame(unittest.TestCase): pass with describe(\"Game\",", "globals() ################################## def wrap(): global TEST_CLASS_NAME_GAME3 with describe(\"Game3\"): TEST_CLASS_NAME_GAME3 =", "TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__ @it(\"hoge\") def _(self): self.assertTrue(True) wrap() assert TEST_CLASS_NAME_GAME3", "================================================================= # uspec # # Copyright (c) 2020 <NAME> #", "TEST_CLASS_NAME_GAME2 in globals() ################################## def wrap(): global TEST_CLASS_NAME_GAME3 with describe(\"Game3\"):", "@it(\"hoge\") def _(self): self.assertTrue(True) assert TEST_CLASS_NAME_GAME2 in globals() ################################## def", "with describe(\"Game2\"): TEST_CLASS_NAME_GAME2 = test_class.__name__ @it(\"hoge\") def _(self): self.assertTrue(True) assert", 
"@it(\"hoge\") def _(self): self.assertTrue(True) wrap() assert TEST_CLASS_NAME_GAME3 in globals() if", "assert TEST_CLASS_NAME_GAME3 in globals() if __name__ == '__main__': import unittest", "# This software is released under the MIT License. #", "################################## TEST_CLASS_NAME_GAME2 = None with describe(\"Game2\"): TEST_CLASS_NAME_GAME2 = test_class.__name__ @it(\"hoge\")", "TEST_CLASS_NAME_GAME3 with describe(\"Game3\"): TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__ @it(\"hoge\") def _(self): self.assertTrue(True)", "describe(\"Game\", test_class=TestGame): assert test_class is TestGame @it(\"hoge\") def _(self): self.assertTrue(True)", "self.assertTrue(True) wrap() assert TEST_CLASS_NAME_GAME3 in globals() if __name__ == '__main__':", "# # This software is released under the MIT License.", "self.assertTrue(True) assert TEST_CLASS_NAME_GAME2 in globals() ################################## def wrap(): global TEST_CLASS_NAME_GAME3", "TestGame @it(\"hoge\") def _(self): self.assertTrue(True) assert TestGame is not None", "This software is released under the MIT License. # http://opensource.org/licenses/mit-license.php", "unicode_literals, print_function, division import unittest import uspec from uspec import", "class TestGame(unittest.TestCase): pass with describe(\"Game\", test_class=TestGame): assert test_class is TestGame", "TEST_CLASS_NAME_GAME3 in globals() if __name__ == '__main__': import unittest unittest.main(verbosity=2)", "(c) 2020 <NAME> # # This software is released under", "uspec # # Copyright (c) 2020 <NAME> # # This", "the MIT License. 
# http://opensource.org/licenses/mit-license.php # ================================================================= from __future__ import", "from __future__ import unicode_literals, print_function, division import unittest import uspec", "import uspec from uspec import describe, context, it ################################### class", "################################### class TestGame(unittest.TestCase): pass with describe(\"Game\", test_class=TestGame): assert test_class is", "<NAME> # # This software is released under the MIT", "None with describe(\"Game2\"): TEST_CLASS_NAME_GAME2 = test_class.__name__ @it(\"hoge\") def _(self): self.assertTrue(True)", "test_class.__name__ @it(\"hoge\") def _(self): self.assertTrue(True) assert TEST_CLASS_NAME_GAME2 in globals() ##################################", "not None ################################## TEST_CLASS_NAME_GAME2 = None with describe(\"Game2\"): TEST_CLASS_NAME_GAME2 =", "# Copyright (c) 2020 <NAME> # # This software is", "License. 
# http://opensource.org/licenses/mit-license.php # ================================================================= from __future__ import unicode_literals, print_function,", "Copyright (c) 2020 <NAME> # # This software is released", "_(self): self.assertTrue(True) wrap() assert TEST_CLASS_NAME_GAME3 in globals() if __name__ ==", "with describe(\"Game\", test_class=TestGame): assert test_class is TestGame @it(\"hoge\") def _(self):", "describe(\"Game3\"): TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__ @it(\"hoge\") def _(self): self.assertTrue(True) wrap() assert", "is TestGame @it(\"hoge\") def _(self): self.assertTrue(True) assert TestGame is not", "assert test_class is TestGame @it(\"hoge\") def _(self): self.assertTrue(True) assert TestGame", "TEST_CLASS_NAME_GAME2 = test_class.__name__ @it(\"hoge\") def _(self): self.assertTrue(True) assert TEST_CLASS_NAME_GAME2 in", "self.assertTrue(True) assert TestGame is not None ################################## TEST_CLASS_NAME_GAME2 = None", "unittest import uspec from uspec import describe, context, it ###################################", "assert TestGame is not None ################################## TEST_CLASS_NAME_GAME2 = None with", "MIT License. # http://opensource.org/licenses/mit-license.php # ================================================================= from __future__ import unicode_literals,", "test_class is TestGame @it(\"hoge\") def _(self): self.assertTrue(True) assert TestGame is", "= test_class.__name__ @it(\"hoge\") def _(self): self.assertTrue(True) assert TEST_CLASS_NAME_GAME2 in globals()", "__future__ import unicode_literals, print_function, division import unittest import uspec from", "# ================================================================= # uspec # # Copyright (c) 2020 <NAME>", "under the MIT License. 
# http://opensource.org/licenses/mit-license.php # ================================================================= from __future__", "wrap(): global TEST_CLASS_NAME_GAME3 with describe(\"Game3\"): TEST_CLASS_NAME_GAME3 = locals()[\"test_class\"].__name__ @it(\"hoge\") def", "wrap() assert TEST_CLASS_NAME_GAME3 in globals() if __name__ == '__main__': import", "def _(self): self.assertTrue(True) wrap() assert TEST_CLASS_NAME_GAME3 in globals() if __name__", "in globals() ################################## def wrap(): global TEST_CLASS_NAME_GAME3 with describe(\"Game3\"): TEST_CLASS_NAME_GAME3", "coding: utf-8 -*- # ================================================================= # uspec # # Copyright", "2020 <NAME> # # This software is released under the", "locals()[\"test_class\"].__name__ @it(\"hoge\") def _(self): self.assertTrue(True) wrap() assert TEST_CLASS_NAME_GAME3 in globals()", "is released under the MIT License. # http://opensource.org/licenses/mit-license.php # =================================================================", "<reponame>MountainField/uspec # -*- coding: utf-8 -*- # ================================================================= # uspec", "released under the MIT License. # http://opensource.org/licenses/mit-license.php # ================================================================= from", "uspec from uspec import describe, context, it ################################### class TestGame(unittest.TestCase):", "http://opensource.org/licenses/mit-license.php # ================================================================= from __future__ import unicode_literals, print_function, division import", "import unicode_literals, print_function, division import unittest import uspec from uspec", "assert TEST_CLASS_NAME_GAME2 in globals() ################################## def wrap(): global TEST_CLASS_NAME_GAME3 with", "TEST_CLASS_NAME_GAME2 = None with describe(\"Game2\"): TEST_CLASS_NAME_GAME2 = test_class.__name__ @it(\"hoge\") def" ]
[ "scores_history) plt.ylabel('Snake length') plt.xlabel('Game count') plt.show() if __name__ == \"__main__\":", "36) plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake length') plt.xlabel('Game count') plt.show() if __name__", "= 2 for i in range(GAME_COUNT): game = Game(400, \"Snake", "def main(): scores_history = [] GAME_COUNT = 2 for i", "GAME_COUNT = 2 for i in range(GAME_COUNT): game = Game(400,", "matplotlib.pyplot as plt def main(): scores_history = [] GAME_COUNT =", "= Game(400, \"Snake AI\") score = game.start() scores_history.append(score) print(\"Game:\", i)", "plt def main(): scores_history = [] GAME_COUNT = 2 for", "range(GAME_COUNT): game = Game(400, \"Snake AI\") score = game.start() scores_history.append(score)", "[] GAME_COUNT = 2 for i in range(GAME_COUNT): game =", "pygame from game.game_logic.game import Game import matplotlib.pyplot as plt def", "in range(GAME_COUNT): game = Game(400, \"Snake AI\") score = game.start()", "scores_history = [] GAME_COUNT = 2 for i in range(GAME_COUNT):", "i in range(GAME_COUNT): game = Game(400, \"Snake AI\") score =", "Game(400, \"Snake AI\") score = game.start() scores_history.append(score) print(\"Game:\", i) plt.ylim(0,", "= game.start() scores_history.append(score) print(\"Game:\", i) plt.ylim(0, 36) plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake", "as plt def main(): scores_history = [] GAME_COUNT = 2", "Game import matplotlib.pyplot as plt def main(): scores_history = []", "2 for i in range(GAME_COUNT): game = Game(400, \"Snake AI\")", "\"Snake AI\") score = game.start() scores_history.append(score) print(\"Game:\", i) plt.ylim(0, 36)", "import pygame from game.game_logic.game import Game import matplotlib.pyplot as plt", "i) plt.ylim(0, 36) plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake length') plt.xlabel('Game count') plt.show()", "AI\") score = game.start() scores_history.append(score) print(\"Game:\", i) plt.ylim(0, 36) 
plt.plot(range(len(scores_history)),", "game = Game(400, \"Snake AI\") score = game.start() scores_history.append(score) print(\"Game:\",", "import Game import matplotlib.pyplot as plt def main(): scores_history =", "scores_history.append(score) print(\"Game:\", i) plt.ylim(0, 36) plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake length') plt.xlabel('Game", "import matplotlib.pyplot as plt def main(): scores_history = [] GAME_COUNT", "plt.ylabel('Snake length') plt.xlabel('Game count') plt.show() if __name__ == \"__main__\": main()", "print(\"Game:\", i) plt.ylim(0, 36) plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake length') plt.xlabel('Game count')", "plt.ylim(0, 36) plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake length') plt.xlabel('Game count') plt.show() if", "game.start() scores_history.append(score) print(\"Game:\", i) plt.ylim(0, 36) plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake length')", "main(): scores_history = [] GAME_COUNT = 2 for i in", "for i in range(GAME_COUNT): game = Game(400, \"Snake AI\") score", "score = game.start() scores_history.append(score) print(\"Game:\", i) plt.ylim(0, 36) plt.plot(range(len(scores_history)), scores_history)", "= [] GAME_COUNT = 2 for i in range(GAME_COUNT): game", "plt.plot(range(len(scores_history)), scores_history) plt.ylabel('Snake length') plt.xlabel('Game count') plt.show() if __name__ ==", "game.game_logic.game import Game import matplotlib.pyplot as plt def main(): scores_history", "from game.game_logic.game import Game import matplotlib.pyplot as plt def main():" ]
[ "import glob def create_list(images_dir, output_file, img_ext=\".jpg\"): ImgList = os.listdir(images_dir) val_list", "main(): if len(sys.argv) < 2: print(\"Requires images directory\") sys.exit(1) elif", "2: print(\"Requires images directory\") sys.exit(1) elif len(sys.argv) < 3: images_dir", "len(sys.argv) < 3: images_dir = sys.argv[1] output_file = \"image_list.txt\" else:", "< 2: print(\"Requires images directory\") sys.exit(1) elif len(sys.argv) < 3:", "+ \"\\n\") fid.write(val_list[-1]) def main(): if len(sys.argv) < 2: print(\"Requires", "os.listdir(images_dir) val_list = [] for img in ImgList: img,ext =", "sys.exit(1) elif len(sys.argv) < 3: images_dir = sys.argv[1] output_file =", "val_list.append(img) with open(os.path.join(images_dir, output_file),'w') as fid: for line in val_list[:-1]:", "= \"image_list.txt\" else: images_dir = sys.argv[1] output_file = sys.argv[2] create_list(images_dir,", "ImgList = os.listdir(images_dir) val_list = [] for img in ImgList:", "= [] for img in ImgList: img,ext = img.split(\".\") val_list.append(img)", "val_list = [] for img in ImgList: img,ext = img.split(\".\")", "create_list(images_dir, output_file, img_ext=\".jpg\"): ImgList = os.listdir(images_dir) val_list = [] for", "= img.split(\".\") val_list.append(img) with open(os.path.join(images_dir, output_file),'w') as fid: for line", "sys.argv[1] output_file = \"image_list.txt\" else: images_dir = sys.argv[1] output_file =", "from glob import glob def create_list(images_dir, output_file, img_ext=\".jpg\"): ImgList =", "for img in ImgList: img,ext = img.split(\".\") val_list.append(img) with open(os.path.join(images_dir,", "fid: for line in val_list[:-1]: fid.write(line + \"\\n\") fid.write(val_list[-1]) def", "img_ext=\".jpg\"): ImgList = os.listdir(images_dir) val_list = [] for img in", "glob import glob def create_list(images_dir, output_file, img_ext=\".jpg\"): ImgList = os.listdir(images_dir)", "elif len(sys.argv) < 3: images_dir = sys.argv[1] output_file = 
\"image_list.txt\"", "output_file),'w') as fid: for line in val_list[:-1]: fid.write(line + \"\\n\")", "in val_list[:-1]: fid.write(line + \"\\n\") fid.write(val_list[-1]) def main(): if len(sys.argv)", "fid.write(line + \"\\n\") fid.write(val_list[-1]) def main(): if len(sys.argv) < 2:", "output_file, img_ext=\".jpg\"): ImgList = os.listdir(images_dir) val_list = [] for img", "img.split(\".\") val_list.append(img) with open(os.path.join(images_dir, output_file),'w') as fid: for line in", "glob def create_list(images_dir, output_file, img_ext=\".jpg\"): ImgList = os.listdir(images_dir) val_list =", "def main(): if len(sys.argv) < 2: print(\"Requires images directory\") sys.exit(1)", "open(os.path.join(images_dir, output_file),'w') as fid: for line in val_list[:-1]: fid.write(line +", "[] for img in ImgList: img,ext = img.split(\".\") val_list.append(img) with", "print(\"Requires images directory\") sys.exit(1) elif len(sys.argv) < 3: images_dir =", "if len(sys.argv) < 2: print(\"Requires images directory\") sys.exit(1) elif len(sys.argv)", "output_file = \"image_list.txt\" else: images_dir = sys.argv[1] output_file = sys.argv[2]", "directory\") sys.exit(1) elif len(sys.argv) < 3: images_dir = sys.argv[1] output_file", "ImgList: img,ext = img.split(\".\") val_list.append(img) with open(os.path.join(images_dir, output_file),'w') as fid:", "= sys.argv[1] output_file = \"image_list.txt\" else: images_dir = sys.argv[1] output_file", "import os import sys from glob import glob def create_list(images_dir,", "= os.listdir(images_dir) val_list = [] for img in ImgList: img,ext", "= sys.argv[1] output_file = sys.argv[2] create_list(images_dir, output_file) if __name__==\"__main__\": main()", "len(sys.argv) < 2: print(\"Requires images directory\") sys.exit(1) elif len(sys.argv) <", "for line in val_list[:-1]: fid.write(line + \"\\n\") fid.write(val_list[-1]) def main():", "images directory\") sys.exit(1) elif len(sys.argv) < 3: images_dir = sys.argv[1]", "img,ext = 
img.split(\".\") val_list.append(img) with open(os.path.join(images_dir, output_file),'w') as fid: for", "img in ImgList: img,ext = img.split(\".\") val_list.append(img) with open(os.path.join(images_dir, output_file),'w')", "line in val_list[:-1]: fid.write(line + \"\\n\") fid.write(val_list[-1]) def main(): if", "as fid: for line in val_list[:-1]: fid.write(line + \"\\n\") fid.write(val_list[-1])", "import sys from glob import glob def create_list(images_dir, output_file, img_ext=\".jpg\"):", "images_dir = sys.argv[1] output_file = \"image_list.txt\" else: images_dir = sys.argv[1]", "os import sys from glob import glob def create_list(images_dir, output_file,", "val_list[:-1]: fid.write(line + \"\\n\") fid.write(val_list[-1]) def main(): if len(sys.argv) <", "3: images_dir = sys.argv[1] output_file = \"image_list.txt\" else: images_dir =", "< 3: images_dir = sys.argv[1] output_file = \"image_list.txt\" else: images_dir", "images_dir = sys.argv[1] output_file = sys.argv[2] create_list(images_dir, output_file) if __name__==\"__main__\":", "sys from glob import glob def create_list(images_dir, output_file, img_ext=\".jpg\"): ImgList", "fid.write(val_list[-1]) def main(): if len(sys.argv) < 2: print(\"Requires images directory\")", "in ImgList: img,ext = img.split(\".\") val_list.append(img) with open(os.path.join(images_dir, output_file),'w') as", "\"image_list.txt\" else: images_dir = sys.argv[1] output_file = sys.argv[2] create_list(images_dir, output_file)", "with open(os.path.join(images_dir, output_file),'w') as fid: for line in val_list[:-1]: fid.write(line", "\"\\n\") fid.write(val_list[-1]) def main(): if len(sys.argv) < 2: print(\"Requires images", "else: images_dir = sys.argv[1] output_file = sys.argv[2] create_list(images_dir, output_file) if", "def create_list(images_dir, output_file, img_ext=\".jpg\"): ImgList = os.listdir(images_dir) val_list = []" ]
[ "# Perform CV for SVM, random forest and kNN def", "src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]: # Показатель оттока клиентов –", "бизнес-термин, описывающий # насколько интенсивно клиенты покидают компанию или #", "# for i in range(len(X)): # # display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i]))", "n_jobs=-1)) try_clf(X, y, KNeighborsClassifier()) # std scaler with_mean=False accuracies: #", "не собирается. # In[ ]: # datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv #", "probabilities # def try_probab(X, y, clf_nofit): # X_tr, X_val, y_tr,", "kNN def try_clf(X, y, clf_nofit): X_tr, X_val, y_tr, y_val =", "y, clf_nofit): # X_tr, X_val, y_tr, y_val = train_test_split(X, y,", "In[3]: # Load dataset raw_churn_df = pd.read_csv('churn.csv') # In[17]: display(raw_churn_df.shape)", "# clf = clf_nofit.fit(X_tr, y_tr) # y_prob = clf.predict_proba(X_val) #", "# # try_probab(X, y, KNeighborsClassifier()) # # for i in", "range(len(Xnew)): # # print(\"X=%s, Predicted=%s\" % (Xnew[i], ynew[i])) # In[", "and discrimination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py # from churn_measurements import calibration, discrimination", "data y = raw_churn_df['Churn?'] X = raw_churn_df.drop('Churn?', axis=1) # In[79]:", "RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X, y, KNeighborsClassifier()) # std scaler with_mean=False accuracies:", "precision_recall_fscore_support(y_val, y_pred)) try_clf(X, y, SVC(gamma='scale')) try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X,", "задаче, в рамках # которой, используя имеющиеся данные, необходимо отличить", "позволяющие удержать игроков # в Блэкджек за столом. 
# 3.", "0.9256594724220624 # 0.9484412470023981 # 0.8896882494004796 # std scaler with_mean=True accuracies:", "насколько интенсивно клиенты покидают компанию или # прекращают оплачивать товары", "# # print(\"X=%s, Predicted=%s\" % (Xnew[i], ynew[i])) # In[ ]:", "y_tr) y_pred = clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val, y_pred)) display(\"prec,", "Показатель оттока клиентов – бизнес-термин, описывающий # насколько интенсивно клиенты", "0, 'yes': 1}) X[\"VMail Plan\"] = X[\"VMail Plan\"].map({'no': 0, 'yes':", "precision_recall_fscore_support from sklearn.model_selection import KFold, train_test_split from sklearn.ensemble import RandomForestClassifier", "std scaler with_mean=True accuracies: # 0.9256594724220624 # 0.9496402877697842 # 0.8896882494004796", "отношение количества правильно спрогнозированных уходов # к общему количеству фактических", "= clf_nofit.fit(X_tr, y_tr) # y_prob = clf.predict_proba(X_val) # # for", "= StandardScaler(with_mean=True) X = std_scaler.fit_transform(X) display(X.shape) # In[90]: # Perform", "отношение количества правильно спрогнозированных уходов # к общему количеству спрогнозированных", "или услуги. # Это ключевой показатель для многих компаний, потому", "интенсивно клиенты покидают компанию или # прекращают оплачивать товары или", "= X[\"Int'l Plan\"].map({'no': 0, 'yes': 1}) X[\"VMail Plan\"] = X[\"VMail", "X[\"Int'l Plan\"].map({'no': 0, 'yes': 1}) X[\"VMail Plan\"] = X[\"VMail Plan\"].map({'no':", "казино используют прогнозные модели, чтобы предсказать # идеальные условия в", "# Precision # Каково отношение количества правильно спрогнозированных уходов #", "20 раз дороже). # Примеры использования: # 1. 
мобильные операторы,", "# In[101]: # # Predict probabilities # def try_probab(X, y,", "Encode yes/no with 1/0 values X[\"Int'l Plan\"] = X[\"Int'l Plan\"].map({'no':", "операторы кабельного телевидения и # компании, обслуживающие прием платежей с", "]: # datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]: # Load libraries", "Code', 'Phone'] X = X.drop(features_to_drop, axis=1) # In[80]: # Encode", "показатель для многих компаний, потому что # зачастую приобретение новых", "try_probab(X, y, KNeighborsClassifier()) # # for i in range(len(Xnew)): #", "# In[86]: # Recall # Каково отношение количества правильно спрогнозированных", "irrelevant features features_to_drop = ['State', 'Area Code', 'Phone'] X =", "= clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val, y_pred)) display(\"prec, rec, f1,", "X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42) clf =", "accuracies: # 0.9256594724220624 # 0.9496402877697842 # 0.8896882494004796 # In[86]: #", "yes/no with 1/0 values X[\"Int'l Plan\"] = X[\"Int'l Plan\"].map({'no': 0,", "display(pd.value_counts(y_prob[:, 1])) # try_probab(X, y, SVC(gamma='scale', probability=True)) # # try_probab(X,", "# X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42) #", "import pandas as pd import numpy as np from sklearn.preprocessing", "# In[3]: # Load dataset raw_churn_df = pd.read_csv('churn.csv') # In[17]:", "support\", precision_recall_fscore_support(y_val, y_pred)) try_clf(X, y, SVC(gamma='scale')) try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))", "потому что # зачастую приобретение новых клиентов обходится намного дороже,", "# for i in range(len(Xnew)): # # print(\"X=%s, Predicted=%s\" %", "на билет первого класса. 
# Эффективное удержание клиентов сводится к", "# Encode yes/no with 1/0 values X[\"Int'l Plan\"] = X[\"Int'l", "SVC(gamma='scale')) try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X, y, KNeighborsClassifier()) # std", "= X[\"VMail Plan\"].map({'no': 0, 'yes': 1}) # In[81]: # Scale", "уходов # к общему количеству спрогнозированных уходов? # In[101]: #", "уходов? # In[101]: # # Predict probabilities # def try_probab(X,", "display(X.shape) # In[90]: # Perform CV for SVM, random forest", "необходимо отличить # клиентов, собирающихся уйти, от тех, кто этого", "raw_churn_df['Churn?'] X = raw_churn_df.drop('Churn?', axis=1) # In[79]: # Drop irrelevant", "for i in range(len(Xnew)): # # print(\"X=%s, Predicted=%s\" % (Xnew[i],", "# Load libraries import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import", "X_val, y_tr, y_val = train_test_split(X, y, random_state=42) # clf =", "clf_nofit): X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42) clf", "для многих компаний, потому что # зачастую приобретение новых клиентов", "StandardScaler(with_mean=True) X = std_scaler.fit_transform(X) display(X.shape) # In[90]: # Perform CV", "display(raw_churn_df.isnull().sum()) # In[78]: # Isolate target data y = raw_churn_df['Churn?']", "def try_probab(X, y, clf_nofit): # X_tr, X_val, y_tr, y_val =", "3. Aвиакомпании могут предложить клиентам, у которых есть # жалобы,", "клиентов, собирающихся уйти, от тех, кто этого делать # не", "# display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i])) # display(pd.value_counts(y_prob[:, 1])) # try_probab(X, y,", "первого класса. # Эффективное удержание клиентов сводится к задаче, в", "rec, f1, support\", precision_recall_fscore_support(y_val, y_pred)) try_clf(X, y, SVC(gamma='scale')) try_clf(X, y,", "1/0 values X[\"Int'l Plan\"] = X[\"Int'l Plan\"].map({'no': 0, 'yes': 1})", "кредитных карт # 2. 
казино используют прогнозные модели, чтобы предсказать", "операторы, операторы кабельного телевидения и # компании, обслуживающие прием платежей", "'yes': 1}) # In[81]: # Scale everything std_scaler = StandardScaler(with_mean=True)", "clf_nofit.fit(X_tr, y_tr) # y_prob = clf.predict_proba(X_val) # # for i", "display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val, y_pred)) display(\"prec, rec, f1, support\", precision_recall_fscore_support(y_val,", "with_mean=True accuracies: # 0.9256594724220624 # 0.9496402877697842 # 0.8896882494004796 # In[86]:", "n_jobs=-1)) # # try_probab(X, y, KNeighborsClassifier()) # # for i", "Plan\"] = X[\"VMail Plan\"].map({'no': 0, 'yes': 1}) # In[81]: #", "SVM, random forest and kNN def try_clf(X, y, clf_nofit): X_tr,", "sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support from sklearn.model_selection import KFold, train_test_split", "к общему количеству спрогнозированных уходов? # In[101]: # # Predict", "plt get_ipython().run_line_magic('matplotlib', 'inline') import pandas as pd import numpy as", "y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) # # try_probab(X, y, KNeighborsClassifier()) # #", "# идеальные условия в зале, позволяющие удержать игроков # в", "scaler with_mean=True accuracies: # 0.9256594724220624 # 0.9496402877697842 # 0.8896882494004796 #", "i in range(len(X)): # # display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i])) # display(pd.value_counts(y_prob[:,", "# In[80]: # Encode yes/no with 1/0 values X[\"Int'l Plan\"]", "# Load dataset raw_churn_df = pd.read_csv('churn.csv') # In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(),", "случаях от 5 до 20 раз дороже). 
# Примеры использования:", "# In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) # In[78]:", "import SVC from sklearn.neighbors import KNeighborsClassifier # In[3]: # Load", "pd import numpy as np from sklearn.preprocessing import StandardScaler from", "# Drop irrelevant features features_to_drop = ['State', 'Area Code', 'Phone']", "# в Блэкджек за столом. # 3. Aвиакомпании могут предложить", "confusion_matrix, precision_recall_fscore_support from sklearn.model_selection import KFold, train_test_split from sklearn.ensemble import", "предложить клиентам, у которых есть # жалобы, заменить их билет", "axis=1) # In[79]: # Drop irrelevant features features_to_drop = ['State',", "KFold, train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC", "# 0.9256594724220624 # 0.9496402877697842 # 0.8896882494004796 # In[86]: # Recall", "# Эффективное удержание клиентов сводится к задаче, в рамках #", "y_pred)) display(\"prec, rec, f1, support\", precision_recall_fscore_support(y_val, y_pred)) try_clf(X, y, SVC(gamma='scale'))", "модели, чтобы предсказать # идеальные условия в зале, позволяющие удержать", "# # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) # # try_probab(X, y,", "clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val, y_pred)) display(\"prec, rec, f1, support\",", "# try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) # # try_probab(X, y, KNeighborsClassifier())", "рамках # которой, используя имеющиеся данные, необходимо отличить # клиентов,", "Aвиакомпании могут предложить клиентам, у которых есть # жалобы, заменить", "# In[90]: # Perform CV for SVM, random forest and", "спрогнозированных уходов # к общему количеству фактических уходов? 
# Precision", "(Xnew[i], ynew[i])) # In[ ]: # todo: calibration and discrimination", "# 3. Aвиакомпании могут предложить клиентам, у которых есть #", "RandomForestClassifier(n_estimators=100, n_jobs=-1)) # # try_probab(X, y, KNeighborsClassifier()) # # for", "In[90]: # Perform CV for SVM, random forest and kNN", "удержать игроков # в Блэкджек за столом. # 3. Aвиакомпании", "clf_nofit.fit(X_tr, y_tr) y_pred = clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val, y_pred))", "try_clf(X, y, KNeighborsClassifier()) # std scaler with_mean=False accuracies: # 0.9256594724220624", "from sklearn.neighbors import KNeighborsClassifier # In[3]: # Load dataset raw_churn_df", "probability=True)) # # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) # # try_probab(X,", "что # зачастую приобретение новых клиентов обходится намного дороже, #", "sklearn.neighbors import KNeighborsClassifier # In[3]: # Load dataset raw_churn_df =", "прогнозные модели, чтобы предсказать # идеальные условия в зале, позволяющие", "y_tr, y_val = train_test_split(X, y, random_state=42) # clf = clf_nofit.fit(X_tr,", "ynew[i])) # In[ ]: # todo: calibration and discrimination #", "0.9484412470023981 # 0.8896882494004796 # std scaler with_mean=True accuracies: # 0.9256594724220624", "в рамках # которой, используя имеющиеся данные, необходимо отличить #", "= train_test_split(X, y, random_state=42) # clf = clf_nofit.fit(X_tr, y_tr) #", "y, KNeighborsClassifier()) # # for i in range(len(Xnew)): # #", "# чем удержание старых (в некоторых случаях от 5 до", "используя имеющиеся данные, необходимо отличить # клиентов, собирающихся уйти, от", "# 0.9496402877697842 # 0.8896882494004796 # In[86]: # Recall # Каково", "общему количеству спрогнозированных уходов? # In[101]: # # Predict probabilities", "платежей с помощью кредитных карт # 2. казино используют прогнозные", "билет первого класса. 
# Эффективное удержание клиентов сводится к задаче,", "оплачивать товары или услуги. # Это ключевой показатель для многих", "with 1/0 values X[\"Int'l Plan\"] = X[\"Int'l Plan\"].map({'no': 0, 'yes':", "X.drop(features_to_drop, axis=1) # In[80]: # Encode yes/no with 1/0 values", "In[78]: # Isolate target data y = raw_churn_df['Churn?'] X =", "In[88]: # Load libraries import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline')", "y_tr) # y_prob = clf.predict_proba(X_val) # # for i in", "# Isolate target data y = raw_churn_df['Churn?'] X = raw_churn_df.drop('Churn?',", "обходится намного дороже, # чем удержание старых (в некоторых случаях", "calibration and discrimination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py # from churn_measurements import calibration,", "KNeighborsClassifier()) # # for i in range(len(Xnew)): # # print(\"X=%s,", "количества правильно спрогнозированных уходов # к общему количеству спрогнозированных уходов?", "приобретение новых клиентов обходится намного дороже, # чем удержание старых", "try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) # # try_probab(X, y, KNeighborsClassifier()) #", "from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support from sklearn.model_selection import KFold,", "np from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score, confusion_matrix,", "1}) # In[81]: # Scale everything std_scaler = StandardScaler(with_mean=True) X", "display(\"prec, rec, f1, support\", precision_recall_fscore_support(y_val, y_pred)) try_clf(X, y, SVC(gamma='scale')) try_clf(X,", "Drop irrelevant features features_to_drop = ['State', 'Area Code', 'Phone'] X", "# Это ключевой показатель для многих компаний, потому что #", "# std scaler with_mean=False accuracies: # 0.9256594724220624 # 0.9484412470023981 #", "for i in range(len(X)): # # display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i])) #", 
"display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) # In[78]: # Isolate target data y", "0.9256594724220624 # 0.9496402877697842 # 0.8896882494004796 # In[86]: # Recall #", "Predicted=%s\" % (Xnew[i], ynew[i])) # In[ ]: # todo: calibration", "# не собирается. # In[ ]: # datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv", "y, clf_nofit): X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)", "Эффективное удержание клиентов сводится к задаче, в рамках # которой,", "import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.metrics", "SVC from sklearn.neighbors import KNeighborsClassifier # In[3]: # Load dataset", "with_mean=False accuracies: # 0.9256594724220624 # 0.9484412470023981 # 0.8896882494004796 # std", "0.9496402877697842 # 0.8896882494004796 # In[86]: # Recall # Каково отношение", "Load dataset raw_churn_df = pd.read_csv('churn.csv') # In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail())", "помощью кредитных карт # 2. казино используют прогнозные модели, чтобы", "y_tr, y_val = train_test_split(X, y, random_state=42) clf = clf_nofit.fit(X_tr, y_tr)", "std_scaler = StandardScaler(with_mean=True) X = std_scaler.fit_transform(X) display(X.shape) # In[90]: #", "# In[79]: # Drop irrelevant features features_to_drop = ['State', 'Area", "y, SVC(gamma='scale', probability=True)) # # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) #", "import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import pandas as pd", "'Area Code', 'Phone'] X = X.drop(features_to_drop, axis=1) # In[80]: #", "python # coding: utf-8 # In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ #", "# прекращают оплачивать товары или услуги. 
# Это ключевой показатель", "random_state=42) clf = clf_nofit.fit(X_tr, y_tr) y_pred = clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val,", "которых есть # жалобы, заменить их билет на билет первого", "= std_scaler.fit_transform(X) display(X.shape) # In[90]: # Perform CV for SVM,", "forest and kNN def try_clf(X, y, clf_nofit): X_tr, X_val, y_tr,", "and kNN def try_clf(X, y, clf_nofit): X_tr, X_val, y_tr, y_val", "компанию или # прекращают оплачивать товары или услуги. # Это", "использования: # 1. мобильные операторы, операторы кабельного телевидения и #", "KNeighborsClassifier # In[3]: # Load dataset raw_churn_df = pd.read_csv('churn.csv') #", "f1, support\", precision_recall_fscore_support(y_val, y_pred)) try_clf(X, y, SVC(gamma='scale')) try_clf(X, y, RandomForestClassifier(n_estimators=100,", "= raw_churn_df.drop('Churn?', axis=1) # In[79]: # Drop irrelevant features features_to_drop", "# жалобы, заменить их билет на билет первого класса. #", "= ['State', 'Area Code', 'Phone'] X = X.drop(features_to_drop, axis=1) #", "# In[ ]: # Показатель оттока клиентов – бизнес-термин, описывающий", "(в некоторых случаях от 5 до 20 раз дороже). 
#", "in range(len(X)): # # display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i])) # display(pd.value_counts(y_prob[:, 1]))", "Precision # Каково отношение количества правильно спрогнозированных уходов # к", "# 0.9484412470023981 # 0.8896882494004796 # std scaler with_mean=True accuracies: #", "Predicted={1}\".format(y[i], y_prob[i])) # display(pd.value_counts(y_prob[:, 1])) # try_probab(X, y, SVC(gamma='scale', probability=True))", "y_prob[i])) # display(pd.value_counts(y_prob[:, 1])) # try_probab(X, y, SVC(gamma='scale', probability=True)) #", "StandardScaler from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support from sklearn.model_selection import", "Plan\"].map({'no': 0, 'yes': 1}) # In[81]: # Scale everything std_scaler", "for SVM, random forest and kNN def try_clf(X, y, clf_nofit):", "i in range(len(Xnew)): # # print(\"X=%s, Predicted=%s\" % (Xnew[i], ynew[i]))", "фактических уходов? # Precision # Каково отношение количества правильно спрогнозированных", "# насколько интенсивно клиенты покидают компанию или # прекращают оплачивать", "от тех, кто этого делать # не собирается. # In[", "собирающихся уйти, от тех, кто этого делать # не собирается.", "# todo: calibration and discrimination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py # from churn_measurements", "и # компании, обслуживающие прием платежей с помощью кредитных карт", "accuracies: # 0.9256594724220624 # 0.9484412470023981 # 0.8896882494004796 # std scaler", "random_state=42) # clf = clf_nofit.fit(X_tr, y_tr) # y_prob = clf.predict_proba(X_val)", "clf_nofit): # X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)", "жалобы, заменить их билет на билет первого класса. 
# Эффективное", "std scaler with_mean=False accuracies: # 0.9256594724220624 # 0.9484412470023981 # 0.8896882494004796", "features features_to_drop = ['State', 'Area Code', 'Phone'] X = X.drop(features_to_drop,", "1])) # try_probab(X, y, SVC(gamma='scale', probability=True)) # # try_probab(X, y,", "coding: utf-8 # In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]:", "уходов # к общему количеству фактических уходов? # Precision #", "Это ключевой показатель для многих компаний, потому что # зачастую", "клиентам, у которых есть # жалобы, заменить их билет на", "# компании, обслуживающие прием платежей с помощью кредитных карт #", "std_scaler.fit_transform(X) display(X.shape) # In[90]: # Perform CV for SVM, random", "# In[88]: # Load libraries import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib',", "from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support", "dataset raw_churn_df = pd.read_csv('churn.csv') # In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values)", "Recall # Каково отношение количества правильно спрогнозированных уходов # к", "X[\"VMail Plan\"] = X[\"VMail Plan\"].map({'no': 0, 'yes': 1}) # In[81]:", "# Каково отношение количества правильно спрогнозированных уходов # к общему", "display(confusion_matrix(y_val, y_pred)) display(\"prec, rec, f1, support\", precision_recall_fscore_support(y_val, y_pred)) try_clf(X, y,", "In[80]: # Encode yes/no with 1/0 values X[\"Int'l Plan\"] =", "try_probab(X, y, clf_nofit): # X_tr, X_val, y_tr, y_val = train_test_split(X,", "раз дороже). # Примеры использования: # 1. мобильные операторы, операторы", "train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from", "in range(len(Xnew)): # # print(\"X=%s, Predicted=%s\" % (Xnew[i], ynew[i])) #", "дороже). 
# Примеры использования: # 1. мобильные операторы, операторы кабельного", "# In[81]: # Scale everything std_scaler = StandardScaler(with_mean=True) X =", "y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X, y, KNeighborsClassifier()) # std scaler with_mean=False", "# к общему количеству спрогнозированных уходов? # In[101]: # #", "accuracy_score, confusion_matrix, precision_recall_fscore_support from sklearn.model_selection import KFold, train_test_split from sklearn.ensemble", "билет на билет первого класса. # Эффективное удержание клиентов сводится", "# In[78]: # Isolate target data y = raw_churn_df['Churn?'] X", "display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i])) # display(pd.value_counts(y_prob[:, 1])) # try_probab(X, y, SVC(gamma='scale',", "todo: calibration and discrimination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py # from churn_measurements import", "# y_prob = clf.predict_proba(X_val) # # for i in range(len(X)):", "предсказать # идеальные условия в зале, позволяющие удержать игроков #", "собирается. # In[ ]: # datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]:", "scaler with_mean=False accuracies: # 0.9256594724220624 # 0.9484412470023981 # 0.8896882494004796 #", "import KFold, train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.svm import", "# Показатель оттока клиентов – бизнес-термин, описывающий # насколько интенсивно", "Блэкджек за столом. # 3. Aвиакомпании могут предложить клиентам, у", "клиентов обходится намного дороже, # чем удержание старых (в некоторых", "y_pred)) try_clf(X, y, SVC(gamma='scale')) try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X, y,", "5 до 20 раз дороже). # Примеры использования: # 1.", "к общему количеству фактических уходов? 
# Precision # Каково отношение", "X = std_scaler.fit_transform(X) display(X.shape) # In[90]: # Perform CV for", "display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) # In[78]: # Isolate", "In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) # In[78]: #", "= clf_nofit.fit(X_tr, y_tr) y_pred = clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val,", "# 2. казино используют прогнозные модели, чтобы предсказать # идеальные", "старых (в некоторых случаях от 5 до 20 раз дороже).", "Predict probabilities # def try_probab(X, y, clf_nofit): # X_tr, X_val,", "]: # todo: calibration and discrimination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py # from", "спрогнозированных уходов? # In[101]: # # Predict probabilities # def", "покидают компанию или # прекращают оплачивать товары или услуги. #", "правильно спрогнозированных уходов # к общему количеству спрогнозированных уходов? #", "зале, позволяющие удержать игроков # в Блэкджек за столом. 
#", "from sklearn.model_selection import KFold, train_test_split from sklearn.ensemble import RandomForestClassifier from", "есть # жалобы, заменить их билет на билет первого класса.", "клиентов – бизнес-термин, описывающий # насколько интенсивно клиенты покидают компанию", "try_clf(X, y, clf_nofit): X_tr, X_val, y_tr, y_val = train_test_split(X, y,", "try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X, y, KNeighborsClassifier()) # std scaler", "utf-8 # In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]: #", "описывающий # насколько интенсивно клиенты покидают компанию или # прекращают", "as np from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score,", "идеальные условия в зале, позволяющие удержать игроков # в Блэкджек", "raw_churn_df = pd.read_csv('churn.csv') # In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes)", "# In[ ]: # todo: calibration and discrimination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py", "# datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]: # Load libraries import", "Каково отношение количества правильно спрогнозированных уходов # к общему количеству", "1. мобильные операторы, операторы кабельного телевидения и # компании, обслуживающие", "некоторых случаях от 5 до 20 раз дороже). 
# Примеры", "try_clf(X, y, SVC(gamma='scale')) try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X, y, KNeighborsClassifier())", "sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import", "In[ ]: # datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]: # Load", "import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier", "# # Predict probabilities # def try_probab(X, y, clf_nofit): #", "Scale everything std_scaler = StandardScaler(with_mean=True) X = std_scaler.fit_transform(X) display(X.shape) #", "# print(\"X=%s, Predicted=%s\" % (Xnew[i], ynew[i])) # In[ ]: #", "from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors", "try_probab(X, y, SVC(gamma='scale', probability=True)) # # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))", "общему количеству фактических уходов? 
# Precision # Каково отношение количества", "datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]: # Load libraries import matplotlib.pyplot", "чтобы предсказать # идеальные условия в зале, позволяющие удержать игроков", "numpy as np from sklearn.preprocessing import StandardScaler from sklearn.metrics import", "= clf.predict_proba(X_val) # # for i in range(len(X)): # #", "количества правильно спрогнозированных уходов # к общему количеству фактических уходов?", "src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]: # Load libraries import matplotlib.pyplot as", "# # for i in range(len(Xnew)): # # print(\"X=%s, Predicted=%s\"", "кабельного телевидения и # компании, обслуживающие прием платежей с помощью", "everything std_scaler = StandardScaler(with_mean=True) X = std_scaler.fit_transform(X) display(X.shape) # In[90]:", "которой, используя имеющиеся данные, необходимо отличить # клиентов, собирающихся уйти,", "0.8896882494004796 # In[86]: # Recall # Каково отношение количества правильно", "y_pred)) display(confusion_matrix(y_val, y_pred)) display(\"prec, rec, f1, support\", precision_recall_fscore_support(y_val, y_pred)) try_clf(X,", "SVC(gamma='scale', probability=True)) # # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) # #", "до 20 раз дороже). # Примеры использования: # 1. мобильные", "values X[\"Int'l Plan\"] = X[\"Int'l Plan\"].map({'no': 0, 'yes': 1}) X[\"VMail", "X[\"VMail Plan\"].map({'no': 0, 'yes': 1}) # In[81]: # Scale everything", "за столом. # 3. 
Aвиакомпании могут предложить клиентам, у которых", "y = raw_churn_df['Churn?'] X = raw_churn_df.drop('Churn?', axis=1) # In[79]: #", "train_test_split(X, y, random_state=42) # clf = clf_nofit.fit(X_tr, y_tr) # y_prob", "удержание старых (в некоторых случаях от 5 до 20 раз", "y, random_state=42) clf = clf_nofit.fit(X_tr, y_tr) y_pred = clf.predict(X_val) display(clf_nofit.__class__.__name__)", "# std scaler with_mean=True accuracies: # 0.9256594724220624 # 0.9496402877697842 #", "train_test_split(X, y, random_state=42) clf = clf_nofit.fit(X_tr, y_tr) y_pred = clf.predict(X_val)", "In[101]: # # Predict probabilities # def try_probab(X, y, clf_nofit):", "# клиентов, собирающихся уйти, от тех, кто этого делать #", "класса. # Эффективное удержание клиентов сводится к задаче, в рамках", "0.8896882494004796 # std scaler with_mean=True accuracies: # 0.9256594724220624 # 0.9496402877697842", "['State', 'Area Code', 'Phone'] X = X.drop(features_to_drop, axis=1) # In[80]:", "1}) X[\"VMail Plan\"] = X[\"VMail Plan\"].map({'no': 0, 'yes': 1}) #", "get_ipython().run_line_magic('matplotlib', 'inline') import pandas as pd import numpy as np", "y_prob = clf.predict_proba(X_val) # # for i in range(len(X)): #", "делать # не собирается. # In[ ]: # datset src:", "y, KNeighborsClassifier()) # std scaler with_mean=False accuracies: # 0.9256594724220624 #", "телевидения и # компании, обслуживающие прием платежей с помощью кредитных", "Perform CV for SVM, random forest and kNN def try_clf(X,", "import accuracy_score, confusion_matrix, precision_recall_fscore_support from sklearn.model_selection import KFold, train_test_split from", "In[ ]: # todo: calibration and discrimination # https://github.com/ghuiber/churn/blob/master/churn_measurements.py #", "display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) # In[78]: # Isolate target data y =", "правильно спрогнозированных уходов # к общему количеству фактических уходов? 
#", "дороже, # чем удержание старых (в некоторых случаях от 5", "# 1. мобильные операторы, операторы кабельного телевидения и # компании,", "y, random_state=42) # clf = clf_nofit.fit(X_tr, y_tr) # y_prob =", "в зале, позволяющие удержать игроков # в Блэкджек за столом.", "clf = clf_nofit.fit(X_tr, y_tr) y_pred = clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred))", "X[\"Int'l Plan\"] = X[\"Int'l Plan\"].map({'no': 0, 'yes': 1}) X[\"VMail Plan\"]", "display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val, y_pred)) display(\"prec, rec, f1, support\", precision_recall_fscore_support(y_val, y_pred))", "удержание клиентов сводится к задаче, в рамках # которой, используя", "import KNeighborsClassifier # In[3]: # Load dataset raw_churn_df = pd.read_csv('churn.csv')", "# src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]: # Показатель оттока клиентов", "# Scale everything std_scaler = StandardScaler(with_mean=True) X = std_scaler.fit_transform(X) display(X.shape)", "y_val = train_test_split(X, y, random_state=42) # clf = clf_nofit.fit(X_tr, y_tr)", "или # прекращают оплачивать товары или услуги. 
# Это ключевой", "range(len(X)): # # display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i])) # display(pd.value_counts(y_prob[:, 1])) #", "# In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]: # Показатель", "данные, необходимо отличить # клиентов, собирающихся уйти, от тех, кто", "y_val = train_test_split(X, y, random_state=42) clf = clf_nofit.fit(X_tr, y_tr) y_pred", "# 0.9256594724220624 # 0.9484412470023981 # 0.8896882494004796 # std scaler with_mean=True", "# try_probab(X, y, KNeighborsClassifier()) # # for i in range(len(Xnew)):", "https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]: # Load libraries import matplotlib.pyplot as plt", "In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]: # Показатель оттока", "# Примеры использования: # 1. мобильные операторы, операторы кабельного телевидения", "libraries import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import pandas as", "– бизнес-термин, описывающий # насколько интенсивно клиенты покидают компанию или", "as pd import numpy as np from sklearn.preprocessing import StandardScaler", "спрогнозированных уходов # к общему количеству спрогнозированных уходов? # In[101]:", "import StandardScaler from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support from sklearn.model_selection", "2. казино используют прогнозные модели, чтобы предсказать # идеальные условия", "pd.read_csv('churn.csv') # In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) #", "from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier # In[3]:", "прием платежей с помощью кредитных карт # 2. 
казино используют", "random forest and kNN def try_clf(X, y, clf_nofit): X_tr, X_val,", "имеющиеся данные, необходимо отличить # клиентов, собирающихся уйти, от тех,", "= pd.read_csv('churn.csv') # In[17]: display(raw_churn_df.shape) display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum())", "target data y = raw_churn_df['Churn?'] X = raw_churn_df.drop('Churn?', axis=1) #", "клиентов сводится к задаче, в рамках # которой, используя имеющиеся", "X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42) # clf", "карт # 2. казино используют прогнозные модели, чтобы предсказать #", "заменить их билет на билет первого класса. # Эффективное удержание", "к задаче, в рамках # которой, используя имеющиеся данные, необходимо", "# display(pd.value_counts(y_prob[:, 1])) # try_probab(X, y, SVC(gamma='scale', probability=True)) # #", "% (Xnew[i], ynew[i])) # In[ ]: # todo: calibration and", "In[ ]: # Показатель оттока клиентов – бизнес-термин, описывающий #", "clf = clf_nofit.fit(X_tr, y_tr) # y_prob = clf.predict_proba(X_val) # #", "услуги. # Это ключевой показатель для многих компаний, потому что", "RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier #", "отличить # клиентов, собирающихся уйти, от тех, кто этого делать", "# 0.8896882494004796 # std scaler with_mean=True accuracies: # 0.9256594724220624 #", "этого делать # не собирается. # In[ ]: # datset", "axis=1) # In[80]: # Encode yes/no with 1/0 values X[\"Int'l", "# coding: utf-8 # In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[", "сводится к задаче, в рамках # которой, используя имеющиеся данные,", "столом. # 3. Aвиакомпании могут предложить клиентам, у которых есть", "уйти, от тех, кто этого делать # не собирается. #", "обслуживающие прием платежей с помощью кредитных карт # 2. 
казино", "y, SVC(gamma='scale')) try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1)) try_clf(X, y, KNeighborsClassifier()) #", "количеству фактических уходов? # Precision # Каково отношение количества правильно", "# Predict probabilities # def try_probab(X, y, clf_nofit): # X_tr,", "raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) # In[78]: # Isolate target data", "matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import pandas as pd import", "уходов? # Precision # Каково отношение количества правильно спрогнозированных уходов", "y_pred = clf.predict(X_val) display(clf_nofit.__class__.__name__) display(accuracy_score(y_val, y_pred)) display(confusion_matrix(y_val, y_pred)) display(\"prec, rec,", "X_val, y_tr, y_val = train_test_split(X, y, random_state=42) clf = clf_nofit.fit(X_tr,", "клиенты покидают компанию или # прекращают оплачивать товары или услуги.", "в Блэкджек за столом. # 3. Aвиакомпании могут предложить клиентам,", "as plt get_ipython().run_line_magic('matplotlib', 'inline') import pandas as pd import numpy", "компании, обслуживающие прием платежей с помощью кредитных карт # 2.", "KNeighborsClassifier()) # std scaler with_mean=False accuracies: # 0.9256594724220624 # 0.9484412470023981", "могут предложить клиентам, у которых есть # жалобы, заменить их", "'yes': 1}) X[\"VMail Plan\"] = X[\"VMail Plan\"].map({'no': 0, 'yes': 1})", "'Phone'] X = X.drop(features_to_drop, axis=1) # In[80]: # Encode yes/no", "sklearn.model_selection import KFold, train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.svm", "#!/usr/bin/env python # coding: utf-8 # In[1]: # src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/", "тех, кто этого делать # не собирается. # In[ ]:", "X = X.drop(features_to_drop, axis=1) # In[80]: # Encode yes/no with", "их билет на билет первого класса. 
# Эффективное удержание клиентов", "display(raw_churn_df.head(), raw_churn_df.tail()) display(raw_churn_df.columns.values) display(raw_churn_df.dtypes) display(raw_churn_df.isnull().sum()) # In[78]: # Isolate target", "Plan\"].map({'no': 0, 'yes': 1}) X[\"VMail Plan\"] = X[\"VMail Plan\"].map({'no': 0,", "новых клиентов обходится намного дороже, # чем удержание старых (в", "с помощью кредитных карт # 2. казино используют прогнозные модели,", "In[81]: # Scale everything std_scaler = StandardScaler(with_mean=True) X = std_scaler.fit_transform(X)", "# In[ ]: # datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv # In[88]: #", "# зачастую приобретение новых клиентов обходится намного дороже, # чем", "оттока клиентов – бизнес-термин, описывающий # насколько интенсивно клиенты покидают", "у которых есть # жалобы, заменить их билет на билет", "= X.drop(features_to_drop, axis=1) # In[80]: # Encode yes/no with 1/0", "количеству спрогнозированных уходов? # In[101]: # # Predict probabilities #", "# # display(\"y_true={0}, Predicted={1}\".format(y[i], y_prob[i])) # display(pd.value_counts(y_prob[:, 1])) # try_probab(X,", "Isolate target data y = raw_churn_df['Churn?'] X = raw_churn_df.drop('Churn?', axis=1)", "0, 'yes': 1}) # In[81]: # Scale everything std_scaler =", "# к общему количеству фактических уходов? 
# Precision # Каково", "X = raw_churn_df.drop('Churn?', axis=1) # In[79]: # Drop irrelevant features", "'inline') import pandas as pd import numpy as np from", "чем удержание старых (в некоторых случаях от 5 до 20", "http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/ # In[ ]: # Показатель оттока клиентов – бизнес-термин,", "CV for SVM, random forest and kNN def try_clf(X, y,", "sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support from", "features_to_drop = ['State', 'Area Code', 'Phone'] X = X.drop(features_to_drop, axis=1)", "условия в зале, позволяющие удержать игроков # в Блэкджек за", "от 5 до 20 раз дороже). # Примеры использования: #", "игроков # в Блэкджек за столом. # 3. Aвиакомпании могут", "= raw_churn_df['Churn?'] X = raw_churn_df.drop('Churn?', axis=1) # In[79]: # Drop", "raw_churn_df.drop('Churn?', axis=1) # In[79]: # Drop irrelevant features features_to_drop =", "товары или услуги. # Это ключевой показатель для многих компаний,", "# # for i in range(len(X)): # # display(\"y_true={0}, Predicted={1}\".format(y[i],", "ключевой показатель для многих компаний, потому что # зачастую приобретение", "print(\"X=%s, Predicted=%s\" % (Xnew[i], ynew[i])) # In[ ]: # todo:", "# try_probab(X, y, SVC(gamma='scale', probability=True)) # # try_probab(X, y, RandomForestClassifier(n_estimators=100,", "= train_test_split(X, y, random_state=42) clf = clf_nofit.fit(X_tr, y_tr) y_pred =", "многих компаний, потому что # зачастую приобретение новых клиентов обходится", "sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier # In[3]: #", "прекращают оплачивать товары или услуги. # Это ключевой показатель для", "кто этого делать # не собирается. 
# In[ ]: #", "мобильные операторы, операторы кабельного телевидения и # компании, обслуживающие прием", "используют прогнозные модели, чтобы предсказать # идеальные условия в зале,", "def try_clf(X, y, clf_nofit): X_tr, X_val, y_tr, y_val = train_test_split(X,", "намного дороже, # чем удержание старых (в некоторых случаях от", "# def try_probab(X, y, clf_nofit): # X_tr, X_val, y_tr, y_val", "Примеры использования: # 1. мобильные операторы, операторы кабельного телевидения и", "# Recall # Каково отношение количества правильно спрогнозированных уходов #", "In[86]: # Recall # Каково отношение количества правильно спрогнозированных уходов", "Load libraries import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import pandas", "# которой, используя имеющиеся данные, необходимо отличить # клиентов, собирающихся", "# 0.8896882494004796 # In[86]: # Recall # Каково отношение количества", "зачастую приобретение новых клиентов обходится намного дороже, # чем удержание", "Plan\"] = X[\"Int'l Plan\"].map({'no': 0, 'yes': 1}) X[\"VMail Plan\"] =", "]: # Показатель оттока клиентов – бизнес-термин, описывающий # насколько", "pandas as pd import numpy as np from sklearn.preprocessing import", "In[79]: # Drop irrelevant features features_to_drop = ['State', 'Area Code',", "компаний, потому что # зачастую приобретение новых клиентов обходится намного", "clf.predict_proba(X_val) # # for i in range(len(X)): # # display(\"y_true={0}," ]
[ "conditions: # # The above copyright notice and this permission", "Optional[str] = None class RowData(BaseModel): class Config: extra = Extra.allow", "class Sheet(BaseModel): class Config: extra = Extra.allow data: Optional[List[GridData]] =", "charge, to any person obtaining a copy # of this", "permit persons to whom the Software is # furnished to", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "Spreadsheet(BaseModel): class Config: extra = Extra.allow spreadsheetId: str sheets: List[Sheet]", "the Software, and to permit persons to whom the Software", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "(c) 2020 Airbyte # # Permission is hereby granted, free", "class Config: extra = Extra.allow title: Optional[str] = None class", "the Software is # furnished to do so, subject to", "above copyright notice and this permission notice shall be included", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "Optional[str] = None class SheetProperties(BaseModel): class Config: extra = Extra.allow", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "limitation the rights # to use, copy, modify, merge, publish,", "= None class CellData(BaseModel): class Config: extra = Extra.allow formattedValue:", "title: Optional[str] = None class SheetProperties(BaseModel): class Config: extra =", "Extra.allow data: Optional[List[GridData]] = None properties: Optional[SheetProperties] = None class", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE #", "class CellData(BaseModel): class Config: extra = Extra.allow formattedValue: Optional[str] =", "class Config: extra = Extra.allow formattedValue: Optional[str] = None class", "= Extra.allow data: Optional[List[GridData]] = None properties: Optional[SheetProperties] = None", "without limitation the rights # to use, copy, modify, merge,", "None class GridData(BaseModel): class Config: extra = Extra.allow rowData: Optional[List[RowData]]", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "import List, Optional from pydantic import BaseModel, Extra, Field class", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "# MIT License # # Copyright (c) 2020 Airbyte #", "License # # Copyright (c) 2020 Airbyte # # Permission", "Copyright (c) 2020 Airbyte # # Permission is hereby granted,", "persons to whom the Software is # furnished to do", "extra = Extra.allow formattedValue: Optional[str] = None class RowData(BaseModel): class", "# copies or substantial portions of the Software. 
# #", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "class GridData(BaseModel): class Config: extra = Extra.allow rowData: Optional[List[RowData]] =", "rights # to use, copy, modify, merge, publish, distribute, sublicense,", "class Config: extra = Extra.allow data: Optional[List[GridData]] = None properties:", "associated documentation files (the \"Software\"), to deal # in the", "= Extra.allow spreadsheetId: str sheets: List[Sheet] properties: Optional[SpreadsheetProperties] = None", "# in the Software without restriction, including without limitation the", "SheetProperties(BaseModel): class Config: extra = Extra.allow title: Optional[str] = None", "documentation files (the \"Software\"), to deal # in the Software", "extra = Extra.allow data: Optional[List[GridData]] = None properties: Optional[SheetProperties] =", "and/or sell # copies of the Software, and to permit", "pydantic import BaseModel, Extra, Field class SpreadsheetProperties(BaseModel): class Config: extra", "extra = Extra.allow spreadsheetId: str sheets: List[Sheet] properties: Optional[SpreadsheetProperties] =", "copies or substantial portions of the Software. # # THE", "the rights # to use, copy, modify, merge, publish, distribute,", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "properties: Optional[SheetProperties] = None class Spreadsheet(BaseModel): class Config: extra =", "DEALINGS IN THE # SOFTWARE. from __future__ import annotations from", "Optional[str] = None class CellData(BaseModel): class Config: extra = Extra.allow", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "all # copies or substantial portions of the Software. #", "to any person obtaining a copy # of this software", "class SpreadsheetProperties(BaseModel): class Config: extra = Extra.allow title: Optional[str] =", "portions of the Software. 
# # THE SOFTWARE IS PROVIDED", "= Extra.allow formattedValue: Optional[str] = None class RowData(BaseModel): class Config:", "= None properties: Optional[SheetProperties] = None class Spreadsheet(BaseModel): class Config:", "= None class GridData(BaseModel): class Config: extra = Extra.allow rowData:", "class Spreadsheet(BaseModel): class Config: extra = Extra.allow spreadsheetId: str sheets:", "Sheet(BaseModel): class Config: extra = Extra.allow data: Optional[List[GridData]] = None", "notice and this permission notice shall be included in all", "of the Software, and to permit persons to whom the", "this software and associated documentation files (the \"Software\"), to deal", "in all # copies or substantial portions of the Software.", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "is hereby granted, free of charge, to any person obtaining", "List, Optional from pydantic import BaseModel, Extra, Field class SpreadsheetProperties(BaseModel):", "# # The above copyright notice and this permission notice", "rowData: Optional[List[RowData]] = None class Sheet(BaseModel): class Config: extra =", "formattedValue: Optional[str] = None class RowData(BaseModel): class Config: extra =", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from", "Optional[SheetProperties] = None class Spreadsheet(BaseModel): class Config: extra = Extra.allow", "from __future__ import annotations from typing import List, Optional from", "RowData(BaseModel): class Config: extra = Extra.allow values: Optional[List[CellData]] = None", "extra = Extra.allow rowData: Optional[List[RowData]] = None class Sheet(BaseModel): class", "Config: extra = Extra.allow data: Optional[List[GridData]] = None properties: Optional[SheetProperties]", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "sell # copies of the Software, and to permit persons", "Optional from pydantic import BaseModel, Extra, Field class SpreadsheetProperties(BaseModel): class", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "shall be included in all # copies or substantial portions", "Software is # furnished to do so, subject to the", "and associated documentation files (the \"Software\"), to deal # in", "Software without restriction, including without limitation the rights # to", "extra = Extra.allow values: Optional[List[CellData]] = None class GridData(BaseModel): class", "and to permit persons to whom the Software is #", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "Extra.allow values: Optional[List[CellData]] = None class GridData(BaseModel): class Config: extra", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "copies of the Software, and to permit persons to whom", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "hereby granted, free of charge, to any person obtaining a", "THE # SOFTWARE. 
from __future__ import annotations from typing import", "whom the Software is # furnished to do so, subject", "publish, distribute, sublicense, and/or sell # copies of the Software,", "this permission notice shall be included in all # copies", "annotations from typing import List, Optional from pydantic import BaseModel,", "person obtaining a copy # of this software and associated", "# # Permission is hereby granted, free of charge, to", "without restriction, including without limitation the rights # to use,", "sublicense, and/or sell # copies of the Software, and to", "to the following conditions: # # The above copyright notice", "distribute, sublicense, and/or sell # copies of the Software, and", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "subject to the following conditions: # # The above copyright", "substantial portions of the Software. # # THE SOFTWARE IS", "notice shall be included in all # copies or substantial", "SOFTWARE. 
from __future__ import annotations from typing import List, Optional", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "import BaseModel, Extra, Field class SpreadsheetProperties(BaseModel): class Config: extra =", "do so, subject to the following conditions: # # The", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "and this permission notice shall be included in all #", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "in the Software without restriction, including without limitation the rights", "# # Copyright (c) 2020 Airbyte # # Permission is", "# furnished to do so, subject to the following conditions:", "Optional[List[CellData]] = None class GridData(BaseModel): class Config: extra = Extra.allow", "# Copyright (c) 2020 Airbyte # # Permission is hereby", "modify, merge, publish, distribute, sublicense, and/or sell # copies of", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "Field class SpreadsheetProperties(BaseModel): class Config: extra = Extra.allow title: Optional[str]", "any person obtaining a copy # of this software and", "GridData(BaseModel): class Config: extra = Extra.allow rowData: Optional[List[RowData]] = None", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "Software, and to permit persons to whom the Software is", "MIT License # # Copyright (c) 2020 Airbyte # #", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "class Config: extra = Extra.allow values: Optional[List[CellData]] = None class", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "# to use, copy, modify, merge, publish, distribute, 
sublicense, and/or", "restriction, including without limitation the rights # to use, copy,", "deal # in the Software without restriction, including without limitation", "None class RowData(BaseModel): class Config: extra = Extra.allow values: Optional[List[CellData]]", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "values: Optional[List[CellData]] = None class GridData(BaseModel): class Config: extra =", "or substantial portions of the Software. # # THE SOFTWARE", "Optional[List[GridData]] = None properties: Optional[SheetProperties] = None class Spreadsheet(BaseModel): class", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "class SheetProperties(BaseModel): class Config: extra = Extra.allow title: Optional[str] =", "Config: extra = Extra.allow values: Optional[List[CellData]] = None class GridData(BaseModel):", "extra = Extra.allow title: Optional[str] = None class SheetProperties(BaseModel): class", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "be included in all # copies or substantial portions of", "from pydantic import BaseModel, Extra, Field class SpreadsheetProperties(BaseModel): class Config:", "including without limitation the rights # to use, copy, modify,", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "copyright notice and this permission notice shall be included in", "2020 Airbyte # # Permission is hereby granted, free of", "\"Software\"), to deal # in the Software without restriction, including", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "title: Optional[str] = None class CellData(BaseModel): class Config: extra =", "# SOFTWARE. 
from __future__ import annotations from typing import List,", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "Config: extra = Extra.allow title: Optional[str] = None class CellData(BaseModel):", "free of charge, to any person obtaining a copy #", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "files (the \"Software\"), to deal # in the Software without", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "SpreadsheetProperties(BaseModel): class Config: extra = Extra.allow title: Optional[str] = None", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "class RowData(BaseModel): class Config: extra = Extra.allow values: Optional[List[CellData]] =", "# Permission is hereby granted, free of charge, to any", "of charge, to any person obtaining a copy # of", "software and associated documentation files (the \"Software\"), to deal #", "= None class SheetProperties(BaseModel): class Config: extra = Extra.allow title:", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "data: Optional[List[GridData]] = None properties: Optional[SheetProperties] = None class Spreadsheet(BaseModel):", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "merge, publish, distribute, sublicense, and/or sell # copies of the", "Extra, Field class SpreadsheetProperties(BaseModel): class Config: extra = Extra.allow title:", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "= None class Sheet(BaseModel): class Config: extra = Extra.allow data:", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "the Software without restriction, including without limitation the rights #", "class Config: extra = Extra.allow rowData: Optional[List[RowData]] = None class", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "# copies of the Software, and to permit persons to", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "typing import List, Optional from pydantic import BaseModel, Extra, Field", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "None class CellData(BaseModel): class Config: extra = Extra.allow formattedValue: Optional[str]", "CellData(BaseModel): class Config: extra = Extra.allow formattedValue: Optional[str] = None", "granted, free of charge, to any person obtaining a copy", "= Extra.allow rowData: Optional[List[RowData]] = None class Sheet(BaseModel): class Config:", "OTHER DEALINGS IN THE # SOFTWARE. from __future__ import annotations", "Extra.allow title: Optional[str] = None class CellData(BaseModel): class Config: extra", "class Config: extra = Extra.allow spreadsheetId: str sheets: List[Sheet] properties:", "None class Sheet(BaseModel): class Config: extra = Extra.allow data: Optional[List[GridData]]", "obtaining a copy # of this software and associated documentation", "Config: extra = Extra.allow rowData: Optional[List[RowData]] = None class Sheet(BaseModel):", "Extra.allow formattedValue: Optional[str] = None class RowData(BaseModel): class Config: extra", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "= Extra.allow title: Optional[str] = None class CellData(BaseModel): class Config:", "is # furnished to do so, subject to the following", "Extra.allow rowData: Optional[List[RowData]] = None class Sheet(BaseModel): class Config: extra", "to whom the Software is # furnished to do so,", "= None class RowData(BaseModel): class Config: extra = Extra.allow values:", "None properties: Optional[SheetProperties] = None class Spreadsheet(BaseModel): class Config: extra", "from typing import List, Optional from pydantic import BaseModel, Extra,", "copy # of this software and 
associated documentation files (the", "= None class Spreadsheet(BaseModel): class Config: extra = Extra.allow spreadsheetId:", "included in all # copies or substantial portions of the", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "Permission is hereby granted, free of charge, to any person", "# of this software and associated documentation files (the \"Software\"),", "furnished to do so, subject to the following conditions: #", "to do so, subject to the following conditions: # #", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "# The above copyright notice and this permission notice shall", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "import annotations from typing import List, Optional from pydantic import", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "Extra.allow title: Optional[str] = None class SheetProperties(BaseModel): class Config: extra", "so, subject to the following conditions: # # The above", "None class Spreadsheet(BaseModel): class Config: extra = Extra.allow spreadsheetId: str", "a copy # of this software and associated documentation files", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "of this software and associated documentation files (the \"Software\"), to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "<reponame>rajatariya21/airbyte # MIT License # # Copyright (c) 2020 Airbyte", "The above copyright notice and this permission notice shall be", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "OR OTHER DEALINGS IN THE # SOFTWARE. 
from __future__ import", "Config: extra = Extra.allow spreadsheetId: str sheets: List[Sheet] properties: Optional[SpreadsheetProperties]", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "permission notice shall be included in all # copies or", "Airbyte # # Permission is hereby granted, free of charge,", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Config: extra = Extra.allow formattedValue: Optional[str] = None class RowData(BaseModel):", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "= Extra.allow title: Optional[str] = None class SheetProperties(BaseModel): class Config:", "= Extra.allow values: Optional[List[CellData]] = None class GridData(BaseModel): class Config:", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "the following conditions: # # The above copyright notice and", "BaseModel, Extra, Field class SpreadsheetProperties(BaseModel): class Config: extra = Extra.allow", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "IN THE # SOFTWARE. 
from __future__ import annotations from typing", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "(the \"Software\"), to deal # in the Software without restriction,", "__future__ import annotations from typing import List, Optional from pydantic", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "None class SheetProperties(BaseModel): class Config: extra = Extra.allow title: Optional[str]", "following conditions: # # The above copyright notice and this", "extra = Extra.allow title: Optional[str] = None class CellData(BaseModel): class", "to permit persons to whom the Software is # furnished", "to deal # in the Software without restriction, including without", "Config: extra = Extra.allow title: Optional[str] = None class SheetProperties(BaseModel):", "Optional[List[RowData]] = None class Sheet(BaseModel): class Config: extra = Extra.allow", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND" ]
[ "api request to https://opentdb.com/ Limitations: Only 1 Category can be", "200 \"\"\" try: return await self.__request( session, num_questions, category, diffculty,", "class :param with_token: If True then the instance will uses", "type_) finally: if close_session: session.close() async def __request(self, session: ClientSession,", "from aiohttp import ClientSession from requests import get from pytrivia.__helpers", "per call. :param session: an Aiohttp client session. :param close_session:", "for https://opentdb.com/ \"\"\" from aiohttp import ClientSession from requests import", "Only 1 Category can be requested per API Call. To", "type_: Type = None) -> dict: \"\"\" Helper method for", "Maximum of 50 Questions can be retrieved per call. :param", "per API Call. To get questions from any category, don't", "session: an Aiohttp client session. :param close_session: True to close", "self.__request( session, num_questions, category, diffculty, type_) finally: if close_session: session.close()", "return self.request(num_questions, category, diffculty, type_) else: return decode_dict(result) async def", "to close the session after the request. :param num_questions: the", "1 Category can be requested per API Call. To get", "url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if category is not None: url", "1 or num_questions > 50: raise ValueError url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format(", ":param close_session: True to close the session after the request.", "the diffculty of the question. None for any diffculty :param", "session, num_questions, category, diffculty, type_) finally: if close_session: session.close() async", "API Call. To get questions from any category, don't specify", "will uses a session token \"\"\" self.token = get_token() if", "async def request_async(self, session: ClientSession, close_session: bool, num_questions: int, category:", ":param type_: the type of the question. 
None for any", "\"\"\" Send an api request to https://opentdb.com/ Limitations: Only 1", "response code isn't 200 \"\"\" try: return await self.__request( session,", "a session token \"\"\" self.token = get_token() if with_token else", "category, diffculty, type_) finally: if close_session: session.close() async def __request(self,", "ClientSession from requests import get from pytrivia.__helpers import decode_dict, get_token,", "close_session: bool, num_questions: int, category: Category = None, diffculty: Diffculty", "num_questions, category, diffculty, type_): \"\"\" Helper method to generate request", "None def request(self, num_questions: int, category: Category = None, diffculty:", "ValueError url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if category is not None:", "question. None for any category :param diffculty: the diffculty of", "else None def request(self, num_questions: int, category: Category = None,", "(3, 4): self.token = get_token() return self.request(num_questions, category, diffculty, type_)", "ClientSession, num_questions: int, category: Category = None, diffculty: Diffculty =", "can be retrieved per call. :param num_questions: the number of", "if result['response_code'] in (3, 4): self.token = get_token() return self.request(num_questions,", ":return: the api call response :rtype: dict :raises: ValueError when", "request(self, num_questions: int, category: Category = None, diffculty: Diffculty =", "url += '&type={}'.format(type_.value) if self.token is not None: url +=", "import ClientSession from requests import get from pytrivia.__helpers import decode_dict,", "of the question. None for any category :param diffculty: the", ":param num_questions: the number of questions, must be between 1", "Call. To get questions from any category, don't specify a", "dict: \"\"\" Helper method for the async request. 
\"\"\" resp", "\"\"\" A simple python api wrapper for https://opentdb.com/ \"\"\" from", "the api call response :rtype: dict :raises: ValueError when the", "if num_questions < 1 or num_questions > 50: raise ValueError", "when the num_questions parameter is less than 1 or greater", "the question. None for any type :return: the api call", ":raises ClientResponseError if the HTTP response code isn't 200 \"\"\"", "import get from pytrivia.__helpers import decode_dict, get_token, make_request from pytrivia.enums", "diffculty, type_): \"\"\" Helper method to generate request url. \"\"\"", "to generate request url. \"\"\" if num_questions < 1 or", "request url. \"\"\" if num_questions < 1 or num_questions >", "def __url(self, num_questions, category, diffculty, type_): \"\"\" Helper method to", "-> dict: \"\"\" Send an api request to https://opentdb.com/ Limitations:", "any type :return: the api call response :rtype: dict :raises:", "= None) -> dict: \"\"\" Send an api request to", "get_token() if with_token else None def request(self, num_questions: int, category:", "Diffculty = None, type_: Type = None) -> dict: \"\"\"", "category, diffculty, type_) else: return decode_dict(result) async def request_async(self, session:", "with_token else None def request(self, num_questions: int, category: Category =", "4): self.token = get_token() return await self.__request( session, num_questions, category,", "num_questions, category, diffculty, type_) finally: if close_session: session.close() async def", "Limitations: Only 1 Category can be requested per API Call.", "__init__(self, with_token: bool): \"\"\" Initialize an instance of the Trivia", "None) -> dict: \"\"\" Send an api request to https://opentdb.com/", "an Aiohttp client session. 
:param close_session: True to close the", "None for any diffculty :param type_: the type of the", "bool, num_questions: int, category: Category = None, diffculty: Diffculty =", "for any category :param diffculty: the diffculty of the question.", "Category can be requested per API Call. To get questions", "To get questions from any category, don't specify a category.", "Send an api request to https://opentdb.com/ Limitations: Only 1 Category", "must be between 1 and 50 (inclusive) :param category: the", "decode_dict(result) async def request_async(self, session: ClientSession, close_session: bool, num_questions: int,", "A simple python api wrapper for https://opentdb.com/ \"\"\" from aiohttp", "= get_token() return await self.__request( session, num_questions, category, diffculty, type_)", "category: Category = None, diffculty: Diffculty = None, type_: Type", "type_: Type = None) -> dict: \"\"\" Send an api", "make_request( session, self.__url(num_questions, category, diffculty, type_)) result = await resp.json()", "50 \"\"\" result = get( self.__url(num_questions, category, diffculty, type_)).json() if", "request to https://opentdb.com/ Limitations: Only 1 Category can be requested", "and 50 (inclusive) :param category: the category of the question.", "be between 1 and 50 (inclusive) :param category: the category", "request. :param num_questions: the number of questions, must be between", "or greater than 50 :raises ClientResponseError if the HTTP response", "url. \"\"\" if num_questions < 1 or num_questions > 50:", "num_questions) if category is not None: url += '&category={}'.format(category.value) if", "None: url += '&type={}'.format(type_.value) if self.token is not None: url", "def __init__(self, with_token: bool): \"\"\" Initialize an instance of the", "= None, type_: Type = None) -> dict: \"\"\" Send", "50 (inclusive) :param category: the category of the question. 
None", "is not None: url += '&type={}'.format(type_.value) if self.token is not", "questions from any category, don't specify a category. A Maximum", "category, diffculty, type_)).json() if result['response_code'] in (3, 4): self.token =", "async def __request(self, session: ClientSession, num_questions: int, category: Category =", "type_)) result = await resp.json() if result['response_code'] in (3, 4):", "result = get( self.__url(num_questions, category, diffculty, type_)).json() if result['response_code'] in", "is less than 1 or greater than 50 :raises ClientResponseError", "1 or greater than 50 \"\"\" result = get( self.__url(num_questions,", "1 and 50 (inclusive) :param category: the category of the", "api call response :rtype: dict :raises: ValueError when the num_questions", "questions, must be between 1 and 50 (inclusive) :param category:", "session: ClientSession, close_session: bool, num_questions: int, category: Category = None,", "= get( self.__url(num_questions, category, diffculty, type_)).json() if result['response_code'] in (3,", "not None: url += '&difficulty={}'.format(diffculty.value) if type_ is not None:", "retrieved per call. :param num_questions: the number of questions, must", "await self.__request( session, num_questions, category, diffculty, type_) else: return decode_dict(result)", "get_token() return await self.__request( session, num_questions, category, diffculty, type_) else:", "Category = None, diffculty: Diffculty = None, type_: Type =", "close_session: True to close the session after the request. 
:param", "= None, type_: Type = None) -> dict: \"\"\" Helper", "decode_dict, get_token, make_request from pytrivia.enums import * class Trivia: def", "from pytrivia.enums import * class Trivia: def __init__(self, with_token: bool):", "greater than 50 \"\"\" result = get( self.__url(num_questions, category, diffculty,", "\"\"\" try: return await self.__request( session, num_questions, category, diffculty, type_)", "self.__url(num_questions, category, diffculty, type_)) result = await resp.json() if result['response_code']", "with_token: If True then the instance will uses a session", "'&type={}'.format(type_.value) if self.token is not None: url += '&token={}'.format(self.token) return", "than 1 or greater than 50 \"\"\" result = get(", "= await resp.json() if result['response_code'] in (3, 4): self.token =", "any diffculty :param type_: the type of the question. None", "generate request url. \"\"\" if num_questions < 1 or num_questions", "of the question. None for any diffculty :param type_: the", "the Trivia class :param with_token: If True then the instance", "self.request(num_questions, category, diffculty, type_) else: return decode_dict(result) async def request_async(self,", "def __request(self, session: ClientSession, num_questions: int, category: Category = None,", "less than 1 or greater than 50 :raises ClientResponseError if", "import * class Trivia: def __init__(self, with_token: bool): \"\"\" Initialize", "raise ValueError url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if category is not", "> 50: raise ValueError url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if category", "\"\"\" resp = await make_request( session, self.__url(num_questions, category, diffculty, type_))", "type_ is not None: url += '&type={}'.format(type_.value) if self.token is", "session, num_questions, category, diffculty, type_) else: return decode_dict(result) def __url(self,", "* class Trivia: def 
__init__(self, with_token: bool): \"\"\" Initialize an", "close the session after the request. :param num_questions: the number", "num_questions > 50: raise ValueError url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if", "1 or greater than 50 :raises ClientResponseError if the HTTP", "dict: \"\"\" Send an api request to https://opentdb.com/ Limitations: Only", "A Maximum of 50 Questions can be retrieved per call.", "the HTTP response code isn't 200 \"\"\" try: return await", "type_): \"\"\" Helper method to generate request url. \"\"\" if", "50 :raises ClientResponseError if the HTTP response code isn't 200", "for any type :return: the api call response :rtype: dict", "in (3, 4): self.token = get_token() return await self.__request( session,", "int, category: Category = None, diffculty: Diffculty = None, type_:", "session: ClientSession, num_questions: int, category: Category = None, diffculty: Diffculty", "self.token = get_token() return self.request(num_questions, category, diffculty, type_) else: return", "or greater than 50 \"\"\" result = get( self.__url(num_questions, category,", "close_session: session.close() async def __request(self, session: ClientSession, num_questions: int, category:", "return decode_dict(result) def __url(self, num_questions, category, diffculty, type_): \"\"\" Helper", "HTTP response code isn't 200 \"\"\" try: return await self.__request(", "pytrivia.__helpers import decode_dict, get_token, make_request from pytrivia.enums import * class", "= get_token() if with_token else None def request(self, num_questions: int,", "None, diffculty: Diffculty = None, type_: Type = None) ->", "\"\"\" result = get( self.__url(num_questions, category, diffculty, type_)).json() if result['response_code']", "num_questions: int, category: Category = None, diffculty: Diffculty = None,", "else: return decode_dict(result) def __url(self, num_questions, category, diffculty, type_): \"\"\"", "\"\"\" Helper method to generate 
request url. \"\"\" if num_questions", "any category :param diffculty: the diffculty of the question. None", "50: raise ValueError url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if category is", "num_questions parameter is less than 1 or greater than 50", "with_token: bool): \"\"\" Initialize an instance of the Trivia class", "= 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if category is not None: url +=", "for any diffculty :param type_: the type of the question.", "the request. :param num_questions: the number of questions, must be", "to https://opentdb.com/ Limitations: Only 1 Category can be requested per", "then the instance will uses a session token \"\"\" self.token", "type_) else: return decode_dict(result) async def request_async(self, session: ClientSession, close_session:", "dict :raises: ValueError when the num_questions parameter is less than", "the number of questions, must be between 1 and 50", "type_)).json() if result['response_code'] in (3, 4): self.token = get_token() return", "of 50 Questions can be retrieved per call. 
:param num_questions:", "or num_questions > 50: raise ValueError url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions)", "parameter is less than 1 or greater than 50 :raises", "None: url += '&difficulty={}'.format(diffculty.value) if type_ is not None: url", "resp.json() if result['response_code'] in (3, 4): self.token = get_token() return", "session.close() async def __request(self, session: ClientSession, num_questions: int, category: Category", "requests import get from pytrivia.__helpers import decode_dict, get_token, make_request from", "Trivia: def __init__(self, with_token: bool): \"\"\" Initialize an instance of", "aiohttp import ClientSession from requests import get from pytrivia.__helpers import", "<filename>pytrivia/trivia.py \"\"\" A simple python api wrapper for https://opentdb.com/ \"\"\"", "import decode_dict, get_token, make_request from pytrivia.enums import * class Trivia:", "self.token = get_token() if with_token else None def request(self, num_questions:", "the num_questions parameter is less than 1 or greater than", "per call. :param num_questions: the number of questions, must be", "if with_token else None def request(self, num_questions: int, category: Category", "diffculty of the question. None for any diffculty :param type_:", "call. :param session: an Aiohttp client session. :param close_session: True", "return decode_dict(result) async def request_async(self, session: ClientSession, close_session: bool, num_questions:", "'&category={}'.format(category.value) if diffculty is not None: url += '&difficulty={}'.format(diffculty.value) if", "the type of the question. None for any type :return:", "await resp.json() if result['response_code'] in (3, 4): self.token = get_token()", "the question. None for any diffculty :param type_: the type", "question. None for any type :return: the api call response", "Helper method for the async request. \"\"\" resp = await", "the async request. 
\"\"\" resp = await make_request( session, self.__url(num_questions,", "decode_dict(result) def __url(self, num_questions, category, diffculty, type_): \"\"\" Helper method", "the instance will uses a session token \"\"\" self.token =", "isn't 200 \"\"\" try: return await self.__request( session, num_questions, category,", "get questions from any category, don't specify a category. A", "diffculty: Diffculty = None, type_: Type = None) -> dict:", "diffculty: the diffculty of the question. None for any diffculty", "python api wrapper for https://opentdb.com/ \"\"\" from aiohttp import ClientSession", "less than 1 or greater than 50 \"\"\" result =", "\"\"\" from aiohttp import ClientSession from requests import get from", "Type = None) -> dict: \"\"\" Send an api request", "async request. \"\"\" resp = await make_request( session, self.__url(num_questions, category,", "https://opentdb.com/ \"\"\" from aiohttp import ClientSession from requests import get", "ClientResponseError if the HTTP response code isn't 200 \"\"\" try:", "num_questions, category, diffculty, type_) else: return decode_dict(result) def __url(self, num_questions,", "token \"\"\" self.token = get_token() if with_token else None def", "get_token, make_request from pytrivia.enums import * class Trivia: def __init__(self,", "bool): \"\"\" Initialize an instance of the Trivia class :param", "None) -> dict: \"\"\" Helper method for the async request.", "type_: the type of the question. 
None for any type", "not None: url += '&category={}'.format(category.value) if diffculty is not None:", "None, type_: Type = None) -> dict: \"\"\" Send an", "pytrivia.enums import * class Trivia: def __init__(self, with_token: bool): \"\"\"", "self.token = get_token() return await self.__request( session, num_questions, category, diffculty,", "response :rtype: dict :raises: ValueError when the num_questions parameter is", "https://opentdb.com/ Limitations: Only 1 Category can be requested per API", "code isn't 200 \"\"\" try: return await self.__request( session, num_questions,", ":param session: an Aiohttp client session. :param close_session: True to", "-> dict: \"\"\" Helper method for the async request. \"\"\"", "of the Trivia class :param with_token: If True then the", "specify a category. A Maximum of 50 Questions can be", "client session. :param close_session: True to close the session after", "result = await resp.json() if result['response_code'] in (3, 4): self.token", ":param category: the category of the question. None for any", "None for any type :return: the api call response :rtype:", "self.__request( session, num_questions, category, diffculty, type_) else: return decode_dict(result) def", "wrapper for https://opentdb.com/ \"\"\" from aiohttp import ClientSession from requests", "than 1 or greater than 50 :raises ClientResponseError if the", "Questions can be retrieved per call. :param session: an Aiohttp", "category, don't specify a category. A Maximum of 50 Questions", "def request_async(self, session: ClientSession, close_session: bool, num_questions: int, category: Category", "of 50 Questions can be retrieved per call. :param session:", "category :param diffculty: the diffculty of the question. None for", "a category. 
A Maximum of 50 Questions can be retrieved", "True then the instance will uses a session token \"\"\"", "session, self.__url(num_questions, category, diffculty, type_)) result = await resp.json() if", "uses a session token \"\"\" self.token = get_token() if with_token", "request. \"\"\" resp = await make_request( session, self.__url(num_questions, category, diffculty,", "if category is not None: url += '&category={}'.format(category.value) if diffculty", "url += '&difficulty={}'.format(diffculty.value) if type_ is not None: url +=", "class Trivia: def __init__(self, with_token: bool): \"\"\" Initialize an instance", "call response :rtype: dict :raises: ValueError when the num_questions parameter", "True to close the session after the request. :param num_questions:", "category, diffculty, type_): \"\"\" Helper method to generate request url.", "else: return decode_dict(result) async def request_async(self, session: ClientSession, close_session: bool,", ":param diffculty: the diffculty of the question. 
None for any", "self.__url(num_questions, category, diffculty, type_)).json() if result['response_code'] in (3, 4): self.token", "category, diffculty, type_) else: return decode_dict(result) def __url(self, num_questions, category,", "ValueError when the num_questions parameter is less than 1 or", "__url(self, num_questions, category, diffculty, type_): \"\"\" Helper method to generate", "Trivia class :param with_token: If True then the instance will", "diffculty is not None: url += '&difficulty={}'.format(diffculty.value) if type_ is", "None: url += '&category={}'.format(category.value) if diffculty is not None: url", "= await make_request( session, self.__url(num_questions, category, diffculty, type_)) result =", "'https://opentdb.com/api.php?amount={}&encode=base64'.format( num_questions) if category is not None: url += '&category={}'.format(category.value)", "make_request from pytrivia.enums import * class Trivia: def __init__(self, with_token:", "Helper method to generate request url. \"\"\" if num_questions <", "requested per API Call. To get questions from any category,", "after the request. :param num_questions: the number of questions, must", "question. None for any diffculty :param type_: the type of", "simple python api wrapper for https://opentdb.com/ \"\"\" from aiohttp import", "greater than 50 :raises ClientResponseError if the HTTP response code", "resp = await make_request( session, self.__url(num_questions, category, diffculty, type_)) result", "can be retrieved per call. :param session: an Aiohttp client", "any category, don't specify a category. A Maximum of 50", "num_questions: the number of questions, must be between 1 and", "from any category, don't specify a category. A Maximum of", "diffculty, type_) else: return decode_dict(result) async def request_async(self, session: ClientSession,", "finally: if close_session: session.close() async def __request(self, session: ClientSession, num_questions:", "type of the question. 
None for any type :return: the", "api wrapper for https://opentdb.com/ \"\"\" from aiohttp import ClientSession from", "instance will uses a session token \"\"\" self.token = get_token()", "+= '&difficulty={}'.format(diffculty.value) if type_ is not None: url += '&type={}'.format(type_.value)", "the session after the request. :param num_questions: the number of", "+= '&category={}'.format(category.value) if diffculty is not None: url += '&difficulty={}'.format(diffculty.value)", "try: return await self.__request( session, num_questions, category, diffculty, type_) finally:", "Type = None) -> dict: \"\"\" Helper method for the", "get_token() return self.request(num_questions, category, diffculty, type_) else: return decode_dict(result) async", "if close_session: session.close() async def __request(self, session: ClientSession, num_questions: int,", "category of the question. None for any category :param diffculty:", "from requests import get from pytrivia.__helpers import decode_dict, get_token, make_request", "def request(self, num_questions: int, category: Category = None, diffculty: Diffculty", "from pytrivia.__helpers import decode_dict, get_token, make_request from pytrivia.enums import *", "await self.__request( session, num_questions, category, diffculty, type_) finally: if close_session:", "session after the request. :param num_questions: the number of questions,", "__request(self, session: ClientSession, num_questions: int, category: Category = None, diffculty:", "result['response_code'] in (3, 4): self.token = get_token() return await self.__request(", "if diffculty is not None: url += '&difficulty={}'.format(diffculty.value) if type_", "Initialize an instance of the Trivia class :param with_token: If", "\"\"\" if num_questions < 1 or num_questions > 50: raise", "50 Questions can be retrieved per call. :param session: an", "Questions can be retrieved per call. 
:param num_questions: the number", "None, type_: Type = None) -> dict: \"\"\" Helper method", "method to generate request url. \"\"\" if num_questions < 1", "in (3, 4): self.token = get_token() return self.request(num_questions, category, diffculty,", "request_async(self, session: ClientSession, close_session: bool, num_questions: int, category: Category =", "type_) else: return decode_dict(result) def __url(self, num_questions, category, diffculty, type_):", "be retrieved per call. :param num_questions: the number of questions,", ":rtype: dict :raises: ValueError when the num_questions parameter is less", "diffculty :param type_: the type of the question. None for", "get( self.__url(num_questions, category, diffculty, type_)).json() if result['response_code'] in (3, 4):", "category is not None: url += '&category={}'.format(category.value) if diffculty is", "None for any category :param diffculty: the diffculty of the", "method for the async request. \"\"\" resp = await make_request(", "can be requested per API Call. To get questions from", "of questions, must be between 1 and 50 (inclusive) :param", "for the async request. \"\"\" resp = await make_request( session,", "+= '&type={}'.format(type_.value) if self.token is not None: url += '&token={}'.format(self.token)", "type :return: the api call response :rtype: dict :raises: ValueError", "get from pytrivia.__helpers import decode_dict, get_token, make_request from pytrivia.enums import", "parameter is less than 1 or greater than 50 \"\"\"", "the category of the question. None for any category :param", "than 50 :raises ClientResponseError if the HTTP response code isn't", "category, diffculty, type_)) result = await resp.json() if result['response_code'] in", "diffculty, type_)) result = await resp.json() if result['response_code'] in (3,", "(inclusive) :param category: the category of the question. 
None for", "'&difficulty={}'.format(diffculty.value) if type_ is not None: url += '&type={}'.format(type_.value) if", "an instance of the Trivia class :param with_token: If True", "result['response_code'] in (3, 4): self.token = get_token() return self.request(num_questions, category,", "of the question. None for any type :return: the api", "return await self.__request( session, num_questions, category, diffculty, type_) else: return", "an api request to https://opentdb.com/ Limitations: Only 1 Category can", "number of questions, must be between 1 and 50 (inclusive)", "return await self.__request( session, num_questions, category, diffculty, type_) finally: if", "await make_request( session, self.__url(num_questions, category, diffculty, type_)) result = await", "session token \"\"\" self.token = get_token() if with_token else None", "is less than 1 or greater than 50 \"\"\" result", "\"\"\" Helper method for the async request. \"\"\" resp =", "50 Questions can be retrieved per call. :param num_questions: the", "session. :param close_session: True to close the session after the", "the question. None for any category :param diffculty: the diffculty", "if type_ is not None: url += '&type={}'.format(type_.value) if self.token", "don't specify a category. A Maximum of 50 Questions can", "4): self.token = get_token() return self.request(num_questions, category, diffculty, type_) else:", "call. :param num_questions: the number of questions, must be between", "(3, 4): self.token = get_token() return await self.__request( session, num_questions,", "= get_token() return self.request(num_questions, category, diffculty, type_) else: return decode_dict(result)", "retrieved per call. :param session: an Aiohttp client session. 
:param", "\"\"\" Initialize an instance of the Trivia class :param with_token:", "if the HTTP response code isn't 200 \"\"\" try: return", "< 1 or num_questions > 50: raise ValueError url =", "url += '&category={}'.format(category.value) if diffculty is not None: url +=", "ClientSession, close_session: bool, num_questions: int, category: Category = None, diffculty:", "diffculty, type_) finally: if close_session: session.close() async def __request(self, session:", "Aiohttp client session. :param close_session: True to close the session", "diffculty, type_)).json() if result['response_code'] in (3, 4): self.token = get_token()", "is not None: url += '&difficulty={}'.format(diffculty.value) if type_ is not", "category. A Maximum of 50 Questions can be retrieved per", "be requested per API Call. To get questions from any", "if result['response_code'] in (3, 4): self.token = get_token() return await", ":param with_token: If True then the instance will uses a", "not None: url += '&type={}'.format(type_.value) if self.token is not None:", "\"\"\" self.token = get_token() if with_token else None def request(self,", "= None, diffculty: Diffculty = None, type_: Type = None)", "If True then the instance will uses a session token", "instance of the Trivia class :param with_token: If True then", "category: the category of the question. None for any category", "= None) -> dict: \"\"\" Helper method for the async", "diffculty, type_) else: return decode_dict(result) def __url(self, num_questions, category, diffculty,", ":raises: ValueError when the num_questions parameter is less than 1", "than 50 \"\"\" result = get( self.__url(num_questions, category, diffculty, type_)).json()", "be retrieved per call. 
:param session: an Aiohttp client session.", "num_questions < 1 or num_questions > 50: raise ValueError url", "if self.token is not None: url += '&token={}'.format(self.token) return url", "is not None: url += '&category={}'.format(category.value) if diffculty is not", "between 1 and 50 (inclusive) :param category: the category of" ]
[ "as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) return local_filename def", "os.path.split(fpath) dest_path = os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting %s --> %s\"%(fpath, dest_path))", "fname.endswith(\"tar\"): tar = tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir) tar.close() return dest_path def", "= tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir) tar.close() elif fname.endswith(\"tar\"): tar = tarfile.open(fname,", "requests.get(url, stream=True) as r: r.raise_for_status() with open(local_filename, 'wb') as f:", "dest_path = os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting %s --> %s\"%(fpath, dest_path)) if", "root, dirs, files in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent", "open(local_filename, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) return", "\"r:gz\") tar.extractall(path=fname_dir) tar.close() elif fname.endswith(\"tar\"): tar = tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir)", "import os def download_file(url, directory): local_filename = os.path.join(directory, url.split('/')[-1]) print", "download_file(url, directory): local_filename = os.path.join(directory, url.split('/')[-1]) print (\"Downloading %s -->", "os def download_file(url, directory): local_filename = os.path.join(directory, url.split('/')[-1]) print (\"Downloading", "* 4 * (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent = ' '", "local_filename)) with requests.get(url, stream=True) as r: r.raise_for_status() with open(local_filename, 'wb')", "' ' * 4 * (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent =", "dirs, files in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent =", "url.split('/')[-1]) print (\"Downloading %s --> %s\"%(url, local_filename)) with requests.get(url, stream=True)", "r: r.raise_for_status() with open(local_filename, 'wb') as f: for chunk in", 
"= os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting %s --> %s\"%(fpath, dest_path)) if fname.endswith(\"tar.gz\"):", "= ' ' * 4 * (level + 1) for", "tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir) tar.close() elif fname.endswith(\"tar\"): tar = tarfile.open(fname, \"r:\")", "' * 4 * (level + 1) for f in", "'wb') as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) return local_filename", "tar.extractall(path=fname_dir) tar.close() return dest_path def list_files(startpath): for root, dirs, files", "local_filename = os.path.join(directory, url.split('/')[-1]) print (\"Downloading %s --> %s\"%(url, local_filename))", "= root.replace(startpath, '').count(os.sep) indent = ' ' * 4 *", "%s\"%(url, local_filename)) with requests.get(url, stream=True) as r: r.raise_for_status() with open(local_filename,", "'').count(os.sep) indent = ' ' * 4 * (level) print('{}{}/'.format(indent,", "os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting %s --> %s\"%(fpath, dest_path)) if fname.endswith(\"tar.gz\"): tar", "def list_files(startpath): for root, dirs, files in os.walk(startpath): level =", "import requests import tarfile import os def download_file(url, directory): local_filename", "* 4 * (level + 1) for f in files:", "= ' ' * 4 * (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent", "level = root.replace(startpath, '').count(os.sep) indent = ' ' * 4", "import tarfile import os def download_file(url, directory): local_filename = os.path.join(directory,", "with requests.get(url, stream=True) as r: r.raise_for_status() with open(local_filename, 'wb') as", "dest_path def list_files(startpath): for root, dirs, files in os.walk(startpath): level", "tar.close() elif fname.endswith(\"tar\"): tar = tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir) tar.close() return", "for chunk in r.iter_content(chunk_size=8192): f.write(chunk) return local_filename def extract_tar(fpath): fname_dir,", "= 
os.path.join(directory, url.split('/')[-1]) print (\"Downloading %s --> %s\"%(url, local_filename)) with", "= os.path.split(fpath) dest_path = os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting %s --> %s\"%(fpath,", "indent = ' ' * 4 * (level) print('{}{}/'.format(indent, os.path.basename(root)))", "f.write(chunk) return local_filename def extract_tar(fpath): fname_dir, fname = os.path.split(fpath) dest_path", "%s --> %s\"%(fpath, dest_path)) if fname.endswith(\"tar.gz\"): tar = tarfile.open(fpath, \"r:gz\")", "with open(local_filename, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk)", "in r.iter_content(chunk_size=8192): f.write(chunk) return local_filename def extract_tar(fpath): fname_dir, fname =", "tar.close() return dest_path def list_files(startpath): for root, dirs, files in", "root.replace(startpath, '').count(os.sep) indent = ' ' * 4 * (level)", "stream=True) as r: r.raise_for_status() with open(local_filename, 'wb') as f: for", "def download_file(url, directory): local_filename = os.path.join(directory, url.split('/')[-1]) print (\"Downloading %s", "4 * (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent = ' ' *", "return dest_path def list_files(startpath): for root, dirs, files in os.walk(startpath):", "requests import tarfile import os def download_file(url, directory): local_filename =", "os.path.basename(root))) subindent = ' ' * 4 * (level +", "return local_filename def extract_tar(fpath): fname_dir, fname = os.path.split(fpath) dest_path =", "os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent = ' ' *", "%s\"%(fpath, dest_path)) if fname.endswith(\"tar.gz\"): tar = tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir) tar.close()", "--> %s\"%(url, local_filename)) with requests.get(url, stream=True) as r: r.raise_for_status() with", "print (\"Extracting %s --> %s\"%(fpath, dest_path)) if fname.endswith(\"tar.gz\"): tar =", "tarfile import os def 
download_file(url, directory): local_filename = os.path.join(directory, url.split('/')[-1])", "fname.endswith(\"tar.gz\"): tar = tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir) tar.close() elif fname.endswith(\"tar\"): tar", "extract_tar(fpath): fname_dir, fname = os.path.split(fpath) dest_path = os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting", "in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent = ' '", "4 * (level + 1) for f in files: print('{}{}'.format(subindent,", "--> %s\"%(fpath, dest_path)) if fname.endswith(\"tar.gz\"): tar = tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir)", "os.path.join(directory, url.split('/')[-1]) print (\"Downloading %s --> %s\"%(url, local_filename)) with requests.get(url,", "* (level + 1) for f in files: print('{}{}'.format(subindent, f))", "\"r:\") tar.extractall(path=fname_dir) tar.close() return dest_path def list_files(startpath): for root, dirs,", "if fname.endswith(\"tar.gz\"): tar = tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir) tar.close() elif fname.endswith(\"tar\"):", "tar.extractall(path=fname_dir) tar.close() elif fname.endswith(\"tar\"): tar = tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir) tar.close()", "' * 4 * (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent = '", "tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir) tar.close() return dest_path def list_files(startpath): for root,", "dest_path)) if fname.endswith(\"tar.gz\"): tar = tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir) tar.close() elif", "%s --> %s\"%(url, local_filename)) with requests.get(url, stream=True) as r: r.raise_for_status()", "elif fname.endswith(\"tar\"): tar = tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir) tar.close() return dest_path", "(level) print('{}{}/'.format(indent, os.path.basename(root))) subindent = ' ' * 4 *", "chunk in r.iter_content(chunk_size=8192): f.write(chunk) return 
local_filename def extract_tar(fpath): fname_dir, fname", "as r: r.raise_for_status() with open(local_filename, 'wb') as f: for chunk", "r.raise_for_status() with open(local_filename, 'wb') as f: for chunk in r.iter_content(chunk_size=8192):", "print (\"Downloading %s --> %s\"%(url, local_filename)) with requests.get(url, stream=True) as", "print('{}{}/'.format(indent, os.path.basename(root))) subindent = ' ' * 4 * (level", "files in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent = '", "r.iter_content(chunk_size=8192): f.write(chunk) return local_filename def extract_tar(fpath): fname_dir, fname = os.path.split(fpath)", "list_files(startpath): for root, dirs, files in os.walk(startpath): level = root.replace(startpath,", "= tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir) tar.close() return dest_path def list_files(startpath): for", "def extract_tar(fpath): fname_dir, fname = os.path.split(fpath) dest_path = os.path.join(fname_dir,fname.split('.')[0]) print", "(\"Downloading %s --> %s\"%(url, local_filename)) with requests.get(url, stream=True) as r:", "for root, dirs, files in os.walk(startpath): level = root.replace(startpath, '').count(os.sep)", "tar = tarfile.open(fname, \"r:\") tar.extractall(path=fname_dir) tar.close() return dest_path def list_files(startpath):", "fname = os.path.split(fpath) dest_path = os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting %s -->", "(\"Extracting %s --> %s\"%(fpath, dest_path)) if fname.endswith(\"tar.gz\"): tar = tarfile.open(fpath,", "subindent = ' ' * 4 * (level + 1)", "tar = tarfile.open(fpath, \"r:gz\") tar.extractall(path=fname_dir) tar.close() elif fname.endswith(\"tar\"): tar =", "f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) return local_filename def extract_tar(fpath):", "directory): local_filename = os.path.join(directory, url.split('/')[-1]) print (\"Downloading %s --> %s\"%(url,", "local_filename def extract_tar(fpath): fname_dir, fname = 
os.path.split(fpath) dest_path = os.path.join(fname_dir,fname.split('.')[0])", "fname_dir, fname = os.path.split(fpath) dest_path = os.path.join(fname_dir,fname.split('.')[0]) print (\"Extracting %s", "* (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent = ' ' * 4", "' ' * 4 * (level + 1) for f" ]
[ "@property def base_url(self): return self._data.get(\"base_url\") @property def sprite_dirs(self): if \"sprite_dirs\"", "any configured base URL.\"\"\" base = self.base_url if base: p", "@property def anneal_steps(self): return int(self._data.get(\"anneal_steps\", 9200)) def get_spritemap_out(self, dn): \"Get", "base_url(self): return self._data.get(\"base_url\") @property def sprite_dirs(self): if \"sprite_dirs\" not in", "self.base_url if base: p = urljoin(base, p) return p @property", "(key[len(prefix):].strip(), value.strip()) def iter_config_stmts(data): return ifilter(None, imap(parse_config_stmt, data.splitlines())) def iter_css_config(parser):", "root self._data = dict(base) if base else {} if parser", "def padding(self): return self._data.get(\"padding\", (1, 1)) @property def anneal_steps(self): return", "sprite_dirs(self): if \"sprite_dirs\" not in self._data: return elif self._data.get(\"output_image\"): raise", "base: p = urljoin(base, p) return p @property def base_url(self):", "value.strip()) def iter_config_stmts(data): return ifilter(None, imap(parse_config_stmt, data.splitlines())) def iter_css_config(parser): for", "get_spritemap_url(self, fname): \"Get output image URL for spritemap *fname*.\" return", "line = line.strip() if line.startswith(prefix) and \"=\" in line: (key,", "from os import path from itertools import imap, ifilter from", "self.root)) def get_css_out(self, fname): \"Get output image filename for spritemap", "@property def sprite_dirs(self): if \"sprite_dirs\" not in self._data: return elif", "fname=fname) def normpath(self, p): \"\"\"Normalize a possibly relative path *p*", "1) return (key[len(prefix):].strip(), value.strip()) def iter_config_stmts(data): return ifilter(None, imap(parse_config_stmt, data.splitlines()))", "rv and self._data.get(\"output_image\"): raise RuntimeError(\"cannot have recursive spritemapping \" \"when", "line.split(\"=\", 1) return (key[len(prefix):].strip(), value.strip()) def 
iter_config_stmts(data): return ifilter(None, imap(parse_config_stmt,", "and self._data.get(\"output_image\"): raise RuntimeError(\"cannot have recursive spritemapping \" \"when output_image", "return self._data.iteritems() @classmethod def from_file(cls, fname): with open(fname, \"rb\") as", "CSSParser, iter_events def parse_config_stmt(line, prefix=\"spritemapper.\"): line = line.strip() if line.startswith(prefix)", "return path.normpath(path.join(self.root, p)) def absurl(self, p): \"\"\"Make an absolute reference", "dict(base) if base else {} if parser is not None:", "import CSSParser, iter_events def parse_config_stmt(line, prefix=\"spritemapper.\"): line = line.strip() if", "is None: root = path.dirname(fname) self.root = root self._data =", "open(fname, \"rb\") as fp: return cls(CSSParser.from_file(fp), fname=fname) def normpath(self, p):", "iter_css_config(parser): for ev in iter_events(parser, lexemes=(\"comment\",)): for v in iter_config_stmts(ev.comment):", "\"Get output image URL for spritemap *fname*.\" return self.absurl(path.relpath(fname, self.root))", "urljoin from .css import CSSParser, iter_events def parse_config_stmt(line, prefix=\"spritemapper.\"): line", "path.splitext(base) names = dict(filename=fname, dirname=dirn, basename=base, extension=ext) return self.normpath(self._data[\"output_css\"].format(**names)) else:", "self._data.update(iter_css_config(parser)) def __iter__(self): # this is mostly so you can", "path from itertools import imap, ifilter from urlparse import urljoin", "\"output_image\" in self._data: return self.normpath(self._data[\"output_image\"]) @property def is_mapping_recursive(self): rv =", "self._data: (base, ext) = path.splitext(base) names = dict(filename=fname, dirname=dirn, basename=base,", "not self._data.get(\"output_image\") else: return bool(rv) @property def padding(self): return self._data.get(\"padding\",", "this is mostly so you can go CSSConfig(base=CSSConfig(..)) return self._data.iteritems()", "fname and 
root is None: root = path.dirname(fname) self.root =", "= urljoin(base, p) return p @property def base_url(self): return self._data.get(\"base_url\")", "from pprint import pprint from .css import CSSParser with open(fname,", "for v in iter_config_stmts(ev.comment): yield v class CSSConfig(object): def __init__(self,", "spritemap *fname*.\" return self.absurl(path.relpath(fname, self.root)) def get_css_out(self, fname): \"Get output", "with open(fname, \"rb\") as fp: return cls(CSSParser.from_file(fp), fname=fname) def normpath(self,", "self._data: return self.normpath(self._data[\"output_image\"]) @property def is_mapping_recursive(self): rv = self._data.get(\"recursive\") if", "root.\"\"\" return path.normpath(path.join(self.root, p)) def absurl(self, p): \"\"\"Make an absolute", "self._data.get(\"output_image\") else: return bool(rv) @property def padding(self): return self._data.get(\"padding\", (1,", "def get_css_out(self, fname): \"Get output image filename for spritemap directory", "= dict(filename=fname, dirname=dirn, basename=base, extension=ext) return self.normpath(self._data[\"output_css\"].format(**names)) else: return path.join(dirn,", "a possibly relative path *p* to the root.\"\"\" return path.normpath(path.join(self.root,", "iter_config_stmts(ev.comment): yield v class CSSConfig(object): def __init__(self, parser=None, base=None, root=None,", "line: (key, value) = line.split(\"=\", 1) return (key[len(prefix):].strip(), value.strip()) def", "sdirs = shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath, sdirs) @property def output_image(self): if", "to the root.\"\"\" return path.normpath(path.join(self.root, p)) def absurl(self, p): \"\"\"Make", "as fp: print \"%s\\n%s\\n\" % (fname, \"=\" * len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp))))", "sdirs) @property def output_image(self): if \"output_image\" in self._data: return self.normpath(self._data[\"output_image\"])", "{} if parser is not None: 
self._data.update(iter_css_config(parser)) def __iter__(self): #", "(1, 1)) @property def anneal_steps(self): return int(self._data.get(\"anneal_steps\", 9200)) def get_spritemap_out(self,", "p) return p @property def base_url(self): return self._data.get(\"base_url\") @property def", "@property def padding(self): return self._data.get(\"padding\", (1, 1)) @property def anneal_steps(self):", "image URL for spritemap *fname*.\" return self.absurl(path.relpath(fname, self.root)) def get_css_out(self,", "print def main(): import sys for fn in sys.argv[1:]: print_config(fn)", "\" \"when output_image is set\") sdirs = shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath,", "set\") sdirs = shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath, sdirs) @property def output_image(self):", "\"=\" * len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def main(): import sys for", "and \"=\" in line: (key, value) = line.split(\"=\", 1) return", "filename for spritemap directory *dn*.\" if \"output_image\" in self._data: return", "normpath(self, p): \"\"\"Normalize a possibly relative path *p* to the", "to *p* from any configured base URL.\"\"\" base = self.base_url", "fname): \"Get output image filename for spritemap directory *fname*.\" (dirn,", "as fp: return cls(CSSParser.from_file(fp), fname=fname) def normpath(self, p): \"\"\"Normalize a", "for fn in sys.argv[1:]: print_config(fn) if __name__ == \"__main__\": main()", "path.dirname(fname) self.root = root self._data = dict(base) if base else", "v class CSSConfig(object): def __init__(self, parser=None, base=None, root=None, fname=None): if", "anneal_steps(self): return int(self._data.get(\"anneal_steps\", 9200)) def get_spritemap_out(self, dn): \"Get output image", "in self._data: (base, ext) = path.splitext(base) names = dict(filename=fname, dirname=dirn,", "__iter__(self): # this is mostly so you can go CSSConfig(base=CSSConfig(..))", "raise RuntimeError(\"cannot have 
recursive spritemapping \" \"when output_image is set\")", "else: return path.join(dirn, \"sm_\" + base) def print_config(fname): from pprint", "*dn*.\" if \"output_image\" in self._data: return self.output_image return dn +", "so you can go CSSConfig(base=CSSConfig(..)) return self._data.iteritems() @classmethod def from_file(cls,", "\"%s\\n%s\\n\" % (fname, \"=\" * len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def main():", "if rv and self._data.get(\"output_image\"): raise RuntimeError(\"cannot have recursive spritemapping \"", "root is None: root = path.dirname(fname) self.root = root self._data", "= path.splitext(base) names = dict(filename=fname, dirname=dirn, basename=base, extension=ext) return self.normpath(self._data[\"output_css\"].format(**names))", "(base, ext) = path.splitext(base) names = dict(filename=fname, dirname=dirn, basename=base, extension=ext)", "= line.split(\"=\", 1) return (key[len(prefix):].strip(), value.strip()) def iter_config_stmts(data): return ifilter(None,", "in iter_config_stmts(ev.comment): yield v class CSSConfig(object): def __init__(self, parser=None, base=None,", "if \"output_image\" in self._data: return self.normpath(self._data[\"output_image\"]) @property def is_mapping_recursive(self): rv", "map(self.normpath, sdirs) @property def output_image(self): if \"output_image\" in self._data: return", "def get_spritemap_url(self, fname): \"Get output image URL for spritemap *fname*.\"", "base URL.\"\"\" base = self.base_url if base: p = urljoin(base,", "raise RuntimeError(\"cannot have sprite_dirs \" \"when output_image is set\") sdirs", "__init__(self, parser=None, base=None, root=None, fname=None): if fname and root is", "output image filename for spritemap directory *fname*.\" (dirn, base) =", "if line.startswith(prefix) and \"=\" in line: (key, value) = line.split(\"=\",", "rv = self._data.get(\"recursive\") if rv and self._data.get(\"output_image\"): raise RuntimeError(\"cannot have", "= root 
self._data = dict(base) if base else {} if", "root=None, fname=None): if fname and root is None: root =", "= dict(base) if base else {} if parser is not", "\"output_css\" in self._data: (base, ext) = path.splitext(base) names = dict(filename=fname,", "output_image is set\") sdirs = shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath, sdirs) @property", "= path.dirname(fname) self.root = root self._data = dict(base) if base", "spritemap directory *dn*.\" if \"output_image\" in self._data: return self.output_image return", "return map(self.normpath, sdirs) @property def output_image(self): if \"output_image\" in self._data:", "# this is mostly so you can go CSSConfig(base=CSSConfig(..)) return", "self.normpath(self._data[\"output_css\"].format(**names)) else: return path.join(dirn, \"sm_\" + base) def print_config(fname): from", "*fname*.\" return self.absurl(path.relpath(fname, self.root)) def get_css_out(self, fname): \"Get output image", "padding(self): return self._data.get(\"padding\", (1, 1)) @property def anneal_steps(self): return int(self._data.get(\"anneal_steps\",", "dict(filename=fname, dirname=dirn, basename=base, extension=ext) return self.normpath(self._data[\"output_css\"].format(**names)) else: return path.join(dirn, \"sm_\"", "= shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath, sdirs) @property def output_image(self): if \"output_image\"", "None: return not self._data.get(\"output_image\") else: return bool(rv) @property def padding(self):", "image filename for spritemap directory *dn*.\" if \"output_image\" in self._data:", "absurl(self, p): \"\"\"Make an absolute reference to *p* from any", "return self._data.get(\"base_url\") @property def sprite_dirs(self): if \"sprite_dirs\" not in self._data:", "def base_url(self): return self._data.get(\"base_url\") @property def sprite_dirs(self): if \"sprite_dirs\" not", "base = self.base_url if base: p = urljoin(base, p) return", "def __iter__(self): # this is mostly so you 
can go", "in self._data: return self.output_image return dn + \".png\" def get_spritemap_url(self,", "self._data.get(\"output_image\"): raise RuntimeError(\"cannot have sprite_dirs \" \"when output_image is set\")", "fname): \"Get output image URL for spritemap *fname*.\" return self.absurl(path.relpath(fname,", "return cls(CSSParser.from_file(fp), fname=fname) def normpath(self, p): \"\"\"Normalize a possibly relative", "is None: return not self._data.get(\"output_image\") else: return bool(rv) @property def", "possibly relative path *p* to the root.\"\"\" return path.normpath(path.join(self.root, p))", "iter_events(parser, lexemes=(\"comment\",)): for v in iter_config_stmts(ev.comment): yield v class CSSConfig(object):", "\"when output_image is set\") sdirs = shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath, sdirs)", "import path from itertools import imap, ifilter from urlparse import", "value) = line.split(\"=\", 1) return (key[len(prefix):].strip(), value.strip()) def iter_config_stmts(data): return", "\"output_image\" in self._data: return self.output_image return dn + \".png\" def", "\"=\" in line: (key, value) = line.split(\"=\", 1) return (key[len(prefix):].strip(),", "v in iter_config_stmts(ev.comment): yield v class CSSConfig(object): def __init__(self, parser=None,", "fp: print \"%s\\n%s\\n\" % (fname, \"=\" * len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print", "relative path *p* to the root.\"\"\" return path.normpath(path.join(self.root, p)) def", "* len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def main(): import sys for fn", "print_config(fname): from pprint import pprint from .css import CSSParser with", "\"Get output image filename for spritemap directory *fname*.\" (dirn, base)", "p): \"\"\"Normalize a possibly relative path *p* to the root.\"\"\"", "imap(parse_config_stmt, data.splitlines())) def iter_css_config(parser): for ev in iter_events(parser, lexemes=(\"comment\",)): 
for", "= path.split(fname) if \"output_css\" in self._data: (base, ext) = path.splitext(base)", "path.normpath(path.join(self.root, p)) def absurl(self, p): \"\"\"Make an absolute reference to", "\"when output_image is set\") elif rv is None: return not", "p @property def base_url(self): return self._data.get(\"base_url\") @property def sprite_dirs(self): if", "9200)) def get_spritemap_out(self, dn): \"Get output image filename for spritemap", "output_image(self): if \"output_image\" in self._data: return self.normpath(self._data[\"output_image\"]) @property def is_mapping_recursive(self):", "spritemapping \" \"when output_image is set\") elif rv is None:", "import sys for fn in sys.argv[1:]: print_config(fn) if __name__ ==", "URL for spritemap *fname*.\" return self.absurl(path.relpath(fname, self.root)) def get_css_out(self, fname):", "go CSSConfig(base=CSSConfig(..)) return self._data.iteritems() @classmethod def from_file(cls, fname): with open(fname,", "import imap, ifilter from urlparse import urljoin from .css import", "absolute reference to *p* from any configured base URL.\"\"\" base", "prefix=\"spritemapper.\"): line = line.strip() if line.startswith(prefix) and \"=\" in line:", "import shlex from os import path from itertools import imap,", "in line: (key, value) = line.split(\"=\", 1) return (key[len(prefix):].strip(), value.strip())", "for spritemap *fname*.\" return self.absurl(path.relpath(fname, self.root)) def get_css_out(self, fname): \"Get", "ifilter from urlparse import urljoin from .css import CSSParser, iter_events", "output image URL for spritemap *fname*.\" return self.absurl(path.relpath(fname, self.root)) def", "def get_spritemap_out(self, dn): \"Get output image filename for spritemap directory", "\" \"when output_image is set\") elif rv is None: return", "\"rb\") as fp: print \"%s\\n%s\\n\" % (fname, \"=\" * len(fname))", "def sprite_dirs(self): if \"sprite_dirs\" not in self._data: return elif self._data.get(\"output_image\"):", 
"URL.\"\"\" base = self.base_url if base: p = urljoin(base, p)", "@classmethod def from_file(cls, fname): with open(fname, \"rb\") as fp: return", "pprint from .css import CSSParser with open(fname, \"rb\") as fp:", "main(): import sys for fn in sys.argv[1:]: print_config(fn) if __name__", "is mostly so you can go CSSConfig(base=CSSConfig(..)) return self._data.iteritems() @classmethod", "mostly so you can go CSSConfig(base=CSSConfig(..)) return self._data.iteritems() @classmethod def", "\"rb\") as fp: return cls(CSSParser.from_file(fp), fname=fname) def normpath(self, p): \"\"\"Normalize", "= self.base_url if base: p = urljoin(base, p) return p", "def iter_config_stmts(data): return ifilter(None, imap(parse_config_stmt, data.splitlines())) def iter_css_config(parser): for ev", "@property def output_image(self): if \"output_image\" in self._data: return self.normpath(self._data[\"output_image\"]) @property", "if \"output_css\" in self._data: (base, ext) = path.splitext(base) names =", "rv is None: return not self._data.get(\"output_image\") else: return bool(rv) @property", "data.splitlines())) def iter_css_config(parser): for ev in iter_events(parser, lexemes=(\"comment\",)): for v", "+ base) def print_config(fname): from pprint import pprint from .css", "line.startswith(prefix) and \"=\" in line: (key, value) = line.split(\"=\", 1)", "from .css import CSSParser with open(fname, \"rb\") as fp: print", "return (key[len(prefix):].strip(), value.strip()) def iter_config_stmts(data): return ifilter(None, imap(parse_config_stmt, data.splitlines())) def", "extension=ext) return self.normpath(self._data[\"output_css\"].format(**names)) else: return path.join(dirn, \"sm_\" + base) def", "\".png\" def get_spritemap_url(self, fname): \"Get output image URL for spritemap", "int(self._data.get(\"anneal_steps\", 9200)) def get_spritemap_out(self, dn): \"Get output image filename for", "self.output_image return dn + \".png\" def get_spritemap_url(self, fname): \"Get output", 
"parser=None, base=None, root=None, fname=None): if fname and root is None:", "return int(self._data.get(\"anneal_steps\", 9200)) def get_spritemap_out(self, dn): \"Get output image filename", "return p @property def base_url(self): return self._data.get(\"base_url\") @property def sprite_dirs(self):", "class CSSConfig(object): def __init__(self, parser=None, base=None, root=None, fname=None): if fname", "image filename for spritemap directory *fname*.\" (dirn, base) = path.split(fname)", "urljoin(base, p) return p @property def base_url(self): return self._data.get(\"base_url\") @property", "in self._data: return elif self._data.get(\"output_image\"): raise RuntimeError(\"cannot have sprite_dirs \"", "ev in iter_events(parser, lexemes=(\"comment\",)): for v in iter_config_stmts(ev.comment): yield v", "in self._data: return self.normpath(self._data[\"output_image\"]) @property def is_mapping_recursive(self): rv = self._data.get(\"recursive\")", "\"\"\"Make an absolute reference to *p* from any configured base", "is set\") sdirs = shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath, sdirs) @property def", "shlex from os import path from itertools import imap, ifilter", "get_css_out(self, fname): \"Get output image filename for spritemap directory *fname*.\"", "from_file(cls, fname): with open(fname, \"rb\") as fp: return cls(CSSParser.from_file(fp), fname=fname)", "if \"sprite_dirs\" not in self._data: return elif self._data.get(\"output_image\"): raise RuntimeError(\"cannot", "def output_image(self): if \"output_image\" in self._data: return self.normpath(self._data[\"output_image\"]) @property def", "shlex.split(self._data[\"sprite_dirs\"]) return map(self.normpath, sdirs) @property def output_image(self): if \"output_image\" in", "set\") elif rv is None: return not self._data.get(\"output_image\") else: return", "if base else {} if parser is not None: self._data.update(iter_css_config(parser))", "= self._data.get(\"recursive\") if rv and 
self._data.get(\"output_image\"): raise RuntimeError(\"cannot have recursive", "return self.normpath(self._data[\"output_image\"]) @property def is_mapping_recursive(self): rv = self._data.get(\"recursive\") if rv", "cls(CSSParser.from_file(fp), fname=fname) def normpath(self, p): \"\"\"Normalize a possibly relative path", "RuntimeError(\"cannot have sprite_dirs \" \"when output_image is set\") sdirs =", "for ev in iter_events(parser, lexemes=(\"comment\",)): for v in iter_config_stmts(ev.comment): yield", "is_mapping_recursive(self): rv = self._data.get(\"recursive\") if rv and self._data.get(\"output_image\"): raise RuntimeError(\"cannot", "if \"output_image\" in self._data: return self.output_image return dn + \".png\"", "if parser is not None: self._data.update(iter_css_config(parser)) def __iter__(self): # this", "ext) = path.splitext(base) names = dict(filename=fname, dirname=dirn, basename=base, extension=ext) return", "path.split(fname) if \"output_css\" in self._data: (base, ext) = path.splitext(base) names", "for spritemap directory *dn*.\" if \"output_image\" in self._data: return self.output_image", "RuntimeError(\"cannot have recursive spritemapping \" \"when output_image is set\") elif", "line.strip() if line.startswith(prefix) and \"=\" in line: (key, value) =", "None: self._data.update(iter_css_config(parser)) def __iter__(self): # this is mostly so you", "from .css import CSSParser, iter_events def parse_config_stmt(line, prefix=\"spritemapper.\"): line =", "(fname, \"=\" * len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def main(): import sys", "path.join(dirn, \"sm_\" + base) def print_config(fname): from pprint import pprint", "spritemap directory *fname*.\" (dirn, base) = path.split(fname) if \"output_css\" in", "base) def print_config(fname): from pprint import pprint from .css import", "self.root = root self._data = dict(base) if base else {}", "return path.join(dirn, \"sm_\" + base) def print_config(fname): from 
pprint import", "for spritemap directory *fname*.\" (dirn, base) = path.split(fname) if \"output_css\"", "os import path from itertools import imap, ifilter from urlparse", "iter_config_stmts(data): return ifilter(None, imap(parse_config_stmt, data.splitlines())) def iter_css_config(parser): for ev in", "get_spritemap_out(self, dn): \"Get output image filename for spritemap directory *dn*.\"", "p)) def absurl(self, p): \"\"\"Make an absolute reference to *p*", "open(fname, \"rb\") as fp: print \"%s\\n%s\\n\" % (fname, \"=\" *", "not in self._data: return elif self._data.get(\"output_image\"): raise RuntimeError(\"cannot have sprite_dirs", "def main(): import sys for fn in sys.argv[1:]: print_config(fn) if", "self.normpath(self._data[\"output_image\"]) @property def is_mapping_recursive(self): rv = self._data.get(\"recursive\") if rv and", "elif rv is None: return not self._data.get(\"output_image\") else: return bool(rv)", "@property def is_mapping_recursive(self): rv = self._data.get(\"recursive\") if rv and self._data.get(\"output_image\"):", "def __init__(self, parser=None, base=None, root=None, fname=None): if fname and root", "sys for fn in sys.argv[1:]: print_config(fn) if __name__ == \"__main__\":", "\"sm_\" + base) def print_config(fname): from pprint import pprint from", "from any configured base URL.\"\"\" base = self.base_url if base:", "dirname=dirn, basename=base, extension=ext) return self.normpath(self._data[\"output_css\"].format(**names)) else: return path.join(dirn, \"sm_\" +", "and root is None: root = path.dirname(fname) self.root = root", "root = path.dirname(fname) self.root = root self._data = dict(base) if", "you can go CSSConfig(base=CSSConfig(..)) return self._data.iteritems() @classmethod def from_file(cls, fname):", "import urljoin from .css import CSSParser, iter_events def parse_config_stmt(line, prefix=\"spritemapper.\"):", "def from_file(cls, fname): with open(fname, \"rb\") as fp: return cls(CSSParser.from_file(fp),", "in 
iter_events(parser, lexemes=(\"comment\",)): for v in iter_config_stmts(ev.comment): yield v class", "self._data.get(\"output_image\"): raise RuntimeError(\"cannot have recursive spritemapping \" \"when output_image is", "have sprite_dirs \" \"when output_image is set\") sdirs = shlex.split(self._data[\"sprite_dirs\"])", "return self.output_image return dn + \".png\" def get_spritemap_url(self, fname): \"Get", "else: return bool(rv) @property def padding(self): return self._data.get(\"padding\", (1, 1))", "\"Get output image filename for spritemap directory *dn*.\" if \"output_image\"", "fname=None): if fname and root is None: root = path.dirname(fname)", "base else {} if parser is not None: self._data.update(iter_css_config(parser)) def", "print \"%s\\n%s\\n\" % (fname, \"=\" * len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def", "return ifilter(None, imap(parse_config_stmt, data.splitlines())) def iter_css_config(parser): for ev in iter_events(parser,", "None: root = path.dirname(fname) self.root = root self._data = dict(base)", "(key, value) = line.split(\"=\", 1) return (key[len(prefix):].strip(), value.strip()) def iter_config_stmts(data):", "output_image is set\") elif rv is None: return not self._data.get(\"output_image\")", "reference to *p* from any configured base URL.\"\"\" base =", "def iter_css_config(parser): for ev in iter_events(parser, lexemes=(\"comment\",)): for v in", "return self._data.get(\"padding\", (1, 1)) @property def anneal_steps(self): return int(self._data.get(\"anneal_steps\", 9200))", "self._data.iteritems() @classmethod def from_file(cls, fname): with open(fname, \"rb\") as fp:", "directory *fname*.\" (dirn, base) = path.split(fname) if \"output_css\" in self._data:", "p = urljoin(base, p) return p @property def base_url(self): return", "from urlparse import urljoin from .css import CSSParser, iter_events def", "from itertools import imap, ifilter from urlparse import urljoin from", "CSSConfig(object): def 
__init__(self, parser=None, base=None, root=None, fname=None): if fname and", "bool(rv) @property def padding(self): return self._data.get(\"padding\", (1, 1)) @property def", "else {} if parser is not None: self._data.update(iter_css_config(parser)) def __iter__(self):", "parse_config_stmt(line, prefix=\"spritemapper.\"): line = line.strip() if line.startswith(prefix) and \"=\" in", "CSSParser with open(fname, \"rb\") as fp: print \"%s\\n%s\\n\" % (fname,", "directory *dn*.\" if \"output_image\" in self._data: return self.output_image return dn", "self._data.get(\"padding\", (1, 1)) @property def anneal_steps(self): return int(self._data.get(\"anneal_steps\", 9200)) def", "configured base URL.\"\"\" base = self.base_url if base: p =", "recursive spritemapping \" \"when output_image is set\") elif rv is", "not None: self._data.update(iter_css_config(parser)) def __iter__(self): # this is mostly so", "itertools import imap, ifilter from urlparse import urljoin from .css", "is set\") elif rv is None: return not self._data.get(\"output_image\") else:", "base) = path.split(fname) if \"output_css\" in self._data: (base, ext) =", "def anneal_steps(self): return int(self._data.get(\"anneal_steps\", 9200)) def get_spritemap_out(self, dn): \"Get output", "import CSSParser with open(fname, \"rb\") as fp: print \"%s\\n%s\\n\" %", "self._data: return self.output_image return dn + \".png\" def get_spritemap_url(self, fname):", "def is_mapping_recursive(self): rv = self._data.get(\"recursive\") if rv and self._data.get(\"output_image\"): raise", "fp: return cls(CSSParser.from_file(fp), fname=fname) def normpath(self, p): \"\"\"Normalize a possibly", "yield v class CSSConfig(object): def __init__(self, parser=None, base=None, root=None, fname=None):", "if base: p = urljoin(base, p) return p @property def", "*p* from any configured base URL.\"\"\" base = self.base_url if", "base=None, root=None, fname=None): if fname and root is None: root", "return self.absurl(path.relpath(fname, 
self.root)) def get_css_out(self, fname): \"Get output image filename", "return self.normpath(self._data[\"output_css\"].format(**names)) else: return path.join(dirn, \"sm_\" + base) def print_config(fname):", "elif self._data.get(\"output_image\"): raise RuntimeError(\"cannot have sprite_dirs \" \"when output_image is", "have recursive spritemapping \" \"when output_image is set\") elif rv", "self._data = dict(base) if base else {} if parser is", "urlparse import urljoin from .css import CSSParser, iter_events def parse_config_stmt(line,", "names = dict(filename=fname, dirname=dirn, basename=base, extension=ext) return self.normpath(self._data[\"output_css\"].format(**names)) else: return", "len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def main(): import sys for fn in", "def normpath(self, p): \"\"\"Normalize a possibly relative path *p* to", "def parse_config_stmt(line, prefix=\"spritemapper.\"): line = line.strip() if line.startswith(prefix) and \"=\"", "pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def main(): import sys for fn in sys.argv[1:]:", "(dirn, base) = path.split(fname) if \"output_css\" in self._data: (base, ext)", "fname): with open(fname, \"rb\") as fp: return cls(CSSParser.from_file(fp), fname=fname) def", "basename=base, extension=ext) return self.normpath(self._data[\"output_css\"].format(**names)) else: return path.join(dirn, \"sm_\" + base)", "pprint import pprint from .css import CSSParser with open(fname, \"rb\")", "dn): \"Get output image filename for spritemap directory *dn*.\" if", "imap, ifilter from urlparse import urljoin from .css import CSSParser,", ".css import CSSParser with open(fname, \"rb\") as fp: print \"%s\\n%s\\n\"", "p): \"\"\"Make an absolute reference to *p* from any configured", "iter_events def parse_config_stmt(line, prefix=\"spritemapper.\"): line = line.strip() if line.startswith(prefix) and", "return bool(rv) @property def padding(self): return self._data.get(\"padding\", 
(1, 1)) @property", "an absolute reference to *p* from any configured base URL.\"\"\"", "import pprint from .css import CSSParser with open(fname, \"rb\") as", "is not None: self._data.update(iter_css_config(parser)) def __iter__(self): # this is mostly", "*fname*.\" (dirn, base) = path.split(fname) if \"output_css\" in self._data: (base,", "lexemes=(\"comment\",)): for v in iter_config_stmts(ev.comment): yield v class CSSConfig(object): def", "with open(fname, \"rb\") as fp: print \"%s\\n%s\\n\" % (fname, \"=\"", ".css import CSSParser, iter_events def parse_config_stmt(line, prefix=\"spritemapper.\"): line = line.strip()", "= line.strip() if line.startswith(prefix) and \"=\" in line: (key, value)", "def print_config(fname): from pprint import pprint from .css import CSSParser", "ifilter(None, imap(parse_config_stmt, data.splitlines())) def iter_css_config(parser): for ev in iter_events(parser, lexemes=(\"comment\",)):", "parser is not None: self._data.update(iter_css_config(parser)) def __iter__(self): # this is", "% (fname, \"=\" * len(fname)) pprint(dict(iter_css_config(CSSParser.read_file(fp)))) print def main(): import", "the root.\"\"\" return path.normpath(path.join(self.root, p)) def absurl(self, p): \"\"\"Make an", "self._data.get(\"base_url\") @property def sprite_dirs(self): if \"sprite_dirs\" not in self._data: return", "if fname and root is None: root = path.dirname(fname) self.root", "dn + \".png\" def get_spritemap_url(self, fname): \"Get output image URL", "path *p* to the root.\"\"\" return path.normpath(path.join(self.root, p)) def absurl(self,", "self.absurl(path.relpath(fname, self.root)) def get_css_out(self, fname): \"Get output image filename for", "return dn + \".png\" def get_spritemap_url(self, fname): \"Get output image", "def absurl(self, p): \"\"\"Make an absolute reference to *p* from", "*p* to the root.\"\"\" return path.normpath(path.join(self.root, p)) def absurl(self, p):", "can go CSSConfig(base=CSSConfig(..)) return 
self._data.iteritems() @classmethod def from_file(cls, fname): with", "CSSConfig(base=CSSConfig(..)) return self._data.iteritems() @classmethod def from_file(cls, fname): with open(fname, \"rb\")", "1)) @property def anneal_steps(self): return int(self._data.get(\"anneal_steps\", 9200)) def get_spritemap_out(self, dn):", "\"\"\"Normalize a possibly relative path *p* to the root.\"\"\" return", "\"sprite_dirs\" not in self._data: return elif self._data.get(\"output_image\"): raise RuntimeError(\"cannot have", "output image filename for spritemap directory *dn*.\" if \"output_image\" in", "sprite_dirs \" \"when output_image is set\") sdirs = shlex.split(self._data[\"sprite_dirs\"]) return", "return not self._data.get(\"output_image\") else: return bool(rv) @property def padding(self): return", "+ \".png\" def get_spritemap_url(self, fname): \"Get output image URL for", "self._data.get(\"recursive\") if rv and self._data.get(\"output_image\"): raise RuntimeError(\"cannot have recursive spritemapping", "self._data: return elif self._data.get(\"output_image\"): raise RuntimeError(\"cannot have sprite_dirs \" \"when", "return elif self._data.get(\"output_image\"): raise RuntimeError(\"cannot have sprite_dirs \" \"when output_image", "filename for spritemap directory *fname*.\" (dirn, base) = path.split(fname) if" ]
[ "def deprecated(): \"\"\" This is a deprecated method, only to", "print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std))", "= plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config, label='RF') rects3", "[] rfs_std = [] rfs_keys = [] dnn_mean = []", "# Use this to determine which DNN models should be", "ax[1].set_title('Random Forests', fontsize=titlesize) ax[2].set_title('Deep Neural Networks', fontsize=titlesize) ax[0].set_ylabel('Average Squared $L_2$,", "linewidth=200) # Some matplotlib settings. plt.style.use('seaborn-darkgrid') titlesize = 21 labelsize", "by <NAME> \"\"\" import argparse from collections import defaultdict from", "for i in range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([])", "enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index =", "it publication-quality. ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random Forests', fontsize=titlesize) ax[2].set_title('Deep Neural Networks',", "= sorted(results.keys()) for key in sorted_keys: info = [ss['loss'] for", "ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__ == \"__main__\": pp", "necessary stuff to make it publication-quality. 
ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random Forests',", "label='DNN') plt.xticks(np.arange(11) + bar_width / 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores", "dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean))", "{}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN results:\") for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key))", "[] rfs_keys = [] dnn_mean = [] dnn_std = []", "fig, ax = plt.subplots() bar_width = 0.80 opacity = 0.5", "alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config, label='RF') rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width,", "0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue", "= 0.80 opacity = 0.5 error_config = {'ecolor': '0.3'} rects1", "which DNN models should be here. dnn_threshold = 3.0 real_index", "yerr=std, error_kw=error_config, label=key[4:]) real_index += 1 # Some rather tedious", "ax[2].bar(np.array([real_index]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index += 1", "import sys np.set_printoptions(suppress=True, linewidth=200) # Some matplotlib settings. plt.style.use('seaborn-darkgrid') titlesize", "one per algorithm category. 
width_ratio = [len(lin_keys),len(rfs_keys),real_index] fig, ax =", "{'ecolor': '0.0', 'linewidth':3.0} def deprecated(): \"\"\" This is a deprecated", "lin_std = [] lin_keys = [] rfs_mean = [] rfs_std", "rects1 = plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity, color='b', yerr=std_lin, error_kw=error_config, label='Lin')", "print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN results:\")", "[] lin_std = [] lin_keys = [] rfs_mean = []", "'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()] print(\"results has keys: {}\".format(results.keys())) plot(results, VERSION)", "opacity = 1.0 error_config = {'ecolor': '0.0', 'linewidth':3.0} def deprecated():", "graph. (c) September 2017 by <NAME> \"\"\" import argparse from", "def plot(results, vv): lin_mean = [] lin_std = [] lin_keys", "ax[0].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) for ii,(mean,std,key) in", "dnn_keys = [] sorted_keys = sorted(results.keys()) for key in sorted_keys:", "import Dense, Activation import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt", "alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11) + bar_width / 2,", "for key in sorted_keys: info = [ss['loss'] for ss in", "+= 1 # Some rather tedious but necessary stuff to", "ax = plt.subplots() bar_width = 0.80 opacity = 0.5 error_config", "sys.exit() # Use this to determine which DNN models should", "in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index", "However, I find this unwieldly. 
\"\"\" fig, ax = plt.subplots()", "= 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold:", "CV', fontsize=labelsize) for i in range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y',", "ss in results[key]] if 'Lin' in key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key)", "Squared $L_2$, 10-Fold CV', fontsize=labelsize) for i in range(3): ax[i].set_xlabel('Algorithm',", "publication-quality. ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random Forests', fontsize=titlesize) ax[2].set_title('Deep Neural Networks', fontsize=titlesize)", "dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean:", "have it # split across three different subplots, one per", "'RFs' in key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key) elif 'DNN' in key:", "lin_keys.append(key) elif 'RFs' in key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key) elif 'DNN'", "3.0 real_index = 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean", "continue real_index += 1 # Gah! Now I can finally", "\"__main__\": pp = argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int, default=10) args", "rfs_mean = [] rfs_std = [] rfs_keys = [] dnn_mean", "for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config,", "\"\"\" fig, ax = plt.subplots() bar_width = 0.80 opacity =", "three different subplots, one per algorithm category. 
width_ratio = [len(lin_keys),len(rfs_keys),real_index]", "prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__ == \"__main__\": pp = argparse.ArgumentParser()", "collections import defaultdict from keras.models import Sequential from keras.layers import", "rfs_keys = [] dnn_mean = [] dnn_std = [] dnn_keys", "= [] dnn_keys = [] sorted_keys = sorted(results.keys()) for key", "sorted_keys = sorted(results.keys()) for key in sorted_keys: info = [ss['loss']", "pp.parse_args() assert args.version is not None VERSION = str(args.version).zfill(2) file_name", "algorithm category. width_ratio = [len(lin_keys),len(rfs_keys),real_index] fig, ax = plt.subplots(nrows=1, ncols=3,", "rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config, label='DNN')", "Now I can finally make the bar chart. I think", "fontsize=titlesize) ax[1].set_title('Random Forests', fontsize=titlesize) ax[2].set_title('Deep Neural Networks', fontsize=titlesize) ax[0].set_ylabel('Average Squared", "September 2017 by <NAME> \"\"\" import argparse from collections import", "from keras.models import Sequential from keras.layers import Dense, Activation import", "plt.legend() plt.savefig('figures/validation_set_results.png') def plot(results, vv): lin_mean = [] lin_std =", "tedious but necessary stuff to make it publication-quality. ax[0].set_title('Linear', fontsize=titlesize)", "1 # Gah! 
Now I can finally make the bar", "fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key) in", "(mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # sys.exit() # Use this to", "results:\") for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # sys.exit() # Use", "label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config,", "key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key) elif 'RFs' in key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info))", "__name__ == \"__main__\": pp = argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int,", "dnn_std = [] dnn_keys = [] sorted_keys = sorted(results.keys()) for", "if mean > dnn_threshold: continue ax[2].bar(np.array([real_index]), mean, bar_width, alpha=opacity, yerr=std,", "in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) for", "to show how to possibly combine these into one plot.", "VERSION = str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()] print(\"results", "np.set_printoptions(suppress=True, linewidth=200) # Some matplotlib settings. plt.style.use('seaborn-darkgrid') titlesize = 21", "{}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN results:\") for (mean,std,key) in", "key in sorted_keys: info = [ss['loss'] for ss in results[key]]", "settings. plt.style.use('seaborn-darkgrid') titlesize = 21 labelsize = 17 legendsize =", "this to determine which DNN models should be here. dnn_threshold", "into one plot. However, I find this unwieldly. 
\"\"\" fig,", "rfs_std.append(np.std(info)) rfs_keys.append(key) elif 'DNN' in key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean:", "Activation import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy", "'Lin' in key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key) elif 'RFs' in key:", "yerr=std, error_kw=error_config, label=key[4:]) for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean, bar_width,", "[] sorted_keys = sorted(results.keys()) for key in sorted_keys: info =", "for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue real_index", "= plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity, color='b', yerr=std_lin, error_kw=error_config, label='Lin') rects2", "# split across three different subplots, one per algorithm category.", "defaultdict from keras.models import Sequential from keras.layers import Dense, Activation", "rfs_std = [] rfs_keys = [] dnn_mean = [] dnn_std", "bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index += 1 # Some", "plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') def plot(results, vv): lin_mean = [] lin_std", "color='y', yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11) + bar_width / 2, ('A','B','','D','E','F','G','','','J','K'))", "alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index = 0 for ii,(mean,std,key) in", "to have it # split across three different subplots, one", "ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__ ==", "label=key[4:]) real_index += 1 # Some rather tedious but necessary", "bar graph. 
(c) September 2017 by <NAME> \"\"\" import argparse", "is a deprecated method, only to show how to possibly", "stuff to make it publication-quality. ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random Forests', fontsize=titlesize)", "enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue real_index += 1 #", "2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by group and gender') plt.tight_layout()", "matplotlib.pyplot as plt import numpy as np import sys np.set_printoptions(suppress=True,", "= 0.80 opacity = 1.0 error_config = {'ecolor': '0.0', 'linewidth':3.0}", "ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__", "as np import sys np.set_printoptions(suppress=True, linewidth=200) # Some matplotlib settings.", "= 17 legendsize = 15 ticksize = 15 bar_width =", "\"\"\" import argparse from collections import defaultdict from keras.models import", "1 # Some rather tedious but necessary stuff to make", "plt.savefig('figures/validation_set_results.png') def plot(results, vv): lin_mean = [] lin_std = []", "plt.subplots() bar_width = 0.80 opacity = 0.5 error_config = {'ecolor':", "one plot. However, I find this unwieldly. 
\"\"\" fig, ax", "bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]),", "args.version is not None VERSION = str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy'", "type=int, default=10) args = pp.parse_args() assert args.version is not None", "key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys))", "if mean > dnn_threshold: continue real_index += 1 # Gah!", "argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int, default=10) args = pp.parse_args() assert", "0.5 error_config = {'ecolor': '0.3'} rects1 = plt.bar(np.array([0,1]), means_lin, bar_width,", "0.80 opacity = 0.5 error_config = {'ecolor': '0.3'} rects1 =", "enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) for ii,(mean,std,key)", "= 0.5 error_config = {'ecolor': '0.3'} rects1 = plt.bar(np.array([0,1]), means_lin,", "for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config,", "matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np import", "import defaultdict from keras.models import Sequential from keras.layers import Dense,", "means_rfs, bar_width, alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config, label='RF') rects3 = plt.bar(np.array([9,10]),", "it's easiest to have it # split across three different", "possibly combine these into one plot. However, I find this", "Gah! Now I can finally make the bar chart. 
I", "/ 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by group and gender')", "> dnn_threshold: continue real_index += 1 # Gah! Now I", "show how to possibly combine these into one plot. However,", "$L_2$, 10-Fold CV', fontsize=labelsize) for i in range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize)", "find this unwieldly. \"\"\" fig, ax = plt.subplots() bar_width =", "make it publication-quality. ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random Forests', fontsize=titlesize) ax[2].set_title('Deep Neural", "= 21 labelsize = 17 legendsize = 15 ticksize =", "Networks', fontsize=titlesize) ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize) for i", "= [len(lin_keys),len(rfs_keys),real_index] fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for", "legendsize = 15 ticksize = 15 bar_width = 0.80 opacity", "in key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key) elif 'DNN' in key: dnn_mean.append(np.mean(info))", "{}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN", "# Gah! 
Now I can finally make the bar chart.", "ax[2].set_title('Deep Neural Networks', fontsize=titlesize) ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize)", "from collections import defaultdict from keras.models import Sequential from keras.layers", "bar_width / 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by group and", "mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index = 0 for", "easiest to have it # split across three different subplots,", "bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index = 0 for ii,(mean,std,key)", "\"\"\" A bar graph. (c) September 2017 by <NAME> \"\"\"", "= [] rfs_mean = [] rfs_std = [] rfs_keys =", "plt.style.use('seaborn-darkgrid') titlesize = 21 labelsize = 17 legendsize = 15", "in key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys:", "bar_width, alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11) + bar_width /", "in sorted_keys: info = [ss['loss'] for ss in results[key]] if", "category. 
width_ratio = [len(lin_keys),len(rfs_keys),real_index] fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5),", "label=key[4:]) real_index = 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean", "[len(lin_keys),len(rfs_keys),real_index] fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key)", "'DNN' in key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std))", "is not None VERSION = str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results", "alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index += 1 # Some rather", "lin_mean = [] lin_std = [] lin_keys = [] rfs_mean", "elif 'RFs' in key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key) elif 'DNN' in", "this unwieldly. \"\"\" fig, ax = plt.subplots() bar_width = 0.80", "for ss in results[key]] if 'Lin' in key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info))", "ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize}) plt.tight_layout()", "ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue real_index +=", "sys np.set_printoptions(suppress=True, linewidth=200) # Some matplotlib settings. 
plt.style.use('seaborn-darkgrid') titlesize =", "figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean, bar_width, alpha=opacity,", "deprecated method, only to show how to possibly combine these", "labelsize = 17 legendsize = 15 ticksize = 15 bar_width", "prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__ == \"__main__\":", "method, only to show how to possibly combine these into", "elif 'DNN' in key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std:", "deprecated(): \"\"\" This is a deprecated method, only to show", "ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:])", "= plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11)", "different subplots, one per algorithm category. 
width_ratio = [len(lin_keys),len(rfs_keys),real_index] fig,", "ncol=3, prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__ == \"__main__\": pp =", "{'ecolor': '0.3'} rects1 = plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity, color='b', yerr=std_lin,", "[] rfs_mean = [] rfs_std = [] rfs_keys = []", "keras.models import Sequential from keras.layers import Dense, Activation import matplotlib", "plt import numpy as np import sys np.set_printoptions(suppress=True, linewidth=200) #", "range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize})", "the bar chart. I think it's easiest to have it", "error_kw=error_config, label=key[4:]) for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean, bar_width, alpha=opacity,", "I find this unwieldly. \"\"\" fig, ax = plt.subplots() bar_width", "plt.title('Scores by group and gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') def plot(results,", "if __name__ == \"__main__\": pp = argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds',", "alpha=opacity, color='b', yerr=std_lin, error_kw=error_config, label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width,", "to determine which DNN models should be here. 
dnn_threshold =", "width_ratio = [len(lin_keys),len(rfs_keys),real_index] fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5), gridspec_kw={'width_ratios':width_ratio})", "I think it's easiest to have it # split across", "= 15 ticksize = 15 bar_width = 0.80 opacity =", "plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__ == \"__main__\": pp = argparse.ArgumentParser() pp.add_argument('--version', type=int)", "> dnn_threshold: continue ax[2].bar(np.array([real_index]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:])", "[] dnn_keys = [] sorted_keys = sorted(results.keys()) for key in", "if 'Lin' in key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key) elif 'RFs' in", "= [ss['loss'] for ss in results[key]] if 'Lin' in key:", "a deprecated method, only to show how to possibly combine", "combine these into one plot. However, I find this unwieldly.", "lin_keys = [] rfs_mean = [] rfs_std = [] rfs_keys", "'0.0', 'linewidth':3.0} def deprecated(): \"\"\" This is a deprecated method,", "lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key) elif 'RFs' in key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key)", "error_kw=error_config, label=key[4:]) real_index = 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if", "in range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1,", "plt.xticks(np.arange(11) + bar_width / 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by", "plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config, label='RF') rects3 =", "yerr=std_lin, error_kw=error_config, label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, 
alpha=opacity, color='r',", "color='b', yerr=std_lin, error_kw=error_config, label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, alpha=opacity,", "mean > dnn_threshold: continue ax[2].bar(np.array([real_index]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config,", "error_config = {'ecolor': '0.0', 'linewidth':3.0} def deprecated(): \"\"\" This is", "A bar graph. (c) September 2017 by <NAME> \"\"\" import", "= argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int, default=10) args = pp.parse_args()", "21 labelsize = 17 legendsize = 15 ticksize = 15", "= 'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()] print(\"results has keys: {}\".format(results.keys())) plot(results,", "opacity = 0.5 error_config = {'ecolor': '0.3'} rects1 = plt.bar(np.array([0,1]),", "dnn_threshold: continue ax[2].bar(np.array([real_index]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index", "= [] dnn_std = [] dnn_keys = [] sorted_keys =", "means_lin, bar_width, alpha=opacity, color='b', yerr=std_lin, error_kw=error_config, label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]),", "Some matplotlib settings. 
plt.style.use('seaborn-darkgrid') titlesize = 21 labelsize = 17", "rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config, label='RF')", "think it's easiest to have it # split across three", "fontsize=titlesize) ax[2].set_title('Deep Neural Networks', fontsize=titlesize) ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV',", "Neural Networks', fontsize=titlesize) ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize) for", "enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue ax[2].bar(np.array([real_index]), mean, bar_width, alpha=opacity,", "17 legendsize = 15 ticksize = 15 bar_width = 0.80", "print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN results:\") for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) #", "plot. However, I find this unwieldly. \"\"\" fig, ax =", "'0.3'} rects1 = plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity, color='b', yerr=std_lin, error_kw=error_config,", "ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\",", "keras.layers import Dense, Activation import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as", "and gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') def plot(results, vv): lin_mean =", "Use this to determine which DNN models should be here.", "real_index = 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean >", "rfs_keys.append(key) elif 'DNN' in key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean))", "for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # 
sys.exit() # Use this", "prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if", "= [] lin_keys = [] rfs_mean = [] rfs_std =", "here. dnn_threshold = 3.0 real_index = 0 for ii,(mean,std,key) in", "vv): lin_mean = [] lin_std = [] lin_keys = []", "= [] lin_std = [] lin_keys = [] rfs_mean =", "\"\"\" This is a deprecated method, only to show how", "models should be here. dnn_threshold = 3.0 real_index = 0", "in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue real_index += 1", "error_config = {'ecolor': '0.3'} rects1 = plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity,", "fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2,", "0.80 opacity = 1.0 error_config = {'ecolor': '0.0', 'linewidth':3.0} def", "bar chart. 
I think it's easiest to have it #", "= {'ecolor': '0.0', 'linewidth':3.0} def deprecated(): \"\"\" This is a", "1.0 error_config = {'ecolor': '0.0', 'linewidth':3.0} def deprecated(): \"\"\" This", "matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np", "import Sequential from keras.layers import Dense, Activation import matplotlib matplotlib.use('Agg')", "key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key) elif 'DNN' in key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info))", "= 1.0 error_config = {'ecolor': '0.0', 'linewidth':3.0} def deprecated(): \"\"\"", "type=int) pp.add_argument('--kfolds', type=int, default=10) args = pp.parse_args() assert args.version is", "str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()] print(\"results has keys:", "how to possibly combine these into one plot. However, I", "10-Fold CV', fontsize=labelsize) for i in range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0])", "continue ax[2].bar(np.array([real_index]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index +=", "I can finally make the bar chart. I think it's", "None VERSION = str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()]", "gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') def plot(results, vv): lin_mean = []", "= pp.parse_args() assert args.version is not None VERSION = str(args.version).zfill(2)", "determine which DNN models should be here. dnn_threshold = 3.0", "'linewidth':3.0} def deprecated(): \"\"\" This is a deprecated method, only", "= [] dnn_mean = [] dnn_std = [] dnn_keys =", "argparse from collections import defaultdict from keras.models import Sequential from", "matplotlib settings. 
plt.style.use('seaborn-darkgrid') titlesize = 21 labelsize = 17 legendsize", "mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index += 1 #", "for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue ax[2].bar(np.array([real_index]),", "error_kw=error_config, label=key[4:]) real_index += 1 # Some rather tedious but", "only to show how to possibly combine these into one", "plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity, color='b', yerr=std_lin, error_kw=error_config, label='Lin') rects2 =", "ax[1].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) real_index = 0", "to make it publication-quality. ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random Forests', fontsize=titlesize) ax[2].set_title('Deep", "split across three different subplots, one per algorithm category. width_ratio", "lin_std.append(np.std(info)) lin_keys.append(key) elif 'RFs' in key: rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key) elif", "{}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys:", "fontsize=titlesize) ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize) for i in", "info = [ss['loss'] for ss in results[key]] if 'Lin' in", "DNN models should be here. 
dnn_threshold = 3.0 real_index =", "print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # sys.exit() # Use this to determine which DNN", "as plt import numpy as np import sys np.set_printoptions(suppress=True, linewidth=200)", "ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\",", "args = pp.parse_args() assert args.version is not None VERSION =", "ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize})", "# Some rather tedious but necessary stuff to make it", "real_index += 1 # Gah! Now I can finally make", "should be here. dnn_threshold = 3.0 real_index = 0 for", "in key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key) elif 'RFs' in key: rfs_mean.append(np.mean(info))", "import argparse from collections import defaultdict from keras.models import Sequential", "dnn_keys.append(key) print(\"\\nlin_mean: {}\".format(lin_mean)) print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std:", "+= 1 # Gah! 
Now I can finally make the", "= str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()] print(\"results has", "error_kw=error_config, label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, alpha=opacity, color='r', yerr=std_rfs,", "label='RF') rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config,", "yerr=std, error_kw=error_config, label=key[4:]) real_index = 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):", "rfs_mean.append(np.mean(info)) rfs_std.append(np.std(info)) rfs_keys.append(key) elif 'DNN' in key: dnn_mean.append(np.mean(info)) dnn_std.append(np.std(info)) dnn_keys.append(key)", "Dense, Activation import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import", "subplots, one per algorithm category. width_ratio = [len(lin_keys),len(rfs_keys),real_index] fig, ax", "these into one plot. However, I find this unwieldly. \"\"\"", "gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std,", "15 bar_width = 0.80 opacity = 1.0 error_config = {'ecolor':", "[] dnn_std = [] dnn_keys = [] sorted_keys = sorted(results.keys())", "mean > dnn_threshold: continue real_index += 1 # Gah! Now", "by group and gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') def plot(results, vv):", "np import sys np.set_printoptions(suppress=True, linewidth=200) # Some matplotlib settings. plt.style.use('seaborn-darkgrid')", "[ss['loss'] for ss in results[key]] if 'Lin' in key: lin_mean.append(np.mean(info))", "# Some matplotlib settings. 
plt.style.use('seaborn-darkgrid') titlesize = 21 labelsize =", "numpy as np import sys np.set_printoptions(suppress=True, linewidth=200) # Some matplotlib", "Sequential from keras.layers import Dense, Activation import matplotlib matplotlib.use('Agg') import", "make the bar chart. I think it's easiest to have", "= [] rfs_keys = [] dnn_mean = [] dnn_std =", "import numpy as np import sys np.set_printoptions(suppress=True, linewidth=200) # Some", "in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # sys.exit() # Use this to determine", "be here. dnn_threshold = 3.0 real_index = 0 for ii,(mean,std,key)", "= [] sorted_keys = sorted(results.keys()) for key in sorted_keys: info", "error_kw=error_config, label='RF') rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity, color='y', yerr=std_dnn,", "results[key]] if 'Lin' in key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key) elif 'RFs'", "2017 by <NAME> \"\"\" import argparse from collections import defaultdict", "plt.ylabel('Scores') plt.title('Scores by group and gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') def", "yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11) + bar_width / 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group')", "# sys.exit() # Use this to determine which DNN models", "ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize) for i in range(3):", "labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3,", "it # split across three different subplots, one per algorithm", "fontsize=labelsize) for i in range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize)", "ax = plt.subplots(nrows=1, ncols=3, 
figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)):", "Some rather tedious but necessary stuff to make it publication-quality.", "group and gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') def plot(results, vv): lin_mean", "This is a deprecated method, only to show how to", "but necessary stuff to make it publication-quality. ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random", "+ bar_width / 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by group", "plot(results, vv): lin_mean = [] lin_std = [] lin_keys =", "bar_width = 0.80 opacity = 0.5 error_config = {'ecolor': '0.3'}", "sorted_keys: info = [ss['loss'] for ss in results[key]] if 'Lin'", "dnn_threshold = 3.0 real_index = 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):", "= {'ecolor': '0.3'} rects1 = plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity, color='b',", "[] lin_keys = [] rfs_mean = [] rfs_std = []", "sorted(results.keys()) for key in sorted_keys: info = [ss['loss'] for ss", "means_dnn, bar_width, alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11) + bar_width", "unwieldly. 
\"\"\" fig, ax = plt.subplots() bar_width = 0.80 opacity", "{}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN results:\") for", "ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:])", "== \"__main__\": pp = argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int, default=10)", "print(\"\\nDNN results:\") for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # sys.exit() #", "print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN results:\") for (mean,std,key)", "ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize}) plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png')", "('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by group and gender') plt.tight_layout() plt.legend()", "to possibly combine these into one plot. 
However, I find", "ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\", ncol=1, prop={'size':legendsize}) ax[1].legend(loc=\"best\", ncol=2, prop={'size':legendsize}) ax[2].legend(loc=\"best\", ncol=3, prop={'size':legendsize})", "ncols=3, figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean, bar_width,", "error_kw=error_config, label='DNN') plt.xticks(np.arange(11) + bar_width / 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores')", "pp = argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int, default=10) args =", "yerr=std_rfs, error_kw=error_config, label='RF') rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity, color='y',", "plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11) +", "plt.tight_layout() plt.savefig('figures/validation_set_results_v'+vv+'.png') if __name__ == \"__main__\": pp = argparse.ArgumentParser() pp.add_argument('--version',", "ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue ax[2].bar(np.array([real_index]), mean,", "ticksize = 15 bar_width = 0.80 opacity = 1.0 error_config", "in results[key]] if 'Lin' in key: lin_mean.append(np.mean(info)) lin_std.append(np.std(info)) lin_keys.append(key) elif", "import matplotlib.pyplot as plt import numpy as np import sys", "zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # sys.exit() # Use this to determine which", "Forests', fontsize=titlesize) ax[2].set_title('Deep Neural Networks', fontsize=titlesize) ax[0].set_ylabel('Average Squared $L_2$, 10-Fold", "bar_width, alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config, label='RF') rects3 = plt.bar(np.array([9,10]), means_dnn,", "dnn_mean = [] dnn_std = [] dnn_keys 
= [] sorted_keys", "file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()] print(\"results has keys: {}\".format(results.keys()))", "15 ticksize = 15 bar_width = 0.80 opacity = 1.0", "finally make the bar chart. I think it's easiest to", "i in range(3): ax[i].set_xlabel('Algorithm', fontsize=labelsize) ax[i].set_ylim([0.0,9.0]) ax[i].tick_params(axis='y', labelsize=ticksize) ax[i].set_xticklabels([]) ax[0].legend(loc=\"best\",", "real_index += 1 # Some rather tedious but necessary stuff", "print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys)) print(\"\\nDNN results:\") for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys):", "= plt.subplots(nrows=1, ncols=3, figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]),", "bar_width = 0.80 opacity = 1.0 error_config = {'ecolor': '0.0',", "dnn_threshold: continue real_index += 1 # Gah! Now I can", "can finally make the bar chart. 
I think it's easiest", "(c) September 2017 by <NAME> \"\"\" import argparse from collections", "print(\"lin_std: {}\".format(lin_std)) print(\"lin_keys: {}\".format(lin_keys)) print(\"\\nrfs_mean: {}\".format(rfs_mean)) print(\"rfs_std: {}\".format(rfs_std)) print(\"rfs_keys: {}\".format(rfs_keys))", "= [] rfs_std = [] rfs_keys = [] dnn_mean =", "default=10) args = pp.parse_args() assert args.version is not None VERSION", "{}\".format(rfs_keys)) print(\"\\nDNN results:\") for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys): print(\"{:.2f}\\t{:.2f}\\t{}\".format(mean,std,key)) # sys.exit()", "pp.add_argument('--kfolds', type=int, default=10) args = pp.parse_args() assert args.version is not", "assert args.version is not None VERSION = str(args.version).zfill(2) file_name =", "= 3.0 real_index = 0 for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if", "<NAME> \"\"\" import argparse from collections import defaultdict from keras.models", "bar_width, alpha=opacity, color='b', yerr=std_lin, error_kw=error_config, label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs,", "= plt.subplots() bar_width = 0.80 opacity = 0.5 error_config =", "import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as", "chart. 
I think it's easiest to have it # split", "[] dnn_mean = [] dnn_std = [] dnn_keys = []", "from keras.layers import Dense, Activation import matplotlib matplotlib.use('Agg') import matplotlib.pyplot", "pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int, default=10) args = pp.parse_args() assert args.version", "plt.subplots(nrows=1, ncols=3, figsize=(16,5), gridspec_kw={'width_ratios':width_ratio}) for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)): ax[0].bar(np.array([ii]), mean,", "in enumerate(zip(dnn_mean,dnn_std,dnn_keys)): if mean > dnn_threshold: continue ax[2].bar(np.array([real_index]), mean, bar_width,", "titlesize = 21 labelsize = 17 legendsize = 15 ticksize", "ax[0].set_title('Linear', fontsize=titlesize) ax[1].set_title('Random Forests', fontsize=titlesize) ax[2].set_title('Deep Neural Networks', fontsize=titlesize) ax[0].set_ylabel('Average", "alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean,", "across three different subplots, one per algorithm category. width_ratio =", "= 15 bar_width = 0.80 opacity = 1.0 error_config =", "color='r', yerr=std_rfs, error_kw=error_config, label='RF') rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity,", "label=key[4:]) for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)): ax[1].bar(np.array([ii]), mean, bar_width, alpha=opacity, yerr=std,", "not None VERSION = str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results =", "per algorithm category. width_ratio = [len(lin_keys),len(rfs_keys),real_index] fig, ax = plt.subplots(nrows=1,", "rather tedious but necessary stuff to make it publication-quality. 
ax[0].set_title('Linear',", "mean, bar_width, alpha=opacity, yerr=std, error_kw=error_config, label=key[4:]) for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)):", "plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by group and gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png')" ]
[ "= \\'([0-9.]+)\\'''') def get_version(): init = open(os.path.join(ROOT, 'application', '__init__.py')).read() return", "re ROOT = os.path.dirname(__file__) VERSION_RE = re.compile(r'''__version__ = \\'([0-9.]+)\\'''') def", "python from setuptools import find_packages, setup import os import re", "get_version(): init = open(os.path.join(ROOT, 'application', '__init__.py')).read() return VERSION_RE.search(init).group(1) setup( name='groceries-api',", "from setuptools import find_packages, setup import os import re ROOT", "setuptools import find_packages, setup import os import re ROOT =", "return VERSION_RE.search(init).group(1) setup( name='groceries-api', version=get_version(), license='MIT', packages=find_packages(), include_package_data=True, install_requires=[ 'alembic==0.7.5.post2',", "'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1', 'six==1.9.0', ], extras_require={ 'dev': {", "import os import re ROOT = os.path.dirname(__file__) VERSION_RE = re.compile(r'''__version__", "<filename>setup.py #!/usr/bin/env python from setuptools import find_packages, setup import os", "import re ROOT = os.path.dirname(__file__) VERSION_RE = re.compile(r'''__version__ = \\'([0-9.]+)\\'''')", "os.path.dirname(__file__) VERSION_RE = re.compile(r'''__version__ = \\'([0-9.]+)\\'''') def get_version(): init =", "VERSION_RE.search(init).group(1) setup( name='groceries-api', version=get_version(), license='MIT', packages=find_packages(), include_package_data=True, install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0',", "'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1', 'six==1.9.0', ], extras_require={", "open(os.path.join(ROOT, 'application', '__init__.py')).read() return VERSION_RE.search(init).group(1) setup( name='groceries-api', version=get_version(), license='MIT', packages=find_packages(),", "], extras_require={ 'dev': { 
'coverage==3.7.1', 'coveralls==0.5', 'flake8==2.4.0', 'mock==1.0.1', 'pytest==2.7.0', 'tox==2.1.1',", "packages=find_packages(), include_package_data=True, install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1',", "init = open(os.path.join(ROOT, 'application', '__init__.py')).read() return VERSION_RE.search(init).group(1) setup( name='groceries-api', version=get_version(),", "version=get_version(), license='MIT', packages=find_packages(), include_package_data=True, install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0',", "'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1', 'six==1.9.0', ], extras_require={ 'dev': { 'coverage==3.7.1', 'coveralls==0.5',", "extras_require={ 'dev': { 'coverage==3.7.1', 'coveralls==0.5', 'flake8==2.4.0', 'mock==1.0.1', 'pytest==2.7.0', 'tox==2.1.1', },", "include_package_data=True, install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0',", "license='MIT', packages=find_packages(), include_package_data=True, install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0',", "'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1', 'six==1.9.0', ],", "setup import os import re ROOT = os.path.dirname(__file__) VERSION_RE =", "def get_version(): init = open(os.path.join(ROOT, 'application', '__init__.py')).read() return VERSION_RE.search(init).group(1) setup(", "'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1', 
'six==1.9.0',", "= open(os.path.join(ROOT, 'application', '__init__.py')).read() return VERSION_RE.search(init).group(1) setup( name='groceries-api', version=get_version(), license='MIT',", "os import re ROOT = os.path.dirname(__file__) VERSION_RE = re.compile(r'''__version__ =", "'six==1.9.0', ], extras_require={ 'dev': { 'coverage==3.7.1', 'coveralls==0.5', 'flake8==2.4.0', 'mock==1.0.1', 'pytest==2.7.0',", "'application', '__init__.py')).read() return VERSION_RE.search(init).group(1) setup( name='groceries-api', version=get_version(), license='MIT', packages=find_packages(), include_package_data=True,", "'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1', 'six==1.9.0', ], extras_require={ 'dev': { 'coverage==3.7.1',", "= os.path.dirname(__file__) VERSION_RE = re.compile(r'''__version__ = \\'([0-9.]+)\\'''') def get_version(): init", "install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1',", "find_packages, setup import os import re ROOT = os.path.dirname(__file__) VERSION_RE", "re.compile(r'''__version__ = \\'([0-9.]+)\\'''') def get_version(): init = open(os.path.join(ROOT, 'application', '__init__.py')).read()", "setup( name='groceries-api', version=get_version(), license='MIT', packages=find_packages(), include_package_data=True, install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1',", "\\'([0-9.]+)\\'''') def get_version(): init = open(os.path.join(ROOT, 'application', '__init__.py')).read() return VERSION_RE.search(init).group(1)", "name='groceries-api', version=get_version(), license='MIT', packages=find_packages(), include_package_data=True, install_requires=[ 'alembic==0.7.5.post2', 'APScheduler==3.1.0', 'Flask==0.10.1', 'Flask-Cors==2.0.0',", "'dev': { 'coverage==3.7.1', 'coveralls==0.5', 'flake8==2.4.0', 'mock==1.0.1', 'pytest==2.7.0', 'tox==2.1.1', }, },", 
"'Flask-Cors==2.0.0', 'Flask-SQLAlchemy==2.0', 'gunicorn==19.3.0', 'psycopg2==2.6.1', 'PyJWT==1.1.0', 'requests==2.8.1', 'six==1.9.0', ], extras_require={ 'dev':", "'__init__.py')).read() return VERSION_RE.search(init).group(1) setup( name='groceries-api', version=get_version(), license='MIT', packages=find_packages(), include_package_data=True, install_requires=[", "VERSION_RE = re.compile(r'''__version__ = \\'([0-9.]+)\\'''') def get_version(): init = open(os.path.join(ROOT,", "ROOT = os.path.dirname(__file__) VERSION_RE = re.compile(r'''__version__ = \\'([0-9.]+)\\'''') def get_version():", "= re.compile(r'''__version__ = \\'([0-9.]+)\\'''') def get_version(): init = open(os.path.join(ROOT, 'application',", "{ 'coverage==3.7.1', 'coveralls==0.5', 'flake8==2.4.0', 'mock==1.0.1', 'pytest==2.7.0', 'tox==2.1.1', }, }, )", "#!/usr/bin/env python from setuptools import find_packages, setup import os import", "'PyJWT==1.1.0', 'requests==2.8.1', 'six==1.9.0', ], extras_require={ 'dev': { 'coverage==3.7.1', 'coveralls==0.5', 'flake8==2.4.0',", "import find_packages, setup import os import re ROOT = os.path.dirname(__file__)", "'requests==2.8.1', 'six==1.9.0', ], extras_require={ 'dev': { 'coverage==3.7.1', 'coveralls==0.5', 'flake8==2.4.0', 'mock==1.0.1'," ]
[ "defenseTube.setTangible(1) defenseCollNode = CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))", "self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) camera.setPos(self.witnessToon, -9, 12, 6) camera.lookAt(self.witnessToon, 0, 0,", "if not isinstance(cn, CollisionNode): self.notify.warning('Not a collision node: %s' %", "= Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack = Sequence()", "self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self): self.notify.debug('----- enterBattleTwo') self.cleanupIntervals()", "'BattleThree': self.notify.warning('returning from setTaunt, not in battle three state, state=%s',", "mopath, track, delayDeletes): self.notify.debug('----- __walkToonToPromotion') toon = base.cr.doId2do.get(toonId) if toon:", "self.notify.debug('makeToonsWait') for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon:", "self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0,", "self.onscreenMessage = None self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1),", "dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc = Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer)", "= 1 cannonIndex = self.cannonIndex numJurors = 0 if not", "%s has no attackCode tag.' 
% repr(entry.getIntoNodePath())) return attackCode =", "cannon = self.cannons[index] toon = self.cr.doId2do.get(toonId) self.notify.debug('cannonId = %d' %", "print st return chatString = TTLocalizer.LawbotBossTaunts[1] if tauntIndex == 0:", "delay, mopath, track, delayDeletes): self.notify.debug('----- __walkToonToPromotion') toon = base.cr.doId2do.get(toonId) if", "camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35),", "ElevatorConstants from toontown.toonbase import ToontownTimer OneBossCog = None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog,", "def __doWitnessPrepareBattleThreeChat(self): self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) self.countToonJurors()", "self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals()", "pos = %s' % bnWorldPos) pos = render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition:", "2), Func(camera.setHpr, 180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat)) return movie def countToonJurors(self):", "* from panda3d.core import * from libotp import * from", "self.realWitnessStand = self.geom.find('**/WitnessStand') if not self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect')", "no avatarDoId tag.' 
% repr(entry.getIntoNodePath())) return doId = int(avatarDoId) if", "self.geom.reparentTo(render) self.loadWitnessStand() self.loadScale() self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium() ug = self.geom.find('**/Reflections') ug.setBin('ground',", "self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0) rollTrack = Sequence( Func(self.getGeomNode().setH, 180),", "radians = angle * math.pi / 180.0 x = math.cos(radians)", "Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)),", "duration=0)), Func(node.detachNode))) seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close'))", "*= 1.0 if diffDamage >= 0: percentDamaged = diffDamage /", "def __cleanupWitnessToon(self): self.__hideWitnessToon() if self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon = None", "panda3d.core import * from libotp import * from direct.fsm import", "self.reflectedWitnessStand.isEmpty(): pass colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def loadScale(self): self.useProgrammerScale =", "= False def makeScaleReflectDamage(self): diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage diffDamage", "self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl')", "= self.state if stateName == 'Elevator': self.placeToonInElevator(toon) def setLawyerIds(self, lawyerIds):", "startHpr, bottomPos, None, 1) bossTrack.append(track) track, hpr = self.rollBossToPoint(bottomPos, startHpr,", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24, Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA + self.toonsB, render,", "[bossDamage]) def d_healBoss(self, bossHeal): 
self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss', [bossHeal]) def d_hitBossInsides(self):", "self.rollBossToPoint(startPos, None, battlePos, None, 0) bossTrack.append(track) track, hpr = self.rollBossToPoint(battlePos,", "1 self.doAnimate() self.__hideWitnessToon() if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty():", "= CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield", "to NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage,", "self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render)", "ToontownGlobals.PieCodeToon: return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId == '': self.notify.warning('Toon", "self.notify.debug('not found %s' % stuffToHide) self.geom.reparentTo(render) self.loadWitnessStand() self.loadScale() self.scaleNodePath.stash() self.loadJuryBox()", "0) curPos = self.juryBox.getPos() endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1]", "looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueDefeat(self): self.notify.debug('----- __continueDefeat') self.stopAnimate() self.doneBarrier('Defeat') def", "= 1 def __hideWitnessToon(self): if self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage =", "1, 1)), name=intervalName) self.panFlashInterval = seq seq.start() self.storeInterval(seq, intervalName) def", 
"self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0 self.raised = 0 self.forward", "victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel,", "origin) self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos) self.notify.debug('prosecutionPanRelPos = %s' %", "self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target = CollisionTube(0, -1, 4, 0, -1,", "Track( (0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)), (12,", "0, 1.0, 0, 1.0) self.beamNodePath = NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0,", "self.getGearFrisbee() gearModel.setScale(0.1) t = self.getBossDamage() / 100.0 gearTrack = Parallel()", "1.0, g = 1.0, b = 1.0, a = 1.0):", "= MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True)", "direct.gui.DirectGui import * from panda3d.core import * from libotp import", "CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6) prosecutionTube.setTangible(1) prosecutionCollNode =", "original self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after calling self.countToonJurors,", "isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds, battleNode): self.notify.debug('----- __toonsToPromotionPosition')", "__hideToons(self): for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon:", "name=intervalName) self.panFlashInterval = seq seq.start() self.storeInterval(seq, intervalName) def saySomething(self, chatString):", "if 
chair.toonJurorIndex == cannonIndex: retVal += 1 return retVal def", "self.reflectedPodium.getZ() if not self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if not self.reflectedPodium.isEmpty(): if self.debugPositions:", "Sequence(prepareBattleThreeMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleThree(self, elapsed): self.notify.debug('----- __onToBattleThree')", "TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24, Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA + self.toonsB, render, (-2.798,", "x = dist * math.sin(angle) y = dist * math.cos(angle)", "DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35),", "30, 8, 180, 0, 0) def exitElevator(self): self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self)", "self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie() intervalName = 'prepareBattleTwo' seq =", "toon, pieCode): if pieCode == ToontownGlobals.PieCodeBossInsides: if toon == localAvatar:", "def disable(self): global OneBossCog self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon()", "if self.numToonJurorsSeated == 0: juryResult = TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated ==", "self.happy = 0 self.raised = 0 self.forward = 1 self.doAnimate()", "toonId: self.cannonIndex = cannonIndex def numJurorsSeatedByCannon(self, cannonIndex): retVal = 0", "points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for i in xrange(len(toonIds)): toon", "== localAvatar: self.d_hitBoss(self.panDamage) elif pieCode == ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if toon", "0 self.raised = 0 self.forward = 1 intervalName = 'DefeatMovie'", "setTaunt(self, tauntIndex, extraInfo): gotError = False if not hasattr(self, 'state'):", 
"Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180))", "tris.closePrimitive() tris.addVertex(0) tris.addVertex(4) tris.addVertex(5) tris.closePrimitive() tris.addVertex(1) tris.addVertex(0) tris.addVertex(5) tris.closePrimitive() tris.addVertex(4)", "pos, h = points[i] origPos = pos self.notify.debug('origPos = %s'", "if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30, 8, 180, 0,", "= math.cos(radians) * radius y = math.sin(radians) * radius toon.setPos(self.witnessToon,", "= VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack = Sequence() self.notify.debug('calling setPosHpr') myInterval", "[]) def __finalPieSplat(self, toon, pieCode): if pieCode != ToontownGlobals.PieCodeDefensePan: return", "self.uniqueName('WaitingMessage')) def exitPrepareBattleThree(self): self.notify.debug('----- exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName =", "= None self.onscreenMessage = None self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage self.elevatorType =", "'LawbotBoss.enterReward')) ival.delayDeletes = delayDeletes ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9,", "Sequence( ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1), ActorInterval(self, 'Ff_lookRt', duration=3), ActorInterval(self,", "Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA + self.toonsB, render, (-2.798, -70, 10, 180,", "index += 1 else: self.notify.warning('No cannon %d but we have", "npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon = npc 
self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self): self.__hideWitnessToon() if", "recoverRate self.recoverStartTime = recoverStartTime taskName = 'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie:", "not self.reflectedWitnessStand.isEmpty(): pass colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def loadScale(self): self.useProgrammerScale", "bonusWeight self.notify.debug('toon %d has weight of %d' % (toonId, newWeight))", "(1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo,", "def getBossDamage(self): self.notify.debug('----- getBossDamage') now = globalClock.getFrameTime() elapsed = now", "if self.bonusTimer: self.bonusTimer.hide() def enteredBonusState(self): self.witnessToon.clearChat() text = TTLocalizer.WitnessToonBonus %", "= self.geom.find('**/Podium') newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not self.debugPositions:", "prosecutionPanPos = self.prosecutionPanNodePath.getPos() origin = Point3(0, 0, 0) prosecutionPanRelPos =", "Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleTwo(self, elapsedTime =", "cannonIndex == None and cannonIndex >= 0: diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]", "0, 1.0, 0.25) self.defensePanNodePath = NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) self.defensePanNodePath.setPos(0, -2, 0)", "= %s' % beamBoundsCenter) beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter()", "= points[i] origPos = pos self.notify.debug('origPos = %s' % origPos)", "0.85: self.unstashBaseCol() else: self.stashBaseCol() def 
unloadEnvironment(self): self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode()", "Func(self.unstickToons), name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' %", "self.bossMaxDamage) if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self): self.notify.debug('----- __doneBattleThree') self.setState('NearVictory')", "self.clearChat() self.reparentTo(render) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage',", "1 self.juryBoxIval = None self.juryTimer = None self.witnessToon = None", "self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated) if self.numToonJurorsSeated == 0: juryResult =", "npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon = npc self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self): self.__hideWitnessToon()", "0 for chair in self.chairs.values(): if chair.state == 'ToonJuror': if", "localAvatar.setPos(-3, 0, 0) base.localAvatar.orbitalCamera.start() self.clearChat() self.witnessToon.clearChat() self.reparentTo(render) self.happy = 1", "xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0,", "def __init__(self, cr): self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers", 
"name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds,", "self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) def __gotLawyers(self, lawyers): self.lawyerRequest = None", "self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) self.witnessToon.loop('Sit')", "!= ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def cleanupAttacks(self): self.notify.debug('----- cleanupAttacks')", "self.__hideWitnessToon() if self.battleA == None or self.battleB == None: pass", "ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward)) intervalName = 'RewardMovie' delayDeletes =", "entry): self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self): myFromPos =", "intervalName) def __onToBattleTwo(self, elapsedTime = 0): self.notify.debug('----- __onToBattleTwo') self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1,", "= self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self): self.notify.debug('----- enterVictory') self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)", "1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1) return def", "self.witnessToon = None self.witnessToonOnstage = False self.numToonJurorsSeated = 0 self.mainDoor", "d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides', []) def d_hitDefensePan(self): 
self.notify.debug('----- d_hitDefensePan') self.sendUpdate('hitDefensePan',", "= 0 for key in self.chairs.keys(): chair = self.chairs[key] if", "__continueDefeat(self): self.notify.debug('----- __continueDefeat') self.stopAnimate() self.doneBarrier('Defeat') def exitDefeat(self): self.notify.debug('----- exitDefeat') self.stopAnimate()", "= CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6) defenseTube.setTangible(1) defenseCollNode", "planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3 = self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty(): self.door3 = self.geom.find('**/interior/CR3_Door')", "self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterDefeat(self): self.notify.debug('-----", "10, 180, 0, 0)))), (27, Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute,", "%d' % cannon.doId) cannonPos = cannon.nodePath.getPos(render) self.notify.debug('cannonPos = %s' %", "self.geom.find('**/Podium') newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not self.debugPositions: self.podium.setZ(newZ)", "self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale = 35 def __unloadMopaths(self): self.notify.debug('----- __unloadMopaths')", "self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)", "= 1 self.forward = 1 self.doAnimate() self.__hideWitnessToon() if not self.mainDoor.isEmpty():", "gavel, entry): self.notify.debug('touchedGavel') attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '':", 
"Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString,", "bossTrack = Sequence() bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, startHpr,", "loadScaleOld(self): startingTilt = 0 self.scaleNodePath = NodePath('injusticeScale') beamGeom = self.createBlock(0.25,", "__finalPieSplat(self, toon, pieCode): if pieCode != ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat', [])", "not cannonIndex == None and cannonIndex >= 0: diffSettings =", "h = points[i] toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h,", "= None return def __showWitnessToon(self): if not self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom)", "self.cr.doId2do.get(toonId) if toon: toon.hide() def __showToons(self): for toonId in self.involvedToons:", "if not self.elevatorEntrance.isEmpty(): pass def enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self)", "self.raised = 1 self.forward = 1 self.doAnimate() self.accept('enterWitnessStand', self.__touchedWitnessStand) self.accept('pieSplat',", "Func(door.request, 'close')) self.__cleanupStrafe() self.strafeInterval = seq seq.start() def replaceCollisionPolysWithPlanes(self, model):", "== localAvatar: pass elif pieCode == ToontownGlobals.PieCodeLawyer: pass def __localPieSplat(self,", "solid = cn.getSolid(i) if isinstance(solid, CollisionPolygon): plane = Plane(solid.getPlane()) planes.append(plane)", "pieCode, entry): if pieCode == ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry) if pieCode !=", "toon.setPos(self.witnessToon, x, y, 0) toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show() def __talkAboutPromotion(self, speech):", "self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueDefeat(self): 
self.notify.debug('----- __continueDefeat')", "task): self.notify.debug('----- __recoverBossDamage') if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return Task.cont", "loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg')", "import StackTrace from direct.gui.DirectGui import * from panda3d.core import *", "= delayDeletes ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def", "% toon.getPos()) def touchedGavel(self, gavel, entry): self.notify.debug('touchedGavel') attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode')", "def __makePrepareBattleThreeMovie(self): movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15, 20),", "points[i] origPos = pos self.notify.debug('origPos = %s' % origPos) self.notify.debug('batlleNode.getTransform", "= 0 self.raised = 0 self.forward = 1 intervalName =", "localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie = None self.unstickBoss() taskName =", "numGears for i in xrange(numGears): node = gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0,", "Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale = 35 def __unloadMopaths(self):", "[]) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice'))", "center = (numToons - 1) / 2.0 for i in", "base.playMusic(self.battleThreeMusic, looping=1, 
volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self): self.notify.debug('----- __continueVictory') self.stopAnimate() self.doneBarrier('Victory')", "= None return def flashPanBlue(self): self.cleanupPanFlash() intervalName = 'FlashPanBlue' self.defensePanNodePath.setColorScale(1,", "self.notify.debug('----- __walkSuitToPoint') vector = Vec3(toPos - fromPos) distance = vector.length()", "'DistributedLawbotBoss') self.lawyers = [] self.lawyerRequest = None self.bossDamage = 0", "= 0): self.notify.debug('----- __doneEpilogue') intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track =", "insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB", "toon.getName() else: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def toonGotHealed(self, toonId): toon", "ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85: self.unstashBaseCol() else:", "def __makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos", "track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1) bossTrack.append(track) duration", "%s' % stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not found %s' % stuffToHide)", "if side == 0: gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180, 0, 0)", "str in itemsToHide: stuffToHide = self.geom.find('**/%s' % str) if not", "self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor = 
self.geom.find('**/MidVaultFloor1') if", "self.stashBaseCol() def unloadEnvironment(self): self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom def", "% (self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech += '\\x07' trialSpeech += weightBonusText self.witnessToon.setLocalPageChat(trialSpeech,", "= Parallel() points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] self.notify.debug('walkToonsToBattlePosition: points =", "y1, z2) vertexWriter.addData3f(x1, y2, z2) vertexWriter.addData3f(x2, y2, z2) for index", "Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0],", "def __makePrepareBattleTwoMovie(self): chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale movie = Sequence(Func(camera.reparentTo,", "= 'prepareBattleTwoCannonsAppear' seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "toontown.building import ElevatorUtils from toontown.battle import RewardPanel from toontown.toon import", "- 0) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if self.bossDamage", "seq.start() self.storeInterval(seq, intervalName) def __onToBattleTwo(self, elapsedTime = 0): self.notify.debug('----- __onToBattleTwo')", "ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos),", "from setTaunt, no attr nametag') gotError = True if gotError:", "self.juryTimer = ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() 
self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self): self.notify.debug('----- exitBattleTwo') intervalName", "= 1 self.raised = 1 self.forward = 1 intervalName =", "0.1 planes.sort(lambda p1, p2: p1.compareTo(p2, threshold)) lastPlane = None for", "= CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom =", "self.unstickBoss() taskName = 'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return", "not collList: collList = [model] for cnp in collList: cn", "1) bossTrack.append(track) duration = bossTrack.getDuration() return bossTrack def __showOnscreenMessage(self, text):", "self.witnessToon.addActive() def enterElevator(self): self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy", "points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] else: points = list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds)", "self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def setScaleTilt(self, tilt): self.beamNodePath.setP(tilt) if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt)", "npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon = npc self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self):", "= cannonIndex def numJurorsSeatedByCannon(self, cannonIndex): retVal = 0 for chair", "bossTrack = 
Sequence() self.notify.debug('calling setPosHpr') myInterval = camera.posHprInterval(8, Point3(-22, -100,", "+ ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime,", "self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog == self: OneBossCog = None return", "a collision node: %s' % repr(cnp)) break newCollideMask = newCollideMask", "CFSpeech)))) track.append(dialogTrack) return Sequence( Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self,", "self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)), name=intervalName) self.panFlashInterval = seq seq.start()", "def __pieSplat(self, toon, pieCode): if pieCode == ToontownGlobals.PieCodeBossInsides: if toon", "gotError: st = StackTrace() print st return chatString = TTLocalizer.LawbotBossTaunts[1]", "self.doAnimate() self.accept('enterWitnessStand', self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie',", "self.cannons: cannon = self.cannons[index] toon = self.cr.doId2do.get(toonId) self.notify.debug('cannonId = %d'", "self.setPosHpr(startPos, startHpr) bossTrack = Sequence() bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr =", "itemsToHide: stuffToHide = self.geom.find('**/%s' % str) if not stuffToHide.isEmpty(): self.notify.debug('found", "self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() negBeamLocatorPos = -beamLocatorPos", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy = 1 
self.raised = 1 self.forward = 1", "self.notify.debug('----- exitBattleTwo') intervalName = self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if", "5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5)) insidesANode = CollisionNode('BossZap')", "exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self): self.notify.debug('----- enterBattleTwo')", "5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5))", "from direct.interval.IntervalGlobal import * from toontown.battle.BattleProps import * from direct.distributed.ClockDelta", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack", "self.toonsB, render, (-2.798, -70, 10, 180, 0, 0)))), (27, Sequence(", "Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0),", "/ (numGears - 1) - 0.5) * spread x =", "toon: if index in self.cannons: cannon = self.cannons[index] cannonSeq =", "direct.distributed.ClockDelta import * from direct.showbase.PythonUtil import Functor from direct.showbase.PythonUtil import", "= 1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self):", "0 self.scaleNodePath = NodePath('injusticeScale') beamGeom = self.createBlock(0.25, 2, 0.125, -0.25,", "seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return seq def __makeBossDamageMovie(self): self.notify.debug('---- __makeBossDamageMovie') startPos =", "'open'), Wait(0.7), gearTrack, Func(door.request, 'close')) self.__cleanupStrafe() self.strafeInterval = seq seq.start()", "return 
Sequence( Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self, toonIds, battleNode):", "35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0),", "= Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15, 20), Func(camera.setHpr, -90, 0,", "state, state=%s', self.state) gotError = True if not hasattr(self, 'nametag'):", "if self.cr: place = self.cr.playGame.getPlace() if place and hasattr(place, 'fsm'):", "positionToons') def __makePrepareBattleTwoMovie(self): chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale movie =", "= Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start() def exitEpilogue(self): self.notify.debug('----- exitEpilogue')", "(9.5, Sequence(Func(camera.wrtReparentTo, render))), (9.6, Parallel( rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2,", "== 'ToonJuror': self.numToonJurorsSeated += 1 self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated)", "= -1 return def announceGenerate(self): global OneBossCog self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self)", "self.getBossDamage() / 100.0 gearTrack = Parallel() numGears = int(4 +", "self.elevatorEntrance.isEmpty(): pass def enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)", "self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos = render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos = %s' %", "not self.bonusTimer: self.bonusTimer = ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() 
self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer) def", "-0.25, -0.25, 3) self.standNodePath = NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0)", "result = %s' % pos) self.notify.debug('walkToonsToBattlePosition: final pos = %s'", "self.baseColStashed = False self.battleDifficulty = 0 self.bonusWeight = 0 self.numJurorsLocalToonSeated", "int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def createBlock(self, x1, y1,", "= 1 self.doAnimate() self.accept('enterWitnessStand', self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies',", "self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True) ival =", "self.unstashBoss, 'unstashBoss') def __onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def", "'Ff_lookRt', duration=3, startTime=3, endTime=0), ActorInterval(self, 'Ff_neutral', duration=2), ActorInterval(self, 'Ff_speech', duration=7,", "in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track", "1] else: points = list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5]) self.notify.debug('toonsToBattlePosition: points", "makeEpilogueMovie(self): epSpeech = TTLocalizer.WitnessToonCongratulations epSpeech = self.__talkAboutPromotion(epSpeech) bossTrack = Sequence(Func(self.witnessToon.animFSM.request,", "movie def 
__doWitnessPrepareBattleThreeChat(self): self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d' % self.numToonJurorsSeated)", "return bossTrack def makeDefeatMovie(self): bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self,", "if toon == localAvatar: self.d_hitBoss(1) if self.dizzy: self.flashRed() self.doAnimate('hit', now=1)", "direct.interval.IntervalGlobal import * from toontown.battle.BattleProps import * from direct.distributed.ClockDelta import", "self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog != None: self.notify.warning('Multiple BossCogs", "beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos = render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos =", "track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1) bossTrack.append(track) track,", "beamRenderPos) beamBoundsCenter = self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter) beamLocatorBounds", "unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom def __loadMopaths(self): self.notify.debug('----- __loadMopaths') self.toonsEnterA", "= 'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del self.rewardPanel self.battleThreeMusicTime = 0", "battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack = Sequence() myInterval =", "self.__cleanupStrafe() self.strafeInterval = seq seq.start() def replaceCollisionPolysWithPlanes(self, model): newCollisionNode =", "has no attackCode tag.' 
% repr(entry.getIntoNodePath())) return attackCode = int(attackCodeStr)", "toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.stopLookAround() toon.stopSmooth()", "toontown.building import ElevatorConstants from toontown.toonbase import ToontownTimer OneBossCog = None", "self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleThree(self): self.notify.debug('----- exitPrepareBattleThree')", "else: self.notify.warning('Unexpected collision solid: %s' % repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold", "name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleThree(self, elapsed): self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree')", "= %s' % center) self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() - 1.5) self.witnessToon.setY(self.witnessToon.getY()", "= juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech += '\\x07' trialSpeech +=", "= NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) self.defensePanNodePath.setPos(0, -2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube = CollisionTube(0,", "colorScale=VBase4(1, 1, 1, 1)), name=intervalName) self.panFlashInterval = seq seq.start() self.storeInterval(seq,", "/ 180.0 if direction == 1: spread = -spread dist", "from setTaunt, not in battle three state, state=%s', self.state) gotError", "def toonEnteredCannon(self, toonId, cannonIndex): if base.localAvatar.doId == toonId: self.cannonIndex =", "ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: numJurors = self.numJurorsSeatedByCannon(cannonIndex) bonusWeight = numJurors -", "delayDeletes): self.notify.debug('----- 
makeIntroductionMovie') for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId)", "intervalName = 'prepareBattleTwo' seq = Sequence(prepareBattleTwoMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "= 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start()", "'prepareBattleTwoCannonsAppear' seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) def", "= self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos = self.reflectedJuryBox.getPos() newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos)", "self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track =", "self.__showWitnessToon() prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName = 'prepareBattleThree' seq", "if self.juryTimer: self.juryTimer.destroy() del self.juryTimer if self.bonusTimer: self.bonusTimer.destroy() del self.bonusTimer", "Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10,", "beamLocatorPos) def loadScaleNew(self): self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath = self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath", "= Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9,", "self.baseSideCol.stash() self.baseColStashed = True def unstashBaseCol(self): if self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash()", "self.bonusTimer.destroy() del self.bonusTimer 
localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog == self: OneBossCog =", "juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated juryResult += '\\x07' trialSpeech =", "self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self, bossDamage): self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss', [bossDamage])", "self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale = 35 self.toonsEnterB = Mopath.Mopath()", "localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer: self.juryTimer.destroy() del self.juryTimer self.juryTimer = None for", "startTime=2, duration=10, loop=1), ActorInterval(self, 'Ff_lookRt', duration=3), ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3,", "'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del self.rewardPanel self.battleThreeMusicTime = 0 self.battleThreeMusic.stop()", "bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, None, battlePos,", "newCollisionNode = CollisionNode('collisions') newCollideMask = BitMask32(0) planes = [] collList", "return bossTrack def __makeWitnessToon(self): dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc = Toon.Toon()", "0 newWeight = defaultWeight + bonusWeight self.notify.debug('toon %d has weight", "= NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def setScaleTilt(self,", "y2, z2, r = 1.0, g = 1.0, b =", "beamRenderPos = render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos = %s' % 
beamPos) self.notify.debug('beamRelPos", "TTLocalizer.BossCogNameWithDept % {'name': self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg')", "self.notify.debug('----- LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() self.loop('Ff_neutral') self.notify.debug('self.battleANode =", "loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic", "diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt", "ToontownTimer OneBossCog = None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss')", "self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy = 1 self.raised = 1 self.forward", "ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos = self.reflectedJuryBox.getPos() reflectedEndingAbsPos = Point3(curReflectedPos[0]", "0.5, 0, -0.5, -0.5, -2, 1.0, 0, 0, 1.0) self.prosecutionPanNodePath", "self.chairs.values(): chair.stopCogsFlying() return def enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor() intervalName", "(float(i) / (numGears - 1) - 0.5) * spread x", "nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} 
self.setDisplayName(nameInfo) self.piesRestockSfx", "__enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self): myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])", "Point3(0, 0, -50))) planeNode = CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3", "self.setScaleTilt(startingTilt) def setScaleTilt(self, tilt): self.beamNodePath.setP(tilt) if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else:", "= %s' % beamRenderPos) beamBoundsCenter = self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter = %s'", "track.append(bossAnimTrack) attackToons = TTLocalizer.BossCogAttackToons dialogTrack = Track( (0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0,", "self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat)", "entry): attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '': self.notify.warning('Node %s", "if not hasattr(self, 'nametag'): self.notify.warning('returning from setTaunt, no attr nametag')", "self.bonusTimer: self.bonusTimer.hide() def enteredBonusState(self): self.witnessToon.clearChat() text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier,", "= self.geom.find('**/interior/CR3_Door') self.mainDoor = self.geom.find('**/Door_1') if not self.mainDoor.isEmpty(): itemsToHide =", "not self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl') reflectedZ = 
self.reflectedPodium.getZ() if", "hpr, battlePos, battleHpr, 0) self.makeToonsWait() finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ()", "- ToontownGlobals.LawbotBossInitialDamage) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt else: percentDamaged =", "pos[1] + 10, pos[2], h, 0, 0) def __outOfPies(self): self.notify.debug('-----", "self.defensePanNodePath.setPos(0, -2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube = CollisionTube(0, 0, -0.5, 0,", "tris.addVertex(0) tris.addVertex(1) tris.addVertex(2) tris.closePrimitive() tris.addVertex(1) tris.addVertex(3) tris.addVertex(2) tris.closePrimitive() tris.addVertex(2) tris.addVertex(3)", "= %s' % battleNode.getParent()) self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos()) bnParent", "= TTLocalizer.WitnessToonAllJurors else: juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated juryResult +=", "{'name': self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx =", "multiCannons = Parallel() index = 0 self.involvedToons.sort() for toonId in", "self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0]) for i in xrange(len(toonIds)):", "toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.loop('neutral') def", "Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat)) return", "= 25 self.evidenceHitSfx = None self.toonUpSfx = None self.bonusTimer =", "self.happy = 0 self.raised = 0 self.forward = 1 intervalName", "7, 3) door = self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee()", "% pos) self.notify.debug('walkToonsToBattlePosition: final pos = %s' % pos) ival.append(Sequence(Func(toon.setPlayRate,", 
"self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def enterIntroduction(self): self.notify.debug('----- enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)", "None self.witnessToonOnstage = False self.numToonJurorsSeated = 0 self.mainDoor = None", "= %d' % self.numToonJurorsSeated) return def cleanupPanFlash(self): if self.panFlashInterval: self.panFlashInterval.finish()", "seq.start() self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self):", "self.notify.debug('----- exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName = 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage()", "BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i])", "1)), name=intervalName) self.panFlashInterval = seq seq.start() self.storeInterval(seq, intervalName) def saySomething(self,", "self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def __onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss()", "self.numToonJurorsSeated == 1: juryResult = TTLocalizer.WitnessToonOneJuror elif self.numToonJurorsSeated == 12:", "seq = Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString, CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq =", "True if gotError: st = StackTrace() print st return chatString", "gearTrack = Parallel() numGears = int(4 + 6 * t", "= 1 self.doAnimate() self.__hideWitnessToon() if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not", "= self.cr.doId2do.get(toonId) if toon: toon.show() def 
__arrangeToonsAroundWitnessToon(self): radius = 7", "weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0) def __makePrepareBattleThreeMovie(self): movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos,", "= self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon()", "0) def __recoverBossDamage(self, task): self.notify.debug('----- __recoverBossDamage') if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() *", "render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos) self.notify.debug('prosecutionPanRelPos = %s'", "= %f' % rollTrackDuration) doorStartPos = self.door3.getPos() doorEndPos = Point3(doorStartPos[0],", "toon: angle = 90 - 15 * (i - center)", "2.0, 0.5), Point3(-4.0, 2.0, 0.5)) insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask", "0, 0.35), scale=0.1) return def __clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage", "1 self.raised = 1 self.forward = 1 self.doAnimate() self.accept('enterWitnessStand', self.__touchedWitnessStand)", "exitDefeat(self): self.notify.debug('----- exitDefeat') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop()", "= render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint result = %s' % pos)", "if toon: base.playSfx(self.toonUpSfx, node=toon) def hideBonusTimer(self): if self.bonusTimer: self.bonusTimer.hide() def", "-2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0,", "else: points = 
list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5]) self.notify.debug('toonsToBattlePosition: points =", "import ToontownGlobals from toontown.toonbase import ToontownBattleGlobals import DistributedBossCog from toontown.toonbase", "self.uniqueName('Drop') seq = Sequence(name=name) seq += [Wait(0.0)] if hasLocalToon: seq", "time = 5.0 - 4.0 * t spread = 60", "None self.lawyers = lawyers for i in xrange(len(self.lawyers)): suit =", "% bnWorldPos) pos = render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint result =", "= %s ' % prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos = %s ' %", "delayDeletes): self.notify.debug('----- __walkToonToPromotion') toon = base.cr.doId2do.get(toonId) if toon: destPos =", "self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy =", "elif not self.state == 'BattleThree': self.notify.warning('returning from setTaunt, not in", "self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat()", "in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes", "self.storeInterval(track, intervalName) track.start() def exitEpilogue(self): self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop()", "self.stopAnimate() self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) 
self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) camera.setPos(self.witnessToon, -9,", "__makeBossDamageMovie') startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos", "if toon: toon.stopLookAround() toon.stopSmooth() if self.hasLocalToon(): self.toMovieMode() for toonId in", "'unstashBoss') def __onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self):", "seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1,", "from toontown.battle.BattleProps import * from direct.distributed.ClockDelta import * from direct.showbase.PythonUtil", "12, 6) camera.lookAt(self.witnessToon, 0, 0, 3) intervalName = 'EpilogueMovie' seq", "= self.getGearFrisbee() gearModel.setScale(0.1) t = self.getBossDamage() / 100.0 gearTrack =", "= model.findAllMatches('**/+CollisionNode') if not collList: collList = [model] for cnp", "self.flashGreen() if toon == localAvatar: pass elif pieCode == ToontownGlobals.PieCodeLawyer:", "bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)", "DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self): self.stash() def unstashBoss(self, task): self.unstash() self.reparentTo(render) def", "None self.strafeInterval = None self.onscreenMessage = None self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage", "None self.baseColStashed = False self.battleDifficulty = 0 self.bonusWeight = 0", "def exitIntroduction(self): self.notify.debug('----- 
exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if not self.mainDoor.isEmpty(): pass", "12: juryResult = TTLocalizer.WitnessToonAllJurors else: juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated", "seq.append(Func(self.setChatAbsolute, chatString, CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq = self.activeIntervals.get(intervalName) if oldSeq:", "0 if not cannonIndex == None and cannonIndex >= 0:", "None: self.notify.warning('Multiple BossCogs visible.') OneBossCog = self return def disable(self):", "self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) def __gotLawyers(self, lawyers): self.lawyerRequest =", "= 1 self.forward = 1 intervalName = 'VictoryMovie' seq =", "def delete(self): self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self, bossDamage): self.notify.debug('----- d_hitBoss')", "self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() if self.juryTimer: self.juryTimer.destroy() del", "stashBaseCol(self): if not self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed = True", "toonId): toon = base.cr.doId2do.get(toonId) if toon: base.playSfx(self.toonUpSfx, node=toon) def hideBonusTimer(self):", "TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def toonGotHealed(self, toonId): toon = base.cr.doId2do.get(toonId) if toon:", "random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0),", "pos.setY(pos.getY() + 2.0) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos()", "tris.addVertex(2) 
tris.addVertex(6) tris.addVertex(4) tris.closePrimitive() tris.addVertex(1) tris.addVertex(5) tris.addVertex(3) tris.closePrimitive() tris.addVertex(3) tris.addVertex(5)", "+ ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath,", "tris.closePrimitive() tris.addVertex(1) tris.addVertex(3) tris.addVertex(2) tris.closePrimitive() tris.addVertex(2) tris.addVertex(3) tris.addVertex(6) tris.closePrimitive() tris.addVertex(3)", "return def enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor() intervalName = 'RollToBattleThree'", "return doId = int(avatarDoId) if doId != localAvatar.doId: self.d_hitToon(doId) def", "Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0)) rollTrackDuration = rollTrack.getDuration() self.notify.debug('rollTrackDuration =", "= bossDamage self.recoverRate = recoverRate self.recoverStartTime = recoverStartTime taskName =", "render.getTransform=%s' % (battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale())) myCurPos", "= CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom =", "== 0: juryResult = TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated == 1: juryResult", "makeToonsWait(self): self.notify.debug('makeToonsWait') for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if", "intervalName = 'EpilogueMovie' seq = Sequence(self.makeEpilogueMovie(), 
name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "= Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos", "% negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol') oldBitMask =", "tris.addVertex(4) tris.closePrimitive() cubeGeom = Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN = GeomNode('cube') cubeGN.addGeom(cubeGeom)", "toon.loop('neutral') toon.show() def __talkAboutPromotion(self, speech): if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel", "0.5), Point3(-4.0, 2.0, 0.5)) insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask |", "not self.useCannons: self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) self.startJuryBoxMoving()", "self.scaleNodePath = NodePath('injusticeScale') beamGeom = self.createBlock(0.25, 2, 0.125, -0.25, -2,", "if self.bonusTimer: self.bonusTimer.destroy() del self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog == self:", "juryBoxPos = self.juryBox.getPos() newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not", "self.storeInterval(seq, intervalName) self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def __doneEpilogue(self, elapsedTime", "self.promotionMusic.stop() if not self.mainDoor.isEmpty(): pass if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if", "else: self.notify.debug('not found %s' % stuffToHide) self.geom.reparentTo(render) 
self.loadWitnessStand() self.loadScale() self.scaleNodePath.stash()", "self.loadWitnessStand() self.loadScale() self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium() ug = self.geom.find('**/Reflections') ug.setBin('ground', -10)", "self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath", "self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0, volume=1.0) def __showCannonsAppearing(self, elapsedTime = 0):", "elif pieCode == ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if toon == localAvatar: pass", "def makeDefeatMovie(self): bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0,", "0 def __hideToons(self): for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId)", "self.stopAnimate() self.controlToons() panelName = self.uniqueName('reward') self.rewardPanel = RewardPanel.RewardPanel(panelName) victory, camVictory,", "%s' % bnWorldPos) pos = render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint result", "(5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo, render))), (9.6, Parallel( rollTrack,", "def __hideWitnessToon(self): if self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage = 0 def", "0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return seq def __makeBossDamageMovie(self):", "self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) 
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie", "vertexWriter.addData3f(x1, y1, z2) vertexWriter.addData3f(x2, y1, z2) vertexWriter.addData3f(x1, y2, z2) vertexWriter.addData3f(x2,", "self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed = True def unstashBaseCol(self): if self.baseColStashed: self.notify.debug('unstashBaseCol')", "Point3(0, 0, 0) prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath,", "= Sequence(Parallel(victory, camVictory), Func(self.__doneReward)) intervalName = 'RewardMovie' delayDeletes = []", "return def flashPanBlue(self): self.cleanupPanFlash() intervalName = 'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1, 1,", "y1, z1, x2, y2, z2, r = 1.0, g =", "toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show() def __talkAboutPromotion(self, speech): if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel:", "self.useProgrammerScale: self.loadScaleOld() else: self.loadScaleNew() def __debugScale(self): prosecutionPanPos = self.prosecutionPanNodePath.getPos() origin", "base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss') def __onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo')", "exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self): self.stash() def unstashBoss(self, task): self.unstash() self.reparentTo(render)", "self.standNodePath = NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def", "skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, 
self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList,", "def createBlock(self, x1, y1, z1, x2, y2, z2, r =", "__continueDefeat') self.stopAnimate() self.doneBarrier('Defeat') def exitDefeat(self): self.notify.debug('----- exitDefeat') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)", "self.cannons[index] cannon.cannon.show() def getChairParent(self): return self.juryBox def startJuryBoxMoving(self): if self.juryBoxIval:", "self.scaleNodePath.find('**/BaseHighCol') oldBitMask = self.baseHighCol.getCollideMask() newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask newBitMask", "import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from toontown.toonbase import ToontownBattleGlobals", "(ToontownGlobals.SuitWalkSpeed * 1.8) return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos))", "self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() +", "in self.cannons: cannon = self.cannons[index] cannonSeq = cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index", "self.betweenBattleMusic.stop() def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() localAvatar.setPos(-3, 0, 0) base.localAvatar.orbitalCamera.start() self.clearChat()", "self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol) self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30,", "= RewardPanel.RewardPanel(panelName) victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, 
self.toonRewardDicts,", "self.cr: place = self.cr.playGame.getPlace() if place and hasattr(place, 'fsm'): place.setState('waitForBattle')", "0) self.makeToonsWait() return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self):", "tris.addVertex(0) tris.addVertex(4) tris.addVertex(5) tris.closePrimitive() tris.addVertex(1) tris.addVertex(0) tris.addVertex(5) tris.closePrimitive() tris.addVertex(4) tris.addVertex(6)", "self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide()", "toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral')) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def", "= 0 self.involvedToons.sort() for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId)", "toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track = Parallel() bossAnimTrack = Sequence( ActorInterval(self,", "def __doneReward(self): self.notify.debug('----- __doneReward') self.doneBarrier('Reward') self.toWalkMode() def exitReward(self): self.notify.debug('----- exitReward')", "if toon: toon.hide() def __showToons(self): for toonId in self.involvedToons: toon", "== 0: if extraInfo < len(self.involvedToons): toonId = self.involvedToons[extraInfo] toon", "= 5.0 - 4.0 * t spread = 60 *", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)), (12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3,", "self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol') 
self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath =", "return def exitBattleOne(self): self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self): self.stash() def", "= Sequence() self.notify.debug('calling setPosHpr') myInterval = camera.posHprInterval(8, Point3(-22, -100, 35),", "'nametag'): self.notify.warning('returning from setTaunt, no attr nametag') gotError = True", "0, 0) prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin)", "shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide')", "self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy = 1 self.raised", "angle = (float(i) / (numGears - 1) - 0.5) *", "self.notify.debug('self.battleANode = %s' % self.battleANode) self.__hideWitnessToon() if self.battleA == None", "0) toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show() def __talkAboutPromotion(self, speech): if self.prevCogSuitLevel <", "Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos =", "= 'prepareBattleThree' seq = Sequence(prepareBattleThreeMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) def", "self.bossDamage - ToontownGlobals.LawbotBossInitialDamage diffDamage *= 1.0 if diffDamage >= 0:", "= self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos = 
render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos = %s'", "Func(node.headsUp, toPos), node.posInterval(time, toPos)) def __makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1],", "self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies,", "duration=2), ActorInterval(self, 'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack) attackToons = TTLocalizer.BossCogAttackToons dialogTrack", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon() if not self.useCannons: self.toonsToBattlePosition(self.toonsA, self.battleANode)", "= self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr)", "__outOfPies(self): self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self, task):", "cannon = self.cannons[index] cannon.cannon.show() def getChairParent(self): return self.juryBox def startJuryBoxMoving(self):", "def stashBoss(self): self.stash() def unstashBoss(self, task): self.unstash() self.reparentTo(render) def enterRollToBattleTwo(self):", "intervalName = self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer: self.juryTimer.destroy()", "cleanupAttacks(self): self.notify.debug('----- cleanupAttacks') 
self.__cleanupStrafe() def __cleanupStrafe(self): self.notify.debug('----- __cleanupStrage') if self.strafeInterval:", "= Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10,", "self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self): if not self.baseColStashed:", "20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral')) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon,", "i in xrange(numToons): toon = self.cr.doId2do.get(self.involvedToons[i]) if toon: angle =", "def enterReward(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() panelName =", "libotp import * from direct.fsm import FSM from direct.fsm import", "NodePath(newCollisionNode) def makeIntroductionMovie(self, delayDeletes): self.notify.debug('----- makeIntroductionMovie') for toonId in self.involvedToons:", "self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe()", "in xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0),", "Track( (0.5, Sequence( Func(self.clearChat), Func(camera.reparentTo, render), Func(camera.setPos, -3, 45, 25),", "self.numJurorsSeatedByCannon(cannonIndex) bonusWeight = numJurors - diffSettings[5] if bonusWeight < 0:", "newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel == 
ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.WitnessToonLastPromotion", "self.forward = 1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def", "-2.0, 0.5)) insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesANodePath", "= Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos = Point3(myFromPos[0], myFromPos[1] + 30,", "notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions = False def __init__(self, cr): self.notify.debug('-----", "tris.closePrimitive() tris.addVertex(3) tris.addVertex(7) tris.addVertex(6) tris.closePrimitive() tris.addVertex(0) tris.addVertex(2) tris.addVertex(4) tris.closePrimitive() tris.addVertex(2)", "None or self.battleB == None: pass return def exitBattleOne(self): self.notify.debug('-----", "def enterIntroduction(self): self.notify.debug('----- enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic,", "toontown.toon import NPCToons from direct.task import Task import random import", "ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'),", "self.involvedToons.sort() for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon:", "self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() panelName = self.uniqueName('reward') self.rewardPanel", "render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition: 
render.getRelativePoint result = %s' % pos) self.notify.debug('walkToonsToBattlePosition:", "Sequence( Func(self.clearChat), Func(camera.reparentTo, render), Func(camera.setPos, -3, 45, 25), Func(camera.setHpr, 0,", "self.bonusTimer = ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer) def setAttackCode(self, attackCode,", "self.notify.debug('finish the movie then transition to NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage", "self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)", "elif self.numToonJurorsSeated == 12: juryResult = TTLocalizer.WitnessToonAllJurors else: juryResult =", "origin) self.notify.debug('prosecutionLocatorPos = %s ' % prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos = %s", "bossTrack.append(Func(self.getGeomNode().setH, 180)) track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)", "oldSeq: oldSeq.finish() seq.start() self.storeInterval(seq, intervalName) def setTaunt(self, tauntIndex, extraInfo): gotError", "CollisionNode): self.notify.warning('Not a collision node: %s' % repr(cnp)) break newCollideMask", "self.battleThreeMusic.stop() return def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1)", "pieCode == ToontownGlobals.PieCodeLawyer: pass def __localPieSplat(self, pieCode, entry): if pieCode", "toon.wrtReparentTo(render) pos, h = points[i] if i > 3: 
pos.setY(pos.getY()", "2.0 for i in xrange(numToons): toon = self.cr.doId2do.get(self.involvedToons[i]) if toon:", "= self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes = delayDeletes ival.start()", "floor = self.geom.find('**/CR3_Floor') self.evFloor = self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane =", ">= self.bossMaxDamage: self.notify.debug('finish the movie then transition to NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration())", "origin) self.notify.debug('beamPos = %s' % beamPos) self.notify.debug('beamRelPos = %s' %", "ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target = CollisionTube(0,", "from direct.fsm import ClassicFSM from direct.fsm import State from direct.directnotify", "self.makeToonsWait() return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self): if", "stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not found %s' % stuffToHide) self.geom.reparentTo(render) self.loadWitnessStand()", "tris.addVertex(1) tris.addVertex(5) tris.addVertex(3) tris.closePrimitive() tris.addVertex(3) tris.addVertex(5) tris.addVertex(7) tris.closePrimitive() tris.addVertex(0) tris.addVertex(4)", "ToontownGlobals.LawbotBossMaxDamage * 0.85: self.unstashBaseCol() else: self.stashBaseCol() def unloadEnvironment(self): self.notify.debug('----- unloadEnvironment')", "localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar)) multiCannons = Parallel() index = 0 self.involvedToons.sort()", "attr state') gotError = True elif not self.state == 'BattleThree':", "self.releaseToons(finalBattle=1) self.stashBoss() 
self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor() intervalName = 'RollToBattleTwo' seq =", "newCollisionNode.setIntoCollideMask(newCollideMask) threshold = 0.1 planes.sort(lambda p1, p2: p1.compareTo(p2, threshold)) lastPlane", "10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0)) return movie def __doWitnessPrepareBattleThreeChat(self): self.notify.debug('__doWitnessPrepareBattleThreeChat:", "toontown.toonbase import ToontownTimer OneBossCog = None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify", "d_hitBoss(self, bossDamage): self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss', [bossDamage]) def d_healBoss(self, bossHeal): self.notify.debug('-----", "import TTLocalizer import SuitDNA from toontown.toon import Toon from toontown.battle", "self.geom.setScale(1) self.elevatorEntrance = self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance)", "render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos = %s' % beamPos) self.notify.debug('beamRelPos = %s'", "seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge') center = seatCenter.getPos() self.notify.debug('center = %s' %", "(i - center) radians = angle * math.pi / 180.0", "self.geom.find('**/Door_1') if not self.mainDoor.isEmpty(): itemsToHide = ['interior/Door_1'] for str in", "ToontownGlobals.LawbotBossInitialDamage) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt else: percentDamaged = diffDamage", "tag.' % repr(entry.getIntoNodePath())) return doId = int(avatarDoId) if doId !=", "toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h, 0, 0) def", "no attackCode tag.' 
% repr(entry.getIntoNodePath())) return attackCode = int(attackCodeStr) into", "0, 10, 0))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2,", "= loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0, -71.601) self.geom.setScale(1) self.elevatorEntrance = self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach()", "self.setName(TTLocalizer.LawbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo)", "= self.prosecutionLocator.getBounds() prosecutionLocPos = prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos)", "def makeIntroductionMovie(self, delayDeletes): self.notify.debug('----- makeIntroductionMovie') for toonId in self.involvedToons: toon", "h, 0, 0) self.notify.debug('new toon pos %s ' % toon.getPos())", "= %s' % prosecutionPanPos) self.notify.debug('prosecutionPanRelPos = %s' % prosecutionPanRelPos) self.notify.debug('panRenderPos", "curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos = self.reflectedJuryBox.getPos() reflectedEndingAbsPos = Point3(curReflectedPos[0] +", "toon == localAvatar: pass elif pieCode == ToontownGlobals.PieCodeLawyer: pass def", "newCollisionNode.addSolid(cp) lastPlane = plane return NodePath(newCollisionNode) def makeIntroductionMovie(self, delayDeletes): self.notify.debug('-----", "for toonId in self.involvedToons: if index in self.cannons: cannon =", "self.battleB == None: pass return def exitBattleOne(self): self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self)", "self.getPos()) self.notify.debug('battleNode.parent() = %s' % battleNode.getParent()) self.notify.debug('battleNode.parent().getPos() = %s' %", "def startJuryBoxMoving(self): if self.juryBoxIval: 
self.juryBoxIval.finish() self.juryBoxIval = None self.juryBox.setPos(-30, 0,", "blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()),", "not self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed = True def unstashBaseCol(self):", "not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not found", "self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1,", "= %s' % beamLocatorPos) def loadScaleNew(self): self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath", "in self.lawyers: if lawyerDoId == lawyer.doId: lawyer.sendUpdate('hitByToon', []) def __finalPieSplat(self,", "0: if extraInfo < len(self.involvedToons): toonId = self.involvedToons[extraInfo] toon =", "chair.toonJurorIndex == cannonIndex: retVal += 1 return retVal def calculateWeightOfToon(self,", "cannon.nodePath.getPos(render) self.notify.debug('cannonPos = %s' % cannonPos) if toon: self.notify.debug('toon =", "threshold) != 0: cp = CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane = plane", "trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: newWeight, self.bonusWeight,", "enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy = 
1 self.raised =", "1] for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon:", "= newBitMask & ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask)", "= Plane(solid.getPlane()) planes.append(plane) else: self.notify.warning('Unexpected collision solid: %s' % repr(solid))", "0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) track, hpr = self.rollBossToPoint(startPos, None,", "1 self.forward = 1 intervalName = 'VictoryMovie' seq = Sequence(self.makeVictoryMovie(),", "0: gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180, 0, 0) door = self.doorA", "self.notify.debug('----- exitRollToBattleThree') self.unstickBoss() intervalName = 'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleThree(self):", "avatarDoId == '': self.notify.warning('Toon %s has no avatarDoId tag.' 
%", "pass colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def loadScale(self): self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug',", "self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes = delayDeletes ival.start() self.storeInterval(ival,", "toon: base.playSfx(self.toonUpSfx, node=toon) def hideBonusTimer(self): if self.bonusTimer: self.bonusTimer.hide() def enteredBonusState(self):", "in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render) pos, h", "loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx = [] for i in", "= self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds = self.defenseLocator.getBounds() defenseLocPos = defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos =", "topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB) else:", "self.notify.debug('----- exitReward') intervalName = 'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del self.rewardPanel", "battleNode=%s' % (toonIds, battleNode)) ival = Parallel() points = BattleBase.BattleBase.toonPoints[len(toonIds)", "tris.addVertex(4) tris.closePrimitive() tris.addVertex(2) tris.addVertex(6) tris.addVertex(4) tris.closePrimitive() tris.addVertex(1) tris.addVertex(5) tris.addVertex(3) tris.closePrimitive()", "__makeBossDamageMovie(self): self.notify.debug('---- __makeBossDamageMovie') startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr =", "self.strafeSfx = [] for i in xrange(10): 
self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog))", "self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() - 1.5) self.witnessToon.setY(self.witnessToon.getY() - 1.15) self.witnessToonOnstage =", "entry): self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self): myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos", "gFormat = GeomVertexFormat.getV3n3cpt2() myVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic)", "loop=1)) return bossTrack def makeEpilogueMovie(self): epSpeech = TTLocalizer.WitnessToonCongratulations epSpeech =", "self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos = %s '", "% (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout) base.playSfx(self.toonUpSfx) if not", "self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie = None self.unstickBoss() taskName = 'RecoverBossDamage' taskMgr.remove(taskName)", "self.__makeWitnessToon() self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog != None: self.notify.warning('Multiple BossCogs visible.')", "self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self): self.notify.debug('----- enterVictory') self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral')", "hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() finalPodiumPos =", "self.notify.debug('----- d_hitDefensePan') self.sendUpdate('hitDefensePan', []) def 
d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', [])", "vertexWriter.addData3f(x1, y1, z1) vertexWriter.addData3f(x2, y1, z1) vertexWriter.addData3f(x1, y2, z1) vertexWriter.addData3f(x2,", "in self.chairs.keys(): chair = self.chairs[key] if chair.state == 'ToonJuror' or", "str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath = self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds =", "targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0, 1,", "15, 20), Func(camera.setHpr, -90, 0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos,", "enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1, volume=0.9) if", "for i in xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA = CollisionPolygon(Point3(4.0,", "+= 1 self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) return def cleanupPanFlash(self):", "= %s' % self.getPos()) self.notify.debug('battleNode.parent() = %s' % battleNode.getParent()) self.notify.debug('battleNode.parent().getPos()", "__cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox') if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None if", "OneBossCog = None return def delete(self): self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self) def", 
"localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat() self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy = 1 self.raised =", "if i > 3: pos.setY(pos.getY() + 2.0) bnParent = battleNode.getParent()", "tris.addVertex(7) tris.addVertex(6) tris.closePrimitive() tris.addVertex(0) tris.addVertex(2) tris.addVertex(4) tris.closePrimitive() tris.addVertex(2) tris.addVertex(6) tris.addVertex(4)", "0, 0, 0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return seq", "toontown.distributed import DelayDelete from toontown.battle import MovieToonVictory from toontown.building import", "toonId, delay, mopath, track, delayDeletes): self.notify.debug('----- __walkToonToPromotion') toon = base.cr.doId2do.get(toonId)", "self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self): self.notify.debug('----- enterVictory') self.cleanupIntervals() self.reparentTo(render)", "speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1) if newCogSuitLevel in", "door = self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee() gearModel.setScale(0.1) t", "= CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5),", "1 self.doAnimate() self.accept('enterWitnessStand', self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies)", "= self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) def __gotLawyers(self, lawyers): self.lawyerRequest = None self.lawyers", "pos, h = points[i] toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2],", "1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), 
ParallelEndTogether(MopathInterval(mopath, toon),", "%s' % points[0][0]) for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i])", "gotToon(self, toon): stateName = self.state if stateName == 'Elevator': self.placeToonInElevator(toon)", "self.evFloor.setName('floor') plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50))) planeNode", "False def __init__(self, cr): self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss')", "negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol') oldBitMask = self.baseHighCol.getCollideMask()", "base.localAvatar.doId == toonId: self.cannonIndex = cannonIndex def numJurorsSeatedByCannon(self, cannonIndex): retVal", "= Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr)", "self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None self.juryBox.setPos(-30, 0, -12.645) self.reflectedJuryBox.setPos(-30, 0,", "Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos,", "i in xrange(numGears): node = gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0, 0)", "self.geom def __loadMopaths(self): self.notify.debug('----- __loadMopaths') self.toonsEnterA = Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward", "from toontown.toonbase import ToontownBattleGlobals import DistributedBossCog from toontown.toonbase import TTLocalizer", "in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.stopLookAround() 
toon.stopSmooth() if", "hasLocalToon: seq += [Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0,", "cleanupAttacks') self.__cleanupStrafe() def __cleanupStrafe(self): self.notify.debug('----- __cleanupStrage') if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval", "self.notify.debug('prosecutionPanRelPos = %s' % prosecutionPanRelPos) self.notify.debug('panRenderPos = %s' % panRenderPos)", "self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom = self.createBlock(0.25, 0.25, 0, -0.25, -0.25,", "self.notify.debug('----- __continueVictory') self.stopAnimate() self.doneBarrier('Victory') def exitVictory(self): self.notify.debug('----- exitVictory') self.stopAnimate() self.unstash()", "0)))), (27, Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute, attackToons, CFSpeech)))) track.append(dialogTrack)", "> 0: if self.bonusWeight == 1: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else:", "__showWitnessToon(self): if not self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge') center", "insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash()", "vector.length() time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8) return Sequence(Func(node.setPos,", "bossTrack def makeEpilogueMovie(self): epSpeech = TTLocalizer.WitnessToonCongratulations epSpeech = self.__talkAboutPromotion(epSpeech) bossTrack", "name=intervalName) seq.start() 
self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def __onToPrepareBattleThree(self): self.notify.debug('-----", "intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName)", "rollTrack = Sequence( Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0)) rollTrackDuration =", "diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self): self.notify.debug('----- __doneBattleThree') self.setState('NearVictory') self.unstickBoss() def", "[]) def d_hitToon(self, toonId): self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon', [toonId]) def gotToon(self,", "(ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt else: percentDamaged", "return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos)) def __makeRollToBattleTwoMovie(self): startPos", "taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode') self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = None self.betweenBattleMusic.stop()", "index = 0 self.involvedToons.sort() for toonId in self.involvedToons: if index", "' % prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos = %s' % locatorRenderPos) beamPos =", "= False def __init__(self, cr): self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self,", "self.geom.setPos(0, 0, -71.601) self.geom.setScale(1) self.elevatorEntrance = self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() 
self.elevatorEntrance.setScale(1) elevatorModel", "defaultWeight = 1 bonusWeight = 0 newWeight = 1 cannonIndex", "lawyerIds): self.lawyers = [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) def", "localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat()", "intervalName) def setTaunt(self, tauntIndex, extraInfo): gotError = False if not", "-0.25, -2, -0.125, 0, 1.0, 0, 1.0) self.beamNodePath = NodePath('scaleBeam')", "render), Func(camera.setPos, -15, 15, 20), Func(camera.setHpr, -90, 0, 0), Wait(3),", "direct.showutil import Rope from toontown.distributed import DelayDelete from toontown.battle import", "self.accept('enterWitnessStand', self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton)", "toonId, cannonIndex): if base.localAvatar.doId == toonId: self.cannonIndex = cannonIndex def", "self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath = self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator", "def loadCannons(self): pass def loadWitnessStand(self): self.realWitnessStand = self.geom.find('**/WitnessStand') if not", "seatCenter.getPos() self.notify.debug('center = %s' % center) self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() -", "base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage = 25 self.evidenceHitSfx = None self.toonUpSfx =", "(5.6, 
Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)), (12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18, Func(self.setChatAbsolute,", "CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3 = self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty(): self.door3", "base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) self.__showWitnessToon() diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage,", "gearRoot = self.rotateNode.attachNewNode('gearRoot') if side == 0: gearRoot.setPos(0, -7, 3)", "if place and hasattr(place, 'fsm'): place.setState('waitForBattle') def makeToonsWait(self): self.notify.debug('makeToonsWait') for", "exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive() def enterElevator(self): self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive()", "delete') DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self, bossDamage): self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss', [bossDamage]) def", "repr(entry.getIntoNodePath())) return doId = int(avatarDoId) if doId != localAvatar.doId: self.d_hitToon(doId)", "self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated) if self.numToonJurorsSeated == 0:", "= self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide() def loadScaleOld(self): startingTilt = 0", "0: if self.bonusWeight == 1: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus", "attackCode, avId) if attackCode == ToontownGlobals.BossCogAreaAttack: 
self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def setBattleDifficulty(self,", "bonusWeight = 0 newWeight = defaultWeight + bonusWeight self.notify.debug('toon %d", "base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName", "seq seq.start() def replaceCollisionPolysWithPlanes(self, model): newCollisionNode = CollisionNode('collisions') newCollideMask =", "toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes = delayDeletes ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic,", "if len(toonIds) < 5: points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] else:", "'prepareBattleThree' seq = Sequence(prepareBattleThreeMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleThree(self,", "== lawyer.doId: lawyer.sendUpdate('hitByToon', []) def __finalPieSplat(self, toon, pieCode): if pieCode", "bossDamage, recoverRate, timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage self.recoverRate", "CollisionTube(0, 1, 4, 0, 1, 7, 3.5) shieldNode = CollisionNode('BossZap')", "ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack = Sequence() self.notify.debug('calling setPosHpr') myInterval = camera.posHprInterval(8, Point3(-22,", "time=self.battleThreeMusicTime) def exitNearVictory(self): self.notify.debug('----- exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice'))", "self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) 
self.makeScaleReflectDamage()", "self.unstickBoss() intervalName = 'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo')", "direct.fsm import State from direct.directnotify import DirectNotifyGlobal from toontown.toonbase import", "import MovieToonVictory from toontown.building import ElevatorUtils from toontown.battle import RewardPanel", "return Task.cont def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes): self.notify.debug('-----", "self.door3 = self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty(): self.door3 = self.geom.find('**/interior/CR3_Door') self.mainDoor =", "toon pos %s ' % toon.getPos()) def touchedGavel(self, gavel, entry):", "self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat() self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy =", "bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node", "__onToBattleTwo') self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo') self.show()", "self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds = self.defenseLocator.getBounds() defenseLocPos = defenseLocBounds.getCenter()", "= 1 self.raised = 1 self.forward = 1 self.doAnimate() self.__hideWitnessToon()", "TTLocalizer.WitnessToonAllJurors else: juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated juryResult += '\\x07'", "time=self.battleThreeMusicTime) def 
__doneReward(self): self.notify.debug('----- __doneReward') self.doneBarrier('Reward') self.toWalkMode() def exitReward(self): self.notify.debug('-----", "looping=1, volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self): self.notify.debug('----- exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies')", "self.witnessToon.clearChat() self.reparentTo(render) self.happy = 1 self.raised = 1 self.forward =", "outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self, task): self.notify.debug('----- __howToGetPies')", "None for plane in planes: if lastPlane == None or", "attackCode = int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def createBlock(self,", "0 self.battleThreeMusic.stop() def enterEpilogue(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons()", "__onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss() intervalName", "self.startJuryBoxMoving() for index in xrange(len(self.cannons)): cannon = self.cannons[index] cannon.cannon.show() def", "self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) self.__showWitnessToon() diffSettings", "prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos = %s' % locatorRenderPos) beamPos = self.beamNodePath.getPos() beamRelPos", "~ToontownGlobals.PieBitmask newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol = 
self.scaleNodePath.find('**/DefenseHighCol')", "self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat() self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy", "pieCode == ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if toon == localAvatar: pass elif", "diff) self.battleDifficulty = diff def toonEnteredCannon(self, toonId, cannonIndex): if base.localAvatar.doId", "Func(self.witnessToon.setLocalPageChat, epSpeech, 0)) return bossTrack def makeDefeatMovie(self): bossTrack = Track((0.0,", "= %s' % prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator = self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds", "Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self):", "Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0))", "percentDamaged * ToontownGlobals.LawbotBossWinningTilt else: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage -", "volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self): self.notify.debug('----- __doneReward') self.doneBarrier('Reward') self.toWalkMode() def exitReward(self):", "def __loadMopaths(self): self.notify.debug('----- __loadMopaths') self.toonsEnterA = Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward =", "DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName = self.uniqueName('DestroyedBoss') 
self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat')", "direct.showbase.PythonUtil import Functor from direct.showbase.PythonUtil import StackTrace from direct.gui.DirectGui import", "%s' % origPos) self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform()))", "intervalName = 'ChiefJusticeTaunt' seq = Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString, CFSpeech)) seq.append(Wait(4.0))", "self.notify.debug('rollTrackDuration = %f' % rollTrackDuration) doorStartPos = self.door3.getPos() doorEndPos =", "return attackCode = int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def", "7 numToons = len(self.involvedToons) center = (numToons - 1) /", "planes = [] collList = model.findAllMatches('**/+CollisionNode') if not collList: collList", "self.d_hitBoss(self.panDamage) elif pieCode == ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if toon == localAvatar:", "- bossDamage, self.bossMaxDamage) def getBossDamage(self): self.notify.debug('----- getBossDamage') now = globalClock.getFrameTime()", "2.0) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode,", "no attr nametag') gotError = True if gotError: st =", "CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons,", "180)) track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0) bossTrack.append(track)", "self.insidesANodePath: if isOpen: self.insidesANodePath.unstash() else: self.insidesANodePath.stash() def doorBCallback(self, isOpen): if", "time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8) return Sequence(Func(node.setPos, fromPos),", "self.clearChat() 
self.witnessToon.clearChat() self.reparentTo(render) self.happy = 1 self.raised = 1 self.forward", "ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry) if pieCode != ToontownGlobals.PieCodeToon: return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId')", "= plane return NodePath(newCollisionNode) def makeIntroductionMovie(self, delayDeletes): self.notify.debug('----- makeIntroductionMovie') for", "points = %s' % points[0][0]) for i in xrange(len(toonIds)): toon", "self.prosecutionPanNodePath.getPos() origin = Point3(0, 0, 0) prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin)", "def doorACallback(self, isOpen): if self.insidesANodePath: if isOpen: self.insidesANodePath.unstash() else: self.insidesANodePath.stash()", "y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2,", "0 self.involvedToons.sort() for toonId in self.involvedToons: if index in self.cannons:", "xrange(numGears): node = gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0, 0) gear =", "into) def createBlock(self, x1, y1, z1, x2, y2, z2, r", "self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0, 3) self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom = self.createBlock(0.5, 0.5, 0,", "= None self.juryTimer = None self.witnessToon = None self.witnessToonOnstage =", "self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos()) self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds =", "= GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1, y1, z1) vertexWriter.addData3f(x2, y1, z1) vertexWriter.addData3f(x1,", "= None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify = 
DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions =", "0, -1.5, 0.6) defenseTube.setTangible(1) defenseCollNode = CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath =", "return def doStrafe(self, side, direction): gearRoot = self.rotateNode.attachNewNode('gearRoot') if side", "__onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo')", "self.lawyerRequest = None self.bossDamage = 0 self.attackCode = None self.attackAvId", "[] for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon:", "self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie = None", "CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))", "self.notify.debug('panRenderPos = %s' % panRenderPos) prosecutionLocatorPos = self.prosecutionLocator.getPos() prosecutionLocatorRelPos =", "seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close')) self.__cleanupStrafe() self.strafeInterval", "from toontown.toonbase import TTLocalizer import SuitDNA from toontown.toon import Toon", "self.battleThreeMusicTime = 0 self.insidesANodePath = None self.insidesBNodePath = None self.strafeInterval", "= base.cr.doId2do.get(toonIds[i]) if toon: toon.wrtReparentTo(render) pos, h = points[i] if", "= CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50))) planeNode = 
CollisionNode('dropPlane')", "volume=0.25) if toon == localAvatar: self.d_hitBoss(self.panDamage) elif pieCode == ToontownGlobals.PieCodeProsecutionPan:", "g = 1.0, b = 1.0, a = 1.0): gFormat", "+= 1 return retVal def calculateWeightOfToon(self, toonId): defaultWeight = 1", "self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() if self.juryTimer: self.juryTimer.destroy() del self.juryTimer", "toonId = self.involvedToons[extraInfo] toon = base.cr.doId2do.get(toonId) if toon: chatString =", "b = 1.0, a = 1.0): gFormat = GeomVertexFormat.getV3n3cpt2() myVertexData", "self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0,", "targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0,", "= %s' % points[0][0]) for i in xrange(len(toonIds)): toon =", "juryResult trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: newWeight,", "Parallel() index = 0 self.involvedToons.sort() for toonId in self.involvedToons: toon", "newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash()", "taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def __pieSplat(self,", "track = Parallel() bossAnimTrack = Sequence( ActorInterval(self, 'Ff_speech', startTime=2, duration=10,", "in xrange(len(self.lawyers)): suit = 
self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId) return def", "h = random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x,", "self.cleanupPanFlash() intervalName = 'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1, 1, 1) seq =", "Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0),", "== cannonIndex: retVal += 1 return retVal def calculateWeightOfToon(self, toonId):", "angle = 90 - 15 * (i - center) radians", "3.5) shieldNode = CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath =", "newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium", "= render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos) self.notify.debug('prosecutionPanRelPos =", "points[i] toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h, 0, 0)", "ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight", "self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight > 0: if self.bonusWeight == 1: juryWeightBonus", "def setBossDamage(self, bossDamage, recoverRate, timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage =", "pieCode == ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry) if pieCode != ToontownGlobals.PieCodeToon: return avatarDoId", "self.witnessToon.removeActive() def exitWaitForToons(self): self.notify.debug('----- exitWaitForToons') 
DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive() def enterElevator(self):", "self return def disable(self): global OneBossCog self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off')", "startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1))", "self.notify.debug('toon = %s' % toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0, 8, 0) toon.setH(180)", "seq.append(Func(self.clearChat)) return seq def __makeBossDamageMovie(self): self.notify.debug('---- __makeBossDamageMovie') startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0],", "0 newWeight = 1 cannonIndex = self.cannonIndex numJurors = 0", "DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self, bossDamage): self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss', [bossDamage]) def d_healBoss(self,", "= 0 if not cannonIndex == None and cannonIndex >=", "base.playSfx(self.toonUpSfx) if not self.bonusTimer: self.bonusTimer = ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration,", "for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render)", "% repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold = 0.1 planes.sort(lambda p1, p2:", "for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: if", "= None self.panFlashInterval = None self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat',", "self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide() def loadScaleOld(self): startingTilt =", "self.juryBoxIval: 
self.juryBoxIval.finish() self.juryBoxIval = None if self.juryBox: self.juryBox.removeNode() return def", "1.0) colorWriter.addData4f(r, g, b, a) texWriter.addData2f(1.0, 1.0) tris = GeomTriangles(Geom.UHDynamic)", "self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom = self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3)", "exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if not self.mainDoor.isEmpty(): pass if not self.reflectedMainDoor.isEmpty():", "1 else: self.notify.warning('No cannon %d but we have a toon", "0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)), name=intervalName) self.panFlashInterval", "= NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube = CollisionTube(0,", "exitBattleThree(self): self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName = self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp'))", "def exitBattleThree(self): self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName = self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName)", "if doId != localAvatar.doId: self.d_hitToon(doId) def __lawyerGotHit(self, entry): lawyerCol =", "bossTrack def __showOnscreenMessage(self, text): self.notify.debug('----- __showOnscreenmessage') if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage", "= None self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter,", "== 0: gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180, 0, 0) door =", "180.0 if 
direction == 1: spread = -spread dist =", "== None or self.battleB == None: pass return def exitBattleOne(self):", "= ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage) if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight)", "self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer) def setAttackCode(self, attackCode, avId = 0): DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode,", "self.clearChat() self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy = 1 self.raised = 1", "= Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2]) rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None,", "base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) self.startJuryBoxMoving() for index in xrange(len(self.cannons)): cannon =", "juryResult += '\\x07' trialSpeech = juryResult trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree diffSettings", "startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo,", "self.debugPositions: self.reflectedPodium.show() def loadCannons(self): pass def loadWitnessStand(self): self.realWitnessStand = self.geom.find('**/WitnessStand')", "pieCode == ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25) if toon", "/ 2.0 for i in xrange(numToons): toon = self.cr.doId2do.get(self.involvedToons[i]) if", "y = math.sin(radians) * radius toon.setPos(self.witnessToon, x, y, 0) toon.headsUp(self.witnessToon)", "self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge') 
center = seatCenter.getPos() self.notify.debug('center =", "self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol) self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice'))", "direct.showbase.ShowBase import * from direct.interval.IntervalGlobal import * from toontown.battle.BattleProps import", "= self.geom.find('**/interiorrefl/CR3_Door') if not self.reflectedMainDoor.isEmpty(): itemsToHide = ['Reflections/Door_1'] for str", "self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) self.startJuryBoxMoving() for index in", "% repr(entry.getIntoNodePath())) return doId = int(avatarDoId) if doId != localAvatar.doId:", "if self.dizzy: self.flashRed() self.doAnimate('hit', now=1) elif pieCode == ToontownGlobals.PieCodeDefensePan: self.flashRed()", "d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', []) def d_hitToon(self, toonId): self.notify.debug('----- d_hitToon')", "juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech += '\\x07' trialSpeech += weightBonusText", "= Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech)))", "self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self, task): self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self,", "seq seq.start() self.storeInterval(seq, intervalName) def saySomething(self, chatString): intervalName = 'ChiefJusticeTaunt'", "0): DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId) if attackCode == 
ToontownGlobals.BossCogAreaAttack: self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx)", "0, 0) def exitElevator(self): self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def enterIntroduction(self):", "(battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale())) myCurPos = self.getPos()", "base.playSfx(self.warningSfx) def setBattleDifficulty(self, diff): self.notify.debug('battleDifficulty = %d' % diff) self.battleDifficulty", "+= TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1) return speech def __positionToonsInFrontOfCannons(self):", "TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo, render))), (9.6,", "self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon', [toonId]) def gotToon(self, toon): stateName = self.state", "> 3: pos.setY(pos.getY() + 2.0) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos", "toon =%d' % (index, toonId)) allCannonsAppear.append(multiCannons) intervalName = 'prepareBattleTwoCannonsAppear' seq", "self.witnessToon.detachNode() self.witnessToonOnstage = 0 def __hideToons(self): for toonId in self.involvedToons:", "threshold)) lastPlane = None for plane in planes: if lastPlane", "bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos),", "def makeToonsWait(self): self.notify.debug('makeToonsWait') for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId)", "0 self.forward = 1 intervalName = 'DefeatMovie' seq = Sequence(self.makeDefeatMovie(),", "0.01, self.unstashBoss, 
'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1],", "three state, state=%s', self.state) gotError = True if not hasattr(self,", "== 'Elevator': self.placeToonInElevator(toon) def setLawyerIds(self, lawyerIds): self.lawyers = [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest)", "'Ff_speech', loop=1)) return bossTrack def makeEpilogueMovie(self): epSpeech = TTLocalizer.WitnessToonCongratulations epSpeech", "def exitBattleOne(self): self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self): self.stash() def unstashBoss(self,", "Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25) bossTrack = Track( (0.5, Sequence(", "3) gearRoot.setHpr(180, 0, 0) door = self.doorA else: gearRoot.setPos(0, 7,", "myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22,", "4.0 * t spread = 60 * math.pi / 180.0", "= Sequence( Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0)) rollTrackDuration = rollTrack.getDuration()", "self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def exitIntroduction(self): self.notify.debug('----- exitIntroduction')", "self.stopAnimate() self.doneBarrier('Victory') def exitVictory(self): self.notify.debug('----- exitVictory') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime", "math.cos(radians) * radius y = math.sin(radians) * radius toon.setPos(self.witnessToon, x,", "-0.5, -2, 1.0, 0, 0, 1.0) self.prosecutionPanNodePath = NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom)", "diff def toonEnteredCannon(self, toonId, cannonIndex): if base.localAvatar.doId == toonId: 
self.cannonIndex", "self.toonsEnterA.reset() self.toonsEnterB.reset() def enterOff(self): self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon: self.witnessToon.clearChat()", "panelName = self.uniqueName('reward') self.rewardPanel = RewardPanel.RewardPanel(panelName) victory, camVictory, skipper =", "__recoverBossDamage') if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return Task.cont def __walkToonToPromotion(self,", "'ToonJuror' or chair.state == None and chair.newState == 'ToonJuror': self.numToonJurorsSeated", "= GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic) vertexWriter = GeomVertexWriter(myVertexData, 'vertex')", "+ ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos = self.reflectedJuryBox.getPos() reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0],", "self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage self.elevatorType = ElevatorConstants.ELEVATOR_CJ self.gavels = {} self.chairs", "GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic) vertexWriter = GeomVertexWriter(myVertexData, 'vertex') normalWriter", "1, 'walk'), Func(toon.loop, 'neutral'))) return ival def toonsToBattlePosition(self, toonIds, battleNode):", "rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1, doorStartPos))))", "= Sequence(self.makeEpilogueMovie(), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1,", "self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint result = %s' % pos) self.notify.debug('walkToonsToBattlePosition: final pos", "Vec3(toPos - fromPos) distance = vector.length() time = distance /", 
"self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() - 1.5) self.witnessToon.setY(self.witnessToon.getY() - 1.15) self.witnessToonOnstage = 1", "attr nametag') gotError = True if gotError: st = StackTrace()", "self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None return def __showWaitingMessage(self, task): self.notify.debug('-----", "= self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos = %s", "0, -0.25, -0.25, 3) self.standNodePath = NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)", "% (index, toonId)) allCannonsAppear.append(multiCannons) intervalName = 'prepareBattleTwoCannonsAppear' seq = Sequence(allCannonsAppear,", "= self.getPos() self.notify.debug('myCurPos = %s' % self.getPos()) self.notify.debug('battleNode.parent() = %s'", "if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel +", "TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack =", "diffSettings[5] if bonusWeight < 0: bonusWeight = 0 newWeight =", "self.panFlashInterval = None self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage", "= 'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie: if self.bossDamage >= self.bossMaxDamage: self.notify.debug('finish", "= ['Reflections/Door_1'] for str in itemsToHide: stuffToHide = self.geom.find('**/%s' %", "self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85: self.unstashBaseCol() else: self.stashBaseCol() def unloadEnvironment(self):", 
"self.rewardPanel self.battleThreeMusicTime = 0 self.battleThreeMusic.stop() def enterEpilogue(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat()", "def enterDefeat(self): self.notify.debug('----- enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1) self.happy", "bossTrack def __makeWitnessToon(self): dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc = Toon.Toon() npc.setDNAString(dnaNetString)", "self.reflectedMainDoor.stash() def exitIntroduction(self): self.notify.debug('----- exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if not self.mainDoor.isEmpty():", "self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1) self.happy = 0 self.raised =", "self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) if len(toonIds) < 5: points", "self.numToonJurorsSeated) if self.numToonJurorsSeated == 0: juryResult = TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated", "TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return seq def __makeBossDamageMovie(self): self.notify.debug('---- __makeBossDamageMovie')", "cubeGeom.addPrimitive(tris) cubeGN = GeomNode('cube') cubeGN.addGeom(cubeGeom) return cubeGN def __enterDefenseCol(self, entry):", "if pieCode != ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def cleanupAttacks(self):", "gear = gearModel.instanceTo(node) angle = (float(i) / (numGears - 1)", "= Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos =", "Sequence() 
bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None,", "1) bossTrack.append(track) track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1)", "enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() self.witnessToon.removeActive() def exitWaitForToons(self): self.notify.debug('----- exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show()", "= [] for i in xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA", "intervalName = 'prepareBattleTwoCannonsAppear' seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName) seq.start() self.storeInterval(seq,", "avId) if attackCode == ToontownGlobals.BossCogAreaAttack: self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def setBattleDifficulty(self, diff):", "if not hasattr(self, 'state'): self.notify.warning('returning from setTaunt, no attr state')", "=%s' % renderPos) index += 1 self.notify.debug('done with positionToons') def", "ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout) base.playSfx(self.toonUpSfx) if not self.bonusTimer: self.bonusTimer", "'vertex') normalWriter = GeomVertexWriter(myVertexData, 'normal') colorWriter = GeomVertexWriter(myVertexData, 'color') texWriter", "return NodePath(newCollisionNode) def makeIntroductionMovie(self, delayDeletes): self.notify.debug('----- makeIntroductionMovie') for toonId in", "elif pieCode == ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25) if", "CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) 
self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))", "self.involvedToons.sort() for toonId in self.involvedToons: if index in self.cannons: cannon", "if diffDamage >= 0: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage -", "= angle * math.pi / 180.0 x = math.cos(radians) *", "= CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6) prosecutionTube.setTangible(1) prosecutionCollNode", "setBattleDifficulty(self, diff): self.notify.debug('battleDifficulty = %d' % diff) self.battleDifficulty = diff", "= Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH,", "%s' % prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator = self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds =", "Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s", "self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie()", "self.geom.find('**/Reflections') ug.setBin('ground', -10) def loadJuryBox(self): self.juryBox = self.geom.find('**/JuryBox') juryBoxPos =", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def doorACallback(self, isOpen): if self.insidesANodePath: if isOpen:", "self.numToonJurorsSeated = 0 for key in self.chairs.keys(): chair = self.chairs[key]", 
"__onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') def exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree')", "self.doAnimate('hit', now=1) elif pieCode == ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath,", "startHpr) bossTrack = Sequence() bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos,", "DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy = 1 self.raised = 1", "* from direct.distributed.ClockDelta import * from direct.showbase.PythonUtil import Functor from", "-100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13,", "self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss') def __onToPrepareBattleTwo(self):", "90 - 15 * (i - center) radians = angle", "def walkToonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode))", "xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: pos, h = points[i]", "toPos): self.notify.debug('----- __walkSuitToPoint') vector = Vec3(toPos - fromPos) distance =", "self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self): self.podium = self.geom.find('**/Podium') newZ", "= diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage) tilt = percentDamaged *", 
"len(toonIds) < 5: points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] else: points", "-2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5)) insidesANode =", "self.state) gotError = True if not hasattr(self, 'nametag'): self.notify.warning('returning from", "self.recoverRate = 0 self.recoverStartTime = 0 self.bossDamageMovie = None self.everThrownPie", "from toontown.toonbase import ToontownTimer OneBossCog = None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM):", "dist * math.sin(angle) y = dist * math.cos(angle) h =", "toon.hide() def __showToons(self): for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId)", "1: spread = -spread dist = 50 rate = time", "mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon()", "y2, z1) vertexWriter.addData3f(x1, y1, z2) vertexWriter.addData3f(x2, y1, z2) vertexWriter.addData3f(x1, y2,", "z1) vertexWriter.addData3f(x1, y1, z2) vertexWriter.addData3f(x2, y1, z2) vertexWriter.addData3f(x1, y2, z2)", "% stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not found %s' % stuffToHide) self.geom.reparentTo(render)", "def exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss() intervalName = 'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop()", "= self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds = self.defenseLocator.getBounds() defenseLocPos", "self.juryBoxIval.finish() self.juryBoxIval = None if self.juryBox: self.juryBox.removeNode() return def doStrafe(self,", "if not 
self.reflectedWitnessStand.isEmpty(): pass colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def loadScale(self):", "self.useCannons = 1 self.juryBoxIval = None self.juryTimer = None self.witnessToon", "getBossDamage') now = globalClock.getFrameTime() elapsed = now - self.recoverStartTime return", "= 0 self.scaleNodePath = NodePath('injusticeScale') beamGeom = self.createBlock(0.25, 2, 0.125,", "if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0,", "bossDamage): self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss', [bossDamage]) def d_healBoss(self, bossHeal): self.notify.debug('----- d_bossHeal')", "prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom", "self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol',", "exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime =", "enteredBonusState(self): self.witnessToon.clearChat() text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech", "beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() 
self.notify.debug('beamLocatorPos = %s' %", "-0.5, -0.5, -2, 0, 0, 1.0, 0.25) self.defensePanNodePath = NodePath('defensePan')", "def enterVictory(self): self.notify.debug('----- enterVictory') self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat()", "def setLawyerIds(self, lawyerIds): self.lawyers = [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds,", "Func(self.__doneReward)) intervalName = 'RewardMovie' delayDeletes = [] for toonId in", "__showOnscreenmessage') if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None self.onscreenMessage = DirectLabel(text=text,", "self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0", "25 self.evidenceHitSfx = None self.toonUpSfx = None self.bonusTimer = None", "'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1) bossTrack.append(track)", "0, 0) self.notify.debug('new toon pos %s ' % toon.getPos()) def", "'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0) bossTrack.append(track)", "itemsToHide = ['Reflections/Door_1'] for str in itemsToHide: stuffToHide = self.geom.find('**/%s'", "self.elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic", "TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated == 1: juryResult = TTLocalizer.WitnessToonOneJuror elif self.numToonJurorsSeated", 
"texWriter.addData2f(1.0, 1.0) tris = GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1) tris.addVertex(2) tris.closePrimitive() tris.addVertex(1)", "Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2]) rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos,", "if avatarDoId == '': self.notify.warning('Toon %s has no avatarDoId tag.'", "not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if not self.elevatorEntrance.isEmpty(): pass def enterBattleOne(self): self.notify.debug('-----", "= Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0)) return bossTrack def makeDefeatMovie(self):", "= 0 def __hideToons(self): for toonId in self.involvedToons: toon =", "floor.isEmpty(): floor = self.geom.find('**/CR3_Floor') self.evFloor = self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane", "intervalName = 'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del self.rewardPanel self.battleThreeMusicTime =", "curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos),", "attackCode = int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def touchedGavelHandle(self,", "ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def cleanupAttacks(self): self.notify.debug('----- cleanupAttacks') self.__cleanupStrafe()", "%s' % beamBoundsCenter) beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos", "self.panFlashInterval: self.panFlashInterval.finish() self.panFlashInterval = None return def flashPanBlue(self): 
self.cleanupPanFlash() intervalName", "self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty(): self.door3 = self.geom.find('**/interior/CR3_Door') self.mainDoor = self.geom.find('**/Door_1') if", "if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage) def", "self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos) self.notify.debug('prosecutionPanRelPos = %s' % prosecutionPanRelPos)", "battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h,", "TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8,", "from toontown.toon import Toon from toontown.battle import BattleBase from direct.directutil", "bossHeal): self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss', [bossHeal]) def d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides',", "' % prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos = %s ' % prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos", "self.door3 = self.geom.find('**/interior/CR3_Door') self.mainDoor = self.geom.find('**/Door_1') if not self.mainDoor.isEmpty(): itemsToHide", "MovieToonVictory from toontown.building import ElevatorUtils from toontown.battle import RewardPanel from", "VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack = Sequence() myInterval = camera.posHprInterval(8, Point3(-22,", "self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol) 
self.accept('enterProsecutionCol', self.__enterProsecutionCol)", "= int(avatarDoId) if doId != localAvatar.doId: self.d_hitToon(doId) def __lawyerGotHit(self, entry):", "self.lawyers: if lawyerDoId == lawyer.doId: lawyer.sendUpdate('hitByToon', []) def __finalPieSplat(self, toon,", "self.geom.removeNode() del self.geom def __loadMopaths(self): self.notify.debug('----- __loadMopaths') self.toonsEnterA = Mopath.Mopath()", "% pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate,", "self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel:", "else: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def toonGotHealed(self, toonId): toon =", "= 1.0, a = 1.0): gFormat = GeomVertexFormat.getV3n3cpt2() myVertexData =", "gearTrack, Func(door.request, 'close')) self.__cleanupStrafe() self.strafeInterval = seq seq.start() def replaceCollisionPolysWithPlanes(self,", "cubeGN.addGeom(cubeGeom) return cubeGN def __enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self, entry):", "beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() negBeamLocatorPos = -beamLocatorPos self.notify.debug('beamLocatorPos", "= NodePath('injusticeScale') beamGeom = self.createBlock(0.25, 2, 0.125, -0.25, -2, -0.125,", "diff): self.notify.debug('battleDifficulty = %d' % diff) self.battleDifficulty = diff def", "= self.defenseLocator.getBounds() defenseLocPos = defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos = %s' % defenseLocPos)", "['interior/Door_1'] for str in itemsToHide: stuffToHide = self.geom.find('**/%s' % str)", "base.cr.doId2do.get(toonIds[i]) if toon: pos, h = points[i] origPos = pos", "self.cr.doId2do.get(toonId) if toon: 
toon.show() def __arrangeToonsAroundWitnessToon(self): radius = 7 numToons", "self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds = self.defenseLocator.getBounds() defenseLocPos = defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos", "self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat,", "self.juryTimer = None for chair in self.chairs.values(): chair.stopCogsFlying() return def", "pieCode != ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def cleanupAttacks(self): self.notify.debug('-----", "if self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie = None self.unstickBoss() taskName = 'RecoverBossDamage'", "plane.compareTo(lastPlane, threshold) != 0: cp = CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane =", "lawyerCol = entry.getIntoNodePath() names = lawyerCol.getName().split('-') lawyerDoId = int(names[1]) for", "Func(self.__continueVictory), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)", "= GeomVertexFormat.getV3n3cpt2() myVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic) vertexWriter", "0.125, -0.25, -2, -0.125, 0, 1.0, 0, 1.0) self.beamNodePath =", "= diff def toonEnteredCannon(self, toonId, cannonIndex): if base.localAvatar.doId == toonId:", "intervalName = 'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start() self.storeInterval(seq,", "self.notify.debug('----- enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1) self.happy = 0", "%s' % battleNode.getParent()) 
self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos()) bnParent =", "def __showCannonsAppearing(self, elapsedTime = 0): allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar),", "numJurors = self.numJurorsSeatedByCannon(cannonIndex) bonusWeight = numJurors - diffSettings[5] if bonusWeight", "bossTrack = Track( (0.5, Sequence( Func(self.clearChat), Func(camera.reparentTo, render), Func(camera.setPos, -3,", "doorEndPos, startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech',", "= 35 def __unloadMopaths(self): self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset() def enterOff(self):", "ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self): self.podium = self.geom.find('**/Podium') newZ = self.podium.getZ() -", "rollTrackDuration = rollTrack.getDuration() self.notify.debug('rollTrackDuration = %f' % rollTrackDuration) doorStartPos =", "Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode))) seq = Sequence(Func(door.request, 'open'), Wait(0.7),", "finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')),", "0, 1), Point3(0, 0, -50))) planeNode = CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask)", "0 self.bonusWeight = 0 self.numJurorsLocalToonSeated = 0 self.cannonIndex = -1", "defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos()) self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds", 
"d_healBoss(self, bossHeal): self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss', [bossHeal]) def d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides')", "self.loadScaleNew() def __debugScale(self): prosecutionPanPos = self.prosecutionPanNodePath.getPos() origin = Point3(0, 0,", "__continueVictory') self.stopAnimate() self.doneBarrier('Victory') def exitVictory(self): self.notify.debug('----- exitVictory') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)", "self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True) ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward))", "for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: pos,", "self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset() def enterOff(self): self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if", "from setTaunt, no attr state') gotError = True elif not", "self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed = True def unstashBaseCol(self): if self.baseColStashed:", "cubeGeom = Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN = GeomNode('cube') cubeGN.addGeom(cubeGeom) return cubeGN", "+ 25) bossTrack = Track( (0.5, Sequence( Func(self.clearChat), Func(camera.reparentTo, render),", "DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def enterIntroduction(self): self.notify.debug('----- enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon()", "= TTLocalizer.WitnessToonCongratulations epSpeech = self.__talkAboutPromotion(epSpeech) bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat,", "8, 2), Func(camera.setHpr, 180, 10, 0), 
Func(self.__doWitnessPrepareBattleThreeChat)) return movie def", "bossDoneEventName = self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie')", "/ (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt else:", "insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target", "* t + 0.5) time = 5.0 - 4.0 *", "import * from direct.showbase.PythonUtil import Functor from direct.showbase.PythonUtil import StackTrace", "ActorInterval(self, 'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack) attackToons = TTLocalizer.BossCogAttackToons dialogTrack =", "= gearModel.instanceTo(node) angle = (float(i) / (numGears - 1) -", "0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180,", "movie def countToonJurors(self): self.numToonJurorsSeated = 0 for key in self.chairs.keys():", "self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0 self.raised =", "self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed = True def unstashBaseCol(self): if", "self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl') reflectedZ = self.reflectedPodium.getZ() if not self.debugPositions: self.reflectedPodium.setZ(reflectedZ)", "self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode') 
self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = None", "self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides', []) def d_hitDefensePan(self): self.notify.debug('----- d_hitDefensePan') self.sendUpdate('hitDefensePan', [])", "else: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA)", "class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions = False def", "return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self): if self.cr:", "self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol') oldBitMask = self.baseHighCol.getCollideMask() newBitMask = oldBitMask", "self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage) def getBossDamage(self): self.notify.debug('----- getBossDamage') now", "localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH,", "0, -12.645) self.reflectedJuryBox.setPos(-30, 0, 0) curPos = self.juryBox.getPos() endingAbsPos =", "base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self): self.notify.debug('----- __doneReward') self.doneBarrier('Reward') self.toWalkMode()", "__doneReward(self): self.notify.debug('----- __doneReward') self.doneBarrier('Reward') self.toWalkMode() def exitReward(self): self.notify.debug('----- exitReward') intervalName", "0.25) self.defensePanNodePath = NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) 
self.defensePanNodePath.setPos(0, -2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube", "def loadScaleOld(self): startingTilt = 0 self.scaleNodePath = NodePath('injusticeScale') beamGeom =", "'ToonJuror': if chair.toonJurorIndex == cannonIndex: retVal += 1 return retVal", "makeVictoryMovie(self): myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos = Point3(myFromPos[0], myFromPos[1]", "ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos", "= 1.0): gFormat = GeomVertexFormat.getV3n3cpt2() myVertexData = GeomVertexData('holds my vertices',", "= globalClock.getFrameTime() elapsed = now - self.recoverStartTime return max(self.bossDamage -", "= Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0,", "def __continueVictory(self): self.notify.debug('----- __continueVictory') self.stopAnimate() self.doneBarrier('Victory') def exitVictory(self): self.notify.debug('----- exitVictory')", "rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0) rollTrack = Sequence(", "bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0)) return bossTrack def", "if lawyerDoId == lawyer.doId: lawyer.sendUpdate('hitByToon', []) def __finalPieSplat(self, toon, pieCode):", "= CollisionNode('collisions') newCollideMask = BitMask32(0) planes = [] collList =", "1 self.notify.debug('done with positionToons') def __makePrepareBattleTwoMovie(self): chatString = TTLocalizer.WitnessToonPrepareBattleTwo %", "__howToThrowPies(self, task): self.notify.debug('----- 
__howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self): self.everThrownPie = 1", "unloadEnvironment(self): self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom def __loadMopaths(self): self.notify.debug('-----", "% (render.getScale(), battleNode.getScale())) myCurPos = self.getPos() self.notify.debug('myCurPos = %s' %", "%s ' % prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos = %s ' % prosecutionLocatorRelPos)", "self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated) if self.numToonJurorsSeated", "if hasLocalToon: seq += [Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr,", "in itemsToHide: stuffToHide = self.geom.find('**/%s' % str) if not stuffToHide.isEmpty():", "Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1, volume=0.9,", "loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog", "if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel ==", "place.setState('waitForBattle') def makeToonsWait(self): self.notify.debug('makeToonsWait') for toonId in self.involvedToons: toon =", "def cleanupAttacks(self): self.notify.debug('----- cleanupAttacks') self.__cleanupStrafe() def __cleanupStrafe(self): self.notify.debug('----- 
__cleanupStrage') if", "loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx = [] for i in xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode',", "localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat))", "ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack = Sequence() self.notify.debug('calling", "= Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos", "self.bonusWeight = 0 self.numJurorsLocalToonSeated = 0 self.cannonIndex = -1 return", "self.beamLocator = self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() negBeamLocatorPos", "self.strafeInterval: self.strafeInterval.finish() self.strafeInterval = None return def __cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox')", "self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie =", "pieCode): if pieCode == ToontownGlobals.PieCodeBossInsides: if toon == localAvatar: self.d_hitBossInsides()", "self.unstickBoss() def exitBattleThree(self): self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName = self.uniqueName('DestroyedBoss')", "- fromPos) distance = vector.length() time = distance / (ToontownGlobals.SuitWalkSpeed", "= rollTrack.getDuration() 
self.notify.debug('rollTrackDuration = %f' % rollTrackDuration) doorStartPos = self.door3.getPos()", "gotError = True elif not self.state == 'BattleThree': self.notify.warning('returning from", "= [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) def __gotLawyers(self, lawyers):", "self.juryTimer.destroy() del self.juryTimer if self.bonusTimer: self.bonusTimer.destroy() del self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if", "if diffSettings[4]: newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight >", "self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge') center = seatCenter.getPos() self.notify.debug('center", "self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if not self.reflectedJuryBox.isEmpty(): if self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() +", "stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not found %s' % stuffToHide) self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door')", "= self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath = self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator =", "collision solid: %s' % repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold = 0.1", "self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime = 
self.battleThreeMusic.getTime() self.battleThreeMusic.stop()", "self.__showWitnessToon() diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage) if diffSettings[4]:", "VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode))) seq", "hideBonusTimer(self): if self.bonusTimer: self.bonusTimer.hide() def enteredBonusState(self): self.witnessToon.clearChat() text = TTLocalizer.WitnessToonBonus", "for key in self.chairs.keys(): chair = self.chairs[key] if chair.state ==", "base.cr.doId2do.get(toonId) if toon: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName() else: chatString", "self.doneBarrier('RollToBattleThree') def exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree') self.unstickBoss() intervalName = 'RollToBattleThree' self.clearInterval(intervalName)", "speech): if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel", "* t spread = 60 * math.pi / 180.0 if", "enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon: self.witnessToon.clearChat() def enterWaitForToons(self): self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self)", "self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 0,", "model): newCollisionNode = CollisionNode('collisions') newCollideMask = BitMask32(0) planes = []", "index += 1 self.notify.debug('done with positionToons') def __makePrepareBattleTwoMovie(self): chatString =", "Sequence(name=name) seq += [Wait(0.0)] if hasLocalToon: seq += [Func(self.show), Func(camera.reparentTo,", "self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) 
def __gotLawyers(self, lawyers): self.lawyerRequest = None self.lawyers =", "speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1) return speech def", "if self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon = None return def __showWitnessToon(self):", "battlePos, battleHpr, 0) self.makeToonsWait() finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() +", "* elapsed / 60.0, 0) def __recoverBossDamage(self, task): self.notify.debug('----- __recoverBossDamage')", "finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def", "base.playMusic(self.promotionMusic, looping=1, volume=0.9) if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty():", "None or plane.compareTo(lastPlane, threshold) != 0: cp = CollisionPlane(plane) newCollisionNode.addSolid(cp)", "self.notify.debug('----- __loadMopaths') self.toonsEnterA = Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale", "= CollisionTube(0, -1, 4, 0, -1, 9, 3.5) targetNode =", "vertexWriter = GeomVertexWriter(myVertexData, 'vertex') normalWriter = GeomVertexWriter(myVertexData, 'normal') colorWriter =", "juryResult = TTLocalizer.WitnessToonOneJuror elif self.numToonJurorsSeated == 12: juryResult = TTLocalizer.WitnessToonAllJurors", "self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self): self.notify.debug('----- exitNearVictory') self.ignore('pieSplat')", "bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self): if self.cr: place =", "isOpen): if self.insidesANodePath: if isOpen: self.insidesANodePath.unstash() else: 
self.insidesANodePath.stash() def doorBCallback(self,", "TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)), (12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)),", "%d has weight of %d' % (toonId, newWeight)) return (newWeight,", "import * from direct.interval.IntervalGlobal import * from toontown.battle.BattleProps import *", "del self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog == self: OneBossCog = None", "colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def loadScale(self): self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0)", "return def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat',", "if self.bonusWeight == 1: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus =", "0, 0, 1.0) self.prosecutionPanNodePath = NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2, 0)", "self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del self.rewardPanel self.battleThreeMusicTime = 0 self.battleThreeMusic.stop() def", "__pieSplat(self, toon, pieCode): if pieCode == ToontownGlobals.PieCodeBossInsides: if toon ==", "self.juryBox.removeNode() return def doStrafe(self, side, direction): gearRoot = self.rotateNode.attachNewNode('gearRoot') if", "pass return def exitBattleOne(self): self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self): self.stash()", "0): self.notify.debug('----- __onToBattleTwo') 
self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.notify.debug('-----", "y2, z2) for index in xrange(8): normalWriter.addData3f(1.0, 1.0, 1.0) colorWriter.addData4f(r,", "index in xrange(8): normalWriter.addData3f(1.0, 1.0, 1.0) colorWriter.addData4f(r, g, b, a)", "toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.show() def", "self.witnessToon.setLocalPageChat(trialSpeech, 0) def __makePrepareBattleThreeMovie(self): movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15,", "= Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos", "self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos = %s' %", "entry.getIntoNodePath() names = lawyerCol.getName().split('-') lawyerDoId = int(names[1]) for lawyer in", "ival def toonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds,", "self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop()", "+= weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0) def __makePrepareBattleThreeMovie(self): movie = Sequence(Func(camera.reparentTo, render),", "self.bossDamage = 0 self.attackCode = None self.attackAvId = 0 self.recoverRate", "doorStartPos[1], doorStartPos[2] + 25) bossTrack = Track( (0.5, Sequence( Func(self.clearChat),", "center) radians = angle * math.pi / 180.0 
x =", "self.prosecutionPanNodePath.setPos(0, 2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube = CollisionTube(0, 0, -0.5, 0,", "0, 0, 3) intervalName = 'EpilogueMovie' seq = Sequence(self.makeEpilogueMovie(), name=intervalName)", "battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0], pos[1], pos[2],", "% battleNode.getParent()) self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos()) bnParent = battleNode.getParent()", "origin = Point3(0, 0, 0) prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos", "camVictory), Func(self.__doneReward)) intervalName = 'RewardMovie' delayDeletes = [] for toonId", "= battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0, 0)", "= TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated == 1: juryResult = TTLocalizer.WitnessToonOneJuror elif", "localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self): self.notify.debug('----- __doneBattleThree') self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self):", "-0.125, 0, 1.0, 0, 1.0) self.beamNodePath = NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0,", "suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId) return def setBossDamage(self, bossDamage, recoverRate, timestamp): recoverStartTime", "tris.addVertex(3) tris.closePrimitive() tris.addVertex(3) tris.addVertex(5) tris.addVertex(7) tris.closePrimitive() tris.addVertex(0) tris.addVertex(4) tris.addVertex(5) tris.closePrimitive()", "CogDisguiseGlobals from toontown.building import ElevatorConstants from toontown.toonbase import ToontownTimer OneBossCog", "= DirectLabel(text=text, 
text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0,", "'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0)) return bossTrack def makeDefeatMovie(self): bossTrack =", "movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180,", "direct.directnotify import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from toontown.toonbase import", "= self.prosecutionPanNodePath.getPos() origin = Point3(0, 0, 0) prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath,", "fromPos) distance = vector.length() time = distance / (ToontownGlobals.SuitWalkSpeed *", "self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0 self.raised", "tris.addVertex(7) tris.closePrimitive() tris.addVertex(7) tris.addVertex(5) tris.addVertex(4) tris.closePrimitive() cubeGeom = Geom(myVertexData) cubeGeom.addPrimitive(tris)", "ToontownGlobals.PieCodeBossInsides: if toon == localAvatar: self.d_hitBossInsides() self.flashRed() elif pieCode ==", "= None self.everThrownPie = 0 self.battleThreeMusicTime = 0 self.insidesANodePath =", "import Rope from toontown.distributed import DelayDelete from toontown.battle import MovieToonVictory", "Parallel( rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1,", "= defaultWeight + bonusWeight self.notify.debug('toon %d has weight of %d'", "self.cannons = {} self.useCannons = 1 self.juryBoxIval = None self.juryTimer", "looping=1, volume=0.9) self.startJuryBoxMoving() for index in xrange(len(self.cannons)): cannon = self.cannons[index]", "exitVictory(self): self.notify.debug('----- exitVictory') self.stopAnimate() self.unstash() 
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop()", "newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask", "has no avatarDoId tag.' % repr(entry.getIntoNodePath())) return doId = int(avatarDoId)", "= self.geom.find('**/Door_1') if not self.mainDoor.isEmpty(): itemsToHide = ['interior/Door_1'] for str", "self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1) self.happy = 0 self.raised = 0 self.forward", "== ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if toon == localAvatar: pass elif pieCode", "gearModel.instanceTo(node) angle = (float(i) / (numGears - 1) - 0.5)", "enterEpilogue(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr)", "< 5: points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] else: points =", "d_hitDefensePan(self): self.notify.debug('----- d_hitDefensePan') self.sendUpdate('hitDefensePan', []) def d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan',", "self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def __doneEpilogue(self, elapsedTime = 0):", "cannonIndex >= 0: diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: numJurors =", "pos[2], h, 0, 0) def __outOfPies(self): self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20,", "localAvatar: self.d_hitBossInsides() self.flashRed() elif pieCode == ToontownGlobals.PieCodeBossCog: if toon ==", "def __localPieSplat(self, pieCode, entry): if pieCode == 
ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry) if", "1 intervalName = 'VictoryMovie' seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName) seq.start()", "tris.addVertex(5) tris.addVertex(3) tris.closePrimitive() tris.addVertex(3) tris.addVertex(5) tris.addVertex(7) tris.closePrimitive() tris.addVertex(0) tris.addVertex(4) tris.addVertex(5)", "0: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage) tilt =", "self.d_hitToon(doId) def __lawyerGotHit(self, entry): lawyerCol = entry.getIntoNodePath() names = lawyerCol.getName().split('-')", "name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueDefeat(self):", "self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.show() def __arrangeToonsAroundWitnessToon(self): radius", "self.toonsEnterB.reset() def enterOff(self): self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon: self.witnessToon.clearChat() def", "def d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides', []) def d_hitDefensePan(self): self.notify.debug('----- d_hitDefensePan')", "enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() localAvatar.setPos(-3, 0, 0) base.localAvatar.orbitalCamera.start() self.clearChat() self.witnessToon.clearChat() self.reparentTo(render)", "toon: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName() else: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex]", "ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self): self.notify.debug('-----", "stuffToHide) self.geom.reparentTo(render) self.loadWitnessStand() 
self.loadScale() self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium() ug = self.geom.find('**/Reflections')", "self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50)))", "not hasattr(self, 'state'): self.notify.warning('returning from setTaunt, no attr state') gotError", "tris.addVertex(2) tris.addVertex(4) tris.closePrimitive() tris.addVertex(2) tris.addVertex(6) tris.addVertex(4) tris.closePrimitive() tris.addVertex(1) tris.addVertex(5) tris.addVertex(3)", "bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins,", "/ (ToontownGlobals.LawbotBossInitialDamage - 0) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt)", "self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor", "unstashBaseCol(self): if self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed = False def", "None return def __cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox') if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval", "= %s' % bnWorldPos) pos = render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint", "self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator = self.scaleNodePath.find('**/StandLocator1')", "toPos)) def __makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], 
ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide:", "def enterElevator(self): self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy =", "for str in itemsToHide: stuffToHide = self.geom.find('**/%s' % str) if", "panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos) self.notify.debug('prosecutionPanRelPos", "def __continueDefeat(self): self.notify.debug('----- __continueDefeat') self.stopAnimate() self.doneBarrier('Defeat') def exitDefeat(self): self.notify.debug('----- exitDefeat')", "ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack = Sequence() myInterval = camera.posHprInterval(8, Point3(-22, -100, 35),", "DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom def __loadMopaths(self): self.notify.debug('----- __loadMopaths') self.toonsEnterA =", "self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout) base.playSfx(self.toonUpSfx) if not self.bonusTimer: self.bonusTimer =", "self.releaseToons(finalBattle=1) self.__showWitnessToon() if not self.useCannons: self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic,", "int(4 + 6 * t + 0.5) time = 5.0", "0) prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos", "normalWriter = GeomVertexWriter(myVertexData, 'normal') colorWriter = GeomVertexWriter(myVertexData, 'color') texWriter =", "= self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId) return def 
setBossDamage(self, bossDamage, recoverRate,", "self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() if self.juryTimer: self.juryTimer.destroy() del self.juryTimer if self.bonusTimer:", "enterReward(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() panelName = self.uniqueName('reward')", "blendType='noBlend')), Func(toon.suit.loop, 'neutral')) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self, node, fromPos,", "self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0, volume=1.0) def __showCannonsAppearing(self, elapsedTime", "looping=1, volume=0.9) self.__showWitnessToon() diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage)", "self.unstashBaseCol() else: self.stashBaseCol() def unloadEnvironment(self): self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del", "saySomething(self, chatString): intervalName = 'ChiefJusticeTaunt' seq = Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString,", "if self.bossDamage >= self.bossMaxDamage: self.notify.debug('finish the movie then transition to", "= self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom = self.createBlock(0.25, 0.25, 0, -0.25,", "base.playMusic(self.stingMusic, looping=0, volume=1.0) def __showCannonsAppearing(self, elapsedTime = 0): allCannonsAppear =", "self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleThree(self): 
self.notify.debug('----- exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage'))", "from toontown.building import ElevatorConstants from toontown.toonbase import ToontownTimer OneBossCog =", "defenseCollNode.addSolid(defenseTube) self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom = self.createBlock(0.5, 0.5,", "if toon == localAvatar: self.d_hitBossInsides() self.flashRed() elif pieCode == ToontownGlobals.PieCodeBossCog:", "(ToontownGlobals.MaxCogSuitLevel + 1) return speech def __positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index =", "1.8) return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos)) def __makeRollToBattleTwoMovie(self):", "vertexWriter.addData3f(x1, y2, z1) vertexWriter.addData3f(x2, y2, z1) vertexWriter.addData3f(x1, y1, z2) vertexWriter.addData3f(x2,", "0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode))) seq = Sequence(Func(door.request,", "import BattleBase from direct.directutil import Mopath from direct.showutil import Rope", "= 1 self.juryBoxIval = None self.juryTimer = None self.witnessToon =", "toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0, 0) self.notify.debug('new toon pos", "[] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) def __gotLawyers(self, lawyers): self.lawyerRequest", "exitBattleTwo(self): self.notify.debug('----- exitBattleTwo') intervalName = self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1)", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() 
self.loop('Ff_neutral') self.notify.debug('self.battleANode = %s' % self.battleANode) self.__hideWitnessToon() if", "battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0, 0) self.notify.debug('new", "= Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90,", "1.0): gFormat = GeomVertexFormat.getV3n3cpt2() myVertexData = GeomVertexData('holds my vertices', gFormat,", "self.standNodePath = self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds = self.defenseLocator.getBounds()", "battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node world pos =", "return self.juryBox def startJuryBoxMoving(self): if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None", "0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack =", "self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 0, 0, 1.0, 0.25)", "solid: %s' % repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold = 0.1 planes.sort(lambda", "plane in planes: if lastPlane == None or plane.compareTo(lastPlane, threshold)", "= StackTrace() print st return chatString = TTLocalizer.LawbotBossTaunts[1] if tauntIndex", "if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if not self.elevatorEntrance.isEmpty(): pass def enterBattleOne(self):", "'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral'))) return ival", "gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180, 0, 0) door = self.doorA else:", "__doneEpilogue') intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track = Parallel(Sequence(Wait(0.5), 
Func(self.localToonToSafeZone))) self.storeInterval(track,", "self.bonusTimer.hide() def enteredBonusState(self): self.witnessToon.clearChat() text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration)", "'VictoryMovie' seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize()", "2, 0.125, -0.25, -2, -0.125, 0, 1.0, 0, 1.0) self.beamNodePath", "% self.numToonJurorsSeated) return def cleanupPanFlash(self): if self.panFlashInterval: self.panFlashInterval.finish() self.panFlashInterval =", "= ElevatorConstants.ELEVATOR_CJ self.gavels = {} self.chairs = {} self.cannons =", "self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode') self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest", "if not self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def __pieSplat(self, toon, pieCode):", "self.witnessToonOnstage = 0 def __hideToons(self): for toonId in self.involvedToons: toon", "intervalName = 'prepareBattleThree' seq = Sequence(prepareBattleThreeMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "def stashBaseCol(self): if not self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed =", "toon: toon.wrtReparentTo(render) pos, h = points[i] if i > 3:", "Sequence() self.notify.debug('calling setPosHpr') myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10,", "toon): stateName = self.state if stateName == 'Elevator': self.placeToonInElevator(toon) def", "60 * math.pi / 180.0 if direction == 1: spread", "self.battleThreeMusic.getTime() 
self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons()", "loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0, -71.601) self.geom.setScale(1) self.elevatorEntrance", "= self.baseHighCol.getCollideMask() newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask newBitMask = newBitMask", "self.notify.debug('----- __howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self): self.everThrownPie = 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice'))", "Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10,", "not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def exitIntroduction(self): self.notify.debug('-----", "= TTLocalizer.BossCogAttackToons dialogTrack = Track( (0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6,", "return movie def countToonJurors(self): self.numToonJurorsSeated = 0 for key in", "self.numToonJurorsSeated juryResult += '\\x07' trialSpeech = juryResult trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree", "if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1,", "(22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24, Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA + self.toonsB,", "Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute, attackToons, CFSpeech)))) track.append(dialogTrack) return Sequence(", "ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr = 
Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos =", "Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut')", "if self.witnessToon: self.witnessToon.clearChat() def enterWaitForToons(self): self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() self.witnessToon.removeActive()", "enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor() intervalName = 'RollToBattleThree' seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree),", "self.__hideWitnessToon() if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel)", "if not self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge') center =", "seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def __onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree')", "Rope from toontown.distributed import DelayDelete from toontown.battle import MovieToonVictory from", "self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol =", "seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1,", "CFSpeech | CFTimeout) base.playSfx(self.toonUpSfx) if not self.bonusTimer: self.bonusTimer = ToontownTimer.ToontownTimer()", "% prosecutionPanPos) self.notify.debug('prosecutionPanRelPos = %s' % 
prosecutionPanRelPos) self.notify.debug('panRenderPos = %s'", "ug.setBin('ground', -10) def loadJuryBox(self): self.juryBox = self.geom.find('**/JuryBox') juryBoxPos = self.juryBox.getPos()", "self.notify.debug('calling setPosHpr') myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13,", "localAvatar: self.d_hitBoss(self.panDamage) elif pieCode == ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if toon ==", "self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self): myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0],", "enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() self.loop('Ff_neutral') self.notify.debug('self.battleANode", "%d but we have a toon =%d' % (index, toonId))", "newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.juryBox.setPos(newPos) self.reflectedJuryBox", "= [] collList = model.findAllMatches('**/+CollisionNode') if not collList: collList =", "self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol') oldBitMask = self.baseHighCol.getCollideMask() newBitMask =", "self.makeToonsWait() finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos =", "= None self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage =", "self.raised = 1 self.forward = 1 self.doAnimate() self.__hideWitnessToon() if not", "toon.getPos(render) self.notify.debug('renderPos =%s' % renderPos) index += 1 
self.notify.debug('done with", "def __outOfPies(self): self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self,", "1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice'))", "= 'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1, 1, 1) seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0,", "exitRollToBattleThree') self.unstickBoss() intervalName = 'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleThree(self): self.notify.debug('-----", "self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie: self.bossDamageMovie.finish()", "0) door = self.doorA else: gearRoot.setPos(0, 7, 3) door =", "< len(self.involvedToons): toonId = self.involvedToons[extraInfo] toon = base.cr.doId2do.get(toonId) if toon:", "self.setDisplayName(nameInfo) self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg')", "self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths()", "-90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, 
TTLocalizer.LawbotBossTrialChat1,", "chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr,", "self.notify.debug('----- __toonsToPromotionPosition') points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for i in", "1.0, 1.0) colorWriter.addData4f(r, g, b, a) texWriter.addData2f(1.0, 1.0) tris =", "OneBossCog self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept % {'name':", "battleHpr, 0) self.makeToonsWait() finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2])", "= self.geom.find('**/Reflections') ug.setBin('ground', -10) def loadJuryBox(self): self.juryBox = self.geom.find('**/JuryBox') juryBoxPos", "beamBoundsCenter) beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos = %s'", "__makePrepareBattleTwoMovie(self): chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale movie = Sequence(Func(camera.reparentTo, self.witnessToon),", "if base.localAvatar.doId == toonId: self.cannonIndex = cannonIndex def numJurorsSeatedByCannon(self, cannonIndex):", "Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0),", "if direction == 1: spread = -spread dist = 50", "= 90 - 15 * (i - center) radians =", "None self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None,", "self.ignore('doneChatPage') intervalName = 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self): 
DistributedBossCog.DistributedBossCog.enterBattleThree(self)", "self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterDefeat(self): self.notify.debug('----- enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat()", "prosecutionPanRelPos) self.notify.debug('panRenderPos = %s' % panRenderPos) prosecutionLocatorPos = self.prosecutionLocator.getPos() prosecutionLocatorRelPos", "% self.defensePanNodePath.getPos()) self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds = self.prosecutionLocator.getBounds() prosecutionLocPos =", "self.strafeInterval = None self.onscreenMessage = None self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage self.elevatorType", "self.reparentTo(render) def enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss() self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor()", "- 1] else: points = list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5]) self.notify.debug('toonsToBattlePosition:", "self.notify.debug('done with positionToons') def __makePrepareBattleTwoMovie(self): chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale", "Func(self.__onToBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleTwo(self, elapsedTime = 0):", "- ToontownGlobals.LawbotBossInitialDamage diffDamage *= 1.0 if diffDamage >= 0: percentDamaged", "intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def __onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)", "= 1 intervalName = 'VictoryMovie' seq = Sequence(self.makeVictoryMovie(), 
Func(self.__continueVictory), name=intervalName)", "Sequence(Func(camera.wrtReparentTo, render))), (9.6, Parallel( rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2, doorEndPos,", "3) self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2,", "if not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not", "= reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if not", "1 bonusWeight = 0 newWeight = 1 cannonIndex = self.cannonIndex", "self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl') reflectedZ = self.reflectedPodium.getZ() if not", "def cleanupPanFlash(self): if self.panFlashInterval: self.panFlashInterval.finish() self.panFlashInterval = None return def", "-2, -0.125, 0, 1.0, 0, 1.0) self.beamNodePath = NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom)", "= toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'),", "self.insidesANodePath.stash() def doorBCallback(self, isOpen): if self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash() else:", "self.battleThreeMusic.stop() self.epilogueMusic.stop() if self.juryTimer: self.juryTimer.destroy() del self.juryTimer if self.bonusTimer: self.bonusTimer.destroy()", "self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def cleanupAttacks(self): self.notify.debug('----- cleanupAttacks') self.__cleanupStrafe() def __cleanupStrafe(self):", "def __hideToons(self): for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if", "toon: self.notify.debug('toon = %s' % toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0, 8, 0)", 
"disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog != None:", "self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target = CollisionTube(0, -1, 4, 0,", "Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath,", "prosecutionLocBounds = self.prosecutionLocator.getBounds() prosecutionLocPos = prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos = %s' %", "__cleanupStrafe(self): self.notify.debug('----- __cleanupStrage') if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval = None return", "self.witnessToon.reparentTo(self.geom) seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge') center = seatCenter.getPos() self.notify.debug('center = %s'", "taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss') def __onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo')", "hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() return Sequence(chatTrack,", "Func(self.__onToPrepareBattleThree), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def __onToPrepareBattleThree(self):", "lastPlane = None for plane in planes: if lastPlane ==", "RewardPanel from toontown.toon import NPCToons from direct.task import Task import", "MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True) ival", "touchedGavelHandle(self, gavel, entry): 
attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '':", "'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons)", "int(avatarDoId) if doId != localAvatar.doId: self.d_hitToon(doId) def __lawyerGotHit(self, entry): lawyerCol", "0, -50))) planeNode = CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3 =", "__cleanupStrage') if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval = None return def __cleanupJuryBox(self):", "Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack = Sequence() bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr", "taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self): self.notify.debug('----- enterBattleTwo') self.cleanupIntervals() mult", "Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1),", "= self.geom.find('**/WitnessStand') if not self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect') if", "self.everThrownPie = 0 self.battleThreeMusicTime = 0 self.insidesANodePath = None self.insidesBNodePath", "self.cr.doId2do.get(toonId) if toon: toon.loop('neutral') def makeEndOfBattleMovie(self, hasLocalToon): name = self.uniqueName('Drop')", "Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)", "intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, 
time=self.battleThreeMusicTime) def __continueDefeat(self): self.notify.debug('----- __continueDefeat') self.stopAnimate()", "lawyer in self.lawyers: if lawyerDoId == lawyer.doId: lawyer.sendUpdate('hitByToon', []) def", "[toonId]) def gotToon(self, toon): stateName = self.state if stateName ==", "+= '\\x07' trialSpeech += weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0) def __makePrepareBattleThreeMovie(self): movie", "self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol) self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)", "def __cleanupStrafe(self): self.notify.debug('----- __cleanupStrage') if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval = None", "self.recoverStartTime return max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0)", "self.reflectedJuryBox.isEmpty(): if self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self): self.podium", "seq = Sequence(prepareBattleTwoMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic,", "= Point3(0, 0, 0) prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos =", "self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds, battleNode): self.notify.debug('----- __toonsToPromotionPosition') points", "t + 0.5) time = 5.0 - 4.0 * t", "import FSM from direct.fsm import ClassicFSM from direct.fsm import State", "= newCollideMask | cn.getIntoCollideMask() for i in xrange(cn.getNumSolids()): solid =", "Sequence() myInterval = camera.posHprInterval(8, 
Point3(-22, -100, 35), Point3(-10, -13, 0),", "def __foundPieButton(self): self.everThrownPie = 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self, entry):", "2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0,", "45, 25), Func(camera.setHpr, 0, 10, 0))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)),", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)), (12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2,", "'prepareBattleTwo' seq = Sequence(prepareBattleTwoMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing)", "def saySomething(self, chatString): intervalName = 'ChiefJusticeTaunt' seq = Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute,", "self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self): self.__hideWitnessToon() if self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon =", "None if self.juryBox: self.juryBox.removeNode() return def doStrafe(self, side, direction): gearRoot", "into) def touchedGavelHandle(self, gavel, entry): attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr", "__loadMopaths(self): self.notify.debug('----- __loadMopaths') self.toonsEnterA = Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1", "y1, z2) vertexWriter.addData3f(x2, y1, z2) vertexWriter.addData3f(x1, y2, z2) vertexWriter.addData3f(x2, y2,", "0) self.notify.debug('new toon pos %s ' % toon.getPos()) def touchedGavel(self,", "isOpen): if self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self,", 
"exitReward(self): self.notify.debug('----- exitReward') intervalName = 'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del", "CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom = self.createBlock(0.25,", "toon = base.cr.doId2do.get(toonId) if toon: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName()", "toon.setPos(0, 8, 0) toon.setH(180) renderPos = toon.getPos(render) self.notify.debug('renderPos =%s' %", "ClassicFSM from direct.fsm import State from direct.directnotify import DirectNotifyGlobal from", "localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar)) multiCannons = Parallel() index =", "self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self): self.notify.debug('----- enterBattleTwo') self.cleanupIntervals() mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2)", "self.__showWitnessToon() prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie() intervalName = 'prepareBattleTwo' seq = Sequence(prepareBattleTwoMovie,", "gearRoot.setPos(0, 7, 3) door = self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel =", "tris.addVertex(6) tris.addVertex(7) tris.closePrimitive() tris.addVertex(7) tris.addVertex(5) tris.addVertex(4) tris.closePrimitive() cubeGeom = Geom(myVertexData)", "self.cr.doId2do.get(toonId) if toon: if index in self.cannons: cannon = self.cannons[index]", "+= TTLocalizer.WitnessToonHPBoost else: speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1)", "trialSpeech = juryResult trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] 
if", "spread = 60 * math.pi / 180.0 if direction ==", "trialSpeech += '\\x07' trialSpeech += weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0) def __makePrepareBattleThreeMovie(self):", "Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack = Sequence() bossTrack.append(Func(self.loop,", "'normal') colorWriter = GeomVertexWriter(myVertexData, 'color') texWriter = GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1,", "self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self, task): self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence)", "ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel: speech +=", "0)) return movie def __doWitnessPrepareBattleThreeChat(self): self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d'", "entry): self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not self.everThrownPie: taskMgr.doMethodLater(30,", "toon = self.cr.doId2do.get(toonId) self.notify.debug('cannonId = %d' % cannon.doId) cannonPos =", "= ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId) if", "- 1] for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if", "def __showToons(self): for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if", "0 for key in self.chairs.keys(): chair = self.chairs[key] if chair.state", "track.append(ival) 
delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self, node, fromPos, toPos): self.notify.debug('----- __walkSuitToPoint')", "'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1, 1, 1) seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0,", "0.5) * spread x = dist * math.sin(angle) y =", "intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0, volume=1.0) def __showCannonsAppearing(self, elapsedTime =", "self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol') oldBitMask = self.baseHighCol.getCollideMask() newBitMask", "loadJuryBox(self): self.juryBox = self.geom.find('**/JuryBox') juryBoxPos = self.juryBox.getPos() newPos = juryBoxPos", "= GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1) tris.addVertex(2) tris.closePrimitive() tris.addVertex(1) tris.addVertex(3) tris.addVertex(2) tris.closePrimitive()", "camera.lookAt(self.witnessToon, 0, 0, 3) intervalName = 'EpilogueMovie' seq = Sequence(self.makeEpilogueMovie(),", "self.notify.warning('returning from setTaunt, no attr state') gotError = True elif", "def __onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.notify.debug('-----", "(0.5, Sequence( Func(self.clearChat), Func(camera.reparentTo, render), Func(camera.setPos, -3, 45, 25), Func(camera.setHpr,", "__makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos =", "text_fg=VBase4(1, 1, 1, 1), 
text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1)", "taskMgr.remove(taskName) if self.bossDamageMovie: if self.bossDamage >= self.bossMaxDamage: self.notify.debug('finish the movie", "self.prosecutionLocator.hide() self.beamLocator.hide() def loadScaleOld(self): startingTilt = 0 self.scaleNodePath = NodePath('injusticeScale')", "from direct.fsm import State from direct.directnotify import DirectNotifyGlobal from toontown.toonbase", "| ToontownGlobals.WallBitmask) self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB =", "ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon() if not", "self.witnessToon = None return def __showWitnessToon(self): if not self.witnessToonOnstage: self.witnessToon.addActive()", "self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie() intervalName", "self.recoverStartTime = recoverStartTime taskName = 'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie: if", "self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide()", "random import math from toontown.coghq import CogDisguiseGlobals from toontown.building import", "del self.geom def __loadMopaths(self): self.notify.debug('----- __loadMopaths') self.toonsEnterA = Mopath.Mopath() 
self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA')", "self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy = 1 self.raised = 1 self.forward =", "from toontown.battle import RewardPanel from toontown.toon import NPCToons from direct.task", "LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() self.loop('Ff_neutral') self.notify.debug('self.battleANode = %s'", "cannon.cannon.show() def getChairParent(self): return self.juryBox def startJuryBoxMoving(self): if self.juryBoxIval: self.juryBoxIval.finish()", "from direct.showbase.PythonUtil import StackTrace from direct.gui.DirectGui import * from panda3d.core", "if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def exitIntroduction(self): self.notify.debug('----- exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop()", "stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not found %s' % stuffToHide) self.reflectedMainDoor =", "node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0)) self.juryBoxIval.start() self.juryTimer = ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime)", "-7, 3) gearRoot.setHpr(180, 0, 0) door = self.doorA else: gearRoot.setPos(0,", "beamLocatorPos) self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol", "== 1: spread = -spread dist = 50 rate =", "setLawyerIds(self, lawyerIds): self.lawyers = [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, 
allCallback=self.__gotLawyers)", "setBossDamage(self, bossDamage, recoverRate, timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage", "side == 0: gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180, 0, 0) door", "1.0, a = 1.0): gFormat = GeomVertexFormat.getV3n3cpt2() myVertexData = GeomVertexData('holds", "Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0)) self.juryBoxIval.start()", "self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator = self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter()", "= %s' % pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3,", "self.stingMusic.stop() def enterBattleTwo(self): self.notify.debug('----- enterBattleTwo') self.cleanupIntervals() mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult)", "self.notify.debug('center = %s' % center) self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() - 1.5)", "(toonIds, battleNode)) if len(toonIds) < 5: points = BattleBase.BattleBase.toonPoints[len(toonIds) -", "volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self): self.notify.debug('----- exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage()", "= base.cr.doId2do.get(toonId) if toon: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName() else:", "npc = Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) 
npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon =", "return def cleanupPanFlash(self): if self.panFlashInterval: self.panFlashInterval.finish() self.panFlashInterval = None return", "self.hideBonusTimer) def setAttackCode(self, attackCode, avId = 0): DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId)", "-15, 15, 20), Func(camera.setHpr, -90, 0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon),", "and hasattr(place, 'fsm'): place.setState('waitForBattle') def makeToonsWait(self): self.notify.debug('makeToonsWait') for toonId in", "pos[1], pos[2], h, 0, 0) self.notify.debug('new toon pos %s '", "radius y = math.sin(radians) * radius toon.setPos(self.witnessToon, x, y, 0)", "self.notify.debug('----- __recoverBossDamage') if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return Task.cont def", "self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)", "in xrange(numGears): node = gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0, 0) gear", "== None: pass return def exitBattleOne(self): self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def", "= {} self.useCannons = 1 self.juryBoxIval = None self.juryTimer =", "'Ff_speech', startTime=2, duration=10, loop=1), ActorInterval(self, 'Ff_lookRt', duration=3), ActorInterval(self, 'Ff_lookRt', duration=3,", "Parallel() points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] self.notify.debug('walkToonsToBattlePosition: points = %s'", "= 'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def enterNearVictory(self):", "DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions = 
False def __init__(self,", "= 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage()", "% (ToontownGlobals.MaxCogSuitLevel + 1) if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech +=", "%s' % toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0, 8, 0) toon.setH(180) renderPos =", "self.attackCode = None self.attackAvId = 0 self.recoverRate = 0 self.recoverStartTime", "= cannon.nodePath.getPos(render) self.notify.debug('cannonPos = %s' % cannonPos) if toon: self.notify.debug('toon", "ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos = self.reflectedJuryBox.getPos() reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1]", "= ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: numJurors = self.numJurorsSeatedByCannon(cannonIndex) bonusWeight = numJurors", "import DistributedBossCog from toontown.toonbase import TTLocalizer import SuitDNA from toontown.toon", "+= [Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0)]", "loop=1, volume=1.0)) self.juryBoxIval.start() self.juryTimer = ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self):", "(render.getScale(), battleNode.getScale())) myCurPos = self.getPos() self.notify.debug('myCurPos = %s' % self.getPos())", "exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def enterIntroduction(self): self.notify.debug('----- enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate()", "= Parallel() index = 0 self.involvedToons.sort() for toonId in 
self.involvedToons:", "self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self): self.podium = self.geom.find('**/Podium')", "localAvatar: pass elif pieCode == ToontownGlobals.PieCodeLawyer: pass def __localPieSplat(self, pieCode,", "self.betweenBattleMusic.stop() def enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render)", "self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) self.__showWitnessToon() diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]", "self.controlToons() panelName = self.uniqueName('reward') self.rewardPanel = RewardPanel.RewardPanel(panelName) victory, camVictory, skipper", "Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar)) multiCannons = Parallel() index = 0", "self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d'", "tris.closePrimitive() tris.addVertex(2) tris.addVertex(3) tris.addVertex(6) tris.closePrimitive() tris.addVertex(3) tris.addVertex(7) tris.addVertex(6) tris.closePrimitive() tris.addVertex(0)", "8, 0) toon.setH(180) renderPos = toon.getPos(render) self.notify.debug('renderPos =%s' % renderPos)", "toonGotHealed(self, toonId): toon = base.cr.doId2do.get(toonId) if toon: base.playSfx(self.toonUpSfx, node=toon) def", "self.everThrownPie = 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand', [])", "self.defensePanNodePath = NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) 
self.defensePanNodePath.setPos(0, -2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube =", "targetNode = CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog))", "exitRollToBattleTwo') self.unstickBoss() intervalName = 'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self): self.notify.debug('-----", "toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie'))", "recoverRate, timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage self.recoverRate =", "self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos = self.reflectedJuryBox.getPos() newReflectedPos = reflectedJuryBoxPos -", "% prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos = %s ' % prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos =", "= juryResult trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]:", "0.5)) insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesANodePath =", "loadCannons(self): pass def loadWitnessStand(self): self.realWitnessStand = self.geom.find('**/WitnessStand') if not self.realWitnessStand.isEmpty():", "= None self.baseColStashed = False self.battleDifficulty = 0 self.bonusWeight =", "looping=1, volume=0.9) if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash()", "__howToGetPies') 
self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self, task): self.notify.debug('----- __howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self):", "self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') def exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree') self.unstickBoss() intervalName =", "0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr", "def __onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') def exitRollToBattleThree(self): self.notify.debug('-----", "self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos)", "= 0 self.bossDamageMovie = None self.everThrownPie = 0 self.battleThreeMusicTime =", "def loadEnvironment(self): self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0,", "CFSpeech), self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack = Parallel(bossTrack,", "toon == localAvatar: self.d_hitBossInsides() self.flashRed() elif pieCode == ToontownGlobals.PieCodeBossCog: if", "z2) vertexWriter.addData3f(x2, y1, z2) vertexWriter.addData3f(x1, y2, z2) vertexWriter.addData3f(x2, y2, z2)", "'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie: if self.bossDamage >= self.bossMaxDamage: self.notify.debug('finish the", 
"battleNode.getParent().getPos()) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle", "r = 1.0, g = 1.0, b = 1.0, a", "-2, 0, 0, 1.0, 0.25) self.defensePanNodePath = NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) self.defensePanNodePath.setPos(0,", "self.attackAvId = 0 self.recoverRate = 0 self.recoverStartTime = 0 self.bossDamageMovie", "0, -1.5, 0.6) prosecutionTube.setTangible(1) prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath =", "taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render)", "= self.__talkAboutPromotion(epSpeech) bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0)) return", "oldBitMask = self.baseHighCol.getCollideMask() newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask newBitMask =", "0) toon.setH(180) renderPos = toon.getPos(render) self.notify.debug('renderPos =%s' % renderPos) index", "self.juryBoxIval.finish() self.juryBoxIval = None self.juryBox.setPos(-30, 0, -12.645) self.reflectedJuryBox.setPos(-30, 0, 0)", "percentDamaged * ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85:", "exitDefeat') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterReward(self):", "self.notify.debug('toon %d has weight of %d' % (toonId, newWeight)) return", "= 'RewardMovie' delayDeletes = [] for toonId in self.involvedToons: toon", "i in xrange(cn.getNumSolids()): solid = cn.getSolid(i) if isinstance(solid, 
CollisionPolygon): plane", "1.0, b = 1.0, a = 1.0): gFormat = GeomVertexFormat.getV3n3cpt2()", "exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self):", "self.bossMaxDamage) def getBossDamage(self): self.notify.debug('----- getBossDamage') now = globalClock.getFrameTime() elapsed =", "self.geom.find('**/CR3_Floor') self.evFloor = self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane = CollisionPlane(Plane(Vec3(0, 0,", "enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss() self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor() intervalName =", "ug = self.geom.find('**/Reflections') ug.setBin('ground', -10) def loadJuryBox(self): self.juryBox = self.geom.find('**/JuryBox')", "base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def __doneEpilogue(self, elapsedTime = 0): self.notify.debug('----- __doneEpilogue')", "CFSpeech)), (24, Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA + self.toonsB, render, (-2.798, -70,", "def __finalPieSplat(self, toon, pieCode): if pieCode != ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat',", "disable(self): global OneBossCog self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths()", "tilt): self.beamNodePath.setP(tilt) if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def", "insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB) 
insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode)", "ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if toon == localAvatar: pass elif pieCode ==", "!= ToontownGlobals.PieCodeToon: return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId == '':", "in collList: cn = cnp.node() if not isinstance(cn, CollisionNode): self.notify.warning('Not", "texWriter = GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1, y1, z1) vertexWriter.addData3f(x2, y1, z1)", "else: speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1) return speech", "stuffToHide = self.geom.find('**/%s' % str) if not stuffToHide.isEmpty(): self.notify.debug('found %s'", "+ ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos = self.reflectedJuryBox.getPos() reflectedEndingAbsPos =", "import CogDisguiseGlobals from toontown.building import ElevatorConstants from toontown.toonbase import ToontownTimer", "- 1.5) self.witnessToon.setY(self.witnessToon.getY() - 1.15) self.witnessToonOnstage = 1 def __hideWitnessToon(self):", "self.defensePanNodePath = self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode',", "index in self.cannons: cannon = self.cannons[index] toon = self.cr.doId2do.get(toonId) self.notify.debug('cannonId", "xrange(cn.getNumSolids()): solid = cn.getSolid(i) if isinstance(solid, CollisionPolygon): plane = Plane(solid.getPlane())", "ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self): self.notify.debug('----- exitBattleTwo') intervalName = self.uniqueName('Drop')", 
"__recoverBossDamage(self, task): self.notify.debug('----- __recoverBossDamage') if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return", "== ToontownGlobals.PieCodeBossCog: if toon == localAvatar: self.d_hitBoss(1) if self.dizzy: self.flashRed()", "bonusWeight = numJurors - diffSettings[5] if bonusWeight < 0: bonusWeight", "DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon: self.witnessToon.clearChat() def enterWaitForToons(self): self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide()", "== self: OneBossCog = None return def delete(self): self.notify.debug('----- delete')", "pass def loadWitnessStand(self): self.realWitnessStand = self.geom.find('**/WitnessStand') if not self.realWitnessStand.isEmpty(): pass", "elapsedTime = 0): allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()),", "battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self): startPos =", "from direct.directnotify import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from toontown.toonbase", "points[0][0]) for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon:", "= 0 self.forward = 1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9,", "toonIds, battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) if len(toonIds)", "-1, 4, 0, -1, 9, 3.5) targetNode = CollisionNode('BossZap') targetNode.addSolid(target)", "distance / (ToontownGlobals.SuitWalkSpeed * 1.8) return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos),", 
"['Reflections/Door_1'] for str in itemsToHide: stuffToHide = self.geom.find('**/%s' % str)", "newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight > 0: if", "= self.cr.doId2do.get(toonId) if toon: toon.loop('neutral') def makeEndOfBattleMovie(self, hasLocalToon): name =", "self.notify.debug('__positionToonsInFrontOfCannons') index = 0 self.involvedToons.sort() for toonId in self.involvedToons: if", "if not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not", "self.insidesBNodePath = None self.strafeInterval = None self.onscreenMessage = None self.bossMaxDamage", "self.baseColStashed = False def makeScaleReflectDamage(self): diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage", "self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime = self.battleThreeMusic.getTime()", "pass self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect') if not self.reflectedWitnessStand.isEmpty(): pass colNode =", "TTLocalizer.WitnessToonHPBoost else: speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1) return", "self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie() intervalName = 'prepareBattleTwo'", "DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions = False def __init__(self, cr): self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self,", "= loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx = [] for i in xrange(10): 
self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg'))", "ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack = Sequence() myInterval", "= self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide() def loadScaleOld(self):", "0, 3) self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5,", "Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self): if self.cr: place", "hpr, battlePos, battleHpr, 0) self.makeToonsWait() return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0),", "= (numToons - 1) / 2.0 for i in xrange(numToons):", "= self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol')", "self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath = self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath = self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath", "cannonIndex def numJurorsSeatedByCannon(self, cannonIndex): retVal = 0 for chair in", "beamLocatorBounds.getCenter() negBeamLocatorPos = -beamLocatorPos self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) self.notify.debug('negBeamLocatorPos", "self.unstash() self.reparentTo(render) def enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss() self.toonsToBattlePosition(self.involvedToons, 
self.battleANode)", "elapsed / 60.0, 0) def __recoverBossDamage(self, task): self.notify.debug('----- __recoverBossDamage') if", "self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol) self.accept('enterProsecutionCol',", "= globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage self.recoverRate = recoverRate self.recoverStartTime =", "toon.show() def __talkAboutPromotion(self, speech): if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel =", "%s ' % prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos = %s' % locatorRenderPos) beamPos", "= Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString, CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq = self.activeIntervals.get(intervalName)", "= None self.strafeInterval = None self.onscreenMessage = None self.bossMaxDamage =", "self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg')", "startHpr, deathPos, None, 1) bossTrack.append(track) duration = bossTrack.getDuration() return bossTrack", "self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage) def getBossDamage(self):", "self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos),", "self.insidesANodePath.unstash() else: self.insidesANodePath.stash() def doorBCallback(self, isOpen): if self.insidesBNodePath: if isOpen:", "None self.everThrownPie = 0 
self.battleThreeMusicTime = 0 self.insidesANodePath = None", "self.mainDoor = None self.reflectedMainDoor = None self.panFlashInterval = None self.panDamage", "enterIntroduction(self): self.notify.debug('----- enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1,", "stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not found %s'", "return movie def __doWitnessPrepareBattleThreeChat(self): self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d' %", "xrange(len(self.cannons)): cannon = self.cannons[index] cannon.cannon.show() def getChairParent(self): return self.juryBox def", "recoverStartTime taskName = 'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie: if self.bossDamage >=", "= loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx =", "def __cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox') if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None", "lawyerDoId = int(names[1]) for lawyer in self.lawyers: if lawyerDoId ==", "ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0), ActorInterval(self, 'Ff_neutral', duration=2), ActorInterval(self, 'Ff_speech',", "in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: pos, h =", "= CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane = plane return NodePath(newCollisionNode) def makeIntroductionMovie(self,", "'RollToBattleThree' seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), 
name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic,", "self.mainDoor.isEmpty(): pass if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if not self.elevatorEntrance.isEmpty(): pass", "self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def setScaleTilt(self, tilt): self.beamNodePath.setP(tilt)", "self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0,", "def flashPanBlue(self): self.cleanupPanFlash() intervalName = 'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1, 1, 1)", "def gotToon(self, toon): stateName = self.state if stateName == 'Elevator':", "self.cleanupIntervals() mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1)", "__showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self): self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3')", "- diffSettings[5] if bonusWeight < 0: bonusWeight = 0 newWeight", "if self.door3.isEmpty(): self.door3 = self.geom.find('**/interior/CR3_Door') self.mainDoor = self.geom.find('**/Door_1') if not", "def exitElevator(self): self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def enterIntroduction(self): self.notify.debug('----- 
enterIntroduction')", "toontown.toonbase import ToontownBattleGlobals import DistributedBossCog from toontown.toonbase import TTLocalizer import", "camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0,", "0, -71.601) self.geom.setScale(1) self.elevatorEntrance = self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1) elevatorModel =", "base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def __onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree')", "Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i],", "newCollideMask = BitMask32(0) planes = [] collList = model.findAllMatches('**/+CollisionNode') if", "return speech def __positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index = 0 self.involvedToons.sort() for", "reflectedZ = self.reflectedPodium.getZ() if not self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if not self.reflectedPodium.isEmpty():", "self.unstickBoss() intervalName = 'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree')", "def d_healBoss(self, bossHeal): self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss', [bossHeal]) def d_hitBossInsides(self): self.notify.debug('-----", "render.clearTag('pieCode') self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = None self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop()", "def __makeBossDamageMovie(self): self.notify.debug('---- 
__makeBossDamageMovie') startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr", "if toon == localAvatar: pass elif pieCode == ToontownGlobals.PieCodeLawyer: pass", "%s' % beamLocatorPos) self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale)", "= None self.insidesBNodePath = None self.strafeInterval = None self.onscreenMessage =", "self.gavels = {} self.chairs = {} self.cannons = {} self.useCannons", "self.hasLocalToon(): self.toMovieMode() for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if", "pos, h = points[i] if i > 3: pos.setY(pos.getY() +", "% beamLocatorPos) def loadScaleNew(self): self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath = self.scaleNodePath.find('**/scaleBeam')", "1) seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1,", "newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold = 0.1 planes.sort(lambda p1, p2: p1.compareTo(p2, threshold))", "intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss') def __onToPrepareBattleTwo(self): self.notify.debug('-----", "self.__onToBattleThree) intervalName = 'prepareBattleThree' seq = Sequence(prepareBattleThreeMovie, name=intervalName) seq.start() self.storeInterval(seq,", "- 0.5) * spread x = dist * math.sin(angle) y", "name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0, volume=1.0) def", "then transition to NearVictory') 
self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if", "Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos", "self.setDizzy(0) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self): self.notify.debug('----- enterVictory') self.cleanupIntervals()", "center) self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() - 1.5) self.witnessToon.setY(self.witnessToon.getY() - 1.15) self.witnessToonOnstage", "seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleTwo(self,", "- 15 * (i - center) radians = angle *", "self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes =", "= self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0, 1, 4, 0,", "False self.numToonJurorsSeated = 0 self.mainDoor = None self.reflectedMainDoor = None", "%s' % prosecutionPanPos) self.notify.debug('prosecutionPanRelPos = %s' % prosecutionPanRelPos) self.notify.debug('panRenderPos =", "self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) camera.setPos(self.witnessToon, -9, 12, 6) camera.lookAt(self.witnessToon, 0, 0, 3)", "= self.rollBossToPoint(startPos, None, battlePos, None, 0) bossTrack.append(track) track, hpr =", "self.reflectedJuryBox.getPos() newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: 
self.reflectedJuryBox.setPos(newReflectedPos)", "self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee() gearModel.setScale(0.1) t = self.getBossDamage()", "1 def __hideWitnessToon(self): if self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage = 0", "self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer: self.juryTimer.destroy() del self.juryTimer self.juryTimer = None", "| ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target =", "def setTaunt(self, tauntIndex, extraInfo): gotError = False if not hasattr(self,", "self.defensePanNodePath.setColorScale(1, 1, 1, 1) seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1,", "a = 1.0): gFormat = GeomVertexFormat.getV3n3cpt2() myVertexData = GeomVertexData('holds my", "None self.juryMovesSfx = None self.baseColStashed = False self.battleDifficulty = 0", "= Vec3(toPos - fromPos) distance = vector.length() time = distance", "self.juryBoxIval = None self.juryTimer = None self.witnessToon = None self.witnessToonOnstage", "BattleBase from direct.directutil import Mopath from direct.showutil import Rope from", "bossTrack.getDuration() return bossTrack def __showOnscreenMessage(self, text): self.notify.debug('----- __showOnscreenmessage') if self.onscreenMessage:", "renderPos) index += 1 self.notify.debug('done with positionToons') def __makePrepareBattleTwoMovie(self): chatString", "taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self, task): self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def", "cubeGN = GeomNode('cube') cubeGN.addGeom(cubeGeom) return cubeGN def 
__enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol')", "in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.loop('neutral') def makeEndOfBattleMovie(self,", "battleNode=%s' % (toonIds, battleNode)) if len(toonIds) < 5: points =", "% {'name': self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx", "ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack,", "= loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx =", "Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) track, hpr", "stateName = self.state if stateName == 'Elevator': self.placeToonInElevator(toon) def setLawyerIds(self,", "self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self): if not self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash()", "curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos = self.reflectedJuryBox.getPos() reflectedEndingAbsPos", "= Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])", "i in xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) 
insidesA = CollisionPolygon(Point3(4.0, -2.0,", "self.reflectedPodium.isEmpty(): if self.debugPositions: self.reflectedPodium.show() def loadCannons(self): pass def loadWitnessStand(self): self.realWitnessStand", "2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5)) insidesBNode =", "%d' % self.numToonJurorsSeated) return def cleanupPanFlash(self): if self.panFlashInterval: self.panFlashInterval.finish() self.panFlashInterval", "== None and cannonIndex >= 0: diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if", "= ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self): self.notify.debug('----- exitBattleTwo') intervalName =", "numToonJurorsSeated=%d' % self.numToonJurorsSeated) if self.numToonJurorsSeated == 0: juryResult = TTLocalizer.WitnessToonNoJuror", "defenseCollNode = CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom", "0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr =", "= GeomVertexWriter(myVertexData, 'vertex') normalWriter = GeomVertexWriter(myVertexData, 'normal') colorWriter = GeomVertexWriter(myVertexData,", "makeDefeatMovie(self): bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute,", "bossDamage self.recoverRate = recoverRate self.recoverStartTime = recoverStartTime taskName = 'RecoverBossDamage'", "Sequence(prepareBattleTwoMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0, volume=1.0)", "= %s' % 
beamRelPos) self.notify.debug('beamRenderPos = %s' % beamRenderPos) beamBoundsCenter", "in xrange(8): normalWriter.addData3f(1.0, 1.0, 1.0) colorWriter.addData4f(r, g, b, a) texWriter.addData2f(1.0,", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') def exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree') self.unstickBoss() intervalName = 'RollToBattleThree'", "TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1) if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech", "curPos = self.juryBox.getPos() endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] +", "[bossHeal]) def d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides', []) def d_hitDefensePan(self): self.notify.debug('-----", "direction): gearRoot = self.rotateNode.attachNewNode('gearRoot') if side == 0: gearRoot.setPos(0, -7,", "= int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def createBlock(self, x1,", "= dist * math.sin(angle) y = dist * math.cos(angle) h", "= Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1,", "ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime,", "self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self): self.stash() def unstashBoss(self, task): self.unstash()", "__talkAboutPromotion(self, speech): if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel = 
localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if", "-0.5, -0.5, -2, 1.0, 0, 0, 1.0) self.prosecutionPanNodePath = NodePath('prosecutionPan')", "-90, 0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2),", "self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def setBattleDifficulty(self, diff): self.notify.debug('battleDifficulty = %d' % diff)", "ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack = Sequence()", "looping=0, volume=1.0) def __showCannonsAppearing(self, elapsedTime = 0): allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons),", "gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1,", "= Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0,", "= prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator", "8, 180, 0, 0) def exitElevator(self): self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive()", "node = gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0, 0) gear = gearModel.instanceTo(node)", "exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName = self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) 
self.ignore('enterWitnessStand') self.ignore('pieSplat')", "now - self.recoverStartTime return max(self.bossDamage - self.recoverRate * elapsed /", "- Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.juryBox.setPos(newPos) self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos", "self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) def loadScaleNew(self): self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale')", "seq = Sequence(self.makeEpilogueMovie(), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic,", "door = self.doorA else: gearRoot.setPos(0, 7, 3) door = self.doorB", "from libotp import * from direct.fsm import FSM from direct.fsm", "% stuffToHide) self.geom.reparentTo(render) self.loadWitnessStand() self.loadScale() self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium() ug =", "Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0,", "{} self.useCannons = 1 self.juryBoxIval = None self.juryTimer = None", "for plane in planes: if lastPlane == None or plane.compareTo(lastPlane,", "loadScaleNew(self): self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath = self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath = self.scaleNodePath.find('**/defensePan')", "delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self, node, fromPos, toPos): self.notify.debug('----- __walkSuitToPoint') vector", "self.forward = 1 self.doAnimate() self.__hideWitnessToon() if not self.mainDoor.isEmpty(): self.mainDoor.stash() if", "0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) 
bossTrack.append(Func(self.loop, 'Ff_neutral')) track,", "[Wait(0.0)] if hasLocalToon: seq += [Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()),", "None self.juryBox.setPos(-30, 0, -12.645) self.reflectedJuryBox.setPos(-30, 0, 0) curPos = self.juryBox.getPos()", "break newCollideMask = newCollideMask | cn.getIntoCollideMask() for i in xrange(cn.getNumSolids()):", "toon = self.cr.doId2do.get(toonId) if toon: toon.show() def __arrangeToonsAroundWitnessToon(self): radius =", "self.__talkAboutPromotion(epSpeech) bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0)) return bossTrack", "self.dizzy: self.flashRed() self.doAnimate('hit', now=1) elif pieCode == ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue()", "for chair in self.chairs.values(): if chair.state == 'ToonJuror': if chair.toonJurorIndex", "% (toonIds, battleNode)) ival = Parallel() points = BattleBase.BattleBase.toonPoints[len(toonIds) -", "OneBossCog != None: self.notify.warning('Multiple BossCogs visible.') OneBossCog = self return", "exitBattleOne(self): self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self): self.stash() def unstashBoss(self, task):", "self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5,", "(9.6, Parallel( rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))), (13.1,", "self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0, -71.601) self.geom.setScale(1) self.elevatorEntrance = self.geom.find('**/elevator_origin')", "self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def 
exitIntroduction(self): self.notify.debug('----- exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if not", "self.notify.warning('Unexpected collision solid: %s' % repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold =", "if pieCode == ToontownGlobals.PieCodeBossInsides: if toon == localAvatar: self.d_hitBossInsides() self.flashRed()", "self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom = self.createBlock(0.25, 0.25, 0,", "xrange(8): normalWriter.addData3f(1.0, 1.0, 1.0) colorWriter.addData4f(r, g, b, a) texWriter.addData2f(1.0, 1.0)", "= None return def __showWaitingMessage(self, task): self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def", "repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold = 0.1 planes.sort(lambda p1, p2: p1.compareTo(p2,", "delete(self): self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self, bossDamage): self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss',", "self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage =", "seq.start() self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0, volume=1.0) def __showCannonsAppearing(self,", "%s' % pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos),", "def 
__positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index = 0 self.involvedToons.sort() for toonId in", "= cn.getSolid(i) if isinstance(solid, CollisionPolygon): plane = Plane(solid.getPlane()) planes.append(plane) else:", "render), Func(camera.setPos, -3, 45, 25), Func(camera.setHpr, 0, 10, 0))), (1.0,", "= Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr", "(ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout) base.playSfx(self.toonUpSfx) if not self.bonusTimer:", "NPCToons from direct.task import Task import random import math from", "self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self._name,", "self.bossDamageMovie = None self.everThrownPie = 0 self.battleThreeMusicTime = 0 self.insidesANodePath", "DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)}", "tris.closePrimitive() tris.addVertex(3) tris.addVertex(5) tris.addVertex(7) tris.closePrimitive() tris.addVertex(0) tris.addVertex(4) tris.addVertex(5) tris.closePrimitive() tris.addVertex(1)", "0, 0) base.localAvatar.orbitalCamera.start() self.clearChat() self.witnessToon.clearChat() self.reparentTo(render) self.happy = 1 self.raised", "=%d' % (index, toonId)) allCannonsAppear.append(multiCannons) intervalName = 'prepareBattleTwoCannonsAppear' seq =", "isinstance(solid, CollisionPolygon): plane = 
Plane(solid.getPlane()) planes.append(plane) else: self.notify.warning('Unexpected collision solid:", "self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed = False def makeScaleReflectDamage(self): diffDamage =", "exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName = 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop()", "'ChiefJusticeTaunt' seq = Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString, CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq", "self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers = [] self.lawyerRequest", "origPos = pos self.notify.debug('origPos = %s' % origPos) self.notify.debug('batlleNode.getTransform =", "flashPanBlue(self): self.cleanupPanFlash() intervalName = 'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1, 1, 1) seq", "oldBitMask & ~ToontownGlobals.PieBitmask newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol", "cannonPos) if toon: self.notify.debug('toon = %s' % toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0,", "Func(node.detachNode))) seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close')) self.__cleanupStrafe()", "== ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25) if toon ==", "== '': self.notify.warning('Node %s has no attackCode tag.' 
% repr(entry.getIntoNodePath()))", "self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon = None return def __showWitnessToon(self): if not", "15 * (i - center) radians = angle * math.pi", "0 self.involvedToons.sort() for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if", "True elif not self.state == 'BattleThree': self.notify.warning('returning from setTaunt, not", "self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self, task): self.notify.debug('-----", "def enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() self.loop('Ff_neutral')", "node.setPos(0, 0, 0) gear = gearModel.instanceTo(node) angle = (float(i) /", "for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.loop('neutral')", "Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15, 20), Func(camera.setHpr, -90, 0, 0),", "= None self.unstickBoss() taskName = 'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime()", "self.uniqueName('PieAdvice')) def __pieSplat(self, toon, pieCode): if pieCode == ToontownGlobals.PieCodeBossInsides: if", "self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom def __loadMopaths(self): self.notify.debug('----- __loadMopaths')", "return def setBossDamage(self, bossDamage, recoverRate, timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage", "self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) 
self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy", "import Mopath from direct.showutil import Rope from toontown.distributed import DelayDelete", "[] self.lawyerRequest = None self.bossDamage = 0 self.attackCode = None", "pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral'))) return ival def toonsToBattlePosition(self,", "Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale = 35 self.toonsEnterB =", "toon.reparentTo(render) pos, h = points[i] toon.setPosHpr(battleNode, pos[0], pos[1] + 10,", "CFSpeech)), (18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24,", "self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie", "* math.cos(angle) h = random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i * rate), Func(node.show),", "1, 7, 3.5) shieldNode = CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask)", "0 self.numJurorsLocalToonSeated = 0 self.cannonIndex = -1 return def announceGenerate(self):", "%s' % beamRelPos) self.notify.debug('beamRenderPos = %s' % beamRenderPos) beamBoundsCenter =", "self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0,", "self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon() if not self.useCannons: 
self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB,", "diffDamage *= 1.0 if diffDamage >= 0: percentDamaged = diffDamage", "in self.chairs.values(): chair.stopCogsFlying() return def enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor()", "def __enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self):", "= 0 self.attackCode = None self.attackAvId = 0 self.recoverRate =", "self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide()", "defenseTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6) defenseTube.setTangible(1)", "attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '': self.notify.warning('Node %s has", "self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 1.0, 0, 0, 1.0)", "self.witnessToon = npc self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self): self.__hideWitnessToon() if self.witnessToon: self.witnessToon.removeActive()", "model.findAllMatches('**/+CollisionNode') if not collList: collList = [model] for cnp in", "base.localAvatar.orbitalCamera.start() self.clearChat() self.witnessToon.clearChat() self.reparentTo(render) self.happy = 1 self.raised = 1", "newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.WitnessToonHPBoost else: speech += TTLocalizer.WitnessToonMaxed", "%s' % pos) self.notify.debug('walkToonsToBattlePosition: final pos = %s' % pos)", "camera.setPosHpr(0, 30, 8, 180, 0, 0) def exitElevator(self): self.notify.debug('----- exitElevator')", "Func(toon.suit.loop, 'neutral')) 
track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self, node, fromPos, toPos):", "ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer) def setAttackCode(self, attackCode, avId =", "duration=3, startTime=3, endTime=0), ActorInterval(self, 'Ff_neutral', duration=2), ActorInterval(self, 'Ff_speech', duration=7, loop=1))", "self.chairs.values(): if chair.state == 'ToonJuror': if chair.toonJurorIndex == cannonIndex: retVal", "5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5))", "if OneBossCog != None: self.notify.warning('Multiple BossCogs visible.') OneBossCog = self", "== localAvatar: self.d_hitBossInsides() self.flashRed() elif pieCode == ToontownGlobals.PieCodeBossCog: if toon", "self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg')", "def getChairParent(self): return self.juryBox def startJuryBoxMoving(self): if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval", "self.targetNodePath = self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0, 1, 4,", "self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30, 8,", "= None for plane in planes: if lastPlane == None", "self.notify.debug('----- exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) 
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime", "__gotLawyers(self, lawyers): self.lawyerRequest = None self.lawyers = lawyers for i", "vertexWriter.addData3f(x2, y1, z2) vertexWriter.addData3f(x1, y2, z2) vertexWriter.addData3f(x2, y2, z2) for", "self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss', [bossDamage]) def d_healBoss(self, bossHeal): self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss',", "20), Func(camera.setHpr, -90, 0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0,", "not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30, 8, 180, 0, 0)", "intervalName) self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def __doneEpilogue(self, elapsedTime =", "FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers = [] self.lawyerRequest = None self.bossDamage =", "TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus: weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech", "toontown.toon import Toon from toontown.battle import BattleBase from direct.directutil import", "0 self.recoverStartTime = 0 self.bossDamageMovie = None self.everThrownPie = 0", "toonId in self.involvedToons: if index in self.cannons: cannon = self.cannons[index]", "self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self): if not", "self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol)", "= seatCenter.getPos() self.notify.debug('center = %s' % 
center) self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ()", "math.pi / 180.0 if direction == 1: spread = -spread", "chatString, CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq = self.activeIntervals.get(intervalName) if oldSeq: oldSeq.finish()", "= self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath = self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath =", "def hideBonusTimer(self): if self.bonusTimer: self.bonusTimer.hide() def enteredBonusState(self): self.witnessToon.clearChat() text =", "-90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1,", "self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed = False def makeScaleReflectDamage(self): diffDamage", "DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0, -71.601) self.geom.setScale(1) self.elevatorEntrance =", "Parallel() bossAnimTrack = Sequence( ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1), ActorInterval(self,", "self.beamNodePath.setPos(0, 0, 3) self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5,", "from toontown.distributed import DelayDelete from toontown.battle import MovieToonVictory from toontown.building", "global OneBossCog self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage()", "CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = 
self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield =", "self.notify.debug('----- exitVictory') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def", "str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0, 1, 4, 0, 1, 7, 3.5)", "toon == localAvatar: self.d_hitBoss(1) if self.dizzy: self.flashRed() self.doAnimate('hit', now=1) elif", "= TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName() else: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def", "= self.juryBox.getPos() endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1],", "topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos", "% beamPos) self.notify.debug('beamRelPos = %s' % beamRelPos) self.notify.debug('beamRenderPos = %s'", "looping=1, volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss') def __onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss()", "endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0)) self.juryBoxIval.start() self.juryTimer", "exitReward') intervalName = 'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del self.rewardPanel self.battleThreeMusicTime", "in planes: if lastPlane == None or plane.compareTo(lastPlane, threshold) !=", "i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: pos, h", "Point3(4.0, 
2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5)) insidesBNode", "= self.reflectedPodium.getZ() if not self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if not self.reflectedPodium.isEmpty(): if", "b, a) texWriter.addData2f(1.0, 1.0) tris = GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1) tris.addVertex(2)", "NametagGlobals.setMasterArrowsOn(1) bossDoneEventName = self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies')", "= self.geom.find('**/CR3_Floor') self.evFloor = self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane = CollisionPlane(Plane(Vec3(0,", "allowGroupShot=0, uberList=self.uberList, noSkip=True) ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward)) intervalName =", "= self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def loadScale(self): self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0) if", "= Parallel() bossAnimTrack = Sequence( ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1),", "(0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)), (12, Func(self.setChatAbsolute,", "self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def setScaleTilt(self, tilt): self.beamNodePath.setP(tilt) if", "def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() localAvatar.setPos(-3, 0, 0) base.localAvatar.orbitalCamera.start() self.clearChat() 
self.witnessToon.clearChat()", "%f' % rollTrackDuration) doorStartPos = self.door3.getPos() doorEndPos = Point3(doorStartPos[0], doorStartPos[1],", "defenseLocPos = defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos = %s' % defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath)", "10, 0))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)),", "Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr", "self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash()", "= True elif not self.state == 'BattleThree': self.notify.warning('returning from setTaunt,", "-0.5, -2, 0, 0, 1.0, 0.25) self.defensePanNodePath = NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom)", "movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15, 20), Func(camera.setHpr, -90,", "self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom = self.createBlock(0.5, 0.5, 0,", "[Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0)] seq.append(Func(self.setChatAbsolute,", "self.notify.debug('defenseLocatorPos = %s' % defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos())", "3.5) 
targetNode = CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode',", "self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat()", "planeNode = CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3 = self.geom.find('**/SlidingDoor1/') if", "__touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not self.everThrownPie:", "CFSpeech))) return bossTrack def __makeWitnessToon(self): dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc =", "if isinstance(solid, CollisionPolygon): plane = Plane(solid.getPlane()) planes.append(plane) else: self.notify.warning('Unexpected collision", "self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer: self.juryTimer.destroy() del self.juryTimer self.juryTimer", "volume=0.9) def __doneEpilogue(self, elapsedTime = 0): self.notify.debug('----- __doneEpilogue') intervalName =", "self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after calling", "myFromPos[1] + 30, myFromPos[2]) rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None,", "= Parallel() numGears = int(4 + 6 * t +", "self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self): myFromPos = 
Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos =", "None: pass return def exitBattleOne(self): self.notify.debug('----- exitBattleOne') DistributedBossCog.DistributedBossCog.exitBattleOne(self) def stashBoss(self):", "= None for chair in self.chairs.values(): chair.stopCogsFlying() return def enterRollToBattleThree(self):", "Func(camera.setPos, -15, 15, 20), Func(camera.setHpr, -90, 0, 0), Wait(3), Func(camera.reparentTo,", "self.storeInterval(seq, intervalName) def saySomething(self, chatString): intervalName = 'ChiefJusticeTaunt' seq =", "* from direct.showbase.PythonUtil import Functor from direct.showbase.PythonUtil import StackTrace from", "now = globalClock.getFrameTime() elapsed = now - self.recoverStartTime return max(self.bossDamage", "None self.bonusTimer = None self.warningSfx = None self.juryMovesSfx = None", "self.toonsEnterA.timeScale = 35 self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1", "from toontown.toonbase import ToontownGlobals from toontown.toonbase import ToontownBattleGlobals import DistributedBossCog", "now=1) elif pieCode == ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25)", "'': self.notify.warning('Node %s has no attackCode tag.' 
% repr(entry.getIntoNodePath())) return", "makeIntroductionMovie(self, delayDeletes): self.notify.debug('----- makeIntroductionMovie') for toonId in self.involvedToons: toon =", "self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def __pieSplat(self, toon, pieCode): if pieCode", "intervalName = 'RollToBattleThree' seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName) seq.start() self.storeInterval(seq,", "localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel", "from toontown.coghq import CogDisguiseGlobals from toontown.building import ElevatorConstants from toontown.toonbase", "ToontownGlobals.LawbotBossWinningTilt else: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0) tilt", "base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor = self.geom.find('**/MidVaultFloor1') if floor.isEmpty(): floor = self.geom.find('**/CR3_Floor') self.evFloor", "cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers = [] self.lawyerRequest = None self.bossDamage", "35), startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech),", "None return def delete(self): self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self, bossDamage):", "self.doorA else: gearRoot.setPos(0, 7, 3) door = self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack))", "self.reflectedJuryBox.setPos(newReflectedPos) if not self.reflectedJuryBox.isEmpty(): if self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])", "= Mopath.Mopath() 
self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale = 35 def", "h = points[i] if i > 3: pos.setY(pos.getY() + 2.0)", "seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic, looping=1,", "makeEndOfBattleMovie(self, hasLocalToon): name = self.uniqueName('Drop') seq = Sequence(name=name) seq +=", "= self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate()", "render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0),", "getChairParent(self): return self.juryBox def startJuryBoxMoving(self): if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval =", "if floor.isEmpty(): floor = self.geom.find('**/CR3_Floor') self.evFloor = self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor')", "720) gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1),", "plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50))) planeNode =", "NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def setScaleTilt(self, tilt):", "name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self): if self.cr: place = self.cr.playGame.getPlace() if place", "2), Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0)) return movie", "if self.panFlashInterval: self.panFlashInterval.finish() 
self.panFlashInterval = None return def flashPanBlue(self): self.cleanupPanFlash()", "base.playSfx(self.toonUpSfx, node=toon) def hideBonusTimer(self): if self.bonusTimer: self.bonusTimer.hide() def enteredBonusState(self): self.witnessToon.clearChat()", "= diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0) tilt = percentDamaged *", "__positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index = 0 self.involvedToons.sort() for toonId in self.involvedToons:", "beamPos) self.notify.debug('beamRelPos = %s' % beamRelPos) self.notify.debug('beamRenderPos = %s' %", "toon.wrtReparentTo(render) ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1,", "self.unstash() self.rewardPanel.destroy() del self.rewardPanel self.battleThreeMusicTime = 0 self.battleThreeMusic.stop() def enterEpilogue(self):", "self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', []) def d_hitToon(self, toonId): self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon',", "!= None: self.notify.warning('Multiple BossCogs visible.') OneBossCog = self return def", "taskMgr.add(self.__recoverBossDamage, taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage) def getBossDamage(self): self.notify.debug('-----", "vertexWriter.addData3f(x2, y2, z2) for index in xrange(8): normalWriter.addData3f(1.0, 1.0, 1.0)", "track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() finalPodiumPos", "= base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor =", "self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() 
self.witnessToon.removeActive() def exitWaitForToons(self): self.notify.debug('----- exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self)", "panRenderPos) prosecutionLocatorPos = self.prosecutionLocator.getPos() prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos =", "pieCode == ToontownGlobals.PieCodeBossInsides: if toon == localAvatar: self.d_hitBossInsides() self.flashRed() elif", "int(names[1]) for lawyer in self.lawyers: if lawyerDoId == lawyer.doId: lawyer.sendUpdate('hitByToon',", "setScaleTilt(self, tilt): self.beamNodePath.setP(tilt) if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt)", "toon.reparentTo(cannon.nodePath) toon.setPos(0, 8, 0) toon.setH(180) renderPos = toon.getPos(render) self.notify.debug('renderPos =%s'", "% points[0][0]) for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if", "tauntIndex, extraInfo): gotError = False if not hasattr(self, 'state'): self.notify.warning('returning", "not hasattr(self, 'nametag'): self.notify.warning('returning from setTaunt, no attr nametag') gotError", "self.cannons: cannon = self.cannons[index] cannonSeq = cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index +=", "ival = Parallel() points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] self.notify.debug('walkToonsToBattlePosition: points", "epSpeech = self.__talkAboutPromotion(epSpeech) bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0))", "self.setToonsToNeutral(self.involvedToons) self.happy = 1 self.raised = 1 self.forward = 1", "enterPrepareBattleThree') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) 
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showWitnessToon()", "self.uniqueName('reward') self.rewardPanel = RewardPanel.RewardPanel(panelName) victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons,", "[model] for cnp in collList: cn = cnp.node() if not", "diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId)", "= False self.numToonJurorsSeated = 0 self.mainDoor = None self.reflectedMainDoor =", "0) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if self.bossDamage <", "str) if not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else:", "ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8)", "0 self.forward = 1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)", "'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack) attackToons = TTLocalizer.BossCogAttackToons dialogTrack = Track(", "= bossTrack.getDuration() return bossTrack def __showOnscreenMessage(self, text): self.notify.debug('----- __showOnscreenmessage') if", "0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos,", "tris.addVertex(6) tris.closePrimitive() tris.addVertex(0) tris.addVertex(2) tris.addVertex(4) tris.closePrimitive() tris.addVertex(2) tris.addVertex(6) tris.addVertex(4) tris.closePrimitive()", "else: juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated juryResult += 
'\\x07' trialSpeech", "if not self.reflectedPodium.isEmpty(): if self.debugPositions: self.reflectedPodium.show() def loadCannons(self): pass def", "self.cannonIndex = -1 return def announceGenerate(self): global OneBossCog self.notify.debug('----- announceGenerate')", "else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage", "= None self.juryBox.setPos(-30, 0, -12.645) self.reflectedJuryBox.setPos(-30, 0, 0) curPos =", "if gotError: st = StackTrace() print st return chatString =", "SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx =", "self.notify.debug('----- makeIntroductionMovie') for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if", "'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return bossTrack def __makeWitnessToon(self): dnaNetString", "reflectedJuryBoxPos = self.reflectedJuryBox.getPos() newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not", "self.juryBoxIval = None if self.juryBox: self.juryBox.removeNode() return def doStrafe(self, side,", "collList: cn = cnp.node() if not isinstance(cn, CollisionNode): self.notify.warning('Not a", "if not cannonIndex == None and cannonIndex >= 0: diffSettings", "import Functor from direct.showbase.PythonUtil import StackTrace from direct.gui.DirectGui import *", "-1.5, 0.6) defenseTube.setTangible(1) defenseCollNode = CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode)", "self.witnessToon.clearChat() def enterWaitForToons(self): 
self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() self.witnessToon.removeActive() def exitWaitForToons(self):", "toon.stopSmooth() if self.hasLocalToon(): self.toMovieMode() for toonId in self.involvedToons: toon =", "if toon: if index in self.cannons: cannon = self.cannons[index] cannonSeq", "elapsedTime = 0): self.notify.debug('----- __onToBattleTwo') self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def", "str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath = self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)", "+ 6 * t + 0.5) time = 5.0 -", "1.0) self.prosecutionPanNodePath = NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube", "= TTLocalizer.WitnessToonOneJuror elif self.numToonJurorsSeated == 12: juryResult = TTLocalizer.WitnessToonAllJurors else:", "DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode')", "if self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self): self.podium =", "def makeScaleReflectDamage(self): diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage diffDamage *= 1.0", "FSM.FSM): notify = 
DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions = False def __init__(self, cr):", "= self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer: self.juryTimer.destroy() del", "looping=1, volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self): self.notify.debug('----- __doneReward') self.doneBarrier('Reward') self.toWalkMode() def", "self.notify.debug('prosecutionLocatorRelPos = %s ' % prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos = %s' %", "dist * math.cos(angle) h = random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i * rate),", "None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions = False", "retVal def calculateWeightOfToon(self, toonId): defaultWeight = 1 bonusWeight = 0", "name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos =", "cn.getIntoCollideMask() for i in xrange(cn.getNumSolids()): solid = cn.getSolid(i) if isinstance(solid,", "for index in xrange(8): normalWriter.addData3f(1.0, 1.0, 1.0) colorWriter.addData4f(r, g, b,", "weight of %d' % (toonId, newWeight)) return (newWeight, bonusWeight, numJurors)", "not self.state == 'BattleThree': self.notify.warning('returning from setTaunt, not in battle", "if juryWeightBonus: weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech +=", "self.bossDamageMovie = None self.unstickBoss() taskName = 'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime =", "enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree') self.cleanupIntervals() 
self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) base.playMusic(self.betweenBattleMusic, looping=1,", "self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName = self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand')", "% repr(entry.getIntoNodePath())) return attackCode = int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode,", "'color') texWriter = GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1, y1, z1) vertexWriter.addData3f(x2, y1,", "25), Func(camera.setHpr, 0, 10, 0))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5,", "= base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor = self.geom.find('**/MidVaultFloor1') if floor.isEmpty(): floor = self.geom.find('**/CR3_Floor')", "intervalName = 'DefeatMovie' seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName) seq.start() self.storeInterval(seq,", "self.recoverStartTime = 0 self.bossDamageMovie = None self.everThrownPie = 0 self.battleThreeMusicTime", "'DefeatMovie' seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic,", "if not collList: collList = [model] for cnp in collList:", "node, fromPos, toPos): self.notify.debug('----- __walkSuitToPoint') vector = Vec3(toPos - fromPos)", "% ToontownGlobals.LawbotBossJurorsForBalancedScale movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2),", "or chair.state == None and chair.newState == 'ToonJuror': self.numToonJurorsSeated +=", "= Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], 
ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack", "def enterOff(self): self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon: self.witnessToon.clearChat() def enterWaitForToons(self):", "seq += [Wait(0.0)] if hasLocalToon: seq += [Func(self.show), Func(camera.reparentTo, localAvatar),", "prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator = self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos", "track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0) bossTrack.append(track) track,", "/ numGears for i in xrange(numGears): node = gearRoot.attachNewNode(str(i)) node.hide()", "(ToontownGlobals.MaxCogSuitLevel + 1) if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.WitnessToonHPBoost", "= lawyers for i in xrange(len(self.lawyers)): suit = self.lawyers[i] suit.fsm.request('neutral')", "= Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3],", "100.0 gearTrack = Parallel() numGears = int(4 + 6 *", "intervalName) def saySomething(self, chatString): intervalName = 'ChiefJusticeTaunt' seq = Sequence(name=intervalName)", "seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return seq def __makeBossDamageMovie(self): self.notify.debug('----", "CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0,", "points[i] if i > 3: pos.setY(pos.getY() + 
2.0) bnParent =", "bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0],", "= self.beamNodePath.getPos() beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos = render.getRelativePoint(self.beamNodePath, origin)", "* 0.85: self.unstashBaseCol() else: self.stashBaseCol() def unloadEnvironment(self): self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self)", "0) self.makeToonsWait() finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos", "self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' %", "self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath())) return doId", "__showOnscreenMessage(self, text): self.notify.debug('----- __showOnscreenmessage') if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None", "CFSpeech)), (5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo, render))), (9.6, Parallel(", "Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5)) insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB)", "'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self, node, fromPos, toPos): self.notify.debug('----- __walkSuitToPoint') vector =", "Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self): if self.cr: place = self.cr.playGame.getPlace()", "origin) beamRenderPos = render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos = %s' % beamPos)", "= 1 self.forward = 1 self.doAnimate() self.accept('enterWitnessStand', 
self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat)", "self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie = None self.unstickBoss()", "0.5) time = 5.0 - 4.0 * t spread =", "chair.newState == 'ToonJuror': self.numToonJurorsSeated += 1 self.notify.debug('self.numToonJurorsSeated = %d' %", "0) base.localAvatar.orbitalCamera.start() self.clearChat() self.witnessToon.clearChat() self.reparentTo(render) self.happy = 1 self.raised =", "Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon = npc self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr)", "chair in self.chairs.values(): chair.stopCogsFlying() return def enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render)", "= entry.getIntoNodePath() names = lawyerCol.getName().split('-') lawyerDoId = int(names[1]) for lawyer", "import State from direct.directnotify import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals", "toon = self.cr.doId2do.get(toonId) if toon: toon.loop('neutral') def makeEndOfBattleMovie(self, hasLocalToon): name", "__hideWitnessToon(self): if self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage = 0 def __hideToons(self):", "TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0),", "self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') def exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree') 
self.unstickBoss()", "def __makeWitnessToon(self): dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc = Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName)", "self.strafeInterval.finish() self.strafeInterval = None return def __cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox') if", "threshold = 0.1 planes.sort(lambda p1, p2: p1.compareTo(p2, threshold)) lastPlane =", "if not self.reflectedMainDoor.isEmpty(): itemsToHide = ['Reflections/Door_1'] for str in itemsToHide:", "%s' % stuffToHide) self.geom.reparentTo(render) self.loadWitnessStand() self.loadScale() self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium() ug", "self.__lawyerGotHit(entry) if pieCode != ToontownGlobals.PieCodeToon: return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if", "percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0) tilt = percentDamaged", "planes: if lastPlane == None or plane.compareTo(lastPlane, threshold) != 0:", "tris.closePrimitive() tris.addVertex(0) tris.addVertex(2) tris.addVertex(4) tris.closePrimitive() tris.addVertex(2) tris.addVertex(6) tris.addVertex(4) tris.closePrimitive() tris.addVertex(1)", "self.cannons[index] cannonSeq = cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index += 1 else: self.notify.warning('No", "else: gearRoot.setPos(0, 7, 3) door = self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel", "self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self): self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0,", "self.stickBossToFloor() intervalName = 'RollToBattleThree' seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName) seq.start()", 
"= 'DefeatMovie' seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "| cn.getIntoCollideMask() for i in xrange(cn.getNumSolids()): solid = cn.getSolid(i) if", "= self.cr.doId2do.get(self.involvedToons[i]) if toon: angle = 90 - 15 *", "Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral'))) return", "tris.addVertex(1) tris.addVertex(3) tris.addVertex(2) tris.closePrimitive() tris.addVertex(2) tris.addVertex(3) tris.addVertex(6) tris.closePrimitive() tris.addVertex(3) tris.addVertex(7)", "self.sendUpdate('hitToon', [toonId]) def gotToon(self, toon): stateName = self.state if stateName", "None self.panFlashInterval = None self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat', 0):", "toon.stopLookAround() toon.stopSmooth() if self.hasLocalToon(): self.toMovieMode() for toonId in self.involvedToons: toon", "= numJurors - diffSettings[5] if bonusWeight < 0: bonusWeight =", "myCurPos = self.getPos() self.notify.debug('myCurPos = %s' % self.getPos()) self.notify.debug('battleNode.parent() =", "Func(camera.lookAt, localAvatar)) multiCannons = Parallel() index = 0 self.involvedToons.sort() for", "int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def touchedGavelHandle(self, gavel, entry):", "Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss,", "import * from direct.distributed.ClockDelta import * from direct.showbase.PythonUtil import Functor", "Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], 
ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack =", "__toonsToPromotionPosition') points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for i in xrange(len(toonIds)):", "= 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc = Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit')", "oldSeq.finish() seq.start() self.storeInterval(seq, intervalName) def setTaunt(self, tauntIndex, extraInfo): gotError =", "0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) track, hpr =", "if self.bossDamageMovie: if self.bossDamage >= self.bossMaxDamage: self.notify.debug('finish the movie then", "self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1, volume=0.9) if not self.mainDoor.isEmpty(): self.mainDoor.stash()", "enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat',", "* math.sin(angle) y = dist * math.cos(angle) h = random.uniform(-720,", "tris.addVertex(4) tris.addVertex(6) tris.addVertex(7) tris.closePrimitive() tris.addVertex(7) tris.addVertex(5) tris.addVertex(4) tris.closePrimitive() cubeGeom =", "we have a toon =%d' % (index, toonId)) allCannonsAppear.append(multiCannons) intervalName", "collList: collList = [model] for cnp in collList: cn =", "if toon: toon.reparentTo(render) pos, h = points[i] toon.setPosHpr(battleNode, pos[0], pos[1]", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo, render))), (9.6, Parallel( 
rollTrack, Func(self.setChatAbsolute,", "self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self): self.notify.debug('----- exitBattleTwo') intervalName = self.uniqueName('Drop') self.clearInterval(intervalName)", "return max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0) def", "self.juryBox.getPos() endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2]", "toontown.battle import RewardPanel from toontown.toon import NPCToons from direct.task import", "battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) ival = Parallel()", "disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu()", "if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self): if", "[] collList = model.findAllMatches('**/+CollisionNode') if not collList: collList = [model]", "self.geom.find('**/interior/CR3_Door') self.mainDoor = self.geom.find('**/Door_1') if not self.mainDoor.isEmpty(): itemsToHide = ['interior/Door_1']", "__arrangeToonsAroundWitnessToon(self): radius = 7 numToons = len(self.involvedToons) center = (numToons", "collList = model.findAllMatches('**/+CollisionNode') if not collList: collList = [model] for", "'\\x07' trialSpeech = juryResult trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]", "def setBattleDifficulty(self, 
diff): self.notify.debug('battleDifficulty = %d' % diff) self.battleDifficulty =", "self.podium = self.geom.find('**/Podium') newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not", "self.sendUpdate('hitDefensePan', []) def d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', []) def d_hitToon(self,", "announceGenerate(self): global OneBossCog self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept", "beamPos = self.beamNodePath.getPos() beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos = render.getRelativePoint(self.beamNodePath,", "__walkSuitToPoint(self, node, fromPos, toPos): self.notify.debug('----- __walkSuitToPoint') vector = Vec3(toPos -", "track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start() def exitEpilogue(self): self.notify.debug('-----", "0): allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar))", "-9, 12, 6) camera.lookAt(self.witnessToon, 0, 0, 3) intervalName = 'EpilogueMovie'", "ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral')) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion'))", "in self.chairs.values(): if chair.state == 'ToonJuror': if chair.toonJurorIndex == cannonIndex:", "__doneReward') self.doneBarrier('Reward') self.toWalkMode() def exitReward(self): self.notify.debug('----- exitReward') intervalName = 'RewardMovie'", "Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + 
ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos", "TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0),", "- 4.0 * t spread = 60 * math.pi /", "exitVictory') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterDefeat(self):", "Mopath from direct.showutil import Rope from toontown.distributed import DelayDelete from", "5]) self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0]) for i in", "def d_hitToon(self, toonId): self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon', [toonId]) def gotToon(self, toon):", "= 1 intervalName = 'DefeatMovie' seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName)", "self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds = self.defenseLocator.getBounds() defenseLocPos = defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos = %s'", "from direct.showbase.PythonUtil import Functor from direct.showbase.PythonUtil import StackTrace from direct.gui.DirectGui", "Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack =", "GeomNode('cube') cubeGN.addGeom(cubeGeom) return cubeGN def __enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self,", "max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0) def __recoverBossDamage(self,", "(12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18, Func(self.setChatAbsolute, 
TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22, Func(self.setChatAbsolute,", "origPos) self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s'", "StackTrace from direct.gui.DirectGui import * from panda3d.core import * from", "chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName() else: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString)", "self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def loadScale(self): self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0) if self.useProgrammerScale:", "== '': self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath()))", "from toontown.building import ElevatorUtils from toontown.battle import RewardPanel from toontown.toon", "toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral')) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self,", "intervalName = 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash()", "Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral'))", "self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss()", "toHpr=None, reverse=0) rollTrack = Sequence( Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 
0))", "relief=None, pos=(0, 0, 0.35), scale=0.1) return def __clearOnscreenMessage(self): if self.onscreenMessage:", "direct.fsm import ClassicFSM from direct.fsm import State from direct.directnotify import", "def __showWaitingMessage(self, task): self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self): self.notify.debug('----- loadEnvironment')", "angle * math.pi / 180.0 x = math.cos(radians) * radius", "else: self.notify.warning('No cannon %d but we have a toon =%d'", "toon: toon.stopLookAround() toon.stopSmooth() if self.hasLocalToon(): self.toMovieMode() for toonId in self.involvedToons:", "self.storeInterval(seq, intervalName) def __onToBattleTwo(self, elapsedTime = 0): self.notify.debug('----- __onToBattleTwo') self.doneBarrier('PrepareBattleTwo')", "self.arenaSide: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB)", "ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos),", "3) intervalName = 'EpilogueMovie' seq = Sequence(self.makeEpilogueMovie(), name=intervalName) seq.start() self.storeInterval(seq,", "self.elevatorType = ElevatorConstants.ELEVATOR_CJ self.gavels = {} self.chairs = {} self.cannons", "1.0 if diffDamage >= 0: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage", "0: diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: numJurors = self.numJurorsSeatedByCannon(cannonIndex) bonusWeight", "= self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 0, 0, 1.0,", "+= TTLocalizer.WitnessToonPrepareBattleThree 
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: newWeight, self.bonusWeight, self.numJurorsLocalToonSeated", "None self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage = 25", "CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom = self.createBlock(0.5,", "tris.addVertex(1) tris.addVertex(0) tris.addVertex(5) tris.closePrimitive() tris.addVertex(4) tris.addVertex(6) tris.addVertex(7) tris.closePrimitive() tris.addVertex(7) tris.addVertex(5)", "5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5)) insidesBNode = CollisionNode('BossZap')", "self.rewardPanel.destroy() del self.rewardPanel self.battleThreeMusicTime = 0 self.battleThreeMusic.stop() def enterEpilogue(self): self.cleanupIntervals()", "fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)),", "1) if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.WitnessToonHPBoost else: speech", "allCallback=self.__gotLawyers) def __gotLawyers(self, lawyers): self.lawyerRequest = None self.lawyers = lawyers", "self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies)", "= GeomVertexWriter(myVertexData, 'normal') colorWriter = GeomVertexWriter(myVertexData, 'color') texWriter = GeomVertexWriter(myVertexData,", "self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB = 
CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0,", "= True if gotError: st = StackTrace() print st return", "destPos = toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1,", "self.defenseLocator.getBounds() defenseLocPos = defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos = %s' % defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos)", "self.notify.warning('No cannon %d but we have a toon =%d' %", "+ ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval =", "doId != localAvatar.doId: self.d_hitToon(doId) def __lawyerGotHit(self, entry): lawyerCol = entry.getIntoNodePath()", "if self.bonusWeight > 0: if self.bonusWeight == 1: juryWeightBonus =", "self.notify.debug('battleDifficulty = %d' % diff) self.battleDifficulty = diff def toonEnteredCannon(self,", "__doneBattleThree(self): self.notify.debug('----- __doneBattleThree') self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self): self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self)", "1: juryResult = TTLocalizer.WitnessToonOneJuror elif self.numToonJurorsSeated == 12: juryResult =", "self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() panelName", "self.__makePrepareBattleTwoMovie() intervalName = 'prepareBattleTwo' seq = Sequence(prepareBattleTwoMovie, name=intervalName) seq.start() self.storeInterval(seq,", "(-2.798, -70, 10, 180, 0, 0)))), (27, Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop,", "self.doAnimate() self.__hideWitnessToon() if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): 
self.reflectedMainDoor.stash()", "= self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5,", "del self.juryTimer self.juryTimer = None for chair in self.chairs.values(): chair.stopCogsFlying()", "def __lawyerGotHit(self, entry): lawyerCol = entry.getIntoNodePath() names = lawyerCol.getName().split('-') lawyerDoId", "__doneBattleThree') self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self): self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName", "npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon = npc self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def", "self.defensePanNodePath.attachNewNode(defensePanGeom) self.defensePanNodePath.setPos(0, -2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube = CollisionTube(0, 0, -0.5,", "= loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath = self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath = self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath =", "bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node world pos = %s'", "self: OneBossCog = None return def delete(self): self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self)", "if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return Task.cont def __walkToonToPromotion(self, toonId,", "-beamLocatorPos self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) self.notify.debug('negBeamLocatorPos = %s' %", "__doneEpilogue(self, elapsedTime = 0): self.notify.debug('----- __doneEpilogue') 
intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName)", "self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx)", "y2, z1) vertexWriter.addData3f(x2, y2, z1) vertexWriter.addData3f(x1, y1, z2) vertexWriter.addData3f(x2, y1,", "in self.cannons: cannon = self.cannons[index] toon = self.cr.doId2do.get(toonId) self.notify.debug('cannonId =", "exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree') self.unstickBoss() intervalName = 'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def", "def __walkSuitToPoint(self, node, fromPos, toPos): self.notify.debug('----- __walkSuitToPoint') vector = Vec3(toPos", "= 35 self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale", "- ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl') reflectedZ", "replaceCollisionPolysWithPlanes(self, model): newCollisionNode = CollisionNode('collisions') newCollideMask = BitMask32(0) planes =", "ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl') reflectedZ =", "cn = cnp.node() if not isinstance(cn, CollisionNode): self.notify.warning('Not a collision", "name = self.uniqueName('Drop') seq = Sequence(name=name) seq += [Wait(0.0)] if", "radius = 7 numToons = len(self.involvedToons) center = (numToons -", "curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), 
self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx,", "cr): self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers = []", "self.mainDoor.isEmpty(): itemsToHide = ['interior/Door_1'] for str in itemsToHide: stuffToHide =", "final pos = %s' % pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop,", "startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB)", "= %s' % defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos()) self.prosecutionLocator", "bonusWeight < 0: bonusWeight = 0 newWeight = defaultWeight +", "Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos =", "self.clearInterval(intervalName) track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start() def exitEpilogue(self):", "bossTrack = Sequence() myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10,", "self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator = self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos =", "math.sin(angle) y = dist * math.cos(angle) h = random.uniform(-720, 720)", "Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.juryBox.setPos(newPos) self.reflectedJuryBox = 
self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos =", "= CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode',", "return def announceGenerate(self): global OneBossCog self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo", "Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0,", "ElevatorUtils from toontown.battle import RewardPanel from toontown.toon import NPCToons from", "0.35), scale=0.1) return def __clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage =", "self.geom.find('**/interiorrefl/CR3_Door') if not self.reflectedMainDoor.isEmpty(): itemsToHide = ['Reflections/Door_1'] for str in", "= self.bossDamage - ToontownGlobals.LawbotBossInitialDamage diffDamage *= 1.0 if diffDamage >=", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1, volume=0.9) if not self.mainDoor.isEmpty():", "z2) vertexWriter.addData3f(x2, y2, z2) for index in xrange(8): normalWriter.addData3f(1.0, 1.0,", "180, 0, 0)))), (27, Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute, attackToons,", "= False self.battleDifficulty = 0 self.bonusWeight = 0 self.numJurorsLocalToonSeated =", "== None and chair.newState == 'ToonJuror': self.numToonJurorsSeated += 1 self.notify.debug('self.numToonJurorsSeated", "self.scaleNodePath.find('**/Scale_base_top_collision') 
self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide() def loadScaleOld(self): startingTilt", "toon == localAvatar: self.d_hitBoss(self.panDamage) elif pieCode == ToontownGlobals.PieCodeProsecutionPan: self.flashGreen() if", "import ElevatorUtils from toontown.battle import RewardPanel from toontown.toon import NPCToons", "battleNode.getScale())) myCurPos = self.getPos() self.notify.debug('myCurPos = %s' % self.getPos()) self.notify.debug('battleNode.parent()", "intervalName = 'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1, 1, 1) seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1,", "= self.doorA else: gearRoot.setPos(0, 7, 3) door = self.doorB gearRoot.setTag('attackCode',", "= self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))", "-1.5, 0.6) prosecutionTube.setTangible(1) prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode)", "Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return", "* spread x = dist * math.sin(angle) y = dist", "in battle three state, state=%s', self.state) gotError = True if", "self.juryTimer self.juryTimer = None for chair in self.chairs.values(): chair.stopCogsFlying() return", "'Ff_neutral', duration=2), ActorInterval(self, 'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack) attackToons = TTLocalizer.BossCogAttackToons", "Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), 
name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) taskMgr.doMethodLater(0.01,", "prosecutionLocPos = prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath)", "globalClock.getFrameTime() elapsed = now - self.recoverStartTime return max(self.bossDamage - self.recoverRate", "self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale = 35 self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB')", "/ 60.0, 0) def __recoverBossDamage(self, task): self.notify.debug('----- __recoverBossDamage') if self.bossDamageMovie:", "self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25) if toon == localAvatar: self.d_hitBoss(self.panDamage) elif", "% (toonIds, battleNode)) if len(toonIds) < 5: points = BattleBase.BattleBase.toonPoints[len(toonIds)", "0, 3) intervalName = 'EpilogueMovie' seq = Sequence(self.makeEpilogueMovie(), name=intervalName) seq.start()", "Functor from direct.showbase.PythonUtil import StackTrace from direct.gui.DirectGui import * from", "self.juryTimer: self.juryTimer.destroy() del self.juryTimer self.juryTimer = None for chair in", "CFTimeout) base.playSfx(self.toonUpSfx) if not self.bonusTimer: self.bonusTimer = ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() self.bonusTimer.show()", "+ ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0,", "DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1, volume=0.9) if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not", "%s' % repr(cnp)) break newCollideMask = 
newCollideMask | cn.getIntoCollideMask() for", "battleNode)) if len(toonIds) < 5: points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]", "= ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon() if", "xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render) pos, h =", "= 0): allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt,", "OneBossCog = self return def disable(self): global OneBossCog self.notify.debug('----- disable')", "from panda3d.core import * from libotp import * from direct.fsm", "self.door3.getPos() doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25) bossTrack =", "from toontown.battle import MovieToonVictory from toontown.building import ElevatorUtils from toontown.battle", "attackToons, CFSpeech)))) track.append(dialogTrack) return Sequence( Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def", "lawyer.doId: lawyer.sendUpdate('hitByToon', []) def __finalPieSplat(self, toon, pieCode): if pieCode !=", "self.cannonIndex numJurors = 0 if not cannonIndex == None and", "__continueVictory(self): self.notify.debug('----- __continueVictory') self.stopAnimate() self.doneBarrier('Victory') def exitVictory(self): self.notify.debug('----- exitVictory') self.stopAnimate()", "renderPos = toon.getPos(render) self.notify.debug('renderPos =%s' % renderPos) index += 1", "intervalName = 'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals()", 
"Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss,", "self.__enterDefenseCol) self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage", "for i in xrange(numGears): node = gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0,", "= self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() return Sequence(chatTrack, bossTrack,", "% stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not found %s' % stuffToHide) self.reflectedMainDoor", "= self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos = %s' % beamLocatorPos)", "tris.addVertex(5) tris.addVertex(7) tris.closePrimitive() tris.addVertex(0) tris.addVertex(4) tris.addVertex(5) tris.closePrimitive() tris.addVertex(1) tris.addVertex(0) tris.addVertex(5)", "t = self.getBossDamage() / 100.0 gearTrack = Parallel() numGears =", "0) gear = gearModel.instanceTo(node) angle = (float(i) / (numGears -", "in ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.WitnessToonHPBoost else: speech += TTLocalizer.WitnessToonMaxed %", "self.notify.warning('returning from setTaunt, no attr nametag') gotError = True if", "+ bonusWeight self.notify.debug('toon %d has weight of %d' % (toonId,", "def __debugScale(self): prosecutionPanPos = self.prosecutionPanNodePath.getPos() origin = Point3(0, 0, 0)", "juryResult = TTLocalizer.WitnessToonAllJurors else: juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated juryResult", "self.battleANode) self.__hideWitnessToon() if self.battleA == None 
or self.battleB == None:", "__unloadMopaths(self): self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset() def enterOff(self): self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self)", "self.door3.isEmpty(): self.door3 = self.geom.find('**/interior/CR3_Door') self.mainDoor = self.geom.find('**/Door_1') if not self.mainDoor.isEmpty():", "if self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed = False def makeScaleReflectDamage(self):", "doorACallback(self, isOpen): if self.insidesANodePath: if isOpen: self.insidesANodePath.unstash() else: self.insidesANodePath.stash() def", "for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.hide()", "stateName == 'Elevator': self.placeToonInElevator(toon) def setLawyerIds(self, lawyerIds): self.lawyers = []", "if index in self.cannons: cannon = self.cannons[index] toon = self.cr.doId2do.get(toonId)", "cannonIndex: retVal += 1 return retVal def calculateWeightOfToon(self, toonId): defaultWeight", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo, render))),", "return seq def __makeBossDamageMovie(self): self.notify.debug('---- __makeBossDamageMovie') startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1],", "loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg')", "the movie then transition to NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: 
self.bossDamageMovie.resumeUntil(self.bossDamage *", "self.notify.debug('----- __showOnscreenmessage') if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None self.onscreenMessage =", "no attr state') gotError = True elif not self.state ==", "beamBoundsCenter = self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter) beamLocatorBounds =", "if self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds,", "self.doneBarrier('Defeat') def exitDefeat(self): self.notify.debug('----- exitDefeat') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime =", "def replaceCollisionPolysWithPlanes(self, model): newCollisionNode = CollisionNode('collisions') newCollideMask = BitMask32(0) planes", "= BitMask32(0) planes = [] collList = model.findAllMatches('**/+CollisionNode') if not", "self.raised = 0 self.forward = 1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1,", "base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25) if toon == localAvatar: self.d_hitBoss(self.panDamage) elif pieCode", "% (ToontownGlobals.MaxCogSuitLevel + 1) return speech def __positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index", "d_bossHeal') self.sendUpdate('healBoss', [bossHeal]) def d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides', []) def", "seq.start() self.storeInterval(seq, intervalName) def __onToBattleThree(self, elapsed): self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1,", "enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss() self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor() intervalName = 'RollToBattleTwo' 
seq", "self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = None self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop()", "SuitDNA from toontown.toon import Toon from toontown.battle import BattleBase from", "cnp.node() if not isinstance(cn, CollisionNode): self.notify.warning('Not a collision node: %s'", "'Elevator': self.placeToonInElevator(toon) def setLawyerIds(self, lawyerIds): self.lawyers = [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest", "render, (-2.798, -70, 10, 180, 0, 0)))), (27, Sequence( self.toonNormalEyes(self.involvedToons),", "def __showWitnessToon(self): if not self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge')", "= %d' % self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' %", "= points[i] toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h, 0,", "self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight > 0: if self.bonusWeight", "toNeutralMode(self): if self.cr: place = self.cr.playGame.getPlace() if place and hasattr(place,", "self.baseSideCol.unstash() self.baseColStashed = False def makeScaleReflectDamage(self): diffDamage = self.bossDamage -", "= 0 for chair in self.chairs.values(): if chair.state == 'ToonJuror':", "text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1) return def __clearOnscreenMessage(self): if", "Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start() def exitEpilogue(self): self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash()", "self.notify.debug('----- enterBattleTwo') self.cleanupIntervals() mult = 
ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat()", "1 self.forward = 1 self.doAnimate() self.__hideWitnessToon() if not self.mainDoor.isEmpty(): self.mainDoor.stash()", "self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol')", "volume=1.0) def __showCannonsAppearing(self, elapsedTime = 0): allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo,", "looping=1, volume=0.9) def __doneEpilogue(self, elapsedTime = 0): self.notify.debug('----- __doneEpilogue') intervalName", "def __doneBattleThree(self): self.notify.debug('----- __doneBattleThree') self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self): self.notify.debug('----- exitBattleThree')", "self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0,", "6 * t + 0.5) time = 5.0 - 4.0", "self.beamNodePath = NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0, 3) self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom =", "rollTrack.getDuration() self.notify.debug('rollTrackDuration = %f' % rollTrackDuration) doorStartPos = self.door3.getPos() doorEndPos", "self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target = CollisionTube(0, -1,", "distance = vector.length() time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8)", "def enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree') 
self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) base.playMusic(self.betweenBattleMusic,", "= TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def toonGotHealed(self, toonId): toon = base.cr.doId2do.get(toonId) if", "= Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar)) multiCannons =", "% locatorRenderPos) beamPos = self.beamNodePath.getPos() beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos", "0.6) prosecutionTube.setTangible(1) prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode',", "i > 3: pos.setY(pos.getY() + 2.0) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render)", "1, 1, 1) seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)),", "def __onToBattleThree(self, elapsed): self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def", "pos[0], pos[1], pos[2], h, 0, 0) self.notify.debug('new toon pos %s", "= %s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(),", "doorStartPos = self.door3.getPos() doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25)", "= self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if not self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium =", "loadWitnessStand(self): self.realWitnessStand = self.geom.find('**/WitnessStand') if not 
self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand =", "self.panFlashInterval = seq seq.start() self.storeInterval(seq, intervalName) def saySomething(self, chatString): intervalName", "newWeight = 1 cannonIndex = self.cannonIndex numJurors = 0 if", "tauntIndex == 0: if extraInfo < len(self.involvedToons): toonId = self.involvedToons[extraInfo]", "= BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for i in xrange(len(toonIds)): toon =", "ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1), ActorInterval(self, 'Ff_lookRt', duration=3), ActorInterval(self, 'Ff_lookRt',", "if toon: self.notify.debug('toon = %s' % toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0, 8,", "localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self): self.notify.debug('----- __doneBattleThree') self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self): self.notify.debug('-----", "1 intervalName = 'DefeatMovie' seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName) seq.start()", "50 rate = time / numGears for i in xrange(numGears):", "= 'RollToBattleThree' seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "-13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar),", "1, 1, 1)), name=intervalName) self.panFlashInterval = seq seq.start() self.storeInterval(seq, intervalName)", "self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale = 35 def __unloadMopaths(self): self.notify.debug('-----", "__enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self): myFromPos", "State 
from direct.directnotify import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from", "RewardPanel.RewardPanel(panelName) victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList,", "1 self.forward = 1 self.doAnimate() self.accept('enterWitnessStand', self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat',", "CollisionPolygon): plane = Plane(solid.getPlane()) planes.append(plane) else: self.notify.warning('Unexpected collision solid: %s'", "= {} self.cannons = {} self.useCannons = 1 self.juryBoxIval =", "self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor() intervalName = 'RollToBattleThree' seq = Sequence(self.__makeRollToBattleThreeMovie(),", "insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesANodePath = self.axle.attachNewNode(insidesANode)", "[]) self.ignore('pieSplat') def cleanupAttacks(self): self.notify.debug('----- cleanupAttacks') self.__cleanupStrafe() def __cleanupStrafe(self): self.notify.debug('-----", "None, 1) bossTrack.append(track) track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None,", "self.notify.debug('beamRenderPos = %s' % beamRenderPos) beamBoundsCenter = self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter =", "= list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5]) self.notify.debug('toonsToBattlePosition: points = %s' %", "in xrange(numToons): toon = self.cr.doId2do.get(self.involvedToons[i]) if toon: angle = 90", "cp = CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane = plane return NodePath(newCollisionNode) def", "fromHpr=None, toPos=myToPos, toHpr=None, reverse=0) rollTrack = Sequence( Func(self.getGeomNode().setH, 180), 
rollThroughDoor[0],", "self.happy = 1 self.raised = 1 self.forward = 1 self.doAnimate()", "-1 return def announceGenerate(self): global OneBossCog self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName)", "__cleanupJuryBox') if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None if self.juryBox: self.juryBox.removeNode()", "else: self.loadScaleNew() def __debugScale(self): prosecutionPanPos = self.prosecutionPanNodePath.getPos() origin = Point3(0,", "exitPrepareBattleThree(self): self.notify.debug('----- exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName = 'PrepareBattleThree' self.clearInterval(intervalName)", "gearModel = self.getGearFrisbee() gearModel.setScale(0.1) t = self.getBossDamage() / 100.0 gearTrack", "loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx = []", "pieCode == ToontownGlobals.PieCodeBossCog: if toon == localAvatar: self.d_hitBoss(1) if self.dizzy:", "DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId) if attackCode == ToontownGlobals.BossCogAreaAttack: self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def", "Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral'))) return ival def toonsToBattlePosition(self, toonIds,", "looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self): self.notify.debug('----- __continueVictory') self.stopAnimate() self.doneBarrier('Victory') def", "= Sequence(name=name) seq += [Wait(0.0)] if hasLocalToon: seq += [Func(self.show),", "0.25, 0, -0.25, -0.25, 3) self.standNodePath = NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) 
self.standNodePath.reparentTo(self.scaleNodePath)", "= 0): DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId) if attackCode == ToontownGlobals.BossCogAreaAttack: self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt)", "def loadScaleNew(self): self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath = self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath =", "self.toWalkMode() def exitReward(self): self.notify.debug('----- exitReward') intervalName = 'RewardMovie' self.clearInterval(intervalName) self.unstash()", "ToontownBattleGlobals import DistributedBossCog from toontown.toonbase import TTLocalizer import SuitDNA from", "track, delayDeletes): self.notify.debug('----- __walkToonToPromotion') toon = base.cr.doId2do.get(toonId) if toon: destPos", "exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop() def enterFrolic(self): self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self)", "collision node: %s' % repr(cnp)) break newCollideMask = newCollideMask |", "self.notify.debug('cannonId = %d' % cannon.doId) cannonPos = cannon.nodePath.getPos(render) self.notify.debug('cannonPos =", "TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName() else: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def toonGotHealed(self,", "toontown.battle.BattleProps import * from direct.distributed.ClockDelta import * from direct.showbase.PythonUtil import", "if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None if self.juryBox: self.juryBox.removeNode() return", "shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment()", 
"vertexWriter.addData3f(x2, y1, z1) vertexWriter.addData3f(x1, y2, z1) vertexWriter.addData3f(x2, y2, z1) vertexWriter.addData3f(x1,", "= DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions = False def __init__(self, cr): self.notify.debug('----- __init___')", "seq += [Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0,", "= percentDamaged * ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage *", "Sequence(self.makeEpilogueMovie(), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1, volume=0.9)", "% origPos) self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s", "if not self.bonusTimer: self.bonusTimer = ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer)", "if base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage = 25 self.evidenceHitSfx = None self.toonUpSfx", "-1, 9, 3.5) targetNode = CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath =", "for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.stopLookAround()", "cannonIndex): retVal = 0 for chair in self.chairs.values(): if chair.state", "self.notify.debug('touchedGavel') attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '': self.notify.warning('Node %s", "center = seatCenter.getPos() self.notify.debug('center = %s' % center) self.witnessToon.setPos(center) self.witnessToon.setH(180)", "self.ignore('pieSplat') def cleanupAttacks(self): 
self.notify.debug('----- cleanupAttacks') self.__cleanupStrafe() def __cleanupStrafe(self): self.notify.debug('----- __cleanupStrage')", "VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack = Sequence() self.notify.debug('calling setPosHpr') myInterval =", "GeomVertexWriter(myVertexData, 'color') texWriter = GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1, y1, z1) vertexWriter.addData3f(x2,", "x2, y2, z2, r = 1.0, g = 1.0, b", "= lawyerCol.getName().split('-') lawyerDoId = int(names[1]) for lawyer in self.lawyers: if", "self.geom.show() self.witnessToon.addActive() def enterElevator(self): self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)", "elapsed = now - self.recoverStartTime return max(self.bossDamage - self.recoverRate *", "self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0) if self.useProgrammerScale: self.loadScaleOld() else: self.loadScaleNew() def", "self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1, volume=0.9) if not self.mainDoor.isEmpty(): self.mainDoor.stash() if", "y2, z2) vertexWriter.addData3f(x2, y2, z2) for index in xrange(8): normalWriter.addData3f(1.0,", "prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos = %s ' % prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos = %s'", "self.battleThreeMusic.stop() def enterDefeat(self): self.notify.debug('----- enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1)", "if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track = 
Parallel() bossAnimTrack = Sequence(", "node=toon) def hideBonusTimer(self): if self.bonusTimer: self.bonusTimer.hide() def enteredBonusState(self): self.witnessToon.clearChat() text", "= loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg')", "self.cr.doId2do.get(self.involvedToons[i]) if toon: angle = 90 - 15 * (i", "% panRenderPos) prosecutionLocatorPos = self.prosecutionLocator.getPos() prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos", "= None return def __cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox') if self.juryBoxIval: self.juryBoxIval.finish()", "Task import random import math from toontown.coghq import CogDisguiseGlobals from", "dialogTrack = Track( (0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1,", "self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide() def loadScaleOld(self): startingTilt = 0 self.scaleNodePath", "180, 0, 0) def exitElevator(self): self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def", "toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral'))) return ival def", "direct.task import Task import random import math from toontown.coghq import", "but we have a toon =%d' % (index, toonId)) allCannonsAppear.append(multiCannons)", "tris.addVertex(6) tris.addVertex(4) tris.closePrimitive() tris.addVertex(1) tris.addVertex(5) tris.addVertex(3) tris.closePrimitive() tris.addVertex(3) tris.addVertex(5) 
tris.addVertex(7)", "self.reflectedJuryBox.setPos(-30, 0, 0) curPos = self.juryBox.getPos() endingAbsPos = Point3(curPos[0] +", "self.forward = 1 intervalName = 'DefeatMovie' seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat),", "ToontownGlobals.PieCodeBossCog: if toon == localAvatar: self.d_hitBoss(1) if self.dizzy: self.flashRed() self.doAnimate('hit',", "in self.involvedToons: if index in self.cannons: cannon = self.cannons[index] toon", "0, 1, 7, 3.5) shieldNode = CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask |", "createBlock(self, x1, y1, z1, x2, y2, z2, r = 1.0,", "str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 1.0,", "if toon: toon.wrtReparentTo(render) pos, h = points[i] if i >", "walkToonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) ival", "d_hitToon') self.sendUpdate('hitToon', [toonId]) def gotToon(self, toon): stateName = self.state if", "0), name=self.uniqueName('BattleTwoMovie')) def toNeutralMode(self): if self.cr: place = self.cr.playGame.getPlace() if", "= 0 self.mainDoor = None self.reflectedMainDoor = None self.panFlashInterval =", "self.useCannons: self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) self.startJuryBoxMoving() for", "self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25) if toon == localAvatar: self.d_hitBoss(self.panDamage)", "= self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos = render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos = %s'", "self.__unloadMopaths() self.__clearOnscreenMessage() 
taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode') self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest =", "%s' % beamRenderPos) beamBoundsCenter = self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter = %s' %", "def unstashBaseCol(self): if self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed = False", "p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA)", "%s' % cannonPos) if toon: self.notify.debug('toon = %s' % toon.getName())", "__makeWitnessToon(self): dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc = Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0)", "self.evidenceHitSfx = None self.toonUpSfx = None self.bonusTimer = None self.warningSfx", "rollThroughDoor[0], Func(self.getGeomNode().setH, 0)) rollTrackDuration = rollTrack.getDuration() self.notify.debug('rollTrackDuration = %f' %", "Task.cont def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes): self.notify.debug('----- __walkToonToPromotion')", "into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def touchedGavelHandle(self, gavel, entry): attackCodeStr", "endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] +", "self.juryBox.getPos() newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.juryBox.setPos(newPos)", "__onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') 
def exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree') self.unstickBoss() intervalName", "= Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1)) return bossTrack def makeEpilogueMovie(self): epSpeech", "-0.5, 0, 0, -1.5, 0.6) defenseTube.setTangible(1) defenseCollNode = CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube)", "bossTrack def makeDefeatMovie(self): bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))),", "0, -1, 9, 3.5) targetNode = CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath", "= 'ChiefJusticeTaunt' seq = Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString, CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat))", "shield = CollisionTube(0, 1, 4, 0, 1, 7, 3.5) shieldNode", "self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos)", "Func(camera.setPos, -3, 45, 25), Func(camera.setHpr, 0, 10, 0))), (1.0, Func(self.setChatAbsolute,", "if toon: toon.loop('neutral') def makeEndOfBattleMovie(self, hasLocalToon): name = self.uniqueName('Drop') seq", "= Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)", "return bossTrack def makeEpilogueMovie(self): epSpeech = TTLocalizer.WitnessToonCongratulations epSpeech = self.__talkAboutPromotion(epSpeech)", "toonEnteredCannon(self, toonId, cannonIndex): if base.localAvatar.doId == toonId: self.cannonIndex = cannonIndex", "1 return retVal def calculateWeightOfToon(self, toonId): defaultWeight = 1 bonusWeight", "-13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar),", 
"= self.geom.find('**/%s' % str) if not stuffToHide.isEmpty(): self.notify.debug('found %s' %", "self.notify.debug('----- __cleanupJuryBox') if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None if self.juryBox:", "defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos = %s' % defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' %", "__foundPieButton(self): self.everThrownPie = 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand',", "= self.cannons[index] toon = self.cr.doId2do.get(toonId) self.notify.debug('cannonId = %d' % cannon.doId)", "nametag') gotError = True if gotError: st = StackTrace() print", "from direct.fsm import FSM from direct.fsm import ClassicFSM from direct.fsm", "def doorBCallback(self, isOpen): if self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash()", "d_hitBoss') self.sendUpdate('hitBoss', [bossDamage]) def d_healBoss(self, bossHeal): self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss', [bossHeal])", "i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.wrtReparentTo(render) pos,", "toon.setH(180) renderPos = toon.getPos(render) self.notify.debug('renderPos =%s' % renderPos) index +=", "GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1) tris.addVertex(2) tris.closePrimitive() tris.addVertex(1) tris.addVertex(3) tris.addVertex(2) tris.closePrimitive() tris.addVertex(2)", "x, y, 0) toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show() def __talkAboutPromotion(self, speech): if", "-50))) planeNode = CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3 = self.geom.find('**/SlidingDoor1/')", "0 
self.insidesANodePath = None self.insidesBNodePath = None self.strafeInterval = None", "def __touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not", "enterWaitForToons(self): self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() self.witnessToon.removeActive() def exitWaitForToons(self): self.notify.debug('----- exitWaitForToons')", "topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA)", "self.notify.debug('origPos = %s' % origPos) self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s' %", "= beamLocatorBounds.getCenter() negBeamLocatorPos = -beamLocatorPos self.notify.debug('beamLocatorPos = %s' % beamLocatorPos)", "ToontownGlobals.WallBitmask) self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB = CollisionPolygon(Point3(-4.0,", "self.defensePanNodePath.getPos()) self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds = self.prosecutionLocator.getBounds() prosecutionLocPos = prosecutionLocBounds.getCenter()", "= GeomNode('cube') cubeGN.addGeom(cubeGeom) return cubeGN def __enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol') def", "toontown.toonbase import ToontownGlobals from toontown.toonbase import ToontownBattleGlobals import DistributedBossCog from", "0, 0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return seq def", "Func(self.witnessToon.setLocalPageChat, chatString, 0)) return movie def __doWitnessPrepareBattleThreeChat(self): 
self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated", "self.notify.debug('----- enterPrepareBattleThree') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)", "tris.addVertex(1) tris.addVertex(2) tris.closePrimitive() tris.addVertex(1) tris.addVertex(3) tris.addVertex(2) tris.closePrimitive() tris.addVertex(2) tris.addVertex(3) tris.addVertex(6)", "= self.uniqueName('Drop') seq = Sequence(name=name) seq += [Wait(0.0)] if hasLocalToon:", "self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor() intervalName = 'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo),", "origin) locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos = %s ' %", "self.bonusTimer = None self.warningSfx = None self.juryMovesSfx = None self.baseColStashed", "0, -0.5, -0.5, -2, 1.0, 0, 0, 1.0) self.prosecutionPanNodePath =", "entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId == '': self.notify.warning('Toon %s has no avatarDoId", "speech def __positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index = 0 self.involvedToons.sort() for toonId", "taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def __pieSplat(self, toon, pieCode): if pieCode ==", "normalWriter.addData3f(1.0, 1.0, 1.0) colorWriter.addData4f(r, g, b, a) texWriter.addData2f(1.0, 1.0) tris", "self.witnessToon.setZ(self.witnessToon.getZ() - 1.5) self.witnessToon.setY(self.witnessToon.getY() - 1.15) self.witnessToonOnstage = 1 def", "= juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.juryBox.setPos(newPos) self.reflectedJuryBox =", "== ToontownGlobals.PieCodeLawyer: pass def 
__localPieSplat(self, pieCode, entry): if pieCode ==", "!= localAvatar.doId: self.d_hitToon(doId) def __lawyerGotHit(self, entry): lawyerCol = entry.getIntoNodePath() names", "self.reflectedPodium.setZ(reflectedZ) if not self.reflectedPodium.isEmpty(): if self.debugPositions: self.reflectedPodium.show() def loadCannons(self): pass", "self.bossDamage = bossDamage self.recoverRate = recoverRate self.recoverStartTime = recoverStartTime taskName", "bottomPos, None, 1) bossTrack.append(track) track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos,", "self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath = self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds", "fromPos, toPos): self.notify.debug('----- __walkSuitToPoint') vector = Vec3(toPos - fromPos) distance", "0, 1.0) self.beamNodePath = NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0, 3) self.beamNodePath.reparentTo(self.scaleNodePath)", "+ 1) if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.WitnessToonHPBoost else:", "elif pieCode == ToontownGlobals.PieCodeLawyer: pass def __localPieSplat(self, pieCode, entry): if", "def exitEpilogue(self): self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop() def enterFrolic(self): self.notify.debug('-----", "topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1],", "0, 0) door = self.doorA else: gearRoot.setPos(0, 7, 3) door", "seq.start() def replaceCollisionPolysWithPlanes(self, model): newCollisionNode = CollisionNode('collisions') newCollideMask = 
BitMask32(0)", "delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track = Parallel() bossAnimTrack = Sequence( ActorInterval(self, 'Ff_speech',", "self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self): self.notify.debug('----- __doneReward')", "-70, 10, 180, 0, 0)))), (27, Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'),", "tris.closePrimitive() cubeGeom = Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN = GeomNode('cube') cubeGN.addGeom(cubeGeom) return", "'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx", "math.pi / 180.0 x = math.cos(radians) * radius y =", "10, 0), Func(self.__doWitnessPrepareBattleThreeChat)) return movie def countToonJurors(self): self.numToonJurorsSeated = 0", "== 'ToonJuror' or chair.state == None and chair.newState == 'ToonJuror':", "finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(),", "-71.601) self.geom.setScale(1) self.elevatorEntrance = self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator')", "Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if not self.reflectedJuryBox.isEmpty(): if self.debugPositions:", "__makePrepareBattleThreeMovie(self): movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15, 20), Func(camera.setHpr,", "Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute, 
attackToons, CFSpeech)))) track.append(dialogTrack) return Sequence( Func(self.stickToonsToFloor), track,", "lawyerCol.getName().split('-') lawyerDoId = int(names[1]) for lawyer in self.lawyers: if lawyerDoId", "self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage = 25 self.evidenceHitSfx", "Func(self.setChatAbsolute, attackToons, CFSpeech)))) track.append(dialogTrack) return Sequence( Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction'))", "self.epilogueMusic.stop() if self.juryTimer: self.juryTimer.destroy() del self.juryTimer if self.bonusTimer: self.bonusTimer.destroy() del", "== None or plane.compareTo(lastPlane, threshold) != 0: cp = CollisionPlane(plane)", "/ (ToontownGlobals.SuitWalkSpeed * 1.8) return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time,", "ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos)", "not self.reflectedJuryBox.isEmpty(): if self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self):", "deathPos, None, 1) bossTrack.append(track) duration = bossTrack.getDuration() return bossTrack def", "0.5)) insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesBNodePath =", "direct.directutil import Mopath from direct.showutil import Rope from toontown.distributed import", "found %s' % stuffToHide) self.geom.reparentTo(render) self.loadWitnessStand() self.loadScale() self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium()", "NodePath('injusticeScale') beamGeom = self.createBlock(0.25, 2, 
0.125, -0.25, -2, -0.125, 0,", "import RewardPanel from toontown.toon import NPCToons from direct.task import Task", "self.battleDifficulty = 0 self.bonusWeight = 0 self.numJurorsLocalToonSeated = 0 self.cannonIndex", "ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) self.__showWitnessToon() diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage -", "__toonsToPromotionPosition(self, toonIds, battleNode): self.notify.debug('----- __toonsToPromotionPosition') points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]", "1), Point3(0, 0, -50))) planeNode = CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode)", "= None self.lawyers = lawyers for i in xrange(len(self.lawyers)): suit", "math.cos(angle) h = random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1,", "0 self.battleThreeMusicTime = 0 self.insidesANodePath = None self.insidesBNodePath = None", "CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane = plane return NodePath(newCollisionNode) def makeIntroductionMovie(self, delayDeletes):", "DelayDelete from toontown.battle import MovieToonVictory from toontown.building import ElevatorUtils from", "ElevatorConstants.ELEVATOR_CJ self.gavels = {} self.chairs = {} self.cannons = {}", "DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from toontown.toonbase import ToontownBattleGlobals import", "None self.bossDamage = 0 self.attackCode = None self.attackAvId = 0", "= ['interior/Door_1'] for str in itemsToHide: stuffToHide = self.geom.find('**/%s' %", "Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1)) return bossTrack", "import NPCToons from direct.task import Task import random import math", "= %s' % 
beamLocatorPos) self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos)", "self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies',", "time / numGears for i in xrange(numGears): node = gearRoot.attachNewNode(str(i))", "= %s' % pos) self.notify.debug('walkToonsToBattlePosition: final pos = %s' %", "= math.sin(radians) * radius toon.setPos(self.witnessToon, x, y, 0) toon.headsUp(self.witnessToon) toon.loop('neutral')", "== 1: juryResult = TTLocalizer.WitnessToonOneJuror elif self.numToonJurorsSeated == 12: juryResult", "self.warningSfx = None self.juryMovesSfx = None self.baseColStashed = False self.battleDifficulty", "toonId): defaultWeight = 1 bonusWeight = 0 newWeight = 1", "duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0)) self.juryBoxIval.start() self.juryTimer = ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def", "0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop,", "0 self.raised = 0 self.forward = 1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic,", "'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2,", "destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral')) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self, node,", "%s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), 
battleNode.getScale()))", "0), Func(self.__doWitnessPrepareBattleThreeChat)) return movie def countToonJurors(self): self.numToonJurorsSeated = 0 for", "self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() negBeamLocatorPos = -beamLocatorPos self.notify.debug('beamLocatorPos = %s'", "+ self.toonsB, render, (-2.798, -70, 10, 180, 0, 0)))), (27,", "= self.__makePrepareBattleTwoMovie() intervalName = 'prepareBattleTwo' seq = Sequence(prepareBattleTwoMovie, name=intervalName) seq.start()", "Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return bossTrack def", "numJurors - diffSettings[5] if bonusWeight < 0: bonusWeight = 0", "FSM from direct.fsm import ClassicFSM from direct.fsm import State from", "found %s' % stuffToHide) self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door') if not self.reflectedMainDoor.isEmpty():", "getBossDamage(self): self.notify.debug('----- getBossDamage') now = globalClock.getFrameTime() elapsed = now -", "else: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0) tilt =", "self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale())) myCurPos = self.getPos() self.notify.debug('myCurPos =", "lawyer.sendUpdate('hitByToon', []) def __finalPieSplat(self, toon, pieCode): if pieCode != ToontownGlobals.PieCodeDefensePan:", "3) door = self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee() gearModel.setScale(0.1)", "if toon: destPos = toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival = Sequence(Wait(delay),", "= 0 self.bonusWeight = 0 self.numJurorsLocalToonSeated = 0 self.cannonIndex =", "1) return speech def __positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index = 0 self.involvedToons.sort()", "= 
self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName = 'prepareBattleThree' seq = Sequence(prepareBattleThreeMovie,", "* from libotp import * from direct.fsm import FSM from", "self.reflectedPodium.show() def loadCannons(self): pass def loadWitnessStand(self): self.realWitnessStand = self.geom.find('**/WitnessStand') if", "self.uniqueName('PieAdvice')) self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) self.__showWitnessToon()", "base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render) pos, h = points[i] toon.setPosHpr(battleNode, pos[0],", "self.storeInterval(seq, intervalName) def __onToBattleThree(self, elapsed): self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage,", "self.numToonJurorsSeated == 12: juryResult = TTLocalizer.WitnessToonAllJurors else: juryResult = TTLocalizer.WitnessToonSomeJurors", "self.d_hitBoss(1) if self.dizzy: self.flashRed() self.doAnimate('hit', now=1) elif pieCode == ToontownGlobals.PieCodeDefensePan:", "= self.geom.find('**/JuryBox') juryBoxPos = self.juryBox.getPos() newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos)", "suit.setBossCogId(self.doId) return def setBossDamage(self, bossDamage, recoverRate, timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp)", "0) bossTrack.append(track) track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0)", "1 self.raised = 1 self.forward = 1 self.doAnimate() self.__hideWitnessToon() if", "= TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus: weightBonusText =", "startHpr=Point3(-10, -13, 0), 
blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo,", "% toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0, 8, 0) toon.setH(180) renderPos = toon.getPos(render)", "= ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage = 25 self.evidenceHitSfx =", "target = CollisionTube(0, -1, 4, 0, -1, 9, 3.5) targetNode", "toon: pos, h = points[i] origPos = pos self.notify.debug('origPos =", "place = self.cr.playGame.getPlace() if place and hasattr(place, 'fsm'): place.setState('waitForBattle') def", "prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator =", "= TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout) base.playSfx(self.toonUpSfx)", "z2) vertexWriter.addData3f(x1, y2, z2) vertexWriter.addData3f(x2, y2, z2) for index in", "tris.addVertex(6) tris.closePrimitive() tris.addVertex(3) tris.addVertex(7) tris.addVertex(6) tris.closePrimitive() tris.addVertex(0) tris.addVertex(2) tris.addVertex(4) tris.closePrimitive()", "(toonIds, battleNode)) ival = Parallel() points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]", "stashBoss(self): self.stash() def unstashBoss(self, task): self.unstash() self.reparentTo(render) def enterRollToBattleTwo(self): self.notify.debug('-----", "if oldSeq: oldSeq.finish() seq.start() self.storeInterval(seq, intervalName) def setTaunt(self, tauntIndex, extraInfo):", "== 'ToonJuror': if chair.toonJurorIndex == cannonIndex: retVal += 1 return", "localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1) self.happy = 0 
self.raised = 0", "Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ()", "% beamLocatorPos) self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom)", "allCannonsAppear.append(multiCannons) intervalName = 'prepareBattleTwoCannonsAppear' seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName) seq.start()", "track.start() def exitEpilogue(self): self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop() def enterFrolic(self):", "def touchedGavelHandle(self, gavel, entry): attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr ==", "%s' % panRenderPos) prosecutionLocatorPos = self.prosecutionLocator.getPos() prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin)", "lastPlane = plane return NodePath(newCollisionNode) def makeIntroductionMovie(self, delayDeletes): self.notify.debug('----- makeIntroductionMovie')", "1 cannonIndex = self.cannonIndex numJurors = 0 if not cannonIndex", "def enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) self.__showWitnessToon()", "+= TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1) if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels:", "gearRoot.setHpr(180, 0, 0) door = self.doorA else: gearRoot.setPos(0, 7, 3)", "cannonSeq = cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index += 1 else: self.notify.warning('No cannon", "doStrafe(self, 
side, direction): gearRoot = self.rotateNode.attachNewNode('gearRoot') if side == 0:", "self.epilogueMusic.stop() def enterFrolic(self): self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def doorACallback(self,", "bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack =", "self.juryTimer = None self.witnessToon = None self.witnessToonOnstage = False self.numToonJurorsSeated", "if attackCodeStr == '': self.notify.warning('Node %s has no attackCode tag.'", "def countToonJurors(self): self.numToonJurorsSeated = 0 for key in self.chairs.keys(): chair", "% renderPos) index += 1 self.notify.debug('done with positionToons') def __makePrepareBattleTwoMovie(self):", "= Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos)", "self.lawyers = [] self.lawyerRequest = None self.bossDamage = 0 self.attackCode", "self.reflectedMainDoor = None self.panFlashInterval = None self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage if", "self.doneBarrier('Victory') def exitVictory(self): self.notify.debug('----- exitVictory') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime =", "0.5), Point3(4.0, -2.0, 0.5)) insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask |", "= TTLocalizer.BossCogNameWithDept % {'name': self._name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.piesRestockSfx =", "self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, 
self.hideBonusTimer) def setAttackCode(self, attackCode, avId = 0): DistributedBossCog.DistributedBossCog.setAttackCode(self,", "return chatString = TTLocalizer.LawbotBossTaunts[1] if tauntIndex == 0: if extraInfo", "= [] self.lawyerRequest = None self.bossDamage = 0 self.attackCode =", "self.realWitnessStand.find('**/witnessStandSeatEdge') center = seatCenter.getPos() self.notify.debug('center = %s' % center) self.witnessToon.setPos(center)", "base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor = self.geom.find('**/MidVaultFloor1') if floor.isEmpty(): floor", "180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0)) rollTrackDuration = rollTrack.getDuration() self.notify.debug('rollTrackDuration = %f'", "0 self.cannonIndex = -1 return def announceGenerate(self): global OneBossCog self.notify.debug('-----", "localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self): self.notify.debug('----- enterVictory')", "= %s' % cannonPos) if toon: self.notify.debug('toon = %s' %", "CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0,", "self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'),", "z2) for index in xrange(8): normalWriter.addData3f(1.0, 1.0, 1.0) colorWriter.addData4f(r, g,", "self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not found %s' %", "taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName = 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self):", "self.reflectedMainDoor.unstash() if not 
self.elevatorEntrance.isEmpty(): pass def enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne ')", "diffSettings[4]: newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight > 0:", "return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def cleanupAttacks(self): self.notify.debug('----- cleanupAttacks') self.__cleanupStrafe() def", "task): self.unstash() self.reparentTo(render) def enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss() self.toonsToBattlePosition(self.involvedToons,", "DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if not self.mainDoor.isEmpty(): pass if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash()", "localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0 self.raised = 0 self.forward = 1", "self.unstash() self.epilogueMusic.stop() def enterFrolic(self): self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def", "0: cp = CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane = plane return NodePath(newCollisionNode)", "= 0 self.recoverStartTime = 0 self.bossDamageMovie = None self.everThrownPie =", "return def delete(self): self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self, bossDamage): self.notify.debug('-----", "ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.WitnessToonHPBoost else: speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel", "self.recoverRate * elapsed / 60.0, 0) def __recoverBossDamage(self, task): self.notify.debug('-----", "+ 30, myFromPos[2]) rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0)", 
"tris.closePrimitive() tris.addVertex(2) tris.addVertex(6) tris.addVertex(4) tris.closePrimitive() tris.addVertex(1) tris.addVertex(5) tris.addVertex(3) tris.closePrimitive() tris.addVertex(3)", "diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: numJurors = self.numJurorsSeatedByCannon(cannonIndex) bonusWeight =", "timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage self.recoverRate = recoverRate", "self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie =", "__init___') DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers = [] self.lawyerRequest =", "Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0],", "Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1)) return bossTrack def makeEpilogueMovie(self): epSpeech =", "toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: if index", "taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1,", "self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol) self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor()", "tris.addVertex(5) 
tris.closePrimitive() tris.addVertex(4) tris.addVertex(6) tris.addVertex(7) tris.closePrimitive() tris.addVertex(7) tris.addVertex(5) tris.addVertex(4) tris.closePrimitive()", "= self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds = self.prosecutionLocator.getBounds() prosecutionLocPos = prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos =", "battleNode.getParent()) self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos()) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render)", "self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode') self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = None self.betweenBattleMusic.stop() self.promotionMusic.stop()", "not in battle three state, state=%s', self.state) gotError = True", "self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath = self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol')", "180)) bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, None, battlePos, None,", "self.bonusWeight > 0: if self.bonusWeight == 1: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty)", "pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1,", "GeomVertexWriter(myVertexData, 'normal') colorWriter = GeomVertexWriter(myVertexData, 'color') texWriter = GeomVertexWriter(myVertexData, 'texcoord')", "taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() 
taskMgr.remove(self.uniqueName('PieAdvice'))", "10, pos[2], h, 0, 0) def __outOfPies(self): self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence)", "entry): if pieCode == ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry) if pieCode != ToontownGlobals.PieCodeToon:", "= self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 1.0, 0, 0,", "1, 1) seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3,", "CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq = self.activeIntervals.get(intervalName) if oldSeq: oldSeq.finish() seq.start()", "self.reparentTo(render) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree)", "* radius y = math.sin(radians) * radius toon.setPos(self.witnessToon, x, y,", "setTaunt, not in battle three state, state=%s', self.state) gotError =", "place and hasattr(place, 'fsm'): place.setState('waitForBattle') def makeToonsWait(self): self.notify.debug('makeToonsWait') for toonId", "+ ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self): self.podium = self.geom.find('**/Podium') newZ = self.podium.getZ()", "enterElevator(self): self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy = 1", "index in self.cannons: cannon = self.cannons[index] cannonSeq = cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq)", "'neutral')) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion')) def __walkSuitToPoint(self, node, fromPos, toPos): self.notify.debug('-----", "self.mainDoor.stash() if not 
self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30, 8, 180,", "'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos", "if not self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect') if not self.reflectedWitnessStand.isEmpty():", "0) def __outOfPies(self): self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def", "self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter) beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos", "self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss() intervalName =", "self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos()) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos", "= percentDamaged * ToontownGlobals.LawbotBossWinningTilt else: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage", "ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos = Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2]) rollThroughDoor =", "= 0 newWeight = defaultWeight + bonusWeight self.notify.debug('toon %d has", "toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop,", "self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) 
self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol =", "self.geom.find('**/Witnessstand_Geo_Reflect') if not self.reflectedWitnessStand.isEmpty(): pass colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand') def", "self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))", "taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self, entry): self.sendUpdate('touchWitnessStand', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if", "= None self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage self.elevatorType = ElevatorConstants.ELEVATOR_CJ self.gavels =", "* from toontown.battle.BattleProps import * from direct.distributed.ClockDelta import * from", "0, -0.5, 0, 0, -1.5, 0.6) defenseTube.setTangible(1) defenseCollNode = CollisionNode('DefenseCol')", "self.zapLocalToon(attackCode, into) def touchedGavelHandle(self, gavel, entry): attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if", "0))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5,", "= self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1) bossTrack.append(track) duration = bossTrack.getDuration()", "Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return bossTrack", "= recoverStartTime taskName = 'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie: if 
self.bossDamage", "TTLocalizer.WitnessToonPrepareBattleThree diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: newWeight, self.bonusWeight, self.numJurorsLocalToonSeated =", "== toonId: self.cannonIndex = cannonIndex def numJurorsSeatedByCannon(self, cannonIndex): retVal =", "has weight of %d' % (toonId, newWeight)) return (newWeight, bonusWeight,", "if self.hasLocalToon(): self.toMovieMode() for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId)", "1 self.toonsEnterB.timeScale = 35 def __unloadMopaths(self): self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset()", "self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self): self.notify.debug('----- exitNearVictory')", "else: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus: weightBonusText = juryWeightBonus %", "ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos = self.reflectedJuryBox.getPos()", "None self.warningSfx = None self.juryMovesSfx = None self.baseColStashed = False", "self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.stopLookAround() toon.stopSmooth() if self.hasLocalToon():", "/ 180.0 x = math.cos(radians) * radius y = math.sin(radians)", "self.notify.debug('new toon pos %s ' % toon.getPos()) def touchedGavel(self, gavel,", "= CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk", "180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0)) return movie def __doWitnessPrepareBattleThreeChat(self):", "blendType='easeInOut') chatTrack = 
Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()),", "1.0, 0.25) self.defensePanNodePath = NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) self.defensePanNodePath.setPos(0, -2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath)", "= 1.0, g = 1.0, b = 1.0, a =", "'LawbotBoss.makeIntroductionMovie')) track = Parallel() bossAnimTrack = Sequence( ActorInterval(self, 'Ff_speech', startTime=2,", "self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx = [] for i in xrange(10):", "recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage self.recoverRate = recoverRate self.recoverStartTime", "prosecutionLocatorPos = self.prosecutionLocator.getPos() prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos = render.getRelativePoint(self.prosecutionLocator,", "vertexWriter.addData3f(x2, y2, z1) vertexWriter.addData3f(x1, y1, z2) vertexWriter.addData3f(x2, y1, z2) vertexWriter.addData3f(x1,", "curReflectedPos = self.reflectedJuryBox.getPos() reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] +", "self.notify.debug('myCurPos = %s' % self.getPos()) self.notify.debug('battleNode.parent() = %s' % battleNode.getParent())", "+ 1) return speech def __positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons') index = 0", "if extraInfo < len(self.involvedToons): toonId = self.involvedToons[extraInfo] toon = base.cr.doId2do.get(toonId)", "self.bonusWeight) trialSpeech += '\\x07' trialSpeech += weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0) def", "toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes = 
delayDeletes", "self.storeInterval(seq, intervalName) def setTaunt(self, tauntIndex, extraInfo): gotError = False if", "% self.getPos()) self.notify.debug('battleNode.parent() = %s' % battleNode.getParent()) self.notify.debug('battleNode.parent().getPos() = %s'", "self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if not self.elevatorEntrance.isEmpty(): pass def enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne", "self.bossDamageMovie: if self.bossDamage >= self.bossMaxDamage: self.notify.debug('finish the movie then transition", "if chair.state == 'ToonJuror' or chair.state == None and chair.newState", "return ival def toonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' %", "from direct.showutil import Rope from toontown.distributed import DelayDelete from toontown.battle", "Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1))", "% (battleNode.getTransform(), render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale())) myCurPos =", "if self.insidesANodePath: if isOpen: self.insidesANodePath.unstash() else: self.insidesANodePath.stash() def doorBCallback(self, isOpen):", "= entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '': self.notify.warning('Node %s has no", "__unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset() def enterOff(self): self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon:", "a toon =%d' % (index, toonId)) allCannonsAppear.append(multiCannons) intervalName = 'prepareBattleTwoCannonsAppear'", "seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)), 
self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1,", "insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash()", "gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0, 0) gear = gearModel.instanceTo(node) angle =", "def exitReward(self): self.notify.debug('----- exitReward') intervalName = 'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy()", "= self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track = Parallel() bossAnimTrack", "= beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) def loadScaleNew(self): self.scaleNodePath", "* (i - center) radians = angle * math.pi /", "chatString): intervalName = 'ChiefJusticeTaunt' seq = Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString, CFSpeech))", "ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack = Sequence() self.notify.debug('calling setPosHpr') myInterval = camera.posHprInterval(8,", "Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB) else: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA) topRampTurnPos =", "self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self): self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom =", "'t\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc = Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0) 
npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon", "taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage')", "import * from toontown.battle.BattleProps import * from direct.distributed.ClockDelta import *", "= %s' % beamPos) self.notify.debug('beamRelPos = %s' % beamRelPos) self.notify.debug('beamRenderPos", "= 0 self.forward = 1 intervalName = 'DefeatMovie' seq =", "cubeGN def __enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol') def", "% prosecutionPanRelPos) self.notify.debug('panRenderPos = %s' % panRenderPos) prosecutionLocatorPos = self.prosecutionLocator.getPos()", "delayDeletes = [] for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId)", "0: juryResult = TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated == 1: juryResult =", "startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos =", "TTLocalizer import SuitDNA from toontown.toon import Toon from toontown.battle import", "import ClassicFSM from direct.fsm import State from direct.directnotify import DirectNotifyGlobal", "shieldNode = CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode)", "localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) track,", "base.config.GetBool('want-injustice-scale-debug', 0) if self.useProgrammerScale: 
self.loadScaleOld() else: self.loadScaleNew() def __debugScale(self): prosecutionPanPos", "toonId)) allCannonsAppear.append(multiCannons) intervalName = 'prepareBattleTwoCannonsAppear' seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName)", "def __showOnscreenMessage(self, text): self.notify.debug('----- __showOnscreenmessage') if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage =", "enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie", "= self.cannons[index] cannonSeq = cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index += 1 else:", "= 0 self.battleThreeMusic.stop() def enterEpilogue(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate()", "= self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol')", "self.lawyers = [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers) def __gotLawyers(self,", "def toonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode))", "- 1) / 2.0 for i in xrange(numToons): toon =", "< ToontownGlobals.LawbotBossMaxDamage * 0.85: self.unstashBaseCol() else: self.stashBaseCol() def unloadEnvironment(self): self.notify.debug('-----", "allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), 
Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar)) multiCannons", "NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName)", "self.forward = 1 intervalName = 'VictoryMovie' seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory),", "self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self): self.notify.debug('-----", "-spread dist = 50 rate = time / numGears for", "= defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos = %s' % defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s'", "reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0)) self.juryBoxIval.start() self.juryTimer = ToontownTimer.ToontownTimer()", ">= 0: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage) tilt", "def enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor() intervalName = 'RollToBattleThree' seq", "self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self):", "pieCode): if pieCode != ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def", "CFSpeech)), (5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)), 
(12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18,", "z1) vertexWriter.addData3f(x2, y1, z1) vertexWriter.addData3f(x1, y2, z1) vertexWriter.addData3f(x2, y2, z1)", "global OneBossCog self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept %", "toontown.battle import MovieToonVictory from toontown.building import ElevatorUtils from toontown.battle import", "a) texWriter.addData2f(1.0, 1.0) tris = GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1) tris.addVertex(2) tris.closePrimitive()", "= localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.WitnessToonLastPromotion %", "toonId): self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon', [toonId]) def gotToon(self, toon): stateName =", "self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30, 8, 180, 0, 0) def exitElevator(self):", "track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s'", "self.toMovieMode() for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon:", "self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) ival = Parallel() points", "0)) return bossTrack def makeDefeatMovie(self): bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead),", "= time / numGears for i in xrange(numGears): node =", "if OneBossCog == self: OneBossCog = None return def delete(self):", "self.evFloor = self.replaceCollisionPolysWithPlanes(floor) 
self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane = CollisionPlane(Plane(Vec3(0, 0, 1),", "NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube = CollisionTube(0, 0,", "import * from direct.fsm import FSM from direct.fsm import ClassicFSM", "finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return Sequence(chatTrack, bossTrack,", "self.geom.find('**/Podium_Geo1_Refl') reflectedZ = self.reflectedPodium.getZ() if not self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if not", "self.__showWitnessToon() if not self.useCannons: self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1,", "self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos()) self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds = self.prosecutionLocator.getBounds() prosecutionLocPos", "== ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry) if pieCode != ToontownGlobals.PieCodeToon: return avatarDoId =", "= 60 * math.pi / 180.0 if direction == 1:", "isinstance(cn, CollisionNode): self.notify.warning('Not a collision node: %s' % repr(cnp)) break", "(27, Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute, attackToons, CFSpeech)))) track.append(dialogTrack) return", "self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return Task.cont def __walkToonToPromotion(self, toonId, delay, mopath,", "task): self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self, task): 
self.notify.debug('----- __howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies)", "') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() self.loop('Ff_neutral') self.notify.debug('self.battleANode = %s' %", "toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track = Parallel()", "BattleBase.BattleBase.toonPoints[len(toonIds) - 1] self.notify.debug('walkToonsToBattlePosition: points = %s' % points[0][0]) for", "for i in xrange(numToons): toon = self.cr.doId2do.get(self.involvedToons[i]) if toon: angle", "= self.activeIntervals.get(intervalName) if oldSeq: oldSeq.finish() seq.start() self.storeInterval(seq, intervalName) def setTaunt(self,", "retVal = 0 for chair in self.chairs.values(): if chair.state ==", "prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName = 'prepareBattleThree' seq =", "= 'prepareBattleTwo' seq = Sequence(prepareBattleTwoMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage',", "if not self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed = True def", "toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render) pos, h = points[i]", "= %s' % panRenderPos) prosecutionLocatorPos = self.prosecutionLocator.getPos() prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator,", "self.notify.warning('Not a collision node: %s' % repr(cnp)) break newCollideMask =", "Sequence( Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0)) rollTrackDuration = rollTrack.getDuration() self.notify.debug('rollTrackDuration", "battlePos, None, 0) bossTrack.append(track) track, hpr = 
self.rollBossToPoint(battlePos, hpr, battlePos,", "== 'BattleThree': self.notify.warning('returning from setTaunt, not in battle three state,", "numJurors = 0 if not cannonIndex == None and cannonIndex", "h, 0, 0) def __outOfPies(self): self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies,", "self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col') self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide() def", "CFSpeech)), (12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22,", "def __recoverBossDamage(self, task): self.notify.debug('----- __recoverBossDamage') if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie)", "self.notify.warning('returning from setTaunt, not in battle three state, state=%s', self.state)", "return bossTrack def __showOnscreenMessage(self, text): self.notify.debug('----- __showOnscreenmessage') if self.onscreenMessage: self.onscreenMessage.destroy()", "def enterFrolic(self): self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def doorACallback(self, isOpen):", "if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self): self.notify.debug('----- __doneBattleThree') self.setState('NearVictory') self.unstickBoss()", "self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) 
self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon()", "CFSpeech)), (22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24, Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA +", "self.bonusWeight == 1: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty)", "str) if not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor) else:", "self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx", "colNode.setName('WitnessStand') def loadScale(self): self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0) if self.useProgrammerScale: self.loadScaleOld()", "0.5, 0, -0.5, -0.5, -2, 0, 0, 1.0, 0.25) self.defensePanNodePath", "% battleNode.getParent().getPos()) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent)", "self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1) bossTrack.append(track) duration = bossTrack.getDuration() return", "GeomVertexWriter(myVertexData, 'vertex') normalWriter = GeomVertexWriter(myVertexData, 'normal') colorWriter = GeomVertexWriter(myVertexData, 'color')", "self.doneBarrier('Reward') self.toWalkMode() def exitReward(self): self.notify.debug('----- exitReward') intervalName = 'RewardMovie' self.clearInterval(intervalName)", "self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self): self.notify.debug('----- exitBattleTwo') intervalName = self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles()", "noSkip=True) ival = Sequence(Parallel(victory, 
camVictory), Func(self.__doneReward)) intervalName = 'RewardMovie' delayDeletes", "entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def createBlock(self, x1, y1, z1, x2, y2,", "self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True) ival = Sequence(Parallel(victory,", "= base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render) pos, h = points[i] toon.setPosHpr(battleNode,", "avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId == '': self.notify.warning('Toon %s has", "y = dist * math.cos(angle) h = random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i", "None self.unstickBoss() taskName = 'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop()", "% toon.getName() else: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def toonGotHealed(self, toonId):", "(index, toonId)) allCannonsAppear.append(multiCannons) intervalName = 'prepareBattleTwoCannonsAppear' seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo),", "= self.cannons[index] cannon.cannon.show() def getChairParent(self): return self.juryBox def startJuryBoxMoving(self): if", "radius toon.setPos(self.witnessToon, x, y, 0) toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show() def __talkAboutPromotion(self,", "def __onToBattleTwo(self, elapsedTime = 0): self.notify.debug('----- __onToBattleTwo') self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage,", "newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1)", "= Track( (0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)),", "self.loadPodium() ug = self.geom.find('**/Reflections') ug.setBin('ground', 
-10) def loadJuryBox(self): self.juryBox =", "= %s' % origPos) self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s' % (battleNode.getTransform(),", "if toon == localAvatar: self.d_hitBoss(self.panDamage) elif pieCode == ToontownGlobals.PieCodeProsecutionPan: self.flashGreen()", "newCollideMask | cn.getIntoCollideMask() for i in xrange(cn.getNumSolids()): solid = cn.getSolid(i)", "= 'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree') self.cleanupIntervals() self.controlToons()", "elif pieCode == ToontownGlobals.PieCodeBossCog: if toon == localAvatar: self.d_hitBoss(1) if", "import ToontownBattleGlobals import DistributedBossCog from toontown.toonbase import TTLocalizer import SuitDNA", "seq.append(Func(self.clearChat)) oldSeq = self.activeIntervals.get(intervalName) if oldSeq: oldSeq.finish() seq.start() self.storeInterval(seq, intervalName)", "0, 0) curPos = self.juryBox.getPos() endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0],", "self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) ival = Parallel() points =", "self.cr.doId2do.get(toonId) self.notify.debug('cannonId = %d' % cannon.doId) cannonPos = cannon.nodePath.getPos(render) self.notify.debug('cannonPos", "node=self.defensePanNodePath, volume=0.25) if toon == localAvatar: self.d_hitBoss(self.panDamage) elif pieCode ==", "= self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty(): self.door3 = self.geom.find('**/interior/CR3_Door') self.mainDoor = self.geom.find('**/Door_1')", "self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def loadPodium(self): self.podium = self.geom.find('**/Podium') newZ =", "0 self.bossDamageMovie = None self.everThrownPie = 0 self.battleThreeMusicTime = 0", "self.baseHighCol.getCollideMask() newBitMask = oldBitMask & 
~ToontownGlobals.PieBitmask newBitMask = newBitMask &", "self.state if stateName == 'Elevator': self.placeToonInElevator(toon) def setLawyerIds(self, lawyerIds): self.lawyers", "1.0, 0, 1.0) self.beamNodePath = NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0, 3)", "def exitNearVictory(self): self.notify.debug('----- exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)", "enterVictory(self): self.notify.debug('----- enterVictory') self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat() self.witnessToon.clearChat()", "else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds, battleNode): self.notify.debug('----- __toonsToPromotionPosition') points =", "from direct.task import Task import random import math from toontown.coghq", "beamLocatorPos = beamLocatorBounds.getCenter() negBeamLocatorPos = -beamLocatorPos self.notify.debug('beamLocatorPos = %s' %", "def __doneEpilogue(self, elapsedTime = 0): self.notify.debug('----- __doneEpilogue') intervalName = 'EpilogueMovieToonAnim'", "suit = self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId) return def setBossDamage(self, bossDamage,", "self.stickBossToFloor() intervalName = 'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start()", "extraInfo < len(self.involvedToons): toonId = self.involvedToons[extraInfo] toon = base.cr.doId2do.get(toonId) if", "uberList=self.uberList, noSkip=True) ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward)) intervalName = 'RewardMovie'", "in self.involvedToons: toon 
= self.cr.doId2do.get(toonId) if toon: toon.hide() def __showToons(self):", "% str) if not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.reflectedMainDoor)", "st return chatString = TTLocalizer.LawbotBossTaunts[1] if tauntIndex == 0: if", "= Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos", "ToontownGlobals.LawbotBossMaxDamage self.elevatorType = ElevatorConstants.ELEVATOR_CJ self.gavels = {} self.chairs = {}", "- 5]) self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0]) for i", "transition to NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if self.recoverRate:", "bossTrack.append(track) track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait()", "= random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y,", "0, 0, -1.5, 0.6) defenseTube.setTangible(1) defenseCollNode = CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath", "0) def __makePrepareBattleThreeMovie(self): movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15,", "def enterWaitForToons(self): self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() self.witnessToon.removeActive() def exitWaitForToons(self): self.notify.debug('-----", "- self.bossDamage, self.bossMaxDamage) if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self): self.notify.debug('-----", "self.insidesBNodePath.stash() target = 
CollisionTube(0, -1, 4, 0, -1, 9, 3.5)", "self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2,", "2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube = CollisionTube(0, 0, -0.5, 0, 0,", "self.notify.debug('----- exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if not self.mainDoor.isEmpty(): pass if not", "Toon from toontown.battle import BattleBase from direct.directutil import Mopath from", "= 'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "toonIds, battleNode): self.notify.debug('----- __toonsToPromotionPosition') points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for", "loop=1), ActorInterval(self, 'Ff_lookRt', duration=3), ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0), ActorInterval(self,", "battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack = Sequence() self.notify.debug('calling setPosHpr')", "weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech += '\\x07' trialSpeech", "self.notify.debug('----- __onToBattleTwo') self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo')", "None return def flashPanBlue(self): self.cleanupPanFlash() intervalName = 'FlashPanBlue' self.defensePanNodePath.setColorScale(1, 1,", "juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus: weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated,", "repr(cnp)) break newCollideMask = 
newCollideMask | cn.getIntoCollideMask() for i in", "loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath = self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath = self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan')", "__clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None return def __showWaitingMessage(self,", "beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) def loadScaleNew(self): self.scaleNodePath =", "not isinstance(cn, CollisionNode): self.notify.warning('Not a collision node: %s' % repr(cnp))", "state') gotError = True elif not self.state == 'BattleThree': self.notify.warning('returning", "{} self.cannons = {} self.useCannons = 1 self.juryBoxIval = None", "3) self.standNodePath = NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt)", "direction == 1: spread = -spread dist = 50 rate", "= None self.attackAvId = 0 self.recoverRate = 0 self.recoverStartTime =", "= -spread dist = 50 rate = time / numGears", "self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not found %s' %", "self.notify.debug('battle node world pos = %s' % bnWorldPos) pos =", "repr(entry.getIntoNodePath())) return attackCode = int(attackCodeStr) into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into)", "toon: toon.show() def __arrangeToonsAroundWitnessToon(self): radius = 7 numToons = len(self.involvedToons)", "toon: toon.hide() def __showToons(self): for toonId in self.involvedToons: toon =", "= 7 numToons = len(self.involvedToons) center = (numToons - 1)", "x = math.cos(radians) * radius y = 
math.sin(radians) * radius", "return retVal def calculateWeightOfToon(self, toonId): defaultWeight = 1 bonusWeight =", "seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss') def", "__walkToonToPromotion') toon = base.cr.doId2do.get(toonId) if toon: destPos = toon.getPos() self.placeToonInElevator(toon)", "+ 0.5) time = 5.0 - 4.0 * t spread", "cnp in collList: cn = cnp.node() if not isinstance(cn, CollisionNode):", "seq.start() self.storeInterval(seq, intervalName) def setTaunt(self, tauntIndex, extraInfo): gotError = False", "z1) vertexWriter.addData3f(x2, y2, z1) vertexWriter.addData3f(x1, y1, z2) vertexWriter.addData3f(x2, y1, z2)", "for lawyer in self.lawyers: if lawyerDoId == lawyer.doId: lawyer.sendUpdate('hitByToon', [])", "ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4],", "Sequence( Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------')", "= None self.witnessToon = None self.witnessToonOnstage = False self.numToonJurorsSeated =", "from toontown.battle import BattleBase from direct.directutil import Mopath from direct.showutil", "return def __clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None return", "st = StackTrace() print st return chatString = TTLocalizer.LawbotBossTaunts[1] if", "= self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) 
self.insidesANodePath.stash() insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0),", "self.geom.find('**/%s' % str) if not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide)", "render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos = %s ' % prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos =", "intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self): self.notify.debug('----- __continueVictory')", "doorBCallback(self, isOpen): if self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def", "toon = self.cr.doId2do.get(toonId) if toon: if index in self.cannons: cannon", "base.cr.doId2do.get(toonId) if toon: base.playSfx(self.toonUpSfx, node=toon) def hideBonusTimer(self): if self.bonusTimer: self.bonusTimer.hide()", "volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName = 'prepareBattleThree'", "cannon %d but we have a toon =%d' % (index,", "CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6) defenseTube.setTangible(1) defenseCollNode =", "not self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect') if not self.reflectedWitnessStand.isEmpty(): pass", "self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)", "base.cr.doId2do.get(toonId) if toon: destPos = toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival =", "battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], 
ToontownGlobals.LawbotBossBattleThreePosHpr[5])", "Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos)) def __makeRollToBattleTwoMovie(self): startPos =", "self.onscreenMessage.destroy() self.onscreenMessage = None self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1,", "= render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos = %s ' % prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos", "'EpilogueMovieToonAnim' self.clearInterval(intervalName) track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start() def", "self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute, attackToons, CFSpeech)))) track.append(dialogTrack) return Sequence( Func(self.stickToonsToFloor),", "disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog != None: self.notify.warning('Multiple", "35), startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech),", "tris.addVertex(5) tris.addVertex(4) tris.closePrimitive() cubeGeom = Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN = GeomNode('cube')", "self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic =", "self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie:", 
"self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) return def cleanupPanFlash(self): if self.panFlashInterval:", "self.juryBox def startJuryBoxMoving(self): if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None self.juryBox.setPos(-30,", "intervalName) def __onToBattleThree(self, elapsed): self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))", "enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor() intervalName = 'RollToBattleThree' seq =", "self.cannons[index] toon = self.cr.doId2do.get(toonId) self.notify.debug('cannonId = %d' % cannon.doId) cannonPos", "elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic =", "self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx = [] for", "= Sequence(prepareBattleTwoMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.acceptOnce('doneChatPage', self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0,", "= 0.1 planes.sort(lambda p1, p2: p1.compareTo(p2, threshold)) lastPlane = None", "task): self.notify.debug('----- __howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self): self.everThrownPie = 1 self.__clearOnscreenMessage()", "self.cr.playGame.getPlace() if place and hasattr(place, 'fsm'): place.setState('waitForBattle') def makeToonsWait(self): self.notify.debug('makeToonsWait')", "= self.getBossDamage() / 100.0 gearTrack = Parallel() numGears = int(4", "from 
direct.gui.DirectGui import * from panda3d.core import * from libotp", "announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self._name, 'dept':", "= now - self.recoverStartTime return max(self.bossDamage - self.recoverRate * elapsed", "Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr =", "'fsm'): place.setState('waitForBattle') def makeToonsWait(self): self.notify.debug('makeToonsWait') for toonId in self.involvedToons: toon", "self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor = self.geom.find('**/MidVaultFloor1') if floor.isEmpty(): floor =", "self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() localAvatar.setPos(-3, 0, 0) base.localAvatar.orbitalCamera.start()", "hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1) bossTrack.append(track) duration =", "def d_hitBoss(self, bossDamage): self.notify.debug('----- d_hitBoss') self.sendUpdate('hitBoss', [bossDamage]) def d_healBoss(self, bossHeal):", "= self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee() gearModel.setScale(0.1) t =", "render.getRelativePoint result = %s' % pos) self.notify.debug('walkToonsToBattlePosition: final pos =", "my vertices', gFormat, Geom.UHDynamic) vertexWriter = GeomVertexWriter(myVertexData, 'vertex') normalWriter =", "StackTrace() print st return chatString = TTLocalizer.LawbotBossTaunts[1] if tauntIndex ==", "% beamBoundsCenter) beamLocatorBounds = 
self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos =", "prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie() intervalName = 'prepareBattleTwo' seq = Sequence(prepareBattleTwoMovie, name=intervalName)", "not self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge') center = seatCenter.getPos()", "= TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated juryResult += '\\x07' trialSpeech = juryResult", "toon = self.cr.doId2do.get(toonId) if toon: toon.hide() def __showToons(self): for toonId", "self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode',", "self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def setScaleTilt(self, tilt): self.beamNodePath.setP(tilt) if self.useProgrammerScale:", "tris.addVertex(3) tris.addVertex(6) tris.closePrimitive() tris.addVertex(3) tris.addVertex(7) tris.addVertex(6) tris.closePrimitive() tris.addVertex(0) tris.addVertex(2) tris.addVertex(4)", "ToontownGlobals.LawbotBossInitialDamage diffDamage *= 1.0 if diffDamage >= 0: percentDamaged =", "prosecutionTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6) prosecutionTube.setTangible(1)", "self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage) def getBossDamage(self): self.notify.debug('----- getBossDamage') now =", "base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueDefeat(self): self.notify.debug('----- __continueDefeat') self.stopAnimate() 
self.doneBarrier('Defeat')", "= len(self.involvedToons) center = (numToons - 1) / 2.0 for", "self.stopAnimate() self.doneBarrier('Defeat') def exitDefeat(self): self.notify.debug('----- exitDefeat') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime", "chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos,", "= recoverRate self.recoverStartTime = recoverStartTime taskName = 'RecoverBossDamage' taskMgr.remove(taskName) if", "self.baseColStashed = True def unstashBaseCol(self): if self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash()", "loop=1)) track.append(bossAnimTrack) attackToons = TTLocalizer.BossCogAttackToons dialogTrack = Track( (0, Func(self.setChatAbsolute,", "planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3 = self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty(): self.door3 =", "CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo, render))), (9.6, Parallel( rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech),", "newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if", "base.playSfx(self.piesRestockSfx) if not self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def __pieSplat(self, toon,", "self.notify.debug('----- __doneReward') self.doneBarrier('Reward') self.toWalkMode() def exitReward(self): self.notify.debug('----- exitReward') intervalName =", "self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx = 
loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx", "in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: if index in", "battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5])", "= entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId == '': self.notify.warning('Toon %s has no", "volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self): self.notify.debug('----- __continueVictory') self.stopAnimate() self.doneBarrier('Victory') def exitVictory(self):", "= int(4 + 6 * t + 0.5) time =", "bossAnimTrack = Sequence( ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1), ActorInterval(self, 'Ff_lookRt',", "TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24, Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA", "Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0)) return movie def", "None, 1) bossTrack.append(track) duration = bossTrack.getDuration() return bossTrack def __showOnscreenMessage(self,", "if attackCode == ToontownGlobals.BossCogAreaAttack: self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def setBattleDifficulty(self, diff): self.notify.debug('battleDifficulty", "toon.show() def __arrangeToonsAroundWitnessToon(self): radius = 7 numToons = len(self.involvedToons) center", "self.rewardPanel = RewardPanel.RewardPanel(panelName) victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds,", "toontown.toonbase import TTLocalizer import SuitDNA from toontown.toon import Toon from", "None self.juryTimer = None self.witnessToon = None 
self.witnessToonOnstage = False", "= seq seq.start() def replaceCollisionPolysWithPlanes(self, model): newCollisionNode = CollisionNode('collisions') newCollideMask", "entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def touchedGavelHandle(self, gavel, entry): attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode')", "DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers = [] self.lawyerRequest = None", "import * from libotp import * from direct.fsm import FSM", "else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self): if not self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash()", "self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop() def enterFrolic(self): self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show()", "self.notify.debug('----- __doneEpilogue') intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone)))", "self.__howToThrowPies, self.uniqueName('PieAdvice')) def __pieSplat(self, toon, pieCode): if pieCode == ToontownGlobals.PieCodeBossInsides:", "oldSeq = self.activeIntervals.get(intervalName) if oldSeq: oldSeq.finish() seq.start() self.storeInterval(seq, intervalName) def", "self.prosecutionLocator.getBounds() prosecutionLocPos = prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos)", "self.releaseToons(finalBattle=1) self.happy = 0 self.raised = 0 self.forward = 1", "i in xrange(len(self.lawyers)): suit = self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId) return", "None self.betweenBattleMusic.stop() self.promotionMusic.stop() 
self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() if self.juryTimer: self.juryTimer.destroy()", "0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) track, hpr = self.rollBossToPoint(startPos,", "0 self.attackCode = None self.attackAvId = 0 self.recoverRate = 0", "Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0,", "-0.25, 3) self.standNodePath = NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom) self.standNodePath.reparentTo(self.scaleNodePath) self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.scaleNodePath.setScale(5.0) self.scaleNodePath.wrtReparentTo(self.geom)", "self.notify.debug('beamPos = %s' % beamPos) self.notify.debug('beamRelPos = %s' % beamRelPos)", "= (float(i) / (numGears - 1) - 0.5) * spread", "self.witnessToon.delete() self.witnessToon = None return def __showWitnessToon(self): if not self.witnessToonOnstage:", "self.notify.debug('----- cleanupAttacks') self.__cleanupStrafe() def __cleanupStrafe(self): self.notify.debug('----- __cleanupStrage') if self.strafeInterval: self.strafeInterval.finish()", "newCollideMask = newCollideMask | cn.getIntoCollideMask() for i in xrange(cn.getNumSolids()): solid", "% center) self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() - 1.5) self.witnessToon.setY(self.witnessToon.getY() - 1.15)", "battlePos, battleHpr, 0) self.makeToonsWait() return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie'))", "y1, z1) vertexWriter.addData3f(x1, y2, z1) vertexWriter.addData3f(x2, y2, z1) vertexWriter.addData3f(x1, y1,", "self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy = 1 self.raised = 1 self.forward =", 
"self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterDefeat(self): self.notify.debug('----- enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)", "= render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos = %s' % beamPos) self.notify.debug('beamRelPos =", "toontown.coghq import CogDisguiseGlobals from toontown.building import ElevatorConstants from toontown.toonbase import", "if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def exitIntroduction(self):", "= cnp.node() if not isinstance(cn, CollisionNode): self.notify.warning('Not a collision node:", "toon: destPos = toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate,", "self.reflectedMainDoor.isEmpty(): itemsToHide = ['Reflections/Door_1'] for str in itemsToHide: stuffToHide =", "self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterDefeat(self): self.notify.debug('----- enterDefeat')", "= self.rotateNode.attachNewNode('gearRoot') if side == 0: gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180,", "def setScaleTilt(self, tilt): self.beamNodePath.setP(tilt) if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt)", "self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage = 0 def __hideToons(self): for toonId in", "self.onscreenMessage.destroy() self.onscreenMessage = None return def __showWaitingMessage(self, task): self.notify.debug('----- __showWaitingMessage')", "self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if not self.reflectedPodium.isEmpty(): if self.debugPositions: self.reflectedPodium.show() def 
loadCannons(self):", "TTLocalizer.WitnessToonOneJuror elif self.numToonJurorsSeated == 12: juryResult = TTLocalizer.WitnessToonAllJurors else: juryResult", "self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterWitnessStand') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage()", "myVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic) vertexWriter = GeomVertexWriter(myVertexData,", "0: bonusWeight = 0 newWeight = defaultWeight + bonusWeight self.notify.debug('toon", "self.raised = 0 self.forward = 1 intervalName = 'DefeatMovie' seq", "self.notify.debug('----- exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self): self.notify.debug('-----", "if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None return def __showWaitingMessage(self, task):", "self.raised = 1 self.forward = 1 intervalName = 'VictoryMovie' seq", "= self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0) rollTrack = Sequence( Func(self.getGeomNode().setH,", "= camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90,", "from toontown.toon import NPCToons from direct.task import Task import random", "self.stashBoss() self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor() intervalName = 'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(),", "itemsToHide = ['interior/Door_1'] for str in itemsToHide: stuffToHide = self.geom.find('**/%s'", "g, b, a) texWriter.addData2f(1.0, 1.0) tris = GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1)", "BitMask32(0) planes = [] collList = 
model.findAllMatches('**/+CollisionNode') if not collList:", "Point3(4.0, -2.0, 0.5)) insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)", "self.notify.debug('prosecutionLocatorPos = %s ' % prosecutionLocatorPos) self.notify.debug('prosecutionLocatorRelPos = %s '", "duration=10, loop=1), ActorInterval(self, 'Ff_lookRt', duration=3), ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0),", "60.0, 0) def __recoverBossDamage(self, task): self.notify.debug('----- __recoverBossDamage') if self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage()", "(numGears - 1) - 0.5) * spread x = dist", "SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0)) self.juryBoxIval.start() self.juryTimer = ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner()", "bossTrack.append(track) duration = bossTrack.getDuration() return bossTrack def __showOnscreenMessage(self, text): self.notify.debug('-----", "= 0 self.cannonIndex = -1 return def announceGenerate(self): global OneBossCog", "= ToontownGlobals.LawbotBossMaxDamage self.elevatorType = ElevatorConstants.ELEVATOR_CJ self.gavels = {} self.chairs =", "self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium() ug = self.geom.find('**/Reflections') ug.setBin('ground', -10) def loadJuryBox(self):", "if self.battleA == None or self.battleB == None: pass return", "seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq = self.activeIntervals.get(intervalName) if oldSeq: oldSeq.finish() seq.start() self.storeInterval(seq,", "if diffSettings[4]: numJurors = self.numJurorsSeatedByCannon(cannonIndex) bonusWeight = numJurors - diffSettings[5]", "= {} self.chairs = {} self.cannons = {} self.useCannons =", "self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True) 
ival = Sequence(Parallel(victory, camVictory),", "CollisionNode('collisions') newCollideMask = BitMask32(0) planes = [] collList = model.findAllMatches('**/+CollisionNode')", "= CollisionTube(0, 1, 4, 0, 1, 7, 3.5) shieldNode =", "ToontownGlobals.PieCodeLawyer: pass def __localPieSplat(self, pieCode, entry): if pieCode == ToontownGlobals.PieCodeLawyer:", "self.geom.find('**/JuryBox') juryBoxPos = self.juryBox.getPos() newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if", "self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer: self.juryTimer.destroy() del self.juryTimer self.juryTimer =", "%s has no avatarDoId tag.' % repr(entry.getIntoNodePath())) return doId =", "entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '': self.notify.warning('Node %s has no attackCode", "'ToonJuror': self.numToonJurorsSeated += 1 self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) return", "negBeamLocatorPos = -beamLocatorPos self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) self.notify.debug('negBeamLocatorPos =", "= battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node world pos = %s' %", "self.bonusTimer.posInTopRightCorner() self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer) def setAttackCode(self, attackCode, avId = 0):", "bossDamage, self.bossMaxDamage) def getBossDamage(self): self.notify.debug('----- getBossDamage') now = globalClock.getFrameTime() elapsed", "= 1 self.raised = 1 self.forward = 1 self.doAnimate() self.accept('enterWitnessStand',", "5.0 - 4.0 * t spread = 60 * math.pi", "loadEnvironment(self): self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0, -71.601)", "= 
loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if", "= self.createBlock(0.25, 2, 0.125, -0.25, -2, -0.125, 0, 1.0, 0,", "= None self.warningSfx = None self.juryMovesSfx = None self.baseColStashed =", "- self.recoverRate * elapsed / 60.0, 0) def __recoverBossDamage(self, task):", "juryResult = TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated == 1: juryResult = TTLocalizer.WitnessToonOneJuror", "colorScale=VBase4(0, 0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)), name=intervalName)", "= Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack = Sequence() bossTrack.append(Func(self.loop, 'Ff_neutral')) track,", "self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage()", "self.setupElevator(elevatorModel) self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg')", "% beamRelPos) self.notify.debug('beamRenderPos = %s' % beamRenderPos) beamBoundsCenter = self.beamNodePath.getBounds().getCenter()", "None self.toonUpSfx = None self.bonusTimer = None self.warningSfx = None", "tris.addVertex(0) tris.addVertex(5) tris.closePrimitive() tris.addVertex(4) tris.addVertex(6) tris.addVertex(7) tris.closePrimitive() tris.addVertex(7) tris.addVertex(5) tris.addVertex(4)", "self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() 
self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie() intervalName =", "diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt", "__onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleThree(self): self.notify.debug('----- exitPrepareBattleThree') self.show()", "ToontownGlobals.LawbotBossDefensePanDamage if base.config.GetBool('lawbot-boss-cheat', 0): self.panDamage = 25 self.evidenceHitSfx = None", "not self.mainDoor.isEmpty(): itemsToHide = ['interior/Door_1'] for str in itemsToHide: stuffToHide", "0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr,", "rollTrackDuration) doorStartPos = self.door3.getPos() doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] +", "self.prosecutionPanNodePath = NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube =", "True if not hasattr(self, 'nametag'): self.notify.warning('returning from setTaunt, no attr", "self.notify.debug('battleNode.parent() = %s' % battleNode.getParent()) self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos())", "diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage diffDamage *= 1.0 if diffDamage", "Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam,", "speech += TTLocalizer.WitnessToonHPBoost else: speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel +", "%s' % stuffToHide) self.reflectedMainDoor = 
self.geom.find('**/interiorrefl/CR3_Door') if not self.reflectedMainDoor.isEmpty(): itemsToHide", "self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) self.startJuryBoxMoving() for index", "'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def enterNearVictory(self): self.cleanupIntervals()", "t spread = 60 * math.pi / 180.0 if direction", "= dist * math.cos(angle) h = random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i *", "calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated) if self.numToonJurorsSeated == 0: juryResult", "ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB)", "math from toontown.coghq import CogDisguiseGlobals from toontown.building import ElevatorConstants from", "def exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop() def", "1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self): self.notify.debug('-----", "= None if self.juryBox: self.juryBox.removeNode() return def doStrafe(self, side, direction):", "= %s' % locatorRenderPos) beamPos = self.beamNodePath.getPos() beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath,", "side, direction): gearRoot = self.rotateNode.attachNewNode('gearRoot') if side == 0: gearRoot.setPos(0,", "= None self.reflectedMainDoor = None self.panFlashInterval = None self.panDamage =", "DistributedBossCog from 
toontown.toonbase import TTLocalizer import SuitDNA from toontown.toon import", "%s ' % toon.getPos()) def touchedGavel(self, gavel, entry): self.notify.debug('touchedGavel') attackCodeStr", "BossCogs visible.') OneBossCog = self return def disable(self): global OneBossCog", "1 self.raised = 1 self.forward = 1 intervalName = 'VictoryMovie'", "d_hitToon(self, toonId): self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon', [toonId]) def gotToon(self, toon): stateName", "* math.pi / 180.0 x = math.cos(radians) * radius y", "countToonJurors(self): self.numToonJurorsSeated = 0 for key in self.chairs.keys(): chair =", "self.sendUpdate('healBoss', [bossHeal]) def d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides', []) def d_hitDefensePan(self):", "not self.reflectedMainDoor.isEmpty(): itemsToHide = ['Reflections/Door_1'] for str in itemsToHide: stuffToHide", "Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater,", "self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0 self.raised = 0", "= 0 self.battleThreeMusicTime = 0 self.insidesANodePath = None self.insidesBNodePath =", "None self.insidesBNodePath = None self.strafeInterval = None self.onscreenMessage = None", "Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN = GeomNode('cube') cubeGN.addGeom(cubeGeom) return cubeGN def __enterDefenseCol(self,", "defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 0, 0,", "Func(camera.setHpr, -90, 0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8,", "self.involvedToons[extraInfo] toon = base.cr.doId2do.get(toonId) if toon: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] %", "% stuffToHide) 
self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door') if not self.reflectedMainDoor.isEmpty(): itemsToHide =", "and chair.newState == 'ToonJuror': self.numToonJurorsSeated += 1 self.notify.debug('self.numToonJurorsSeated = %d'", "= CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode',", "p2: p1.compareTo(p2, threshold)) lastPlane = None for plane in planes:", "= self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() negBeamLocatorPos =", "ActorInterval(self, 'Ff_neutral', duration=2), ActorInterval(self, 'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack) attackToons =", "= Toon.Toon() npc.setDNAString(dnaNetString) npc.setName(TTLocalizer.WitnessToonName) npc.setPickable(0) npc.setPlayerType(NametagGroup.CCNonPlayer) npc.animFSM.request('Sit') self.witnessToon = npc", "in xrange(len(self.cannons)): cannon = self.cannons[index] cannon.cannon.show() def getChairParent(self): return self.juryBox", "= 0 self.recoverRate = 0 self.recoverStartTime = 0 self.bossDamageMovie =", "1.0) tris = GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1) tris.addVertex(2) tris.closePrimitive() tris.addVertex(1) tris.addVertex(3)", "1) / 2.0 for i in xrange(numToons): toon = self.cr.doId2do.get(self.involvedToons[i])", "TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated juryResult += '\\x07' trialSpeech = juryResult trialSpeech", "self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0, -71.601) self.geom.setScale(1)", "__localPieSplat(self, pieCode, entry): if pieCode == ToontownGlobals.PieCodeLawyer: 
self.__lawyerGotHit(entry) if pieCode", "unstashBoss(self, task): self.unstash() self.reparentTo(render) def enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss()", "= None self.toonUpSfx = None self.bonusTimer = None self.warningSfx =", "= 'VictoryMovie' seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName) seq.start() self.storeInterval(seq, intervalName)", "self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True) ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward)) intervalName", "0 self.mainDoor = None self.reflectedMainDoor = None self.panFlashInterval = None", "-2.0, 0.5), Point3(4.0, -2.0, 0.5)) insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask", "self.notify.debug('locatorRenderPos = %s' % locatorRenderPos) beamPos = self.beamNodePath.getPos() beamRelPos =", "return def __cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox') if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval =", "battleNode)) ival = Parallel() points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] self.notify.debug('walkToonsToBattlePosition:", "pos) self.notify.debug('walkToonsToBattlePosition: final pos = %s' % pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8,", "% rollTrackDuration) doorStartPos = self.door3.getPos() doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2]", "def exitVictory(self): self.notify.debug('----- exitVictory') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime()", "'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() 
localAvatar.setPos(-3, 0,", "return cubeGN def __enterDefenseCol(self, entry): self.notify.debug('__enterDefenseCol') def __enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol')", "chatString, 0)) return movie def __doWitnessPrepareBattleThreeChat(self): self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated =", "% diff) self.battleDifficulty = diff def toonEnteredCannon(self, toonId, cannonIndex): if", "= self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight > 0: if self.bonusWeight == 1:", "0, 0) gear = gearModel.instanceTo(node) angle = (float(i) / (numGears", "self.geom.hide() self.witnessToon.removeActive() def exitWaitForToons(self): self.notify.debug('----- exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive() def", "Func(camera.setHpr, 0, 10, 0))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)), (5.5, Func(self.setChatAbsolute,", "hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1) bossTrack.append(track) track, hpr", "DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def doorACallback(self, isOpen): if self.insidesANodePath: if isOpen: self.insidesANodePath.unstash()", "None self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage self.elevatorType = ElevatorConstants.ELEVATOR_CJ self.gavels = {}", "def enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss() self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor() intervalName", "= %d' % cannon.doId) cannonPos = cannon.nodePath.getPos(render) self.notify.debug('cannonPos = %s'", "= TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus: weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight)", "toon = base.cr.doId2do.get(toonIds[i]) if 
toon: toon.wrtReparentTo(render) pos, h = points[i]", "self.notify.debug('----- __cleanupStrage') if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval = None return def", "gotError = True if not hasattr(self, 'nametag'): self.notify.warning('returning from setTaunt,", "self.stash() def unstashBoss(self, task): self.unstash() self.reparentTo(render) def enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo')", "% beamRenderPos) beamBoundsCenter = self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter)", "self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0, 1, 4, 0, 1,", "def toonGotHealed(self, toonId): toon = base.cr.doId2do.get(toonId) if toon: base.playSfx(self.toonUpSfx, node=toon)", "import math from toontown.coghq import CogDisguiseGlobals from toontown.building import ElevatorConstants", "self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0, 1, 4, 0, 1, 7,", "tris.addVertex(4) tris.addVertex(5) tris.closePrimitive() tris.addVertex(1) tris.addVertex(0) tris.addVertex(5) tris.closePrimitive() tris.addVertex(4) tris.addVertex(6) tris.addVertex(7)", "if toon: angle = 90 - 15 * (i -", "enterBattleTwo(self): self.notify.debug('----- enterBattleTwo') self.cleanupIntervals() mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)", "- 1) - 0.5) * spread x = dist *", "= Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1,", "from direct.directutil import Mopath from direct.showutil import Rope from toontown.distributed", "ToontownGlobals.BossCogAreaAttack: 
self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def setBattleDifficulty(self, diff): self.notify.debug('battleDifficulty = %d' %", "-12.645) self.reflectedJuryBox.setPos(-30, 0, 0) curPos = self.juryBox.getPos() endingAbsPos = Point3(curPos[0]", "self.witnessToonOnstage = 1 def __hideWitnessToon(self): if self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage", "self.__loadMopaths() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog != None: self.notify.warning('Multiple BossCogs visible.') OneBossCog", "self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss() intervalName = 'RollToBattleTwo' self.clearInterval(intervalName)", "tris.addVertex(2) tris.addVertex(3) tris.addVertex(6) tris.closePrimitive() tris.addVertex(3) tris.addVertex(7) tris.addVertex(6) tris.closePrimitive() tris.addVertex(0) tris.addVertex(2)", "self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') self.ignore('enterDefenseCol') self.ignore('enterProsecutionCol') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if", "def makeEpilogueMovie(self): epSpeech = TTLocalizer.WitnessToonCongratulations epSpeech = self.__talkAboutPromotion(epSpeech) bossTrack =", "diffDamage >= 0: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage)", "Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval", "localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() 
self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic,", "if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85: self.unstashBaseCol() else: self.stashBaseCol() def", "TTLocalizer.LawbotBossTempIntro1, CFSpeech)), (12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)),", "= %s' % battleNode.getParent().getPos()) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos =", "def makeEndOfBattleMovie(self, hasLocalToon): name = self.uniqueName('Drop') seq = Sequence(name=name) seq", "p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr =", "for index in xrange(len(self.cannons)): cannon = self.cannons[index] cannon.cannon.show() def getChairParent(self):", "= loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx =", "standGeom = self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3) self.standNodePath =", "name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def", "self.juryBox = self.geom.find('**/JuryBox') juryBoxPos = self.juryBox.getPos() newPos = juryBoxPos -", "% prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos = %s' % locatorRenderPos) beamPos = self.beamNodePath.getPos()", "self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) self.startJuryBoxMoving() for index in 
xrange(len(self.cannons)):", "= %s' % self.battleANode) self.__hideWitnessToon() if self.battleA == None or", "p1.compareTo(p2, threshold)) lastPlane = None for plane in planes: if", "%s' % self.getPos()) self.notify.debug('battleNode.parent() = %s' % battleNode.getParent()) self.notify.debug('battleNode.parent().getPos() =", "self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx", "plane = Plane(solid.getPlane()) planes.append(plane) else: self.notify.warning('Unexpected collision solid: %s' %", "__cleanupWitnessToon(self): self.__hideWitnessToon() if self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon = None return", "None and chair.newState == 'ToonJuror': self.numToonJurorsSeated += 1 self.notify.debug('self.numToonJurorsSeated =", "battle three state, state=%s', self.state) gotError = True if not", "chair.state == 'ToonJuror': if chair.toonJurorIndex == cannonIndex: retVal += 1", "z1) vertexWriter.addData3f(x1, y2, z1) vertexWriter.addData3f(x2, y2, z1) vertexWriter.addData3f(x1, y1, z2)", "self.sendUpdate('hitBossInsides', []) def d_hitDefensePan(self): self.notify.debug('----- d_hitDefensePan') self.sendUpdate('hitDefensePan', []) def d_hitProsecutionPan(self):", "4, 0, -1, 9, 3.5) targetNode = CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask)", "vertices', gFormat, Geom.UHDynamic) vertexWriter = GeomVertexWriter(myVertexData, 'vertex') normalWriter = GeomVertexWriter(myVertexData,", "self.notify.debug('----- exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive() def enterElevator(self): self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self)", "+ 10, 
pos[2], h, 0, 0) def __outOfPies(self): self.notify.debug('----- outOfPies')", "= Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0))", "self.notify.debug('----- getBossDamage') now = globalClock.getFrameTime() elapsed = now - self.recoverStartTime", "self.notify.debug('----- enterVictory') self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat() self.witnessToon.clearChat() self.controlToons()", "self.geom.find('**/MidVaultFloor1') if floor.isEmpty(): floor = self.geom.find('**/CR3_Floor') self.evFloor = self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom)", "makeIntroductionMovie') for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon:", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss() intervalName = 'RollToBattleTwo'", "Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0),", "1: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus:", "name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleTwo(self, elapsedTime = 0): self.notify.debug('-----", "1.0) self.beamNodePath = NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0, 3) self.beamNodePath.reparentTo(self.scaleNodePath) 
defensePanGeom", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) self.__showWitnessToon() diffSettings =", "def unstashBoss(self, task): self.unstash() self.reparentTo(render) def enterRollToBattleTwo(self): self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1)", "= self.juryBox.getPos() newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions:", "def loadJuryBox(self): self.juryBox = self.geom.find('**/JuryBox') juryBoxPos = self.juryBox.getPos() newPos =", "gFormat, Geom.UHDynamic) vertexWriter = GeomVertexWriter(myVertexData, 'vertex') normalWriter = GeomVertexWriter(myVertexData, 'normal')", "0, 0, -1.5, 0.6) prosecutionTube.setTangible(1) prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath", "self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self): self.everThrownPie = 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __touchedWitnessStand(self,", "self.loseCogSuits(self.toonsA + self.toonsB, render, (-2.798, -70, 10, 180, 0, 0)))),", "= BattleBase.BattleBase.toonPoints[len(toonIds) - 1] else: points = list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) -", "Func(self.clearChat), self.loseCogSuits(self.toonsA + self.toonsB, render, (-2.798, -70, 10, 180, 0,", "DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() self.witnessToon.removeActive() def exitWaitForToons(self): self.notify.debug('----- exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive()", "self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) 
taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage", "= battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0], pos[1],", "- center) radians = angle * math.pi / 180.0 x", "self.bossDamageToMovie) return Task.cont def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes):", "with positionToons') def __makePrepareBattleTwoMovie(self): chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale movie", "self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.notify.debug('----- exitPrepareBattleTwo') self.show() taskMgr.remove(self.uniqueName('WaitingMessage'))", "setAttackCode(self, attackCode, avId = 0): DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId) if attackCode", "beamLocatorPos = beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) def loadScaleNew(self):", "2.0, 0.5)) insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesBNodePath", "0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5,", "self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30, 8, 180, 0, 0) def", "tris.addVertex(3) tris.addVertex(5) tris.addVertex(7) tris.closePrimitive() tris.addVertex(0) tris.addVertex(4) tris.addVertex(5) tris.closePrimitive() tris.addVertex(1) tris.addVertex(0)", "ToontownGlobals.LawbotBossBattleThreePosHpr[4], 
ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack = Sequence() myInterval = camera.posHprInterval(8, Point3(-22, -100,", "toon = self.cr.doId2do.get(self.involvedToons[i]) if toon: angle = 90 - 15", "= ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer) def setAttackCode(self, attackCode, avId", "CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50))) planeNode = CollisionNode('dropPlane') planeNode.addSolid(plane)", "1.15) self.witnessToonOnstage = 1 def __hideWitnessToon(self): if self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode()", "name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss')", "defaultWeight + bonusWeight self.notify.debug('toon %d has weight of %d' %", "enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie()", "prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos =", "tag.' 
% repr(entry.getIntoNodePath())) return attackCode = int(attackCodeStr) into = entry.getIntoNodePath()", "self.bossMaxDamage: self.notify.debug('finish the movie then transition to NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else:", "chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr,", "GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1, y1, z1) vertexWriter.addData3f(x2, y1, z1) vertexWriter.addData3f(x1, y2,", "volume=0.9) taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss') def __onToPrepareBattleTwo(self): self.notify.debug('----- __onToPrepareBattleTwo') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)", "rate = time / numGears for i in xrange(numGears): node", "volume=0.9, time=self.battleThreeMusicTime) def __continueDefeat(self): self.notify.debug('----- __continueDefeat') self.stopAnimate() self.doneBarrier('Defeat') def exitDefeat(self):", "self.createBlock(0.25, 2, 0.125, -0.25, -2, -0.125, 0, 1.0, 0, 1.0)", "elapsedTime = 0): self.notify.debug('----- __doneEpilogue') intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track", "self.notify.debug('walkToonsToBattlePosition: points = %s' % points[0][0]) for i in xrange(len(toonIds)):", "self.notify.warning('Node %s has no attackCode tag.' 
% repr(entry.getIntoNodePath())) return attackCode", "math.sin(radians) * radius toon.setPos(self.witnessToon, x, y, 0) toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show()", "if self.debugPositions: self.reflectedPodium.show() def loadCannons(self): pass def loadWitnessStand(self): self.realWitnessStand =", "enterDefeat(self): self.notify.debug('----- enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1) self.happy =", "= self.cr.doId2do.get(toonId) if toon: if index in self.cannons: cannon =", "self.strafeInterval = seq seq.start() def replaceCollisionPolysWithPlanes(self, model): newCollisionNode = CollisionNode('collisions')", "tris.closePrimitive() tris.addVertex(4) tris.addVertex(6) tris.addVertex(7) tris.closePrimitive() tris.addVertex(7) tris.addVertex(5) tris.addVertex(4) tris.closePrimitive() cubeGeom", "if not self.mainDoor.isEmpty(): pass if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if not", "self.witnessToon.clearChat() text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech |", "d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', []) def d_hitToon(self, toonId): self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon', [toonId])", "battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0, 0) self.notify.debug('new toon", "ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return bossTrack def __makeWitnessToon(self):", "%d' % self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated)", "'walk'), Func(toon.loop, 'neutral'))) return ival def toonsToBattlePosition(self, 
toonIds, battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------')", "= self.uniqueName('reward') self.rewardPanel = RewardPanel.RewardPanel(panelName) victory, camVictory, skipper = MovieToonVictory.doToonVictory(1,", "self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId) return def setBossDamage(self, bossDamage, recoverRate, timestamp):", "world pos = %s' % bnWorldPos) pos = render.getRelativePoint(battleNode, pos)", "TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1) return speech def __positionToonsInFrontOfCannons(self): self.notify.debug('__positionToonsInFrontOfCannons')", "= TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0,", "8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0)) return", "if self.juryTimer: self.juryTimer.destroy() del self.juryTimer self.juryTimer = None for chair", "volume=0.9) def __onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') def exitRollToBattleThree(self):", "self.juryBox: self.juryBox.removeNode() return def doStrafe(self, side, direction): gearRoot = self.rotateNode.attachNewNode('gearRoot')", "Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos,", "Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5)) insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA)", "self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() self.loop('Ff_neutral') self.notify.debug('self.battleANode = %s' % self.battleANode) self.__hideWitnessToon()", "= None return def 
delete(self): self.notify.debug('----- delete') DistributedBossCog.DistributedBossCog.delete(self) def d_hitBoss(self,", "gavel, entry): attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '': self.notify.warning('Node", "1 self.toonsEnterA.timeScale = 35 self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward =", "= self.cr.doId2do.get(toonId) if toon: toon.hide() def __showToons(self): for toonId in", "0, 0, 1.0, 0.25) self.defensePanNodePath = NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) self.defensePanNodePath.setPos(0, -2,", "self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.loop('neutral') def makeEndOfBattleMovie(self, hasLocalToon):", "self.loadJuryBox() self.loadPodium() ug = self.geom.find('**/Reflections') ug.setBin('ground', -10) def loadJuryBox(self): self.juryBox", "self.accept('enterDefenseCol', self.__enterDefenseCol) self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)", "def exitWaitForToons(self): self.notify.debug('----- exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive() def enterElevator(self): self.notify.debug('-----", "Func(camera.setHpr, 180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat)) return movie def countToonJurors(self): self.numToonJurorsSeated", "self.panDamage = 25 self.evidenceHitSfx = None self.toonUpSfx = None self.bonusTimer", "self.bossDamage >= self.bossMaxDamage: self.notify.debug('finish the movie then transition to NearVictory')", "& ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol = 
self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol =", "35 def __unloadMopaths(self): self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset() def enterOff(self): self.notify.debug('-----", "stuffToHide.wrtReparentTo(self.reflectedMainDoor) else: self.notify.debug('not found %s' % stuffToHide) self.geom.reparentTo(render) self.loadWitnessStand() self.loadScale()", "- 1.15) self.witnessToonOnstage = 1 def __hideWitnessToon(self): if self.witnessToonOnstage: self.witnessToon.removeActive()", "False if not hasattr(self, 'state'): self.notify.warning('returning from setTaunt, no attr", "self.bonusTimer: self.bonusTimer.destroy() del self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog == self: OneBossCog", "newWeight = defaultWeight + bonusWeight self.notify.debug('toon %d has weight of", "self.toonsEnterA = Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale = 35", "False self.battleDifficulty = 0 self.bonusWeight = 0 self.numJurorsLocalToonSeated = 0", "self.sendUpdate('hitProsecutionPan', []) def d_hitToon(self, toonId): self.notify.debug('----- d_hitToon') self.sendUpdate('hitToon', [toonId]) def", "self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage -", "startJuryBoxMoving(self): if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None self.juryBox.setPos(-30, 0, -12.645)", "self.beamNodePath = self.scaleNodePath.find('**/scaleBeam') self.defensePanNodePath = self.scaleNodePath.find('**/defensePan') self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan') self.defenseColNodePath", "self.lawyerRequest = None self.lawyers = lawyers 
for i in xrange(len(self.lawyers)):", "duration=7, loop=1)) track.append(bossAnimTrack) attackToons = TTLocalizer.BossCogAttackToons dialogTrack = Track( (0,", "toon = base.cr.doId2do.get(toonId) if toon: destPos = toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render)", "Func(self.__continueDefeat), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def", "volume=0.9) if not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def", "else: self.stashBaseCol() def unloadEnvironment(self): self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom", "= Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])", "= 0 self.insidesANodePath = None self.insidesBNodePath = None self.strafeInterval =", "self.panFlashInterval.finish() self.panFlashInterval = None return def flashPanBlue(self): self.cleanupPanFlash() intervalName =", "doorStartPos[2] + 25) bossTrack = Track( (0.5, Sequence( Func(self.clearChat), Func(camera.reparentTo,", "self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage = 0 def __hideToons(self): for toonId", "= int(names[1]) for lawyer in self.lawyers: if lawyerDoId == lawyer.doId:", "/ 100.0 gearTrack = Parallel() numGears = int(4 + 6", "chair = self.chairs[key] if chair.state == 'ToonJuror' or chair.state ==", "self.notify.debug('walkToonsToBattlePosition: final pos = %s' % pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'),", "hasLocalToon): name = self.uniqueName('Drop') seq = Sequence(name=name) seq += [Wait(0.0)]", "= self.scaleNodePath.find('**/BaseHighCol') 
oldBitMask = self.baseHighCol.getCollideMask() newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask", "GeomVertexFormat.getV3n3cpt2() myVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic) vertexWriter =", "ActorInterval(self, 'Ff_speech', loop=1)) return bossTrack def makeEpilogueMovie(self): epSpeech = TTLocalizer.WitnessToonCongratulations", "== ToontownGlobals.BossCogAreaAttack: self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def setBattleDifficulty(self, diff): self.notify.debug('battleDifficulty = %d'", "= 1 self.toonsEnterA.timeScale = 35 self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward", "seq = Sequence(prepareBattleThreeMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleThree(self, elapsed):", "if toon: pos, h = points[i] origPos = pos self.notify.debug('origPos", "self.lawyerRequest = None self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() if", "def unloadEnvironment(self): self.notify.debug('----- unloadEnvironment') DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom def __loadMopaths(self):", "None return def __showWaitingMessage(self, task): self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self):", "reverse=0) rollTrack = Sequence( Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0)) rollTrackDuration", "self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage) if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self):", "None self.witnessToon = None 
self.witnessToonOnstage = False self.numToonJurorsSeated = 0", "pos[0], pos[1] + 10, pos[2], h, 0, 0) def __outOfPies(self):", "bonusWeight = 0 newWeight = 1 cannonIndex = self.cannonIndex numJurors", "self.lawyers = lawyers for i in xrange(len(self.lawyers)): suit = self.lawyers[i]", "return def disable(self): global OneBossCog self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment()", "entry): lawyerCol = entry.getIntoNodePath() names = lawyerCol.getName().split('-') lawyerDoId = int(names[1])", "self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath = self.scaleNodePath.find('**/scaleStand')", "self.scaleNodePath.unstash() localAvatar.setPos(-3, 0, 0) base.localAvatar.orbitalCamera.start() self.clearChat() self.witnessToon.clearChat() self.reparentTo(render) self.happy =", "'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic,", "self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds = self.prosecutionLocator.getBounds() prosecutionLocPos = prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos", "self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self): if not self.baseColStashed: self.notify.debug('stashBaseCol')", "+ ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) return", "duration = bossTrack.getDuration() return 
bossTrack def __showOnscreenMessage(self, text): self.notify.debug('----- __showOnscreenmessage')", "str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0),", "localAvatar.doId: self.d_hitToon(doId) def __lawyerGotHit(self, entry): lawyerCol = entry.getIntoNodePath() names =", "TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus: weightBonusText = juryWeightBonus", "(13.1, Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1)) return", "self.zapLocalToon(attackCode, into) def createBlock(self, x1, y1, z1, x2, y2, z2,", "Point3(-4.0, 2.0, 0.5)) insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)", "0, 1.0) self.prosecutionPanNodePath = NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath)", "chair.stopCogsFlying() return def enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree') self.reparentTo(render) self.stickBossToFloor() intervalName =", "toon: toon.reparentTo(render) pos, h = points[i] toon.setPosHpr(battleNode, pos[0], pos[1] +", "self.countToonJurors() self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated) if self.numToonJurorsSeated ==", "tris.closePrimitive() tris.addVertex(1) tris.addVertex(5) tris.addVertex(3) tris.closePrimitive() tris.addVertex(3) tris.addVertex(5) tris.addVertex(7) tris.closePrimitive() tris.addVertex(0)", "Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start() def 
exitEpilogue(self): self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim')", "= None self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() if self.juryTimer:", "if self.arenaSide: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB) p3Pos =", "= base.cr.doId2do.get(toonIds[i]) if toon: pos, h = points[i] origPos =", "points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5]) self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0]) for", "epSpeech, 0)) return bossTrack def makeDefeatMovie(self): bossTrack = Track((0.0, Sequence(Func(self.clearChat),", "+= '\\x07' trialSpeech = juryResult trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree diffSettings =", "def enterEpilogue(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render)", "%s' % center) self.witnessToon.setPos(center) self.witnessToon.setH(180) self.witnessToon.setZ(self.witnessToon.getZ() - 1.5) self.witnessToon.setY(self.witnessToon.getY() -", "seq = Sequence(name=name) seq += [Wait(0.0)] if hasLocalToon: seq +=", "tris.addVertex(5) tris.closePrimitive() tris.addVertex(1) tris.addVertex(0) tris.addVertex(5) tris.closePrimitive() tris.addVertex(4) tris.addVertex(6) tris.addVertex(7) tris.closePrimitive()", "= self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic", "== ToontownGlobals.PieCodeBossInsides: if toon == localAvatar: self.d_hitBossInsides() self.flashRed() elif pieCode", 
"taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) if self.bossDamageMovie: self.bossDamageMovie.finish() self.bossDamageMovie = None self.unstickBoss() taskName", "def __howToGetPies(self, task): self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self, task): self.notify.debug('-----", "def doStrafe(self, side, direction): gearRoot = self.rotateNode.attachNewNode('gearRoot') if side ==", "= 0 self.involvedToons.sort() for toonId in self.involvedToons: if index in", "TTLocalizer.LawbotBossDefenseWins2, CFSpeech)), (9.5, Sequence(Func(camera.wrtReparentTo, render))), (9.6, Parallel( rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3,", "= False if not hasattr(self, 'state'): self.notify.warning('returning from setTaunt, no", "exitWaitForToons(self): self.notify.debug('----- exitWaitForToons') DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive() def enterElevator(self): self.notify.debug('----- enterElevator')", "% defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos()) self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator')", "floor = self.geom.find('**/MidVaultFloor1') if floor.isEmpty(): floor = self.geom.find('**/CR3_Floor') self.evFloor =", "self.__showCannonsAppearing) base.playMusic(self.stingMusic, looping=0, volume=1.0) def __showCannonsAppearing(self, elapsedTime = 0): allCannonsAppear", "* rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h,", "import ElevatorConstants from toontown.toonbase import ToontownTimer OneBossCog = None class", "def d_hitDefensePan(self): self.notify.debug('----- d_hitDefensePan') 
self.sendUpdate('hitDefensePan', []) def d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan')", "gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee() gearModel.setScale(0.1) t = self.getBossDamage() /", "elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel) self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic =", "DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat() self.loop('Ff_neutral') self.notify.debug('self.battleANode = %s' % self.battleANode)", "self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie')) track = Parallel() bossAnimTrack =", "battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) if len(toonIds) <", "'Ff_lookRt', duration=3), ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0), ActorInterval(self, 'Ff_neutral', duration=2),", "localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop,", "p1, p2: p1.compareTo(p2, threshold)) lastPlane = None for plane in", "numToons = len(self.involvedToons) center = (numToons - 1) / 2.0", "setPosHpr') myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0),", "| CFTimeout) base.playSfx(self.toonUpSfx) if not self.bonusTimer: self.bonusTimer = ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner()", "def setAttackCode(self, attackCode, avId = 0): DistributedBossCog.DistributedBossCog.setAttackCode(self, 
attackCode, avId) if", "= self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() finalPodiumPos = Point3(self.podium.getX(),", "prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin) panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos =", "import random import math from toontown.coghq import CogDisguiseGlobals from toontown.building", "lawyers): self.lawyerRequest = None self.lawyers = lawyers for i in", "y, 0) toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show() def __talkAboutPromotion(self, speech): if self.prevCogSuitLevel", "self.setScaleTilt(tilt) if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85: self.unstashBaseCol() else: self.stashBaseCol()", "setTaunt, no attr nametag') gotError = True if gotError: st", "Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar)) multiCannons = Parallel()", "loadScale(self): self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0) if self.useProgrammerScale: self.loadScaleOld() else: self.loadScaleNew()", "TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)),", "-2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube = CollisionTube(0, 0, -0.5, 0, 0,", "disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox()", "self.flashRed() elif pieCode == ToontownGlobals.PieCodeBossCog: if toon == localAvatar: self.d_hitBoss(1)", "= int(attackCodeStr) into = 
entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def touchedGavelHandle(self, gavel,", "newBitMask & ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol", "= self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter) beamLocatorBounds = self.beamLocator.getBounds()", "__loadMopaths') self.toonsEnterA = Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale =", "* ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85: self.unstashBaseCol()", "self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath = self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator')", "self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds, battleNode): self.notify.debug('----- __toonsToPromotionPosition') points = BattleBase.BattleBase.toonPoints[len(toonIds)", "battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node world pos", "TTLocalizer.WitnessToonCongratulations epSpeech = self.__talkAboutPromotion(epSpeech) bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech,", "Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) track, hpr = self.rollBossToPoint(startPos, None, battlePos,", "taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def 
exitPrepareBattleThree(self): self.notify.debug('----- exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage')", "base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self): self.notify.debug('----- exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat')", "h = points[i] origPos = pos self.notify.debug('origPos = %s' %", "hasattr(self, 'state'): self.notify.warning('returning from setTaunt, no attr state') gotError =", "0) if self.useProgrammerScale: self.loadScaleOld() else: self.loadScaleNew() def __debugScale(self): prosecutionPanPos =", "self.mainDoor = self.geom.find('**/Door_1') if not self.mainDoor.isEmpty(): itemsToHide = ['interior/Door_1'] for", "-3, 45, 25), Func(camera.setHpr, 0, 10, 0))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1,", "not self.mainDoor.isEmpty(): self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30,", "+ ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) curReflectedPos =", "cannonIndex): if base.localAvatar.doId == toonId: self.cannonIndex = cannonIndex def numJurorsSeatedByCannon(self,", "pos[2], h, 0, 0) self.notify.debug('new toon pos %s ' %", "beamRelPos) self.notify.debug('beamRenderPos = %s' % beamRenderPos) beamBoundsCenter = self.beamNodePath.getBounds().getCenter() self.notify.debug('beamBoundsCenter", "1.5) self.witnessToon.setY(self.witnessToon.getY() - 1.15) self.witnessToonOnstage = 1 def __hideWitnessToon(self): if", "= battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node world", "7, 3.5) shieldNode = CollisionNode('BossZap') 
shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath", "self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self): if not self.baseColStashed: self.notify.debug('stashBaseCol') self.baseTopCol.stash() self.baseSideCol.stash() self.baseColStashed", "def enteredBonusState(self): self.witnessToon.clearChat() text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text,", "NodePath('defensePan') self.defensePanNodePath.attachNewNode(defensePanGeom) self.defensePanNodePath.setPos(0, -2, 0) self.defensePanNodePath.reparentTo(self.beamNodePath) defenseTube = CollisionTube(0, 0,", "self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def", "self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) self.startJuryBoxMoving() for index in xrange(len(self.cannons)): cannon", "= gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0, 0) gear = gearModel.instanceTo(node) angle", "xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.wrtReparentTo(render) pos, h =", "def __unloadMopaths(self): self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset() def enterOff(self): self.notify.debug('----- enterOff')", "self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(),", "= [] for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if", "* math.pi / 180.0 if direction == 1: spread =", "1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = 
self.rollBossToPoint(startPos, None,", "Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close')) self.__cleanupStrafe() self.strafeInterval = seq", "self.juryMovesSfx = None self.baseColStashed = False self.battleDifficulty = 0 self.bonusWeight", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return bossTrack def __makeWitnessToon(self): dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13'", "ToontownGlobals from toontown.toonbase import ToontownBattleGlobals import DistributedBossCog from toontown.toonbase import", "self.battleANode) self.stickBossToFloor() intervalName = 'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName)", "self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)", "volume=0.9) self.startJuryBoxMoving() for index in xrange(len(self.cannons)): cannon = self.cannons[index] cannon.cannon.show()", "%s' % locatorRenderPos) beamPos = self.beamNodePath.getPos() beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin)", "seq.start() self.storeInterval(seq, intervalName) self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def __doneEpilogue(self,", "camera.reparentTo(self.elevatorModel) camera.setPosHpr(0, 30, 8, 180, 0, 0) def exitElevator(self): self.notify.debug('-----", "__howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self): self.everThrownPie = 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def", "= 1 bonusWeight = 0 newWeight = 1 cannonIndex =", "localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog == self: 
OneBossCog = None return def", "cannonIndex = self.cannonIndex numJurors = 0 if not cannonIndex ==", "1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) track, hpr = self.rollBossToPoint(startPos, None, battlePos, None,", "self.juryBoxIval = None self.juryBox.setPos(-30, 0, -12.645) self.reflectedJuryBox.setPos(-30, 0, 0) curPos", "pos %s ' % toon.getPos()) def touchedGavel(self, gavel, entry): self.notify.debug('touchedGavel')", "toontown.battle import BattleBase from direct.directutil import Mopath from direct.showutil import", "exitElevator(self): self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def enterIntroduction(self): self.notify.debug('----- enterIntroduction') self.reparentTo(render)", "self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat() self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy = 1", "self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat))", "self.chairs.keys(): chair = self.chairs[key] if chair.state == 'ToonJuror' or chair.state", "self.numToonJurorsSeated += 1 self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) return def", "Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack", "self.battleA == None or self.battleB == None: pass return def", "1.0, 0, 0, 1.0) self.prosecutionPanNodePath = NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2,", "key in self.chairs.keys(): chair = self.chairs[key] if chair.state == 'ToonJuror'", "not self.debugPositions: 
self.juryBox.setPos(newPos) self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos = self.reflectedJuryBox.getPos() newReflectedPos", "self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def", "= self.cannonIndex numJurors = 0 if not cannonIndex == None", "in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.show() def __arrangeToonsAroundWitnessToon(self):", "self.defenseLocator.hide() self.prosecutionLocator.hide() self.beamLocator.hide() def loadScaleOld(self): startingTilt = 0 self.scaleNodePath =", "percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage) tilt = percentDamaged", "* self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage - bossDamage,", "self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName = 'prepareBattleThree' seq = Sequence(prepareBattleThreeMovie, name=intervalName) seq.start()", "self.__hideWitnessToon() if self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon = None return def", "localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterDefeat(self): self.notify.debug('----- enterDefeat') self.cleanupIntervals()", "TTLocalizer.LawbotBossTaunts[1] if tauntIndex == 0: if extraInfo < len(self.involvedToons): toonId", "myFromPos[2]) rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0) rollTrack =", "= Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN = GeomNode('cube') cubeGN.addGeom(cubeGeom) return cubeGN def", "__doWitnessPrepareBattleThreeChat(self): 
self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after", "for chair in self.chairs.values(): chair.stopCogsFlying() return def enterRollToBattleThree(self): self.notify.debug('----- enterRollToBattleThree')", "setTaunt, no attr state') gotError = True elif not self.state", "0): self.panDamage = 25 self.evidenceHitSfx = None self.toonUpSfx = None", "bnWorldPos) pos = render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint result = %s'", "self.notify.debug('not found %s' % stuffToHide) self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door') if not", "avId = 0): DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId) if attackCode == ToontownGlobals.BossCogAreaAttack:", "= entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def touchedGavelHandle(self, gavel, entry): attackCodeStr =", "if self.juryBox: self.juryBox.removeNode() return def doStrafe(self, side, direction): gearRoot =", "battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node world pos = %s' % bnWorldPos) pos", "* from direct.interval.IntervalGlobal import * from toontown.battle.BattleProps import * from", "= CollisionNode('dropPlane') planeNode.addSolid(plane) planeNode.setCollideMask(ToontownGlobals.PieBitmask) self.geom.attachNewNode(planeNode) self.door3 = self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty():", "makeScaleReflectDamage(self): diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage diffDamage *= 1.0 if", "self.activeIntervals.get(intervalName) if oldSeq: oldSeq.finish() seq.start() self.storeInterval(seq, intervalName) def setTaunt(self, tauntIndex,", "self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH,", "spread = -spread dist = 50 rate = time /", "= 
distance / (ToontownGlobals.SuitWalkSpeed * 1.8) return Sequence(Func(node.setPos, fromPos), Func(node.headsUp,", "= self.cr.doId2do.get(toonId) if toon: toon.stopLookAround() toon.stopSmooth() if self.hasLocalToon(): self.toMovieMode() for", "colorWriter = GeomVertexWriter(myVertexData, 'color') texWriter = GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1, y1,", "self.numToonJurorsSeated == 0: juryResult = TTLocalizer.WitnessToonNoJuror elif self.numToonJurorsSeated == 1:", "debugPositions = False def __init__(self, cr): self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self, cr)", "pass if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if not self.elevatorEntrance.isEmpty(): pass def", "xrange(numToons): toon = self.cr.doId2do.get(self.involvedToons[i]) if toon: angle = 90 -", "localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech))", "exitEpilogue(self): self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop() def enterFrolic(self): self.notify.debug('----- enterFrolic')", "__lawyerGotHit(self, entry): lawyerCol = entry.getIntoNodePath() names = lawyerCol.getName().split('-') lawyerDoId =", "self.chairs = {} self.cannons = {} self.useCannons = 1 self.juryBoxIval", "[]) def d_hitDefensePan(self): self.notify.debug('----- d_hitDefensePan') self.sendUpdate('hitDefensePan', []) def d_hitProsecutionPan(self): self.notify.debug('-----", "diffSettings[4]: numJurors = self.numJurorsSeatedByCannon(cannonIndex) bonusWeight = numJurors - diffSettings[5] if", "tris = GeomTriangles(Geom.UHDynamic) tris.addVertex(0) tris.addVertex(1) tris.addVertex(2) tris.closePrimitive() tris.addVertex(1) tris.addVertex(3) tris.addVertex(2)", "not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def 
exitIntroduction(self): self.notify.debug('----- exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if", "from direct.distributed.ClockDelta import * from direct.showbase.PythonUtil import Functor from direct.showbase.PythonUtil", "self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() if self.juryTimer: self.juryTimer.destroy() del self.juryTimer if", "self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter) beamLocatorBounds = self.beamLocator.getBounds() beamLocatorPos =", "= NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0, 3) self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom = self.createBlock(0.5,", "self.notify.debug('----- enterRollToBattleTwo') self.releaseToons(finalBattle=1) self.stashBoss() self.toonsToBattlePosition(self.involvedToons, self.battleANode) self.stickBossToFloor() intervalName = 'RollToBattleTwo'", "intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self): self.notify.debug('----- __doneReward') self.doneBarrier('Reward')", "30, myFromPos[2]) rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0) rollTrack", "or plane.compareTo(lastPlane, threshold) != 0: cp = CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane", "seq.start() self.storeInterval(seq, intervalName) def saySomething(self, chatString): intervalName = 'ChiefJusticeTaunt' seq", "tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt else: percentDamaged = diffDamage /", "0 self.recoverRate = 0 self.recoverStartTime = 0 self.bossDamageMovie = None", "self.witnessToon: self.witnessToon.clearChat() def enterWaitForToons(self): self.notify.debug('----- enterWaitForToons') DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() 
self.witnessToon.removeActive() def", "%s' % beamPos) self.notify.debug('beamRelPos = %s' % beamRelPos) self.notify.debug('beamRenderPos =", "render.getTransform())) self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale())) myCurPos = self.getPos() self.notify.debug('myCurPos", "= self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3) self.standNodePath = NodePath('scaleStand')", "self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon = None return def __showWitnessToon(self): if", "seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueDefeat(self): self.notify.debug('-----", "def exitPrepareBattleThree(self): self.notify.debug('----- exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName = 'PrepareBattleThree'", "movie then transition to NearVictory') self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie)", "'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons)", "self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect') if not self.reflectedWitnessStand.isEmpty(): pass colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision')", "__howToGetPies(self, task): self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self, task): self.notify.debug('----- __howToThrowPies')", "enterOff(self): self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon: 
self.witnessToon.clearChat() def enterWaitForToons(self): self.notify.debug('-----", "= [model] for cnp in collList: cn = cnp.node() if", "= %s' % negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol')", "not self.elevatorEntrance.isEmpty(): pass def enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render)", "and cannonIndex >= 0: diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: numJurors", "enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.clearChat() self.releaseToons(finalBattle=1) self.happy = 0 self.raised", "hasattr(place, 'fsm'): place.setState('waitForBattle') def makeToonsWait(self): self.notify.debug('makeToonsWait') for toonId in self.involvedToons:", "Func(self.clearChat), Func(camera.reparentTo, render), Func(camera.setPos, -3, 45, 25), Func(camera.setHpr, 0, 10,", "def loadScale(self): self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0) if self.useProgrammerScale: self.loadScaleOld() else:", "self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop() def enterFrolic(self): self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)", "for i in xrange(cn.getNumSolids()): solid = cn.getSolid(i) if isinstance(solid, CollisionPolygon):", "self.numJurorsLocalToonSeated = 0 self.cannonIndex = -1 return def announceGenerate(self): global", "(18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24, Sequence(", "return 
Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss),", "self.numToonJurorsSeated) return def cleanupPanFlash(self): if self.panFlashInterval: self.panFlashInterval.finish() self.panFlashInterval = None", "from direct.showbase.ShowBase import * from direct.interval.IntervalGlobal import * from toontown.battle.BattleProps", "self.reflectedJuryBox.getPos() reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2]", "self.juryBoxIval.start() self.juryTimer = ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self): self.notify.debug('----- exitBattleTwo')", "name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.accept('doneChatPage', self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def", "volume=0.2, node=self), duration=0)), Func(node.detachNode))) seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack,", "doorStartPos)))) retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1)) return bossTrack def", "== 1: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if", "intervalName = 'RewardMovie' delayDeletes = [] for toonId in self.involvedToons:", "180.0 x = math.cos(radians) * radius y = math.sin(radians) *", "in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.wrtReparentTo(render) pos, h", "self.scaleNodePath.find('**/scaleStand') self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr) self.defenseLocator = 
self.scaleNodePath.find('**/DefenseLocator') defenseLocBounds = self.defenseLocator.getBounds() defenseLocPos =", "CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk =", "z1, x2, y2, z2, r = 1.0, g = 1.0,", "ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos = Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2]) rollThroughDoor", "toPos), node.posInterval(time, toPos)) def __makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2])", "0.6) defenseTube.setTangible(1) defenseCollNode = CollisionNode('DefenseCol') defenseCollNode.addSolid(defenseTube) self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode) self.defenseColNodePath.setTag('pieCode',", "def toNeutralMode(self): if self.cr: place = self.cr.playGame.getPlace() if place and", "if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.WitnessToonHPBoost else: speech +=", "self.chairs[key] if chair.state == 'ToonJuror' or chair.state == None and", "Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def", "1) - 0.5) * spread x = dist * math.sin(angle)", "= BattleBase.BattleBase.toonPoints[len(toonIds) - 1] self.notify.debug('walkToonsToBattlePosition: points = %s' % points[0][0])", "Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar)) multiCannons = Parallel() index", "'texcoord') vertexWriter.addData3f(x1, y1, z1) vertexWriter.addData3f(x2, y1, z1) vertexWriter.addData3f(x1, y2, z1)", "1)), 
self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)), name=intervalName) self.panFlashInterval = seq", ">= 0: diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]: numJurors = self.numJurorsSeatedByCannon(cannonIndex)", "defenseLocBounds = self.defenseLocator.getBounds() defenseLocPos = defenseLocBounds.getCenter() self.notify.debug('defenseLocatorPos = %s' %", "into = entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def createBlock(self, x1, y1, z1,", "(1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return bossTrack def __makeWitnessToon(self): dnaNetString =", "pos = render.getRelativePoint(battleNode, pos) self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint result = %s' %", "TTLocalizer.LawbotBossProsecutionWins, CFSpeech))) return bossTrack def __makeWitnessToon(self): dnaNetString = 't\\x1b\\x00\\x01\\x01\\x00\\x03\\x00\\x03\\x01\\x10\\x13\\x00\\x13\\x13' npc", "tris.closePrimitive() tris.addVertex(1) tris.addVertex(0) tris.addVertex(5) tris.closePrimitive() tris.addVertex(4) tris.addVertex(6) tris.addVertex(7) tris.closePrimitive() tris.addVertex(7)", "False def makeScaleReflectDamage(self): diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage diffDamage *=", "self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon() if not self.useCannons: self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode)", "0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat)) return movie", "numJurorsSeatedByCannon(self, cannonIndex): retVal = 0 for chair in self.chairs.values(): if", "def makeVictoryMovie(self): myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos = Point3(myFromPos[0],", 
"= points[i] if i > 3: pos.setY(pos.getY() + 2.0) bnParent", "0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01,", "if self.useProgrammerScale: self.loadScaleOld() else: self.loadScaleNew() def __debugScale(self): prosecutionPanPos = self.prosecutionPanNodePath.getPos()", "self.onscreenMessage = None return def __showWaitingMessage(self, task): self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors)", "= Sequence(prepareBattleThreeMovie, name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleThree(self, elapsed): self.notify.debug('-----", "self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals() self.clearChat()", "self.notify.debug('beamRelPos = %s' % beamRelPos) self.notify.debug('beamRenderPos = %s' % beamRenderPos)", "self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0)) self.juryBoxIval.start() self.juryTimer =", "self.battleThreeMusic.stop() def enterEpilogue(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() self.__showWitnessToon()", "[]) def d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', []) def d_hitToon(self, toonId):", "self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) 
camera.setPos(self.witnessToon, -9, 12, 6) camera.lookAt(self.witnessToon,", "self.reparentTo(render) self.stickBossToFloor() intervalName = 'RollToBattleThree' seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName)", "Sequence(Parallel(victory, camVictory), Func(self.__doneReward)) intervalName = 'RewardMovie' delayDeletes = [] for", "'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral')))", "+= 1 self.notify.debug('done with positionToons') def __makePrepareBattleTwoMovie(self): chatString = TTLocalizer.WitnessToonPrepareBattleTwo", "= self.cr.doId2do.get(toonId) self.notify.debug('cannonId = %d' % cannon.doId) cannonPos = cannon.nodePath.getPos(render)", "Func(toon.loop, 'neutral'))) return ival def toonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s", "self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) self.accept('enterDefenseCol', self.__enterDefenseCol) self.accept('enterProsecutionCol', self.__enterProsecutionCol) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies,", "== 12: juryResult = TTLocalizer.WitnessToonAllJurors else: juryResult = TTLocalizer.WitnessToonSomeJurors %", "self.notify.debug('----- __doneBattleThree') self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self): self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1)", "gotError = True if gotError: st = StackTrace() print st", "def __arrangeToonsAroundWitnessToon(self): radius = 7 numToons = len(self.involvedToons) center =", "def __clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None return def", 
"self.beamLocator.hide() def loadScaleOld(self): startingTilt = 0 self.scaleNodePath = NodePath('injusticeScale') beamGeom", "for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon,", "localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon() if not self.useCannons:", "self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds = self.prosecutionLocator.getBounds() prosecutionLocPos = prosecutionLocBounds.getCenter() self.notify.debug('prosecutionLocatorPos = %s'", "cannon.doId) cannonPos = cannon.nodePath.getPos(render) self.notify.debug('cannonPos = %s' % cannonPos) if", "ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage) if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def", "cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index += 1 else: self.notify.warning('No cannon %d but", "node=self), duration=0)), Func(node.detachNode))) seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request,", "npc.animFSM.request('Sit') self.witnessToon = npc self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self): self.__hideWitnessToon() if self.witnessToon:", "= loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx = [] for i", "self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName = 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def", 
"z2, r = 1.0, g = 1.0, b = 1.0,", "self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self): startPos", "1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)), name=intervalName) self.panFlashInterval =", "self.witnessToon.removeActive() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.happy = 1 self.raised = 1 self.forward", "def loadWitnessStand(self): self.realWitnessStand = self.geom.find('**/WitnessStand') if not self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand", "% self.battleANode) self.__hideWitnessToon() if self.battleA == None or self.battleB ==", "self.loadScale() self.scaleNodePath.stash() self.loadJuryBox() self.loadPodium() ug = self.geom.find('**/Reflections') ug.setBin('ground', -10) def", "= entry.getIntoNodePath() self.zapLocalToon(attackCode, into) def createBlock(self, x1, y1, z1, x2,", "+= 1 else: self.notify.warning('No cannon %d but we have a", "self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.clearChat() self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.happy = 1 self.raised", "6) camera.lookAt(self.witnessToon, 0, 0, 3) intervalName = 'EpilogueMovie' seq =", "Func(self.__doWitnessPrepareBattleThreeChat)) return movie def countToonJurors(self): self.numToonJurorsSeated = 0 for key", "def __talkAboutPromotion(self, speech): if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)]", "= True if not hasattr(self, 'nametag'): self.notify.warning('returning from setTaunt, no", "= self.numJurorsSeatedByCannon(cannonIndex) bonusWeight = numJurors - diffSettings[5] if bonusWeight <", "avatarDoId tag.' 
% repr(entry.getIntoNodePath())) return doId = int(avatarDoId) if doId", "loadPodium(self): self.podium = self.geom.find('**/Podium') newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2] if", "points = list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5]) self.notify.debug('toonsToBattlePosition: points = %s'", "% self.numToonJurorsSeated) if self.numToonJurorsSeated == 0: juryResult = TTLocalizer.WitnessToonNoJuror elif", "looping=1, volume=0.9) def __onToPrepareBattleThree(self): self.notify.debug('----- __onToPrepareBattleThree') self.unstickBoss() self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.doneBarrier('RollToBattleThree') def", "def d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', []) def d_hitToon(self, toonId): self.notify.debug('-----", "self.notify.warning('Multiple BossCogs visible.') OneBossCog = self return def disable(self): global", "multiCannons.append(cannonSeq) index += 1 else: self.notify.warning('No cannon %d but we", "self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render)", "stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not found %s'", "enterFrolic(self): self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def doorACallback(self, isOpen): if", "Plane(solid.getPlane()) planes.append(plane) else: self.notify.warning('Unexpected collision solid: %s' % repr(solid)) newCollisionNode.addSolid(plane)", "= None self.bonusTimer = None 
self.warningSfx = None self.juryMovesSfx =", "track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0) self.makeToonsWait() return", "0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0))", "180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat)) return movie def countToonJurors(self): self.numToonJurorsSeated =", "state=%s', self.state) gotError = True if not hasattr(self, 'nametag'): self.notify.warning('returning", "* self.bossDamageToMovie) return Task.cont def __walkToonToPromotion(self, toonId, delay, mopath, track,", "Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5)) insidesANode", "toon.getPos()) def touchedGavel(self, gavel, entry): self.notify.debug('touchedGavel') attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if", "if not self.debugPositions: self.juryBox.setPos(newPos) self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos = self.reflectedJuryBox.getPos()", "35 self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale =", "[] for i in xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA =", "self.toonsEnterB.timeScale = 35 def __unloadMopaths(self): self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset() self.toonsEnterB.reset() def", "taskName = 'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def", "not self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if not self.reflectedPodium.isEmpty(): if self.debugPositions: self.reflectedPodium.show() def", "self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() panelName = self.uniqueName('reward') self.rewardPanel = 
RewardPanel.RewardPanel(panelName)", "self.__cleanupStrafe() def __cleanupStrafe(self): self.notify.debug('----- __cleanupStrage') if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval =", "ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5]) bossTrack = Sequence()", "-0.5, 0, 0, -1.5, 0.6) prosecutionTube.setTangible(1) prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube)", "xrange(len(self.lawyers)): suit = self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId) return def setBossDamage(self,", "self.juryBox.setPos(-30, 0, -12.645) self.reflectedJuryBox.setPos(-30, 0, 0) curPos = self.juryBox.getPos() endingAbsPos", "self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1) bossTrack.append(track) track, hpr = self.rollBossToPoint(bottomPos,", "self.juryBox.setPos(newPos) self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos = self.reflectedJuryBox.getPos() newReflectedPos = reflectedJuryBoxPos", "%s' % self.battleANode) self.__hideWitnessToon() if self.battleA == None or self.battleB", "% repr(cnp)) break newCollideMask = newCollideMask | cn.getIntoCollideMask() for i", "= None self.bossDamage = 0 self.attackCode = None self.attackAvId =", "self.show() def doorACallback(self, isOpen): if self.insidesANodePath: if isOpen: self.insidesANodePath.unstash() else:", "3: pos.setY(pos.getY() + 2.0) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos =", "self.clearChat() self.loop('Ff_neutral') self.notify.debug('self.battleANode = %s' % self.battleANode) self.__hideWitnessToon() if self.battleA", "gearModel.setScale(0.1) t = self.getBossDamage() / 100.0 gearTrack = Parallel() numGears", "= npc 
self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self): self.__hideWitnessToon() if self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete()", "= self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() negBeamLocatorPos = -beamLocatorPos self.notify.debug('beamLocatorPos =", "= self.involvedToons[extraInfo] toon = base.cr.doId2do.get(toonId) if toon: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex]", "self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self, task): self.notify.debug('----- __howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def", "reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] +", "OneBossCog == self: OneBossCog = None return def delete(self): self.notify.debug('-----", "if stateName == 'Elevator': self.placeToonInElevator(toon) def setLawyerIds(self, lawyerIds): self.lawyers =", "if index in self.cannons: cannon = self.cannons[index] cannonSeq = cannon.generateCannonAppearTrack(toon)", "ival.delayDeletes = delayDeletes ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)", "% cannon.doId) cannonPos = cannon.nodePath.getPos(render) self.notify.debug('cannonPos = %s' % cannonPos)", "= pos self.notify.debug('origPos = %s' % origPos) self.notify.debug('batlleNode.getTransform = %s", "= self.geom.find('**/Witnessstand_Geo_Reflect') if not self.reflectedWitnessStand.isEmpty(): pass colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision') colNode.setName('WitnessStand')", "self.__showWitnessToon() self.witnessToon.reparentTo(render) 
self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) camera.setPos(self.witnessToon, -9, 12, 6)", "= self.prosecutionLocator.getPos() prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin)", "volume=0.9) self.__showWitnessToon() diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage) if", "= self return def disable(self): global OneBossCog self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self)", "= Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack", "taskName = 'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie: if self.bossDamage >= self.bossMaxDamage:", "0, -0.5, 0, 0, -1.5, 0.6) prosecutionTube.setTangible(1) prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol'))", "time=self.battleThreeMusicTime) def __continueVictory(self): self.notify.debug('----- __continueVictory') self.stopAnimate() self.doneBarrier('Victory') def exitVictory(self): self.notify.debug('-----", "insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0,", "self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer: self.juryTimer.destroy() del self.juryTimer", "toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0, 8, 0) toon.setH(180) renderPos = 
toon.getPos(render) self.notify.debug('renderPos", "__init__(self, cr): self.notify.debug('----- __init___') DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedLawbotBoss') self.lawyers =", "self.geom.attachNewNode(planeNode) self.door3 = self.geom.find('**/SlidingDoor1/') if self.door3.isEmpty(): self.door3 = self.geom.find('**/interior/CR3_Door') self.mainDoor", "self.juryTimer if self.bonusTimer: self.bonusTimer.destroy() del self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog ==", "%s' % negBeamLocatorPos) self.beamNodePath.setPos(beamLocatorPos) self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale) self.scaleNodePath.wrtReparentTo(self.geom) self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol') oldBitMask", "= Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)", "taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self): self.notify.debug('-----", "retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1)) return bossTrack def makeEpilogueMovie(self):", "tris.addVertex(7) tris.closePrimitive() tris.addVertex(0) tris.addVertex(4) tris.addVertex(5) tris.closePrimitive() tris.addVertex(1) tris.addVertex(0) tris.addVertex(5) tris.closePrimitive()", "taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage) def getBossDamage(self): self.notify.debug('----- getBossDamage')", "self.juryTimer: self.juryTimer.destroy() del self.juryTimer if self.bonusTimer: self.bonusTimer.destroy() del self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu()", 
"self.notify.debug('----- __walkToonToPromotion') toon = base.cr.doId2do.get(toonId) if toon: destPos = toon.getPos()", "exitIntroduction(self): self.notify.debug('----- exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() if not self.mainDoor.isEmpty(): pass if", "str(ToontownGlobals.PieCodeProsecutionPan)) standGeom = self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3) self.standNodePath", "shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap')", "node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode)))", "startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute,", "origin) panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin) self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos)", "self.cannonIndex = cannonIndex def numJurorsSeatedByCannon(self, cannonIndex): retVal = 0 for", "del self.rewardPanel self.battleThreeMusicTime = 0 self.battleThreeMusic.stop() def enterEpilogue(self): self.cleanupIntervals() self.clearChat()", "self.forward = 1 self.doAnimate() self.accept('enterWitnessStand', self.__touchedWitnessStand) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat)", "= base.config.GetBool('want-injustice-scale-debug', 0) if self.useProgrammerScale: self.loadScaleOld() else: self.loadScaleNew() def __debugScale(self):", "pass def enterBattleOne(self): self.notify.debug('----- LawbotBoss.enterBattleOne ') DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.clearChat()", 
"__onToBattleTwo(self, elapsedTime = 0): self.notify.debug('----- __onToBattleTwo') self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))", "pos) self.notify.debug('walktToonsToBattlePosition: render.getRelativePoint result = %s' % pos) self.notify.debug('walkToonsToBattlePosition: final", "def exitRollToBattleThree(self): self.notify.debug('----- exitRollToBattleThree') self.unstickBoss() intervalName = 'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop()", "None self.attackAvId = 0 self.recoverRate = 0 self.recoverStartTime = 0", "tris.addVertex(7) tris.addVertex(5) tris.addVertex(4) tris.closePrimitive() cubeGeom = Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN =", "if self.witnessToonOnstage: self.witnessToon.removeActive() self.witnessToon.detachNode() self.witnessToonOnstage = 0 def __hideToons(self): for", "d_hitDefensePan') self.sendUpdate('hitDefensePan', []) def d_hitProsecutionPan(self): self.notify.debug('----- d_hitProsecutionPan') self.sendUpdate('hitProsecutionPan', []) def", "self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: if index in self.cannons:", "self.witnessToon.removeActive() def enterIntroduction(self): self.notify.debug('----- enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self)", "doId = int(avatarDoId) if doId != localAvatar.doId: self.d_hitToon(doId) def __lawyerGotHit(self,", "attackCode tag.' 
% repr(entry.getIntoNodePath())) return attackCode = int(attackCodeStr) into =", "-10) def loadJuryBox(self): self.juryBox = self.geom.find('**/JuryBox') juryBoxPos = self.juryBox.getPos() newPos", "if not self.reflectedJuryBox.isEmpty(): if self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) def", "Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0)) return bossTrack def makeDefeatMovie(self): bossTrack", "0)) rollTrackDuration = rollTrack.getDuration() self.notify.debug('rollTrackDuration = %f' % rollTrackDuration) doorStartPos", "import ToontownTimer OneBossCog = None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify =", "= Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2])", "'EpilogueMovie' seq = Sequence(self.makeEpilogueMovie(), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.accept('doneChatPage', self.__doneEpilogue)", "duration=3), ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0), ActorInterval(self, 'Ff_neutral', duration=2), ActorInterval(self,", "camera.setPos(self.witnessToon, -9, 12, 6) camera.lookAt(self.witnessToon, 0, 0, 3) intervalName =", "None, 0) bossTrack.append(track) track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr,", "self.loop('Ff_neutral') self.notify.debug('self.battleANode = %s' % self.battleANode) self.__hideWitnessToon() if self.battleA ==", "import Toon from toontown.battle import BattleBase from direct.directutil import Mopath", "< 0: bonusWeight = 0 newWeight = defaultWeight + bonusWeight", "self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def 
exitPrepareBattleThree(self): self.notify.debug('----- exitPrepareBattleThree') self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore('doneChatPage') intervalName", "= 0 self.raised = 0 self.forward = 1 self.doAnimate() self.setDizzy(1)", "'Ff_neutral'), Func(self.setChatAbsolute, attackToons, CFSpeech)))) track.append(dialogTrack) return Sequence( Func(self.stickToonsToFloor), track, Func(self.unstickToons),", "isOpen: self.insidesANodePath.unstash() else: self.insidesANodePath.stash() def doorBCallback(self, isOpen): if self.insidesBNodePath: if", "d_hitBossInsides') self.sendUpdate('hitBossInsides', []) def d_hitDefensePan(self): self.notify.debug('----- d_hitDefensePan') self.sendUpdate('hitDefensePan', []) def", "self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = None self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop()", "-13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut') chatTrack", "if lastPlane == None or plane.compareTo(lastPlane, threshold) != 0: cp", "doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25) bossTrack = Track(", "= seq seq.start() self.storeInterval(seq, intervalName) def saySomething(self, chatString): intervalName =", "Wait(0.7), gearTrack, Func(door.request, 'close')) self.__cleanupStrafe() self.strafeInterval = seq seq.start() def", "cn.getSolid(i) if isinstance(solid, CollisionPolygon): plane = Plane(solid.getPlane()) planes.append(plane) else: self.notify.warning('Unexpected", "pos self.notify.debug('origPos = %s' % origPos) self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s'", "Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)),", "self.insidesANodePath.stash() insidesB = 
CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0,", "__onToBattleThree(self, elapsed): self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleThree(self):", "self.cr.doId2do.get(toonId) if toon: toon.stopLookAround() toon.stopSmooth() if self.hasLocalToon(): self.toMovieMode() for toonId", "DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() localAvatar.setPos(-3, 0, 0) base.localAvatar.orbitalCamera.start() self.clearChat() self.witnessToon.clearChat() self.reparentTo(render) self.happy", "= None self.witnessToonOnstage = False self.numToonJurorsSeated = 0 self.mainDoor =", "str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee() gearModel.setScale(0.1) t = self.getBossDamage() / 100.0", "exitNearVictory(self): self.notify.debug('----- exitNearVictory') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.setDizzy(0)", "toon, pieCode): if pieCode != ToontownGlobals.PieCodeDefensePan: return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat')", "Sequence(name=intervalName) seq.append(Func(self.setChatAbsolute, chatString, CFSpeech)) seq.append(Wait(4.0)) seq.append(Func(self.clearChat)) oldSeq = self.activeIntervals.get(intervalName) if", "% prosecutionLocPos) self.prosecutionPanNodePath.setPos(prosecutionLocPos) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) self.beamLocator = self.scaleNodePath.find('**/StandLocator1') beamLocatorBounds = self.beamLocator.getBounds()", "node.hide() node.setPos(0, 0, 0) gear = gearModel.instanceTo(node) angle = (float(i)", "self.bossDamageMovie.finish() self.bossDamageMovie = None self.unstickBoss() taskName = 'RecoverBossDamage' 
taskMgr.remove(taskName) self.battleThreeMusicTime", "planes.append(plane) else: self.notify.warning('Unexpected collision solid: %s' % repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask)", "self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat() self.reparentTo(render) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie =", "self.juryTimer.destroy() del self.juryTimer self.juryTimer = None for chair in self.chairs.values():", "%d' % diff) self.battleDifficulty = diff def toonEnteredCannon(self, toonId, cannonIndex):", "base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor = self.geom.find('**/MidVaultFloor1')", "self.mainDoor.stash() if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.stash() def exitIntroduction(self): self.notify.debug('----- exitIntroduction') DistributedBossCog.DistributedBossCog.exitIntroduction(self)", "self.notify.debug('----- enterIntroduction') self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1, volume=0.9)", "x1, y1, z1, x2, y2, z2, r = 1.0, g", "Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB) topRampTurnPos =", "self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss() intervalName = 'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self):", "pieCode != ToontownGlobals.PieCodeToon: 
return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId ==", "toPos=myToPos, toHpr=None, reverse=0) rollTrack = Sequence( Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH,", "self.toonUpSfx = None self.bonusTimer = None self.warningSfx = None self.juryMovesSfx", "tris.addVertex(3) tris.addVertex(7) tris.addVertex(6) tris.closePrimitive() tris.addVertex(0) tris.addVertex(2) tris.addVertex(4) tris.closePrimitive() tris.addVertex(2) tris.addVertex(6)", "volume=1.0)) self.juryBoxIval.start() self.juryTimer = ToontownTimer.ToontownTimer() self.juryTimer.posInTopRightCorner() self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime) def exitBattleTwo(self): self.notify.debug('-----", "= GeomVertexWriter(myVertexData, 'color') texWriter = GeomVertexWriter(myVertexData, 'texcoord') vertexWriter.addData3f(x1, y1, z1)", "self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName = 'prepareBattleThree' seq = Sequence(prepareBattleThreeMovie, name=intervalName)", "attackCodeStr == '': self.notify.warning('Node %s has no attackCode tag.' 
%", "npc self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr) def __cleanupWitnessToon(self): self.__hideWitnessToon() if self.witnessToon: self.witnessToon.removeActive() self.witnessToon.delete() self.witnessToon", "self.recoverRate = recoverRate self.recoverStartTime = recoverStartTime taskName = 'RecoverBossDamage' taskMgr.remove(taskName)", "% cannonPos) if toon: self.notify.debug('toon = %s' % toon.getName()) toon.reparentTo(cannon.nodePath)", "self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door') if not self.reflectedMainDoor.isEmpty(): itemsToHide = ['Reflections/Door_1'] for", "import SuitDNA from toontown.toon import Toon from toontown.battle import BattleBase", "bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1)", "have a toon =%d' % (index, toonId)) allCannonsAppear.append(multiCannons) intervalName =", "seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic,", "node: %s' % repr(cnp)) break newCollideMask = newCollideMask | cn.getIntoCollideMask()", "%s' % repr(solid)) newCollisionNode.addSolid(plane) newCollisionNode.setIntoCollideMask(newCollideMask) threshold = 0.1 planes.sort(lambda p1,", "def exitBattleTwo(self): self.notify.debug('----- exitBattleTwo') intervalName = self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop()", "self.notify.debug('----- enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def doorACallback(self, isOpen): if self.insidesANodePath:", "else: self.notify.debug('not found %s' % stuffToHide) self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door') if", "planes.sort(lambda p1, p2: p1.compareTo(p2, threshold)) lastPlane = 
None for plane", "self.reparentTo(render) self.happy = 1 self.raised = 1 self.forward = 1", "(self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech += '\\x07' trialSpeech += weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0)", "= 'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleTwo(self): self.notify.debug('----- enterPrepareBattleTwo') self.cleanupIntervals() self.controlToons()", "' % toon.getPos()) def touchedGavel(self, gavel, entry): self.notify.debug('touchedGavel') attackCodeStr =", "__debugScale(self): prosecutionPanPos = self.prosecutionPanNodePath.getPos() origin = Point3(0, 0, 0) prosecutionPanRelPos", "self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie')) def __makeRollToBattleThreeMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2])", "1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1) return", "self.loadScaleOld() else: self.loadScaleNew() def __debugScale(self): prosecutionPanPos = self.prosecutionPanNodePath.getPos() origin =", "= %s' % toon.getName()) toon.reparentTo(cannon.nodePath) toon.setPos(0, 8, 0) toon.setH(180) renderPos", "self.notify.debug('renderPos =%s' % renderPos) index += 1 self.notify.debug('done with positionToons')", "def numJurorsSeatedByCannon(self, cannonIndex): retVal = 0 for chair in self.chairs.values():", "deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack = Sequence() bossTrack.append(Func(self.loop, 'Ff_neutral'))", "self.strafeInterval = None return def __cleanupJuryBox(self): self.notify.debug('----- __cleanupJuryBox') if self.juryBoxIval:", "startTime=3, endTime=0), ActorInterval(self, 'Ff_neutral', duration=2), ActorInterval(self, 'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack)", "% self.numToonJurorsSeated 
juryResult += '\\x07' trialSpeech = juryResult trialSpeech +=", "| ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis)", "== localAvatar: self.d_hitBoss(1) if self.dizzy: self.flashRed() self.doAnimate('hit', now=1) elif pieCode", "self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self): self.notify.debug('----- exitBattleThree') DistributedBossCog.DistributedBossCog.exitBattleThree(self) NametagGlobals.setMasterArrowsOn(1) bossDoneEventName =", "= self.geom.find('**/Podium_Geo1_Refl') reflectedZ = self.reflectedPodium.getZ() if not self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if", "'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')),", "cannon = self.cannons[index] cannonSeq = cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index += 1", "self.bossHealthBar.deinitialize() base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self): self.notify.debug('----- __continueVictory') self.stopAnimate()", "delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes = delayDeletes ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1,", "1, 4, 0, 1, 7, 3.5) shieldNode = CollisionNode('BossZap') shieldNode.addSolid(shield)", "self.beamNodePath.setP(tilt) if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) else: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt) def stashBaseCol(self):", "exitRollToBattleTwo(self): self.notify.debug('----- exitRollToBattleTwo') self.unstickBoss() intervalName = 'RollToBattleTwo' 
self.clearInterval(intervalName) self.betweenBattleMusic.stop() def", "not self.reflectedPodium.isEmpty(): if self.debugPositions: self.reflectedPodium.show() def loadCannons(self): pass def loadWitnessStand(self):", "self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB = CollisionPolygon(Point3(-4.0, 2.0,", "0), blendType='easeInOut') chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos,", "= Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __onToBattleTwo(self, elapsedTime", "self.d_hitBossInsides() self.flashRed() elif pieCode == ToontownGlobals.PieCodeBossCog: if toon == localAvatar:", "looping=1, volume=0.9) self.__showWitnessToon() prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie() self.acceptOnce('doneChatPage', self.__onToBattleThree) intervalName =", "len(self.involvedToons) center = (numToons - 1) / 2.0 for i", "self.onscreenMessage = None self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage self.elevatorType = ElevatorConstants.ELEVATOR_CJ self.gavels", "= self.reflectedJuryBox.getPos() newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions:", "= base.cr.doId2do.get(toonId) if toon: base.playSfx(self.toonUpSfx, node=toon) def hideBonusTimer(self): if self.bonusTimer:", "'neutral'))) return ival def toonsToBattlePosition(self, toonIds, battleNode): self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s'", "self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx = 
loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx", "import Task import random import math from toontown.coghq import CogDisguiseGlobals", "'RewardMovie' delayDeletes = [] for toonId in self.involvedToons: toon =", "self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol", "self.sendUpdate('hitBoss', [bossDamage]) def d_healBoss(self, bossHeal): self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss', [bossHeal]) def", "25) bossTrack = Track( (0.5, Sequence( Func(self.clearChat), Func(camera.reparentTo, render), Func(camera.setPos,", "self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3) self.standNodePath = NodePath('scaleStand') self.standNodePath.attachNewNode(standGeom)", "self.stash() self.stopAnimate() self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) camera.setPos(self.witnessToon,", "= None self.juryMovesSfx = None self.baseColStashed = False self.battleDifficulty =", "90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral')) track.append(ival)", "= 0 newWeight = 1 cannonIndex = self.cannonIndex numJurors =", "self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))), (13.1, Sequence(self.door3.posInterval(1, doorStartPos)))) retTrack = Parallel(bossTrack, ActorInterval(self,", "tris.addVertex(2) tris.closePrimitive() tris.addVertex(1) tris.addVertex(3) tris.addVertex(2) tris.closePrimitive() tris.addVertex(2) tris.addVertex(3) tris.addVertex(6) tris.closePrimitive()", "len(self.involvedToons): toonId = 
self.involvedToons[extraInfo] toon = base.cr.doId2do.get(toonId) if toon: chatString", "tris.addVertex(3) tris.addVertex(2) tris.closePrimitive() tris.addVertex(2) tris.addVertex(3) tris.addVertex(6) tris.closePrimitive() tris.addVertex(3) tris.addVertex(7) tris.addVertex(6)", "self.getPos() self.notify.debug('myCurPos = %s' % self.getPos()) self.notify.debug('battleNode.parent() = %s' %", "= self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target = CollisionTube(0, -1, 4,", "str(ToontownGlobals.PieCodeNotBossCog)) insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0,", "self.elevatorEntrance = self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator') elevatorModel.reparentTo(self.elevatorEntrance) self.setupElevator(elevatorModel)", "self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.hide() def __showToons(self): for", "self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0 self.raised = 0 self.forward =", "'\\x07' trialSpeech += weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0) def __makePrepareBattleThreeMovie(self): movie =", "BattleBase.BattleBase.toonPoints[len(toonIds) - 1] else: points = list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5])", "not self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if not self.reflectedJuryBox.isEmpty(): if self.debugPositions: self.reflectedJuryBox.show() self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ()", "index in xrange(len(self.cannons)): cannon = self.cannons[index] cannon.cannon.show() def getChairParent(self): return", "(24, Sequence( Func(self.clearChat), self.loseCogSuits(self.toonsA + self.toonsB, render, 
(-2.798, -70, 10,", "self.bonusTimer: self.bonusTimer = ToontownTimer.ToontownTimer() self.bonusTimer.posInTopRightCorner() self.bonusTimer.show() self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer) def setAttackCode(self,", "exitBattleTwo') intervalName = self.uniqueName('Drop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() localAvatar.inventory.setBattleCreditMultiplier(1) if self.juryTimer:", "* ToontownGlobals.LawbotBossWinningTilt else: percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0)", "self.clearChat() self.releaseToons(finalBattle=1) self.happy = 0 self.raised = 0 self.forward =", "0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode))) seq =", "for i in xrange(len(self.lawyers)): suit = self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral') suit.setBossCogId(self.doId)", "collList = [model] for cnp in collList: cn = cnp.node()", "entry): self.notify.debug('touchedGavel') attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr == '': self.notify.warning('Node", "myToPos = Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2]) rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos,", "self.notify.debug('cannonPos = %s' % cannonPos) if toon: self.notify.debug('toon = %s'", "rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0,", "vertexWriter.addData3f(x1, y2, z2) vertexWriter.addData3f(x2, y2, z2) for index in xrange(8):", "locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos = %s ' % prosecutionLocatorPos)", "endTime=0), ActorInterval(self, 'Ff_neutral', duration=2), ActorInterval(self, 'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack) attackToons", "if not self.useCannons: self.toonsToBattlePosition(self.toonsA, 
self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9)", "= 'EpilogueMovie' seq = Sequence(self.makeEpilogueMovie(), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.accept('doneChatPage',", "list(BattleBase.BattleBase.toonPoints[3]) points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5]) self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0])", "tris.addVertex(4) tris.closePrimitive() tris.addVertex(1) tris.addVertex(5) tris.addVertex(3) tris.closePrimitive() tris.addVertex(3) tris.addVertex(5) tris.addVertex(7) tris.closePrimitive()", "self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) self.makeScaleReflectDamage() self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage)", "toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop,", "self.saySomething(chatString) def toonGotHealed(self, toonId): toon = base.cr.doId2do.get(toonId) if toon: base.playSfx(self.toonUpSfx,", "= cannon.generateCannonAppearTrack(toon) multiCannons.append(cannonSeq) index += 1 else: self.notify.warning('No cannon %d", "else: self.insidesANodePath.stash() def doorBCallback(self, isOpen): if self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash()", "node.posInterval(time, toPos)) def __makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2]) if", "%s' % stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not found %s' % stuffToHide)", "!= 0: cp = CollisionPlane(plane) newCollisionNode.addSolid(cp) lastPlane = plane return", "toonsToBattlePosition(self, toonIds, battleNode): 
self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) if", "tris.addVertex(0) tris.addVertex(2) tris.addVertex(4) tris.closePrimitive() tris.addVertex(2) tris.addVertex(6) tris.addVertex(4) tris.closePrimitive() tris.addVertex(1) tris.addVertex(5)", "import * from panda3d.core import * from libotp import *", "= %s' % prosecutionPanRelPos) self.notify.debug('panRenderPos = %s' % panRenderPos) prosecutionLocatorPos", "task): self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self): self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom", "= Sequence() bossTrack.append(Func(self.loop, 'Ff_neutral')) track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos,", "Func(self.getGeomNode().setH, 0)) rollTrackDuration = rollTrack.getDuration() self.notify.debug('rollTrackDuration = %f' % rollTrackDuration)", "visible.') OneBossCog = self return def disable(self): global OneBossCog self.notify.debug('-----", "ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3],", "toonIds, battleNode): self.notify.debug('walkToonsToBattlePosition-----------------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) ival =", "= self.reflectedJuryBox.getPos() reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1],", "self.numToonJurorsSeated = 0 self.mainDoor = None self.reflectedMainDoor = None 
self.panFlashInterval", "self.insidesANodePath = None self.insidesBNodePath = None self.strafeInterval = None self.onscreenMessage", "elapsed): self.notify.debug('----- __onToBattleThree') self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleThree(self): self.notify.debug('-----", "if pieCode == ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry) if pieCode != ToontownGlobals.PieCodeToon: return", "render))), (9.6, Parallel( rollTrack, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech), self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))),", "self.battleDifficulty = diff def toonEnteredCannon(self, toonId, cannonIndex): if base.localAvatar.doId ==", "lawyerDoId == lawyer.doId: lawyer.sendUpdate('hitByToon', []) def __finalPieSplat(self, toon, pieCode): if", "None self.reflectedMainDoor = None self.panFlashInterval = None self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage", "self.clearChat() self.reparentTo(render) self.__showWitnessToon() prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie() intervalName = 'prepareBattleTwo' seq", "(numToons - 1) / 2.0 for i in xrange(numToons): toon", "i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render) pos,", "None return def __showWitnessToon(self): if not self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter", "def __howToThrowPies(self, task): self.notify.debug('----- __howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self): self.everThrownPie =", "prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) standGeom = self.createBlock(0.25, 0.25,", 
"self.uniqueName('PieAdvice')) def __howToGetPies(self, task): self.notify.debug('----- __howToGetPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self, task):", "in xrange(cn.getNumSolids()): solid = cn.getSolid(i) if isinstance(solid, CollisionPolygon): plane =", "+ 2.0) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent)", "if pieCode != ToontownGlobals.PieCodeToon: return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId", "cleanupPanFlash(self): if self.panFlashInterval: self.panFlashInterval.finish() self.panFlashInterval = None return def flashPanBlue(self):", "if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward')) ival.delayDeletes = delayDeletes ival.start() self.storeInterval(ival, intervalName)", "def enterBattleTwo(self): self.notify.debug('----- enterBattleTwo') self.cleanupIntervals() mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render)", "= Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(),", "def touchedGavel(self, gavel, entry): self.notify.debug('touchedGavel') attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr", "= self.door3.getPos() doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25) bossTrack", "if toon: chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName() else: chatString =", "NodePath('scaleBeam') self.beamNodePath.attachNewNode(beamGeom) self.beamNodePath.setPos(0, 0, 3) self.beamNodePath.reparentTo(self.scaleNodePath) defensePanGeom = self.createBlock(0.5, 0.5,", "elif self.numToonJurorsSeated == 1: juryResult = TTLocalizer.WitnessToonOneJuror elif 
self.numToonJurorsSeated ==", "localAvatar: self.d_hitBoss(1) if self.dizzy: self.flashRed() self.doAnimate('hit', now=1) elif pieCode ==", "& ~ToontownGlobals.PieBitmask newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol =", "intervalName = 'RollToBattleThree' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree') self.cleanupIntervals()", "hasattr(self, 'nametag'): self.notify.warning('returning from setTaunt, no attr nametag') gotError =", "pos = %s' % pos) ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'),", "self.notify.debug('----- d_bossHeal') self.sendUpdate('healBoss', [bossHeal]) def d_hitBossInsides(self): self.notify.debug('----- d_hitBossInsides') self.sendUpdate('hitBossInsides', [])", "self.prosecutionLocator.getPos() prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin) locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin) self.notify.debug('prosecutionLocatorPos", "= VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5]) bossTrack = Sequence() myInterval = camera.posHprInterval(8,", "loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3') self.geom.setPos(0, 0, -71.601) self.geom.setScale(1) self.elevatorEntrance = self.geom.find('**/elevator_origin') self.elevatorEntrance.getChildren().detach() self.elevatorEntrance.setScale(1)", "diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage) if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu()", "= Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close')) self.__cleanupStrafe() 
self.strafeInterval =", "enterFrolic') self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) DistributedBossCog.DistributedBossCog.enterFrolic(self) self.show() def doorACallback(self, isOpen): if self.insidesANodePath: if", "toon = self.cr.doId2do.get(toonId) if toon: toon.stopLookAround() toon.stopSmooth() if self.hasLocalToon(): self.toMovieMode()", "0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self),", "startingTilt = 0 self.scaleNodePath = NodePath('injusticeScale') beamGeom = self.createBlock(0.25, 2,", "Point3(*ToontownGlobals.LawbotBossP3PosA) battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4],", "toon = base.cr.doId2do.get(toonIds[i]) if toon: pos, h = points[i] origPos", "= Track( (0.5, Sequence( Func(self.clearChat), Func(camera.reparentTo, render), Func(camera.setPos, -3, 45,", "lastPlane == None or plane.compareTo(lastPlane, threshold) != 0: cp =", "self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0, 2, 0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube = CollisionTube(0, 0, -0.5,", "self.notify.debug('DistrutedLawbotBoss.toonsToBattlePosition----------------------------------------') self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode)) if len(toonIds) < 5:", "juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.juryBox.setPos(newPos) self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect')", "= TTLocalizer.LawbotBossTaunts[1] if tauntIndex == 0: if extraInfo < len(self.involvedToons):", "None for chair in self.chairs.values(): chair.stopCogsFlying() return def enterRollToBattleThree(self): 
self.notify.debug('-----", "battleNode): self.notify.debug('----- __toonsToPromotionPosition') points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for i", "gotError = False if not hasattr(self, 'state'): self.notify.warning('returning from setTaunt,", "self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) camera.setPos(self.witnessToon, -9, 12, 6) camera.lookAt(self.witnessToon, 0,", "0, -0.5, -0.5, -2, 0, 0, 1.0, 0.25) self.defensePanNodePath =", "5: points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] else: points = list(BattleBase.BattleBase.toonPoints[3])", "Func(camera.reparentTo, render), Func(camera.setPos, -3, 45, 25), Func(camera.setHpr, 0, 10, 0))),", "= 50 rate = time / numGears for i in", "juryWeightBonus: weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight) trialSpeech += '\\x07'", "toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.hide() def", "ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0,", "def __gotLawyers(self, lawyers): self.lawyerRequest = None self.lawyers = lawyers for", "ActorInterval(self, 'Ff_lookRt', duration=3), ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0), ActorInterval(self, 'Ff_neutral',", "if bonusWeight < 0: bonusWeight = 0 newWeight = defaultWeight", "= Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25) bossTrack = Track( (0.5,", "tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage", "prosecutionTube.setTangible(1) prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol')) prosecutionCollNode.addSolid(prosecutionTube) self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode) 
self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))", "OneBossCog = None class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss') debugPositions", "Func(camera.setHpr, 0, 0, 0)] seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return", "beamGeom = self.createBlock(0.25, 2, 0.125, -0.25, -2, -0.125, 0, 1.0,", "juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty) else: juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty) if juryWeightBonus: weightBonusText", "1 self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated) return def cleanupPanFlash(self): if", "self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat() self.releaseToons(finalBattle=1) self.__showWitnessToon() if not self.useCannons: self.toonsToBattlePosition(self.toonsA,", "{} self.chairs = {} self.cannons = {} self.useCannons = 1", "toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward'))", "def announceGenerate(self): global OneBossCog self.notify.debug('----- announceGenerate') DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.LawbotBossName) nameInfo =", "self.bossDamage, self.bossMaxDamage) if diffSettings[4]: localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight) def __doneBattleThree(self): self.notify.debug('----- __doneBattleThree')", "self.battleThreeMusicTime = 0 self.battleThreeMusic.stop() def enterEpilogue(self): self.cleanupIntervals() self.clearChat() self.witnessToon.clearChat() self.stash()", "not 
self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def __pieSplat(self, toon, pieCode): if", "ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2]) self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime,", "if not self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if not self.reflectedJuryBox.isEmpty(): if self.debugPositions: self.reflectedJuryBox.show()", "self.witnessToonOnstage = False self.numToonJurorsSeated = 0 self.mainDoor = None self.reflectedMainDoor", "suit.loop('neutral') suit.setBossCogId(self.doId) return def setBossDamage(self, bossDamage, recoverRate, timestamp): recoverStartTime =", "self.ignore('doneChatPage') self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self): self.notify.debug('----- enterBattleTwo') self.cleanupIntervals() mult =", "%s' % battleNode.getParent().getPos()) bnParent = battleNode.getParent() battleNode.wrtReparentTo(render) bnWorldPos = battleNode.getPos()", "startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos) deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos) self.setPosHpr(startPos,", "4, 0, 1, 7, 3.5) shieldNode = CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask", "return def __showWaitingMessage(self, task): self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self): self.notify.debug('-----", "self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath = 
self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath =", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)), (24, Sequence( Func(self.clearChat),", "fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos)) def __makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0],", "plane return NodePath(newCollisionNode) def makeIntroductionMovie(self, delayDeletes): self.notify.debug('----- makeIntroductionMovie') for toonId", "names = lawyerCol.getName().split('-') lawyerDoId = int(names[1]) for lawyer in self.lawyers:", "chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] self.saySomething(chatString) def toonGotHealed(self, toonId): toon = base.cr.doId2do.get(toonId)", "CollisionTube(0, -1, 4, 0, -1, 9, 3.5) targetNode = CollisionNode('BossZap')", "= self.geom.find('**/MidVaultFloor1') if floor.isEmpty(): floor = self.geom.find('**/CR3_Floor') self.evFloor = self.replaceCollisionPolysWithPlanes(floor)", "self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1,", "= 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() localAvatar.setPos(-3,", "0, 0)))), (27, Sequence( self.toonNormalEyes(self.involvedToons), Func(self.loop, 'Ff_neutral'), Func(self.setChatAbsolute, attackToons, CFSpeech))))", "self.notify.debug('----- __continueDefeat') self.stopAnimate() self.doneBarrier('Defeat') def exitDefeat(self): self.notify.debug('----- exitDefeat') self.stopAnimate() self.unstash()", "direct.fsm import FSM from direct.fsm import ClassicFSM from direct.fsm import", 
"globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage self.recoverRate = recoverRate self.recoverStartTime = recoverStartTime", "time=self.battleThreeMusicTime) def __continueDefeat(self): self.notify.debug('----- __continueDefeat') self.stopAnimate() self.doneBarrier('Defeat') def exitDefeat(self): self.notify.debug('-----", "self.notify.debug('----- exitDefeat') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def", "'close')) self.__cleanupStrafe() self.strafeInterval = seq seq.start() def replaceCollisionPolysWithPlanes(self, model): newCollisionNode", "self.battleThreeMusic.stop() def enterVictory(self): self.notify.debug('----- enterVictory') self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)", "not self.mainDoor.isEmpty(): pass if not self.reflectedMainDoor.isEmpty(): self.reflectedMainDoor.unstash() if not self.elevatorEntrance.isEmpty():", "intervalName = 'VictoryMovie' seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName) seq.start() self.storeInterval(seq,", "if toon: toon.show() def __arrangeToonsAroundWitnessToon(self): radius = 7 numToons =", "self.panFlashInterval = None return def flashPanBlue(self): self.cleanupPanFlash() intervalName = 'FlashPanBlue'", "= 0 self.numJurorsLocalToonSeated = 0 self.cannonIndex = -1 return def", "bnWorldPos = battleNode.getPos() battleNode.wrtReparentTo(bnParent) toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0,", "= self.realWitnessStand.find('**/witnessStandSeatEdge') center = seatCenter.getPos() self.notify.debug('center = %s' % center)", "if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval = None return def __cleanupJuryBox(self): self.notify.debug('-----", 
"calculateWeightOfToon(self, toonId): defaultWeight = 1 bonusWeight = 0 newWeight =", "enterBattleTwo') self.cleanupIntervals() mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2) localAvatar.inventory.setBattleCreditMultiplier(mult) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr) self.clearChat() self.witnessToon.clearChat()", "reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if not self.reflectedJuryBox.isEmpty():", "vector = Vec3(toPos - fromPos) distance = vector.length() time =", "pass def __localPieSplat(self, pieCode, entry): if pieCode == ToontownGlobals.PieCodeLawyer: self.__lawyerGotHit(entry)", "or self.battleB == None: pass return def exitBattleOne(self): self.notify.debug('----- exitBattleOne')", "None self.onscreenMessage = None self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage self.elevatorType = ElevatorConstants.ELEVATOR_CJ", "attackCode, avId = 0): DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId) if attackCode ==", "intervalName) track.start() def exitEpilogue(self): self.notify.debug('----- exitEpilogue') self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.epilogueMusic.stop() def", "if tauntIndex == 0: if extraInfo < len(self.involvedToons): toonId =", "self.scaleNodePath.wrtReparentTo(self.geom) self.setScaleTilt(startingTilt) def setScaleTilt(self, tilt): self.beamNodePath.setP(tilt) if self.useProgrammerScale: self.defensePanNodePath.setP(-tilt) self.prosecutionPanNodePath.setP(-tilt)", "toon.loop('neutral') def makeEndOfBattleMovie(self, hasLocalToon): name = self.uniqueName('Drop') seq = Sequence(name=name)", "< ToontownGlobals.MaxCogSuitLevel: newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel: speech", "self.__cleanupJuryBox() 
render.clearTag('pieCode') self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest = None self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop()", "not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor) else: self.notify.debug('not found", "Parallel() numGears = int(4 + 6 * t + 0.5)", "node world pos = %s' % bnWorldPos) pos = render.getRelativePoint(battleNode,", "trialSpeech += weightBonusText self.witnessToon.setLocalPageChat(trialSpeech, 0) def __makePrepareBattleThreeMovie(self): movie = Sequence(Func(camera.reparentTo,", "spread x = dist * math.sin(angle) y = dist *", "attackCode == ToontownGlobals.BossCogAreaAttack: self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt) base.playSfx(self.warningSfx) def setBattleDifficulty(self, diff): self.notify.debug('battleDifficulty =", "OneBossCog self.notify.debug('----- disable') DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice'))", "ToontownGlobals.LawbotBossJurorsForBalancedScale movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr,", "TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout) base.playSfx(self.toonUpSfx) if", "~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask) self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision')", "if isOpen: self.insidesANodePath.unstash() else: self.insidesANodePath.stash() def doorBCallback(self, isOpen): if 
self.insidesBNodePath:", "self.rotateNode.attachNewNode('gearRoot') if side == 0: gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180, 0,", "9, 3.5) targetNode = CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = self.pelvis.attachNewNode(targetNode)", "def __toonsToPromotionPosition(self, toonIds, battleNode): self.notify.debug('----- __toonsToPromotionPosition') points = BattleBase.BattleBase.toonPoints[len(toonIds) -", "del self.juryTimer if self.bonusTimer: self.bonusTimer.destroy() del self.bonusTimer localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu() if OneBossCog", "+= [Wait(0.0)] if hasLocalToon: seq += [Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos,", "if isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds, battleNode): self.notify.debug('-----", "__makeRollToBattleThreeMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1],", "self.geom.find('**/WitnessStand') if not self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect') if not", "self.controlToons() self.__showWitnessToon() self.witnessToon.reparentTo(render) self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr) self.witnessToon.loop('Sit') self.__arrangeToonsAroundWitnessToon() camera.reparentTo(render) camera.setPos(self.witnessToon, -9, 12,", "prosecutionPanPos) self.notify.debug('prosecutionPanRelPos = %s' % prosecutionPanRelPos) self.notify.debug('panRenderPos = %s' %", "cannonPos = cannon.nodePath.getPos(render) self.notify.debug('cannonPos = %s' % cannonPos) if toon:", "chair in self.chairs.values(): if chair.state == 
'ToonJuror': if chair.toonJurorIndex ==", "def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.LawbotBossDeathPos) self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr) self.clearChat() self.releaseToons(finalBattle=1) self.accept('pieSplat', self.__finalPieSplat)", "return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId == '': self.notify.warning('Toon %s", "(ToontownGlobals.LawbotBossInitialDamage - 0) tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt self.setScaleTilt(tilt) if", "self.placeToonInElevator(toon) def setLawyerIds(self, lawyerIds): self.lawyers = [] self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest) self.lawyerRequest =", "0), Func(self.witnessToon.setLocalPageChat, chatString, 0)) return movie def __doWitnessPrepareBattleThreeChat(self): self.notify.debug('__doWitnessPrepareBattleThreeChat: original", "dist = 50 rate = time / numGears for i", "y1, z1) vertexWriter.addData3f(x2, y1, z1) vertexWriter.addData3f(x1, y2, z1) vertexWriter.addData3f(x2, y2,", "self.realWitnessStand.isEmpty(): pass self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect') if not self.reflectedWitnessStand.isEmpty(): pass colNode", "localAvatar)) multiCannons = Parallel() index = 0 self.involvedToons.sort() for toonId", "bossTrack.append(track) track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1) bossTrack.append(track)", "= loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg') self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg') self.strafeSfx =", "self.clearInterval(intervalName) self.betweenBattleMusic.stop() def enterPrepareBattleThree(self): self.notify.debug('----- enterPrepareBattleThree') self.cleanupIntervals() self.controlToons() self.setToonsToNeutral(self.involvedToons) self.clearChat()", "= 
ToontownGlobals.LawbotBossMaxDamage base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) self.__showWitnessToon() diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] self.bossHealthBar.initialize(self.bossMaxDamage", "numGears = int(4 + 6 * t + 0.5) time", "% self.numToonJurorsSeated) self.countToonJurors() self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated) if", "None and cannonIndex >= 0: diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty] if diffSettings[4]:", "seq def __makeBossDamageMovie(self): self.notify.debug('---- __makeBossDamageMovie') startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])", "battleNode.getPos() battleNode.wrtReparentTo(bnParent) self.notify.debug('battle node world pos = %s' % bnWorldPos)", "self.notify.debug('---- __makeBossDamageMovie') startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr)", "self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale = 35", "self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr) self.stopAnimate() self.__hideWitnessToon() DistributedBossCog.DistributedBossCog.enterIntroduction(self) base.playMusic(self.promotionMusic, looping=1, volume=0.9) if not", "loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg') self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg') self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg')", "__walkToonToPromotion(self, 
toonId, delay, mopath, track, delayDeletes): self.notify.debug('----- __walkToonToPromotion') toon =", "fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode))) seq = Sequence(Func(door.request, 'open'),", "self.state == 'BattleThree': self.notify.warning('returning from setTaunt, not in battle three", "self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode') self.targetNodePath.detachNode() self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest)", "* 1.8) return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos)) def", "chair.state == 'ToonJuror' or chair.state == None and chair.newState ==", "self.flashRed() self.doAnimate('hit', now=1) elif pieCode == ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx,", "tris.addVertex(2) tris.closePrimitive() tris.addVertex(2) tris.addVertex(3) tris.addVertex(6) tris.closePrimitive() tris.addVertex(3) tris.addVertex(7) tris.addVertex(6) tris.closePrimitive()", "self.debugPositions: self.juryBox.setPos(newPos) self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos = self.reflectedJuryBox.getPos() newReflectedPos =", "for cnp in collList: cn = cnp.node() if not isinstance(cn,", "def __enterProsecutionCol(self, entry): self.notify.debug('__enterProsecutionCol') def makeVictoryMovie(self): myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1],", "text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration) self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout)", "direct.showbase.PythonUtil import StackTrace from direct.gui.DirectGui import * from 
panda3d.core import", "retVal += 1 return retVal def calculateWeightOfToon(self, toonId): defaultWeight =", "return def __showWitnessToon(self): if not self.witnessToonOnstage: self.witnessToon.addActive() self.witnessToon.reparentTo(self.geom) seatCenter =", "= 1.0, b = 1.0, a = 1.0): gFormat =", "- Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if not self.debugPositions: self.reflectedJuryBox.setPos(newReflectedPos) if not self.reflectedJuryBox.isEmpty(): if", "= -beamLocatorPos self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) self.notify.debug('negBeamLocatorPos = %s'", "None, battlePos, None, 0) bossTrack.append(track) track, hpr = self.rollBossToPoint(battlePos, hpr,", "- 1] self.notify.debug('walkToonsToBattlePosition: points = %s' % points[0][0]) for i", "= self.chairs[key] if chair.state == 'ToonJuror' or chair.state == None", "0) self.prosecutionPanNodePath.reparentTo(self.beamNodePath) prosecutionTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5,", "def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes): self.notify.debug('----- __walkToonToPromotion') toon", "self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos()) self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator') prosecutionLocBounds = self.prosecutionLocator.getBounds()", "Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos = Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2])", "self.witnessToon.setY(self.witnessToon.getY() - 1.15) self.witnessToonOnstage = 1 def __hideWitnessToon(self): if self.witnessToonOnstage:", "self.bossDamageMovie: self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return Task.cont def __walkToonToPromotion(self, toonId, delay,", "prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 1.0, 0,", 
"points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] self.notify.debug('walkToonsToBattlePosition: points = %s' %", "for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: toon.show()", "self.stash() self.stopAnimate() self.controlToons() panelName = self.uniqueName('reward') self.rewardPanel = RewardPanel.RewardPanel(panelName) victory,", "= base.cr.doId2do.get(toonId) if toon: destPos = toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) ival", "self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2])", "= True def unstashBaseCol(self): if self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed", "def loadPodium(self): self.podium = self.geom.find('**/Podium') newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2]", "def calculateWeightOfToon(self, toonId): defaultWeight = 1 bonusWeight = 0 newWeight", "__walkSuitToPoint') vector = Vec3(toPos - fromPos) distance = vector.length() time", "self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol') oldBitMask = self.baseHighCol.getCollideMask() newBitMask = oldBitMask &", "self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie'))", "self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.scaleNodePath.unstash() localAvatar.setPos(-3, 0, 0)", "toon: toon.loop('neutral') def makeEndOfBattleMovie(self, hasLocalToon): name = self.uniqueName('Drop') seq =", "text): self.notify.debug('----- 
__showOnscreenmessage') if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None self.onscreenMessage", "self.beamLocator.getBounds() beamLocatorPos = beamLocatorBounds.getCenter() self.notify.debug('beamLocatorPos = %s' % beamLocatorPos) def", "%s' % defenseLocPos) self.defensePanNodePath.setPos(defenseLocPos) self.defensePanNodePath.reparentTo(self.beamNodePath) self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos()) self.prosecutionLocator =", "1] self.notify.debug('walkToonsToBattlePosition: points = %s' % points[0][0]) for i in", "self.involvedToons: if index in self.cannons: cannon = self.cannons[index] toon =", "self.podium.setZ(newZ) self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl') reflectedZ = self.reflectedPodium.getZ() if not self.debugPositions:", "Geom.UHDynamic) vertexWriter = GeomVertexWriter(myVertexData, 'vertex') normalWriter = GeomVertexWriter(myVertexData, 'normal') colorWriter", "startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])", "self.request('Off') self.unloadEnvironment() self.__cleanupWitnessToon() self.__unloadMopaths() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() self.__cleanupJuryBox() render.clearTag('pieCode') self.targetNodePath.detachNode()", "CFSpeech)) seq.append(Wait(5.0)) seq.append(Func(self.clearChat)) return seq def __makeBossDamageMovie(self): self.notify.debug('---- __makeBossDamageMovie') startPos", "= self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1) bossTrack.append(track) track, hpr =", "= Sequence() myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13,", "= 0): self.notify.debug('----- 
__onToBattleTwo') self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self):", "Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)), (18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)), (22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4,", "ToontownGlobals.PieCodeDefensePan: self.flashRed() self.flashPanBlue() base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25) if toon == localAvatar:", "= vector.length() time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8) return", "def __makeRollToBattleThreeMovie(self): startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2]) battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0],", "import DelayDelete from toontown.battle import MovieToonVictory from toontown.building import ElevatorUtils", "Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1)) bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop, 'Ff_neutral'))", "self.clearChat() self.witnessToon.clearChat() self.stash() self.stopAnimate() self.controlToons() panelName = self.uniqueName('reward') self.rewardPanel =", "= Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale = 35 self.toonsEnterB", "index = 0 self.involvedToons.sort() for toonId in self.involvedToons: toon =", "-2, 1.0, 0, 0, 1.0) self.prosecutionPanNodePath = NodePath('prosecutionPan') self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom) self.prosecutionPanNodePath.setPos(0,", "enterVictory') self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr) self.loop('neutral') localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) 
self.clearChat() self.witnessToon.clearChat() self.controlToons() self.setToonsToNeutral(self.involvedToons)", "0, 0) def __outOfPies(self): self.notify.debug('----- outOfPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice'))", "%s' % prosecutionPanRelPos) self.notify.debug('panRenderPos = %s' % panRenderPos) prosecutionLocatorPos =", "if chair.state == 'ToonJuror': if chair.toonJurorIndex == cannonIndex: retVal +=", "str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target = CollisionTube(0, -1, 4, 0, -1, 9,", "= %d' % diff) self.battleDifficulty = diff def toonEnteredCannon(self, toonId,", "% str) if not stuffToHide.isEmpty(): self.notify.debug('found %s' % stuffToHide) stuffToHide.wrtReparentTo(self.mainDoor)", "myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2]) myToPos = Point3(myFromPos[0], myFromPos[1] +", "== ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1) if", "hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0) bossTrack.append(track) track, hpr", "%s' % beamLocatorPos) def loadScaleNew(self): self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale') self.beamNodePath =", "* from direct.fsm import FSM from direct.fsm import ClassicFSM from", "track.append(dialogTrack) return Sequence( Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def walkToonsToBattlePosition(self, toonIds,", "pos=(0, 0, 0.35), scale=0.1) return def __clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy()", "* radius toon.setPos(self.witnessToon, x, y, 0) toon.headsUp(self.witnessToon) toon.loop('neutral') toon.show() def", "toon = base.cr.doId2do.get(toonId) if toon: 
base.playSfx(self.toonUpSfx, node=toon) def hideBonusTimer(self): if", "= %s ' % prosecutionLocatorRelPos) self.notify.debug('locatorRenderPos = %s' % locatorRenderPos)", "attackToons = TTLocalizer.BossCogAttackToons dialogTrack = Track( (0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)),", "= CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5),", "1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1) return def __clearOnscreenMessage(self):", "= self.scaleNodePath.find('**/DefenseHighCol') self.defenseHighCol.stash() self.defenseHighCol.setCollideMask(newBitMask) self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision') self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col')", "self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence) def __howToThrowPies(self, task): self.notify.debug('----- __howToThrowPies') self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies) def __foundPieButton(self): self.everThrownPie", "touchedGavel(self, gavel, entry): self.notify.debug('touchedGavel') attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode') if attackCodeStr ==", "0) def exitElevator(self): self.notify.debug('----- exitElevator') DistributedBossCog.DistributedBossCog.exitElevator(self) self.witnessToon.removeActive() def enterIntroduction(self): self.notify.debug('-----", "TTLocalizer.BossCogAttackToons dialogTrack = Track( (0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)), (5.6, Func(self.setChatAbsolute,", "def exitDefeat(self): self.notify.debug('----- exitDefeat') self.stopAnimate() self.unstash() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime()", "disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeWitnessToon() self.__loadMopaths() 
localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog !=", "camera.reparentTo(render) camera.setPos(self.witnessToon, -9, 12, 6) camera.lookAt(self.witnessToon, 0, 0, 3) intervalName", "chair.state == None and chair.newState == 'ToonJuror': self.numToonJurorsSeated += 1", "= self.replaceCollisionPolysWithPlanes(floor) self.evFloor.reparentTo(self.geom) self.evFloor.setName('floor') plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0,", "stuffToHide) self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door') if not self.reflectedMainDoor.isEmpty(): itemsToHide = ['Reflections/Door_1']", "'': self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath())) return", "= self.scaleNodePath.find('**/DefenseCol') self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan)) self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol') self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan)) self.standNodePath", "self.__doneEpilogue) base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def __doneEpilogue(self, elapsedTime = 0): self.notify.debug('-----", "insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0,", "True def unstashBaseCol(self): if self.baseColStashed: self.notify.debug('unstashBaseCol') self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed =", "battleHpr, 0) self.makeToonsWait() return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie')) def", "self.notify.debug('----- enterOff') DistributedBossCog.DistributedBossCog.enterOff(self) if self.witnessToon: self.witnessToon.clearChat() def enterWaitForToons(self): self.notify.debug('----- enterWaitForToons')", "self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds, battleNode):", 
"self.beamNodePath.getPos() beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos = render.getRelativePoint(self.beamNodePath, origin) self.notify.debug('beamPos", "chatString = TTLocalizer.LawbotBossTaunts[1] if tauntIndex == 0: if extraInfo <", "if not self.debugPositions: self.podium.setZ(newZ) self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl') reflectedZ = self.reflectedPodium.getZ()", "DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.witnessToon.addActive() def enterElevator(self): self.notify.debug('----- enterElevator') DistributedBossCog.DistributedBossCog.enterElevator(self) self.witnessToon.removeActive() self.reparentTo(render)", "= toon.getPos(render) self.notify.debug('renderPos =%s' % renderPos) index += 1 self.notify.debug('done", "if not self.mainDoor.isEmpty(): itemsToHide = ['interior/Door_1'] for str in itemsToHide:", "- self.recoverStartTime return max(self.bossDamage - self.recoverRate * elapsed / 60.0,", "self.geom.find('**/JuryBox_Geo_Reflect') reflectedJuryBoxPos = self.reflectedJuryBox.getPos() newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos) if", "localAvatar.chatMgr.chatInputSpeedChat.addCJMenu() if OneBossCog != None: self.notify.warning('Multiple BossCogs visible.') OneBossCog =", "delayDeletes ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self):", "= 1 self.toonsEnterB.timeScale = 35 def __unloadMopaths(self): self.notify.debug('----- __unloadMopaths') self.toonsEnterA.reset()", "= base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg') floor = self.geom.find('**/MidVaultFloor1') if floor.isEmpty():", "locatorRenderPos) beamPos = self.beamNodePath.getPos() beamRelPos = 
self.scaleNodePath.getRelativePoint(self.beamNodePath, origin) beamRenderPos =", "__showCannonsAppearing(self, elapsedTime = 0): allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos,", "pass elif pieCode == ToontownGlobals.PieCodeLawyer: pass def __localPieSplat(self, pieCode, entry):", "extraInfo): gotError = False if not hasattr(self, 'state'): self.notify.warning('returning from", "self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId) if self.bonusWeight > 0: if self.bonusWeight ==", "'state'): self.notify.warning('returning from setTaunt, no attr state') gotError = True", "= self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterDefeat(self): self.notify.debug('----- enterDefeat') self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render)", "ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1) if newCogSuitLevel", "self.baseTopCol.unstash() self.baseSideCol.unstash() self.baseColStashed = False def makeScaleReflectDamage(self): diffDamage = self.bossDamage", "lawyers for i in xrange(len(self.lawyers)): suit = self.lawyers[i] suit.fsm.request('neutral') suit.loop('neutral')", "if self.juryBoxIval: self.juryBoxIval.finish() self.juryBoxIval = None self.juryBox.setPos(-30, 0, -12.645) self.reflectedJuryBox.setPos(-30,", "= Sequence( ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1), ActorInterval(self, 'Ff_lookRt', duration=3),", "for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.wrtReparentTo(render)", "tris.closePrimitive() tris.addVertex(7) tris.addVertex(5) tris.addVertex(4) tris.closePrimitive() cubeGeom = Geom(myVertexData) cubeGeom.addPrimitive(tris) cubeGN", "= self.cr.playGame.getPlace() if place and hasattr(place, 'fsm'): place.setState('waitForBattle') def 
makeToonsWait(self):", "self.happy = 1 self.raised = 1 self.forward = 1 intervalName", "__showToons(self): for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon:", "if not self.debugPositions: self.reflectedPodium.setZ(reflectedZ) if not self.reflectedPodium.isEmpty(): if self.debugPositions: self.reflectedPodium.show()", "base.cr.doId2do.get(toonIds[i]) if toon: toon.wrtReparentTo(render) pos, h = points[i] if i", "= oldBitMask & ~ToontownGlobals.PieBitmask newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask self.baseHighCol.setCollideMask(newBitMask)", "colorWriter.addData4f(r, g, b, a) texWriter.addData2f(1.0, 1.0) tris = GeomTriangles(Geom.UHDynamic) tris.addVertex(0)", "scale=0.1) return def __clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None", "__showWaitingMessage(self, task): self.notify.debug('----- __showWaitingMessage') self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors) def loadEnvironment(self): self.notify.debug('----- loadEnvironment') DistributedBossCog.DistributedBossCog.loadEnvironment(self)", "battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale())) myCurPos = self.getPos() self.notify.debug('myCurPos = %s'", "epSpeech = TTLocalizer.WitnessToonCongratulations epSpeech = self.__talkAboutPromotion(epSpeech) bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'),", "0): self.notify.debug('----- __doneEpilogue') intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track = Parallel(Sequence(Wait(0.5)," ]
[ "b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) b =", "rnns.\"\"\" from __future__ import absolute_import from __future__ import division from", "tf.nn.bidirectional_dynamic_rnn class CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn(self): size =", "1]) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val}", "tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)", "= [\"input_1:0\", \"input_2:0\", \"input_3:0\"] output_names_with_port = [\"output_0:0\", \"final_state:0\"] self.run_test_case(func, feed_dict,", "attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) # [9, 3, 30],", "test_bidrectional_attention_wrapper_lstm_encoder(self): size = 30 time_step = 3 input_size = 4", "LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) match_cell_bk =", "x, dtype=tf.float32, sequence_length=tf.identity(seq_length)) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict =", "common import * # pylint: disable=wildcard-import, unused-wildcard-import from tf2onnx.tf_loader import", "custom rnns.\"\"\" from __future__ import absolute_import from __future__ import division", "Apache-2.0 \"\"\"Unit Tests for custom rnns.\"\"\" from __future__ import absolute_import", "tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port =", "\"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units = 5 batch_size = 6", "def func(x): attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, 
attention_states) match_input_fn = lambda curr_input,", "output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder(self): size =", "input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"]", "@check_opset_min_version(8, \"Scan\") @skip_tf2() def test_multi_rnn_lstm(self, state_is_tuple=True): units = 5 batch_size", "lambda curr_input, state: tf.concat([curr_input, state], axis=-1) cell = LSTMCell(size) match_cell_fw", "1 x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)", "attention_state: usually the output of an RNN encoder. # This", "model layer. batch_size = 1 x_val = np.array([[1., 1.], [2.,", "4.], [5., 5.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def", "be shaped `[batch_size, max_time, ...]`. decoder_time_step = 6 x_val =", "3, 30], [9, 30] attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn =", "= tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell RNNCell =", "output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_time_major(self): size = 5", "-1) return tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val,", "self._num_units], dtype=tf.float32) U = np.arange(75.0, dtype=np.float32).reshape((5, 15)) # b =", "= np.random.randn(decoder_time_step, batch_size, input_size).astype('f') def func(encoder_x, decoder_x, seq_length): encoder_cell =", "cell_2], state_is_tuple=state_is_tuple) outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32) return tf.identity(outputs,", "of each model layer. 
batch_size = 1 x_val = np.array([[1.,", "state: tf.concat([curr_input, state], axis=-1) cell = GRUCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,", "tf.split(tf.matmul(inputs, W) + b, 3, 1) hu = tf.split(tf.matmul(state, U),", "4 attn_size = size batch_size = 9 # shape [batch", "* self._num_units], dtype=tf.float32) U = np.arange(75.0, dtype=np.float32).reshape((5, 15)) # b", "input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port,", "hidden_dim self._activation = tf.tanh @property def state_size(self): return self._num_units @property", "None, \"input dimension must be defined\" # W = tf.get_variable(name=\"W\",", "LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple) outputs,", "sequence_length=[4, 3, 4, 5, 2, 1]) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state,", "disable=abstract-method,arguments-differ if is_tf2(): BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell", "= dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=tf.identity(seq_length)) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state,", "dynamic_rnn(match_cell_fw, x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict =", "x_val = np.random.randn(decoder_time_step, input_size).astype('f') x_val = np.stack([x_val] * batch_size) attention_states", "tf.identity(matched_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val, \"input_3:0\": np.array([6,", "dtype=tf.float32) W = np.arange(30.0, dtype=np.float32).reshape((2, 15)) # U = tf.get_variable(name='U',", "outputs, cell_state = dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=[4, 3, 4,", "= np.arange(75.0, 
dtype=np.float32).reshape((5, 15)) # b = tf.get_variable(name='b', shape=[1, 3", "disable=wildcard-import, unused-wildcard-import from tf2onnx.tf_loader import is_tf2 # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test #", "0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_gru_encoder(self): size = 5", "(match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length),", "tf.tanh @property def state_size(self): return self._num_units @property def output_size(self): return", "= {\"input_1:0\": x_val, \"input_2:0\": y_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port", "r = tf.sigmoid(xw[0] + hu[0]) z = tf.sigmoid(xw[1] + hu[1])", "input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"] output_names_with_port = [\"output_0:0\", \"final_state:0\"] self.run_test_case(func,", "feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units", "BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell RNNCell", "state_is_tuple=state_is_tuple) cell_2 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cells = MultiRNNCell([cell_0, cell_1,", "= h1 * (1 - z) + state * z", "= 5 # size of each model layer. 
batch_size =", "GatedGRUCell(size) x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)", "5 time_step = 3 input_size = 4 attn_size = size", "axis=-1) cell = GRUCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn,", "tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val, \"input_2:0\": y_val} input_names_with_port =", "= [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\")", "b shape: [1, 3 * 5] = [1, 15] #", "cell = GatedGRUCell(size) x_val = np.array([[1., 1.], [2., 2.], [3.,", "shape=[input_dim, 3 * self._num_units], dtype=tf.float32) W = np.arange(30.0, dtype=np.float32).reshape((2, 15))", "= np.stack([decoder_x_val] * batch_size) def func(encoder_x, decoder_x): encoder_cell = GRUCell(size)", "3 * 5] = [5, 15] # b shape: [1,", "\"input_2:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)", "[\"input_1:0\"] feed_dict = {\"input_1:0\": x_val} output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func,", "0.1) class GatedGRUCell(RNNCell): def __init__(self, hidden_dim, reuse=None): super().__init__(self, _reuse=reuse) self._num_units", "U = np.arange(75.0, dtype=np.float32).reshape((5, 15)) # b = tf.get_variable(name='b', shape=[1,", "tf.sigmoid(xw[0] + hu[0]) z = tf.sigmoid(xw[1] + hu[1]) h1 =", "cell = LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False)", "attention_states) match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1) cell", "match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, 
attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,", "LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_2 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cells =", "= tf.compat.v1.nn.rnn_cell.GRUCell RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn =", "= LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_2 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cells", "= tf.tanh @property def state_size(self): return self._num_units @property def output_size(self):", "= {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"]", "return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val, \"input_2:0\":", "encoder_cell = GRUCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _", "size] = [1, 3, 2] # num_units: 5 # W", "3 * self._num_units], dtype=tf.float32) U = np.arange(75.0, dtype=np.float32).reshape((5, 15)) #", "input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self): units =", "tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) (match_output_fw, match_output_bk), (match_state_fw, match_state_bk) =", "= [\"input_1:0\"] feed_dict = {\"input_1:0\": x_val} output_names_with_port = [\"output:0\", \"cell_state:0\"]", "tf2onnx.tf_loader import is_tf2 # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint: disable=abstract-method,arguments-differ if", "feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val, 
\"input_3:0\": np.array([6, 5, 4,", "name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"]", "x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32) x_val", "= dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) output_0 = tf.identity(output, name=\"output_0\") attention_states =", "x, dtype=tf.float32) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") input_names_with_port = [\"input_1:0\"]", "dimension must be defined\" # W = tf.get_variable(name=\"W\", shape=[input_dim, 3", "pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint: disable=abstract-method,arguments-differ if is_tf2(): BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell", "z) + state * z return next_h, next_h if __name__", "func(encoder_x, decoder_x): encoder_cell = LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x,", "\"input_2:0\": decoder_x_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output_0:0\", \"output:0\",", "[2., 2.], [3., 3.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size)", "cell_state = dynamic_rnn(cells, x, dtype=tf.float32) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")", "sequence_length=tf.identity(seq_length)) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val,", "initializer=initializer, state_is_tuple=state_is_tuple) cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple) outputs, cell_state", "\"input_2:0\", \"input_3:0\"] output_names_with_port = [\"output_0:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,", "test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size = 5 time_step = 3 input_size = 4", "state 
size] = [1, 5] input_dim = inputs.get_shape()[-1] assert input_dim", "# size of each model layer. batch_size = 1 x_val", "from __future__ import division from __future__ import print_function import numpy", "output_names_with_port = [\"output_0:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) class", "def test_single_dynamic_custom_rnn_time_major(self): size = 5 # size of each model", "test_single_dynamic_custom_rnn_time_major(self): size = 5 # size of each model layer.", "= hidden_dim self._activation = tf.tanh @property def state_size(self): return self._num_units", "15] # state shape: [batch size, state size] = [1,", "= [5, 15] # b shape: [1, 3 * 5]", "layer. batch_size = 1 x_val = np.array([[1., 1.], [2., 2.],", "= dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=[4, 3, 4, 5, 2,", "input size] = [1, 3, 2] # num_units: 5 #", "tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val, \"input_2:0\": y_val}", "= 3 input_size = 4 attn_size = size batch_size =", "cell = GRUCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False)", "3.], [4., 4.], [5., 5.]], dtype=np.float32) x_val = np.stack([x_val] *", "= dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\")", "import division from __future__ import print_function import numpy as np", "return self._num_units @property def output_size(self): return self._num_units def call(self, inputs,", "\"Scan\") @skip_tf2() def test_multi_rnn_lstm(self, state_is_tuple=True): units = 5 batch_size =", "= GatedGRUCell(units) outputs, cell_state = dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=tf.identity(seq_length))", "from __future__ import absolute_import from __future__ import 
division from __future__", "x_val = np.stack([x_val] * batch_size) def func(x): cell = GatedGRUCell(size)", "{\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func,", "decoder_x, dtype=tf.float32) return output_0, tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict =", "bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True) matched_output = tf.concat([match_output_fw, match_output_bk],", "\"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder(self): size = 5 time_step =", "LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ = tf.identity(output,", "dtype=tf.float32) output_0 = tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism =", "cell_1, cell_2], state_is_tuple=state_is_tuple) outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32) return", "= dynamic_rnn(match_cell_fw, x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict", "@skip_tf2() def test_multi_rnn_lstm(self, state_is_tuple=True): units = 5 batch_size = 6", "= tf.concat([match_output_fw, match_output_bk], axis=-1) matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1) return", "# pylint: disable=wildcard-import, unused-wildcard-import from tf2onnx.tf_loader import is_tf2 # pylint:", "super().__init__(self, _reuse=reuse) self._num_units = hidden_dim self._activation = tf.tanh @property def", "import numpy as np import tensorflow as tf from tensorflow.python.ops", "= np.stack([x_val] * batch_size) def func(x): cell = GatedGRUCell(size) xs,", "attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) 
(match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw,", "@skip_tf2() def test_attention_wrapper_gru_encoder(self): size = 5 time_step = 3 input_size", "return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_multi_rnn_lstm(self,", "5.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): #", "attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return", "import Tf2OnnxBackendTestBase from common import * # pylint: disable=wildcard-import, unused-wildcard-import", "* self._num_units], dtype=tf.float32) b = np.arange(15.0, dtype=np.float32).reshape((1, 15)) xw =", "size] # attention_state: usually the output of an RNN encoder.", "def test_attention_wrapper_lstm_encoder(self): size = 5 time_step = 3 input_size =", "encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val = np.stack([encoder_x_val] * batch_size) decoder_time_step", "= tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell GRUCell", "axis=-1) cell = LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn,", "x_val = np.stack([x_val] * batch_size) def func(x): initializer = init_ops.constant_initializer(0.5)", "feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_multi_rnn_lstm(self, state_is_tuple=True):", "def test_attention_wrapper_gru_encoder(self): size = 5 time_step = 3 input_size =", "size, state 
size] = [1, 5] input_dim = inputs.get_shape()[-1] assert", "of each model layer. batch_size = 1 cell = GatedGRUCell(size)", "def func(x, seq_length): # no scope cell = GatedGRUCell(units) outputs,", "3 * self._num_units], dtype=tf.float32) b = np.arange(15.0, dtype=np.float32).reshape((1, 15)) xw", "= output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input,", "state): # inputs shape: [batch size, time step, input size]", "[\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output_0:0\", \"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port,", "= LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output,", "= tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn =", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def", "= LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_1 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_2", "\"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size = 5 time_step =", "cell_2 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cells = MultiRNNCell([cell_0, cell_1, cell_2],", "name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port =", "is_tf2(): BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell", "MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn = 
tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else:", "@check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_const_encoder(self): size = 5 time_step", "should be shaped `[batch_size, max_time, ...]` encoder_time_step = time_step encoder_x_val", "{\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port =", "{\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val, \"input_3:0\": np.array([6, 5, 4, 3, 2,", "must be defined\" # W = tf.get_variable(name=\"W\", shape=[input_dim, 3 *", "batch_size = 1 x_val = np.array([[1., 1.], [2., 2.], [3.,", "be shaped `[batch_size, max_time, ...]` encoder_time_step = time_step encoder_x_val =", "output_names_with_port = [\"output_0:0\", \"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)", "inputs shape: [batch size, time step, input size] = [1,", "= 6 x_val = np.random.randn(decoder_time_step, input_size).astype('f') x_val = np.stack([x_val] *", "2.], [3., 3.], [4., 4.]], dtype=np.float32) x_val = np.stack([x_val] *", "input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_time_major(self): size =", "np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32) x_val", "self._num_units], dtype=tf.float32) b = np.arange(15.0, dtype=np.float32).reshape((1, 15)) xw = tf.split(tf.matmul(inputs,", "as np import tensorflow as tf from tensorflow.python.ops import init_ops", "= GatedGRUCell(size) xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True) return", "seq_length): encoder_cell = LSTMCell(size) attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)", "tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn class 
CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\") @skip_tf2() def", "attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32) return", "[batch size, time step, input size] = [1, 3, 2]", "* batch_size) def func(x): xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x,", "np.random.randn(decoder_time_step, batch_size, input_size).astype('f') def func(encoder_x, decoder_x, seq_length): encoder_cell = LSTMCell(size)", "h1 = self._activation(xw[2] + r * hu[2]) next_h = h1", "inputs=x, time_major=False) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\":", "feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_gru_encoder(self):", "input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict,", "4.], [5., 5.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) y_val", "RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn", "= tf.contrib.rnn.MultiRNNCell dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn class CustomRnnCellTests(Tf2OnnxBackendTestBase):", "= MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple) outputs, cell_state = dynamic_rnn(cells, x,", "state], axis=-1) cell = LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size,", "30], [9, 30] attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda", "3, 4, 5, 2, 1]) return tf.identity(outputs, name=\"output\"), 
tf.identity(cell_state, name=\"cell_state\")", "5] = [5, 15] # b shape: [1, 3 *", "size of each model layer. batch_size = 1 cell =", "def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units = 5 batch_size = 6 x_val =", "feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder(self):", "# num_units: 5 # W shape: [2, 3 * 5]", "tf.split(tf.matmul(state, U), 3, 1) r = tf.sigmoid(xw[0] + hu[0]) z", "= [\"output:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,", "decoder_x): encoder_cell = GRUCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)", "match_output_bk), (match_state_fw, match_state_bk) = \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32,", "\"input_2:0\"] output_names_with_port = [\"output_0:0\", \"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,", "np.stack([decoder_x_val] * batch_size) def func(encoder_x, decoder_x): encoder_cell = LSTMCell(size) output,", "1) r = tf.sigmoid(xw[0] + hu[0]) z = tf.sigmoid(xw[1] +", "decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f') def func(encoder_x,", "bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell", "@check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_gru_encoder(self): size = 5 time_step = 3", "{\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\"] output_names_with_port =", "= dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return output_0, 
tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\")", "= tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism,", "cell, x, dtype=tf.float32, sequence_length=[4, 3, 4, 5, 2, 1]) return", "1 cell = GatedGRUCell(size) x_val = np.array([[1., 1.], [2., 2.],", "[3., 3.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x):", "name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val} input_names_with_port = [\"input_1:0\",", "5] = [2, 15] # U shape: [5, 3 *", "attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn,", "\"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def", "step, size] # attention_state: usually the output of an RNN", "LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) output_0 = tf.identity(output,", "decoder_x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\":", "3 * 5] = [2, 15] # U shape: [5,", "@check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn(self): size = 5 # size", "= [\"input_1:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,", "feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def 
test_attention_wrapper_const_encoder(self):", "cell_0 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_1 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple)", "dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return output_0, tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict", "state: tf.concat([curr_input, state], axis=-1) cell = LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,", "5.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) y_val = np.array([4,", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_opset(9, \"ReverseSequence\")", "x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]],", "np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]],", "= tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) b = np.arange(15.0,", "# b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) b", "dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) output_0 = tf.identity(output, name=\"output_0\") attention_states = output", "curr_input, state: tf.concat([curr_input, state], axis=-1) cell = GRUCell(size) match_cell_fw =", "output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state,", "dtype=tf.float32) # [9, 3, 30], [9, 30] attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,", "W = np.arange(30.0, dtype=np.float32).reshape((2, 15)) # U = tf.get_variable(name='U', shape=[self._num_units,", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) class GatedGRUCell(RNNCell): def __init__(self, hidden_dim,", "[5, 3 * 5] = [5, 15] # b shape:", "= tf.split(tf.matmul(inputs, W) + b, 3, 1) hu = 
tf.split(tf.matmul(state,", "np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val = np.stack([decoder_x_val] * batch_size) def func(encoder_x, decoder_x):", "matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1) matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)", "input_names_with_port = [\"input_1:0\"] feed_dict = {\"input_1:0\": x_val} output_names_with_port = [\"output:0\",", "shape=[1, 3 * self._num_units], dtype=tf.float32) b = np.arange(15.0, dtype=np.float32).reshape((1, 15))", "shape: [2, 3 * 5] = [2, 15] # U", "def func(x): xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False) return", "1], dtype=np.int32) def func(x, seq_length): # no scope cell =", "3.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): xs,", "= GRUCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output,", "batch_size) def func(x): initializer = init_ops.constant_initializer(0.5) cell_0 = LSTMCell(units, initializer=initializer,", "= tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) U = np.arange(75.0,", "cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32) return tf.identity(output,", "input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output_0:0\", \"output:0\", \"final_state:0\"] self.run_test_case(func,", "GatedGRUCell(size) xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True) return tf.identity(xs,", "@skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size = 5 time_step = 3 input_size", "@skip_tf2() def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units = 5 batch_size = 6 x_val", "= tf.split(tf.matmul(state, U), 3, 1) r = 
tf.sigmoid(xw[0] + hu[0])", "{\"input_1:0\": x_val, \"input_2:0\": y_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port =", "shape [batch size, time step, size] # attention_state: usually the", "This tensor should be shaped `[batch_size, max_time, ...]`. decoder_time_step =", "test_attention_wrapper_const_encoder(self): size = 5 time_step = 3 input_size = 4", "inputs=x, time_major=True) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\":", "initializer=initializer, state_is_tuple=state_is_tuple) cell_2 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cells = MultiRNNCell([cell_0,", "[3., 3.], [4., 4.], [5., 5.]], dtype=np.float32) x_val = np.stack([x_val]", "LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell", "np.arange(75.0, dtype=np.float32).reshape((5, 15)) # b = tf.get_variable(name='b', shape=[1, 3 *", "= lambda curr_input, state: tf.concat([curr_input, state], axis=-1) cell = GRUCell(size)", "encoder_cell = LSTMCell(size) attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) #", "= np.random.randn(decoder_time_step, input_size).astype('f') x_val = np.stack([x_val] * batch_size) attention_states =", "def func(encoder_x, decoder_x): encoder_cell = GRUCell(size) output, attr_state = dynamic_rnn(encoder_cell,", "np.array([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)}", "+ hu[0]) z = tf.sigmoid(xw[1] + hu[1]) h1 = self._activation(xw[2]", "batch_size) def func(x): cell = GatedGRUCell(size) xs, s = dynamic_rnn(cell=cell,", "= np.stack([x_val] * batch_size) def func(x): # no scope cell", "3 * 5] = [1, 15] # state shape: [batch", "state shape: [batch size, state size] = [1, 5] input_dim", "np.arange(30.0, dtype=np.float32).reshape((2, 15)) # U = tf.get_variable(name='U', shape=[self._num_units, 3 *", "rtol=1e-06) @check_opset_min_version(8, 
\"Scan\") @check_tf_min_version(\"1.8\") @skip_opset(9, \"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing RNN shape\")", "2, 1], dtype=np.int32) def func(x, seq_length): # no scope cell", "@skip_tf2() def test_attention_wrapper_lstm_encoder(self): size = 5 time_step = 3 input_size", "= dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) # [9, 3, 30], [9, 30]", "return self._num_units def call(self, inputs, state): # inputs shape: [batch", "tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell", "5, 2, 1]) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict =", "output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) output_0 = tf.identity(output, name=\"output_0\")", "attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) (match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \\", "usually the output of an RNN encoder. 
# This tensor", "@check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self): units = 5 batch_size =", "x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\"] output_names_with_port = [\"output:0\",", "attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)", "RNNCell = tf.nn.rnn_cell.RNNCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn", "= tf.nn.bidirectional_dynamic_rnn class CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn(self): size", "dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict", "name=\"output_0\"), tf.identity(matched_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val, \"input_3:0\":", "x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\":", "dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": x_val}", "tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port", "[1, 3 * 5] = [1, 15] # state shape:", "= tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell =", "decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val = np.stack([decoder_x_val]", "self._num_units = hidden_dim self._activation = tf.tanh @property def state_size(self): return", "\"Scan\") 
@check_tf_min_version(\"1.8\") @skip_opset(9, \"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing RNN shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self):", "input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_opset(9, \"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing", "hu = tf.split(tf.matmul(state, U), 3, 1) r = tf.sigmoid(xw[0] +", "# attention_state: usually the output of an RNN encoder. #", "[9, 3, 30], [9, 30] attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn", "batch_size, input_size).astype('f') def func(encoder_x, decoder_x, seq_length): encoder_cell = LSTMCell(size) attention_states,", "each model layer. batch_size = 1 x_val = np.array([[1., 1.],", "{\"input_1:0\": x_val} output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,", "batch_size) def func(x): # no scope cell = GatedGRUCell(units) outputs,", "`[batch_size, max_time, ...]` encoder_time_step = time_step encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')", "tf.concat([curr_input, state], axis=-1) cell = LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism,", "feed_dict = {\"input_1:0\": x_val, \"input_2:0\": y_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"]", "attention_states = output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda", "3, 2] # num_units: 5 # W shape: [2, 3", "from tensorflow.python.ops import init_ops from backend_test_base import Tf2OnnxBackendTestBase from common", "* batch_size) def func(x): # no scope cell = GatedGRUCell(units)", "= [\"input_1:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, 
output_names_with_port,", "decoder_x): encoder_cell = LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)", "np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32) x_val = np.stack([x_val]", "batch_size) def func(encoder_x, decoder_x): encoder_cell = LSTMCell(size) output, attr_state =", "\"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self): units = 5 batch_size = 6", "[\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\")", "import tensorflow as tf from tensorflow.python.ops import init_ops from backend_test_base", "def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size = 5 time_step = 3 input_size =", "* batch_size) decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val", "30 time_step = 3 input_size = 4 attn_size = size", "3, 1) r = tf.sigmoid(xw[0] + hu[0]) z = tf.sigmoid(xw[1]", "be defined\" # W = tf.get_variable(name=\"W\", shape=[input_dim, 3 * self._num_units],", "LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell GRUCell = tf.nn.rnn_cell.LSTMCell RNNCell", "= 1 cell = GatedGRUCell(size) x_val = np.array([[1., 1.], [2.,", "@skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self): units = 5 batch_size = 6 x_val", "output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size =", "cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple) outputs, cell_state = dynamic_rnn(cells,", "# state shape: [batch size, state size] = [1, 5]", "@skip_tf2() def test_single_dynamic_custom_rnn(self): size = 5 # size of each", "size = 30 time_step = 3 input_size = 4 attn_size", "= [1, 15] # state shape: [batch size, state 
size]", "[9, 30] attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input,", "decoder_x_val = np.stack([decoder_x_val] * batch_size) def func(encoder_x, decoder_x): encoder_cell =", "0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_time_major(self): size = 5 #", "tf from tensorflow.python.ops import init_ops from backend_test_base import Tf2OnnxBackendTestBase from", "b, 3, 1) hu = tf.split(tf.matmul(state, U), 3, 1) r", "tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) b = np.arange(15.0, dtype=np.float32).reshape((1,", "np.random.randn(decoder_time_step, input_size).astype('f') x_val = np.stack([x_val] * batch_size) attention_states = np.random.randn(batch_size,", "state], axis=-1) cell = GRUCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size,", "= tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw,", "= {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\"] output_names_with_port", "print_function import numpy as np import tensorflow as tf from", "= tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell GRUCell = tf.nn.rnn_cell.LSTMCell RNNCell =", "from backend_test_base import Tf2OnnxBackendTestBase from common import * # pylint:", "tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port =", "from common import * # pylint: disable=wildcard-import, unused-wildcard-import from tf2onnx.tf_loader", "= time_step encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val = np.stack([encoder_x_val] *", "attr_state = dynamic_rnn(match_cell_fw, 
decoder_x, dtype=tf.float32) return output_0, tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state,", "def test_bidrectional_attention_wrapper_lstm_encoder(self): size = 30 time_step = 3 input_size =", "__future__ import print_function import numpy as np import tensorflow as", "np.stack([x_val] * batch_size) y_val = np.array([4, 3, 4, 5, 2,", "state_size(self): return self._num_units @property def output_size(self): return self._num_units def call(self,", "shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self): size = 30 time_step = 3 input_size", "hidden_dim, reuse=None): super().__init__(self, _reuse=reuse) self._num_units = hidden_dim self._activation = tf.tanh", "output of an RNN encoder. # This tensor should be", "next_h = h1 * (1 - z) + state *", "5 batch_size = 6 x_val = np.array([[1., 1.], [2., 2.],", "output_attention=False) (match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x,", "decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f') def func(encoder_x, decoder_x, seq_length): encoder_cell", "= np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val = np.stack([encoder_x_val] * batch_size) decoder_time_step =", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self):", "cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return output_0,", "LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_1 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_2 =", "\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, 
\"Scan\") @check_tf_min_version(\"1.8\")", "name=\"cell_state\") input_names_with_port = [\"input_1:0\"] feed_dict = {\"input_1:0\": x_val} output_names_with_port =", "# U shape: [5, 3 * 5] = [5, 15]", "1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32) x_val =", "output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state,", "\"input_3:0\"] output_names_with_port = [\"output_0:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)", "15] # b shape: [1, 3 * 5] = [1,", "= tf.get_variable(name=\"W\", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) W = np.arange(30.0,", "LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state", "= np.arange(15.0, dtype=np.float32).reshape((1, 15)) xw = tf.split(tf.matmul(inputs, W) + b,", "3, 4, 5, 2, 1], dtype=np.int32) def func(x, seq_length): #", "= 6 x_val = np.array([[1., 1.], [2., 2.], [3., 3.],", "1, 2, 3, 6], dtype=np.int32)} input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"]", "name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val, \"input_3:0\": np.array([6, 5,", "name=\"cell_state\") feed_dict = {\"input_1:0\": x_val, \"input_2:0\": y_val} input_names_with_port = [\"input_1:0\",", "batch_size) decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val =", "Tests for custom rnns.\"\"\" from __future__ import absolute_import from __future__", "# shape [batch size, time step, size] # attention_state: usually", "def test_multi_rnn_lstm(self, state_is_tuple=True): units = 5 batch_size = 6 x_val", "input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") 
@skip_tf2() def test_attention_wrapper_lstm_encoder(self): size", "\"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing RNN shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self): size = 30", "test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units = 5 batch_size = 6 x_val = np.array([[1.,", "time_step = 3 input_size = 4 attn_size = size batch_size", "batch_size) attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f') def func(x): attention_mechanism =", "def call(self, inputs, state): # inputs shape: [batch size, time", "= LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ =", "each model layer. batch_size = 1 cell = GatedGRUCell(size) x_val", "tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port", "def test_single_dynamic_custom_rnn_with_seq_length(self): units = 5 batch_size = 6 x_val =", "step, input size] = [1, 3, 2] # num_units: 5", "tf.nn.rnn_cell.RNNCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn", "time_step, attn_size).astype('f') def func(x): attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn =", "dtype=np.float32).reshape((2, 15)) # U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units],", "size = 5 # size of each model layer. 
batch_size", "return output_0, tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val,", "tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn", "input_size).astype('f') encoder_x_val = np.stack([encoder_x_val] * batch_size) decoder_time_step = 6 decoder_x_val", "initializer=initializer, state_is_tuple=state_is_tuple) cell_1 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_2 = LSTMCell(units,", "name=\"output\"), tf.identity(cell_state, name=\"cell_state\") input_names_with_port = [\"input_1:0\"] feed_dict = {\"input_1:0\": x_val}", "z = tf.sigmoid(xw[1] + hu[1]) h1 = self._activation(xw[2] + r", "match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1) cell =", "4, 5, 2, 1]) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict", "attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input, state: tf.concat([curr_input,", "= inputs.get_shape()[-1] assert input_dim is not None, \"input dimension must", "= LSTMCell(size) attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) # [9,", "attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) output_0 = tf.identity(output, name=\"output_0\") attention_states", "func(x): attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input, state:", "dynamic_rnn(cells, x, dtype=tf.float32) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") input_names_with_port =", "disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint: disable=abstract-method,arguments-differ if is_tf2(): BasicLSTMCell = 
tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell", "outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state,", "dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell", "encoder_x_val, \"input_2:0\": decoder_x_val, \"input_3:0\": np.array([6, 5, 4, 3, 2, 1,", "dtype=tf.float32) return output_0, tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\":", "should be shaped `[batch_size, max_time, ...]`. decoder_time_step = 6 x_val", "numpy as np import tensorflow as tf from tensorflow.python.ops import", "import * # pylint: disable=wildcard-import, unused-wildcard-import from tf2onnx.tf_loader import is_tf2", "= init_ops.constant_initializer(0.5) cell_0 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_1 = LSTMCell(units,", "dtype=tf.float32) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") input_names_with_port = [\"input_1:0\"] feed_dict", "size, time step, input size] = [1, 3, 2] #", "@check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder(self): size = 5 time_step", "3, 6], dtype=np.int32)} input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"] output_names_with_port =", "tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell GRUCell = tf.nn.rnn_cell.LSTMCell RNNCell = tf.nn.rnn_cell.RNNCell", "[2, 15] # U shape: [5, 3 * 5] =", "(match_state_fw, match_state_bk) = \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True)", "x_val, \"input_2:0\": y_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output:0\",", "2.], 
[3., 3.], [4., 4.], [5., 5.]], dtype=np.float32) x_val =", "cell = GatedGRUCell(size) xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True)", "[\"input_1:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)", "not None, \"input dimension must be defined\" # W =", "[5., 5.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x):", "decoder_x_val, \"input_3:0\": np.array([6, 5, 4, 3, 2, 1, 2, 3,", "encoder_cell = LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) output_0", "cell_input_fn=match_input_fn, output_attention=False) match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) (match_output_fw,", "from tf2onnx.tf_loader import is_tf2 # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint: disable=abstract-method,arguments-differ", "h1 * (1 - z) + state * z return", "dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn class CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\")", "def output_size(self): return self._num_units def call(self, inputs, state): # inputs", "decoder_time_step = 6 x_val = np.random.randn(decoder_time_step, input_size).astype('f') x_val = np.stack([x_val]", "GRUCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ = tf.identity(output,", "def func(x): initializer = init_ops.constant_initializer(0.5) cell_0 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple)", "SPDX-License-Identifier: Apache-2.0 \"\"\"Unit Tests for custom rnns.\"\"\" from __future__ import", "func(x): # no scope cell = GatedGRUCell(units) outputs, cell_state =", "feed_dict, input_names_with_port, output_names_with_port, 
rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_opset(9, \"ReverseSequence\") @skip_tf2()", "dtype=np.float32).reshape((1, 15)) xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)", "\"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_time_major(self): size = 5 # size of", "for custom rnns.\"\"\" from __future__ import absolute_import from __future__ import", "* hu[2]) next_h = h1 * (1 - z) +", "name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val, \"input_2:0\": y_val} input_names_with_port", "* batch_size) def func(encoder_x, decoder_x): encoder_cell = GRUCell(size) output, attr_state", "pylint: disable=abstract-method,arguments-differ if is_tf2(): BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell", "name=\"output_0\") attention_states = output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn =", "np.stack([x_val] * batch_size) def func(x): initializer = init_ops.constant_initializer(0.5) cell_0 =", "[\"input_1:0\"] output_names_with_port = [\"output:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict,", "= 6 decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f') def func(encoder_x, decoder_x,", "tf.nn.rnn_cell.LSTMCell GRUCell = tf.nn.rnn_cell.LSTMCell RNNCell = tf.nn.rnn_cell.RNNCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell", "+ hu[1]) h1 = self._activation(xw[2] + r * hu[2]) next_h", "[\"input_1:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)", "batch_size) y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np.int32)", "output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return output_0, tf.identity(output, name=\"output\"),", "test_multi_rnn_lstm(self, 
state_is_tuple=True): units = 5 batch_size = 6 x_val =", "batch_size = 1 cell = GatedGRUCell(size) x_val = np.array([[1., 1.],", "dtype=tf.float32, sequence_length=tf.identity(seq_length)) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\":", "of an RNN encoder. # This tensor should be shaped", "units = 5 batch_size = 6 x_val = np.array([[1., 1.],", "input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_const_encoder(self): size", "= np.stack([x_val] * batch_size) attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f') def", "import absolute_import from __future__ import division from __future__ import print_function", "GRUCell = tf.nn.rnn_cell.LSTMCell RNNCell = tf.nn.rnn_cell.RNNCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell dynamic_rnn", "= {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val, \"input_3:0\": np.array([6, 5, 4, 3,", "self._num_units @property def output_size(self): return self._num_units def call(self, inputs, state):", "= size batch_size = 9 # shape [batch size, time", "* batch_size) def func(x): cell = GatedGRUCell(size) xs, s =", "* batch_size) decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')", "+ state * z return next_h, next_h if __name__ ==", "__init__(self, hidden_dim, reuse=None): super().__init__(self, _reuse=reuse) self._num_units = hidden_dim self._activation =", "= tf.nn.rnn_cell.RNNCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn =", "func(x): initializer = init_ops.constant_initializer(0.5) cell_0 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_1", "MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple) outputs, cell_state = dynamic_rnn(cells, x, 
dtype=tf.float32)", "= LSTMCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) match_cell_bk", "= tf.nn.rnn_cell.LSTMCell GRUCell = tf.nn.rnn_cell.LSTMCell RNNCell = tf.nn.rnn_cell.RNNCell MultiRNNCell =", "test_single_dynamic_custom_rnn_with_seq_length(self): units = 5 batch_size = 6 x_val = np.array([[1.,", "input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size", "# inputs shape: [batch size, time step, input size] =", "U shape: [5, 3 * 5] = [5, 15] #", "= tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input, state: tf.concat([curr_input, state],", "division from __future__ import print_function import numpy as np import", "= tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell", "attn_size).astype('f') def func(x): attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda", "2, 1, 2, 3, 6], dtype=np.int32)} input_names_with_port = [\"input_1:0\", \"input_2:0\",", "dtype=tf.float32, inputs=x, time_major=False) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict =", "MultiRNNCell = tf.contrib.rnn.MultiRNNCell dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn class", "attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ = tf.identity(output, name=\"output_0\") attention_states", "LSTMCell(size) attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) # [9, 3,", "* batch_size) def func(x): initializer = init_ops.constant_initializer(0.5) cell_0 = LSTMCell(units,", 
"\"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2()", "tf.concat([match_output_fw, match_output_bk], axis=-1) matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1) return tf.identity(matched_output,", "seq_length): # no scope cell = GatedGRUCell(units) outputs, cell_state =", "name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val} input_names_with_port", "# pylint: disable=abstract-method,arguments-differ if is_tf2(): BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell =", "return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port", "[\"output:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)", "cell_state = dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=tf.identity(seq_length)) return tf.identity(outputs, name=\"output\"),", "dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) # [9, 3, 30], [9, 30] attention_mechanism", "= 9 # shape [batch size, time step, size] #", "batch_size = 9 # shape [batch size, time step, size]", "[1, 5] input_dim = inputs.get_shape()[-1] assert input_dim is not None,", "the output of an RNN encoder. 
# This tensor should", "= tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) (match_output_fw, match_output_bk), (match_state_fw, match_state_bk)", "= np.stack([x_val] * batch_size) y_val = np.array([4, 3, 4, 5,", "@check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size = 5 time_step = 3", "shape: [5, 3 * 5] = [5, 15] # b", "dtype=np.float32) x_val = np.stack([x_val] * batch_size) y_val = np.array([4, 3,", "= 5 batch_size = 6 x_val = np.array([[1., 1.], [2.,", "# [9, 3, 30], [9, 30] attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)", "attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f') def func(x): attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,", "= LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) output_0 =", "output_names_with_port, 0.1) class GatedGRUCell(RNNCell): def __init__(self, hidden_dim, reuse=None): super().__init__(self, _reuse=reuse)", "= [2, 15] # U shape: [5, 3 * 5]", "= self._activation(xw[2] + r * hu[2]) next_h = h1 *", "curr_input, state: tf.concat([curr_input, state], axis=-1) cell = LSTMCell(size) match_cell_fw =", "dtype=tf.float32, inputs=x, time_major=True) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict =", "_ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) # [9, 3, 30], [9,", "# b shape: [1, 3 * 5] = [1, 15]", "rtol=1e-06) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units = 5 batch_size", "defined\" # W = tf.get_variable(name=\"W\", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @skip_tf2() 
def test_single_dynamic_custom_rnn_with_non_const_seq_length(self):", "[3., 3.], [4., 4.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size)", "attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\")", "func(encoder_x, decoder_x, seq_length): encoder_cell = LSTMCell(size) attention_states, _ = dynamic_rnn(encoder_cell,", "np.array([4, 3, 4, 5, 2, 1], dtype=np.int32) def func(x, seq_length):", "output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32) return tf.identity(output, name=\"output\"),", "output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self): units = 5", "_ = tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,", "\"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @skip_tf2() def", "[1, 15] # state shape: [batch size, state size] =", "dtype=tf.float32) U = np.arange(75.0, dtype=np.float32).reshape((5, 15)) # b = tf.get_variable(name='b',", "encoder_time_step = time_step encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val = np.stack([encoder_x_val]", "feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\"]", "if is_tf2(): BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell =", "num_units: 5 # W shape: [2, 3 * 5] =", "np.stack([x_val] * batch_size) def func(x): # no scope cell =", "0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self): units = 5 batch_size", "match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, 
attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) (match_output_fw, match_output_bk), (match_state_fw,", "tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val,", "U), 3, 1) r = tf.sigmoid(xw[0] + hu[0]) z =", "input_size = 4 attn_size = size batch_size = 9 #", "time_major=True) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\": x_val}", "= np.stack([x_val] * batch_size) def func(x): initializer = init_ops.constant_initializer(0.5) cell_0", "W) + b, 3, 1) hu = tf.split(tf.matmul(state, U), 3,", "dtype=np.float32).reshape((5, 15)) # b = tf.get_variable(name='b', shape=[1, 3 * self._num_units],", "= 1 x_val = np.array([[1., 1.], [2., 2.], [3., 3.]],", "[\"output_0:0\", \"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\")", "15)) xw = tf.split(tf.matmul(inputs, W) + b, 3, 1) hu", "test_attention_wrapper_lstm_encoder(self): size = 5 time_step = 3 input_size = 4", "6 decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val = np.stack([decoder_x_val] * batch_size)", "self._num_units], dtype=tf.float32) W = np.arange(30.0, dtype=np.float32).reshape((2, 15)) # U =", "dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=[4, 3, 4, 5, 2, 1])", "matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1) return tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state, name=\"final_state\")", "tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell =", "match_output_bk], axis=-1) matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1) return 
tf.identity(matched_output, name=\"output_0\"),", "tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, decoder_x,", "inputs, state): # inputs shape: [batch size, time step, input", "LSTMCell = tf.nn.rnn_cell.LSTMCell GRUCell = tf.nn.rnn_cell.LSTMCell RNNCell = tf.nn.rnn_cell.RNNCell MultiRNNCell", "x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.],", "input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_multi_rnn_lstm(self, state_is_tuple=True): units", "3 input_size = 4 attn_size = size batch_size = 9", "tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size,", "= GatedGRUCell(units) outputs, cell_state = dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=[4,", "CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn(self): size = 5 #", "x_val} output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)", "= {\"input_1:0\": x_val} output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port,", "def func(x): cell = GatedGRUCell(size) xs, s = dynamic_rnn(cell=cell, dtype=tf.float32,", "5] = [1, 15] # state shape: [batch size, state", "@allow_missing_shapes(\"Missing RNN shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self): size = 30 time_step =", "\"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") 
@check_tf_min_version(\"1.8\") @skip_opset(9,", "cell_input_fn=match_input_fn, output_attention=False) (match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk,", "time_major=True) matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1) matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state],", "match_state_bk) = \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True) matched_output", "[\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @skip_tf2()", "rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_const_encoder(self): size = 5", "time_step encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val = np.stack([encoder_x_val] * batch_size)", "= np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)", "dtype=tf.float32, time_major=True) matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1) matched_state = tf.concat([match_state_fw.cell_state,", "func(x): cell = GatedGRUCell(size) xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x,", "[1, 3, 2] # num_units: 5 # W shape: [2,", "* 5] = [5, 15] # b shape: [1, 3", "= [\"output_0:0\", \"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8,", "output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input, state:", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def 
test_single_dynamic_custom_rnn_time_major(self):", "tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell =", "0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_multi_rnn_lstm(self, state_is_tuple=True): units = 5", "[5, 15] # b shape: [1, 3 * 5] =", "dtype=tf.float32) b = np.arange(15.0, dtype=np.float32).reshape((1, 15)) xw = tf.split(tf.matmul(inputs, W)", "lambda curr_input, state: tf.concat([curr_input, state], axis=-1) cell = GRUCell(size) match_cell_fw", "np import tensorflow as tf from tensorflow.python.ops import init_ops from", "= 30 time_step = 3 input_size = 4 attn_size =", "= [\"input_1:0\"] output_names_with_port = [\"output:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func,", "dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): xs, s", "= tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell", "b = np.arange(15.0, dtype=np.float32).reshape((1, 15)) xw = tf.split(tf.matmul(inputs, W) +", "feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self):", "feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\",", "output_0, tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\":", "np.stack([encoder_x_val] * batch_size) decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, batch_size,", "= tf.sigmoid(xw[1] + hu[1]) h1 = self._activation(xw[2] + r *", "def test_attention_wrapper_const_encoder(self): size = 5 time_step = 3 input_size =", "@check_opset_min_version(8, 
\"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_time_major(self): size = 5 # size", "tf.compat.v1.nn.rnn_cell.BasicLSTMCell LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell", "import is_tf2 # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint: disable=abstract-method,arguments-differ if is_tf2():", "= np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val = np.stack([decoder_x_val] * batch_size) def func(encoder_x,", "`[batch_size, max_time, ...]`. decoder_time_step = 6 x_val = np.random.randn(decoder_time_step, input_size).astype('f')", "is_tf2 # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint: disable=abstract-method,arguments-differ if is_tf2(): BasicLSTMCell", "init_ops from backend_test_base import Tf2OnnxBackendTestBase from common import * #", "name=\"cell_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port =", "# W shape: [2, 3 * 5] = [2, 15]", "= tf.nn.rnn_cell.LSTMCell RNNCell = tf.nn.rnn_cell.RNNCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell dynamic_rnn =", "[2., 2.], [3., 3.], [4., 4.]], dtype=np.float32) x_val = np.stack([x_val]", "encoder_x, dtype=tf.float32) _ = tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism", "= \\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True) matched_output =", "15)) # U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)", "\"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2()", "layer. 
batch_size = 1 cell = GatedGRUCell(size) x_val = np.array([[1.,", "input_dim = inputs.get_shape()[-1] assert input_dim is not None, \"input dimension", "# W = tf.get_variable(name=\"W\", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) W", "= tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1) return tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state, name=\"final_state\") feed_dict", "output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return output_0, tf.identity(output,", "__future__ import absolute_import from __future__ import division from __future__ import", "inputs.get_shape()[-1] assert input_dim is not None, \"input dimension must be", "state_is_tuple=state_is_tuple) cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple) outputs, cell_state =", "inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True) matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1) matched_state", "name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"]", "as tf from tensorflow.python.ops import init_ops from backend_test_base import Tf2OnnxBackendTestBase", "* 5] = [1, 15] # state shape: [batch size,", "input_names_with_port, output_names_with_port, 0.1) class GatedGRUCell(RNNCell): def __init__(self, hidden_dim, reuse=None): super().__init__(self,", "attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False)", "encoder_x, dtype=tf.float32) output_0 = tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism", "dtype=np.int32)} input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"] output_names_with_port = 
[\"output_0:0\", \"final_state:0\"]", "hu[0]) z = tf.sigmoid(xw[1] + hu[1]) h1 = self._activation(xw[2] +", "input_size).astype('f') decoder_x_val = np.stack([decoder_x_val] * batch_size) def func(encoder_x, decoder_x): encoder_cell", "RNN shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self): size = 30 time_step = 3", "+ r * hu[2]) next_h = h1 * (1 -", "@check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder(self): size = 5 time_step = 3", "return tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\":", "tf.compat.v1.nn.rnn_cell.GRUCell RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn = tf.compat.v1.nn.dynamic_rnn", "W = tf.get_variable(name=\"W\", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) W =", "output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8,", "# U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) U", "output_0 = tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,", "2.], [3., 3.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def", "@skip_tf2() def test_attention_wrapper_const_encoder(self): size = 5 time_step = 3 input_size", "decoder_x, seq_length): encoder_cell = LSTMCell(size) attention_states, _ = dynamic_rnn(encoder_cell, encoder_x,", "x_val = np.stack([x_val] * batch_size) attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f')", "bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn class CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn(self):", 
"self._activation(xw[2] + r * hu[2]) next_h = h1 * (1", "* z return next_h, next_h if __name__ == '__main__': unittest_main()", "func(x): xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False) return tf.identity(xs,", "# size of each model layer. batch_size = 1 cell", "xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False) return tf.identity(xs, name=\"output\"),", "[\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,", "= np.random.randn(batch_size, time_step, attn_size).astype('f') def func(x): attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)", "dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ = tf.identity(output, name=\"output_0\") attention_states = output", "y_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func,", "s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True) return tf.identity(xs, name=\"output\"), tf.identity(s,", "9 # shape [batch size, time step, size] # attention_state:", "assert input_dim is not None, \"input dimension must be defined\"", "tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val} input_names_with_port =", "tensorflow.python.ops import init_ops from backend_test_base import Tf2OnnxBackendTestBase from common import", "dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): # no", "name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"]", "attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32)", "s = dynamic_rnn(cell=cell, 
dtype=tf.float32, inputs=x, time_major=False) return tf.identity(xs, name=\"output\"), tf.identity(s,", "def test_single_dynamic_custom_rnn(self): size = 5 # size of each model", "RNN encoder. # This tensor should be shaped `[batch_size, max_time,", "= [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output_0:0\", \"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict,", "@property def state_size(self): return self._num_units @property def output_size(self): return self._num_units", "match_state_bk.cell_state], -1) return tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state, name=\"final_state\") feed_dict = {\"input_1:0\":", "\"\"\"Unit Tests for custom rnns.\"\"\" from __future__ import absolute_import from", "GRUCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state", "pylint: disable=wildcard-import, unused-wildcard-import from tf2onnx.tf_loader import is_tf2 # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test", "cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True) matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)", "tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") input_names_with_port = [\"input_1:0\"] feed_dict = {\"input_1:0\":", "dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=tf.identity(seq_length)) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")", "decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val = np.stack([decoder_x_val] * batch_size) def", "x_val = np.stack([x_val] * batch_size) y_val = np.array([4, 3, 4,", "[4., 4.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x):", "GatedGRUCell(units) outputs, cell_state 
= dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=tf.identity(seq_length)) return", "= 5 time_step = 3 input_size = 4 attn_size =", "self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def", "@skip_tf2() def test_single_dynamic_custom_rnn_time_major(self): size = 5 # size of each", "cell_1 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_2 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple)", "\"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) class GatedGRUCell(RNNCell): def __init__(self,", "tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port", "= np.stack([x_val] * batch_size) def func(x): xs, s = dynamic_rnn(cell=cell,", "np.random.randn(batch_size, time_step, attn_size).astype('f') def func(x): attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn", "feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_time_major(self): size", "dtype=tf.float32, sequence_length=[4, 3, 4, 5, 2, 1]) return tf.identity(outputs, name=\"output\"),", "# This tensor should be shaped `[batch_size, max_time, ...]`. decoder_time_step", "decoder_x_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output_0:0\", \"output:0\", \"final_state:0\"]", "5 # size of each model layer. 
batch_size = 1", "size] = [1, 5] input_dim = inputs.get_shape()[-1] assert input_dim is", "encoder_x, dtype=tf.float32) # [9, 3, 30], [9, 30] attention_mechanism =", "4, 5, 2, 1], dtype=np.int32) def func(x, seq_length): # no", "attn_size = size batch_size = 9 # shape [batch size,", "cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return tf.identity(output,", "encoder_cell = LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _", "tf.compat.v1.nn.bidirectional_dynamic_rnn else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell GRUCell =", "\"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn(self): size = 5 # size of", "batch_size) decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f') def", "\"input_3:0\": np.array([6, 5, 4, 3, 2, 1, 2, 3, 6],", "@check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size = 5 time_step", "= tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn class CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\") @skip_tf2()", "\\ bidirectional_dynamic_rnn(cell_fw=match_cell_fw, cell_bw=match_cell_bk, inputs=decoder_x, sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True) matched_output = tf.concat([match_output_fw,", "* # pylint: disable=wildcard-import, unused-wildcard-import from tf2onnx.tf_loader import is_tf2 #", "tensor should be shaped `[batch_size, max_time, ...]` encoder_time_step = time_step", "state_is_tuple=state_is_tuple) cell_1 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_2 = LSTMCell(units, initializer=initializer,", "output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") 
@skip_opset(9, \"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing RNN", "# no scope cell = GatedGRUCell(units) outputs, cell_state = dynamic_rnn(", "@check_tf_min_version(\"1.8\") @skip_opset(9, \"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing RNN shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self): size", "dtype=np.int32) def func(x, seq_length): # no scope cell = GatedGRUCell(units)", "import init_ops from backend_test_base import Tf2OnnxBackendTestBase from common import *", "= np.array([4, 3, 4, 5, 2, 1], dtype=np.int32) def func(x,", "output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_gru_encoder(self): size =", "\"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_gru_encoder(self): size = 5 time_step =", "return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") input_names_with_port = [\"input_1:0\"] feed_dict =", "return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\":", "backend_test_base import Tf2OnnxBackendTestBase from common import * # pylint: disable=wildcard-import,", "output_attention=False) match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) (match_output_fw, match_output_bk),", "state_is_tuple=True): units = 5 batch_size = 6 x_val = np.array([[1.,", "tensorflow as tf from tensorflow.python.ops import init_ops from backend_test_base import", "@check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_const_encoder(self): size = 5 time_step = 3", "# This tensor should be shaped `[batch_size, max_time, ...]` encoder_time_step", "5] input_dim = inputs.get_shape()[-1] assert input_dim is not None, \"input", "output_names_with_port, 0.1) 
@check_opset_min_version(8, \"Scan\") @skip_tf2() def test_multi_rnn_lstm(self, state_is_tuple=True): units =", "* (1 - z) + state * z return next_h,", "= GatedGRUCell(size) x_val = np.array([[1., 1.], [2., 2.], [3., 3.]],", "np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val = np.stack([encoder_x_val] * batch_size) decoder_time_step = 6", "6 decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f') def func(encoder_x, decoder_x, seq_length):", "size of each model layer. batch_size = 1 x_val =", "test_attention_wrapper_gru_encoder(self): size = 5 time_step = 3 input_size = 4", "3.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): cell", "output_names_with_port = [\"output:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port,", "* batch_size) attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f') def func(x): attention_mechanism", "initializer = init_ops.constant_initializer(0.5) cell_0 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_1 =", "encoder_x_val = np.stack([encoder_x_val] * batch_size) decoder_time_step = 6 decoder_x_val =", "max_time, ...]`. 
decoder_time_step = 6 x_val = np.random.randn(decoder_time_step, input_size).astype('f') x_val", "dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): cell =", "axis=-1) matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1) return tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state,", "self._num_units def call(self, inputs, state): # inputs shape: [batch size,", "= np.stack([decoder_x_val] * batch_size) def func(encoder_x, decoder_x): encoder_cell = LSTMCell(size)", "np.stack([x_val] * batch_size) def func(x): xs, s = dynamic_rnn(cell=cell, dtype=tf.float32,", "GatedGRUCell(units) outputs, cell_state = dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=[4, 3,", "xw = tf.split(tf.matmul(inputs, W) + b, 3, 1) hu =", "[4., 4.], [5., 5.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size)", "output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_const_encoder(self): size =", "1) hu = tf.split(tf.matmul(state, U), 3, 1) r = tf.sigmoid(xw[0]", "batch_size) def func(x): xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False)", "dtype=tf.float32) _ = tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism =", "state_is_tuple=state_is_tuple) outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32) return tf.identity(outputs, name=\"output\"),", "def func(encoder_x, decoder_x, seq_length): encoder_cell = LSTMCell(size) attention_states, _ =", "shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) U = np.arange(75.0, dtype=np.float32).reshape((5, 15))", "# SPDX-License-Identifier: Apache-2.0 \"\"\"Unit Tests for custom rnns.\"\"\" from __future__", "tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port =", "5, 2, 1], dtype=np.int32) def func(x, seq_length): 
# no scope", "np.stack([decoder_x_val] * batch_size) def func(encoder_x, decoder_x): encoder_cell = GRUCell(size) output,", "[\"output_0:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) class GatedGRUCell(RNNCell): def", "shape: [1, 3 * 5] = [1, 15] # state", "unused-wildcard-import from tf2onnx.tf_loader import is_tf2 # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint:", "init_ops.constant_initializer(0.5) cell_0 = LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cell_1 = LSTMCell(units, initializer=initializer,", "= np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32) x_val =", "[2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32) x_val", "x_val = np.stack([x_val] * batch_size) def func(x): # no scope", "* batch_size) y_val = np.array([4, 3, 4, 5, 2, 1],", "time step, input size] = [1, 3, 2] # num_units:", "\"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_const_encoder(self): size = 5 time_step =", "* batch_size) def func(encoder_x, decoder_x): encoder_cell = LSTMCell(size) output, attr_state", "\"input_2:0\": y_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"]", "output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ = tf.identity(output, name=\"output_0\")", "6 x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4.,", "size, time step, size] # attention_state: usually the output of", "= [1, 5] input_dim = inputs.get_shape()[-1] assert input_dim is not", "def func(encoder_x, decoder_x): encoder_cell = LSTMCell(size) output, attr_state = dynamic_rnn(encoder_cell,", "tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val}", "2] # num_units: 5 # W shape: [2, 3 *", 
"test_single_dynamic_custom_rnn(self): size = 5 # size of each model layer.", "\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2()", "hu[2]) next_h = h1 * (1 - z) + state", "_reuse=reuse) self._num_units = hidden_dim self._activation = tf.tanh @property def state_size(self):", "= [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port,", "tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) U = np.arange(75.0, dtype=np.float32).reshape((5,", "This tensor should be shaped `[batch_size, max_time, ...]` encoder_time_step =", "max_time, ...]` encoder_time_step = time_step encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val", "shape: [batch size, state size] = [1, 5] input_dim =", "x_val = np.stack([x_val] * batch_size) def func(x): xs, s =", "# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test # pylint: disable=abstract-method,arguments-differ if is_tf2(): BasicLSTMCell =", "input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units =", "output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return tf.identity(output, name=\"output\"),", "= {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port", "= LSTMCell(units, initializer=initializer, state_is_tuple=state_is_tuple) cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple)", "np.stack([encoder_x_val] * batch_size) decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')", "y_val = np.array([4, 3, 
4, 5, 2, 1], dtype=np.int32) def", "@check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units = 5 batch_size =", "dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict", "...]`. decoder_time_step = 6 x_val = np.random.randn(decoder_time_step, input_size).astype('f') x_val =", "[5., 5.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) y_val =", "input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_gru_encoder(self): size", "dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict = {\"input_1:0\": encoder_x_val,", "4.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): initializer", "{\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func,", "+ b, 3, 1) hu = tf.split(tf.matmul(state, U), 3, 1)", "= dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ = tf.identity(output, name=\"output_0\") attention_states =", "tf.sigmoid(xw[1] + hu[1]) h1 = self._activation(xw[2] + r * hu[2])", "batch_size) def func(encoder_x, decoder_x): encoder_cell = GRUCell(size) output, attr_state =", "xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True) return tf.identity(xs, name=\"output\"),", "state * z return next_h, next_h if __name__ == '__main__':", "0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder_input_has_none_dim(self): size = 5", "encoder. 
# This tensor should be shaped `[batch_size, max_time, ...]`.", "1.], [2., 2.], [3., 3.]], dtype=np.float32) x_val = np.stack([x_val] *", "shaped `[batch_size, max_time, ...]` encoder_time_step = time_step encoder_x_val = np.random.randn(encoder_time_step,", "= 6 decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f') decoder_x_val = np.stack([decoder_x_val] *", "5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)} input_names_with_port", "encoder_x_val, \"input_2:0\": decoder_x_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"] output_names_with_port = [\"output_0:0\",", "class GatedGRUCell(RNNCell): def __init__(self, hidden_dim, reuse=None): super().__init__(self, _reuse=reuse) self._num_units =", "tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1) return tf.identity(matched_output, name=\"output_0\"), tf.identity(matched_state, name=\"final_state\") feed_dict =", "size batch_size = 9 # shape [batch size, time step,", "...]` encoder_time_step = time_step encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f') encoder_x_val =", "[\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\")", "input_size).astype('f') x_val = np.stack([x_val] * batch_size) attention_states = np.random.randn(batch_size, time_step,", "2, 1]) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict = {\"input_1:0\":", "\"input dimension must be defined\" # W = tf.get_variable(name=\"W\", shape=[input_dim,", "output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_non_const_seq_length(self): units = 5", "cell, x, dtype=tf.float32, sequence_length=tf.identity(seq_length)) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") feed_dict", "shape: [batch 
size, time step, input size] = [1, 3,", "* 5] = [2, 15] # U shape: [5, 3", "= {\"input_1:0\": x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"]", "np.stack([x_val] * batch_size) def func(x): cell = GatedGRUCell(size) xs, s", "dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict =", "[2, 3 * 5] = [2, 15] # U shape:", "from __future__ import print_function import numpy as np import tensorflow", "3, 2, 1, 2, 3, 6], dtype=np.int32)} input_names_with_port = [\"input_1:0\",", "@property def output_size(self): return self._num_units def call(self, inputs, state): #", "[batch size, state size] = [1, 5] input_dim = inputs.get_shape()[-1]", "hu[1]) h1 = self._activation(xw[2] + r * hu[2]) next_h =", "tf.concat([curr_input, state], axis=-1) cell = GRUCell(size) match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism,", "tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state = dynamic_rnn(match_cell_fw, x,", "- z) + state * z return next_h, next_h if", "np.stack([x_val] * batch_size) attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f') def func(x):", "feed_dict = {\"input_1:0\": x_val} output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict,", "= dynamic_rnn(cells, x, dtype=tf.float32) return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\") input_names_with_port", "= [\"output_0:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) class GatedGRUCell(RNNCell):", "tf.contrib.rnn.MultiRNNCell dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn class 
CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8,", "no scope cell = GatedGRUCell(units) outputs, cell_state = dynamic_rnn( cell,", "= dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\") feed_dict", "U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) U =", "def func(x): # no scope cell = GatedGRUCell(units) outputs, cell_state", "func(x, seq_length): # no scope cell = GatedGRUCell(units) outputs, cell_state", "return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\": x_val} input_names_with_port", "call(self, inputs, state): # inputs shape: [batch size, time step,", "tf.identity(cell_state, name=\"cell_state\") input_names_with_port = [\"input_1:0\"] feed_dict = {\"input_1:0\": x_val} output_names_with_port", "= lambda curr_input, state: tf.concat([curr_input, state], axis=-1) cell = LSTMCell(size)", "np.arange(15.0, dtype=np.float32).reshape((1, 15)) xw = tf.split(tf.matmul(inputs, W) + b, 3,", "output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8,", "class CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn(self): size = 5", "is not None, \"input dimension must be defined\" # W", "15)) # b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)", "cell_state = dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=[4, 3, 4, 5,", "time_major=False) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\") feed_dict = {\"input_1:0\": x_val}", "scope cell = GatedGRUCell(units) outputs, cell_state = dynamic_rnn( cell, x,", "absolute_import from __future__ import division from __future__ import print_function 
import", "feed_dict = {\"input_1:0\": encoder_x_val, \"input_2:0\": decoder_x_val} input_names_with_port = [\"input_1:0\", \"input_2:0\"]", "encoder. # This tensor should be shaped `[batch_size, max_time, ...]`", "import print_function import numpy as np import tensorflow as tf", "size = 5 time_step = 3 input_size = 4 attn_size", "dtype=np.float32) x_val = np.stack([x_val] * batch_size) def func(x): initializer =", "def __init__(self, hidden_dim, reuse=None): super().__init__(self, _reuse=reuse) self._num_units = hidden_dim self._activation", "15] # U shape: [5, 3 * 5] = [5,", "tf.get_variable(name=\"W\", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) W = np.arange(30.0, dtype=np.float32).reshape((2,", "x, dtype=tf.float32, sequence_length=[4, 3, 4, 5, 2, 1]) return tf.identity(outputs,", "= np.arange(30.0, dtype=np.float32).reshape((2, 15)) # U = tf.get_variable(name='U', shape=[self._num_units, 3", "@skip_tf2() @allow_missing_shapes(\"Missing RNN shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self): size = 30 time_step", "model layer. 
batch_size = 1 cell = GatedGRUCell(size) x_val =", "def state_size(self): return self._num_units @property def output_size(self): return self._num_units def", "attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32) return tf.identity(output, name=\"output\"), tf.identity(attr_state.cell_state, name=\"final_state\")", "tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn", "[\"input_1:0\", \"input_2:0\", \"input_3:0\"] output_names_with_port = [\"output_0:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port,", "= [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06) @check_opset_min_version(8, \"Scan\")", "match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=attn_size, cell_input_fn=match_input_fn, output_attention=False) output, attr_state =", "= np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5.,", "= dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True) return tf.identity(xs, name=\"output\"), tf.identity(s, name=\"final_state\")", "time step, size] # attention_state: usually the output of an", "@skip_opset(9, \"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing RNN shape\") def test_bidrectional_attention_wrapper_lstm_encoder(self): size =", "3, 1) hu = tf.split(tf.matmul(state, U), 3, 1) r =", "output_size(self): return self._num_units def call(self, inputs, state): # inputs shape:", "tensor should be shaped `[batch_size, max_time, ...]`. 
decoder_time_step = 6", "feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2() def test_single_dynamic_custom_rnn_with_seq_length(self): units", "input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port,", "__future__ import division from __future__ import print_function import numpy as", "= 4 attn_size = size batch_size = 9 # shape", "[batch size, time step, size] # attention_state: usually the output", "W shape: [2, 3 * 5] = [2, 15] #", "1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)", "= np.stack([encoder_x_val] * batch_size) decoder_time_step = 6 decoder_x_val = np.random.randn(decoder_time_step,", "6 x_val = np.random.randn(decoder_time_step, input_size).astype('f') x_val = np.stack([x_val] * batch_size)", "@check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_gru_encoder(self): size = 5 time_step", "4, 3, 2, 1, 2, 3, 6], dtype=np.int32)} input_names_with_port =", "5 # W shape: [2, 3 * 5] = [2,", "Tf2OnnxBackendTestBase from common import * # pylint: disable=wildcard-import, unused-wildcard-import from", "[\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) @check_opset_min_version(8, \"Scan\") @skip_tf2()", "reuse=None): super().__init__(self, _reuse=reuse) self._num_units = hidden_dim self._activation = tf.tanh @property", "self._activation = tf.tanh @property def state_size(self): return self._num_units @property def", "6], dtype=np.int32)} input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"] output_names_with_port = [\"output_0:0\",", "(1 - z) + state * z return next_h, next_h", "x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"final_state:0\"] self.run_test_case(func, feed_dict,", "2, 3, 6], 
dtype=np.int32)} input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"] output_names_with_port", "= tf.identity(output, name=\"output_0\") attention_states = output attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)", "= GRUCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32) _ =", "@check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_opset(9, \"ReverseSequence\") @skip_tf2() @allow_missing_shapes(\"Missing RNN shape\") def", "tf.nn.rnn_cell.LSTMCell RNNCell = tf.nn.rnn_cell.RNNCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell dynamic_rnn = tf.nn.dynamic_rnn", "input_dim is not None, \"input dimension must be defined\" #", "outputs, cell_state = dynamic_rnn( cell, x, dtype=tf.float32, sequence_length=tf.identity(seq_length)) return tf.identity(outputs,", "GatedGRUCell(RNNCell): def __init__(self, hidden_dim, reuse=None): super().__init__(self, _reuse=reuse) self._num_units = hidden_dim", "30] attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states) match_input_fn = lambda curr_input, state:", "sequence_length=tf.identity(seq_length), dtype=tf.float32, time_major=True) matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1) matched_state =", "3 * self._num_units], dtype=tf.float32) W = np.arange(30.0, dtype=np.float32).reshape((2, 15)) #", "GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell dynamic_rnn", "3.], [4., 4.]], dtype=np.float32) x_val = np.stack([x_val] * batch_size) def", "r * hu[2]) next_h = h1 * (1 - z)", "feed_dict, input_names_with_port, output_names_with_port, 0.1) class GatedGRUCell(RNNCell): def __init__(self, hidden_dim, reuse=None):", "cell = GatedGRUCell(units) outputs, cell_state = dynamic_rnn( cell, x, dtype=tf.float32,", "shaped `[batch_size, max_time, ...]`. 
decoder_time_step = 6 x_val = np.random.randn(decoder_time_step,", "\"input_2:0\": decoder_x_val, \"input_3:0\": np.array([6, 5, 4, 3, 2, 1, 2,", "else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell GRUCell = tf.nn.rnn_cell.LSTMCell", "an RNN encoder. # This tensor should be shaped `[batch_size,", "func(encoder_x, decoder_x): encoder_cell = GRUCell(size) output, attr_state = dynamic_rnn(encoder_cell, encoder_x,", "batch_size = 6 x_val = np.array([[1., 1.], [2., 2.], [3.,", "= tf.sigmoid(xw[0] + hu[0]) z = tf.sigmoid(xw[1] + hu[1]) h1", "input_size).astype('f') def func(encoder_x, decoder_x, seq_length): encoder_cell = LSTMCell(size) attention_states, _", "* self._num_units], dtype=tf.float32) W = np.arange(30.0, dtype=np.float32).reshape((2, 15)) # U", "x_val} input_names_with_port = [\"input_1:0\"] output_names_with_port = [\"output:0\", \"cell_state:0\"] self.run_test_case(func, feed_dict,", "= [1, 3, 2] # num_units: 5 # W shape:", "0.1) @check_opset_min_version(8, \"Scan\") @check_tf_min_version(\"1.8\") @skip_tf2() def test_attention_wrapper_lstm_encoder(self): size = 5" ]
[ "@Email: <EMAIL> # @Date: 04.2020 # Context: CHARM PROJECT -", "CHARM PROJECT - Harzard perception \"\"\" Module documentation. \"\"\" #", "- Harzard perception \"\"\" Module documentation. \"\"\" # Imports import", "documentation. \"\"\" # Imports import sys #import os # Global", "# -*- coding: utf-8 -*- # @Author: <NAME> # @Email:", "perception \"\"\" Module documentation. \"\"\" # Imports import sys #import", "coding: utf-8 -*- #!/user/bin/env python3 # -*- coding: utf-8 -*-", "-*- #!/user/bin/env python3 # -*- coding: utf-8 -*- # @Author:", "not args: print('usage: [--flags options] [inputs] ') sys.exit(1) # Main", "utf-8 -*- # @Author: <NAME> # @Email: <EMAIL> # @Date:", "<EMAIL> # @Date: 04.2020 # Context: CHARM PROJECT - Harzard", "-*- coding: utf-8 -*- # @Author: <NAME> # @Email: <EMAIL>", "04.2020 # Context: CHARM PROJECT - Harzard perception \"\"\" Module", "# Imports import sys #import os # Global variables #", "declarations def main(): args = sys.argv[1:] if not args: print('usage:", "declarations # Function declarations def main(): args = sys.argv[1:] if", "@Date: 04.2020 # Context: CHARM PROJECT - Harzard perception \"\"\"", "# @Author: <NAME> # @Email: <EMAIL> # @Date: 04.2020 #", "main(): args = sys.argv[1:] if not args: print('usage: [--flags options]", "Module documentation. \"\"\" # Imports import sys #import os #", "@Author: <NAME> # @Email: <EMAIL> # @Date: 04.2020 # Context:", "utf-8 -*- #!/user/bin/env python3 # -*- coding: utf-8 -*- #", "#!/user/bin/env python3 # -*- coding: utf-8 -*- #!/user/bin/env python3 #", "Harzard perception \"\"\" Module documentation. \"\"\" # Imports import sys", "args = sys.argv[1:] if not args: print('usage: [--flags options] [inputs]", "# Context: CHARM PROJECT - Harzard perception \"\"\" Module documentation.", "Context: CHARM PROJECT - Harzard perception \"\"\" Module documentation. 
\"\"\"", "sys.argv[1:] if not args: print('usage: [--flags options] [inputs] ') sys.exit(1)", "# @Email: <EMAIL> # @Date: 04.2020 # Context: CHARM PROJECT", "Class declarations # Function declarations def main(): args = sys.argv[1:]", "os # Global variables # Class declarations # Function declarations", "-*- coding: utf-8 -*- #!/user/bin/env python3 # -*- coding: utf-8", "[inputs] ') sys.exit(1) # Main body if __name__ == '__main__':", "') sys.exit(1) # Main body if __name__ == '__main__': main()", "def main(): args = sys.argv[1:] if not args: print('usage: [--flags", "args: print('usage: [--flags options] [inputs] ') sys.exit(1) # Main body", "-*- # @Author: <NAME> # @Email: <EMAIL> # @Date: 04.2020", "Global variables # Class declarations # Function declarations def main():", "sys #import os # Global variables # Class declarations #", "python3 # -*- coding: utf-8 -*- #!/user/bin/env python3 # -*-", "#import os # Global variables # Class declarations # Function", "if not args: print('usage: [--flags options] [inputs] ') sys.exit(1) #", "# Global variables # Class declarations # Function declarations def", "options] [inputs] ') sys.exit(1) # Main body if __name__ ==", "variables # Class declarations # Function declarations def main(): args", "import sys #import os # Global variables # Class declarations", "coding: utf-8 -*- # @Author: <NAME> # @Email: <EMAIL> #", "#!/user/bin/env python3 # -*- coding: utf-8 -*- # @Author: <NAME>", "python3 # -*- coding: utf-8 -*- # @Author: <NAME> #", "\"\"\" # Imports import sys #import os # Global variables", "Imports import sys #import os # Global variables # Class", "# Class declarations # Function declarations def main(): args =", "Function declarations def main(): args = sys.argv[1:] if not args:", "# @Date: 04.2020 # Context: CHARM PROJECT - Harzard perception", "= sys.argv[1:] if not args: print('usage: [--flags options] [inputs] ')", "PROJECT - Harzard perception \"\"\" Module documentation. 
\"\"\" # Imports", "<NAME> # @Email: <EMAIL> # @Date: 04.2020 # Context: CHARM", "# Function declarations def main(): args = sys.argv[1:] if not", "[--flags options] [inputs] ') sys.exit(1) # Main body if __name__", "\"\"\" Module documentation. \"\"\" # Imports import sys #import os", "# -*- coding: utf-8 -*- #!/user/bin/env python3 # -*- coding:", "print('usage: [--flags options] [inputs] ') sys.exit(1) # Main body if" ]
[ "t[i, j] > t[i, j + p] data[2] = t[i", "between 1 and 4. print 'shape ', t.shape m, n", "gridpeak(t, X=None): # GP = GRIDPEAK(...) # gp = gridpeak(t)", "+ p, j] data[1] = t[i, j - p] <", "t[i - p, j - p] < t[i, j] and", "X: gp[gp < X] = numpy.nan gp = gp /", "method # gp = gridpeak(t,X) optionally remove peak values scoring", "numpy.arange(p, m - p): for j in numpy.arange(p, n -", "< t[i, j] and t[i, j] > t[i, j +", "= GRIDPEAK(...) # gp = gridpeak(t) return gridpeaks based on", "scoring less than X, # where X can be between", "j] < t[i, j] and t[i, j] > t[i +", "> t[i - p, j + p] data[3] = t[i", "# gp = gridpeak(t) return gridpeaks based on Blakely #", "p, j] data[1] = t[i, j - p] < t[i,", "gp = numpy.zeros((m, n)) for i in numpy.arange(p, m -", "GRIDPEAK(...) # gp = gridpeak(t) return gridpeaks based on Blakely", "t[i + p, j] data[1] = t[i, j - p]", "p] < t[i, j] and t[i, j] > t[i +", "- p] < t[i, j] and t[i, j] > t[i,", "and 4. print 'shape ', t.shape m, n = t.shape", "numpy.sum(data) if X: gp[gp < X] = numpy.nan gp =", "n)) for i in numpy.arange(p, m - p): for j", "j] > t[i - p, j + p] data[3] =", "p, j - p] < t[i, j] and t[i, j]", "p, j] < t[i, j] and t[i, j] > t[i", "j] data[1] = t[i, j - p] < t[i, j]", "= 1 gp = numpy.zeros((m, n)) for i in numpy.arange(p,", "optionally remove peak values scoring less than X, # where", "on Blakely # and Simpson method # gp = gridpeak(t,X)", "values scoring less than X, # where X can be", "j] > t[i + p, j] data[1] = t[i, j", "<filename>utils/gridpeak.py import numpy def gridpeak(t, X=None): # GP = GRIDPEAK(...)", "data[3] = t[i - p, j - p] < t[i,", "data = numpy.zeros(4) data[0] = t[i - p, j] <", "j + p] data[3] = t[i - p, j -", "data[0] = t[i - p, j] < t[i, j] and", "X] = numpy.nan gp = gp / gp return gp", "= t[i + p, j - p] < t[i, j]", "numpy.arange(p, n - p): data = numpy.zeros(4) data[0] = t[i", "gridpeak(t) return gridpeaks based on Blakely # and Simpson method", "X can be between 1 and 4. 
print 'shape ',", "t[i, j] > t[i - p, j + p] data[3]", "= numpy.zeros(4) data[0] = t[i - p, j] < t[i,", "and Simpson method # gp = gridpeak(t,X) optionally remove peak", "m, n = t.shape p = 1 gp = numpy.zeros((m,", "numpy.zeros((m, n)) for i in numpy.arange(p, m - p): for", "data[2] = t[i + p, j - p] < t[i,", "based on Blakely # and Simpson method # gp =", "- p, j + p] data[3] = t[i - p,", "t[i, j] and t[i, j] > t[i + p, j", "+ p] data[3] = t[i - p, j - p]", "j] and t[i, j] > t[i + p, j +", "1 and 4. print 'shape ', t.shape m, n =", "Simpson method # gp = gridpeak(t,X) optionally remove peak values", "+ p] gp[i, j] = numpy.sum(data) if X: gp[gp <", "p] < t[i, j] and t[i, j] > t[i, j", "for j in numpy.arange(p, n - p): data = numpy.zeros(4)", "p, j + p] data[3] = t[i - p, j", "p] < t[i, j] and t[i, j] > t[i -", "4. print 'shape ', t.shape m, n = t.shape p", "> t[i, j + p] data[2] = t[i + p,", "- p, j] < t[i, j] and t[i, j] >", "t.shape p = 1 gp = numpy.zeros((m, n)) for i", "p] data[2] = t[i + p, j - p] <", "gridpeaks based on Blakely # and Simpson method # gp", "p): for j in numpy.arange(p, n - p): data =", "and t[i, j] > t[i + p, j] data[1] =", "GP = GRIDPEAK(...) # gp = gridpeak(t) return gridpeaks based", "import numpy def gridpeak(t, X=None): # GP = GRIDPEAK(...) #", "# gp = gridpeak(t,X) optionally remove peak values scoring less", "t.shape m, n = t.shape p = 1 gp =", "j - p] < t[i, j] and t[i, j] >", "Blakely # and Simpson method # gp = gridpeak(t,X) optionally", "in numpy.arange(p, m - p): for j in numpy.arange(p, n", "where X can be between 1 and 4. print 'shape", "and t[i, j] > t[i + p, j + p]", "+ p, j - p] < t[i, j] and t[i,", "numpy.zeros(4) data[0] = t[i - p, j] < t[i, j]", "t[i, j] and t[i, j] > t[i - p, j", "and t[i, j] > t[i - p, j + p]", "def gridpeak(t, X=None): # GP = GRIDPEAK(...) 
# gp =", "j] and t[i, j] > t[i - p, j +", "- p): for j in numpy.arange(p, n - p): data", "data[1] = t[i, j - p] < t[i, j] and", "< t[i, j] and t[i, j] > t[i + p,", "+ p, j + p] gp[i, j] = numpy.sum(data) if", "return gridpeaks based on Blakely # and Simpson method #", "j] and t[i, j] > t[i + p, j] data[1]", "j + p] data[2] = t[i + p, j -", "+ p] data[2] = t[i + p, j - p]", "p] data[3] = t[i - p, j - p] <", "if X: gp[gp < X] = numpy.nan gp = gp", "less than X, # where X can be between 1", "m - p): for j in numpy.arange(p, n - p):", "p, j + p] gp[i, j] = numpy.sum(data) if X:", "p): data = numpy.zeros(4) data[0] = t[i - p, j]", "t[i, j] > t[i + p, j + p] gp[i,", "remove peak values scoring less than X, # where X", "gp[gp < X] = numpy.nan gp = gp / gp", "n - p): data = numpy.zeros(4) data[0] = t[i -", "can be between 1 and 4. print 'shape ', t.shape", "= gridpeak(t,X) optionally remove peak values scoring less than X,", "n = t.shape p = 1 gp = numpy.zeros((m, n))", "- p): data = numpy.zeros(4) data[0] = t[i - p,", "= t[i, j - p] < t[i, j] and t[i,", "= t[i - p, j - p] < t[i, j]", "t[i, j + p] data[2] = t[i + p, j", "t[i + p, j + p] gp[i, j] = numpy.sum(data)", "# where X can be between 1 and 4. print", "be between 1 and 4. print 'shape ', t.shape m,", "p = 1 gp = numpy.zeros((m, n)) for i in", "than X, # where X can be between 1 and", "t[i + p, j - p] < t[i, j] and", "> t[i + p, j] data[1] = t[i, j -", "t[i, j - p] < t[i, j] and t[i, j]", "< X] = numpy.nan gp = gp / gp return", "for i in numpy.arange(p, m - p): for j in", "numpy def gridpeak(t, X=None): # GP = GRIDPEAK(...) # gp", "- p] < t[i, j] and t[i, j] > t[i", "t[i, j] > t[i + p, j] data[1] = t[i,", "< t[i, j] and t[i, j] > t[i - p,", "gp[i, j] = numpy.sum(data) if X: gp[gp < X] =", "> t[i + p, j + p] gp[i, j] =", "= gridpeak(t) return gridpeaks based on Blakely # and Simpson", "j] > t[i, j + p] data[2] = t[i +", "= numpy.zeros((m, n)) for i in numpy.arange(p, m - p):", "# GP = GRIDPEAK(...) 
# gp = gridpeak(t) return gridpeaks", "X, # where X can be between 1 and 4.", "in numpy.arange(p, n - p): data = numpy.zeros(4) data[0] =", "= t.shape p = 1 gp = numpy.zeros((m, n)) for", "j] and t[i, j] > t[i, j + p] data[2]", "peak values scoring less than X, # where X can", "', t.shape m, n = t.shape p = 1 gp", "i in numpy.arange(p, m - p): for j in numpy.arange(p,", "and t[i, j] > t[i, j + p] data[2] =", "j] = numpy.sum(data) if X: gp[gp < X] = numpy.nan", "t[i - p, j + p] data[3] = t[i -", "= numpy.sum(data) if X: gp[gp < X] = numpy.nan gp", "= t[i - p, j] < t[i, j] and t[i,", "gridpeak(t,X) optionally remove peak values scoring less than X, #", "j] > t[i + p, j + p] gp[i, j]", "- p, j - p] < t[i, j] and t[i,", "t[i, j] and t[i, j] > t[i, j + p]", "gp = gridpeak(t) return gridpeaks based on Blakely # and", "gp = gridpeak(t,X) optionally remove peak values scoring less than", "# and Simpson method # gp = gridpeak(t,X) optionally remove", "'shape ', t.shape m, n = t.shape p = 1", "1 gp = numpy.zeros((m, n)) for i in numpy.arange(p, m", "t[i, j] and t[i, j] > t[i + p, j]", "print 'shape ', t.shape m, n = t.shape p =", "t[i - p, j] < t[i, j] and t[i, j]", "j + p] gp[i, j] = numpy.sum(data) if X: gp[gp", "X=None): # GP = GRIDPEAK(...) # gp = gridpeak(t) return", "j in numpy.arange(p, n - p): data = numpy.zeros(4) data[0]", "p] gp[i, j] = numpy.sum(data) if X: gp[gp < X]" ]
[ "raise Exception('This is the error message.') except: errorFile = open('./Chapter", "10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc()) errorFile.close() print('The traceback info was written to", "errorFile = open('./Chapter 10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc()) errorFile.close() print('The traceback info", "error message.') except: errorFile = open('./Chapter 10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc()) errorFile.close()", "'w') errorFile.write(traceback.format_exc()) errorFile.close() print('The traceback info was written to errorInfo.txt')", "the error message.') except: errorFile = open('./Chapter 10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc())", "message.') except: errorFile = open('./Chapter 10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc()) errorFile.close() print('The", "<reponame>Miillky/automate_the_boring_stuff_with_python<gh_stars>0 import traceback try: raise Exception('This is the error message.')", "traceback try: raise Exception('This is the error message.') except: errorFile", "Exception('This is the error message.') except: errorFile = open('./Chapter 10/errorInfo.txt',", "import traceback try: raise Exception('This is the error message.') except:", "try: raise Exception('This is the error message.') except: errorFile =", "except: errorFile = open('./Chapter 10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc()) errorFile.close() print('The traceback", "= open('./Chapter 10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc()) errorFile.close() print('The traceback info was", "open('./Chapter 10/errorInfo.txt', 'w') errorFile.write(traceback.format_exc()) errorFile.close() print('The traceback info was written", "is the error message.') except: errorFile = open('./Chapter 10/errorInfo.txt', 'w')" ]
[ "arr = array.array('f', base64.b64decode(s)) return (arr[0], arr[1:]) def cosineSimilarity(s1, s2):", "access key for the container, if sas is specified, key", "def __init__(self, resource, container, account, sas='', key=''): AzureStorageAccess.__init__(self, container, account,", "== targetType)) df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score')) return df3.where(df3.Score", "either sas or key # class NetworkSimilarity(AzureStorageAccess): # constructor def", "stream path # container: container name in Azure Storage (AS)", "e, targetType = '', maxCount = 20, minScore = 0.0):", "float[] with first element being the magnitude def Base64ToFloatArray(s): arr", "in zip(v1, v2))/(m1 * m2) # Register udf functions so", "e) & (df1.EntityType == targetType)) df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data),", "not have header # you need to provide value for", "getSimilarity(self, e1, e2): df = self.df row1 = df.where(df.EntityId ==", "= array.array('f', base64.b64decode(s)) return (arr[0], arr[1:]) def cosineSimilarity(s1, s2): (m1,", "access signature (sas) for the container # key: access key", "False)) schema.add(StructField('EntityType', StringType(), False)) schema.add(StructField('Data', StringType(), False)) self.df = spark.read.format('csv').options(header='false',", "is specified, key is ignored # # Note: # resource", "need to provide value for either sas or key #", "for the container, if sas is specified, key is ignored", "targetType)) df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score')) return df3.where(df3.Score >=", "else : return sum(x*y for x,y in zip(v1, v2))/(m1 *", "Base64ToFloatArray(s2) if (m1 == 0) or (m2 == 0): return", "cosineSimilarity(row1.Data, row2.Data) def getTopEntities(self, e, targetType = '', maxCount =", "# resource does not have header # you need to", "resource: resource stream path # 
container: container name in Azure", "return self.df def raiseErrorIfNotFound(self, row, e): if row is None:", "False)) schema.add(StructField('Data', StringType(), False)) self.df = spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self):", "account: Azure Storage (AS) account # sas: complete 'Blob service", ": return sum(x*y for x,y in zip(v1, v2))/(m1 * m2)", "Base64ToFloatArray(s1) (m2, v2) = Base64ToFloatArray(s2) if (m1 == 0) or", "== e1).first() self.raiseErrorIfNotFound(row1, e1) row2 = df.where(df.EntityId == e2).first() self.raiseErrorIfNotFound(row2,", "account, sas='', key=''): AzureStorageAccess.__init__(self, container, account, sas, key) schema =", "signature (sas) for the container # key: access key for", "getDataframe(self): return self.df def raiseErrorIfNotFound(self, row, e): if row is", "self.df row1 = df.where(df.EntityId == e1).first() self.raiseErrorIfNotFound(row1, e1) row2 =", "False)) self.df = spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self): return self.df def", "if sas is specified, key is ignored # # Note:", "in dataframe # # Perform same computation as cosineSimilarity() #", "you need to provide value for either sas or key", "it could be used in dataframe # # Perform same", "specified, key is ignored # # Note: # resource does", "= df1.where(df1.EntityId == e).first() self.raiseErrorIfNotFound(row1, e) if targetType == '':", "Databricks notebook source from pyspark.sql.types import * from pyspark.sql import", "# class NetworkSimilarity(AzureStorageAccess): # constructor def __init__(self, resource, container, account,", "a base64 encoded float[] with first element being the magnitude", "Azure Storage (AS) account # sas: complete 'Blob service SAS", "have header # you need to provide value for either", "from pyspark.sql import functions as F import base64 import array", 
"key) schema = StructType() schema.add(StructField('EntityId', LongType(), False)) schema.add(StructField('EntityType', StringType(), False))", "df2 = df1.where(df1.EntityId != e) else : df2 = df1.where((df1.EntityId", "maxCount = 20, minScore = 0.0): df1 = self.df row1", "compute Network Similarity # COMMAND ---------- # Parameters: # resource:", "# account: Azure Storage (AS) account # sas: complete 'Blob", "URL' of the shared access signature (sas) for the container", "df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score')) return df3.where(df3.Score >= minScore).orderBy(df3.Score.desc()).limit(maxCount)", "for the container # key: access key for the container,", "container # key: access key for the container, if sas", "functions so that it could be used in dataframe #", "v2) = Base64ToFloatArray(s2) if (m1 == 0) or (m2 ==", "!= e) & (df1.EntityType == targetType)) df3 = df2.select(df2.EntityId, df2.EntityType,", "= '', maxCount = 20, minScore = 0.0): df1 =", "the magnitude def Base64ToFloatArray(s): arr = array.array('f', base64.b64decode(s)) return (arr[0],", "df2 = df1.where((df1.EntityId != e) & (df1.EntityType == targetType)) df3", "element being the magnitude def Base64ToFloatArray(s): arr = array.array('f', base64.b64decode(s))", "in Azure Storage (AS) account # account: Azure Storage (AS)", "account, sas, key) schema = StructType() schema.add(StructField('EntityId', LongType(), False)) schema.add(StructField('EntityType',", "being the magnitude def Base64ToFloatArray(s): arr = array.array('f', base64.b64decode(s)) return", "# Parameters: # resource: resource stream path # container: container", "class NetworkSimilarity(AzureStorageAccess): # constructor def __init__(self, resource, container, account, sas='',", "== e2).first() self.raiseErrorIfNotFound(row2, e2) return cosineSimilarity(row1.Data, row2.Data) def getTopEntities(self, e,", "service SAS URL' of the shared access signature (sas) for", 
"LongType(), False)) schema.add(StructField('EntityType', StringType(), False)) schema.add(StructField('Data', StringType(), False)) self.df =", "= df1.where(df1.EntityId != e) else : df2 = df1.where((df1.EntityId !=", "Azure Storage (AS) account # account: Azure Storage (AS) account", "sas is specified, key is ignored # # Note: #", "header # you need to provide value for either sas", "be used in dataframe # # Perform same computation as", "first element being the magnitude def Base64ToFloatArray(s): arr = array.array('f',", "e1, e2): df = self.df row1 = df.where(df.EntityId == e1).first()", "# Perform same computation as cosineSimilarity() # @F.udf(\"float\") def udfCosineSimilarity(s1,", "key # class NetworkSimilarity(AzureStorageAccess): # constructor def __init__(self, resource, container,", "# key: access key for the container, if sas is", "= df.where(df.EntityId == e1).first() self.raiseErrorIfNotFound(row1, e1) row2 = df.where(df.EntityId ==", "sum(x*y for x,y in zip(v1, v2))/(m1 * m2) # Register", "= df1.where((df1.EntityId != e) & (df1.EntityType == targetType)) df3 =", "df1.where((df1.EntityId != e) & (df1.EntityType == targetType)) df3 = df2.select(df2.EntityId,", "# COMMAND ---------- # s is a base64 encoded float[]", "v2))/(m1 * m2) # Register udf functions so that it", "df1 = self.df row1 = df1.where(df1.EntityId == e).first() self.raiseErrorIfNotFound(row1, e)", "= self.df row1 = df.where(df.EntityId == e1).first() self.raiseErrorIfNotFound(row1, e1) row2", "# # Perform same computation as cosineSimilarity() # @F.udf(\"float\") def", "complete 'Blob service SAS URL' of the shared access signature", "__init__(self, resource, container, account, sas='', key=''): AzureStorageAccess.__init__(self, container, account, sas,", "cosineSimilarity(s1, s2): (m1, v1) = Base64ToFloatArray(s1) (m2, v2) = Base64ToFloatArray(s2)", "row2.Data) def getTopEntities(self, e, targetType = '', maxCount = 20,", "of the shared access signature (sas) for the container #", 
"spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self): return self.df def raiseErrorIfNotFound(self, row, e):", "(df1.EntityType == targetType)) df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score')) return", "df1.where(df1.EntityId == e).first() self.raiseErrorIfNotFound(row1, e) if targetType == '': df2", "container: container name in Azure Storage (AS) account # account:", "targetType == '': df2 = df1.where(df1.EntityId != e) else :", "df.where(df.EntityId == e2).first() self.raiseErrorIfNotFound(row2, e2) return cosineSimilarity(row1.Data, row2.Data) def getTopEntities(self,", "StringType(), False)) self.df = spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self): return self.df", "(m1, v1) = Base64ToFloatArray(s1) (m2, v2) = Base64ToFloatArray(s2) if (m1", "# resource: resource stream path # container: container name in", "return sum(x*y for x,y in zip(v1, v2))/(m1 * m2) #", "ignored # # Note: # resource does not have header", "udfCosineSimilarity(s1, s2): return cosineSimilarity(s1, s2) # COMMAND ---------- # MAGIC", "pyspark.sql import functions as F import base64 import array #", "# constructor def __init__(self, resource, container, account, sas='', key=''): AzureStorageAccess.__init__(self,", "path # container: container name in Azure Storage (AS) account", "str(e) + ' not found') def getSimilarity(self, e1, e2): df", "if (m1 == 0) or (m2 == 0): return 0", "account # account: Azure Storage (AS) account # sas: complete", "e).first() self.raiseErrorIfNotFound(row1, e) if targetType == '': df2 = df1.where(df1.EntityId", "(m2 == 0): return 0 else : return sum(x*y for", "---------- # Parameters: # resource: resource stream path # container:", "# s is a base64 encoded float[] with first element", "0) or (m2 == 0): return 0 else : return", 
"AzureStorageAccess.__init__(self, container, account, sas, key) schema = StructType() schema.add(StructField('EntityId', LongType(),", "the container, if sas is specified, key is ignored #", "v1) = Base64ToFloatArray(s1) (m2, v2) = Base64ToFloatArray(s2) if (m1 ==", "the container # key: access key for the container, if", "' + str(e) + ' not found') def getSimilarity(self, e1,", "base64.b64decode(s)) return (arr[0], arr[1:]) def cosineSimilarity(s1, s2): (m1, v1) =", "== e).first() self.raiseErrorIfNotFound(row1, e) if targetType == '': df2 =", "row1 = df.where(df.EntityId == e1).first() self.raiseErrorIfNotFound(row1, e1) row2 = df.where(df.EntityId", "%md **NetworkSimilarity** class to compute Network Similarity # COMMAND ----------", "# COMMAND ---------- # Parameters: # resource: resource stream path", "to compute Network Similarity # COMMAND ---------- # Parameters: #", "row, e): if row is None: raise KeyError('entity ' +", "udf functions so that it could be used in dataframe", "with first element being the magnitude def Base64ToFloatArray(s): arr =", "COMMAND ---------- # s is a base64 encoded float[] with", "# MAGIC %md **NetworkSimilarity** class to compute Network Similarity #", "is None: raise KeyError('entity ' + str(e) + ' not", "m2) # Register udf functions so that it could be", "s2): return cosineSimilarity(s1, s2) # COMMAND ---------- # MAGIC %md", "self.raiseErrorIfNotFound(row2, e2) return cosineSimilarity(row1.Data, row2.Data) def getTopEntities(self, e, targetType =", "schema.add(StructField('Data', StringType(), False)) self.df = spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self): return", "raise KeyError('entity ' + str(e) + ' not found') def", ": df2 = df1.where((df1.EntityId != e) & (df1.EntityType == targetType))", "F import base64 import array # COMMAND ---------- # s", "= spark.read.format('csv').options(header='false', 
delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self): return self.df def raiseErrorIfNotFound(self, row,", "def cosineSimilarity(s1, s2): (m1, v1) = Base64ToFloatArray(s1) (m2, v2) =", "0): return 0 else : return sum(x*y for x,y in", "schema.add(StructField('EntityType', StringType(), False)) schema.add(StructField('Data', StringType(), False)) self.df = spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource))", "== '': df2 = df1.where(df1.EntityId != e) else : df2", "container name in Azure Storage (AS) account # account: Azure", "to provide value for either sas or key # class", "array # COMMAND ---------- # s is a base64 encoded", "# you need to provide value for either sas or", "# Note: # resource does not have header # you", "functions as F import base64 import array # COMMAND ----------", "# Databricks notebook source from pyspark.sql.types import * from pyspark.sql", "(m1 == 0) or (m2 == 0): return 0 else", "array.array('f', base64.b64decode(s)) return (arr[0], arr[1:]) def cosineSimilarity(s1, s2): (m1, v1)", "# sas: complete 'Blob service SAS URL' of the shared", "# container: container name in Azure Storage (AS) account #", "container, account, sas='', key=''): AzureStorageAccess.__init__(self, container, account, sas, key) schema", "row1 = df1.where(df1.EntityId == e).first() self.raiseErrorIfNotFound(row1, e) if targetType ==", "(AS) account # sas: complete 'Blob service SAS URL' of", "def getTopEntities(self, e, targetType = '', maxCount = 20, minScore", "* m2) # Register udf functions so that it could", "e2).first() self.raiseErrorIfNotFound(row2, e2) return cosineSimilarity(row1.Data, row2.Data) def getTopEntities(self, e, targetType", "df1.where(df1.EntityId != e) else : df2 = df1.where((df1.EntityId != e)", "Storage (AS) account # account: Azure Storage (AS) account #", "e1) row2 = df.where(df.EntityId == e2).first() self.raiseErrorIfNotFound(row2, e2) return 
cosineSimilarity(row1.Data,", "minScore = 0.0): df1 = self.df row1 = df1.where(df1.EntityId ==", "e) else : df2 = df1.where((df1.EntityId != e) & (df1.EntityType", "e2): df = self.df row1 = df.where(df.EntityId == e1).first() self.raiseErrorIfNotFound(row1,", "= df.where(df.EntityId == e2).first() self.raiseErrorIfNotFound(row2, e2) return cosineSimilarity(row1.Data, row2.Data) def", "self.df row1 = df1.where(df1.EntityId == e).first() self.raiseErrorIfNotFound(row1, e) if targetType", "s2) # COMMAND ---------- # MAGIC %md **NetworkSimilarity** class to", "value for either sas or key # class NetworkSimilarity(AzureStorageAccess): #", "container, account, sas, key) schema = StructType() schema.add(StructField('EntityId', LongType(), False))", "self.df def raiseErrorIfNotFound(self, row, e): if row is None: raise", "raiseErrorIfNotFound(self, row, e): if row is None: raise KeyError('entity '", "= 20, minScore = 0.0): df1 = self.df row1 =", "return cosineSimilarity(s1, s2) # COMMAND ---------- # MAGIC %md **NetworkSimilarity**", "cosineSimilarity() # @F.udf(\"float\") def udfCosineSimilarity(s1, s2): return cosineSimilarity(s1, s2) #", "or (m2 == 0): return 0 else : return sum(x*y", "cosineSimilarity(s1, s2) # COMMAND ---------- # MAGIC %md **NetworkSimilarity** class", "== 0): return 0 else : return sum(x*y for x,y", "dataframe # # Perform same computation as cosineSimilarity() # @F.udf(\"float\")", "= 0.0): df1 = self.df row1 = df1.where(df1.EntityId == e).first()", "import base64 import array # COMMAND ---------- # s is", "encoded float[] with first element being the magnitude def Base64ToFloatArray(s):", "# # Note: # resource does not have header #", "(arr[0], arr[1:]) def cosineSimilarity(s1, s2): (m1, v1) = Base64ToFloatArray(s1) (m2,", "used in dataframe # # Perform same computation as cosineSimilarity()", "(sas) for the container # key: access key for the", "not found') def getSimilarity(self, e1, e2): df = self.df row1", "' not found') def getSimilarity(self, 
e1, e2): df = self.df", "Register udf functions so that it could be used in", "targetType = '', maxCount = 20, minScore = 0.0): df1", "class to compute Network Similarity # COMMAND ---------- # Parameters:", "if row is None: raise KeyError('entity ' + str(e) +", "self.raiseErrorIfNotFound(row1, e1) row2 = df.where(df.EntityId == e2).first() self.raiseErrorIfNotFound(row2, e2) return", "return 0 else : return sum(x*y for x,y in zip(v1,", "account # sas: complete 'Blob service SAS URL' of the", "df.where(df.EntityId == e1).first() self.raiseErrorIfNotFound(row1, e1) row2 = df.where(df.EntityId == e2).first()", "constructor def __init__(self, resource, container, account, sas='', key=''): AzureStorageAccess.__init__(self, container,", "---------- # s is a base64 encoded float[] with first", "key: access key for the container, if sas is specified,", "& (df1.EntityType == targetType)) df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score'))", "provide value for either sas or key # class NetworkSimilarity(AzureStorageAccess):", "delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self): return self.df def raiseErrorIfNotFound(self, row, e): if", "arr[1:]) def cosineSimilarity(s1, s2): (m1, v1) = Base64ToFloatArray(s1) (m2, v2)", "COMMAND ---------- # Parameters: # resource: resource stream path #", "StringType(), False)) schema.add(StructField('Data', StringType(), False)) self.df = spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def", "NetworkSimilarity(AzureStorageAccess): # constructor def __init__(self, resource, container, account, sas='', key=''):", "or key # class NetworkSimilarity(AzureStorageAccess): # constructor def __init__(self, resource,", "import * from pyspark.sql import functions as F import base64", "does not have header # you need to provide value", "x,y in zip(v1, v2))/(m1 * m2) # Register udf 
functions", "Parameters: # resource: resource stream path # container: container name", "'Blob service SAS URL' of the shared access signature (sas)", "SAS URL' of the shared access signature (sas) for the", "(AS) account # account: Azure Storage (AS) account # sas:", "could be used in dataframe # # Perform same computation", "row2 = df.where(df.EntityId == e2).first() self.raiseErrorIfNotFound(row2, e2) return cosineSimilarity(row1.Data, row2.Data)", "0 else : return sum(x*y for x,y in zip(v1, v2))/(m1", "e2) return cosineSimilarity(row1.Data, row2.Data) def getTopEntities(self, e, targetType = '',", "found') def getSimilarity(self, e1, e2): df = self.df row1 =", "def raiseErrorIfNotFound(self, row, e): if row is None: raise KeyError('entity", "that it could be used in dataframe # # Perform", "Network Similarity # COMMAND ---------- # Parameters: # resource: resource", "Note: # resource does not have header # you need", "Perform same computation as cosineSimilarity() # @F.udf(\"float\") def udfCosineSimilarity(s1, s2):", "return cosineSimilarity(row1.Data, row2.Data) def getTopEntities(self, e, targetType = '', maxCount", "is a base64 encoded float[] with first element being the", "self.df = spark.read.format('csv').options(header='false', delimiter='\\t').schema(schema).load(self.getFullpath(resource)) def getDataframe(self): return self.df def raiseErrorIfNotFound(self,", "shared access signature (sas) for the container # key: access", "row is None: raise KeyError('entity ' + str(e) + '", "# COMMAND ---------- # MAGIC %md **NetworkSimilarity** class to compute", "StructType() schema.add(StructField('EntityId', LongType(), False)) schema.add(StructField('EntityType', StringType(), False)) schema.add(StructField('Data', StringType(), False))", "sas, key) schema = StructType() schema.add(StructField('EntityId', LongType(), False)) schema.add(StructField('EntityType', StringType(),", "base64 import array # COMMAND ---------- # s is a", "from pyspark.sql.types import 
* from pyspark.sql import functions as F", "computation as cosineSimilarity() # @F.udf(\"float\") def udfCosineSimilarity(s1, s2): return cosineSimilarity(s1,", "e) if targetType == '': df2 = df1.where(df1.EntityId != e)", "sas: complete 'Blob service SAS URL' of the shared access", "= self.df row1 = df1.where(df1.EntityId == e).first() self.raiseErrorIfNotFound(row1, e) if", "schema = StructType() schema.add(StructField('EntityId', LongType(), False)) schema.add(StructField('EntityType', StringType(), False)) schema.add(StructField('Data',", "sas='', key=''): AzureStorageAccess.__init__(self, container, account, sas, key) schema = StructType()", "pyspark.sql.types import * from pyspark.sql import functions as F import", "e): if row is None: raise KeyError('entity ' + str(e)", "as cosineSimilarity() # @F.udf(\"float\") def udfCosineSimilarity(s1, s2): return cosineSimilarity(s1, s2)", "+ ' not found') def getSimilarity(self, e1, e2): df =", "s is a base64 encoded float[] with first element being", "# Register udf functions so that it could be used", "is ignored # # Note: # resource does not have", "'', maxCount = 20, minScore = 0.0): df1 = self.df", "KeyError('entity ' + str(e) + ' not found') def getSimilarity(self,", "'': df2 = df1.where(df1.EntityId != e) else : df2 =", "source from pyspark.sql.types import * from pyspark.sql import functions as", "+ str(e) + ' not found') def getSimilarity(self, e1, e2):", "# @F.udf(\"float\") def udfCosineSimilarity(s1, s2): return cosineSimilarity(s1, s2) # COMMAND", "key for the container, if sas is specified, key is", "0.0): df1 = self.df row1 = df1.where(df1.EntityId == e).first() self.raiseErrorIfNotFound(row1,", "the shared access signature (sas) for the container # key:", "= Base64ToFloatArray(s1) (m2, v2) = Base64ToFloatArray(s2) if (m1 == 0)", "schema.add(StructField('EntityId', LongType(), False)) schema.add(StructField('EntityType', StringType(), False)) schema.add(StructField('Data', StringType(), False)) self.df", 
"resource, container, account, sas='', key=''): AzureStorageAccess.__init__(self, container, account, sas, key)", "for x,y in zip(v1, v2))/(m1 * m2) # Register udf", "container, if sas is specified, key is ignored # #", "(m2, v2) = Base64ToFloatArray(s2) if (m1 == 0) or (m2", "COMMAND ---------- # MAGIC %md **NetworkSimilarity** class to compute Network", "None: raise KeyError('entity ' + str(e) + ' not found')", "Storage (AS) account # sas: complete 'Blob service SAS URL'", "same computation as cosineSimilarity() # @F.udf(\"float\") def udfCosineSimilarity(s1, s2): return", "self.raiseErrorIfNotFound(row1, e) if targetType == '': df2 = df1.where(df1.EntityId !=", "def getDataframe(self): return self.df def raiseErrorIfNotFound(self, row, e): if row", "df = self.df row1 = df.where(df.EntityId == e1).first() self.raiseErrorIfNotFound(row1, e1)", "20, minScore = 0.0): df1 = self.df row1 = df1.where(df1.EntityId", "import array # COMMAND ---------- # s is a base64", "if targetType == '': df2 = df1.where(df1.EntityId != e) else", "---------- # MAGIC %md **NetworkSimilarity** class to compute Network Similarity", "resource stream path # container: container name in Azure Storage", "= Base64ToFloatArray(s2) if (m1 == 0) or (m2 == 0):", "sas or key # class NetworkSimilarity(AzureStorageAccess): # constructor def __init__(self,", "getTopEntities(self, e, targetType = '', maxCount = 20, minScore =", "for either sas or key # class NetworkSimilarity(AzureStorageAccess): # constructor", "notebook source from pyspark.sql.types import * from pyspark.sql import functions", "name in Azure Storage (AS) account # account: Azure Storage", "def udfCosineSimilarity(s1, s2): return cosineSimilarity(s1, s2) # COMMAND ---------- #", "s2): (m1, v1) = Base64ToFloatArray(s1) (m2, v2) = Base64ToFloatArray(s2) if", "key=''): AzureStorageAccess.__init__(self, container, account, sas, key) schema = StructType() schema.add(StructField('EntityId',", "def getSimilarity(self, e1, e2): df = self.df 
row1 = df.where(df.EntityId", "magnitude def Base64ToFloatArray(s): arr = array.array('f', base64.b64decode(s)) return (arr[0], arr[1:])", "* from pyspark.sql import functions as F import base64 import", "**NetworkSimilarity** class to compute Network Similarity # COMMAND ---------- #", "key is ignored # # Note: # resource does not", "resource does not have header # you need to provide", "base64 encoded float[] with first element being the magnitude def", "e1).first() self.raiseErrorIfNotFound(row1, e1) row2 = df.where(df.EntityId == e2).first() self.raiseErrorIfNotFound(row2, e2)", "!= e) else : df2 = df1.where((df1.EntityId != e) &", "== 0) or (m2 == 0): return 0 else :", "else : df2 = df1.where((df1.EntityId != e) & (df1.EntityType ==", "return (arr[0], arr[1:]) def cosineSimilarity(s1, s2): (m1, v1) = Base64ToFloatArray(s1)", "as F import base64 import array # COMMAND ---------- #", "def Base64ToFloatArray(s): arr = array.array('f', base64.b64decode(s)) return (arr[0], arr[1:]) def", "so that it could be used in dataframe # #", "MAGIC %md **NetworkSimilarity** class to compute Network Similarity # COMMAND", "@F.udf(\"float\") def udfCosineSimilarity(s1, s2): return cosineSimilarity(s1, s2) # COMMAND ----------", "= StructType() schema.add(StructField('EntityId', LongType(), False)) schema.add(StructField('EntityType', StringType(), False)) schema.add(StructField('Data', StringType(),", "Base64ToFloatArray(s): arr = array.array('f', base64.b64decode(s)) return (arr[0], arr[1:]) def cosineSimilarity(s1,", "zip(v1, v2))/(m1 * m2) # Register udf functions so that", "import functions as F import base64 import array # COMMAND", "Similarity # COMMAND ---------- # Parameters: # resource: resource stream" ]
[ "True elif int(in_val) % 3 == 0 and int(in_val) %", "enter a positive integer.\\n > \") if in_val is 'q':", "= input(\"Please enter a positive integer.\\n > \") if in_val", "in_val is 'q': quit = True elif int(in_val) % 3", "print(\"Buzz\") elif int(in_val) % 3 == 0: print(\"Fizz\") else: pass", "> \") if in_val is 'q': quit = True elif", "elif int(in_val) % 3 == 0 and int(in_val) % 5", "% 3 == 0 and int(in_val) % 5 == 0:", "False while quit is False: in_val = input(\"Please enter a", "quit = True elif int(in_val) % 3 == 0 and", "int(in_val) % 5 == 0: print(\"Buzz\") elif int(in_val) % 3", "is 'q': quit = True elif int(in_val) % 3 ==", "int(in_val) % 3 == 0 and int(in_val) % 5 ==", "while quit is False: in_val = input(\"Please enter a positive", "3 == 0 and int(in_val) % 5 == 0: print(\"FizzBuzz\")", "q to quit\") quit = False while quit is False:", "quit\") quit = False while quit is False: in_val =", "\") if in_val is 'q': quit = True elif int(in_val)", "== 0: print(\"FizzBuzz\") elif int(in_val) % 5 == 0: print(\"Buzz\")", "to quit\") quit = False while quit is False: in_val", "% 5 == 0: print(\"FizzBuzz\") elif int(in_val) % 5 ==", "elif int(in_val) % 5 == 0: print(\"Buzz\") elif int(in_val) %", "= False while quit is False: in_val = input(\"Please enter", "False: in_val = input(\"Please enter a positive integer.\\n > \")", "a positive integer.\\n > \") if in_val is 'q': quit", "= True elif int(in_val) % 3 == 0 and int(in_val)", "5 == 0: print(\"FizzBuzz\") elif int(in_val) % 5 == 0:", "'q': quit = True elif int(in_val) % 3 == 0", "== 0: print(\"Buzz\") elif int(in_val) % 3 == 0: print(\"Fizz\")", "in_val = input(\"Please enter a positive integer.\\n > \") if", "positive integer.\\n > \") if in_val is 'q': quit =", "int(in_val) % 5 == 0: print(\"FizzBuzz\") elif int(in_val) % 5", "if in_val is 'q': quit = True elif int(in_val) %", "input(\"Please enter a positive integer.\\n > \") if in_val is", "and int(in_val) % 5 == 0: print(\"FizzBuzz\") elif int(in_val) 
%", "0: print(\"FizzBuzz\") elif int(in_val) % 5 == 0: print(\"Buzz\") elif", "5 == 0: print(\"Buzz\") elif int(in_val) % 3 == 0:", "% 5 == 0: print(\"Buzz\") elif int(in_val) % 3 ==", "0 and int(in_val) % 5 == 0: print(\"FizzBuzz\") elif int(in_val)", "print(\"FizzBuzz\") elif int(in_val) % 5 == 0: print(\"Buzz\") elif int(in_val)", "print(\"Press q to quit\") quit = False while quit is", "0: print(\"Buzz\") elif int(in_val) % 3 == 0: print(\"Fizz\") else:", "== 0 and int(in_val) % 5 == 0: print(\"FizzBuzz\") elif", "quit = False while quit is False: in_val = input(\"Please", "is False: in_val = input(\"Please enter a positive integer.\\n >", "integer.\\n > \") if in_val is 'q': quit = True", "quit is False: in_val = input(\"Please enter a positive integer.\\n" ]
[ "[INIT, THIS], THIS: { E_OVER: [INIT], E_WAS: [INIT], E_IS: [INIT,", "THIS], THIS: { E_OVER: [INIT], E_WAS: [INIT], E_IS: [INIT, THIS,", "}, }, }, }, PEN: { E_OVER: None, }, },", "pen\", \"entry_state\": INIT, \"data\": { INIT: { E_OVER: [INIT], E_THAT:", "{ E_OVER: [INIT], E_AN: [INIT], E_A: [INIT, THIS, IS, A],", "INIT, IS, PEN, THIS, ) pen_transition_doc_v19 = { \"title\": \"This", "E_OVER, E_PEN, E_PIN, E_THAT, E_THIS, E_WAS, INIT, IS, PEN, THIS,", "a pen\", \"entry_state\": INIT, \"data\": { INIT: { E_OVER: [INIT],", "is a pen\", \"entry_state\": INIT, \"data\": { INIT: { E_OVER:", "[INIT], E_A: [INIT, THIS, IS, A], A: { E_OVER: [INIT],", "E_WAS: [INIT], E_IS: [INIT, THIS, IS], IS: { E_OVER: [INIT],", "{ E_OVER: [INIT], E_WAS: [INIT], E_IS: [INIT, THIS, IS], IS:", ") pen_transition_doc_v19 = { \"title\": \"This is a pen\", \"entry_state\":", "THIS: { E_OVER: [INIT], E_WAS: [INIT], E_IS: [INIT, THIS, IS],", "[INIT, THIS, IS, A], A: { E_OVER: [INIT], E_PIN: [INIT],", "INIT: { E_OVER: [INIT], E_THAT: [INIT], E_THIS: [INIT, THIS], THIS:", "THIS, IS, A], A: { E_OVER: [INIT], E_PIN: [INIT], E_PEN:", "[PEN], }, }, }, }, PEN: { E_OVER: None, },", "= { \"title\": \"This is a pen\", \"entry_state\": INIT, \"data\":", "A, E_A, E_AN, E_IS, E_OVER, E_PEN, E_PIN, E_THAT, E_THIS, E_WAS,", "[INIT, THIS, IS], IS: { E_OVER: [INIT], E_AN: [INIT], E_A:", "IS, PEN, THIS, ) pen_transition_doc_v19 = { \"title\": \"This is", "E_THAT: [INIT], E_THIS: [INIT, THIS], THIS: { E_OVER: [INIT], E_WAS:", "IS], IS: { E_OVER: [INIT], E_AN: [INIT], E_A: [INIT, THIS,", "\"This is a pen\", \"entry_state\": INIT, \"data\": { INIT: {", "{ INIT: { E_OVER: [INIT], E_THAT: [INIT], E_THIS: [INIT, THIS],", "E_A, E_AN, E_IS, E_OVER, E_PEN, E_PIN, E_THAT, E_THIS, E_WAS, INIT,", "THIS, IS], IS: { E_OVER: [INIT], E_AN: [INIT], E_A: [INIT,", "from lesson14_projects.pen.data.const import ( A, E_A, E_AN, E_IS, E_OVER, E_PEN,", "INIT, \"data\": { INIT: { E_OVER: [INIT], E_THAT: [INIT], E_THIS:", "{ E_OVER: 
[INIT], E_THAT: [INIT], E_THIS: [INIT, THIS], THIS: {", "[INIT], E_PIN: [INIT], E_PEN: [PEN], }, }, }, }, PEN:", "E_WAS, INIT, IS, PEN, THIS, ) pen_transition_doc_v19 = { \"title\":", "IS, A], A: { E_OVER: [INIT], E_PIN: [INIT], E_PEN: [PEN],", "E_IS, E_OVER, E_PEN, E_PIN, E_THAT, E_THIS, E_WAS, INIT, IS, PEN,", "[INIT], E_IS: [INIT, THIS, IS], IS: { E_OVER: [INIT], E_AN:", "E_OVER: [INIT], E_AN: [INIT], E_A: [INIT, THIS, IS, A], A:", "A: { E_OVER: [INIT], E_PIN: [INIT], E_PEN: [PEN], }, },", "E_AN: [INIT], E_A: [INIT, THIS, IS, A], A: { E_OVER:", "E_OVER: [INIT], E_WAS: [INIT], E_IS: [INIT, THIS, IS], IS: {", "E_AN, E_IS, E_OVER, E_PEN, E_PIN, E_THAT, E_THIS, E_WAS, INIT, IS,", "[INIT], E_PEN: [PEN], }, }, }, }, PEN: { E_OVER:", "\"title\": \"This is a pen\", \"entry_state\": INIT, \"data\": { INIT:", "THIS, ) pen_transition_doc_v19 = { \"title\": \"This is a pen\",", "E_OVER: [INIT], E_PIN: [INIT], E_PEN: [PEN], }, }, }, },", "E_IS: [INIT, THIS, IS], IS: { E_OVER: [INIT], E_AN: [INIT],", "}, }, }, PEN: { E_OVER: None, }, }, }", "PEN, THIS, ) pen_transition_doc_v19 = { \"title\": \"This is a", "[INIT], E_THIS: [INIT, THIS], THIS: { E_OVER: [INIT], E_WAS: [INIT],", "\"entry_state\": INIT, \"data\": { INIT: { E_OVER: [INIT], E_THAT: [INIT],", "[INIT], E_THAT: [INIT], E_THIS: [INIT, THIS], THIS: { E_OVER: [INIT],", "{ E_OVER: [INIT], E_PIN: [INIT], E_PEN: [PEN], }, }, },", "A], A: { E_OVER: [INIT], E_PIN: [INIT], E_PEN: [PEN], },", "E_A: [INIT, THIS, IS, A], A: { E_OVER: [INIT], E_PIN:", "pen_transition_doc_v19 = { \"title\": \"This is a pen\", \"entry_state\": INIT,", "E_PEN: [PEN], }, }, }, }, PEN: { E_OVER: None,", "E_PIN, E_THAT, E_THIS, E_WAS, INIT, IS, PEN, THIS, ) pen_transition_doc_v19", "import ( A, E_A, E_AN, E_IS, E_OVER, E_PEN, E_PIN, E_THAT,", "lesson14_projects.pen.data.const import ( A, E_A, E_AN, E_IS, E_OVER, E_PEN, E_PIN,", "( A, E_A, E_AN, E_IS, E_OVER, E_PEN, E_PIN, E_THAT, E_THIS,", "E_THAT, E_THIS, E_WAS, INIT, IS, PEN, THIS, ) 
pen_transition_doc_v19 =", "IS: { E_OVER: [INIT], E_AN: [INIT], E_A: [INIT, THIS, IS,", "{ \"title\": \"This is a pen\", \"entry_state\": INIT, \"data\": {", "E_THIS, E_WAS, INIT, IS, PEN, THIS, ) pen_transition_doc_v19 = {", "E_PIN: [INIT], E_PEN: [PEN], }, }, }, }, PEN: {", "E_THIS: [INIT, THIS], THIS: { E_OVER: [INIT], E_WAS: [INIT], E_IS:", "[INIT], E_AN: [INIT], E_A: [INIT, THIS, IS, A], A: {", "E_PEN, E_PIN, E_THAT, E_THIS, E_WAS, INIT, IS, PEN, THIS, )", "[INIT], E_WAS: [INIT], E_IS: [INIT, THIS, IS], IS: { E_OVER:", "E_OVER: [INIT], E_THAT: [INIT], E_THIS: [INIT, THIS], THIS: { E_OVER:", "\"data\": { INIT: { E_OVER: [INIT], E_THAT: [INIT], E_THIS: [INIT," ]
[ "Html import Animation_Html from Iteration import Animation_Iteration from Write import", "\"W\", \"Text\": \"White background\", \"Type\": \"bool\", }, } __Args__=[] Indent=\"", "delay (passed to convert)\", \"Type\": None, }, \"W\": { \"Attr\":", "{ \"Attr\": \"Delay\", \"Text\": \"Animated GIF delay (passed to convert)\",", "import gd,os,time from Html import Animation_Html from Iteration import Animation_Iteration", "{ \"Attr\": \"Clean\", \"Text\": \"Remove PNGs generated\", \"Type\": \"int\", },", "from Html import Animation_Html from Iteration import Animation_Iteration from Write", "import * from Canvas2 import * from Canvas2 import Canvas2", "\"Rewrite image file between iterations\", \"Type\": None, }, \"l\": {", "} __Args__=[] Indent=\" \" W=False Verbose=1 Delay=\"5\" Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\"", "#Clean up afterwords Iteration_Files=[] Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax", "def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax ]) self.Canvas([ pmin,pmax ]).CLI2Obj() ##!", "Canvas2 import * from Canvas2 import Canvas2 from Image import", "Indent=\" \" W=False Verbose=1 Delay=\"5\" Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\"", "text=\"Animation, Path: \"+self.Path text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay:", "__Canvas__=None class Animation( Animation_Html, Animation_Iteration, Animation_Write, Base,HTML ): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\"", "}, \"W\": { \"Attr\": \"W\", \"Text\": \"White background\", \"Type\": \"bool\",", "text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return text ##!", "from Iteration import Animation_Iteration from Write 
import Animation_Write from Base", "\"Type\": None, }, \"W\": { \"Attr\": \"W\", \"Text\": \"White background\",", "Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean up", "__Canvas__): parms={ } __Canvas__=Canvas2(parms,pexts) return __Canvas__ def BackGround_Color(self): if (self.W):", "##! ##! Overrride __str__ to print some useful info. ##!", "import Animation_Html from Iteration import Animation_Iteration from Write import Animation_Write", "None, }, \"-clean\": { \"Attr\": \"Clean\", \"Text\": \"Remove PNGs generated\",", "def BackGround_Color(self): if (self.W): return \"White\" else: return \"Black\" def", "Delay=\"5\" Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean", "pmin,pmax ]).CLI2Obj() ##! ##! Overrride __str__ to print some useful", "class Animation( Animation_Html, Animation_Iteration, Animation_Write, Base,HTML ): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\"", "Clean=0 #Clean up afterwords Iteration_Files=[] Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[", "generated\", \"Type\": \"int\", }, \"-rewrite\": { \"Attr\": \"Images_Rewrite\", \"Text\": \"Rewrite", "Overrride __str__ to print some useful info. ##! def __str__(self):", "Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean up afterwords Iteration_Files=[] Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}):", "loops (passed to convert)\", \"Type\": None, }, \"d\": { \"Attr\":", "GIF delay (passed to convert)\", \"Type\": None, }, \"W\": {", "\"Verbosity level. 
Augment to see more numbers...\", \"Type\": None, },", "import * from Canvas2 import Canvas2 from Image import Image", "{ \"Attr\": \"Images_Rewrite\", \"Text\": \"Rewrite image file between iterations\", \"Type\":", "from Base import * from Canvas2 import * from Canvas2", "import HTML __Canvas__=None class Animation( Animation_Html, Animation_Iteration, Animation_Write, Base,HTML ):", "\"Delay\", \"Text\": \"Animated GIF delay (passed to convert)\", \"Type\": None,", "text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return text ##! ##! Returns", "to convert)\", \"Type\": None, }, \"W\": { \"Attr\": \"W\", \"Text\":", "\"Type\": \"bool\", }, } __Args__=[] Indent=\" \" W=False Verbose=1 Delay=\"5\"", "import Animation_Iteration from Write import Animation_Write from Base import *", "some useful info. ##! def __str__(self): text=\"Animation, Path: \"+self.Path text+=\"\\n\\tFileName:", "None, }, \"l\": { \"Attr\": \"Loop\", \"Text\": \"Animated GIF no", "\"+self.Path text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean:", "in self.__Canvas__ ##! def Canvas(self,pexts=[]): global __Canvas__ # Needed to", "\"Animated GIF no of loops (passed to convert)\", \"Type\": None,", "\"W\": { \"Attr\": \"W\", \"Text\": \"White background\", \"Type\": \"bool\", },", "\"White background\", \"Type\": \"bool\", }, } __Args__=[] Indent=\" \" W=False", "level. Augment to see more numbers...\", \"Type\": None, }, \"-clean\":", "to convert)\", \"Type\": None, }, \"d\": { \"Attr\": \"Delay\", \"Text\":", "__Args__=[] Indent=\" \" W=False Verbose=1 Delay=\"5\" Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\"", "self.__Canvas__=Canvas2(vals,[ pmin,pmax ]) self.Canvas([ pmin,pmax ]).CLI2Obj() ##! ##! Overrride __str__", "\"Verbose\", \"Text\": \"Verbosity level. 
Augment to see more numbers...\", \"Type\":", "<filename>Animation/Main.py import gd,os,time from Html import Animation_Html from Iteration import", "\"Images_Rewrite\", \"Text\": \"Rewrite image file between iterations\", \"Type\": None, },", "\" W=False Verbose=1 Delay=\"5\" Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"]", "]) self.Canvas([ pmin,pmax ]).CLI2Obj() ##! ##! Overrride __str__ to print", "def __str__(self): text=\"Animation, Path: \"+self.Path text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop:", "Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean up afterwords Iteration_Files=[] Images_Rewrite=1 def", "{ \"Attr\": \"W\", \"Text\": \"White background\", \"Type\": \"bool\", }, }", "modify global copy of __Canvas__ if (not __Canvas__): parms={ }", "convert)\", \"Type\": None, }, \"d\": { \"Attr\": \"Delay\", \"Text\": \"Animated", "text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return text ##! ##! Returns Canvas object,", "convert)\", \"Type\": None, }, \"W\": { \"Attr\": \"W\", \"Text\": \"White", "\"d\": { \"Attr\": \"Delay\", \"Text\": \"Animated GIF delay (passed to", "W=False Verbose=1 Delay=\"5\" Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"]", "HTML import HTML __Canvas__=None class Animation( Animation_Html, Animation_Iteration, Animation_Write, Base,HTML", "iterations\", \"Type\": None, }, \"l\": { \"Attr\": \"Loop\", \"Text\": \"Animated", "from Image import Image from HTML import HTML __Canvas__=None class", "Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax ]) self.Canvas([ pmin,pmax ]).CLI2Obj()", "self.Canvas([ pmin,pmax ]).CLI2Obj() ##! ##! 
Overrride __str__ to print some", "print some useful info. ##! def __str__(self): text=\"Animation, Path: \"+self.Path", "\"Type\": None, }, \"-clean\": { \"Attr\": \"Clean\", \"Text\": \"Remove PNGs", "Augment to see more numbers...\", \"Type\": None, }, \"-clean\": {", "pmin,pmax ]) self.Canvas([ pmin,pmax ]).CLI2Obj() ##! ##! Overrride __str__ to", "Base import * from Canvas2 import * from Canvas2 import", "to modify global copy of __Canvas__ if (not __Canvas__): parms={", "__init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax ]) self.Canvas([ pmin,pmax ]).CLI2Obj() ##! ##!", "__str__ to print some useful info. ##! def __str__(self): text=\"Animation,", "* from Canvas2 import Canvas2 from Image import Image from", "__str__(self): text=\"Animation, Path: \"+self.Path text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop", "__Canvas__=Canvas2(parms,pexts) return __Canvas__ def BackGround_Color(self): if (self.W): return \"White\" else:", "\"Text\": \"White background\", \"Type\": \"bool\", }, } __Args__=[] Indent=\" \"", "__Switches__={ \"v\": { \"Attr\": \"Verbose\", \"Text\": \"Verbosity level. Augment to", "Verbose=1 Delay=\"5\" Loop=\"0\" Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0", "\"Text\": \"Animated GIF no of loops (passed to convert)\", \"Type\":", "Path: \"+self.Path text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay", "##! Overrride __str__ to print some useful info. ##! 
def", "image file between iterations\", \"Type\": None, }, \"l\": { \"Attr\":", "Path=\"curves\" Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean up afterwords", "from Write import Animation_Write from Base import * from Canvas2", "}, \"l\": { \"Attr\": \"Loop\", \"Text\": \"Animated GIF no of", "\"Remove PNGs generated\", \"Type\": \"int\", }, \"-rewrite\": { \"Attr\": \"Images_Rewrite\",", "\"v\": { \"Attr\": \"Verbose\", \"Text\": \"Verbosity level. Augment to see", "\"Text\": \"Remove PNGs generated\", \"Type\": \"int\", }, \"-rewrite\": { \"Attr\":", "gd,os,time from Html import Animation_Html from Iteration import Animation_Iteration from", "\"Text\": \"Animated GIF delay (passed to convert)\", \"Type\": None, },", "}, \"-clean\": { \"Attr\": \"Clean\", \"Text\": \"Remove PNGs generated\", \"Type\":", "Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean up afterwords Iteration_Files=[] Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals)", "None, }, \"W\": { \"Attr\": \"W\", \"Text\": \"White background\", \"Type\":", "from HTML import HTML __Canvas__=None class Animation( Animation_Html, Animation_Iteration, Animation_Write,", "Canvas(self,pexts=[]): global __Canvas__ # Needed to modify global copy of", "more numbers...\", \"Type\": None, }, \"-clean\": { \"Attr\": \"Clean\", \"Text\":", "##! 
def __str__(self): text=\"Animation, Path: \"+self.Path text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path", "\"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas())", "\"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return text", "* from Canvas2 import * from Canvas2 import Canvas2 from", "file between iterations\", \"Type\": None, }, \"l\": { \"Attr\": \"Loop\",", "\"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return text ##! ##!", "between iterations\", \"Type\": None, }, \"l\": { \"Attr\": \"Loop\", \"Text\":", "return text ##! ##! Returns Canvas object, stored in self.__Canvas__", "HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\": { \"Attr\": \"Verbose\", \"Text\": \"Verbosity level.", "to print some useful info. ##! def __str__(self): text=\"Animation, Path:", "(not __Canvas__): parms={ } __Canvas__=Canvas2(parms,pexts) return __Canvas__ def BackGround_Color(self): if", "Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\": { \"Attr\": \"Verbose\", \"Text\": \"Verbosity", "\"Attr\": \"Verbose\", \"Text\": \"Verbosity level. 
Augment to see more numbers...\",", "\"bool\", }, } __Args__=[] Indent=\" \" W=False Verbose=1 Delay=\"5\" Loop=\"0\"", "import Canvas2 from Image import Image from HTML import HTML", "import Animation_Write from Base import * from Canvas2 import *", "BackGround_Color(self): if (self.W): return \"White\" else: return \"Black\" def Initialize(self):", "background\", \"Type\": \"bool\", }, } __Args__=[] Indent=\" \" W=False Verbose=1", "\"Attr\": \"Images_Rewrite\", \"Text\": \"Rewrite image file between iterations\", \"Type\": None,", "up afterwords Iteration_Files=[] Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax ])", "##! ##! Returns Canvas object, stored in self.__Canvas__ ##! def", "(passed to convert)\", \"Type\": None, }, \"d\": { \"Attr\": \"Delay\",", "Canvas object, stored in self.__Canvas__ ##! def Canvas(self,pexts=[]): global __Canvas__", "Animation_Write from Base import * from Canvas2 import * from", "\"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return text ##! ##! Returns Canvas object, stored", "__Canvas__ if (not __Canvas__): parms={ } __Canvas__=Canvas2(parms,pexts) return __Canvas__ def", "Write import Animation_Write from Base import * from Canvas2 import", "}, \"d\": { \"Attr\": \"Delay\", \"Text\": \"Animated GIF delay (passed", "{ \"Attr\": \"Loop\", \"Text\": \"Animated GIF no of loops (passed", "\"l\": { \"Attr\": \"Loop\", \"Text\": \"Animated GIF no of loops", "of loops (passed to convert)\", \"Type\": None, }, \"d\": {", "global copy of __Canvas__ if (not __Canvas__): parms={ } __Canvas__=Canvas2(parms,pexts)", "__Canvas__ # Needed to modify global copy of __Canvas__ if", "Curve_Parms_Path=\"\" FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean up afterwords Iteration_Files=[]", "from Canvas2 import * from Canvas2 import Canvas2 from Image", "self.__Canvas__ ##! 
def Canvas(self,pexts=[]): global __Canvas__ # Needed to modify", "afterwords Iteration_Files=[] Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax ]) self.Canvas([", "\"Attr\": \"W\", \"Text\": \"White background\", \"Type\": \"bool\", }, } __Args__=[]", "##! Returns Canvas object, stored in self.__Canvas__ ##! def Canvas(self,pexts=[]):", "info. ##! def __str__(self): text=\"Animation, Path: \"+self.Path text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms:", "\"Clean\", \"Text\": \"Remove PNGs generated\", \"Type\": \"int\", }, \"-rewrite\": {", "##! def Canvas(self,pexts=[]): global __Canvas__ # Needed to modify global", "Animation_Iteration, Animation_Write, Base,HTML ): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\": {", "Iteration_Files=[] Images_Rewrite=1 def __init__(self,pmin,pmax,vals={}): self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax ]) self.Canvas([ pmin,pmax", "Image import Image from HTML import HTML __Canvas__=None class Animation(", "import Image from HTML import HTML __Canvas__=None class Animation( Animation_Html,", "Returns Canvas object, stored in self.__Canvas__ ##! 
def Canvas(self,pexts=[]): global", "FileName=\"Curve\" Name=\"Curve\" Parameters=[\"a\",\"b\",\"c\"] Parameter_Names=[\"a\",\"b\",\"c\"] Clean=0 #Clean up afterwords Iteration_Files=[] Images_Rewrite=1", "\"Text\": \"Rewrite image file between iterations\", \"Type\": None, }, \"l\":", "(passed to convert)\", \"Type\": None, }, \"W\": { \"Attr\": \"W\",", "} __Canvas__=Canvas2(parms,pexts) return __Canvas__ def BackGround_Color(self): if (self.W): return \"White\"", "\"Type\": \"int\", }, \"-rewrite\": { \"Attr\": \"Images_Rewrite\", \"Text\": \"Rewrite image", "Iteration import Animation_Iteration from Write import Animation_Write from Base import", "None, }, \"d\": { \"Attr\": \"Delay\", \"Text\": \"Animated GIF delay", "Canvas2 from Image import Image from HTML import HTML __Canvas__=None", "Animation_Write, Base,HTML ): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\": { \"Attr\":", "\"int\", }, \"-rewrite\": { \"Attr\": \"Images_Rewrite\", \"Text\": \"Rewrite image file", "\"Type\": None, }, \"l\": { \"Attr\": \"Loop\", \"Text\": \"Animated GIF", "object, stored in self.__Canvas__ ##! def Canvas(self,pexts=[]): global __Canvas__ #", "\"Text\": \"Verbosity level. Augment to see more numbers...\", \"Type\": None,", "Animation_Html, Animation_Iteration, Animation_Write, Base,HTML ): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\":", "PNGs generated\", \"Type\": \"int\", }, \"-rewrite\": { \"Attr\": \"Images_Rewrite\", \"Text\":", "global __Canvas__ # Needed to modify global copy of __Canvas__", "useful info. ##! 
def __str__(self): text=\"Animation, Path: \"+self.Path text+=\"\\n\\tFileName: \"+self.FileName", "no of loops (passed to convert)\", \"Type\": None, }, \"d\":", "Image from HTML import HTML __Canvas__=None class Animation( Animation_Html, Animation_Iteration,", "\"Loop\", \"Text\": \"Animated GIF no of loops (passed to convert)\",", "\"Attr\": \"Delay\", \"Text\": \"Animated GIF delay (passed to convert)\", \"Type\":", "\"Attr\": \"Loop\", \"Text\": \"Animated GIF no of loops (passed to", "text+=\"\\n\"+str(self.Canvas()) return text ##! ##! Returns Canvas object, stored in", "Canvas2 import Canvas2 from Image import Image from HTML import", "Base,HTML ): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\": { \"Attr\": \"Verbose\",", "}, } __Args__=[] Indent=\" \" W=False Verbose=1 Delay=\"5\" Loop=\"0\" Path=\"curves\"", "# Needed to modify global copy of __Canvas__ if (not", "): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\": { \"Attr\": \"Verbose\", \"Text\":", "of __Canvas__ if (not __Canvas__): parms={ } __Canvas__=Canvas2(parms,pexts) return __Canvas__", "see more numbers...\", \"Type\": None, }, \"-clean\": { \"Attr\": \"Clean\",", "__Canvas__ def BackGround_Color(self): if (self.W): return \"White\" else: return \"Black\"", "GIF no of loops (passed to convert)\", \"Type\": None, },", "\"-rewrite\": { \"Attr\": \"Images_Rewrite\", \"Text\": \"Rewrite image file between iterations\",", "Animation_Iteration from Write import Animation_Write from Base import * from", "text ##! ##! 
Returns Canvas object, stored in self.__Canvas__ ##!", "(self.W): return \"White\" else: return \"Black\" def Initialize(self): self.Canvas().Resolution=self.Resolution self.Canvas().Image_Rewrite()", "numbers...\", \"Type\": None, }, \"-clean\": { \"Attr\": \"Clean\", \"Text\": \"Remove", "}, \"-rewrite\": { \"Attr\": \"Images_Rewrite\", \"Text\": \"Rewrite image file between", "if (not __Canvas__): parms={ } __Canvas__=Canvas2(parms,pexts) return __Canvas__ def BackGround_Color(self):", "to see more numbers...\", \"Type\": None, }, \"-clean\": { \"Attr\":", "text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return", "stored in self.__Canvas__ ##! def Canvas(self,pexts=[]): global __Canvas__ # Needed", "def Canvas(self,pexts=[]): global __Canvas__ # Needed to modify global copy", "\"Attr\": \"Clean\", \"Text\": \"Remove PNGs generated\", \"Type\": \"int\", }, \"-rewrite\":", "copy of __Canvas__ if (not __Canvas__): parms={ } __Canvas__=Canvas2(parms,pexts) return", "return __Canvas__ def BackGround_Color(self): if (self.W): return \"White\" else: return", "self.Hash2Obj(vals) self.__Canvas__=Canvas2(vals,[ pmin,pmax ]) self.Canvas([ pmin,pmax ]).CLI2Obj() ##! ##! Overrride", "parms={ } __Canvas__=Canvas2(parms,pexts) return __Canvas__ def BackGround_Color(self): if (self.W): return", "\"-clean\": { \"Attr\": \"Clean\", \"Text\": \"Remove PNGs generated\", \"Type\": \"int\",", "if (self.W): return \"White\" else: return \"Black\" def Initialize(self): self.Canvas().Resolution=self.Resolution", "HTML __Canvas__=None class Animation( Animation_Html, Animation_Iteration, Animation_Write, Base,HTML ): Convert_Bin=\"/usr/bin/convert\"", "\"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean) text+=\"\\n\"+str(self.Canvas()) return text ##! ##! 
Returns Canvas", "text+=\"\\n\\tFileName: \"+self.FileName text+=\"\\n\\tParms: \"+self.Curve_Parms_Path text+=\"\\n\\tLoop: \"+self.Loop text+=\"\\n\\tDelay: \"+self.Delay text+=\"\\n\\tClean: \"+str(self.Clean)", "\"Animated GIF delay (passed to convert)\", \"Type\": None, }, \"W\":", "Animation_Html from Iteration import Animation_Iteration from Write import Animation_Write from", "CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={ \"v\": { \"Attr\": \"Verbose\", \"Text\": \"Verbosity level. Augment", "]).CLI2Obj() ##! ##! Overrride __str__ to print some useful info.", "Animation( Animation_Html, Animation_Iteration, Animation_Write, Base,HTML ): Convert_Bin=\"/usr/bin/convert\" HTML_Root=\"http://127.0.0.1/Graphics\" CGI_Root=\"http://127.0.0.1/cgi-bin/Graphics/Display.py\" __Switches__={", "{ \"Attr\": \"Verbose\", \"Text\": \"Verbosity level. Augment to see more", "Needed to modify global copy of __Canvas__ if (not __Canvas__):", "\"Type\": None, }, \"d\": { \"Attr\": \"Delay\", \"Text\": \"Animated GIF", "from Canvas2 import Canvas2 from Image import Image from HTML" ]
[ "torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract max(log(distance)) for stability. # See the first", "diagonal out later anyway # Cut off to avoid high", "2.0)) weights = torch.exp(log_weights - torch.max(log_weights)) # Sample only negative", "= cutoff self.nonzero_loss_cutoff = nonzero_loss_cutoff def mine(self, embeddings, labels): label_set", "out later anyway # Cut off to avoid high variance.", "Sample only negative examples by setting weights of # the", "https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner): def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs):", "to avoid high variance. dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract", "mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0 weights = weights * mask *", "that we don't get log(0). We mask the diagonal out", "to 0. mask = torch.ones(weights.size()).to(embeddings.device) for i in label_set: idx", "= torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract max(log(distance)) for stability. 
# See", "as lmu # adapted from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py class", "= 0 weights = weights * mask * ((dist_mat <", "paper log_weights = (2.0 - float(d)) * torch.log(dist_mat) - (", "((dist_mat < self.nonzero_loss_cutoff).float()) weights = weights / torch.sum(weights, dim=1, keepdim=True)", "== i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0 weights = weights *", "i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0 weights = weights * mask", "torch.log(dist_mat) - ( float(d - 3) / 2 ) *", "/ 2 ) * torch.log(1.0 - 0.25 * (dist_mat **", "( float(d - 3) / 2 ) * torch.log(1.0 -", "from .base_miner import BasePostGradientMiner import torch from ..utils import loss_and_miner_utils", "BasePostGradientMiner import torch from ..utils import loss_and_miner_utils as lmu #", "mask * ((dist_mat < self.nonzero_loss_cutoff).float()) weights = weights / torch.sum(weights,", "* torch.log(1.0 - 0.25 * (dist_mat ** 2.0)) weights =", "< self.nonzero_loss_cutoff).float()) weights = weights / torch.sum(weights, dim=1, keepdim=True) np_weights", "weights = torch.exp(log_weights - torch.max(log_weights)) # Sample only negative examples", "idx = (labels == i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0 weights", "def mine(self, embeddings, labels): label_set = torch.unique(labels) n, d =", "i in label_set: idx = (labels == i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))]", "examples to 0. mask = torch.ones(weights.size()).to(embeddings.device) for i in label_set:", "embeddings, labels): label_set = torch.unique(labels) n, d = embeddings.size() dist_mat", "weights of # the same-class examples to 0. 
mask =", "**kwargs): super().__init__(**kwargs) self.cutoff = cutoff self.nonzero_loss_cutoff = nonzero_loss_cutoff def mine(self,", "examples by setting weights of # the same-class examples to", "torch.log(1.0 - 0.25 * (dist_mat ** 2.0)) weights = torch.exp(log_weights", ".base_miner import BasePostGradientMiner import torch from ..utils import loss_and_miner_utils as", "d = embeddings.size() dist_mat = lmu.dist_mat(embeddings) dist_mat = dist_mat +", "Subtract max(log(distance)) for stability. # See the first equation from", "(2.0 - float(d)) * torch.log(dist_mat) - ( float(d - 3)", "first equation from Section 4 of the paper log_weights =", "mask the diagonal out later anyway # Cut off to", "See the first equation from Section 4 of the paper", "#! /usr/bin/env python3 from .base_miner import BasePostGradientMiner import torch from", "import BasePostGradientMiner import torch from ..utils import loss_and_miner_utils as lmu", "+ torch.eye(dist_mat.size(0)).to(embeddings.device) # so that we don't get log(0). We", "max(log(distance)) for stability. # See the first equation from Section", "stability. # See the first equation from Section 4 of", "dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device) # so that we don't get log(0).", "- float(d)) * torch.log(dist_mat) - ( float(d - 3) /", "mask = torch.ones(weights.size()).to(embeddings.device) for i in label_set: idx = (labels", "don't get log(0). We mask the diagonal out later anyway", "(dist_mat ** 2.0)) weights = torch.exp(log_weights - torch.max(log_weights)) # Sample", "log(0). We mask the diagonal out later anyway # Cut", "so that we don't get log(0). We mask the diagonal", "of # the same-class examples to 0. mask = torch.ones(weights.size()).to(embeddings.device)", "dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract max(log(distance)) for stability. 
#", "dist_mat = lmu.dist_mat(embeddings) dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device) # so", "setting weights of # the same-class examples to 0. mask", "= embeddings.size() dist_mat = lmu.dist_mat(embeddings) dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device)", "= weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float()) weights =", "= torch.unique(labels) n, d = embeddings.size() dist_mat = lmu.dist_mat(embeddings) dist_mat", "negative examples by setting weights of # the same-class examples", "- 3) / 2 ) * torch.log(1.0 - 0.25 *", "import loss_and_miner_utils as lmu # adapted from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ #", "from Section 4 of the paper log_weights = (2.0 -", "torch.max(log_weights)) # Sample only negative examples by setting weights of", "# https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner): def __init__(self, cutoff, nonzero_loss_cutoff,", "nonzero_loss_cutoff def mine(self, embeddings, labels): label_set = torch.unique(labels) n, d", "nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs) self.cutoff = cutoff self.nonzero_loss_cutoff = nonzero_loss_cutoff def", ") * torch.log(1.0 - 0.25 * (dist_mat ** 2.0)) weights", "embeddings.size() dist_mat = lmu.dist_mat(embeddings) dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device) #", "weights / torch.sum(weights, dim=1, keepdim=True) np_weights = weights.cpu().numpy() return lmu.get_random_triplet_indices(labels,", "3) / 2 ) * torch.log(1.0 - 0.25 * (dist_mat", "= lmu.dist_mat(embeddings) dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device) # so that", "idx.squeeze(1))] = 0 weights = weights * mask * ((dist_mat", "weights = weights / torch.sum(weights, dim=1, keepdim=True) np_weights = weights.cpu().numpy()", "# Sample only negative examples by setting weights of #", "only 
negative examples by setting weights of # the same-class", "for i in label_set: idx = (labels == i).nonzero() mask[torch.meshgrid(idx.squeeze(1),", "Section 4 of the paper log_weights = (2.0 - float(d))", "torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract max(log(distance)) for stability. # See the", "* mask * ((dist_mat < self.nonzero_loss_cutoff).float()) weights = weights /", "label_set = torch.unique(labels) n, d = embeddings.size() dist_mat = lmu.dist_mat(embeddings)", "** 2.0)) weights = torch.exp(log_weights - torch.max(log_weights)) # Sample only", "We mask the diagonal out later anyway # Cut off", "from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner): def __init__(self, cutoff,", "variance. dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract max(log(distance)) for stability.", "for stability. # See the first equation from Section 4", "0. mask = torch.ones(weights.size()).to(embeddings.device) for i in label_set: idx =", "* ((dist_mat < self.nonzero_loss_cutoff).float()) weights = weights / torch.sum(weights, dim=1,", "# adapted from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner): def", "torch.unique(labels) n, d = embeddings.size() dist_mat = lmu.dist_mat(embeddings) dist_mat =", "the same-class examples to 0. mask = torch.ones(weights.size()).to(embeddings.device) for i", "import torch from ..utils import loss_and_miner_utils as lmu # adapted", "torch.eye(dist_mat.size(0)).to(embeddings.device) # so that we don't get log(0). We mask", "/embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner): def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs) self.cutoff", "# Subtract max(log(distance)) for stability. 
# See the first equation", "the paper log_weights = (2.0 - float(d)) * torch.log(dist_mat) -", "weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float()) weights = weights", "= weights / torch.sum(weights, dim=1, keepdim=True) np_weights = weights.cpu().numpy() return", "loss_and_miner_utils as lmu # adapted from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py", "get log(0). We mask the diagonal out later anyway #", "* torch.log(dist_mat) - ( float(d - 3) / 2 )", "in label_set: idx = (labels == i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] =", "lmu.dist_mat(embeddings) dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device) # so that we", "later anyway # Cut off to avoid high variance. dist_mat", "- torch.max(log_weights)) # Sample only negative examples by setting weights", "n, d = embeddings.size() dist_mat = lmu.dist_mat(embeddings) dist_mat = dist_mat", "class DistanceWeightedMiner(BasePostGradientMiner): def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs) self.cutoff =", "self.cutoff = cutoff self.nonzero_loss_cutoff = nonzero_loss_cutoff def mine(self, embeddings, labels):", "from ..utils import loss_and_miner_utils as lmu # adapted from #", "/ torch.sum(weights, dim=1, keepdim=True) np_weights = weights.cpu().numpy() return lmu.get_random_triplet_indices(labels, weights=np_weights)", "high variance. 
dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract max(log(distance)) for", "cutoff, nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs) self.cutoff = cutoff self.nonzero_loss_cutoff = nonzero_loss_cutoff", "torch.ones(weights.size()).to(embeddings.device) for i in label_set: idx = (labels == i).nonzero()", "DistanceWeightedMiner(BasePostGradientMiner): def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs) self.cutoff = cutoff", "# Cut off to avoid high variance. dist_mat = torch.max(dist_mat,", "the first equation from Section 4 of the paper log_weights", "__init__(self, cutoff, nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs) self.cutoff = cutoff self.nonzero_loss_cutoff =", "self.nonzero_loss_cutoff = nonzero_loss_cutoff def mine(self, embeddings, labels): label_set = torch.unique(labels)", "# the same-class examples to 0. mask = torch.ones(weights.size()).to(embeddings.device) for", "super().__init__(**kwargs) self.cutoff = cutoff self.nonzero_loss_cutoff = nonzero_loss_cutoff def mine(self, embeddings,", "torch.exp(log_weights - torch.max(log_weights)) # Sample only negative examples by setting", "def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs) self.cutoff = cutoff self.nonzero_loss_cutoff", "of the paper log_weights = (2.0 - float(d)) * torch.log(dist_mat)", "torch from ..utils import loss_and_miner_utils as lmu # adapted from", "mine(self, embeddings, labels): label_set = torch.unique(labels) n, d = embeddings.size()", "cutoff self.nonzero_loss_cutoff = nonzero_loss_cutoff def mine(self, embeddings, labels): label_set =", "anyway # Cut off to avoid high variance. dist_mat =", "= dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device) # so that we don't get", "we don't get log(0). 
We mask the diagonal out later", "# /embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner): def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs): super().__init__(**kwargs)", "0.25 * (dist_mat ** 2.0)) weights = torch.exp(log_weights - torch.max(log_weights))", "weights = weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float()) weights", "- 0.25 * (dist_mat ** 2.0)) weights = torch.exp(log_weights -", "label_set: idx = (labels == i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0", "# so that we don't get log(0). We mask the", "off to avoid high variance. dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) #", "float(d - 3) / 2 ) * torch.log(1.0 - 0.25", "= (2.0 - float(d)) * torch.log(dist_mat) - ( float(d -", "the diagonal out later anyway # Cut off to avoid", "adapted from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner): def __init__(self,", "float(d)) * torch.log(dist_mat) - ( float(d - 3) / 2", "0 weights = weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float())", "self.nonzero_loss_cutoff).float()) weights = weights / torch.sum(weights, dim=1, keepdim=True) np_weights =", "avoid high variance. dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device)) # Subtract max(log(distance))", "= torch.ones(weights.size()).to(embeddings.device) for i in label_set: idx = (labels ==", "/usr/bin/env python3 from .base_miner import BasePostGradientMiner import torch from ..utils", "..utils import loss_and_miner_utils as lmu # adapted from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/", "4 of the paper log_weights = (2.0 - float(d)) *", "same-class examples to 0. 
mask = torch.ones(weights.size()).to(embeddings.device) for i in", "(labels == i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0 weights = weights", "# See the first equation from Section 4 of the", "= nonzero_loss_cutoff def mine(self, embeddings, labels): label_set = torch.unique(labels) n,", "log_weights = (2.0 - float(d)) * torch.log(dist_mat) - ( float(d", "dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device) # so that we don't", "= (labels == i).nonzero() mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0 weights =", "equation from Section 4 of the paper log_weights = (2.0", "by setting weights of # the same-class examples to 0.", "labels): label_set = torch.unique(labels) n, d = embeddings.size() dist_mat =", "- ( float(d - 3) / 2 ) * torch.log(1.0", "python3 from .base_miner import BasePostGradientMiner import torch from ..utils import", "2 ) * torch.log(1.0 - 0.25 * (dist_mat ** 2.0))", "* (dist_mat ** 2.0)) weights = torch.exp(log_weights - torch.max(log_weights)) #", "Cut off to avoid high variance. dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device))", "lmu # adapted from # https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/ # /embedding_learning/model.py class DistanceWeightedMiner(BasePostGradientMiner):", "= torch.exp(log_weights - torch.max(log_weights)) # Sample only negative examples by" ]
[ "\"Infusion\", \"InfusionParameter\", \"Priming\", \"Motor\", \"DeviceReport\", \"HistoryLog\", \"InfusionSetting\", \"Maintenance\", \"SafetySetting\", \"SystemSetting\",", "-*- coding: utf-8 -*- from .Alarm.alarm import Alarm from .DeliveryView.bolus", ".DeliveryView.bolus import Bolus from .DeliveryView.info import Info from .DeliveryView.infusion import", "from .Alarm.alarm import Alarm from .DeliveryView.bolus import Bolus from .DeliveryView.info", "import DeviceReport from .MenuSettings.history_log import HistoryLog from .MenuSettings.infusion_setting import InfusionSetting", "<reponame>cassie01/PumpLibrary # -*- coding: utf-8 -*- from .Alarm.alarm import Alarm", "import Info from .DeliveryView.infusion import Infusion from .DeliveryView.infusion_parameter import InfusionParameter", "import Bolus from .DeliveryView.info import Info from .DeliveryView.infusion import Infusion", "Maintenance from .MenuSettings.safety_setting import SafetySetting from .MenuSettings.system_setting import SystemSetting from", "DeviceReport from .MenuSettings.history_log import HistoryLog from .MenuSettings.infusion_setting import InfusionSetting from", ".DeliveryView.infusion_parameter import InfusionParameter from .DeliveryView.priming import Priming from .HardwareControl.motor import", "from .MenuSettings.infusion_setting import InfusionSetting from .MenuSettings.maintenance import Maintenance from .MenuSettings.safety_setting", "# -*- coding: utf-8 -*- from .Alarm.alarm import Alarm from", ".MenuSettings.system_setting import SystemSetting from .SensorControl.sensor import Sensor __all__ = [\"Alarm\",", "from .DeliveryView.infusion import Infusion from .DeliveryView.infusion_parameter import InfusionParameter from .DeliveryView.priming", "InfusionParameter from .DeliveryView.priming import Priming from .HardwareControl.motor import Motor from", ".DeliveryView.priming import Priming from .HardwareControl.motor import Motor from .MenuSettings.device_report import", "import 
Sensor __all__ = [\"Alarm\", \"Bolus\", \"Info\", \"Infusion\", \"InfusionParameter\", \"Priming\",", ".MenuSettings.history_log import HistoryLog from .MenuSettings.infusion_setting import InfusionSetting from .MenuSettings.maintenance import", "__all__ = [\"Alarm\", \"Bolus\", \"Info\", \"Infusion\", \"InfusionParameter\", \"Priming\", \"Motor\", \"DeviceReport\",", "Motor from .MenuSettings.device_report import DeviceReport from .MenuSettings.history_log import HistoryLog from", "import SystemSetting from .SensorControl.sensor import Sensor __all__ = [\"Alarm\", \"Bolus\",", "coding: utf-8 -*- from .Alarm.alarm import Alarm from .DeliveryView.bolus import", "Alarm from .DeliveryView.bolus import Bolus from .DeliveryView.info import Info from", "from .MenuSettings.device_report import DeviceReport from .MenuSettings.history_log import HistoryLog from .MenuSettings.infusion_setting", "from .MenuSettings.history_log import HistoryLog from .MenuSettings.infusion_setting import InfusionSetting from .MenuSettings.maintenance", "\"Info\", \"Infusion\", \"InfusionParameter\", \"Priming\", \"Motor\", \"DeviceReport\", \"HistoryLog\", \"InfusionSetting\", \"Maintenance\", \"SafetySetting\",", ".MenuSettings.safety_setting import SafetySetting from .MenuSettings.system_setting import SystemSetting from .SensorControl.sensor import", "from .MenuSettings.system_setting import SystemSetting from .SensorControl.sensor import Sensor __all__ =", "\"Priming\", \"Motor\", \"DeviceReport\", \"HistoryLog\", \"InfusionSetting\", \"Maintenance\", \"SafetySetting\", \"SystemSetting\", \"Sensor\", ]", "import Motor from .MenuSettings.device_report import DeviceReport from .MenuSettings.history_log import HistoryLog", "Info from .DeliveryView.infusion import Infusion from .DeliveryView.infusion_parameter import InfusionParameter from", "Bolus from .DeliveryView.info import Info from .DeliveryView.infusion import Infusion from", "from .SensorControl.sensor import Sensor __all__ = 
[\"Alarm\", \"Bolus\", \"Info\", \"Infusion\",", ".SensorControl.sensor import Sensor __all__ = [\"Alarm\", \"Bolus\", \"Info\", \"Infusion\", \"InfusionParameter\",", "[\"Alarm\", \"Bolus\", \"Info\", \"Infusion\", \"InfusionParameter\", \"Priming\", \"Motor\", \"DeviceReport\", \"HistoryLog\", \"InfusionSetting\",", "from .DeliveryView.priming import Priming from .HardwareControl.motor import Motor from .MenuSettings.device_report", "= [\"Alarm\", \"Bolus\", \"Info\", \"Infusion\", \"InfusionParameter\", \"Priming\", \"Motor\", \"DeviceReport\", \"HistoryLog\",", "SafetySetting from .MenuSettings.system_setting import SystemSetting from .SensorControl.sensor import Sensor __all__", ".MenuSettings.device_report import DeviceReport from .MenuSettings.history_log import HistoryLog from .MenuSettings.infusion_setting import", "import SafetySetting from .MenuSettings.system_setting import SystemSetting from .SensorControl.sensor import Sensor", "from .DeliveryView.bolus import Bolus from .DeliveryView.info import Info from .DeliveryView.infusion", "from .MenuSettings.maintenance import Maintenance from .MenuSettings.safety_setting import SafetySetting from .MenuSettings.system_setting", ".HardwareControl.motor import Motor from .MenuSettings.device_report import DeviceReport from .MenuSettings.history_log import", "import InfusionSetting from .MenuSettings.maintenance import Maintenance from .MenuSettings.safety_setting import SafetySetting", "from .MenuSettings.safety_setting import SafetySetting from .MenuSettings.system_setting import SystemSetting from .SensorControl.sensor", "Priming from .HardwareControl.motor import Motor from .MenuSettings.device_report import DeviceReport from", ".DeliveryView.info import Info from .DeliveryView.infusion import Infusion from .DeliveryView.infusion_parameter import", "\"InfusionParameter\", \"Priming\", \"Motor\", \"DeviceReport\", \"HistoryLog\", \"InfusionSetting\", \"Maintenance\", \"SafetySetting\", \"SystemSetting\", 
\"Sensor\",", "HistoryLog from .MenuSettings.infusion_setting import InfusionSetting from .MenuSettings.maintenance import Maintenance from", "-*- from .Alarm.alarm import Alarm from .DeliveryView.bolus import Bolus from", "SystemSetting from .SensorControl.sensor import Sensor __all__ = [\"Alarm\", \"Bolus\", \"Info\",", ".MenuSettings.maintenance import Maintenance from .MenuSettings.safety_setting import SafetySetting from .MenuSettings.system_setting import", "import Alarm from .DeliveryView.bolus import Bolus from .DeliveryView.info import Info", "import InfusionParameter from .DeliveryView.priming import Priming from .HardwareControl.motor import Motor", "from .DeliveryView.infusion_parameter import InfusionParameter from .DeliveryView.priming import Priming from .HardwareControl.motor", ".Alarm.alarm import Alarm from .DeliveryView.bolus import Bolus from .DeliveryView.info import", ".DeliveryView.infusion import Infusion from .DeliveryView.infusion_parameter import InfusionParameter from .DeliveryView.priming import", "InfusionSetting from .MenuSettings.maintenance import Maintenance from .MenuSettings.safety_setting import SafetySetting from", "import HistoryLog from .MenuSettings.infusion_setting import InfusionSetting from .MenuSettings.maintenance import Maintenance", "from .DeliveryView.info import Info from .DeliveryView.infusion import Infusion from .DeliveryView.infusion_parameter", ".MenuSettings.infusion_setting import InfusionSetting from .MenuSettings.maintenance import Maintenance from .MenuSettings.safety_setting import", "Infusion from .DeliveryView.infusion_parameter import InfusionParameter from .DeliveryView.priming import Priming from", "import Priming from .HardwareControl.motor import Motor from .MenuSettings.device_report import DeviceReport", "from .HardwareControl.motor import Motor from .MenuSettings.device_report import DeviceReport from .MenuSettings.history_log", "import Maintenance from .MenuSettings.safety_setting import 
SafetySetting from .MenuSettings.system_setting import SystemSetting", "\"Bolus\", \"Info\", \"Infusion\", \"InfusionParameter\", \"Priming\", \"Motor\", \"DeviceReport\", \"HistoryLog\", \"InfusionSetting\", \"Maintenance\",", "utf-8 -*- from .Alarm.alarm import Alarm from .DeliveryView.bolus import Bolus", "import Infusion from .DeliveryView.infusion_parameter import InfusionParameter from .DeliveryView.priming import Priming", "Sensor __all__ = [\"Alarm\", \"Bolus\", \"Info\", \"Infusion\", \"InfusionParameter\", \"Priming\", \"Motor\"," ]
[ "--------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- class", "RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\" RAI_INSIGHTS_TYPE_CAUSAL = \"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL", "a tool RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType: CAUSAL = \"causal\"", "= \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\" RAI_INSIGHTS_TYPE_CAUSAL = \"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL =", "\"causal\" COUNTERFACTUAL = \"counterfactual\" ERROR_ANALYSIS = \"error_analysis\" EXPLANATION = \"explanation\"", "\"rai_insights.json\" class PropertyKeyValues: # The property to indicate the type", "( \"_azureml.responsibleai.rai_insights.responsibleai_version\" ) # Property format to indicate presence of", "# The property to indicate the type of Run RAI_INSIGHTS_TYPE_KEY", "at the model under examination RAI_INSIGHTS_MODEL_ID_KEY = \"_azureml.responsibleai.rai_insights.model_id\" # Property", "to record responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = ( \"_azureml.responsibleai.rai_insights.responsibleai_version\" ) #", "RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\" class PropertyKeyValues: # The", "tool RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType: CAUSAL = \"causal\" COUNTERFACTUAL", "PropertyKeyValues: # The property to indicate the type of Run", "( \"_azureml.responsibleai.rai_insights.constructor_run\" ) # Property to record responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY", "\"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\" RAI_INSIGHTS_TYPE_CAUSAL 
= \"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\"", ") # Property format to indicate presence of a tool", "their constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = ( \"_azureml.responsibleai.rai_insights.constructor_run\" ) # Property", "responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = ( \"_azureml.responsibleai.rai_insights.responsibleai_version\" ) # Property format", "tool runs to point at their constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY =", "The property to indicate the type of Run RAI_INSIGHTS_TYPE_KEY =", "= \"causal\" COUNTERFACTUAL = \"counterfactual\" ERROR_ANALYSIS = \"error_analysis\" EXPLANATION =", "RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = ( \"_azureml.responsibleai.rai_insights.constructor_run\" ) # Property to record responsibleai", "RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER = \"gather\" #", "Run RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\" RAI_INSIGHTS_TYPE_CAUSAL = \"causal\"", "Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- class DashboardInfo: MODEL_ID_KEY", "RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = ( \"_azureml.responsibleai.rai_insights.responsibleai_version\" ) # Property format to indicate", "\"model_id\" RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\" class PropertyKeyValues: #", "= \"id\" # To match Model schema MODEL_INFO_FILENAME = \"model_info.json\"", "point at their constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = ( \"_azureml.responsibleai.rai_insights.constructor_run\" )", "# Property for tool runs to point at their constructor", "for tool runs to point at their constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY", "schema MODEL_INFO_FILENAME = \"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\" RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\"", "to indicate the type of Run RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT", "RAIToolType: CAUSAL = \"causal\" COUNTERFACTUAL = \"counterfactual\" ERROR_ANALYSIS = \"error_analysis\"", "= \"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\" RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME =", "indicate the type of Run RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT =", "class PropertyKeyValues: # The property to indicate the type of", "RAI_INSIGHTS_TYPE_CAUSAL = \"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS", "property to indicate the type of Run RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\"", "RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\" RAI_INSIGHTS_TYPE_CAUSAL = \"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION", "\"construction\" RAI_INSIGHTS_TYPE_CAUSAL = \"causal\" 
RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\"", "Property for tool runs to point at their constructor run", "MODEL_ID_KEY = \"id\" # To match Model schema MODEL_INFO_FILENAME =", "\"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER = \"gather\"", "# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved.", "To match Model schema MODEL_INFO_FILENAME = \"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\"", "\"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\"", "presence of a tool RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType: CAUSAL", "type of Run RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\" RAI_INSIGHTS_TYPE_CAUSAL", "Property to record responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = ( \"_azureml.responsibleai.rai_insights.responsibleai_version\" )", "indicate presence of a tool RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType:", "CAUSAL = \"causal\" COUNTERFACTUAL = \"counterfactual\" ERROR_ANALYSIS = \"error_analysis\" EXPLANATION", "= \"rai_insights.json\" class PropertyKeyValues: # The property to indicate the", "= \"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS =", "\"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType: CAUSAL = \"causal\" COUNTERFACTUAL = \"counterfactual\" ERROR_ANALYSIS", ") # Property to record responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = (", "Model schema MODEL_INFO_FILENAME = \"model_info.json\" 
RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\" RAI_INSIGHTS_RUN_ID_KEY =", "the type of Run RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\"", "(c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- class DashboardInfo:", "RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER", "constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = ( \"_azureml.responsibleai.rai_insights.constructor_run\" ) # Property to", "# Property format to indicate presence of a tool RAI_INSIGHTS_TOOL_KEY_FORMAT", "of a tool RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType: CAUSAL =", "RAI_INSIGHTS_MODEL_ID_KEY = \"_azureml.responsibleai.rai_insights.model_id\" # Property for tool runs to point", "match Model schema MODEL_INFO_FILENAME = \"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\" RAI_INSIGHTS_RUN_ID_KEY", "\"_azureml.responsibleai.rai_insights.constructor_run\" ) # Property to record responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY =", "class RAIToolType: CAUSAL = \"causal\" COUNTERFACTUAL = \"counterfactual\" ERROR_ANALYSIS =", "rights reserved. 
# --------------------------------------------------------- class DashboardInfo: MODEL_ID_KEY = \"id\" #", "= \"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType: CAUSAL = \"causal\" COUNTERFACTUAL = \"counterfactual\"", "\"_azureml.responsibleai.rai_insights.model_id\" # Property for tool runs to point at their", "# To match Model schema MODEL_INFO_FILENAME = \"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY =", "to indicate presence of a tool RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\" class", "= \"construction\" RAI_INSIGHTS_TYPE_CAUSAL = \"causal\" RAI_INSIGHTS_TYPE_COUNTERFACTUAL = \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION =", "the model under examination RAI_INSIGHTS_MODEL_ID_KEY = \"_azureml.responsibleai.rai_insights.model_id\" # Property for", "# Copyright (c) Microsoft Corporation. All rights reserved. # ---------------------------------------------------------", "Property to point at the model under examination RAI_INSIGHTS_MODEL_ID_KEY =", "RAI_INSIGHTS_TYPE_GATHER = \"gather\" # Property to point at the model", "RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\" class RAIToolType: CAUSAL = \"causal\" COUNTERFACTUAL =", "reserved. 
# --------------------------------------------------------- class DashboardInfo: MODEL_ID_KEY = \"id\" # To", "under examination RAI_INSIGHTS_MODEL_ID_KEY = \"_azureml.responsibleai.rai_insights.model_id\" # Property for tool runs", "# --------------------------------------------------------- class DashboardInfo: MODEL_ID_KEY = \"id\" # To match", "format to indicate presence of a tool RAI_INSIGHTS_TOOL_KEY_FORMAT = \"_azureml.responsibleai.rai_insights.has_{0}\"", "\"gather\" # Property to point at the model under examination", "= \"counterfactual\" RAI_INSIGHTS_TYPE_EXPLANATION = \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER =", "examination RAI_INSIGHTS_MODEL_ID_KEY = \"_azureml.responsibleai.rai_insights.model_id\" # Property for tool runs to", "= \"model_id\" RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\" class PropertyKeyValues:", "Corporation. All rights reserved. # --------------------------------------------------------- class DashboardInfo: MODEL_ID_KEY =", "RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\" RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\" class", "= \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER = \"gather\" # Property to point at", "run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = ( \"_azureml.responsibleai.rai_insights.constructor_run\" ) # Property to record", "\"error_analysis\" RAI_INSIGHTS_TYPE_GATHER = \"gather\" # Property to point at the", "at their constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = ( \"_azureml.responsibleai.rai_insights.constructor_run\" ) #", "# Property to record responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = ( \"_azureml.responsibleai.rai_insights.responsibleai_version\"", "\"id\" # To match Model schema MODEL_INFO_FILENAME = \"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY", "point at the model under examination RAI_INSIGHTS_MODEL_ID_KEY = 
\"_azureml.responsibleai.rai_insights.model_id\" #", "= \"_azureml.responsibleai.rai_insights.model_id\" # Property for tool runs to point at", "class DashboardInfo: MODEL_ID_KEY = \"id\" # To match Model schema", "RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER = \"gather\" # Property to point", "RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\" class PropertyKeyValues: # The property to indicate", "All rights reserved. # --------------------------------------------------------- class DashboardInfo: MODEL_ID_KEY = \"id\"", "DashboardInfo: MODEL_ID_KEY = \"id\" # To match Model schema MODEL_INFO_FILENAME", "record responsibleai version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = ( \"_azureml.responsibleai.rai_insights.responsibleai_version\" ) # Property", "= ( \"_azureml.responsibleai.rai_insights.responsibleai_version\" ) # Property format to indicate presence", "\"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\" RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\"", "\"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\" class PropertyKeyValues: # The property to", "\"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER = \"gather\" # Property to", "# Property to point at the model under examination RAI_INSIGHTS_MODEL_ID_KEY", "runs to point at their constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = (", "version RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = ( \"_azureml.responsibleai.rai_insights.responsibleai_version\" ) # Property format to", "= \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME = \"rai_insights.json\" class PropertyKeyValues: # The property", "to point at their constructor run RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = ( \"_azureml.responsibleai.rai_insights.constructor_run\"", "Property format to indicate presence of a tool RAI_INSIGHTS_TOOL_KEY_FORMAT =", 
"--------------------------------------------------------- class DashboardInfo: MODEL_ID_KEY = \"id\" # To match Model", "model under examination RAI_INSIGHTS_MODEL_ID_KEY = \"_azureml.responsibleai.rai_insights.model_id\" # Property for tool", "= ( \"_azureml.responsibleai.rai_insights.constructor_run\" ) # Property to record responsibleai version", "\"_azureml.responsibleai.rai_insights.responsibleai_version\" ) # Property format to indicate presence of a", "of Run RAI_INSIGHTS_TYPE_KEY = \"_azureml.responsibleai.rai_insights.type\" RAI_INSIGHTS_TYPE_CONSTRUCT = \"construction\" RAI_INSIGHTS_TYPE_CAUSAL =", "= \"gather\" # Property to point at the model under", "= \"explanation\" RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = \"error_analysis\" RAI_INSIGHTS_TYPE_GATHER = \"gather\" # Property", "to point at the model under examination RAI_INSIGHTS_MODEL_ID_KEY = \"_azureml.responsibleai.rai_insights.model_id\"", "MODEL_INFO_FILENAME = \"model_info.json\" RAI_INSIGHTS_MODEL_ID_KEY = \"model_id\" RAI_INSIGHTS_RUN_ID_KEY = \"rai_insights_parent_run_id\" RAI_INSIGHTS_PARENT_FILENAME" ]
[ "lua script stdnet/lib/lua/odm.lua''' # indices = dict(((idx.attname, idx.unique) for idx", "= dict(((idx.attname, idx.unique) for idx in meta.indices)) data = meta.as_dict()", "if n else '' def key(self): return (self._dns, self._encoding) def", "prefix namespace to append to all transaction on keys '''", "class RedisStore(RemoteStore): '''Redis :class:`.Store` implementation. ''' protocol_factory = partial(RedisStoreConnection, Consumer)", "= decode_responses if not parser_class: actor = get_actor() pyparser =", "pool_size=50, decode_responses=False, **kwargs): self._decode_responses = decode_responses if not parser_class: actor", "% (self.namespace, meta.table_name) postfix = ':'.join((to_string(p) for p in args", "*args): key = '%s%s' % (self.namespace, meta.table_name) postfix = ':'.join((to_string(p)", "not parser_class: actor = get_actor() pyparser = actor.cfg.redis_py_parser if actor", "self._urlparams.get('namespace') return '%s:' % n if n else '' def", "conn = await self._pool.connect() with conn: result = await conn.execute_pipeline(commands,", "def pool(self): return self._pool @property def namespace(self): '''The prefix namespace", "**kw): return RedisChannels(self.pubsub(protocol=protocol), **kw) def ping(self): return self.client().ping() async def", "connect(self, protocol_factory=None): protocol_factory = protocol_factory or self.create_protocol if isinstance(self._host, tuple):", "% (key, postfix) if postfix else key def meta(self, meta):", "from .client import RedisClient, Pipeline, Consumer, ResponseError from .pubsub import", "options)) result = await consumer.on_finished if isinstance(result, ResponseError): raise result.exception", "indices = dict(((idx.attname, idx.unique) for idx in meta.indices)) data =", "if p is not None)) return '%s:%s' % (key, postfix)", "= parser_class if namespace: self._urlparams['namespace'] = namespace self._pool = Pool(self.connect,", "basekey(self, meta, *args): key = '%s%s' % (self.namespace, 
meta.table_name) postfix", "= self._producer._parser_class() async def execute(self, *args, **options): consumer = self.current_consumer()", "redis_parser from .client import RedisClient, Pipeline, Consumer, ResponseError from .pubsub", "return self.execute('flushdb') def close(self): '''Close all open connections.''' return self._pool.close()", "client(self): '''Get a :class:`.RedisClient` for the Store''' return RedisClient(self) def", "None: self._database = 0 self._database = int(self._database) self.loaded_scripts = set()", "raise NotImplementedError('Could not connect to %s' % str(self._host)) if self._password:", "with connection: result = await connection.execute(*args, **options) return result async", "'' def key(self): return (self._dns, self._encoding) def client(self): '''Get a", "% str(self._host)) if self._password: await connection.execute('AUTH', self._password) if self._database: await", "def channels(self, protocol=None, **kw): return RedisChannels(self.pubsub(protocol=protocol), **kw) def ping(self): return", "to %s' % str(self._host)) if self._password: await connection.execute('AUTH', self._password) if", "for the Store''' return RedisClient(self) def pipeline(self): '''Get a :class:`.Pipeline`", "decode_responses=False, **kwargs): self._decode_responses = decode_responses if not parser_class: actor =", "not connect to %s' % str(self._host)) if self._password: await connection.execute('AUTH',", "keys ''' n = self._urlparams.get('namespace') return '%s:' % n if", "= self.current_consumer() consumer.start((commands, raise_on_error, [])) result = await consumer.on_finished if", "isinstance(self._host, tuple): host, port = self._host transport, connection = await", "pulsar.apps.ds import redis_parser from .client import RedisClient, Pipeline, Consumer, ResponseError", "get_actor() pyparser = actor.cfg.redis_py_parser if actor else False parser_class =", "await self._loop.create_connection( protocol_factory, host, port) else: raise 
NotImplementedError('Could not connect", "def close(self): '''Close all open connections.''' return self._pool.close() def has_query(self,", "if self._database: await connection.execute('SELECT', self._database) return connection def flush(self): return", "pubsub(self, protocol=None): return RedisPubSub(self, protocol=protocol) def channels(self, protocol=None, **kw): return", "'''Extract model metadata for lua script stdnet/lib/lua/odm.lua''' # indices =", "if isinstance(result, ResponseError): raise result.exception return result async def execute_pipeline(self,", "connections.''' return self._pool.close() def has_query(self, query_type): return query_type in self.supported_queries", "host, port) else: raise NotImplementedError('Could not connect to %s' %", "import partial from pulsar import Connection, Pool, get_actor from pulsar.utils.pep", "else False parser_class = redis_parser(pyparser) self._parser_class = parser_class if namespace:", "= self._host transport, connection = await self._loop.create_connection( protocol_factory, host, port)", "self._pool.close() def has_query(self, query_type): return query_type in self.supported_queries def basekey(self,", "str(self._host)) if self._password: await connection.execute('AUTH', self._password) if self._database: await connection.execute('SELECT',", "idx.unique) for idx in meta.indices)) data = meta.as_dict() data['namespace'] =", "postfix else key def meta(self, meta): '''Extract model metadata for", "with conn: result = await conn.execute_pipeline(commands, raise_on_error) return result async", "if isinstance(result, ResponseError): raise result.exception return result class RedisStore(RemoteStore): '''Redis", "0 self._database = int(self._database) self.loaded_scripts = set() @property def pool(self):", "for p in args if p is not None)) return", "RedisChannels class RedisStoreConnection(Connection): def __init__(self, *args, **kw): super().__init__(*args, **kw) self.parser", "partial(RedisStoreConnection, 
Consumer) supported_queries = frozenset(('filter', 'exclude')) def _init(self, namespace=None, parser_class=None,", "= self.basekey(meta) return data class CompiledQuery: def __init__(self, pipe, query):", "= self._urlparams.get('namespace') return '%s:' % n if n else ''", "postfix = ':'.join((to_string(p) for p in args if p is", "loop=self._loop) if self._database is None: self._database = 0 self._database =", "**options) return result async def execute_pipeline(self, commands, raise_on_error=True): conn =", "query_type in self.supported_queries def basekey(self, meta, *args): key = '%s%s'", "on keys ''' n = self._urlparams.get('namespace') return '%s:' % n", "Connection, Pool, get_actor from pulsar.utils.pep import to_string from pulsar.apps.data import", "a :class:`.Pipeline` for the Store''' return Pipeline(self) def pubsub(self, protocol=None):", "in args if p is not None)) return '%s:%s' %", "self.supported_queries def basekey(self, meta, *args): key = '%s%s' % (self.namespace,", "'%s:' % n if n else '' def key(self): return", "key def meta(self, meta): '''Extract model metadata for lua script", "def _init(self, namespace=None, parser_class=None, pool_size=50, decode_responses=False, **kwargs): self._decode_responses = decode_responses", "_init(self, namespace=None, parser_class=None, pool_size=50, decode_responses=False, **kwargs): self._decode_responses = decode_responses if", "def __init__(self, *args, **kw): super().__init__(*args, **kw) self.parser = self._producer._parser_class() async", "import RemoteStore from pulsar.apps.ds import redis_parser from .client import RedisClient,", "from functools import partial from pulsar import Connection, Pool, get_actor", "def execute(self, *args, **options): connection = await self._pool.connect() with connection:", "execute(self, *args, **options): consumer = self.current_consumer() await consumer.start((args, options)) result", "= '%s%s' % (self.namespace, meta.table_name) postfix = ':'.join((to_string(p) for p", 
"pulsar.utils.pep import to_string from pulsar.apps.data import RemoteStore from pulsar.apps.ds import", "actor.cfg.redis_py_parser if actor else False parser_class = redis_parser(pyparser) self._parser_class =", "connection.execute('SELECT', self._database) return connection def flush(self): return self.execute('flushdb') def close(self):", "= await consumer.on_finished if isinstance(result, ResponseError): raise result.exception return result", "or self.create_protocol if isinstance(self._host, tuple): host, port = self._host transport,", "commands, raise_on_error=True): consumer = self.current_consumer() consumer.start((commands, raise_on_error, [])) result =", "self._encoding) def client(self): '''Get a :class:`.RedisClient` for the Store''' return", "actor else False parser_class = redis_parser(pyparser) self._parser_class = parser_class if", "set() @property def pool(self): return self._pool @property def namespace(self): '''The", "n = self._urlparams.get('namespace') return '%s:' % n if n else", "import RedisClient, Pipeline, Consumer, ResponseError from .pubsub import RedisPubSub, RedisChannels", "return query_type in self.supported_queries def basekey(self, meta, *args): key =", "metadata for lua script stdnet/lib/lua/odm.lua''' # indices = dict(((idx.attname, idx.unique)", "actor = get_actor() pyparser = actor.cfg.redis_py_parser if actor else False", "= Pool(self.connect, pool_size=pool_size, loop=self._loop) if self._database is None: self._database =", "return result async def execute_pipeline(self, commands, raise_on_error=True): conn = await", "super().__init__(*args, **kw) self.parser = self._producer._parser_class() async def execute(self, *args, **options):", "Pipeline, Consumer, ResponseError from .pubsub import RedisPubSub, RedisChannels class RedisStoreConnection(Connection):", "raise_on_error) return result async def connect(self, protocol_factory=None): protocol_factory = protocol_factory", "if not parser_class: actor = get_actor() pyparser = 
actor.cfg.redis_py_parser if", "pulsar.apps.data import RemoteStore from pulsar.apps.ds import redis_parser from .client import", "ResponseError): raise result.exception return result class RedisStore(RemoteStore): '''Redis :class:`.Store` implementation.", "self.current_consumer() await consumer.start((args, options)) result = await consumer.on_finished if isinstance(result,", "Store''' return Pipeline(self) def pubsub(self, protocol=None): return RedisPubSub(self, protocol=protocol) def", "self._loop.create_connection( protocol_factory, host, port) else: raise NotImplementedError('Could not connect to", "functools import partial from pulsar import Connection, Pool, get_actor from", "namespace(self): '''The prefix namespace to append to all transaction on", "return (self._dns, self._encoding) def client(self): '''Get a :class:`.RedisClient` for the", "stdnet/lib/lua/odm.lua''' # indices = dict(((idx.attname, idx.unique) for idx in meta.indices))", "*args, **options): consumer = self.current_consumer() await consumer.start((args, options)) result =", "consumer.on_finished if isinstance(result, ResponseError): raise result.exception return result async def", "*args, **kw): super().__init__(*args, **kw) self.parser = self._producer._parser_class() async def execute(self,", "= await self._pool.connect() with connection: result = await connection.execute(*args, **options)", "parser_class if namespace: self._urlparams['namespace'] = namespace self._pool = Pool(self.connect, pool_size=pool_size,", "query_type): return query_type in self.supported_queries def basekey(self, meta, *args): key", "model metadata for lua script stdnet/lib/lua/odm.lua''' # indices = dict(((idx.attname,", "**kw): super().__init__(*args, **kw) self.parser = self._producer._parser_class() async def execute(self, *args,", "self._database is None: self._database = 0 self._database = int(self._database) self.loaded_scripts", "**options): consumer = self.current_consumer() await consumer.start((args, 
options)) result = await", "the Store''' return RedisClient(self) def pipeline(self): '''Get a :class:`.Pipeline` for", "def pubsub(self, protocol=None): return RedisPubSub(self, protocol=protocol) def channels(self, protocol=None, **kw):", "protocol_factory or self.create_protocol if isinstance(self._host, tuple): host, port = self._host", "= redis_parser(pyparser) self._parser_class = parser_class if namespace: self._urlparams['namespace'] = namespace", "async def execute(self, *args, **options): connection = await self._pool.connect() with", "def flush(self): return self.execute('flushdb') def close(self): '''Close all open connections.'''", "self._pool @property def namespace(self): '''The prefix namespace to append to", "port = self._host transport, connection = await self._loop.create_connection( protocol_factory, host,", "= await conn.execute_pipeline(commands, raise_on_error) return result async def connect(self, protocol_factory=None):", "self._pool.connect() with connection: result = await connection.execute(*args, **options) return result", "conn: result = await conn.execute_pipeline(commands, raise_on_error) return result async def", "postfix) if postfix else key def meta(self, meta): '''Extract model", "return self._pool @property def namespace(self): '''The prefix namespace to append", "if namespace: self._urlparams['namespace'] = namespace self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop)", "return result async def execute_pipeline(self, commands, raise_on_error=True): consumer = self.current_consumer()", "self._urlparams['namespace'] = namespace self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop) if self._database", "key(self): return (self._dns, self._encoding) def client(self): '''Get a :class:`.RedisClient` for", "meta): '''Extract model metadata for lua script stdnet/lib/lua/odm.lua''' # indices", "= frozenset(('filter', 'exclude')) def _init(self, namespace=None, parser_class=None, pool_size=50, 
decode_responses=False, **kwargs):", "= await self._loop.create_connection( protocol_factory, host, port) else: raise NotImplementedError('Could not", "def namespace(self): '''The prefix namespace to append to all transaction", "= partial(RedisStoreConnection, Consumer) supported_queries = frozenset(('filter', 'exclude')) def _init(self, namespace=None,", "''' n = self._urlparams.get('namespace') return '%s:' % n if n", "if actor else False parser_class = redis_parser(pyparser) self._parser_class = parser_class", "connection.execute(*args, **options) return result async def execute_pipeline(self, commands, raise_on_error=True): conn", "self._producer._parser_class() async def execute(self, *args, **options): consumer = self.current_consumer() await", "def ping(self): return self.client().ping() async def execute(self, *args, **options): connection", "result = await conn.execute_pipeline(commands, raise_on_error) return result async def connect(self,", "return RedisChannels(self.pubsub(protocol=protocol), **kw) def ping(self): return self.client().ping() async def execute(self,", "protocol_factory = partial(RedisStoreConnection, Consumer) supported_queries = frozenset(('filter', 'exclude')) def _init(self,", "await conn.execute_pipeline(commands, raise_on_error) return result async def connect(self, protocol_factory=None): protocol_factory", "partial from pulsar import Connection, Pool, get_actor from pulsar.utils.pep import", "Pool, get_actor from pulsar.utils.pep import to_string from pulsar.apps.data import RemoteStore", "Pool(self.connect, pool_size=pool_size, loop=self._loop) if self._database is None: self._database = 0", "self.execute('flushdb') def close(self): '''Close all open connections.''' return self._pool.close() def", "return self.client().ping() async def execute(self, *args, **options): connection = await", "# indices = dict(((idx.attname, idx.unique) for idx in meta.indices)) data", "self._database = int(self._database) self.loaded_scripts = set() 
@property def pool(self): return", "[])) result = await consumer.on_finished if isinstance(result, ResponseError): raise result.exception", "execute(self, *args, **options): connection = await self._pool.connect() with connection: result", "meta(self, meta): '''Extract model metadata for lua script stdnet/lib/lua/odm.lua''' #", "dict(((idx.attname, idx.unique) for idx in meta.indices)) data = meta.as_dict() data['namespace']", "'exclude')) def _init(self, namespace=None, parser_class=None, pool_size=50, decode_responses=False, **kwargs): self._decode_responses =", "connection = await self._loop.create_connection( protocol_factory, host, port) else: raise NotImplementedError('Could", "RedisPubSub(self, protocol=protocol) def channels(self, protocol=None, **kw): return RedisChannels(self.pubsub(protocol=protocol), **kw) def", "connection.execute('AUTH', self._password) if self._database: await connection.execute('SELECT', self._database) return connection def", ":class:`.Store` implementation. 
''' protocol_factory = partial(RedisStoreConnection, Consumer) supported_queries = frozenset(('filter',", "await connection.execute(*args, **options) return result async def execute_pipeline(self, commands, raise_on_error=True):", "**kwargs): self._decode_responses = decode_responses if not parser_class: actor = get_actor()", "else key def meta(self, meta): '''Extract model metadata for lua", "namespace self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop) if self._database is None:", "await consumer.on_finished if isinstance(result, ResponseError): raise result.exception return result async", "import to_string from pulsar.apps.data import RemoteStore from pulsar.apps.ds import redis_parser", "self.loaded_scripts = set() @property def pool(self): return self._pool @property def", "if postfix else key def meta(self, meta): '''Extract model metadata", "Consumer, ResponseError from .pubsub import RedisPubSub, RedisChannels class RedisStoreConnection(Connection): def", "return self._pool.close() def has_query(self, query_type): return query_type in self.supported_queries def", "None)) return '%s:%s' % (key, postfix) if postfix else key", "async def execute_pipeline(self, commands, raise_on_error=True): conn = await self._pool.connect() with", "= namespace self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop) if self._database is", "result class RedisStore(RemoteStore): '''Redis :class:`.Store` implementation. 
''' protocol_factory = partial(RedisStoreConnection,", "def execute_pipeline(self, commands, raise_on_error=True): conn = await self._pool.connect() with conn:", "return RedisPubSub(self, protocol=protocol) def channels(self, protocol=None, **kw): return RedisChannels(self.pubsub(protocol=protocol), **kw)", "from pulsar.utils.pep import to_string from pulsar.apps.data import RemoteStore from pulsar.apps.ds", "execute_pipeline(self, commands, raise_on_error=True): consumer = self.current_consumer() consumer.start((commands, raise_on_error, [])) result", "connection = await self._pool.connect() with connection: result = await connection.execute(*args,", "def key(self): return (self._dns, self._encoding) def client(self): '''Get a :class:`.RedisClient`", "RedisClient, Pipeline, Consumer, ResponseError from .pubsub import RedisPubSub, RedisChannels class", "consumer.start((commands, raise_on_error, [])) result = await consumer.on_finished if isinstance(result, ResponseError):", "data['namespace'] = self.basekey(meta) return data class CompiledQuery: def __init__(self, pipe,", "'''Get a :class:`.RedisClient` for the Store''' return RedisClient(self) def pipeline(self):", "self.current_consumer() consumer.start((commands, raise_on_error, [])) result = await consumer.on_finished if isinstance(result,", "await self._pool.connect() with connection: result = await connection.execute(*args, **options) return", "channels(self, protocol=None, **kw): return RedisChannels(self.pubsub(protocol=protocol), **kw) def ping(self): return self.client().ping()", "import Connection, Pool, get_actor from pulsar.utils.pep import to_string from pulsar.apps.data", "= ':'.join((to_string(p) for p in args if p is not", "result.exception return result class RedisStore(RemoteStore): '''Redis :class:`.Store` implementation. 
''' protocol_factory", "not None)) return '%s:%s' % (key, postfix) if postfix else", "the Store''' return Pipeline(self) def pubsub(self, protocol=None): return RedisPubSub(self, protocol=protocol)", "import RedisPubSub, RedisChannels class RedisStoreConnection(Connection): def __init__(self, *args, **kw): super().__init__(*args,", "host, port = self._host transport, connection = await self._loop.create_connection( protocol_factory,", "def pipeline(self): '''Get a :class:`.Pipeline` for the Store''' return Pipeline(self)", "(self._dns, self._encoding) def client(self): '''Get a :class:`.RedisClient` for the Store'''", "all transaction on keys ''' n = self._urlparams.get('namespace') return '%s:'", "= protocol_factory or self.create_protocol if isinstance(self._host, tuple): host, port =", "redis_parser(pyparser) self._parser_class = parser_class if namespace: self._urlparams['namespace'] = namespace self._pool", "pulsar import Connection, Pool, get_actor from pulsar.utils.pep import to_string from", "def has_query(self, query_type): return query_type in self.supported_queries def basekey(self, meta,", "in meta.indices)) data = meta.as_dict() data['namespace'] = self.basekey(meta) return data", "Store''' return RedisClient(self) def pipeline(self): '''Get a :class:`.Pipeline` for the", "'%s%s' % (self.namespace, meta.table_name) postfix = ':'.join((to_string(p) for p in", "execute_pipeline(self, commands, raise_on_error=True): conn = await self._pool.connect() with conn: result", "n else '' def key(self): return (self._dns, self._encoding) def client(self):", "data class CompiledQuery: def __init__(self, pipe, query): self.pipe = pipe", "pool_size=pool_size, loop=self._loop) if self._database is None: self._database = 0 self._database", "**kw) self.parser = self._producer._parser_class() async def execute(self, *args, **options): consumer", "from pulsar import Connection, Pool, get_actor from pulsar.utils.pep import to_string", "connection: result = await 
connection.execute(*args, **options) return result async def", "flush(self): return self.execute('flushdb') def close(self): '''Close all open connections.''' return", "return '%s:%s' % (key, postfix) if postfix else key def", "self.client().ping() async def execute(self, *args, **options): connection = await self._pool.connect()", "namespace: self._urlparams['namespace'] = namespace self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop) if", "a :class:`.RedisClient` for the Store''' return RedisClient(self) def pipeline(self): '''Get", "return RedisClient(self) def pipeline(self): '''Get a :class:`.Pipeline` for the Store'''", "RedisChannels(self.pubsub(protocol=protocol), **kw) def ping(self): return self.client().ping() async def execute(self, *args,", "ResponseError from .pubsub import RedisPubSub, RedisChannels class RedisStoreConnection(Connection): def __init__(self,", "async def connect(self, protocol_factory=None): protocol_factory = protocol_factory or self.create_protocol if", "self._parser_class = parser_class if namespace: self._urlparams['namespace'] = namespace self._pool =", ".client import RedisClient, Pipeline, Consumer, ResponseError from .pubsub import RedisPubSub,", "protocol_factory = protocol_factory or self.create_protocol if isinstance(self._host, tuple): host, port", "result async def connect(self, protocol_factory=None): protocol_factory = protocol_factory or self.create_protocol", "script stdnet/lib/lua/odm.lua''' # indices = dict(((idx.attname, idx.unique) for idx in", "self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop) if self._database is None: self._database", "return Pipeline(self) def pubsub(self, protocol=None): return RedisPubSub(self, protocol=protocol) def channels(self,", "result.exception return result async def execute_pipeline(self, commands, raise_on_error=True): consumer =", "idx in meta.indices)) data = meta.as_dict() data['namespace'] = self.basekey(meta) return", "pyparser = 
actor.cfg.redis_py_parser if actor else False parser_class = redis_parser(pyparser)", "@property def pool(self): return self._pool @property def namespace(self): '''The prefix", "result async def execute_pipeline(self, commands, raise_on_error=True): conn = await self._pool.connect()", ":class:`.RedisClient` for the Store''' return RedisClient(self) def pipeline(self): '''Get a", "if isinstance(self._host, tuple): host, port = self._host transport, connection =", "'%s:%s' % (key, postfix) if postfix else key def meta(self,", "raise result.exception return result class RedisStore(RemoteStore): '''Redis :class:`.Store` implementation. '''", "RedisPubSub, RedisChannels class RedisStoreConnection(Connection): def __init__(self, *args, **kw): super().__init__(*args, **kw)", "from pulsar.apps.ds import redis_parser from .client import RedisClient, Pipeline, Consumer,", "await connection.execute('SELECT', self._database) return connection def flush(self): return self.execute('flushdb') def", "isinstance(result, ResponseError): raise result.exception return result async def execute_pipeline(self, commands,", "def execute(self, *args, **options): consumer = self.current_consumer() await consumer.start((args, options))", "class RedisStoreConnection(Connection): def __init__(self, *args, **kw): super().__init__(*args, **kw) self.parser =", "isinstance(result, ResponseError): raise result.exception return result class RedisStore(RemoteStore): '''Redis :class:`.Store`", "namespace=None, parser_class=None, pool_size=50, decode_responses=False, **kwargs): self._decode_responses = decode_responses if not", "consumer.start((args, options)) result = await consumer.on_finished if isinstance(result, ResponseError): raise", "to append to all transaction on keys ''' n =", "args if p is not None)) return '%s:%s' % (key,", ".pubsub import RedisPubSub, RedisChannels class RedisStoreConnection(Connection): def __init__(self, *args, **kw):", "raise_on_error=True): consumer = 
self.current_consumer() consumer.start((commands, raise_on_error, [])) result = await", "self._password: await connection.execute('AUTH', self._password) if self._database: await connection.execute('SELECT', self._database) return", "async def execute(self, *args, **options): consumer = self.current_consumer() await consumer.start((args,", "RemoteStore from pulsar.apps.ds import redis_parser from .client import RedisClient, Pipeline,", "protocol_factory, host, port) else: raise NotImplementedError('Could not connect to %s'", "pipeline(self): '''Get a :class:`.Pipeline` for the Store''' return Pipeline(self) def", "def execute_pipeline(self, commands, raise_on_error=True): consumer = self.current_consumer() consumer.start((commands, raise_on_error, []))", "__init__(self, *args, **kw): super().__init__(*args, **kw) self.parser = self._producer._parser_class() async def", "consumer.on_finished if isinstance(result, ResponseError): raise result.exception return result class RedisStore(RemoteStore):", "def connect(self, protocol_factory=None): protocol_factory = protocol_factory or self.create_protocol if isinstance(self._host,", "int(self._database) self.loaded_scripts = set() @property def pool(self): return self._pool @property", "self._pool.connect() with conn: result = await conn.execute_pipeline(commands, raise_on_error) return result", "for the Store''' return Pipeline(self) def pubsub(self, protocol=None): return RedisPubSub(self,", "RedisStoreConnection(Connection): def __init__(self, *args, **kw): super().__init__(*args, **kw) self.parser = self._producer._parser_class()", "close(self): '''Close all open connections.''' return self._pool.close() def has_query(self, query_type):", "'''The prefix namespace to append to all transaction on keys", "'''Close all open connections.''' return self._pool.close() def has_query(self, query_type): return", "''' protocol_factory = partial(RedisStoreConnection, Consumer) supported_queries = frozenset(('filter', 'exclude')) 
def", "Pipeline(self) def pubsub(self, protocol=None): return RedisPubSub(self, protocol=protocol) def channels(self, protocol=None,", "RedisClient(self) def pipeline(self): '''Get a :class:`.Pipeline` for the Store''' return", "meta.table_name) postfix = ':'.join((to_string(p) for p in args if p", "= await self._pool.connect() with conn: result = await conn.execute_pipeline(commands, raise_on_error)", "parser_class: actor = get_actor() pyparser = actor.cfg.redis_py_parser if actor else", "if self._password: await connection.execute('AUTH', self._password) if self._database: await connection.execute('SELECT', self._database)", "ping(self): return self.client().ping() async def execute(self, *args, **options): connection =", "protocol=None): return RedisPubSub(self, protocol=protocol) def channels(self, protocol=None, **kw): return RedisChannels(self.pubsub(protocol=protocol),", "is not None)) return '%s:%s' % (key, postfix) if postfix", "'''Get a :class:`.Pipeline` for the Store''' return Pipeline(self) def pubsub(self,", "connection def flush(self): return self.execute('flushdb') def close(self): '''Close all open", "= set() @property def pool(self): return self._pool @property def namespace(self):", "all open connections.''' return self._pool.close() def has_query(self, query_type): return query_type", "for lua script stdnet/lib/lua/odm.lua''' # indices = dict(((idx.attname, idx.unique) for", "result async def execute_pipeline(self, commands, raise_on_error=True): consumer = self.current_consumer() consumer.start((commands,", "consumer = self.current_consumer() consumer.start((commands, raise_on_error, [])) result = await consumer.on_finished", "protocol=None, **kw): return RedisChannels(self.pubsub(protocol=protocol), **kw) def ping(self): return self.client().ping() async", "for idx in meta.indices)) data = meta.as_dict() data['namespace'] = self.basekey(meta)", "= 0 self._database = int(self._database) self.loaded_scripts = set() @property def", 
"implementation. ''' protocol_factory = partial(RedisStoreConnection, Consumer) supported_queries = frozenset(('filter', 'exclude'))", "await connection.execute('AUTH', self._password) if self._database: await connection.execute('SELECT', self._database) return connection", "NotImplementedError('Could not connect to %s' % str(self._host)) if self._password: await", "self._host transport, connection = await self._loop.create_connection( protocol_factory, host, port) else:", "get_actor from pulsar.utils.pep import to_string from pulsar.apps.data import RemoteStore from", "return '%s:' % n if n else '' def key(self):", "':'.join((to_string(p) for p in args if p is not None))", "import redis_parser from .client import RedisClient, Pipeline, Consumer, ResponseError from", "decode_responses if not parser_class: actor = get_actor() pyparser = actor.cfg.redis_py_parser", "def basekey(self, meta, *args): key = '%s%s' % (self.namespace, meta.table_name)", "transaction on keys ''' n = self._urlparams.get('namespace') return '%s:' %", "False parser_class = redis_parser(pyparser) self._parser_class = parser_class if namespace: self._urlparams['namespace']", "return result class RedisStore(RemoteStore): '''Redis :class:`.Store` implementation. 
''' protocol_factory =", "ResponseError): raise result.exception return result async def execute_pipeline(self, commands, raise_on_error=True):", "supported_queries = frozenset(('filter', 'exclude')) def _init(self, namespace=None, parser_class=None, pool_size=50, decode_responses=False,", "n if n else '' def key(self): return (self._dns, self._encoding)", "meta.as_dict() data['namespace'] = self.basekey(meta) return data class CompiledQuery: def __init__(self,", "self.create_protocol if isinstance(self._host, tuple): host, port = self._host transport, connection", "% n if n else '' def key(self): return (self._dns,", "tuple): host, port = self._host transport, connection = await self._loop.create_connection(", "p is not None)) return '%s:%s' % (key, postfix) if", "return data class CompiledQuery: def __init__(self, pipe, query): self.pipe =", "port) else: raise NotImplementedError('Could not connect to %s' % str(self._host))", "self._password) if self._database: await connection.execute('SELECT', self._database) return connection def flush(self):", "else '' def key(self): return (self._dns, self._encoding) def client(self): '''Get", "protocol_factory=None): protocol_factory = protocol_factory or self.create_protocol if isinstance(self._host, tuple): host,", "consumer = self.current_consumer() await consumer.start((args, options)) result = await consumer.on_finished", "protocol=protocol) def channels(self, protocol=None, **kw): return RedisChannels(self.pubsub(protocol=protocol), **kw) def ping(self):", "conn.execute_pipeline(commands, raise_on_error) return result async def connect(self, protocol_factory=None): protocol_factory =", "self._database = 0 self._database = int(self._database) self.loaded_scripts = set() @property", "return result async def connect(self, protocol_factory=None): protocol_factory = protocol_factory or", "= await connection.execute(*args, **options) return result async def execute_pipeline(self, commands,", "**options): connection = await 
self._pool.connect() with connection: result = await", "is None: self._database = 0 self._database = int(self._database) self.loaded_scripts =", "from pulsar.apps.data import RemoteStore from pulsar.apps.ds import redis_parser from .client", "await self._pool.connect() with conn: result = await conn.execute_pipeline(commands, raise_on_error) return", "if self._database is None: self._database = 0 self._database = int(self._database)", "meta.indices)) data = meta.as_dict() data['namespace'] = self.basekey(meta) return data class", "raise_on_error=True): conn = await self._pool.connect() with conn: result = await", "raise result.exception return result async def execute_pipeline(self, commands, raise_on_error=True): consumer", "parser_class = redis_parser(pyparser) self._parser_class = parser_class if namespace: self._urlparams['namespace'] =", "= meta.as_dict() data['namespace'] = self.basekey(meta) return data class CompiledQuery: def", "data = meta.as_dict() data['namespace'] = self.basekey(meta) return data class CompiledQuery:", "has_query(self, query_type): return query_type in self.supported_queries def basekey(self, meta, *args):", "= actor.cfg.redis_py_parser if actor else False parser_class = redis_parser(pyparser) self._parser_class", "to_string from pulsar.apps.data import RemoteStore from pulsar.apps.ds import redis_parser from", "= int(self._database) self.loaded_scripts = set() @property def pool(self): return self._pool", "self.parser = self._producer._parser_class() async def execute(self, *args, **options): consumer =", "self._database: await connection.execute('SELECT', self._database) return connection def flush(self): return self.execute('flushdb')", "async def execute_pipeline(self, commands, raise_on_error=True): consumer = self.current_consumer() consumer.start((commands, raise_on_error,", "in self.supported_queries def basekey(self, meta, *args): key = '%s%s' %", "(self.namespace, meta.table_name) postfix = ':'.join((to_string(p) for p in 
args if", "self._decode_responses = decode_responses if not parser_class: actor = get_actor() pyparser", "= get_actor() pyparser = actor.cfg.redis_py_parser if actor else False parser_class", "key = '%s%s' % (self.namespace, meta.table_name) postfix = ':'.join((to_string(p) for", "namespace to append to all transaction on keys ''' n", "else: raise NotImplementedError('Could not connect to %s' % str(self._host)) if", "meta, *args): key = '%s%s' % (self.namespace, meta.table_name) postfix =", "open connections.''' return self._pool.close() def has_query(self, query_type): return query_type in", "result = await consumer.on_finished if isinstance(result, ResponseError): raise result.exception return", "Consumer) supported_queries = frozenset(('filter', 'exclude')) def _init(self, namespace=None, parser_class=None, pool_size=50,", "raise_on_error, [])) result = await consumer.on_finished if isinstance(result, ResponseError): raise", "def meta(self, meta): '''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''", "result = await connection.execute(*args, **options) return result async def execute_pipeline(self,", "commands, raise_on_error=True): conn = await self._pool.connect() with conn: result =", "self.basekey(meta) return data class CompiledQuery: def __init__(self, pipe, query): self.pipe", "await consumer.on_finished if isinstance(result, ResponseError): raise result.exception return result class", "return connection def flush(self): return self.execute('flushdb') def close(self): '''Close all", "from .pubsub import RedisPubSub, RedisChannels class RedisStoreConnection(Connection): def __init__(self, *args,", "(key, postfix) if postfix else key def meta(self, meta): '''Extract", "*args, **options): connection = await self._pool.connect() with connection: result =", "await consumer.start((args, options)) result = await consumer.on_finished if isinstance(result, ResponseError):", "'''Redis :class:`.Store` implementation. 
''' protocol_factory = partial(RedisStoreConnection, Consumer) supported_queries =", "%s' % str(self._host)) if self._password: await connection.execute('AUTH', self._password) if self._database:", "RedisStore(RemoteStore): '''Redis :class:`.Store` implementation. ''' protocol_factory = partial(RedisStoreConnection, Consumer) supported_queries", "append to all transaction on keys ''' n = self._urlparams.get('namespace')", "@property def namespace(self): '''The prefix namespace to append to all", "to all transaction on keys ''' n = self._urlparams.get('namespace') return", "**kw) def ping(self): return self.client().ping() async def execute(self, *args, **options):", "connect to %s' % str(self._host)) if self._password: await connection.execute('AUTH', self._password)", "transport, connection = await self._loop.create_connection( protocol_factory, host, port) else: raise", "pool(self): return self._pool @property def namespace(self): '''The prefix namespace to", "parser_class=None, pool_size=50, decode_responses=False, **kwargs): self._decode_responses = decode_responses if not parser_class:", "def client(self): '''Get a :class:`.RedisClient` for the Store''' return RedisClient(self)", "= self.current_consumer() await consumer.start((args, options)) result = await consumer.on_finished if", "frozenset(('filter', 'exclude')) def _init(self, namespace=None, parser_class=None, pool_size=50, decode_responses=False, **kwargs): self._decode_responses", "self._database) return connection def flush(self): return self.execute('flushdb') def close(self): '''Close", ":class:`.Pipeline` for the Store''' return Pipeline(self) def pubsub(self, protocol=None): return", "p in args if p is not None)) return '%s:%s'" ]
[ "migrations.AddField( model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='usertask', name='last_updated_dt',", "field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField(", "preserve_default=False, ), migrations.AddField( model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='usertask', name='created_dt',", "name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='usertask', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ),", "operations = [ migrations.AddField( model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ),", "), migrations.AddField( model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='usertask',", "name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ),", "django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies =", "migrations.AddField( model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='userreward', name='last_updated_dt',", "migrations, models import 
django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tasks',", "[ migrations.AddField( model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='userreward',", "= [ migrations.AddField( model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField(", "= [ ('tasks', '0004_auto_20200616_0116'), ] operations = [ migrations.AddField( model_name='userreward',", "django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tasks', '0004_auto_20200616_0116'), ] operations", "2020-06-16 05:23 from django.db import migrations, models import django.utils.timezone class", "from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies", "default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='usertask',", "migrations.AddField( model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),", "Migration(migrations.Migration): dependencies = [ ('tasks', '0004_auto_20200616_0116'), ] operations = [", "model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False,", "dependencies = [ ('tasks', '0004_auto_20200616_0116'), ] operations = [ migrations.AddField(", "# Generated by Django 3.0.7 on 2020-06-16 05:23 from 
django.db", "] operations = [ migrations.AddField( model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False,", "by Django 3.0.7 on 2020-06-16 05:23 from django.db import migrations,", "field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), migrations.AddField(", "('tasks', '0004_auto_20200616_0116'), ] operations = [ migrations.AddField( model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True,", "Generated by Django 3.0.7 on 2020-06-16 05:23 from django.db import", "models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tasks', '0004_auto_20200616_0116'),", "import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('tasks', '0004_auto_20200616_0116'), ]", "import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [", "3.0.7 on 2020-06-16 05:23 from django.db import migrations, models import", "on 2020-06-16 05:23 from django.db import migrations, models import django.utils.timezone", "), migrations.AddField( model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True,", "model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='usertask', name='last_updated_dt', field=models.DateTimeField(auto_now=True),", "[ ('tasks', '0004_auto_20200616_0116'), ] operations = [ migrations.AddField( model_name='userreward', name='created_dt',", "Django 3.0.7 on 2020-06-16 05:23 from django.db import migrations, 
models", "field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='usertask', name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), ]", "'0004_auto_20200616_0116'), ] operations = [ migrations.AddField( model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),", "name='last_updated_dt', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='usertask', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ),", "model_name='userreward', name='created_dt', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='userreward', name='last_updated_dt', field=models.DateTimeField(auto_now=True),", "05:23 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('tasks', '0004_auto_20200616_0116'), ] operations =" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "NOTICE file # # Licensed under the Apache License, Version", "iterable, element={}, received={}, type={}'.format( self._NAME, value, type(value)) assert len(list(value)) ==", "len(list(value)) == self._size, \\ 'Input vector has the wrong size,", "or isinstance(item, int) self._value = list(value) def reset(self): self._value =", "if not isinstance(self._value, list): print('Vector object must have a list", "self._value = [0 for _ in range(self._size)] def _set_value(self, value):", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# For information on the respective copyright owner see the", "Unless required by applicable law or agreed to in writing,", "permissions and # limitations under the License. from . import", "limitations under the License. from . 
import XMLBase import collections", "isinstance(size, int), \\ '[{}] Vector size input must be an", "int), \\ '[{}] Vector size input must be an integer,", "float) or isinstance(item, int) self._value = list(value) def reset(self): self._value", "Generation for Gazebo authors # For information on the respective", "import collections class XMLVector(XMLBase): _NAME = '' def __init__(self, size=None):", "return False return True def get_formatted_value_as_str(self): assert self.is_valid(), 'Invalid vector'", "distributed under the License is distributed on an \"AS IS\"", "output_str = ' '.join(['{}'] * self._size) return output_str.format(*[format(x, 'n') for", "value, len(list(value)), self._size) for item in value: assert isinstance(item, float)", "reset(self): self._value = [0 for _ in range(self._size)] XMLBase.reset(self) def", "0, '[{}] Size must be greater than zero'.format( self.xml_element_name) self._size", "element must be a float or integer') return False return", "(c) 2019 - The Procedural Generation for Gazebo authors #", "the specific language governing permissions and # limitations under the", "# limitations under the License. from . import XMLBase import", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "must be greater than zero'.format( self.xml_element_name) self._size = size self._value", "input must be an integer, received={}'.format( self.xml_element_name, size) assert size", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "False if len(self._value) != self._size: print('Normal value must be a", "except in compliance with the License. 
# You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "def _set_value(self, value): assert isinstance(value, collections.Iterable), \\ 'Input must be", "value: assert isinstance(item, float) or isinstance(item, int) self._value = list(value)", "be None' assert isinstance(size, int), \\ '[{}] Vector size input", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "must be an integer, received={}'.format( self.xml_element_name, size) assert size >", "_ in range(self._size)] XMLBase.reset(self) def is_valid(self): if not isinstance(self._value, list):", "item in value: assert isinstance(item, float) or isinstance(item, int) self._value", "3 elements') return False for item in self._value: if not", "not use this file except in compliance with the License.", "size cannot be None' assert isinstance(size, int), \\ '[{}] Vector", "zero'.format( self.xml_element_name) self._size = size self._value = [0 for _", "wrong size, element={}, received={}, ' \\ 'size of received={}, expected", "integer, received={}'.format( self.xml_element_name, size) assert size > 0, '[{}] Size", "than zero'.format( self.xml_element_name) self._size = size self._value = [0 for", "' '.join(['{}'] * self._size) return output_str.format(*[format(x, 'n') for x in", "writing, software # distributed under the License is distributed on", "'[{}] Size must be greater than zero'.format( self.xml_element_name) self._size =", "in writing, software # distributed under the License is distributed", "the wrong size, element={}, received={}, ' \\ 'size of received={},", "you may not use this file except in compliance with", "range(self._size)] def _set_value(self, value): assert isinstance(value, collections.Iterable), \\ 'Input must", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "language governing permissions and # limitations under the License. 
from", "XMLBase.__init__(self) assert size is not None, 'Vector size cannot be", "'Vector size cannot be None' assert isinstance(size, int), \\ '[{}]", "file # # Licensed under the Apache License, Version 2.0", "be a float or integer') return False return True def", "Size must be greater than zero'.format( self.xml_element_name) self._size = size", "in self._value: if not isinstance(item, float) and not isinstance(item, int):", "for item in self._value: if not isinstance(item, float) and not", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "XMLVector(XMLBase): _NAME = '' def __init__(self, size=None): XMLBase.__init__(self) assert size", "None' assert isinstance(size, int), \\ '[{}] Vector size input must", "received={}, expected length={}'.format( self._NAME, value, len(list(value)), self._size) for item in", "For information on the respective copyright owner see the NOTICE", "self._NAME, value, type(value)) assert len(list(value)) == self._size, \\ 'Input vector", "is_valid(self): if not isinstance(self._value, list): print('Vector object must have a", "isinstance(self._value, list): print('Vector object must have a list as value')", "of received={}, expected length={}'.format( self._NAME, value, len(list(value)), self._size) for item", "CONDITIONS OF ANY KIND, either express or implied. # See", "greater than zero'.format( self.xml_element_name) self._size = size self._value = [0", "received={}, ' \\ 'size of received={}, expected length={}'.format( self._NAME, value,", "in value: assert isinstance(item, float) or isinstance(item, int) self._value =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "\\ 'Input vector has the wrong size, element={}, received={}, '", "has the wrong size, element={}, received={}, ' \\ 'size of", ". import XMLBase import collections class XMLVector(XMLBase): _NAME = ''", "or implied. 
# See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "int) self._value = list(value) def reset(self): self._value = [0 for", "self._value = list(value) def reset(self): self._value = [0 for _", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "respective copyright owner see the NOTICE file # # Licensed", "License, Version 2.0 (the \"License\"); # you may not use", "self.xml_element_name, size) assert size > 0, '[{}] Size must be", "[0 for _ in range(self._size)] def _set_value(self, value): assert isinstance(value,", "== self._size, \\ 'Input vector has the wrong size, element={},", "list with 3 elements') return False for item in self._value:", "# You may obtain a copy of the License at", "class XMLVector(XMLBase): _NAME = '' def __init__(self, size=None): XMLBase.__init__(self) assert", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "import XMLBase import collections class XMLVector(XMLBase): _NAME = '' def", "from . 
import XMLBase import collections class XMLVector(XMLBase): _NAME =", "must be a list with 3 elements') return False for", "float) and not isinstance(item, int): print('Each vector element must be", "print('Each vector element must be a float or integer') return", "under the License is distributed on an \"AS IS\" BASIS,", "\\ '[{}] Vector size input must be an integer, received={}'.format(", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "size is not None, 'Vector size cannot be None' assert", "License for the specific language governing permissions and # limitations", "XMLBase import collections class XMLVector(XMLBase): _NAME = '' def __init__(self,", "size > 0, '[{}] Size must be greater than zero'.format(", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "size self._value = [0 for _ in range(self._size)] def _set_value(self,", "isinstance(item, float) or isinstance(item, int) self._value = list(value) def reset(self):", "assert isinstance(item, float) or isinstance(item, int) self._value = list(value) def", "for item in value: assert isinstance(item, float) or isinstance(item, int)", "with 3 elements') return False for item in self._value: if", "assert size > 0, '[{}] Size must be greater than", "The Procedural Generation for Gazebo authors # For information on", "received={}, type={}'.format( self._NAME, value, type(value)) assert len(list(value)) == self._size, \\", "\\ 'size of received={}, expected length={}'.format( self._NAME, value, len(list(value)), self._size)", "a float or integer') return False return True def get_formatted_value_as_str(self):", "in range(self._size)] def _set_value(self, value): assert isinstance(value, collections.Iterable), \\ 'Input", "value must be a list with 3 elements') return False", "the License for the specific language governing permissions and #", "License. from . 
import XMLBase import collections class XMLVector(XMLBase): _NAME", "size, element={}, received={}, ' \\ 'size of received={}, expected length={}'.format(", "governing permissions and # limitations under the License. from .", "value, type(value)) assert len(list(value)) == self._size, \\ 'Input vector has", "(the \"License\"); # you may not use this file except", "'size of received={}, expected length={}'.format( self._NAME, value, len(list(value)), self._size) for", "vector' output_str = ' '.join(['{}'] * self._size) return output_str.format(*[format(x, 'n')", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "<filename>pcg_libraries/src/pcg_gazebo/parsers/types/vector.py # Copyright (c) 2019 - The Procedural Generation for", "length={}'.format( self._NAME, value, len(list(value)), self._size) for item in value: assert", "either express or implied. # See the License for the", "not isinstance(item, int): print('Each vector element must be a float", "isinstance(value, collections.Iterable), \\ 'Input must be iterable, element={}, received={}, type={}'.format(", "must be a float or integer') return False return True", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "self._size) for item in value: assert isinstance(item, float) or isinstance(item,", "> 0, '[{}] Size must be greater than zero'.format( self.xml_element_name)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "object must have a list as value') return False if", "the License is distributed on an \"AS IS\" BASIS, #", "the NOTICE file # # Licensed under the Apache License,", "be an integer, received={}'.format( self.xml_element_name, size) assert size > 0,", "'Input must be iterable, element={}, received={}, type={}'.format( self._NAME, value, type(value))", "collections class XMLVector(XMLBase): _NAME = '' def __init__(self, size=None): XMLBase.__init__(self)", "in compliance with the License. # You may obtain a", "if len(self._value) != self._size: print('Normal value must be a list", "on the respective copyright owner see the NOTICE file #", "value') return False if len(self._value) != self._size: print('Normal value must", "= [0 for _ in range(self._size)] XMLBase.reset(self) def is_valid(self): if", "False return True def get_formatted_value_as_str(self): assert self.is_valid(), 'Invalid vector' output_str", "software # distributed under the License is distributed on an", "_ in range(self._size)] def _set_value(self, value): assert isinstance(value, collections.Iterable), \\", "the License. from . 
import XMLBase import collections class XMLVector(XMLBase):", "def reset(self): self._value = [0 for _ in range(self._size)] XMLBase.reset(self)", "integer') return False return True def get_formatted_value_as_str(self): assert self.is_valid(), 'Invalid", "!= self._size: print('Normal value must be a list with 3", "Vector size input must be an integer, received={}'.format( self.xml_element_name, size)", "list(value) def reset(self): self._value = [0 for _ in range(self._size)]", "len(self._value) != self._size: print('Normal value must be a list with", "authors # For information on the respective copyright owner see", "information on the respective copyright owner see the NOTICE file", "assert size is not None, 'Vector size cannot be None'", "copyright owner see the NOTICE file # # Licensed under", "Gazebo authors # For information on the respective copyright owner", "# # Unless required by applicable law or agreed to", "Procedural Generation for Gazebo authors # For information on the", "self._size, \\ 'Input vector has the wrong size, element={}, received={},", "self._value = [0 for _ in range(self._size)] XMLBase.reset(self) def is_valid(self):", "elements') return False for item in self._value: if not isinstance(item,", "2019 - The Procedural Generation for Gazebo authors # For", "cannot be None' assert isinstance(size, int), \\ '[{}] Vector size", "def __init__(self, size=None): XMLBase.__init__(self) assert size is not None, 'Vector", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "# Copyright (c) 2019 - The Procedural Generation for Gazebo", "_NAME = '' def __init__(self, size=None): XMLBase.__init__(self) assert size is", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "have a list as value') return False if len(self._value) !=", "= list(value) def reset(self): self._value = [0 for _ in", "isinstance(item, int): print('Each vector element must be a float or", "assert len(list(value)) == 
self._size, \\ 'Input vector has the wrong", "= [0 for _ in range(self._size)] def _set_value(self, value): assert", "Version 2.0 (the \"License\"); # you may not use this", "a list with 3 elements') return False for item in", "must be iterable, element={}, received={}, type={}'.format( self._NAME, value, type(value)) assert", "return True def get_formatted_value_as_str(self): assert self.is_valid(), 'Invalid vector' output_str =", "owner see the NOTICE file # # Licensed under the", "print('Normal value must be a list with 3 elements') return", "float or integer') return False return True def get_formatted_value_as_str(self): assert", "law or agreed to in writing, software # distributed under", "True def get_formatted_value_as_str(self): assert self.is_valid(), 'Invalid vector' output_str = '", "'Input vector has the wrong size, element={}, received={}, ' \\", "vector element must be a float or integer') return False", "value): assert isinstance(value, collections.Iterable), \\ 'Input must be iterable, element={},", "type={}'.format( self._NAME, value, type(value)) assert len(list(value)) == self._size, \\ 'Input", "range(self._size)] XMLBase.reset(self) def is_valid(self): if not isinstance(self._value, list): print('Vector object", "print('Vector object must have a list as value') return False", "be iterable, element={}, received={}, type={}'.format( self._NAME, value, type(value)) assert len(list(value))", "self._NAME, value, len(list(value)), self._size) for item in value: assert isinstance(item,", "must have a list as value') return False if len(self._value)", "self.is_valid(), 'Invalid vector' output_str = ' '.join(['{}'] * self._size) return", "under the License. from . 
import XMLBase import collections class", "Copyright (c) 2019 - The Procedural Generation for Gazebo authors", "be greater than zero'.format( self.xml_element_name) self._size = size self._value =", "def is_valid(self): if not isinstance(self._value, list): print('Vector object must have", "- The Procedural Generation for Gazebo authors # For information", "implied. # See the License for the specific language governing", "return False for item in self._value: if not isinstance(item, float)", "received={}'.format( self.xml_element_name, size) assert size > 0, '[{}] Size must", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"License\"); # you may not use this file except in", "size=None): XMLBase.__init__(self) assert size is not None, 'Vector size cannot", "not isinstance(self._value, list): print('Vector object must have a list as", "for _ in range(self._size)] XMLBase.reset(self) def is_valid(self): if not isinstance(self._value,", "int): print('Each vector element must be a float or integer')", "collections.Iterable), \\ 'Input must be iterable, element={}, received={}, type={}'.format( self._NAME,", "is not None, 'Vector size cannot be None' assert isinstance(size,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\\ 'Input must be iterable, element={}, received={}, type={}'.format( self._NAME, value,", "if not isinstance(item, float) and not isinstance(item, int): print('Each vector", "'.join(['{}'] * self._size) return output_str.format(*[format(x, 'n') for x in self._value])", "and # limitations under the License. from . 
import XMLBase", "_set_value(self, value): assert isinstance(value, collections.Iterable), \\ 'Input must be iterable,", "assert isinstance(value, collections.Iterable), \\ 'Input must be iterable, element={}, received={},", "= ' '.join(['{}'] * self._size) return output_str.format(*[format(x, 'n') for x", "list): print('Vector object must have a list as value') return", "isinstance(item, float) and not isinstance(item, int): print('Each vector element must", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "self.xml_element_name) self._size = size self._value = [0 for _ in", "' \\ 'size of received={}, expected length={}'.format( self._NAME, value, len(list(value)),", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "for Gazebo authors # For information on the respective copyright", "'' def __init__(self, size=None): XMLBase.__init__(self) assert size is not None,", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "element={}, received={}, type={}'.format( self._NAME, value, type(value)) assert len(list(value)) == self._size,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "= size self._value = [0 for _ in range(self._size)] def", "assert self.is_valid(), 'Invalid vector' output_str = ' '.join(['{}'] * self._size)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "element={}, received={}, ' \\ 'size of received={}, expected length={}'.format( self._NAME,", "expected length={}'.format( self._NAME, value, len(list(value)), self._size) for item in value:", "XMLBase.reset(self) def is_valid(self): if not isinstance(self._value, list): print('Vector object must", "not None, 'Vector size cannot be None' assert isinstance(size, int),", "You may obtain a copy of the License at #", "vector has the wrong size, element={}, received={}, ' \\ 'size", "False for item in self._value: if not isinstance(item, float) and", "an integer, received={}'.format( self.xml_element_name, size) assert size > 0, '[{}]", "type(value)) assert len(list(value)) == self._size, \\ 'Input vector has the", "the respective copyright owner see the NOTICE file # #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "return False if len(self._value) != self._size: print('Normal value must be", "be a list with 3 elements') return False for item", "for _ in range(self._size)] def _set_value(self, value): assert isinstance(value, collections.Iterable),", "required by applicable law or agreed to in writing, software", "list as value') return False if len(self._value) != self._size: print('Normal", "not 
isinstance(item, float) and not isinstance(item, int): print('Each vector element", "def get_formatted_value_as_str(self): assert self.is_valid(), 'Invalid vector' output_str = ' '.join(['{}']", "= '' def __init__(self, size=None): XMLBase.__init__(self) assert size is not", "None, 'Vector size cannot be None' assert isinstance(size, int), \\", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "size input must be an integer, received={}'.format( self.xml_element_name, size) assert", "'[{}] Vector size input must be an integer, received={}'.format( self.xml_element_name,", "with the License. # You may obtain a copy of", "self._size = size self._value = [0 for _ in range(self._size)]", "this file except in compliance with the License. # You", "or integer') return False return True def get_formatted_value_as_str(self): assert self.is_valid(),", "the Apache License, Version 2.0 (the \"License\"); # you may", "[0 for _ in range(self._size)] XMLBase.reset(self) def is_valid(self): if not", "self._size: print('Normal value must be a list with 3 elements')", "size) assert size > 0, '[{}] Size must be greater", "in range(self._size)] XMLBase.reset(self) def is_valid(self): if not isinstance(self._value, list): print('Vector", "a list as value') return False if len(self._value) != self._size:", "see the NOTICE file # # Licensed under the Apache", "'Invalid vector' output_str = ' '.join(['{}'] * self._size) return output_str.format(*[format(x,", "and not isinstance(item, int): print('Each vector element must be a", "self._value: if not isinstance(item, float) and not isinstance(item, int): print('Each", "item in self._value: if not isinstance(item, float) and not isinstance(item,", "get_formatted_value_as_str(self): assert self.is_valid(), 'Invalid vector' output_str = ' '.join(['{}'] *", "__init__(self, size=None): XMLBase.__init__(self) assert size is not None, 'Vector size", "isinstance(item, 
int) self._value = list(value) def reset(self): self._value = [0", "assert isinstance(size, int), \\ '[{}] Vector size input must be", "len(list(value)), self._size) for item in value: assert isinstance(item, float) or", "as value') return False if len(self._value) != self._size: print('Normal value" ]
[ "False]}, {\"id\": \"one\", \"niceToHaveRequirements\": [False, False, False, True, False]}, {\"id\":", "= { \"briefResponses\": [ {\"id\": \"five\", \"niceToHaveRequirements\": [True, True, True,", "False) ] ) def test_is_brief_correct_allowed_statuses(self, allowed_statuses, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4',", "framework, lot, user, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert", "] ) def test_if_brief_correct_allow_withdrawn(self, status, allow_withdrawn, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4',", "test_section_has_at_least_one_required_question(self): content = content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'} ) sections_with_required_questions =", "= mock.Mock() data_api_client.find_brief_responses.return_value = { \"briefResponses\": [ {\"id\": \"five\", \"niceToHaveRequirements\":", "{\"id\": \"five\", \"niceToHaveRequirements\": [True, True, True, True, True]}, {\"id\": \"zero\",", "lots=[ LotStub(slug='digital-specialists', allows_brief=False).response() ] ).single_result_response() with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists',", "[True, True, True, True, True]}, {'id': 'four', 'niceToHaveRequirements': [True, True,", "] def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self): data_api_client = mock.Mock() data_api_client.find_brief_responses.return_value = { \"briefResponses\":", "'edit_brief') class TestBuyersHelpers(object): def test_get_framework_and_lot(self): provided_lot = LotStub(slug='digital-specialists', allows_brief=True).response() data_api_client", "\"one\", \"niceToHaveRequirements\": [False, False, False, True, False]}, {\"id\": \"four\", \"niceToHaveRequirements\":", "False, True, False]}, {'id': 'zero', 'niceToHaveRequirements': 
[False, False, False, False,", "[ ('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True), ('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False), ('digital-outcomes-and-specialists-4',", "from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub content_loader = ContentLoader('tests/fixtures/content') content_loader.load_manifest('dos',", "False]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True, True, True, False]}, ]", "= { 'status': 'draft', 'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists', 'required1': True", "False, False, True, False]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True, True,", "'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allow_withdrawn=allow_withdrawn ) is result @pytest.mark.parametrize( 'allowed_statuses, result',", "False, True]}, {\"id\": \"five\", \"niceToHaveRequirements\": [True, True, True, True, True]},", "allowed_statuses=allowed_statuses ) is result def test_is_brief_associated_with_user(self): brief = BriefStub(user_id=123).response() assert", "False, 'section-5': True } for section in content.sections: assert helpers.buyers_helpers.section_has_at_least_one_required_question(section)", "result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response() assert helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4',", "False, False, False, False]}, {\"id\": \"three\", \"niceToHaveRequirements\": [True, True, False,", "'niceToHaveRequirements': [True, True, False, False, True]}, {\"id\": \"one\", \"niceToHaveRequirements\": [False,", "] ).single_result_response() with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client, allowed_statuses=['live'], )", "\"four\", \"niceToHaveRequirements\": [True, True, True, True, False]}, ] } brief", "True, True, True, 
False]}, {'id': 'four', 'niceToHaveRequirements': [True, True, True,", "data_api_client.find_brief_responses.return_value = { \"briefResponses\": [ {\"id\": \"five\", \"niceToHaveRequirements\": [True, True,", "BriefStub(user_id=123).response() assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is", "True, True]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True, True, True, False]},", "\"niceToHaveRequirements\": [False, False, False, True, False]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True,", "False]} ] def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self): data_api_client = mock.Mock() data_api_client.find_brief_responses.return_value = {", "True, True, True]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True, True, True,", "and Specialists 4' assert framework['slug'] == 'digital-outcomes-and-specialists-4' assert framework['clarificationQuestionsOpen'] is", "Specialists 4' assert framework['slug'] == 'digital-outcomes-and-specialists-4' assert framework['clarificationQuestionsOpen'] is True", "'draft', 'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists', 'required1': True } content =", ") sections = content.summary(brief) unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections) assert unanswered_required", "provided_lot = LotStub(slug='digital-specialists', allows_brief=True).response() data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub(", "framework, lot, user) is result @pytest.mark.parametrize( ['status', 'allow_withdrawn', 'result'], [", "from werkzeug.exceptions import NotFound import app.main.helpers as helpers from dmcontent.content_loader", "2 assert unanswered_optional == 2 def test_add_unanswered_counts_to_briefs(self): briefs = [{", "'section-4': False, 'section-5': True } for section in content.sections: assert", 
"test_is_brief_associated_with_user(self): brief = BriefStub(user_id=123).response() assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True assert", "True]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True, True, True, False]}, {\"id\":", "'edit_brief').filter( {'lot': 'digital-specialists'} ) sections_with_required_questions = { 'section-1': True, 'section-2':", "True, True, False]}, {'id': 'three', 'niceToHaveRequirements': [True, True, False, False,", "result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4',", "result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct(brief, framework, lot,", "'niceToHaveRequirements': [True, True, True, True, False]}, {'id': 'three', 'niceToHaveRequirements': [True,", "{\"id\": \"five\", \"niceToHaveRequirements\": [True, True, True, True, True]}, {\"id\": \"four\",", "data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[provided_lot], ).single_result_response()", "is result def test_is_brief_associated_with_user(self): brief = BriefStub(user_id=123).response() assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123)", "test_get_framework_and_lot_404s_if_allows_brief_required(self): data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[", "helpers.buyers_helpers.is_brief_correct(brief, framework, lot, user) is result @pytest.mark.parametrize( ['status', 'allow_withdrawn', 'result'],", "False, False, True]}, {\"id\": \"one\", \"niceToHaveRequirements\": [False, False, False, True,", "with pytest.raises(NotFound): 
"""Tests for the buyer-facing helpers in ``app.main.helpers.buyers_helpers``.

The fixture manifests under ``tests/fixtures/content`` supply a small 'dos'
framework with an ``edit_brief`` manifest; the section/question tests below
read from it via the module-level content loader.
"""
import mock
import pytest
from werkzeug.exceptions import NotFound

import app.main.helpers as helpers
from dmcontent.content_loader import ContentLoader
from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub

# Load the fixture manifest once and share it across all tests in this module.
content_loader = ContentLoader('tests/fixtures/content')
content_loader.load_manifest('dos', 'data', 'edit_brief')
questions_builder = content_loader.get_manifest('dos', 'edit_brief')


class TestBuyersHelpers(object):

    def test_get_framework_and_lot(self):
        # A live framework containing the requested lot is returned as-is,
        # together with the matching lot dict.
        expected_lot = LotStub(slug='digital-specialists', allows_brief=True).response()
        data_api_client = mock.Mock()
        data_api_client.get_framework.return_value = FrameworkStub(
            slug='digital-outcomes-and-specialists-4',
            status='live',
            lots=[expected_lot],
        ).single_result_response()

        framework, lot = helpers.buyers_helpers.get_framework_and_lot(
            'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client
        )

        assert framework['status'] == "live"
        assert framework['name'] == 'Digital Outcomes and Specialists 4'
        assert framework['slug'] == 'digital-outcomes-and-specialists-4'
        assert framework['clarificationQuestionsOpen'] is True
        assert lot == expected_lot

    def test_get_framework_and_lot_404s_for_wrong_framework_status(self):
        # An 'open' framework is rejected when only 'live' is allowed.
        data_api_client = mock.Mock()
        data_api_client.get_framework.return_value = FrameworkStub(
            slug='digital-outcomes-and-specialists-4',
            status='open',
            lots=[
                LotStub(slug='digital-specialists', allows_brief=True).response()
            ]
        ).single_result_response()

        with pytest.raises(NotFound):
            helpers.buyers_helpers.get_framework_and_lot(
                'digital-outcomes-and-specialists-4',
                'digital-specialists',
                data_api_client,
                allowed_statuses=['live'],
            )

    def test_get_framework_and_lot_404s_if_allows_brief_required(self):
        # A lot that does not allow briefs is rejected when must_allow_brief is set.
        data_api_client = mock.Mock()
        data_api_client.get_framework.return_value = FrameworkStub(
            slug='digital-outcomes-and-specialists-4',
            status='live',
            lots=[
                LotStub(slug='digital-specialists', allows_brief=False).response()
            ]
        ).single_result_response()

        with pytest.raises(NotFound):
            helpers.buyers_helpers.get_framework_and_lot(
                'digital-outcomes-and-specialists-4',
                'digital-specialists',
                data_api_client,
                must_allow_brief=True,
            )

    @pytest.mark.parametrize(
        ['framework', 'lot', 'user', 'result'],
        [
            # framework, lot and user must ALL match for the brief to be "correct"
            ('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True),
            ('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False),
            ('digital-outcomes-and-specialists-4', 'not-digital-specialists', 123, False),
            ('digital-outcomes-and-specialists-4', 'digital-specialists', 124, False),
        ]
    )
    def test_is_brief_correct(self, framework, lot, user, result):
        brief = BriefStub(
            framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live'
        ).response()

        assert helpers.buyers_helpers.is_brief_correct(brief, framework, lot, user) is result

    @pytest.mark.parametrize(
        ['status', 'allow_withdrawn', 'result'],
        [
            # A withdrawn brief only passes when allow_withdrawn is True;
            # the flag has no effect on a live brief.
            ('withdrawn', True, True),
            ('withdrawn', False, False),
            ('live', True, True),
            ('live', False, True),
        ]
    )
    def test_if_brief_correct_allow_withdrawn(self, status, allow_withdrawn, result):
        brief = BriefStub(
            framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status
        ).response()

        assert helpers.buyers_helpers.is_brief_correct(
            brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123,
            allow_withdrawn=allow_withdrawn
        ) is result

    @pytest.mark.parametrize(
        'allowed_statuses, result', [
            (['live', 'closed'], True),
            (['closed'], False)
        ]
    )
    def test_is_brief_correct_allowed_statuses(self, allowed_statuses, result):
        # The brief's status must be one of the explicitly allowed statuses.
        brief = BriefStub(
            framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live'
        ).response()

        assert helpers.buyers_helpers.is_brief_correct(
            brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123,
            allowed_statuses=allowed_statuses
        ) is result

    def test_is_brief_associated_with_user(self):
        brief = BriefStub(user_id=123).response()

        assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True
        assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False

    def test_brief_can_be_edited(self):
        # Only draft briefs are editable.
        assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is True
        assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='live').response()) is False

    def test_brief_is_withdrawn(self):
        assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='withdrawn').response()) is True
        assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='live').response()) is False

    def test_section_has_at_least_one_required_question(self):
        manifest = content_loader.get_manifest('dos', 'edit_brief').filter(
            {'lot': 'digital-specialists'}
        )

        # Expected answer keyed by section slug.
        # NOTE(review): 'section-3' is absent here — presumably the filtered
        # fixture manifest has no such section; confirm against
        # tests/fixtures/content if the fixture changes.
        sections_with_required_questions = {
            'section-1': True,
            'section-2': True,
            'section-4': False,
            'section-5': True
        }

        for section in manifest.sections:
            expected = sections_with_required_questions[section.slug]
            assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) == expected

    def test_count_unanswered_questions(self):
        # Only 'required1' is answered in this draft brief; the fixture
        # manifest leaves 2 required and 2 optional questions unanswered.
        brief = {
            'status': 'draft',
            'frameworkSlug': 'dos',
            'lotSlug': 'digital-specialists',
            'required1': True
        }
        manifest = content_loader.get_manifest('dos', 'edit_brief').filter(
            {'lot': 'digital-specialists'}
        )
        sections = manifest.summary(brief)

        unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections)

        assert unanswered_required == 2
        assert unanswered_optional == 2

    def test_add_unanswered_counts_to_briefs(self):
        # The helper annotates each brief dict with the unanswered counts
        # while leaving the original keys in place.
        briefs = [{
            'status': 'draft',
            'frameworkSlug': 'dos',
            'lotSlug': 'digital-specialists',
            'required1': True
        }]

        expected_briefs = [{
            'status': 'draft',
            'frameworkSlug': 'dos',
            'lotSlug': 'digital-specialists',
            'required1': True,
            'unanswered_required': 2,
            'unanswered_optional': 2
        }]

        assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == expected_briefs

    def test_get_sorted_responses_for_brief(self):
        # Responses are ordered by how many nice-to-have requirements they
        # meet; the duplicate "five"/"four" entries exercise tie handling.
        data_api_client = mock.Mock()
        data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
                {"id": "zero", "niceToHaveRequirements": [False, False, False, False, False]},
                {"id": "three", "niceToHaveRequirements": [True, True, False, False, True]},
                {"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
                {"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
                {"id": "one", "niceToHaveRequirements": [False, False, False, True, False]},
                {"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
            ]
        }
        brief = {"id": 1, "niceToHaveRequirements": ["Nice", "to", "have", "yes", "please"]}

        expected_order = [
            {'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
            {'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
            {'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
            {'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
            {'id': 'three', 'niceToHaveRequirements': [True, True, False, False, True]},
            {'id': 'one', 'niceToHaveRequirements': [False, False, False, True, False]},
            {'id': 'zero', 'niceToHaveRequirements': [False, False, False, False, False]}
        ]

        assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == expected_order

    def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self):
        # With no nice-to-have requirements on the brief the API order is kept.
        data_api_client = mock.Mock()
        data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {"id": "five"},
                {"id": "zero"},
                {"id": "three"},
                {"id": "five"}
            ]
        }
        brief = {"id": 1, "niceToHaveRequirements": []}

        assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [
            {"id": "five"},
            {"id": "zero"},
            {"id": "three"},
            {"id": "five"}
        ]
[True,", "} brief = {\"id\": 1, \"niceToHaveRequirements\": [\"Nice\", \"to\", \"have\", \"yes\",", "sections = content.summary(brief) unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections) assert unanswered_required ==", "data_api_client = mock.Mock() data_api_client.find_brief_responses.return_value = { \"briefResponses\": [ {\"id\": \"five\"},", "'digital-specialists', data_api_client, allowed_statuses=['live'], ) def test_get_framework_and_lot_404s_if_allows_brief_required(self): data_api_client = mock.Mock() data_api_client.get_framework.return_value", "helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {'id': 'five', 'niceToHaveRequirements': [True, True, True,", "False, False, False]} ] def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self): data_api_client = mock.Mock() data_api_client.find_brief_responses.return_value", "assert unanswered_optional == 2 def test_add_unanswered_counts_to_briefs(self): briefs = [{ 'status':", "BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allowed_statuses=allowed_statuses", "helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client) assert framework['status'] == \"live\" assert framework['name'] ==", "brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allow_withdrawn=allow_withdrawn ) is result @pytest.mark.parametrize( 'allowed_statuses,", "brief = BriefStub(user_id=123).response() assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True assert helpers.buyers_helpers.is_brief_associated_with_user(brief,", "\"yes\", \"please\"]} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {'id': 'five', 
'niceToHaveRequirements':", "'three', 'niceToHaveRequirements': [True, True, False, False, True]}, {\"id\": \"one\", \"niceToHaveRequirements\":", "allows_brief=False).response() ] ).single_result_response() with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client, must_allow_brief=True,", "'digital-specialists', 'required1': True }] assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == [{ 'status':", "= [{ 'status': 'draft', 'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists', 'required1': True", "helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False def", "helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client, must_allow_brief=True, ) @pytest.mark.parametrize( ['framework', 'lot', 'user',", "data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='open', lots=[ LotStub(slug='digital-specialists',", "'allowed_statuses, result', [ (['live', 'closed'], True), (['closed'], False) ] )", "def test_if_brief_correct_allow_withdrawn(self, status, allow_withdrawn, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response()", "unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections) assert unanswered_required == 2 assert unanswered_optional", "True]}, {'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]}, {'id':", "'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client, must_allow_brief=True, ) @pytest.mark.parametrize( ['framework', 'lot', 'user', 'result'],", "'result'], [ ('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True), 
('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False),", "= mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[provided_lot], ).single_result_response() framework,", "}] assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == [{ 'status': 'draft', 'frameworkSlug': 'dos',", "= content_loader.get_manifest('dos', 'edit_brief') class TestBuyersHelpers(object): def test_get_framework_and_lot(self): provided_lot = LotStub(slug='digital-specialists',", "is True assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False def test_brief_can_be_edited(self): assert", "sections_with_required_questions = { 'section-1': True, 'section-2': True, 'section-4': False, 'section-5':", "allowed_statuses, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct( brief,", "True assert lot == provided_lot def test_get_framework_and_lot_404s_for_wrong_framework_status(self): data_api_client = mock.Mock()", "def test_brief_can_be_edited(self): assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is True assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='live').response()) is False", "content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'} ) sections = content.summary(brief) unanswered_required, unanswered_optional", "\"please\"]} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {'id': 'five', 'niceToHaveRequirements': [True,", "assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='live').response()) is False def test_section_has_at_least_one_required_question(self): content = content_loader.get_manifest('dos', 'edit_brief').filter(", "content_loader) == [{ 'status': 'draft', 
'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists', 'required1':", "def test_get_framework_and_lot_404s_for_wrong_framework_status(self): data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='open',", "slug='digital-outcomes-and-specialists-4', status='open', lots=[ LotStub(slug='digital-specialists', allows_brief=True).response() ] ).single_result_response() with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot(", "[\"Nice\", \"to\", \"have\", \"yes\", \"please\"]} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [", "assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False def test_brief_can_be_edited(self): assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is", "= helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client) assert framework['status'] == \"live\" assert framework['name']", "True assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False def test_brief_can_be_edited(self): assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response())", "'digital-specialists', 123, allow_withdrawn=allow_withdrawn ) is result @pytest.mark.parametrize( 'allowed_statuses, result', [", "is True assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='live').response()) is False def test_brief_is_withdrawn(self): assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='withdrawn').response())", "'four', 'niceToHaveRequirements': [True, True, True, True, False]}, {'id': 'three', 'niceToHaveRequirements':", "('withdrawn', False, False), ('live', True, True), ('live', False, True), ]", "assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == [{ 'status': 'draft', 'frameworkSlug': 'dos', 'lotSlug':", 
"data_api_client, allowed_statuses=['live'], ) def test_get_framework_and_lot_404s_if_allows_brief_required(self): data_api_client = mock.Mock() data_api_client.get_framework.return_value =", "BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct(brief, framework, lot, user) is result", "dmcontent.content_loader import ContentLoader from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub content_loader", ") is result def test_is_brief_associated_with_user(self): brief = BriefStub(user_id=123).response() assert helpers.buyers_helpers.is_brief_associated_with_user(brief,", "lots=[ LotStub(slug='digital-specialists', allows_brief=True).response() ] ).single_result_response() with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists',", "} content = content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'} ) sections =", "= LotStub(slug='digital-specialists', allows_brief=True).response() data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4',", "'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists', 'required1': True, 'unanswered_required': 2, 'unanswered_optional': 2", "= BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response() assert helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123,", "= { \"briefResponses\": [ {\"id\": \"five\"}, {\"id\": \"zero\"}, {\"id\": \"three\"},", "False]}, {'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]}, {'id':", "\"niceToHaveRequirements\": [\"Nice\", \"to\", \"have\", \"yes\", \"please\"]} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) ==", 
"helpers.buyers_helpers.section_has_at_least_one_required_question(section) \\ == sections_with_required_questions[section.slug] def test_count_unanswered_questions(self): brief = { 'status':", "True, True, False]}, {'id': 'four', 'niceToHaveRequirements': [True, True, True, True,", "True, False]}, {'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},", "def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self): data_api_client = mock.Mock() data_api_client.find_brief_responses.return_value = { \"briefResponses\": [", "assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='live').response()) is False def test_brief_is_withdrawn(self): assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='withdrawn').response()) is True", "'lotSlug': 'digital-specialists', 'required1': True, 'unanswered_required': 2, 'unanswered_optional': 2 }] def", "{\"id\": \"three\"}, {\"id\": \"five\"} ] } brief = {\"id\": 1,", "} for section in content.sections: assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) \\ == sections_with_required_questions[section.slug]", "True, 'section-2': True, 'section-4': False, 'section-5': True } for section", "allows_brief=True).response() data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[provided_lot],", "provided_lot def test_get_framework_and_lot_404s_for_wrong_framework_status(self): data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4',", "\"zero\"}, {\"id\": \"three\"}, {\"id\": \"five\"} ] } brief = {\"id\":", "{\"id\": \"one\", \"niceToHaveRequirements\": [False, False, False, True, False]}, {'id': 'zero',", "with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client, must_allow_brief=True, 
) @pytest.mark.parametrize( ['framework',", "123, False), ('digital-outcomes-and-specialists-4', 'digital-specialists', 124, False), ] ) def test_is_brief_correct(self,", "True, True), ('withdrawn', False, False), ('live', True, True), ('live', False,", "False]}, {\"id\": \"three\", \"niceToHaveRequirements\": [True, True, False, False, True]}, {\"id\":", "BriefStub, FrameworkStub, LotStub content_loader = ContentLoader('tests/fixtures/content') content_loader.load_manifest('dos', 'data', 'edit_brief') questions_builder", "assert framework['name'] == 'Digital Outcomes and Specialists 4' assert framework['slug']", "helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allow_withdrawn=allow_withdrawn ) is result @pytest.mark.parametrize(", "[ {\"id\": \"five\", \"niceToHaveRequirements\": [True, True, True, True, True]}, {\"id\":", "True]}, {\"id\": \"five\", \"niceToHaveRequirements\": [True, True, True, True, True]}, {\"id\":", "sections_with_required_questions[section.slug] def test_count_unanswered_questions(self): brief = { 'status': 'draft', 'frameworkSlug': 'dos',", "data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[provided_lot], ).single_result_response() framework, lot =", "\"niceToHaveRequirements\": [True, True, False, False, True]}, {\"id\": \"five\", \"niceToHaveRequirements\": [True,", "NotFound import app.main.helpers as helpers from dmcontent.content_loader import ContentLoader from", "\"niceToHaveRequirements\": [True, True, True, True, True]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True,", "test_count_unanswered_questions(self): brief = { 'status': 'draft', 'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists',", "\"niceToHaveRequirements\": [False, False, False, True, False]}, {'id': 'zero', 'niceToHaveRequirements': [False,", "\"to\", \"have\", \"yes\", \"please\"]} assert 
helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {'id':", "= content.summary(brief) unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections) assert unanswered_required == 2", "False, False, True, False]}, {'id': 'zero', 'niceToHaveRequirements': [False, False, False,", "def test_section_has_at_least_one_required_question(self): content = content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'} ) sections_with_required_questions", "unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections) assert unanswered_required == 2 assert unanswered_optional ==", "2, 'unanswered_optional': 2 }] def test_get_sorted_responses_for_brief(self): data_api_client = mock.Mock() data_api_client.find_brief_responses.return_value", "helpers from dmcontent.content_loader import ContentLoader from dmtestutils.api_model_stubs import BriefStub, FrameworkStub,", "LotStub(slug='digital-specialists', allows_brief=False).response() ] ).single_result_response() with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client,", "= FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[ LotStub(slug='digital-specialists', allows_brief=False).response() ] ).single_result_response() with", "[False, False, False, False, False]} ] def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self): data_api_client =", "test_is_brief_correct_allowed_statuses(self, allowed_statuses, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct(", "{\"id\": \"zero\", \"niceToHaveRequirements\": [False, False, False, False, False]}, {\"id\": \"three\",", "[False, False, False, True, False]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True,", "assert 
framework['status'] == \"live\" assert framework['name'] == 'Digital Outcomes and", ") def test_is_brief_correct(self, framework, lot, user, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4',", "FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[provided_lot], ).single_result_response() framework, lot = helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4', 'digital-specialists',", "'dos', 'lotSlug': 'digital-specialists', 'required1': True, 'unanswered_required': 2, 'unanswered_optional': 2 }]", "1, \"niceToHaveRequirements\": [\"Nice\", \"to\", \"have\", \"yes\", \"please\"]} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client)", "{'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]}, {'id': 'three',", "[True, True, True, True, False]}, {'id': 'three', 'niceToHaveRequirements': [True, True,", "is False def test_section_has_at_least_one_required_question(self): content = content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'}", "True), (['closed'], False) ] ) def test_is_brief_correct_allowed_statuses(self, allowed_statuses, result): brief", "result @pytest.mark.parametrize( ['status', 'allow_withdrawn', 'result'], [ ('withdrawn', True, True), ('withdrawn',", "{'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]}, {'id': 'five',", "mock.Mock() data_api_client.find_brief_responses.return_value = { \"briefResponses\": [ {\"id\": \"five\"}, {\"id\": \"zero\"},", "123) is True assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False def test_brief_can_be_edited(self):", "\"five\"} ] } brief = {\"id\": 1, \"niceToHaveRequirements\": []} assert", "['framework', 'lot', 'user', 'result'], [ ('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True), ('not-digital-outcomes-and-specialists',", "[True, True, True, True, False]}, ] } brief = {\"id\":", 
"= content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'} ) sections = content.summary(brief) unanswered_required,", "import NotFound import app.main.helpers as helpers from dmcontent.content_loader import ContentLoader", "framework, lot = helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client) assert framework['status'] == \"live\"", "lot = helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client) assert framework['status'] == \"live\" assert", "'lotSlug': 'digital-specialists', 'required1': True }] assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == [{", "] } brief = {\"id\": 1, \"niceToHaveRequirements\": [\"Nice\", \"to\", \"have\",", "pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client, must_allow_brief=True, ) @pytest.mark.parametrize( ['framework', 'lot',", "False, False, False]}, {\"id\": \"three\", \"niceToHaveRequirements\": [True, True, False, False,", "\"niceToHaveRequirements\": []} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {\"id\": \"five\"}, {\"id\":", "{'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]}, {'id': 'four',", "= content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'} ) sections_with_required_questions = { 'section-1':", "is True assert lot == provided_lot def test_get_framework_and_lot_404s_for_wrong_framework_status(self): data_api_client =", "framework['status'] == \"live\" assert framework['name'] == 'Digital Outcomes and Specialists", "(['closed'], False) ] ) def test_is_brief_correct_allowed_statuses(self, allowed_statuses, result): brief =", "data_api_client.find_brief_responses.return_value = { \"briefResponses\": [ {\"id\": \"five\"}, 
{\"id\": \"zero\"}, {\"id\":", "True, True, True, False]}, {\"id\": \"one\", \"niceToHaveRequirements\": [False, False, False,", "= ContentLoader('tests/fixtures/content') content_loader.load_manifest('dos', 'data', 'edit_brief') questions_builder = content_loader.get_manifest('dos', 'edit_brief') class", "False, False), ('live', True, True), ('live', False, True), ] )", "= { 'section-1': True, 'section-2': True, 'section-4': False, 'section-5': True", "content_loader.get_manifest('dos', 'edit_brief') class TestBuyersHelpers(object): def test_get_framework_and_lot(self): provided_lot = LotStub(slug='digital-specialists', allows_brief=True).response()", "import ContentLoader from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub content_loader =", "content_loader.load_manifest('dos', 'data', 'edit_brief') questions_builder = content_loader.get_manifest('dos', 'edit_brief') class TestBuyersHelpers(object): def", "'required1': True, 'unanswered_required': 2, 'unanswered_optional': 2 }] def test_get_sorted_responses_for_brief(self): data_api_client", "{\"id\": \"five\"} ] } brief = {\"id\": 1, \"niceToHaveRequirements\": []}", "import mock import pytest from werkzeug.exceptions import NotFound import app.main.helpers", "= BriefStub(user_id=123).response() assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234)", "'result'], [ ('withdrawn', True, True), ('withdrawn', False, False), ('live', True,", "allow_withdrawn, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response() assert helpers.buyers_helpers.is_brief_correct( brief,", "[ {'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]}, {'id':", "\"five\"}, {\"id\": \"zero\"}, {\"id\": \"three\"}, {\"id\": \"five\"} ] } brief", "4' assert framework['slug'] == 'digital-outcomes-and-specialists-4' assert 
framework['clarificationQuestionsOpen'] is True assert", "helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False def test_brief_can_be_edited(self): assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is True", "'five', 'niceToHaveRequirements': [True, True, True, True, True]}, {'id': 'four', 'niceToHaveRequirements':", "{ 'status': 'draft', 'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists', 'required1': True }", "('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False), ('digital-outcomes-and-specialists-4', 'not-digital-specialists', 123, False), ('digital-outcomes-and-specialists-4', 'digital-specialists',", "True, True, False]}, ] } brief = {\"id\": 1, \"niceToHaveRequirements\":", "True, False, False, True]}, {\"id\": \"five\", \"niceToHaveRequirements\": [True, True, True,", "status, allow_withdrawn, result): brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response() assert helpers.buyers_helpers.is_brief_correct(", "def test_add_unanswered_counts_to_briefs(self): briefs = [{ 'status': 'draft', 'frameworkSlug': 'dos', 'lotSlug':", "result', [ (['live', 'closed'], True), (['closed'], False) ] ) def", "True, False]}, {'id': 'three', 'niceToHaveRequirements': [True, True, False, False, True]},", "2 def test_add_unanswered_counts_to_briefs(self): briefs = [{ 'status': 'draft', 'frameworkSlug': 'dos',", "{\"id\": 1, \"niceToHaveRequirements\": []} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {\"id\":", "False def test_section_has_at_least_one_required_question(self): content = content_loader.get_manifest('dos', 'edit_brief').filter( {'lot': 'digital-specialists'} )", "assert helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allowed_statuses=allowed_statuses ) is result", "status='open', lots=[ 
LotStub(slug='digital-specialists', allows_brief=True).response() ] ).single_result_response() with pytest.raises(NotFound): helpers.buyers_helpers.get_framework_and_lot( 'digital-outcomes-and-specialists-4',", "'digital-specialists', 124, False), ] ) def test_is_brief_correct(self, framework, lot, user,", "'digital-specialists', data_api_client) assert framework['status'] == \"live\" assert framework['name'] == 'Digital", "def test_count_unanswered_questions(self): brief = { 'status': 'draft', 'frameworkSlug': 'dos', 'lotSlug':", "False, True, False]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True, True, True,", "mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[provided_lot], ).single_result_response() framework, lot", "(['live', 'closed'], True), (['closed'], False) ] ) def test_is_brief_correct_allowed_statuses(self, allowed_statuses,", "brief = {\"id\": 1, \"niceToHaveRequirements\": [\"Nice\", \"to\", \"have\", \"yes\", \"please\"]}", "True } for section in content.sections: assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) \\ ==", "123, True), ('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False), ('digital-outcomes-and-specialists-4', 'not-digital-specialists', 123, False),", "== [ {\"id\": \"five\"}, {\"id\": \"zero\"}, {\"id\": \"three\"}, {\"id\": \"five\"}", "def test_get_framework_and_lot_404s_if_allows_brief_required(self): data_api_client = mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live',", "False]}, {'id': 'zero', 'niceToHaveRequirements': [False, False, False, False, False]} ]", "assert unanswered_required == 2 assert unanswered_optional == 2 def test_add_unanswered_counts_to_briefs(self):", "data_api_client, must_allow_brief=True, ) @pytest.mark.parametrize( ['framework', 'lot', 'user', 'result'], [ 
('digital-outcomes-and-specialists-4',", "Outcomes and Specialists 4' assert framework['slug'] == 'digital-outcomes-and-specialists-4' assert framework['clarificationQuestionsOpen']", "'allow_withdrawn', 'result'], [ ('withdrawn', True, True), ('withdrawn', False, False), ('live',", "helpers.buyers_helpers.count_unanswered_questions(sections) assert unanswered_required == 2 assert unanswered_optional == 2 def", "helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {\"id\": \"five\"}, {\"id\": \"zero\"}, {\"id\": \"three\"},", "= mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[ LotStub(slug='digital-specialists', allows_brief=False).response()", "'section-2': True, 'section-4': False, 'section-5': True } for section in", "('live', True, True), ('live', False, True), ] ) def test_if_brief_correct_allow_withdrawn(self,", "234) is False def test_brief_can_be_edited(self): assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is True assert", "mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='open', lots=[ LotStub(slug='digital-specialists', allows_brief=True).response() ]", "True assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='live').response()) is False def test_section_has_at_least_one_required_question(self): content = content_loader.get_manifest('dos',", "{'id': 'three', 'niceToHaveRequirements': [True, True, False, False, True]}, {\"id\": \"one\",", "False, False]}, {\"id\": \"three\", \"niceToHaveRequirements\": [True, True, False, False, True]},", "True, True), ('live', False, True), ] ) def test_if_brief_correct_allow_withdrawn(self, status,", "True), ('live', False, True), ] ) def test_if_brief_correct_allow_withdrawn(self, status, allow_withdrawn,", "brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', 
user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4', 'digital-specialists',", "{ \"briefResponses\": [ {\"id\": \"five\"}, {\"id\": \"zero\"}, {\"id\": \"three\"}, {\"id\":", "True, False]}, {\"id\": \"four\", \"niceToHaveRequirements\": [True, True, True, True, False]},", "for section in content.sections: assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) \\ == sections_with_required_questions[section.slug] def", ").single_result_response() framework, lot = helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4', 'digital-specialists', data_api_client) assert framework['status'] ==", "data_api_client = mock.Mock() data_api_client.find_brief_responses.return_value = { \"briefResponses\": [ {\"id\": \"five\",", "'frameworkSlug': 'dos', 'lotSlug': 'digital-specialists', 'required1': True }] assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader)", "helpers.buyers_helpers.is_brief_correct( brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allowed_statuses=allowed_statuses ) is result def", "True, True, True, True]}, {'id': 'five', 'niceToHaveRequirements': [True, True, True,", "== \"live\" assert framework['name'] == 'Digital Outcomes and Specialists 4'", "{'lot': 'digital-specialists'} ) sections = content.summary(brief) unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections)", "assert framework['slug'] == 'digital-outcomes-and-specialists-4' assert framework['clarificationQuestionsOpen'] is True assert lot", "('digital-outcomes-and-specialists-4', 'not-digital-specialists', 123, False), ('digital-outcomes-and-specialists-4', 'digital-specialists', 124, False), ] )", "\"briefResponses\": [ {\"id\": \"five\"}, {\"id\": \"zero\"}, {\"id\": \"three\"}, {\"id\": \"five\"}", "brief = 
BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response() assert helpers.buyers_helpers.is_brief_correct(brief, framework, lot, user)", "\"have\", \"yes\", \"please\"]} assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [ {'id': 'five',", "mock.Mock() data_api_client.get_framework.return_value = FrameworkStub( slug='digital-outcomes-and-specialists-4', status='live', lots=[ LotStub(slug='digital-specialists', allows_brief=False).response() ]", "content.sections: assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) \\ == sections_with_required_questions[section.slug] def test_count_unanswered_questions(self): brief =" ]
[ "valuesD = [27.8, 55.3, 16.8, 17.1, 23.1] valuesI = [27,", "epBy = plt.errorBarXYBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\",", "+ 5\", \"XLow = X -1\", \"XHigh = X +", "\"X\", \"Y\").show() pBy = plt.plotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() pBy", "\"X\", \"XLow\", \"XHigh\", \"Y\", \"USym\").show() ep3 = plt.errorBarY(\"S1\", t, \"X\",", "= plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 = plt.catPlot(\"S1\", t, \"X\",", "= plt.catPlot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() cpBy = plt.catPlotBy(\"S1\", t,", "plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\ .newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t, \"Time\", \"Open\",", "valuesD).show() aep3 = plt.errorBarY(\"S1\", valuesD, valuesD, valuesD, valuesD).show() hp =", "p3 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 = plt.plot(\"S1\", t,", "\"X\", 5).show() hp = plt.histPlot(\"S1\", t, \"X\", 0, 10, 5).show()", "17.1, 23.1] valuesI = [27, 55, 16, 17, 15] ap", "plt.plot(\"S1\", valuesD, valuesI).show() ap = plt.plot3d(\"S1\", valuesI, valuesI, valuesI).show() acp", "\"Y\", \"YLow\", \"YHigh\").show() epBy = plt.errorBarXYBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\",", "ap = plt.plot(\"S1\", valuesD, valuesI).show() ap = plt.plot3d(\"S1\", valuesI, valuesI,", "import deephaven.TableTools as tt import deephaven.Plot as plt t =", "+ 1\", \"USym = i % 2 == 0 ?", "\"X\", \"Y\").show() cpBy = plt.catPlotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() cpBy", "\"B\", \"A\", \"B\"]), tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\",", "\"XHigh\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() ep2 = plt.errorBarX(\"S1\", t, \"X\",", "tt.emptyTable(50)\\ .update(\"X = i + 5\", \"XLow = X -1\",", "= plt.piePlot(\"S1\", t, \"X\", \"Y\") chp = plt.catHistPlot(\"S1\", t, \"X\").show()", "doubles), 
tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\", doubles)) t = t.updateView(\"Time", "new DBDateTime(time + (MINUTE * i))\") ohlc = plt.ohlcPlot(\"Test1\", t,", "= plt.plot(\"S1\", valuesD, valuesI).show() ap = plt.plot3d(\"S1\", valuesI, valuesI, valuesI).show()", "= plt.plot3d(\"S1\", valuesI, valuesI, valuesI).show() acp = plt.catPlot(\"S1\", categories, valuesI).show()", "plt.errorBarY(\"S1\", valuesD, valuesD, valuesD, valuesD).show() hp = plt.histPlot(\"S1\", valuesD, 5).show()", "\"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 = plt.plot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() pBy", "\"B\"]), tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\", doubles)) t", "valuesD, valuesD, valuesD, valuesD).show() hp = plt.histPlot(\"S1\", valuesD, 5).show() hp", ".ohlcPlotBy(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\", \"USym\") categories =", "16, 17, 15] ap = plt.plot(\"S1\", valuesD, valuesI).show() ap =", "valuesD, valuesD, valuesD).show() aep2 = plt.errorBarX(\"S1\", valuesD, valuesD, valuesD, valuesD).show()", "cp = plt.catPlot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() cpBy = plt.catPlotBy(\"S1\",", "5).show() hp = plt.histPlot(\"S1\", valuesD, 0, 10, 5).show() hp =", "= plt.errorBarX(\"S1\", valuesD, valuesD, valuesD, valuesD).show() aep3 = plt.errorBarY(\"S1\", valuesD,", "acp2 = plt.catPlot3d(\"S1\", categories, categories, valuesD).show() achp = plt.catHistPlot(\"S1\", categories).show()", "\"X\", \"Y\", \"USym\").show() pp = plt.piePlot(\"S1\", t, \"X\", \"Y\") chp", "0, 10, 5).show() ep = plt.errorBarXY(\"S1\", t, \"X\", \"XLow\", \"XHigh\",", "2 == 0 ? 
`AAPL` : `MSFT`\") p = plt.plot(\"S1\",", "ep3 = plt.errorBarY(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\").show() epBy3 =", "\"Close\") ohlcPlotBy = plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\ .newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\",", "== 0 ? `AAPL` : `MSFT`\") p = plt.plot(\"S1\", t,", "= plt.errorBarX(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\").show() epBy2 = plt.errorBarXBy(\"S1\",", "categories = [\"Samsung\", \"Others\", \"Nokia\", \"Apple\", \"MSFT\"] valuesD = [27.8,", "\"Y\").lineColor(\"black\").show() cp2 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 = plt.catPlot(\"S1\",", "\"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 =", "= plt.plot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() p2 = plt.plot(\"S1\", t, \"X\",", "t.updateView(\"Time = new DBDateTime(time + (MINUTE * i))\") ohlc =", "plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show() aep = plt.errorBarXY(\"S1\", valuesD, valuesD, valuesD, valuesD,", "1\", \"Y = Math.random() * 5\", \"YLow = Y -", "= plt.errorBarXY(\"S1\", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show() aep2 =", "= plt.plot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() cp = plt.catPlot(\"S1\",", "55, 16, 17, 15] ap = plt.plot(\"S1\", valuesD, valuesI).show() ap", "valuesD).show() aep2 = plt.errorBarX(\"S1\", valuesD, valuesD, valuesD, valuesD).show() aep3 =", "= [\"Samsung\", \"Others\", \"Nokia\", \"Apple\", \"MSFT\"] valuesD = [27.8, 55.3,", "\"Y\", \"USym\").show() cpBy = plt.catPlot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show()", "plt.plot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() cp = plt.catPlot(\"S1\", t,", "\"Y\") chp = plt.catHistPlot(\"S1\", t, \"X\").show() hp = plt.histPlot(\"S1\", t,", "\"MSFT\"] valuesD = 
[27.8, 55.3, 16.8, 17.1, 23.1] valuesI =", "acp = plt.catPlot(\"S1\", categories, valuesI).show() acp2 = plt.catPlot3d(\"S1\", categories, categories,", "\"USym\").show() ep3 = plt.errorBarY(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\").show() epBy3", "aep = plt.errorBarXY(\"S1\", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show() aep2", "\"X\", \"X\", \"Y\", \"USym\").show() pp = plt.piePlot(\"S1\", t, \"X\", \"Y\")", "plt.plot3d(\"S1\", valuesI, valuesI, valuesI).show() acp = plt.catPlot(\"S1\", categories, valuesI).show() acp2", "plt.errorBarXBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"USym\").show() ep3 = plt.errorBarY(\"S1\",", "= plt.catPlotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() cpBy = plt.catPlot3dBy(\"S1\", t,", "\"YHigh = Y + 1\", \"USym = i % 2", "= plt.errorBarY(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\").show() epBy3 = plt.errorBarYBy(\"S1\",", "hp = plt.histPlot(\"S1\", valuesD, 5).show() hp = plt.histPlot(\"S1\", valuesD, 0,", "\"Y\").lineColor(\"black\").show() p2 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 = plt.plot(\"S1\",", "ohlc = plt.ohlcPlot(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\") ohlcPlotBy", "plt.catHistPlot(\"S1\", categories).show() app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show() aep = plt.errorBarXY(\"S1\",", "t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\", \"USym\") categories = [\"Samsung\",", "1\", \"USym = i % 2 == 0 ? 
`AAPL`", "\"X\", \"Y\", \"USym\").show() cpBy = plt.catPlot3dBy(\"S1\", t, \"X\", \"X\", \"Y\",", "DBDateTime(time + (MINUTE * i))\") ohlc = plt.ohlcPlot(\"Test1\", t, \"Time\",", "\"X\", \"Y\").lineColor(\"black\").show() cp2 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 =", "cpBy = plt.catPlot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() pp =", "\"YLow\", \"YHigh\", \"USym\").show() ep2 = plt.errorBarX(\"S1\", t, \"X\", \"XLow\", \"XHigh\",", "17, 15] ap = plt.plot(\"S1\", valuesD, valuesI).show() ap = plt.plot3d(\"S1\",", "plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 = plt.plot3d(\"S1\", t, \"X\", \"X\",", "\"Open\", \"High\", \"Low\", \"Close\") ohlcPlotBy = plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\ .newAxes()\\", "plt.catPlot3d(\"S1\", categories, categories, valuesD).show() achp = plt.catHistPlot(\"S1\", categories).show() app =", "(MINUTE * i))\") ohlc = plt.ohlcPlot(\"Test1\", t, \"Time\", \"Open\", \"High\",", "= plt.plotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() pBy = plt.plot3dBy(\"S1\", t,", "\"YLow = Y - 1\", \"YHigh = Y + 1\",", "\"X\", \"Y\", \"YLow\", \"YHigh\").show() epBy3 = plt.errorBarYBy(\"S1\", t, \"X\", \"Y\",", "cp3 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 = plt.catPlot(\"S1\", t,", "\"YHigh\").show() epBy3 = plt.errorBarYBy(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show()", "= plt.histPlot(\"S1\", valuesD, 5).show() hp = plt.histPlot(\"S1\", valuesD, 0, 10,", "\"XHigh\", \"Y\", \"YLow\", \"YHigh\").show() epBy = plt.errorBarXYBy(\"S1\", t, \"X\", \"XLow\",", "t, \"X\", \"Y\", \"USym\").show() cpBy = plt.catPlot3dBy(\"S1\", t, \"X\", \"X\",", "\"USym\").show() ep2 = plt.errorBarX(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\").show() epBy2", "\"Y\", \"USym\").show() ep3 = plt.errorBarY(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\").show()", "achp = 
plt.catHistPlot(\"S1\", categories).show() app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show() aep", "\"X\", \"Y\", \"USym\").show() pBy = plt.plot3dBy(\"S1\", t, \"X\", \"X\", \"Y\",", "\"YLow\", \"YHigh\").show() epBy3 = plt.errorBarYBy(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\",", "t = tt.newTable(tt.col(\"USym\", [\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"]), tt.doubleCol(\"Open\",", "tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\", doubles)) t = t.updateView(\"Time = new DBDateTime(time", "\"USym\").show() pBy = plt.plot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() cp", "doubles)) t = t.updateView(\"Time = new DBDateTime(time + (MINUTE *", "= plt.ohlcPlot(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\") ohlcPlotBy =", "plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp = plt.catPlot3d(\"S1\", t, \"X\", \"X\",", "= plt.catPlot3d(\"S1\", categories, categories, valuesD).show() achp = plt.catHistPlot(\"S1\", categories).show() app", "= tt.emptyTable(50)\\ .update(\"X = i + 5\", \"XLow = X", "plt.errorBarX(\"S1\", valuesD, valuesD, valuesD, valuesD).show() aep3 = plt.errorBarY(\"S1\", valuesD, valuesD,", "\"YHigh\", \"USym\").show() doubles = [3, 4, 3, 5, 4, 5]", "\"X\", \"X\", \"Y\", \"USym\").show() cp = plt.catPlot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show()", "= plt.errorBarXBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"USym\").show() ep3 =", "\"Y\", \"USym\").show() pp = plt.piePlot(\"S1\", t, \"X\", \"Y\") chp =", "t, \"X\", 0, 10, 5).show() ep = plt.errorBarXY(\"S1\", t, \"X\",", "= plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp = plt.catPlot3d(\"S1\", t, \"X\",", "i))\") ohlc = plt.ohlcPlot(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\")", "t, \"X\", \"X\", \"Y\").show() cpBy = plt.catPlotBy(\"S1\", t, \"X\", \"Y\",", "plt.errorBarY(\"S1\", t, \"X\", \"Y\", 
\"YLow\", \"YHigh\").show() epBy3 = plt.errorBarYBy(\"S1\", t,", "valuesI).show() acp2 = plt.catPlot3d(\"S1\", categories, categories, valuesD).show() achp = plt.catHistPlot(\"S1\",", "\"YHigh\", \"USym\").show() ep2 = plt.errorBarX(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\").show()", ".update(\"X = i + 5\", \"XLow = X -1\", \"XHigh", "* i))\") ohlc = plt.ohlcPlot(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\",", "categories, valuesI).show() acp2 = plt.catPlot3d(\"S1\", categories, categories, valuesD).show() achp =", "plt.histPlot(\"S1\", t, \"X\", 5).show() hp = plt.histPlot(\"S1\", t, \"X\", 0,", "= tt.newTable(tt.col(\"USym\", [\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"]), tt.doubleCol(\"Open\", doubles),", "\"A\", \"B\", \"A\", \"B\"]), tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\", doubles),", "= plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 = plt.plot3d(\"S1\", t, \"X\",", "ep = plt.errorBarXY(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\").show()", "\"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 = plt.catPlot(\"S1\",", "valuesD, valuesD, valuesD, valuesD).show() aep2 = plt.errorBarX(\"S1\", valuesD, valuesD, valuesD,", "valuesD, valuesD, valuesD).show() aep3 = plt.errorBarY(\"S1\", valuesD, valuesD, valuesD, valuesD).show()", "[\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"]), tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\", doubles),", "= [27.8, 55.3, 16.8, 17.1, 23.1] valuesI = [27, 55,", ".xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\", \"USym\")", "\"XLow\", \"XHigh\", \"Y\", \"USym\").show() ep3 = plt.errorBarY(\"S1\", t, \"X\", \"Y\",", "doubles = [3, 4, 3, 5, 4, 5] time =", "cp4 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp = 
plt.catPlot3d(\"S1\", t,", "[27, 55, 16, 17, 15] ap = plt.plot(\"S1\", valuesD, valuesI).show()", "5).show() ep = plt.errorBarXY(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\",", "valuesI = [27, 55, 16, 17, 15] ap = plt.plot(\"S1\",", "plt.errorBarXY(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\").show() epBy =", "1\", \"YHigh = Y + 1\", \"USym = i %", "= plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show() aep = plt.errorBarXY(\"S1\", valuesD, valuesD, valuesD,", "- 1\", \"YHigh = Y + 1\", \"USym = i", "5\", \"XLow = X -1\", \"XHigh = X + 1\",", "plt.catPlotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() cpBy = plt.catPlot3dBy(\"S1\", t, \"X\",", "\"Apple\", \"MSFT\"] valuesD = [27.8, 55.3, 16.8, 17.1, 23.1] valuesI", "tt.doubleCol(\"Close\", doubles)) t = t.updateView(\"Time = new DBDateTime(time + (MINUTE", "hp = plt.histPlot(\"S1\", t, \"X\", 0, 10, 5).show() ep =", "\"X\", \"Y\", \"USym\").show() cp = plt.catPlot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() cp2", "[\"Samsung\", \"Others\", \"Nokia\", \"Apple\", \"MSFT\"] valuesD = [27.8, 55.3, 16.8,", "ap = plt.plot3d(\"S1\", valuesI, valuesI, valuesI).show() acp = plt.catPlot(\"S1\", categories,", "t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4", "\"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp =", "= Y + 1\", \"USym = i % 2 ==", "valuesD, valuesI).show() ap = plt.plot3d(\"S1\", valuesI, valuesI, valuesI).show() acp =", "[27.8, 55.3, 16.8, 17.1, 23.1] valuesI = [27, 55, 16,", "Y - 1\", \"YHigh = Y + 1\", \"USym =", "+ 1\", \"Y = Math.random() * 5\", \"YLow = Y", "p2 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 = plt.plot(\"S1\", t,", 
"plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show()", "\"A\", \"B\"]), tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\", doubles))", "16.8, 17.1, 23.1] valuesI = [27, 55, 16, 17, 15]", "Y + 1\", \"USym = i % 2 == 0", "= plt.histPlot(\"S1\", t, \"X\", 0, 10, 5).show() ep = plt.errorBarXY(\"S1\",", "% 2 == 0 ? `AAPL` : `MSFT`\") p =", "Title\")\\ .newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\",", "= plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 = plt.catPlot(\"S1\", t, \"X\",", "valuesD, valuesD, valuesD, valuesD).show() aep3 = plt.errorBarY(\"S1\", valuesD, valuesD, valuesD,", "valuesD, valuesD).show() hp = plt.histPlot(\"S1\", valuesD, 5).show() hp = plt.histPlot(\"S1\",", "\"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() ep2 = plt.errorBarX(\"S1\", t,", ".newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\",", "t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"USym\").show() ep3 = plt.errorBarY(\"S1\", t,", "p4 = plt.plot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() pBy = plt.plotBy(\"S1\",", "23.1] valuesI = [27, 55, 16, 17, 15] ap =", "t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp", "\"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp = plt.catPlot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() cpBy", "valuesD, valuesD).show() aep2 = plt.errorBarX(\"S1\", valuesD, valuesD, valuesD, valuesD).show() aep3", "t, \"X\", 5).show() hp = plt.histPlot(\"S1\", t, \"X\", 0, 10,", "\"Low\", \"Close\", \"USym\") categories = [\"Samsung\", 
\"Others\", \"Nokia\", \"Apple\", \"MSFT\"]", "10, 5).show() ep = plt.errorBarXY(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\",", "= plt.catPlot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() cp2 = plt.catPlot(\"S1\", t, \"X\",", "plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show()", "= plt.catHistPlot(\"S1\", categories).show() app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show() aep =", "\"Y\", \"YLow\", \"YHigh\", \"USym\").show() ep2 = plt.errorBarX(\"S1\", t, \"X\", \"XLow\",", "\"USym = i % 2 == 0 ? `AAPL` :", "deephaven.Plot as plt t = tt.emptyTable(50)\\ .update(\"X = i +", "= plt.histPlot(\"S1\", t, \"X\", 5).show() hp = plt.histPlot(\"S1\", t, \"X\",", "i + 5\", \"XLow = X -1\", \"XHigh = X", "app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show() aep = plt.errorBarXY(\"S1\", valuesD, valuesD,", "\"YHigh\").show() epBy = plt.errorBarXYBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\",", "valuesD, valuesD).show() aep3 = plt.errorBarY(\"S1\", valuesD, valuesD, valuesD, valuesD).show() hp", "\"Low\", \"Close\") ohlcPlotBy = plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\ .newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\", "= [27, 55, 16, 17, 15] ap = plt.plot(\"S1\", valuesD,", "1491946585000000000 t = tt.newTable(tt.col(\"USym\", [\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"]),", "valuesI).show() acp = plt.catPlot(\"S1\", categories, valuesI).show() acp2 = plt.catPlot3d(\"S1\", categories,", "\"X\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() doubles = [3, 4, 3,", "valuesD).show() hp = plt.histPlot(\"S1\", valuesD, 5).show() hp = plt.histPlot(\"S1\", valuesD,", "5).show() hp = plt.histPlot(\"S1\", t, \"X\", 0, 10, 5).show() ep", "valuesI).pointLabelFormat(\"{0}\").show() aep = 
plt.errorBarXY(\"S1\", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show()", "t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\") ohlcPlotBy = plt.figure().newChart(0)\\ .chartTitle(\"Chart", "pBy = plt.plot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() cp =", "\"Close\", \"USym\") categories = [\"Samsung\", \"Others\", \"Nokia\", \"Apple\", \"MSFT\"] valuesD", "t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp = plt.catPlot3d(\"S1\", t, \"X\", \"X\", \"Y\").show()", "\"X\", \"Y\").lineColor(\"black\").show() p2 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 =", "= [3, 4, 3, 5, 4, 5] time = 1491946585000000000", "= X -1\", \"XHigh = X + 1\", \"Y =", "= plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\ .newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t, \"Time\",", "cpBy = plt.catPlotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() cpBy = plt.catPlot3dBy(\"S1\",", "deephaven.TableTools as tt import deephaven.Plot as plt t = tt.emptyTable(50)\\", "as tt import deephaven.Plot as plt t = tt.emptyTable(50)\\ .update(\"X", "\"Time\", \"Open\", \"High\", \"Low\", \"Close\", \"USym\") categories = [\"Samsung\", \"Others\",", "plt.catPlot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() pp = plt.piePlot(\"S1\", t,", "valuesI, valuesI).show() acp = plt.catPlot(\"S1\", categories, valuesI).show() acp2 = plt.catPlot3d(\"S1\",", "t, \"X\").show() hp = plt.histPlot(\"S1\", t, \"X\", 5).show() hp =", "doubles), tt.doubleCol(\"Close\", doubles)) t = t.updateView(\"Time = new DBDateTime(time +", "X + 1\", \"Y = Math.random() * 5\", \"YLow =", "time = 1491946585000000000 t = tt.newTable(tt.col(\"USym\", [\"A\", \"B\", \"A\", \"B\",", "plt.histPlot(\"S1\", valuesD, 0, 10, 5).show() hp = plt.histPlot(\"S1\", valuesI, 5).show()", "\"Open\", \"High\", \"Low\", \"Close\", \"USym\") categories = [\"Samsung\", \"Others\", \"Nokia\",", "\"Y\", \"YLow\", \"YHigh\", \"USym\").show() doubles 
= [3, 4, 3, 5,", "plt.errorBarXYBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() ep2", "\"High\", \"Low\", \"Close\") ohlcPlotBy = plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\ .newAxes()\\ .xLabel(\"X\")\\", "plt.errorBarYBy(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() doubles = [3,", "\"USym\").show() cp = plt.catPlot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() cp2 = plt.catPlot(\"S1\",", "cp = plt.catPlot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() cp2 = plt.catPlot(\"S1\", t,", "3, 5, 4, 5] time = 1491946585000000000 t = tt.newTable(tt.col(\"USym\",", "= t.updateView(\"Time = new DBDateTime(time + (MINUTE * i))\") ohlc", "plt.plot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() p2 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show()", ".yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\", \"USym\") categories", "t, \"X\", \"Y\", \"USym\").show() pBy = plt.plot3dBy(\"S1\", t, \"X\", \"X\",", "= plt.histPlot(\"S1\", valuesD, 0, 10, 5).show() hp = plt.histPlot(\"S1\", valuesI,", "\"High\", \"Low\", \"Close\", \"USym\") categories = [\"Samsung\", \"Others\", \"Nokia\", \"Apple\",", "\"USym\") categories = [\"Samsung\", \"Others\", \"Nokia\", \"Apple\", \"MSFT\"] valuesD =", "\"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() ep2 = plt.errorBarX(\"S1\",", "as plt t = tt.emptyTable(50)\\ .update(\"X = i + 5\",", "plt.catPlot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() cp2 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show()", "aep2 = plt.errorBarX(\"S1\", valuesD, valuesD, valuesD, valuesD).show() aep3 = plt.errorBarY(\"S1\",", "t, \"X\", \"Y\").lineColor(\"black\").show() p2 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3", "\"Y = Math.random() * 5\", \"YLow = Y - 1\",", "\"Y\").show() pBy = 
plt.plotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() pBy =", "\"Y\").show() epBy2 = plt.errorBarXBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"USym\").show()", "-1\", \"XHigh = X + 1\", \"Y = Math.random() *", "tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\", doubles)) t =", "\"X\", \"X\", \"Y\").show() cpBy = plt.catPlotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show()", "Math.random() * 5\", \"YLow = Y - 1\", \"YHigh =", ": `MSFT`\") p = plt.plot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() p2 =", "\"XHigh = X + 1\", \"Y = Math.random() * 5\",", "* 5\", \"YLow = Y - 1\", \"YHigh = Y", "plt.histPlot(\"S1\", valuesD, 5).show() hp = plt.histPlot(\"S1\", valuesD, 0, 10, 5).show()", "= new DBDateTime(time + (MINUTE * i))\") ohlc = plt.ohlcPlot(\"Test1\",", "t, \"X\", \"Y\", \"YLow\", \"YHigh\").show() epBy3 = plt.errorBarYBy(\"S1\", t, \"X\",", "= Math.random() * 5\", \"YLow = Y - 1\", \"YHigh", "= plt.catHistPlot(\"S1\", t, \"X\").show() hp = plt.histPlot(\"S1\", t, \"X\", 5).show()", "tt.newTable(tt.col(\"USym\", [\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"]), tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\",", "plt t = tt.emptyTable(50)\\ .update(\"X = i + 5\", \"XLow", "\"X\", \"XLow\", \"XHigh\", \"Y\").show() epBy2 = plt.errorBarXBy(\"S1\", t, \"X\", \"XLow\",", "valuesD).show() achp = plt.catHistPlot(\"S1\", categories).show() app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show()", "\"X\", \"Y\") chp = plt.catHistPlot(\"S1\", t, \"X\").show() hp = plt.histPlot(\"S1\",", "= plt.catPlot(\"S1\", categories, valuesI).show() acp2 = plt.catPlot3d(\"S1\", categories, categories, valuesD).show()", "\"YLow\", \"YHigh\").show() epBy = plt.errorBarXYBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\",", "pp = plt.piePlot(\"S1\", t, \"X\", \"Y\") chp = plt.catHistPlot(\"S1\", t,", "\"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", 
\"YHigh\").show() epBy = plt.errorBarXYBy(\"S1\", t,", "plt.catPlot(\"S1\", categories, valuesI).show() acp2 = plt.catPlot3d(\"S1\", categories, categories, valuesD).show() achp", "5\", \"YLow = Y - 1\", \"YHigh = Y +", "t, \"X\", \"Y\").lineColor(\"black\").show() cp2 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3", "\"Y\", \"USym\").show() pBy = plt.plot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show()", "= plt.errorBarXYBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show()", "t, \"X\", \"X\", \"Y\").show() pBy = plt.plotBy(\"S1\", t, \"X\", \"Y\",", "\"Y\", \"YLow\", \"YHigh\").show() epBy3 = plt.errorBarYBy(\"S1\", t, \"X\", \"Y\", \"YLow\",", "\"XHigh\", \"Y\").show() epBy2 = plt.errorBarXBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\",", "cp2 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 = plt.catPlot(\"S1\", t,", "\"Time\", \"Open\", \"High\", \"Low\", \"Close\") ohlcPlotBy = plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\", "\"USym\").show() pp = plt.piePlot(\"S1\", t, \"X\", \"Y\") chp = plt.catHistPlot(\"S1\",", "\"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 =", "\"Others\", \"Nokia\", \"Apple\", \"MSFT\"] valuesD = [27.8, 55.3, 16.8, 17.1,", ".chartTitle(\"Chart Title\")\\ .newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t, \"Time\", \"Open\", \"High\",", "\"YLow\", \"YHigh\", \"USym\").show() doubles = [3, 4, 3, 5, 4,", "\"X\").show() hp = plt.histPlot(\"S1\", t, \"X\", 5).show() hp = plt.histPlot(\"S1\",", "epBy2 = plt.errorBarXBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"USym\").show() ep3", "ohlcPlotBy = plt.figure().newChart(0)\\ .chartTitle(\"Chart Title\")\\ .newAxes()\\ .xLabel(\"X\")\\ .yLabel(\"Y\")\\ .ohlcPlotBy(\"Test1\", t,", "\"XLow = X -1\", \"XHigh = X 
+ 1\", \"Y", "p4 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 = plt.plot3d(\"S1\", t,", "= plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 = plt.plot(\"S1\", t, \"X\",", "\"USym\").show() doubles = [3, 4, 3, 5, 4, 5] time", "doubles), tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\", doubles)) t = t.updateView(\"Time = new", "hp = plt.histPlot(\"S1\", t, \"X\", 5).show() hp = plt.histPlot(\"S1\", t,", "\"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 = plt.plot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() pBy =", "\"XLow\", \"XHigh\", \"Y\").show() epBy2 = plt.errorBarXBy(\"S1\", t, \"X\", \"XLow\", \"XHigh\",", "t, \"X\", \"XLow\", \"XHigh\", \"Y\").show() epBy2 = plt.errorBarXBy(\"S1\", t, \"X\",", "= plt.errorBarY(\"S1\", valuesD, valuesD, valuesD, valuesD).show() hp = plt.histPlot(\"S1\", valuesD,", "categories, valuesD).show() achp = plt.catHistPlot(\"S1\", categories).show() app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories,", "ep2 = plt.errorBarX(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\").show() epBy2 =", "55.3, 16.8, 17.1, 23.1] valuesI = [27, 55, 16, 17,", "\"USym\").show() cpBy = plt.catPlot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() pp", "\"Nokia\", \"Apple\", \"MSFT\"] valuesD = [27.8, 55.3, 16.8, 17.1, 23.1]", "\"B\", \"A\", \"B\", \"A\", \"B\"]), tt.doubleCol(\"Open\", doubles), tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\",", "valuesI).show() ap = plt.plot3d(\"S1\", valuesI, valuesI, valuesI).show() acp = plt.catPlot(\"S1\",", "plt.errorBarX(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\").show() epBy2 = plt.errorBarXBy(\"S1\", t,", "t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() ep2 =", "valuesD, valuesD, valuesD, valuesD, valuesD).show() aep2 = plt.errorBarX(\"S1\", valuesD, valuesD,", "t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\").show() epBy = plt.errorBarXYBy(\"S1\",", 
"plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show()", "[3, 4, 3, 5, 4, 5] time = 1491946585000000000 t", "t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 = plt.plot3d(\"S1\", t, \"X\", \"X\", \"Y\").show()", "\"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 = plt.plot3d(\"S1\",", "= plt.errorBarXY(\"S1\", t, \"X\", \"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\").show() epBy", "t, \"X\", \"X\", \"Y\", \"USym\").show() cp = plt.catPlot(\"S1\", t, \"X\",", "valuesD, 5).show() hp = plt.histPlot(\"S1\", valuesD, 0, 10, 5).show() hp", "= i % 2 == 0 ? `AAPL` : `MSFT`\")", "tt import deephaven.Plot as plt t = tt.emptyTable(50)\\ .update(\"X =", "import deephaven.Plot as plt t = tt.emptyTable(50)\\ .update(\"X = i", "= plt.errorBarYBy(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() doubles =", "plt.catHistPlot(\"S1\", t, \"X\").show() hp = plt.histPlot(\"S1\", t, \"X\", 5).show() hp", "epBy3 = plt.errorBarYBy(\"S1\", t, \"X\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() doubles", "\"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp = plt.catPlot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() cpBy =", "plt.piePlot(\"S1\", t, \"X\", \"Y\") chp = plt.catHistPlot(\"S1\", t, \"X\").show() hp", "t, \"X\", \"Y\", \"YLow\", \"YHigh\", \"USym\").show() doubles = [3, 4,", "t = t.updateView(\"Time = new DBDateTime(time + (MINUTE * i))\")", "t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4", "\"Y\").show() cpBy = plt.catPlotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() cpBy =", "0 ? 
`AAPL` : `MSFT`\") p = plt.plot(\"S1\", t, \"X\",", "5, 4, 5] time = 1491946585000000000 t = tt.newTable(tt.col(\"USym\", [\"A\",", "= i + 5\", \"XLow = X -1\", \"XHigh =", "\"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 = plt.plot(\"S1\",", "plt.plot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() pBy = plt.plotBy(\"S1\", t, \"X\",", "\"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() cp = plt.catPlot3d(\"S1\",", "plt.catPlot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() cpBy = plt.catPlotBy(\"S1\", t, \"X\",", "chp = plt.catHistPlot(\"S1\", t, \"X\").show() hp = plt.histPlot(\"S1\", t, \"X\",", "pBy = plt.plotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() pBy = plt.plot3dBy(\"S1\",", "\"Y\", \"USym\").show() cp = plt.catPlot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() cp2 =", "`AAPL` : `MSFT`\") p = plt.plot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() p2", "\"X\", 0, 10, 5).show() ep = plt.errorBarXY(\"S1\", t, \"X\", \"XLow\",", "valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show() aep2 = plt.errorBarX(\"S1\", valuesD,", "\"XHigh\", \"Y\", \"USym\").show() ep3 = plt.errorBarY(\"S1\", t, \"X\", \"Y\", \"YLow\",", "plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() p3 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show()", "4, 3, 5, 4, 5] time = 1491946585000000000 t =", "4, 5] time = 1491946585000000000 t = tt.newTable(tt.col(\"USym\", [\"A\", \"B\",", "plt.ohlcPlot(\"Test1\", t, \"Time\", \"Open\", \"High\", \"Low\", \"Close\") ohlcPlotBy = plt.figure().newChart(0)\\", "t, \"X\", \"Y\") chp = plt.catHistPlot(\"S1\", t, \"X\").show() hp =", "tt.doubleCol(\"High\", doubles), tt.doubleCol(\"Low\", doubles), tt.doubleCol(\"Close\", doubles)) t = 
t.updateView(\"Time =", "= plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 = plt.plot(\"S1\", t, \"X\",", "valuesD, valuesD, valuesD).show() hp = plt.histPlot(\"S1\", valuesD, 5).show() hp =", "= plt.catPlot3dBy(\"S1\", t, \"X\", \"X\", \"Y\", \"USym\").show() pp = plt.piePlot(\"S1\",", "plt.plotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show() pBy = plt.plot3dBy(\"S1\", t, \"X\",", "= 1491946585000000000 t = tt.newTable(tt.col(\"USym\", [\"A\", \"B\", \"A\", \"B\", \"A\",", "categories, categories, valuesD).show() achp = plt.catHistPlot(\"S1\", categories).show() app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\",", "\"XLow\", \"XHigh\", \"Y\", \"YLow\", \"YHigh\").show() epBy = plt.errorBarXYBy(\"S1\", t, \"X\",", "aep3 = plt.errorBarY(\"S1\", valuesD, valuesD, valuesD, valuesD).show() hp = plt.histPlot(\"S1\",", "\"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() p4 = plt.plot(\"S1\", t, \"X\", \"Y\").plotStyle(\"area\").seriesColor(\"red\").show() p4 =", "= plt.plot3d(\"S1\", t, \"X\", \"X\", \"Y\").show() pBy = plt.plotBy(\"S1\", t,", "\"X\", \"X\", \"Y\").show() pBy = plt.plotBy(\"S1\", t, \"X\", \"Y\", \"USym\").show()", "categories, valuesI).pointLabelFormat(\"{0}\").show() aep = plt.errorBarXY(\"S1\", valuesD, valuesD, valuesD, valuesD, valuesD,", "= X + 1\", \"Y = Math.random() * 5\", \"YLow", "plt.histPlot(\"S1\", t, \"X\", 0, 10, 5).show() ep = plt.errorBarXY(\"S1\", t,", "p = plt.plot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() p2 = plt.plot(\"S1\", t,", "t, \"X\", \"X\", \"Y\", \"USym\").show() pp = plt.piePlot(\"S1\", t, \"X\",", "5] time = 1491946585000000000 t = tt.newTable(tt.col(\"USym\", [\"A\", \"B\", \"A\",", "? 
`AAPL` : `MSFT`\") p = plt.plot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show()", "+ (MINUTE * i))\") ohlc = plt.ohlcPlot(\"Test1\", t, \"Time\", \"Open\",", "= Y - 1\", \"YHigh = Y + 1\", \"USym", "t = tt.emptyTable(50)\\ .update(\"X = i + 5\", \"XLow =", "plt.errorBarXY(\"S1\", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show() aep2 = plt.errorBarX(\"S1\",", "i % 2 == 0 ? `AAPL` : `MSFT`\") p", "t, \"X\", \"Y\").plotStyle(\"bar\").gradientVisible(True).show() cp3 = plt.catPlot(\"S1\", t, \"X\", \"Y\").plotStyle(\"scatter\").pointColor(\"black\").pointSize(2).show() cp4", "15] ap = plt.plot(\"S1\", valuesD, valuesI).show() ap = plt.plot3d(\"S1\", valuesI,", "hp = plt.histPlot(\"S1\", valuesD, 0, 10, 5).show() hp = plt.histPlot(\"S1\",", "categories).show() app = plt.figure().xLabel(\"X\").yLabel(\"Y\").piePlot(\"S1\", categories, valuesI).pointLabelFormat(\"{0}\").show() aep = plt.errorBarXY(\"S1\", valuesD,", "`MSFT`\") p = plt.plot(\"S1\", t, \"X\", \"Y\").lineColor(\"black\").show() p2 = plt.plot(\"S1\",", "valuesI, valuesI, valuesI).show() acp = plt.catPlot(\"S1\", categories, valuesI).show() acp2 =", "X -1\", \"XHigh = X + 1\", \"Y = Math.random()" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "import url_for import logging LOG = logging.getLogger(__name__) from rhoci.test import", "may obtain # a copy of the License at #", "# # Licensed under the Apache License, Version 2.0 (the", "current_app as app from flask import render_template from flask import", "def index(): \"\"\"All tests.\"\"\" jenkins_url = app.config['custom']['jenkins']['url'] uf = url_for('api.all_tests')", "# Copyright 2019 <NAME> # # Licensed under the Apache", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "from __future__ import absolute_import from flask import current_app as app", "distributed under the License is distributed on an \"AS IS\"", "= url_for('api.all_tests') return render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>') def test(class_name, name):", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "\"\"\"Specific test summary.\"\"\" uf = url_for('api.test_to_jobs', class_name=class_name, test_name=name) return render_template('tests/test_to_jobs.html',", "obtain # a copy of the License at # #", "absolute_import from flask import current_app as app from flask import", "applicable law or agreed to in writing, software # distributed", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "2019 <NAME> # # Licensed under the Apache License, Version", "Version 2.0 (the \"License\"); you may # not use this", "specific language governing permissions and limitations # under the License.", "def test(class_name, name): \"\"\"Specific test summary.\"\"\" uf = url_for('api.test_to_jobs', class_name=class_name,", "Copyright 2019 <NAME> # # Licensed under the Apache License,", "render_template from flask import url_for import logging LOG = logging.getLogger(__name__)", "# not use this file except in compliance with the", "not use this file except in compliance with the License.", "OF ANY KIND, either express or implied. See the #", "from flask import current_app as app from flask import render_template", "render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>') def test(class_name, name): \"\"\"Specific test summary.\"\"\"", "writing, software # distributed under the License is distributed on", "jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>') def test(class_name, name): \"\"\"Specific test summary.\"\"\" uf", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "limitations # under the License. 
from __future__ import absolute_import from", "noqa @bp.route('/index') @bp.route('/') def index(): \"\"\"All tests.\"\"\" jenkins_url = app.config['custom']['jenkins']['url']", "app from flask import render_template from flask import url_for import", "test(class_name, name): \"\"\"Specific test summary.\"\"\" uf = url_for('api.test_to_jobs', class_name=class_name, test_name=name)", "in compliance with the License. You may obtain # a", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "License for the specific language governing permissions and limitations #", "index(): \"\"\"All tests.\"\"\" jenkins_url = app.config['custom']['jenkins']['url'] uf = url_for('api.all_tests') return", "the License. from __future__ import absolute_import from flask import current_app", "jenkins_url = app.config['custom']['jenkins']['url'] uf = url_for('api.all_tests') return render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf)", "test summary.\"\"\" uf = url_for('api.test_to_jobs', class_name=class_name, test_name=name) return render_template('tests/test_to_jobs.html', uf=uf)", "import render_template from flask import url_for import logging LOG =", "the License. You may obtain # a copy of the", "url_for import logging LOG = logging.getLogger(__name__) from rhoci.test import bp", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "use this file except in compliance with the License. 
You", "logging.getLogger(__name__) from rhoci.test import bp # noqa @bp.route('/index') @bp.route('/') def", "You may obtain # a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "as app from flask import render_template from flask import url_for", "name): \"\"\"Specific test summary.\"\"\" uf = url_for('api.test_to_jobs', class_name=class_name, test_name=name) return", "import current_app as app from flask import render_template from flask", "from rhoci.test import bp # noqa @bp.route('/index') @bp.route('/') def index():", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "either express or implied. See the # License for the", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "may # not use this file except in compliance with", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "with the License. You may obtain # a copy of", "KIND, either express or implied. See the # License for", "# License for the specific language governing permissions and limitations", "logging LOG = logging.getLogger(__name__) from rhoci.test import bp # noqa", "you may # not use this file except in compliance", "\"License\"); you may # not use this file except in", "\"\"\"All tests.\"\"\" jenkins_url = app.config['custom']['jenkins']['url'] uf = url_for('api.all_tests') return render_template('tests/index.html',", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "express or implied. See the # License for the specific", "this file except in compliance with the License. You may", "language governing permissions and limitations # under the License. 
from", "bp # noqa @bp.route('/index') @bp.route('/') def index(): \"\"\"All tests.\"\"\" jenkins_url", "LOG = logging.getLogger(__name__) from rhoci.test import bp # noqa @bp.route('/index')", "compliance with the License. You may obtain # a copy", "app.config['custom']['jenkins']['url'] uf = url_for('api.all_tests') return render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>') def", "the Apache License, Version 2.0 (the \"License\"); you may #", "governing permissions and limitations # under the License. from __future__", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= logging.getLogger(__name__) from rhoci.test import bp # noqa @bp.route('/index') @bp.route('/')", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "See the # License for the specific language governing permissions", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "and limitations # under the License. from __future__ import absolute_import", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "the # License for the specific language governing permissions and", "License. from __future__ import absolute_import from flask import current_app as", "@bp.route('/') def index(): \"\"\"All tests.\"\"\" jenkins_url = app.config['custom']['jenkins']['url'] uf =", "uf=uf) @bp.route('/class/<class_name>/name/<name>') def test(class_name, name): \"\"\"Specific test summary.\"\"\" uf =", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "# # Unless required by applicable law or agreed to", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "file except in compliance with the License. 
You may obtain", "tests.\"\"\" jenkins_url = app.config['custom']['jenkins']['url'] uf = url_for('api.all_tests') return render_template('tests/index.html', jenkins_url=jenkins_url,", "for the specific language governing permissions and limitations # under", "law or agreed to in writing, software # distributed under", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "import logging LOG = logging.getLogger(__name__) from rhoci.test import bp #", "# noqa @bp.route('/index') @bp.route('/') def index(): \"\"\"All tests.\"\"\" jenkins_url =", "under the Apache License, Version 2.0 (the \"License\"); you may", "except in compliance with the License. You may obtain #", "2.0 (the \"License\"); you may # not use this file", "implied. See the # License for the specific language governing", "permissions and limitations # under the License. from __future__ import", "flask import url_for import logging LOG = logging.getLogger(__name__) from rhoci.test", "flask import current_app as app from flask import render_template from", "rhoci.test import bp # noqa @bp.route('/index') @bp.route('/') def index(): \"\"\"All", "License. You may obtain # a copy of the License", "@bp.route('/class/<class_name>/name/<name>') def test(class_name, name): \"\"\"Specific test summary.\"\"\" uf = url_for('api.test_to_jobs',", "from flask import render_template from flask import url_for import logging", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "ANY KIND, either express or implied. 
See the # License", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "from flask import url_for import logging LOG = logging.getLogger(__name__) from", "@bp.route('/index') @bp.route('/') def index(): \"\"\"All tests.\"\"\" jenkins_url = app.config['custom']['jenkins']['url'] uf", "# Unless required by applicable law or agreed to in", "under the License. from __future__ import absolute_import from flask import", "= app.config['custom']['jenkins']['url'] uf = url_for('api.all_tests') return render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>')", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "to in writing, software # distributed under the License is", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "url_for('api.all_tests') return render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>') def test(class_name, name): \"\"\"Specific", "# under the License. 
from __future__ import absolute_import from flask", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "import absolute_import from flask import current_app as app from flask", "return render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>') def test(class_name, name): \"\"\"Specific test", "or agreed to in writing, software # distributed under the", "import bp # noqa @bp.route('/index') @bp.route('/') def index(): \"\"\"All tests.\"\"\"", "flask import render_template from flask import url_for import logging LOG", "required by applicable law or agreed to in writing, software", "__future__ import absolute_import from flask import current_app as app from", "<NAME> # # Licensed under the Apache License, Version 2.0", "uf = url_for('api.all_tests') return render_template('tests/index.html', jenkins_url=jenkins_url, uf=uf) @bp.route('/class/<class_name>/name/<name>') def test(class_name,", "or implied. See the # License for the specific language", "Apache License, Version 2.0 (the \"License\"); you may # not" ]
[ ".assemble import ( assemble_request, assemble_request_head, assemble_response, assemble_response_head, assemble_body, ) __all__", "connection_close, expected_http_body_size, validate_headers, ) from .assemble import ( assemble_request, assemble_request_head,", "from .assemble import ( assemble_request, assemble_request_head, assemble_response, assemble_response_head, assemble_body, )", "read_request_head, read_response_head, connection_close, expected_http_body_size, validate_headers, ) from .assemble import (", "[ \"read_request_head\", \"read_response_head\", \"connection_close\", \"expected_http_body_size\", \"validate_headers\", \"assemble_request\", \"assemble_request_head\", \"assemble_response\", \"assemble_response_head\",", "= [ \"read_request_head\", \"read_response_head\", \"connection_close\", \"expected_http_body_size\", \"validate_headers\", \"assemble_request\", \"assemble_request_head\", \"assemble_response\",", "__all__ = [ \"read_request_head\", \"read_response_head\", \"connection_close\", \"expected_http_body_size\", \"validate_headers\", \"assemble_request\", \"assemble_request_head\",", "assemble_response_head, assemble_body, ) __all__ = [ \"read_request_head\", \"read_response_head\", \"connection_close\", \"expected_http_body_size\",", "validate_headers, ) from .assemble import ( assemble_request, assemble_request_head, assemble_response, assemble_response_head,", "assemble_request, assemble_request_head, assemble_response, assemble_response_head, assemble_body, ) __all__ = [ \"read_request_head\",", ") __all__ = [ \"read_request_head\", \"read_response_head\", \"connection_close\", \"expected_http_body_size\", \"validate_headers\", \"assemble_request\",", "\"read_request_head\", \"read_response_head\", \"connection_close\", \"expected_http_body_size\", \"validate_headers\", \"assemble_request\", \"assemble_request_head\", \"assemble_response\", \"assemble_response_head\", \"assemble_body\",", "( read_request_head, read_response_head, 
connection_close, expected_http_body_size, validate_headers, ) from .assemble import", "import ( assemble_request, assemble_request_head, assemble_response, assemble_response_head, assemble_body, ) __all__ =", "import ( read_request_head, read_response_head, connection_close, expected_http_body_size, validate_headers, ) from .assemble", "read_response_head, connection_close, expected_http_body_size, validate_headers, ) from .assemble import ( assemble_request,", "\"read_response_head\", \"connection_close\", \"expected_http_body_size\", \"validate_headers\", \"assemble_request\", \"assemble_request_head\", \"assemble_response\", \"assemble_response_head\", \"assemble_body\", ]", ".read import ( read_request_head, read_response_head, connection_close, expected_http_body_size, validate_headers, ) from", "assemble_body, ) __all__ = [ \"read_request_head\", \"read_response_head\", \"connection_close\", \"expected_http_body_size\", \"validate_headers\",", "from .read import ( read_request_head, read_response_head, connection_close, expected_http_body_size, validate_headers, )", "( assemble_request, assemble_request_head, assemble_response, assemble_response_head, assemble_body, ) __all__ = [", "expected_http_body_size, validate_headers, ) from .assemble import ( assemble_request, assemble_request_head, assemble_response,", "assemble_request_head, assemble_response, assemble_response_head, assemble_body, ) __all__ = [ \"read_request_head\", \"read_response_head\",", ") from .assemble import ( assemble_request, assemble_request_head, assemble_response, assemble_response_head, assemble_body,", "assemble_response, assemble_response_head, assemble_body, ) __all__ = [ \"read_request_head\", \"read_response_head\", \"connection_close\"," ]
[ "of error raised.', max_length=50)), ('error_message', models.CharField(help_text='The error message supplied.', max_length=200)),", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "max_length=50)), ('error_message', models.CharField(help_text='The error message supplied.', max_length=200)), ('log', models.OneToOneField(help_text='The token", "('token', models.ForeignKey(help_text='The RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='request_token.RequestToken')), ],", "] operations = [ migrations.CreateModel( name='RequestTokenErrorLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "Django 1.10 on 2017-05-21 19:33 from __future__ import unicode_literals from", "verbose_name='ID')), ('error_type', models.CharField(help_text='The underlying type of error raised.', max_length=50)), ('error_message',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('request_token',", "= [ migrations.CreateModel( name='RequestTokenErrorLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('error_type', models.CharField(help_text='The underlying type", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('error_type', models.CharField(help_text='The underlying type of error", "models.OneToOneField(help_text='The token use against which the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error',", "type of error raised.', max_length=50)), ('error_message', models.CharField(help_text='The error message supplied.',", "on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')), ('token', 
models.ForeignKey(help_text='The RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE,", "dependencies = [ ('request_token', '0008_convert_token_data_to_jsonfield'), ] operations = [ migrations.CreateModel(", "error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')), ('token', models.ForeignKey(help_text='The RequestToken that was", "related_name='error', to='request_token.RequestTokenLog')), ('token', models.ForeignKey(help_text='The RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE, related_name='errors',", "migrations.CreateModel( name='RequestTokenErrorLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('error_type', models.CharField(help_text='The", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('request_token', '0008_convert_token_data_to_jsonfield'),", "on 2017-05-21 19:33 from __future__ import unicode_literals from django.db import", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('request_token', '0008_convert_token_data_to_jsonfield'), ]", "supplied.', max_length=200)), ('log', models.OneToOneField(help_text='The token use against which the error", "[ migrations.CreateModel( name='RequestTokenErrorLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('error_type',", "name='RequestTokenErrorLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('error_type', models.CharField(help_text='The underlying", "('log', models.OneToOneField(help_text='The token use against which the error occurred.', on_delete=django.db.models.deletion.CASCADE,", "the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')), ('token', 
models.ForeignKey(help_text='The RequestToken that", "# -*- coding: utf-8 -*- # Generated by Django 1.10", "'0008_convert_token_data_to_jsonfield'), ] operations = [ migrations.CreateModel( name='RequestTokenErrorLog', fields=[ ('id', models.AutoField(auto_created=True,", "underlying type of error raised.', max_length=50)), ('error_message', models.CharField(help_text='The error message", "by Django 1.10 on 2017-05-21 19:33 from __future__ import unicode_literals", "utf-8 -*- # Generated by Django 1.10 on 2017-05-21 19:33", "Migration(migrations.Migration): dependencies = [ ('request_token', '0008_convert_token_data_to_jsonfield'), ] operations = [", "models.CharField(help_text='The error message supplied.', max_length=200)), ('log', models.OneToOneField(help_text='The token use against", "19:33 from __future__ import unicode_literals from django.db import migrations, models", "use against which the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')), ('token',", "-*- coding: utf-8 -*- # Generated by Django 1.10 on", "against which the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')), ('token', models.ForeignKey(help_text='The", "serialize=False, verbose_name='ID')), ('error_type', models.CharField(help_text='The underlying type of error raised.', max_length=50)),", "RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='request_token.RequestToken')), ], ), ]", "models.CharField(help_text='The underlying type of error raised.', max_length=50)), ('error_message', models.CharField(help_text='The error", "('error_message', models.CharField(help_text='The error message supplied.', max_length=200)), ('log', models.OneToOneField(help_text='The token use", "class Migration(migrations.Migration): dependencies = [ ('request_token', '0008_convert_token_data_to_jsonfield'), ] operations 
=", "max_length=200)), ('log', models.OneToOneField(help_text='The token use against which the error occurred.',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "from __future__ import unicode_literals from django.db import migrations, models import", "operations = [ migrations.CreateModel( name='RequestTokenErrorLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "primary_key=True, serialize=False, verbose_name='ID')), ('error_type', models.CharField(help_text='The underlying type of error raised.',", "error message supplied.', max_length=200)), ('log', models.OneToOneField(help_text='The token use against which", "to='request_token.RequestTokenLog')), ('token', models.ForeignKey(help_text='The RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='request_token.RequestToken')),", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "('request_token', '0008_convert_token_data_to_jsonfield'), ] operations = [ migrations.CreateModel( name='RequestTokenErrorLog', fields=[ ('id',", "error raised.', max_length=50)), ('error_message', models.CharField(help_text='The error message supplied.', max_length=200)), ('log',", "<filename>request_token/migrations/0009_requesttokenerror.py # -*- coding: utf-8 -*- # Generated by Django", "which the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')), ('token', models.ForeignKey(help_text='The RequestToken", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('error_type', models.CharField(help_text='The underlying type of", "1.10 on 2017-05-21 19:33 from __future__ import unicode_literals from django.db", "2017-05-21 19:33 from __future__ import unicode_literals from django.db import 
migrations,", "__future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion", "token use against which the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')),", "# Generated by Django 1.10 on 2017-05-21 19:33 from __future__", "message supplied.', max_length=200)), ('log', models.OneToOneField(help_text='The token use against which the", "= [ ('request_token', '0008_convert_token_data_to_jsonfield'), ] operations = [ migrations.CreateModel( name='RequestTokenErrorLog',", "models.ForeignKey(help_text='The RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='request_token.RequestToken')), ], ),", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('request_token', '0008_convert_token_data_to_jsonfield'), ] operations", "raised.', max_length=50)), ('error_message', models.CharField(help_text='The error message supplied.', max_length=200)), ('log', models.OneToOneField(help_text='The", "[ ('request_token', '0008_convert_token_data_to_jsonfield'), ] operations = [ migrations.CreateModel( name='RequestTokenErrorLog', fields=[", "unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "import unicode_literals from django.db import migrations, models import django.db.models.deletion class", "coding: utf-8 -*- # Generated by Django 1.10 on 2017-05-21", "Generated by Django 1.10 on 2017-05-21 19:33 from __future__ import", "occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')), ('token', models.ForeignKey(help_text='The RequestToken that was used.',", "('error_type', models.CharField(help_text='The underlying type of error raised.', max_length=50)), ('error_message', models.CharField(help_text='The", "-*- # Generated by Django 1.10 on 
2017-05-21 19:33 from" ]
[ "100: print ('This generation has {0} babies'.format(babies)) parents, babies =", "assignment parents, babies = (1, 1) while babies < 100:", "tuple assignment parents, babies = (1, 1) while babies <", "print ('This generation has {0} babies'.format(babies)) parents, babies = (babies,", "while babies < 100: print ('This generation has {0} babies'.format(babies))", "< 100: print ('This generation has {0} babies'.format(babies)) parents, babies", "1) while babies < 100: print ('This generation has {0}", "generation has {0} babies'.format(babies)) parents, babies = (babies, parents +", "('This generation has {0} babies'.format(babies)) parents, babies = (babies, parents", "babies < 100: print ('This generation has {0} babies'.format(babies)) parents,", "Fibonacci, tuple assignment parents, babies = (1, 1) while babies", "has {0} babies'.format(babies)) parents, babies = (babies, parents + babies)", "lines: Fibonacci, tuple assignment parents, babies = (1, 1) while", "<filename>01-basic-programs/04-lines.py #4 lines: Fibonacci, tuple assignment parents, babies = (1,", "(1, 1) while babies < 100: print ('This generation has", "#4 lines: Fibonacci, tuple assignment parents, babies = (1, 1)", "parents, babies = (1, 1) while babies < 100: print", "babies = (1, 1) while babies < 100: print ('This", "= (1, 1) while babies < 100: print ('This generation" ]
[ "= controller_factory def build_controller(controller_class: _ControllerType) -> _Controller: if _controller_factory is", "typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object]) _controller_factory: typing.Optional[ControllerFactory] = None def controller(controller_class: _ControllerType)", "Component.register(controller_class) return controller_class def set_controller_factory(controller_factory: ControllerFactory) -> None: global _controller_factory", "Component _Controller = typing.TypeVar('_Controller') _ControllerType = typing.Type[_Controller] ControllerFactory = typing.NewType('ControllerFactory',", "typing.TypeVar('_Controller') _ControllerType = typing.Type[_Controller] ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object]) _controller_factory:", "controller(controller_class: _ControllerType) -> _ControllerType: Component.register(controller_class) return controller_class def set_controller_factory(controller_factory: ControllerFactory)", "def set_controller_factory(controller_factory: ControllerFactory) -> None: global _controller_factory _controller_factory = controller_factory", "= typing.Type[_Controller] ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object]) _controller_factory: typing.Optional[ControllerFactory] =", "_controller_factory: typing.Optional[ControllerFactory] = None def controller(controller_class: _ControllerType) -> _ControllerType: Component.register(controller_class)", "= typing.TypeVar('_Controller') _ControllerType = typing.Type[_Controller] ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object])", "global _controller_factory _controller_factory = controller_factory def build_controller(controller_class: _ControllerType) -> _Controller:", "-> None: global _controller_factory _controller_factory = controller_factory def build_controller(controller_class: _ControllerType)", "def 
controller(controller_class: _ControllerType) -> _ControllerType: Component.register(controller_class) return controller_class def set_controller_factory(controller_factory:", "import typing from .core import Component _Controller = typing.TypeVar('_Controller') _ControllerType", "_ControllerType = typing.Type[_Controller] ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object]) _controller_factory: typing.Optional[ControllerFactory]", "_ControllerType) -> _ControllerType: Component.register(controller_class) return controller_class def set_controller_factory(controller_factory: ControllerFactory) ->", "controller_class def set_controller_factory(controller_factory: ControllerFactory) -> None: global _controller_factory _controller_factory =", "set_controller_factory(controller_factory: ControllerFactory) -> None: global _controller_factory _controller_factory = controller_factory def", ".core import Component _Controller = typing.TypeVar('_Controller') _ControllerType = typing.Type[_Controller] ControllerFactory", "_controller_factory is None: return controller_class() return _controller_factory(controller_class) def get_component(controller_class: _ControllerType)", "-> _ControllerType: Component.register(controller_class) return controller_class def set_controller_factory(controller_factory: ControllerFactory) -> None:", "return controller_class def set_controller_factory(controller_factory: ControllerFactory) -> None: global _controller_factory _controller_factory", "None: global _controller_factory _controller_factory = controller_factory def build_controller(controller_class: _ControllerType) ->", "typing.Callable[[typing.Type], object]) _controller_factory: typing.Optional[ControllerFactory] = None def controller(controller_class: _ControllerType) ->", "_Controller = typing.TypeVar('_Controller') _ControllerType = typing.Type[_Controller] ControllerFactory = typing.NewType('ControllerFactory', 
typing.Callable[[typing.Type],", "def build_controller(controller_class: _ControllerType) -> _Controller: if _controller_factory is None: return", "if _controller_factory is None: return controller_class() return _controller_factory(controller_class) def get_component(controller_class:", "= typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object]) _controller_factory: typing.Optional[ControllerFactory] = None def controller(controller_class:", "typing from .core import Component _Controller = typing.TypeVar('_Controller') _ControllerType =", "None: return controller_class() return _controller_factory(controller_class) def get_component(controller_class: _ControllerType) -> Component:", "_ControllerType) -> _Controller: if _controller_factory is None: return controller_class() return", "_controller_factory _controller_factory = controller_factory def build_controller(controller_class: _ControllerType) -> _Controller: if", "-> _Controller: if _controller_factory is None: return controller_class() return _controller_factory(controller_class)", "return controller_class() return _controller_factory(controller_class) def get_component(controller_class: _ControllerType) -> Component: return", "= None def controller(controller_class: _ControllerType) -> _ControllerType: Component.register(controller_class) return controller_class", "import Component _Controller = typing.TypeVar('_Controller') _ControllerType = typing.Type[_Controller] ControllerFactory =", "typing.Optional[ControllerFactory] = None def controller(controller_class: _ControllerType) -> _ControllerType: Component.register(controller_class) return", "object]) _controller_factory: typing.Optional[ControllerFactory] = None def controller(controller_class: _ControllerType) -> _ControllerType:", "_controller_factory = controller_factory def build_controller(controller_class: _ControllerType) -> _Controller: if _controller_factory", "ControllerFactory) -> None: global _controller_factory 
_controller_factory = controller_factory def build_controller(controller_class:", "controller_factory def build_controller(controller_class: _ControllerType) -> _Controller: if _controller_factory is None:", "is None: return controller_class() return _controller_factory(controller_class) def get_component(controller_class: _ControllerType) ->", "typing.Type[_Controller] ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object]) _controller_factory: typing.Optional[ControllerFactory] = None", "_ControllerType: Component.register(controller_class) return controller_class def set_controller_factory(controller_factory: ControllerFactory) -> None: global", "build_controller(controller_class: _ControllerType) -> _Controller: if _controller_factory is None: return controller_class()", "from .core import Component _Controller = typing.TypeVar('_Controller') _ControllerType = typing.Type[_Controller]", "_Controller: if _controller_factory is None: return controller_class() return _controller_factory(controller_class) def", "None def controller(controller_class: _ControllerType) -> _ControllerType: Component.register(controller_class) return controller_class def", "controller_class() return _controller_factory(controller_class) def get_component(controller_class: _ControllerType) -> Component: return Component.get_by_cls(controller_class)", "ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object]) _controller_factory: typing.Optional[ControllerFactory] = None def" ]
[ "\"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] # Current version or next version to be", "and re-exported in this file. This allows the real location", "\"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\",", "2.0 (the \"License\"); # you may not use this file", "# See docs/go/core/rules.md#go_test for full documentation. go_test = _go_test_macro #", ") load( \"//go/private/rules:library.bzl\", _go_tool_library = \"go_tool_library\", ) load( \"//go/private/rules:nogo.bzl\", _nogo", "_GoSource # See go/providers.rst#GoPath for full documentation. GoPath = _GoPath", "_declare_toolchains go_context = _go_context go_embed_data = _go_embed_data go_sdk = _go_sdk", "\"GoLibrary\", _GoPath = \"GoPath\", _GoSDK = \"GoSDK\", _GoSource = \"GoSource\",", "= \"go_path\", ) load( \"//go/private/rules:library.bzl\", _go_tool_library = \"go_tool_library\", ) load(", "= _GoSource # See go/providers.rst#GoPath for full documentation. 
GoPath =", "TOOLS_NOGO = [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\",", "_go_path = \"go_path\", ) load( \"//go/private/rules:library.bzl\", _go_tool_library = \"go_tool_library\", )", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "without notice. \"\"\" load( \"//go/private:context.bzl\", _go_context = \"go_context\", ) load(", "\"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] # Current version or", "# check this to determine compatibility. RULES_GO_VERSION = \"0.30.0\" declare_toolchains", "This allows the real location of definitions to change for", "go_rule function has been removed. Use rule directly instead. 
See", "\"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\",", "\"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] # Current version or next version", "are imported and re-exported in this file. This allows the", "docs/go/core/rules.md#go_binary for full documentation. go_binary = _go_binary_macro # See docs/go/core/rules.md#go_test", "for full documentation. go_path = _go_path def go_vet_test(*args, **kwargs): fail(\"The", "load( \"//go/private:providers.bzl\", _GoArchive = \"GoArchive\", _GoArchiveData = \"GoArchiveData\", _GoLibrary =", "documentation. GoArchiveData = _GoArchiveData # See go/providers.rst#GoSDK for full documentation.", "check this to determine compatibility. RULES_GO_VERSION = \"0.30.0\" declare_toolchains =", "See go/providers.rst#GoPath for full documentation. GoPath = _GoPath # See", "real location of definitions to change for easier maintenance. Definitions", "Current version or next version to be tagged. Gazelle and", "use this file except in compliance with the License. #", "load( \"//go/private/rules:nogo.bzl\", _nogo = \"nogo_wrapper\", ) # TOOLS_NOGO is a", "the License. 
\"\"\"Public definitions for Go rules. All public Go", "on this -- # new analyses may discover issues in", "# golang.org/x/tools/go/analysis/passes. # This is not backward compatible, so use", "reserved. # # Licensed under the Apache License, Version 2.0", "fail(\"The go_rule function has been removed. Use rule directly instead.", "\"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396): pass raw cgo sources to cgocall", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "License. # You may obtain a copy of the License", "\"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\",", "go_path = _go_path def go_vet_test(*args, **kwargs): fail(\"The go_vet_test rule has", "def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def _moved(name): fail(name + \" has moved.", "existing builds. 
TOOLS_NOGO = [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\",", "_moved(\"go_host_sdk\") def go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def _moved(name): fail(name", "instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies(): _moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def", "= \"go_toolchain\", ) load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro = \"go_binary_macro\", _go_library_macro =", "under the License is distributed on an \"AS IS\" BASIS,", "\"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\",", "_go_context = \"go_context\", ) load( \"//go/private:providers.bzl\", _GoArchive = \"GoArchive\", _GoArchiveData", "License for the specific language governing permissions and # limitations", "use caution when depending on this -- # new analyses", ") load( \"//go/private/rules:source.bzl\", 
_go_source = \"go_source\", ) load( \"//extras:embed_data.bzl\", _go_embed_data", "version to be tagged. Gazelle and other tools may #", "allows the real location of definitions to change for easier", "_GoArchiveData # See go/providers.rst#GoSDK for full documentation. GoSDK = _GoSDK", "and re-enable. # \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\",", "public Go rules, providers, and other definitions are imported and", "pass raw cgo sources to cgocall and re-enable. # \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\",", "docs/go/core/rules.md#go_library for full documentation. go_library = _go_library_macro # See docs/go/core/rules.md#go_binary", "full documentation. go_binary = _go_binary_macro # See docs/go/core/rules.md#go_test for full", "moved. Please load from \" + \" @io_bazel_rules_go//go:deps.bzl instead of", "documentation. go_test = _go_test_macro # See docs/go/core/rules.md#go_test for full documentation.", "\"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] # Current version or next", "for full documentation. 
go_binary = _go_binary_macro # See docs/go/core/rules.md#go_test for", "\"//go/private/rules:wrappers.bzl\", _go_binary_macro = \"go_binary_macro\", _go_library_macro = \"go_library_macro\", _go_test_macro = \"go_test_macro\",", "def go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def", "migrate to nogo instead, which supports vet tests.\") def go_rule(**kwargs):", "limitations under the License. \"\"\"Public definitions for Go rules. All", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "GoLibrary = _GoLibrary # See go/providers.rst#GoSource for full documentation. GoSource", "See go/providers.rst#GoArchive for full documentation. GoArchive = _GoArchive # See", "\"//go/private:go_toolchain.bzl\", _declare_toolchains = \"declare_toolchains\", _go_toolchain = \"go_toolchain\", ) load( \"//go/private/rules:wrappers.bzl\",", "_go_test_macro = \"go_test_macro\", ) load( \"//go/private/rules:source.bzl\", _go_source = \"go_source\", )", "issues in existing builds. TOOLS_NOGO = [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\",", "fail(name + \" has moved. Please load from \" +", "_go_embed_data go_sdk = _go_sdk go_tool_library = _go_tool_library go_toolchain = _go_toolchain", "go_vet_test rule has been removed. Please migrate to nogo instead,", "when depending on this -- # new analyses may discover", "GoPath = _GoPath # See go/providers.rst#GoArchive for full documentation. GoArchive", "of all analysis passes in # golang.org/x/tools/go/analysis/passes. 
# This is", "\"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\",", "def go_rule(**kwargs): fail(\"The go_rule function has been removed. Use rule", "See docs/go/core/rules.md#go_test for full documentation. go_test = _go_test_macro # See", "\"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ]", "= _go_test_macro # See docs/go/core/rules.md#go_test for full documentation. go_source =", "been removed. Use rule directly instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies():", "GoSource = _GoSource # See go/providers.rst#GoPath for full documentation. GoPath", "\"\"\"Public definitions for Go rules. 
All public Go rules, providers,", "= \"nogo_wrapper\", ) # TOOLS_NOGO is a list of all", "private unless otherwise noted, and may change without notice. \"\"\"", "= \"GoArchive\", _GoArchiveData = \"GoArchiveData\", _GoLibrary = \"GoLibrary\", _GoPath =", "removed. Use rule directly instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies(): _moved(\"go_rules_dependencies\")", "def _moved(name): fail(name + \" has moved. Please load from", "_GoLibrary = \"GoLibrary\", _GoPath = \"GoPath\", _GoSDK = \"GoSDK\", _GoSource", "for full documentation. GoArchiveData = _GoArchiveData # See go/providers.rst#GoSDK for", "Copyright 2014 The Bazel Authors. All rights reserved. # #", "in # golang.org/x/tools/go/analysis/passes. # This is not backward compatible, so", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "_GoPath = \"GoPath\", _GoSDK = \"GoSDK\", _GoSource = \"GoSource\", )", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "other tools may # check this to determine compatibility. 
RULES_GO_VERSION", "depending on this -- # new analyses may discover issues", "to in writing, software # distributed under the License is", "# \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\",", "\"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\",", "All public Go rules, providers, and other definitions are imported", "# See the License for the specific language governing permissions", "go_binary = _go_binary_macro # See docs/go/core/rules.md#go_test for full documentation. 
go_test", "\"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\",", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "# TOOLS_NOGO is a list of all analysis passes in", "# Copyright 2014 The Bazel Authors. All rights reserved. #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "providers, and other definitions are imported and re-exported in this", "analysis passes in # golang.org/x/tools/go/analysis/passes. # This is not backward", "with the License. # You may obtain a copy of", "full documentation. go_path = _go_path def go_vet_test(*args, **kwargs): fail(\"The go_vet_test", "instead, which supports vet tests.\") def go_rule(**kwargs): fail(\"The go_rule function", "\"go_tool_library\", ) load( \"//go/private/rules:nogo.bzl\", _nogo = \"nogo_wrapper\", ) # TOOLS_NOGO", "= _GoArchiveData # See go/providers.rst#GoSDK for full documentation. 
GoSDK =", "\"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396): pass raw cgo sources to", "_go_sdk = \"go_sdk\", ) load( \"//go/private:go_toolchain.bzl\", _declare_toolchains = \"declare_toolchains\", _go_toolchain", "_moved(\"go_download_sdk\") def go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\")", "builds. TOOLS_NOGO = [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\",", "location of definitions to change for easier maintenance. Definitions outside", "\"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] # Current", "maintenance. Definitions outside this file are private unless otherwise noted,", "= \"go_context\", ) load( \"//go/private:providers.bzl\", _GoArchive = \"GoArchive\", _GoArchiveData =", "compliance with the License. # You may obtain a copy", "re-exported in this file. 
This allows the real location of", "agreed to in writing, software # distributed under the License", "go/providers.rst#GoSDK for full documentation. GoSDK = _GoSDK # See docs/go/core/rules.md#go_library", "_nogo # See go/providers.rst#GoLibrary for full documentation. GoLibrary = _GoLibrary", "next version to be tagged. Gazelle and other tools may", "distributed under the License is distributed on an \"AS IS\"", "_go_embed_data = \"go_embed_data\", ) load( \"//go/private/tools:path.bzl\", _go_path = \"go_path\", )", "\"go_toolchain\", ) load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro = \"go_binary_macro\", _go_library_macro = \"go_library_macro\",", "docs/go/core/rules.md#go_test for full documentation. go_source = _go_source # See docs/go/core/rules.md#go_path", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "\"//go/private/tools:path.bzl\", _go_path = \"go_path\", ) load( \"//go/private/rules:library.bzl\", _go_tool_library = \"go_tool_library\",", "= \"GoArchiveData\", _GoLibrary = \"GoLibrary\", _GoPath = \"GoPath\", _GoSDK =", "for full documentation. GoArchive = _GoArchive # See go/providers.rst#GoArchiveData for", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "or next version to be tagged. Gazelle and other tools", "to be tagged. Gazelle and other tools may # check", "not use this file except in compliance with the License.", "go/providers.rst#GoLibrary for full documentation. GoLibrary = _GoLibrary # See go/providers.rst#GoSource", "for Go rules. All public Go rules, providers, and other", "writing, software # distributed under the License is distributed on", "other definitions are imported and re-exported in this file. This", "you may not use this file except in compliance with", "_moved(name): fail(name + \" has moved. Please load from \"", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "to cgocall and re-enable. 
# \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\",", "def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def", "_GoArchiveData = \"GoArchiveData\", _GoLibrary = \"GoLibrary\", _GoPath = \"GoPath\", _GoSDK", "= _go_binary_macro # See docs/go/core/rules.md#go_test for full documentation. go_test =", "definitions for Go rules. All public Go rules, providers, and", "2014 The Bazel Authors. All rights reserved. # # Licensed", "CONDITIONS OF ANY KIND, either express or implied. # See", "governing permissions and # limitations under the License. \"\"\"Public definitions", "# This is not backward compatible, so use caution when", "permissions and # limitations under the License. \"\"\"Public definitions for", "all analysis passes in # golang.org/x/tools/go/analysis/passes. # This is not", "full documentation. GoPath = _GoPath # See go/providers.rst#GoArchive for full", "See docs/go/core/rules.md#go_binary for full documentation. go_binary = _go_binary_macro # See", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "_moved(\"go_register_toolchains\") def go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def go_local_sdk(**kwargs): _moved(\"go_local_sdk\")", "\"GoArchive\", _GoArchiveData = \"GoArchiveData\", _GoLibrary = \"GoLibrary\", _GoPath = \"GoPath\",", "tools may # check this to determine compatibility. 
RULES_GO_VERSION =", "\"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\",", "load( \"//go/private/rules:library.bzl\", _go_tool_library = \"go_tool_library\", ) load( \"//go/private/rules:nogo.bzl\", _nogo =", "Definitions outside this file are private unless otherwise noted, and", "supports vet tests.\") def go_rule(**kwargs): fail(\"The go_rule function has been", "load( \"//extras:embed_data.bzl\", _go_embed_data = \"go_embed_data\", ) load( \"//go/private/tools:path.bzl\", _go_path =", ") load( \"//go/private/rules:nogo.bzl\", _nogo = \"nogo_wrapper\", ) # TOOLS_NOGO is", "TODO(#2396): pass raw cgo sources to cgocall and re-enable. #", "GoArchive = _GoArchive # See go/providers.rst#GoArchiveData for full documentation. GoArchiveData", "# See go/providers.rst#GoSDK for full documentation. GoSDK = _GoSDK #", "OR CONDITIONS OF ANY KIND, either express or implied. #", "docs/go/core/rules.md#go_test for full documentation. go_test = _go_test_macro # See docs/go/core/rules.md#go_test", "_go_toolchain = \"go_toolchain\", ) load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro = \"go_binary_macro\", _go_library_macro", "the License is distributed on an \"AS IS\" BASIS, #", "directly instead. 
See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies(): _moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\")", "been removed. Please migrate to nogo instead, which supports vet", "change for easier maintenance. Definitions outside this file are private", "load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro = \"go_binary_macro\", _go_library_macro = \"go_library_macro\", _go_test_macro =", "full documentation. GoArchive = _GoArchive # See go/providers.rst#GoArchiveData for full", "documentation. GoSDK = _GoSDK # See docs/go/core/rules.md#go_library for full documentation.", "Go rules, providers, and other definitions are imported and re-exported", "full documentation. GoLibrary = _GoLibrary # See go/providers.rst#GoSource for full", "RULES_GO_VERSION = \"0.30.0\" declare_toolchains = _declare_toolchains go_context = _go_context go_embed_data", "full documentation. go_test = _go_test_macro # See docs/go/core/rules.md#go_test for full", "\"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396): pass raw cgo", "= \"go_library_macro\", _go_test_macro = \"go_test_macro\", ) load( \"//go/private/rules:source.bzl\", _go_source =", ") load( \"//go/private:go_toolchain.bzl\", _declare_toolchains = \"declare_toolchains\", _go_toolchain = \"go_toolchain\", )", "go/providers.rst#GoArchiveData for full documentation. GoArchiveData = _GoArchiveData # See go/providers.rst#GoSDK", "= _GoSDK # See docs/go/core/rules.md#go_library for full documentation. go_library =", "compatibility. 
RULES_GO_VERSION = \"0.30.0\" declare_toolchains = _declare_toolchains go_context = _go_context", "law or agreed to in writing, software # distributed under", ") load( \"//go/private/rules:sdk.bzl\", _go_sdk = \"go_sdk\", ) load( \"//go/private:go_toolchain.bzl\", _declare_toolchains", "def go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def", "this file. This allows the real location of definitions to", "go_vet_test(*args, **kwargs): fail(\"The go_vet_test rule has been removed. Please migrate", "discover issues in existing builds. TOOLS_NOGO = [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\",", "definitions are imported and re-exported in this file. This allows", "\" has moved. Please load from \" + \" @io_bazel_rules_go//go:deps.bzl", "full documentation. GoSDK = _GoSDK # See docs/go/core/rules.md#go_library for full", "# See docs/go/core/rules.md#go_library for full documentation. go_library = _go_library_macro #", "# TODO(#2396): pass raw cgo sources to cgocall and re-enable.", "See go/providers.rst#GoLibrary for full documentation. GoLibrary = _GoLibrary # See", "nogo instead, which supports vet tests.\") def go_rule(**kwargs): fail(\"The go_rule", "may obtain a copy of the License at # #", "of definitions to change for easier maintenance. Definitions outside this", "_moved(\"go_wrap_sdK\") def _moved(name): fail(name + \" has moved. Please load", "= _GoArchive # See go/providers.rst#GoArchiveData for full documentation. GoArchiveData =", "= _go_toolchain nogo = _nogo # See go/providers.rst#GoLibrary for full", "\"go_source\", ) load( \"//extras:embed_data.bzl\", _go_embed_data = \"go_embed_data\", ) load( \"//go/private/tools:path.bzl\",", "= _GoPath # See go/providers.rst#GoArchive for full documentation. 
GoArchive =", "imported and re-exported in this file. This allows the real", "for full documentation. go_test = _go_test_macro # See docs/go/core/rules.md#go_test for", "go_rules_dependencies(): _moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def go_host_sdk(**kwargs):", "# See go/providers.rst#GoLibrary for full documentation. GoLibrary = _GoLibrary #", "documentation. go_source = _go_source # See docs/go/core/rules.md#go_path for full documentation.", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "\"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\",", "is not backward compatible, so use caution when depending on", "\"go_binary_macro\", _go_library_macro = \"go_library_macro\", _go_test_macro = \"go_test_macro\", ) load( \"//go/private/rules:source.bzl\",", "may not use this file except in compliance with the", "this to determine compatibility. RULES_GO_VERSION = \"0.30.0\" declare_toolchains = _declare_toolchains", "cgocall and re-enable. 
# \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\",", "_go_binary_macro # See docs/go/core/rules.md#go_test for full documentation. go_test = _go_test_macro", "\"//go/private/rules:source.bzl\", _go_source = \"go_source\", ) load( \"//extras:embed_data.bzl\", _go_embed_data = \"go_embed_data\",", "See docs/go/core/rules.md#go_path for full documentation. go_path = _go_path def go_vet_test(*args,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "_GoArchive = \"GoArchive\", _GoArchiveData = \"GoArchiveData\", _GoLibrary = \"GoLibrary\", _GoPath", "language governing permissions and # limitations under the License. 
\"\"\"Public", "load( \"//go/private/rules:sdk.bzl\", _go_sdk = \"go_sdk\", ) load( \"//go/private:go_toolchain.bzl\", _declare_toolchains =", "\"GoSource\", ) load( \"//go/private/rules:sdk.bzl\", _go_sdk = \"go_sdk\", ) load( \"//go/private:go_toolchain.bzl\",", "\"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\",", ") load( \"//extras:embed_data.bzl\", _go_embed_data = \"go_embed_data\", ) load( \"//go/private/tools:path.bzl\", _go_path", "re-enable. 
# \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\",", "\"go_path\", ) load( \"//go/private/rules:library.bzl\", _go_tool_library = \"go_tool_library\", ) load( \"//go/private/rules:nogo.bzl\",", "= \"go_sdk\", ) load( \"//go/private:go_toolchain.bzl\", _declare_toolchains = \"declare_toolchains\", _go_toolchain =", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Gazelle and other tools may # check this to determine", "\"//go/private:providers.bzl\", _GoArchive = \"GoArchive\", _GoArchiveData = \"GoArchiveData\", _GoLibrary = \"GoLibrary\",", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "is a list of all analysis passes in # golang.org/x/tools/go/analysis/passes.", "file. This allows the real location of definitions to change", "= \"go_binary_macro\", _go_library_macro = \"go_library_macro\", _go_test_macro = \"go_test_macro\", ) load(", "unless otherwise noted, and may change without notice. 
\"\"\" load(", "\"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396): pass raw", "for easier maintenance. Definitions outside this file are private unless", "analyses may discover issues in existing builds. TOOLS_NOGO = [", "full documentation. go_source = _go_source # See docs/go/core/rules.md#go_path for full", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "go_toolchain = _go_toolchain nogo = _nogo # See go/providers.rst#GoLibrary for", "def go_rules_dependencies(): _moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def", "go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def _moved(name): fail(name + \" has moved. Please", "declare_toolchains = _declare_toolchains go_context = _go_context go_embed_data = _go_embed_data go_sdk", "# See go/providers.rst#GoSource for full documentation. 
GoSource = _GoSource #", "and other definitions are imported and re-exported in this file.", "\"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\",", "rights reserved. # # Licensed under the Apache License, Version", "outside this file are private unless otherwise noted, and may", "go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def _moved(name):", ") load( \"//go/private/tools:path.bzl\", _go_path = \"go_path\", ) load( \"//go/private/rules:library.bzl\", _go_tool_library", "go/providers.rst#GoSource for full documentation. GoSource = _GoSource # See go/providers.rst#GoPath", "rule directly instead. 
See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies(): _moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs):", "[ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396):", "Use rule directly instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies(): _moved(\"go_rules_dependencies\") def", "_go_path def go_vet_test(*args, **kwargs): fail(\"The go_vet_test rule has been removed.", "= _go_embed_data go_sdk = _go_sdk go_tool_library = _go_tool_library go_toolchain =", "\"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396): pass raw cgo sources to cgocall and", "# See go/providers.rst#GoPath for full documentation. GoPath = _GoPath #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "rule has been removed. Please migrate to nogo instead, which", "has been removed. Use rule directly instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def", "for full documentation. GoLibrary = _GoLibrary # See go/providers.rst#GoSource for", "to nogo instead, which supports vet tests.\") def go_rule(**kwargs): fail(\"The", "] # Current version or next version to be tagged.", "= \"GoLibrary\", _GoPath = \"GoPath\", _GoSDK = \"GoSDK\", _GoSource =", "_GoArchive # See go/providers.rst#GoArchiveData for full documentation. 
GoArchiveData = _GoArchiveData", "or implied. # See the License for the specific language", "\"nogo_wrapper\", ) # TOOLS_NOGO is a list of all analysis", "documentation. GoLibrary = _GoLibrary # See go/providers.rst#GoSource for full documentation.", "_go_source = \"go_source\", ) load( \"//extras:embed_data.bzl\", _go_embed_data = \"go_embed_data\", )", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "= _go_tool_library go_toolchain = _go_toolchain nogo = _nogo # See", "# See docs/go/core/rules.md#go_path for full documentation. go_path = _go_path def", "https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies(): _moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def go_download_sdk(**kwargs): _moved(\"go_download_sdk\")", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "_GoSDK # See docs/go/core/rules.md#go_library for full documentation. go_library = _go_library_macro", "go_tool_library = _go_tool_library go_toolchain = _go_toolchain nogo = _nogo #", "= [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", #", "GoArchiveData = _GoArchiveData # See go/providers.rst#GoSDK for full documentation. GoSDK", "_GoLibrary # See go/providers.rst#GoSource for full documentation. 
GoSource = _GoSource", "(the \"License\"); # you may not use this file except", "= \"GoSource\", ) load( \"//go/private/rules:sdk.bzl\", _go_sdk = \"go_sdk\", ) load(", "# you may not use this file except in compliance", "_declare_toolchains = \"declare_toolchains\", _go_toolchain = \"go_toolchain\", ) load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro", "go/providers.rst#GoPath for full documentation. GoPath = _GoPath # See go/providers.rst#GoArchive", "_go_tool_library = \"go_tool_library\", ) load( \"//go/private/rules:nogo.bzl\", _nogo = \"nogo_wrapper\", )", "This is not backward compatible, so use caution when depending", "docs/go/core/rules.md#go_path for full documentation. go_path = _go_path def go_vet_test(*args, **kwargs):", "_go_library_macro # See docs/go/core/rules.md#go_binary for full documentation. go_binary = _go_binary_macro", "for full documentation. GoPath = _GoPath # See go/providers.rst#GoArchive for", "= \"declare_toolchains\", _go_toolchain = \"go_toolchain\", ) load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro =", "definitions to change for easier maintenance. Definitions outside this file", "may change without notice. \"\"\" load( \"//go/private:context.bzl\", _go_context = \"go_context\",", "not backward compatible, so use caution when depending on this", "_go_source # See docs/go/core/rules.md#go_path for full documentation. go_path = _go_path", "under the License. \"\"\"Public definitions for Go rules. 
All public", "# # Unless required by applicable law or agreed to", "= \"GoPath\", _GoSDK = \"GoSDK\", _GoSource = \"GoSource\", ) load(", "go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs):", "go_context = _go_context go_embed_data = _go_embed_data go_sdk = _go_sdk go_tool_library", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "this -- # new analyses may discover issues in existing", "Version 2.0 (the \"License\"); # you may not use this", "sources to cgocall and re-enable. # \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library\",", "documentation. GoSource = _GoSource # See go/providers.rst#GoPath for full documentation.", "# See go/providers.rst#GoArchiveData for full documentation. GoArchiveData = _GoArchiveData #", "License. \"\"\"Public definitions for Go rules. All public Go rules,", "file are private unless otherwise noted, and may change without", "= \"go_embed_data\", ) load( \"//go/private/tools:path.bzl\", _go_path = \"go_path\", ) load(", "go/providers.rst#GoArchive for full documentation. GoArchive = _GoArchive # See go/providers.rst#GoArchiveData", "go_embed_data = _go_embed_data go_sdk = _go_sdk go_tool_library = _go_tool_library go_toolchain", "= _declare_toolchains go_context = _go_context go_embed_data = _go_embed_data go_sdk =", "implied. # See the License for the specific language governing", "def go_vet_test(*args, **kwargs): fail(\"The go_vet_test rule has been removed. Please", "under the Apache License, Version 2.0 (the \"License\"); # you", "Go rules. 
All public Go rules, providers, and other definitions", "See go/providers.rst#GoSDK for full documentation. GoSDK = _GoSDK # See", "the real location of definitions to change for easier maintenance.", "See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\") def go_rules_dependencies(): _moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def go_download_sdk(**kwargs):", "# See docs/go/core/rules.md#go_test for full documentation. go_source = _go_source #", "All rights reserved. # # Licensed under the Apache License,", "by applicable law or agreed to in writing, software #", "has moved. Please load from \" + \" @io_bazel_rules_go//go:deps.bzl instead", "\"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\",", "= _go_source # See docs/go/core/rules.md#go_path for full documentation. go_path =", "passes in # golang.org/x/tools/go/analysis/passes. 
# This is not backward compatible,", "\"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\",", "_moved(\"go_rules_dependencies\") def go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def go_host_sdk(**kwargs): _moved(\"go_host_sdk\")", "documentation. go_binary = _go_binary_macro # See docs/go/core/rules.md#go_test for full documentation.", "noted, and may change without notice. \"\"\" load( \"//go/private:context.bzl\", _go_context", "load( \"//go/private/tools:path.bzl\", _go_path = \"go_path\", ) load( \"//go/private/rules:library.bzl\", _go_tool_library =", "Authors. All rights reserved. # # Licensed under the Apache", "are private unless otherwise noted, and may change without notice.", "\"//extras:embed_data.bzl\", _go_embed_data = \"go_embed_data\", ) load( \"//go/private/tools:path.bzl\", _go_path = \"go_path\",", "load( \"//go/private/rules:source.bzl\", _go_source = \"go_source\", ) load( \"//extras:embed_data.bzl\", _go_embed_data =", "rules, providers, and other definitions are imported and re-exported in", ") load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro = \"go_binary_macro\", _go_library_macro = \"go_library_macro\", _go_test_macro", "**kwargs): fail(\"The go_vet_test rule has been removed. 
Please migrate to", "\"//go/private/rules:sdk.bzl\", _go_sdk = \"go_sdk\", ) load( \"//go/private:go_toolchain.bzl\", _declare_toolchains = \"declare_toolchains\",", "GoSDK = _GoSDK # See docs/go/core/rules.md#go_library for full documentation. go_library", "+ \" has moved. Please load from \" + \"", "See docs/go/core/rules.md#go_test for full documentation. go_source = _go_source # See", "# limitations under the License. \"\"\"Public definitions for Go rules.", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "# See docs/go/core/rules.md#go_binary for full documentation. go_binary = _go_binary_macro #", "Unless required by applicable law or agreed to in writing,", "go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def _moved(name): fail(name + \"", "a list of all analysis passes in # golang.org/x/tools/go/analysis/passes. #", "raw cgo sources to cgocall and re-enable. # \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\",", "for full documentation. GoSDK = _GoSDK # See docs/go/core/rules.md#go_library for", "-- # new analyses may discover issues in existing builds.", "= _GoLibrary # See go/providers.rst#GoSource for full documentation. GoSource =", "cgo sources to cgocall and re-enable. # \"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/composite:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library\",", "= _go_sdk go_tool_library = _go_tool_library go_toolchain = _go_toolchain nogo =", "the specific language governing permissions and # limitations under the", "go_register_toolchains(**kwargs): _moved(\"go_register_toolchains\") def go_download_sdk(**kwargs): _moved(\"go_download_sdk\") def go_host_sdk(**kwargs): _moved(\"go_host_sdk\") def go_local_sdk(**kwargs):", "full documentation. 
GoArchiveData = _GoArchiveData # See go/providers.rst#GoSDK for full", "this file are private unless otherwise noted, and may change", "\"\"\" load( \"//go/private:context.bzl\", _go_context = \"go_context\", ) load( \"//go/private:providers.bzl\", _GoArchive", "applicable law or agreed to in writing, software # distributed", "_go_context go_embed_data = _go_embed_data go_sdk = _go_sdk go_tool_library = _go_tool_library", "may # check this to determine compatibility. RULES_GO_VERSION = \"0.30.0\"", "Please migrate to nogo instead, which supports vet tests.\") def", "= \"go_source\", ) load( \"//extras:embed_data.bzl\", _go_embed_data = \"go_embed_data\", ) load(", "rules. All public Go rules, providers, and other definitions are", "in writing, software # distributed under the License is distributed", "= _nogo # See go/providers.rst#GoLibrary for full documentation. GoLibrary =", "tests.\") def go_rule(**kwargs): fail(\"The go_rule function has been removed. Use", "has been removed. Please migrate to nogo instead, which supports", "go_sdk = _go_sdk go_tool_library = _go_tool_library go_toolchain = _go_toolchain nogo", "Bazel Authors. All rights reserved. # # Licensed under the", "\"//go/private/rules:library.bzl\", _go_tool_library = \"go_tool_library\", ) load( \"//go/private/rules:nogo.bzl\", _nogo = \"nogo_wrapper\",", "and may change without notice. \"\"\" load( \"//go/private:context.bzl\", _go_context =", "for full documentation. go_source = _go_source # See docs/go/core/rules.md#go_path for", "\"//go/private:context.bzl\", _go_context = \"go_context\", ) load( \"//go/private:providers.bzl\", _GoArchive = \"GoArchive\",", "and # limitations under the License. 
\"\"\"Public definitions for Go", "\"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396): pass raw cgo sources", "\"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] # Current version", "# new analyses may discover issues in existing builds. TOOLS_NOGO", "= _go_context go_embed_data = _go_embed_data go_sdk = _go_sdk go_tool_library =", "go_test = _go_test_macro # See docs/go/core/rules.md#go_test for full documentation. go_source", "def go_local_sdk(**kwargs): _moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def _moved(name): fail(name +", "may discover issues in existing builds. TOOLS_NOGO = [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\",", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "\"GoArchiveData\", _GoLibrary = \"GoLibrary\", _GoPath = \"GoPath\", _GoSDK = \"GoSDK\",", "# You may obtain a copy of the License at", "_go_binary_macro = \"go_binary_macro\", _go_library_macro = \"go_library_macro\", _go_test_macro = \"go_test_macro\", )", "See docs/go/core/rules.md#go_library for full documentation. 
go_library = _go_library_macro # See", "\"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\",", ") # TOOLS_NOGO is a list of all analysis passes", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= \"GoSDK\", _GoSource = \"GoSource\", ) load( \"//go/private/rules:sdk.bzl\", _go_sdk =", "to determine compatibility. RULES_GO_VERSION = \"0.30.0\" declare_toolchains = _declare_toolchains go_context", "go_source = _go_source # See docs/go/core/rules.md#go_path for full documentation. go_path", "to change for easier maintenance. 
Definitions outside this file are", "= \"go_tool_library\", ) load( \"//go/private/rules:nogo.bzl\", _nogo = \"nogo_wrapper\", ) #", "\"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/bools:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library\", # TODO(#2396): pass", "\"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\",", "full documentation. GoSource = _GoSource # See go/providers.rst#GoPath for full", "_go_test_macro # See docs/go/core/rules.md#go_test for full documentation. go_source = _go_source", "new analyses may discover issues in existing builds. 
TOOLS_NOGO =", "\"0.30.0\" declare_toolchains = _declare_toolchains go_context = _go_context go_embed_data = _go_embed_data", "\"go_context\", ) load( \"//go/private:providers.bzl\", _GoArchive = \"GoArchive\", _GoArchiveData = \"GoArchiveData\",", "= \"go_test_macro\", ) load( \"//go/private/rules:source.bzl\", _go_source = \"go_source\", ) load(", "the License for the specific language governing permissions and #", "and other tools may # check this to determine compatibility.", "easier maintenance. Definitions outside this file are private unless otherwise", "Please load from \" + \" @io_bazel_rules_go//go:deps.bzl instead of def.bzl.\")", "_go_library_macro = \"go_library_macro\", _go_test_macro = \"go_test_macro\", ) load( \"//go/private/rules:source.bzl\", _go_source", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "notice. \"\"\" load( \"//go/private:context.bzl\", _go_context = \"go_context\", ) load( \"//go/private:providers.bzl\",", "\"GoSDK\", _GoSource = \"GoSource\", ) load( \"//go/private/rules:sdk.bzl\", _go_sdk = \"go_sdk\",", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"go_test_macro\", ) load( \"//go/private/rules:source.bzl\", _go_source = \"go_source\", ) load( \"//extras:embed_data.bzl\",", "list of all analysis passes in # golang.org/x/tools/go/analysis/passes. # This", "See go/providers.rst#GoSource for full documentation. 
GoSource = _GoSource # See", "\"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\",", "fail(\"The go_vet_test rule has been removed. Please migrate to nogo", "_nogo = \"nogo_wrapper\", ) # TOOLS_NOGO is a list of", "documentation. go_library = _go_library_macro # See docs/go/core/rules.md#go_binary for full documentation.", "\"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\",", "_go_tool_library go_toolchain = _go_toolchain nogo = _nogo # See go/providers.rst#GoLibrary", "\"go_sdk\", ) load( \"//go/private:go_toolchain.bzl\", _declare_toolchains = \"declare_toolchains\", _go_toolchain = \"go_toolchain\",", 
"\"go_embed_data\", ) load( \"//go/private/tools:path.bzl\", _go_path = \"go_path\", ) load( \"//go/private/rules:library.bzl\",", "caution when depending on this -- # new analyses may", ") load( \"//go/private:providers.bzl\", _GoArchive = \"GoArchive\", _GoArchiveData = \"GoArchiveData\", _GoLibrary", "load( \"//go/private:go_toolchain.bzl\", _declare_toolchains = \"declare_toolchains\", _go_toolchain = \"go_toolchain\", ) load(", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "function has been removed. Use rule directly instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules\")", "removed. Please migrate to nogo instead, which supports vet tests.\")", "# Current version or next version to be tagged. Gazelle", "\"//go/private/rules:nogo.bzl\", _nogo = \"nogo_wrapper\", ) # TOOLS_NOGO is a list", "= \"0.30.0\" declare_toolchains = _declare_toolchains go_context = _go_context go_embed_data =", "nogo = _nogo # See go/providers.rst#GoLibrary for full documentation. GoLibrary", "tagged. Gazelle and other tools may # check this to", "compatible, so use caution when depending on this -- #", "for full documentation. GoSource = _GoSource # See go/providers.rst#GoPath for", "go_library = _go_library_macro # See docs/go/core/rules.md#go_binary for full documentation. go_binary", "\"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] # Current version or next version to", "The Bazel Authors. All rights reserved. # # Licensed under", "which supports vet tests.\") def go_rule(**kwargs): fail(\"The go_rule function has", "load( \"//go/private:context.bzl\", _go_context = \"go_context\", ) load( \"//go/private:providers.bzl\", _GoArchive =", "for full documentation. 
go_library = _go_library_macro # See docs/go/core/rules.md#go_binary for", "_moved(\"go_local_sdk\") def go_wrap_sdk(**kwargs): _moved(\"go_wrap_sdK\") def _moved(name): fail(name + \" has", "change without notice. \"\"\" load( \"//go/private:context.bzl\", _go_context = \"go_context\", )", "\"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\",", "vet tests.\") def go_rule(**kwargs): fail(\"The go_rule function has been removed.", "\"License\"); # you may not use this file except in", "in this file. This allows the real location of definitions", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "backward compatible, so use caution when depending on this --", "full documentation. 
go_library = _go_library_macro # See docs/go/core/rules.md#go_binary for full", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "\"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/tests:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library\", ] #", "golang.org/x/tools/go/analysis/passes. # This is not backward compatible, so use caution", "in existing builds. TOOLS_NOGO = [ \"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/assign:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library\",", "\"go_library_macro\", _go_test_macro = \"go_test_macro\", ) load( \"//go/private/rules:source.bzl\", _go_source = \"go_source\",", "_GoPath # See go/providers.rst#GoArchive for full documentation. GoArchive = _GoArchive", "be tagged. Gazelle and other tools may # check this", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "_go_toolchain nogo = _nogo # See go/providers.rst#GoLibrary for full documentation.", "= _go_path def go_vet_test(*args, **kwargs): fail(\"The go_vet_test rule has been", "# See go/providers.rst#GoArchive for full documentation. GoArchive = _GoArchive #", "_GoSource = \"GoSource\", ) load( \"//go/private/rules:sdk.bzl\", _go_sdk = \"go_sdk\", )", "documentation. 
GoArchive = _GoArchive # See go/providers.rst#GoArchiveData for full documentation.", "determine compatibility. RULES_GO_VERSION = \"0.30.0\" declare_toolchains = _declare_toolchains go_context =", "version or next version to be tagged. Gazelle and other", "go_rule(**kwargs): fail(\"The go_rule function has been removed. Use rule directly", "You may obtain a copy of the License at #", "otherwise noted, and may change without notice. \"\"\" load( \"//go/private:context.bzl\",", "documentation. GoPath = _GoPath # See go/providers.rst#GoArchive for full documentation.", "_go_sdk go_tool_library = _go_tool_library go_toolchain = _go_toolchain nogo = _nogo", "= _go_library_macro # See docs/go/core/rules.md#go_binary for full documentation. go_binary =", "TOOLS_NOGO is a list of all analysis passes in #", "See go/providers.rst#GoArchiveData for full documentation. GoArchiveData = _GoArchiveData # See", "\"declare_toolchains\", _go_toolchain = \"go_toolchain\", ) load( \"//go/private/rules:wrappers.bzl\", _go_binary_macro = \"go_binary_macro\",", "so use caution when depending on this -- # new", "_GoSDK = \"GoSDK\", _GoSource = \"GoSource\", ) load( \"//go/private/rules:sdk.bzl\", _go_sdk", "the Apache License, Version 2.0 (the \"License\"); # you may", "\"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/printf:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/shift:go_default_library\", \"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library\",", 
"documentation. go_path = _go_path def go_vet_test(*args, **kwargs): fail(\"The go_vet_test rule", "\"GoPath\", _GoSDK = \"GoSDK\", _GoSource = \"GoSource\", ) load( \"//go/private/rules:sdk.bzl\"," ]
[ "millimeters if 0.5 < rain_in_millimeters / rain_hours <= 4: if", "millimeters elif rain_in_millimeters / rain_hours > 8: if weather ==", "= 81 # גשם וסופת רוחות elif weather == 77:", "station_data = collection.getElementsByTagName('surface_station') for i, station in enumerate(station_data): station_lon =", "== 76: weather = 83 # סופת רוחות, גשם זלעפות", "the distance will always be smaller than the initial station_data", "= collection.getElementsByTagName('surface_station') for i, station in enumerate(station_data): station_lon = station.getElementsByTagName('station_lon')", "the 0es before the number if char == '0': rain.replace(char,", "amount per hour is more than 8.0 millimeters elif rain_in_millimeters", "accidents already in DB\") return 0 db.session.execute(AccidentMarker.__table__.insert(), [m for m", "python # -*- coding: utf-8 -*- import calendar import csv", "< rain_in_millimeters / rain_hours <= 8: if 76 == weather:", "weather = 15 # גשם # average rain amount per", "accident_date.month == 10 & ( winter_clock.day > accident_date.day | (", "the week (ex: the forth sunday of april 2016) #", "[m[\"id\"] for m in accidents if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id ==", "\"3\": 3, \"4\": 4, \"5\": 5, \"7\": 6, \"8\": 6,", "from the local time and in winter clock 2 hours", "float(rain) if rain_in_millimeters >= 990: # numbers that are higher", "= collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date = accident_time_zone_adjustment(created) for station in enumerate(station_data_in_date):", "\"long\": 3, \"lat\": 4, \"city\": 5, \"street\": 6, \"comment\": 7,", "\"18\": 12, \"19\": 13, \"20\": 14, \"21\": 15, \"22\": 16,", "60, \"85\": 61, \"86\": 62, \"87\": 63, \"88\": 64, \"89\":", "weather is given in UTC time # therefore in daylight_saving_time", "( daylight_saving_time.day == accident_date.day & accident_date.hour >= 2)): accident_date.replace(hour=accident_date.hour -", "hour = 
time.strftime('%H') hour = int(hour) if str(created).endswith('AM') else int(hour)", "for the parsing and deployment of \"united hatzala\" data to", "8}, ] def create_accidents(collection, file_location): \"\"\" :param file_location: local location", "+ 12 break except ValueError: pass return datetime(time.year, time.month, time.day,", "\"1\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"7\": 6,", "weather_data, tag): element = weather_data[station].getElementsByTagName(tag) if element: weather_element = element[0].childNodes[0].nodeValue", "for united_file in os.listdir(united_path): if united_file.endswith(\".csv\"): total += import_to_db(collection, united_path", "25, \"38\": 25, \"39\": 25, \"40\": 26, \"41\": 27, \"42\":", "גשם זלעפות else: weather = 79 # גשם זלעפות return", "datetime import datetime import os from flask_sqlalchemy import SQLAlchemy from", "\"time\": 1, \"lat\": 2, \"long\": 3, \"street\": 4, \"city\": 6,", "file_location: local location of .csv :return: Yields a marker object", "1, \"1\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"7\":", "\"street\": 4, \"city\": 6, \"comment\": 7, \"type\": 8, \"casualties\": 9},", "hours # [ accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE) # if", "reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab) for line, accident in enumerate(reader):", "s = requests.session() r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc = minidom.parseString(r.text) collection", "to importing to DB \"\"\" collection = retrieve_ims_xml() if not", "== accident_date.day & accident_date.hour < 2)): accident_date.replace(hour=accident_date.hour - 1) #", "path: Local files directory ('united_path' on main() below) :return: length", "Date & Time string from csv :return: Python datetime object", "> accident_date.day | ( winter_clock.day == accident_date.day & accident_date.hour <", "new_ids]) db.session.commit() return len(new_ids) def update_db(collection): \"\"\" :return: length of", 
"accident.longitude) db.session.commit() logging.info(\"\\tFinished commiting the changes\") def main(light=True, username='', password='',", "None and rain_duration is not None: rain_in_millimeters = convert_xml_values_to_numbers(rain) rain_hours", "last sunday of october at 2:00 o'clock elif accident_date.month ==", "DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M',", "DB\") return 0 db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if", "(str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour))) return adate def all_station_in_date_frame(collection, created): #", "\"33\": 7, \"34\": 7, \"35\": 7, \"36\": 25, \"37\": 25,", "created: Date & Time string from csv :return: Python datetime", "\"city\": 6, \"comment\": 7, \"type\": 8, \"casualties\": 9}, {\"id\": 0,", "# United.py is responsible for the parsing and deployment of", "temp_dis < min_distance: min_distance = temp_dis station_place_in_xml = i return", "smaller than the initial station_data = collection.getElementsByTagName('surface_station') for i, station", "weather_element = element[0].childNodes[0].nodeValue else: weather_element = None return weather_element def", "import init_flask, decode_hebrew, open_utf8 from ..import importmail from xml.dom import", "utf-8 -*- import calendar import csv from datetime import datetime", "and deployment of \"united hatzala\" data to the DB ############################################################################################", "חזקות, גשם קל else: weather = 37 # גשם קל", "in os.listdir(united_path): if united_file.endswith(\".csv\"): total += import_to_db(collection, united_path + united_file)", "hour = time.strftime('%H') hour = int(hour) else: time = datetime.strptime(str(created)[:-3],", "responsible for the parsing and deployment of \"united hatzala\" data", "header format_version = 0 if \"MissionID\" in accident[0] else 1", "\"47\": 30, 
\"48\": 31, \"49\": 32, \"50\": 33, \"51\": 34,", "object with every iteration \"\"\" logging.info(\"\\tReading accidents data from '%s'...\"", "while node.parentNode: node = node.parentNode if node.nodeName == \"Object\": return", "accident happend before the last sunday of october at 2:00", "= doc.createElement('accident_date') doc.appendChild(base) station_data_in_date = collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date = accident_time_zone_adjustment(created)", "accident_date = accident_time_zone_adjustment(created) for station in enumerate(station_data_in_date): if accident_date in", "> 5: weather = 77 # רוחות חזקות if rain", "'type': CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"), 'weather': process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if", "if accident_date in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return base def find_station_by_coordinate(collection, latitude,", "2) for accident in united: if not accident.weather: accident.weather =", "= 83 # סופת רוחות, גשם זלעפות if weather ==", "weather == 77: weather = 87 # רוחות חזקות, גשם", "# convert IMS hours code to hours RAIN_DURATION_CODE_TO_HOURS = {\"1\":", "<= 4: if weather == 76: weather = 81 #", "datetime.strptime(str(created)[:-4], date_format) hour = time.strftime('%H') hour = int(hour) else: time", "== 0: casualties = accident[csvmap[\"casualties\"]] marker['road_intactness'] = casualties if casualties.isdigit()", "decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"), 'weather': process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if format_version == 0:", "accident time in UTC time # pylint: disable=unexpected-keyword-arg accident_date =", "get_weather_element(station, weather_data, 'TR') # the duration of time in which", ".csv :return: Yields a marker object with 
every iteration \"\"\"", "** 2 station_lat = station.getElementsByTagName('station_lat') assert len(station_lat) == 1 lat", "int(hour) if str(created).endswith('AM') else int(hour) + 12 break except ValueError:", "stations data in the time of the accident doc =", "# רוחות חזקות, גשם זלעפות else: weather = 79 #", "break except ValueError: pass return datetime(time.year, time.month, time.day, hour, time.minute,", "three digits format (4-004), we delete the 0es before the", "16, \"23\": 17, \"24\": 18, \"25\": 19, \"26\": 20, \"27\":", "weather == 76: weather = 83 # סופת רוחות, גשם", "= time.strftime('%H') hour = int(hour) if str(created).endswith('AM') else int(hour) +", ">= 990: # numbers that are higher then 990 in", "min_distance: min_distance = temp_dis station_place_in_xml = i return station_place_in_xml def", "entries after execution \"\"\" app = init_flask() db = SQLAlchemy(app)", "# weather is given in UTC time # therefore in", "parse_date(accident[csvmap[\"time\"]]) marker = {'id': accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]], 'created':", "if weather == 77: weather = 86 # רוחות חזקות,", "return weather_element def process_weather_data(collection, latitude, longitude): weather = 1 #", "september if accident_date.month < 10 & accident_date.month > 3: accident_date.replace(hour=accident_date.hour", "time = datetime.strptime(str(created)[:-3], date_format) hour = time.strftime('%H') hour = int(hour)", "node.parentNode: node = node.parentNode if node.nodeName == \"Object\": return node", "station_data_in_date.sort() accident_date = accident_time_zone_adjustment(created) for station in enumerate(station_data_in_date): if accident_date", "millimeter if 0.0 < rain_in_millimeters <= 0.5 or ( 0.0", "winter clock 2 hours # [ accident_date = accident_date.replace(hour=accident_date.hour -", "accident[csvmap[\"long\"]] == \"NULL\": logging.warn(\"\\t\\tMissing coordinates in line {0}. 
Moving on...\".format(line", "weather_element def process_weather_data(collection, latitude, longitude): weather = 1 # default", "3, \"street\": 4, \"city\": 6, \"comment\": 7, \"type\": 8, \"casualties\":", "= datetime.strptime(str(created)[:-4], date_format) hour = time.strftime('%H') hour = int(hour) else:", "on...\".format(line + 1)) continue created = parse_date(accident[csvmap[\"time\"]]) marker = {'id':", "\"casualties\": 8}, ] def create_accidents(collection, file_location): \"\"\" :param file_location: local", "{\"id\": 0, \"time\": 1, \"lat\": 2, \"long\": 3, \"street\": 4,", "longitude): station_place_in_xml = -1 min_distance = float(\"inf\") # initialize big", "if rain_in_millimeters >= 990: # numbers that are higher then", "element: weather_element = element[0].childNodes[0].nodeValue else: weather_element = None return weather_element", "3, \"lat\": 4, \"city\": 5, \"street\": 6, \"comment\": 7, \"casualties\":", "& accident_date.month > 3: accident_date.replace(hour=accident_date.hour - 1) # if accident", "\"73\": 16, \"74\": 50, \"75\": 51, \"76\": 52, \"77\": 53,", "0) def is_nth_weekday(nth, daynum, year, month): # find if date", "גשם שוטף if weather == 77: weather = 86 #", "rain_hours <= 8: if 76 == weather: weather = 82", "\"66\": 42, \"67\": 43, \"68\": 44, \"69\": 45, \"70\": 46,", "\\ accident[csvmap[\"lat\"]] == \"NULL\" or accident[csvmap[\"long\"]] == \"NULL\": logging.warn(\"\\t\\tMissing coordinates", "in decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else 3, 'location_accuracy': 1, 'accident_type': 21, 'type':", "data from '%s'...\" % file_location) with open_utf8(file_location, 'rU') as f:", "for station in enumerate(station_data_in_date): if accident_date in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return", "8, \"11\": 9, \"12\": 10, \"17\": 11, \"18\": 12, \"19\":", "time in UTC time # pylint: disable=unexpected-keyword-arg accident_date = parse_date(created)", "\"93\": 
69, \"94\": 70, \"95\": 71, \"96\": 72, \"97\": 73,", "i return station_place_in_xml def convert_xml_values_to_numbers(rain): num_conv = rain[:2] # variable", "decode_hebrew((accident[csvmap[\"street\"]] + ' ' + accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity': 2 if", "גשם קל elif weather == 77: weather = 84 #", "logging.info(\"Importing data from mail...\") importmail.main(username, password, lastmail) united_path = \"static/data/united/\"", "in daylight_saving_time we deduct 3 hours from the local time", "== '0': rain.replace(char, '') else: break rain_in_millimeters = float(rain) if", "\"98\": 74, \"99\": 75} def retrieve_ims_xml(): # getting an xml", "'weather': process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if format_version == 0: casualties =", "m in accidents if m[\"id\"] in new_ids]) db.session.commit() return len(new_ids)", "76 # סופת רוחות elif int(wind_force) > 5: weather =", "Python datetime object \"\"\" global time global hour DATE_FORMATS =", "6, \"9\": 7, \"10\": 8, \"11\": 9, \"12\": 10, \"17\":", "lon_difference = (lon - float(longitude)) ** 2 station_lat = station.getElementsByTagName('station_lat')", "if accident happend between april and september if accident_date.month <", "rain_in_millimeters / rain_hours <= 4: if weather == 76: weather", "are higher then 990 in the xml code equals 0.(the", "value so the distance will always be smaller than the", "wind_force = get_weather_element(station, weather_data, 'FF') rain = get_weather_element(station, weather_data, 'RRR')", "we delete the 0es before the number if char ==", "16, \"74\": 50, \"75\": 51, \"76\": 52, \"77\": 53, \"78\":", "get_weather_element(station, weather_data, tag): element = weather_data[station].getElementsByTagName(tag) if element: weather_element =", "accidents = list(create_accidents(collection, path)) if not accidents: return 0 new_ids", "int(hour) else: time = datetime.strptime(str(created)[:-3], date_format) hour 
= time.strftime('%H') hour", "WEATHER = {\"0\": 1, \"1\": 2, \"3\": 3, \"4\": 4,", "in a three digits format (4-004), we delete the 0es", "calendar import csv from datetime import datetime import os from", "from the ims(israel meteorological service) website logging.basicConfig(level=logging.DEBUG) s = requests.session()", "\"lat\": 4, \"city\": 5, \"street\": 6, \"comment\": 7, \"casualties\": 8},", "= collection.getElementsByTagName('surface_observation') wind_force = get_weather_element(station, weather_data, 'FF') rain = get_weather_element(station,", "= get_weather_element(station, weather_data, 'TR') # the duration of time in", "73, \"98\": 74, \"99\": 75} def retrieve_ims_xml(): # getting an", "= init_flask() db = SQLAlchemy(app) accidents = list(create_accidents(collection, path)) if", "date_format in DATE_FORMATS: try: if date_format == '%Y-%m-%d %H:%M:%S': time", "line, accident in enumerate(reader): if line == 0: # header", "hatzala\" data to the DB ############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE", "ValueError: pass return datetime(time.year, time.month, time.day, hour, time.minute, 0) def", "5: weather = 77 # רוחות חזקות if rain is", "marker = {'id': accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]], 'created': created,", "רוחות חזקות, גשם שוטף else: weather = 78 # גשם", "continue if line == 1 and accident[0] == \"\": logging.warn(\"\\t\\tEmpty", ")[nth][daynum] def get_parent_object_node(node): while node.parentNode: node = node.parentNode if node.nodeName", "average rain amount per hour is between 0.5 and 4.0", "day of the week (ex: the forth sunday of april", "starting value so the distance will always be smaller than", "+ ' ' + accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity': 2 if u\"קשה\"", "weather = 83 # סופת רוחות, גשם זלעפות if weather", "\"63\": 15, \"64\": 41, \"65\": 19, 
\"66\": 42, \"67\": 43,", "logging.basicConfig(level=logging.DEBUG) s = requests.session() r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc = minidom.parseString(r.text)", "from xml.dom import minidom import math import requests import logging", "None: rain_in_millimeters = convert_xml_values_to_numbers(rain) rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain amount", "to DB \"\"\" collection = retrieve_ims_xml() if not light: logging.info(\"Importing", "12, \"3\": 18, \"4\": 24, \"/\": 24, \"5\": 1, \"6\":", "number for char in num_conv: # in the xml number", "25, \"40\": 26, \"41\": 27, \"42\": 28, \"43\": 29, \"44\":", "64, \"89\": 65, \"90\": 66, \"91\": 67, \"92\": 68, \"93\":", "\\ accident[csvmap[\"lat\"]] is None or accident[csvmap[\"long\"]] is None or \\", "monday = 0 return calendar.Calendar(nth).monthdatescalendar( year, month )[nth][daynum] def get_parent_object_node(node):", "0.5 millimeter if 0.0 < rain_in_millimeters <= 0.5 or (", "3 hours from the local time and in winter clock", "9}, {\"id\": 0, \"time\": 1, \"type\": 2, \"long\": 3, \"lat\":", "and accident[0] == \"\": logging.warn(\"\\t\\tEmpty File!\") continue csvmap = CSVMAP[format_version]", "format_version = 0 if \"MissionID\" in accident[0] else 1 continue", "DB entries after execution \"\"\" app = init_flask() db =", "mail...\") importmail.main(username, password, lastmail) united_path = \"static/data/united/\" total = 0", "66, \"91\": 67, \"92\": 68, \"93\": 69, \"94\": 70, \"95\":", "new_ids: logging.info(\"\\t\\tNothing loaded, all accidents already in DB\") return 0", "# סופת רוחות elif int(wind_force) > 5: weather = 77", "accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if format_version == 0: casualties = accident[csvmap[\"casualties\"]] marker['road_intactness']", "sky station = find_station_by_coordinate(collection, latitude, longitude) weather_data = collection.getElementsByTagName('surface_observation') wind_force", "or 
accident[csvmap[\"long\"]] == \"\" or \\ accident[csvmap[\"lat\"]] is None or", "coding: utf-8 -*- import calendar import csv from datetime import", "54, \"79\": 55, \"80\": 56, \"81\": 57, \"82\": 58, \"83\":", "encoding=\"utf-8\"), 'accident_severity': 2 if u\"קשה\" in decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else 3,", "51, \"76\": 52, \"77\": 53, \"78\": 54, \"79\": 55, \"80\":", "daylight_saving_time.day < accident_date.day | ( daylight_saving_time.day == accident_date.day & accident_date.hour", "wind_force is not None: if int(wind_force) > 8: weather =", "yield marker def import_to_db(collection, path): \"\"\" :param path: Local files", "] adate = ''.join( (str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour))) return adate", "0 logging.info(\"Loading United accidents...\") for united_file in os.listdir(united_path): if united_file.endswith(\".csv\"):", "station = find_station_by_coordinate(collection, latitude, longitude) weather_data = collection.getElementsByTagName('surface_observation') wind_force =", "march at 2:00 o'clock elif (accident_date.month == 3 & daylight_saving_time.day", "daynum day of the week (ex: the forth sunday of", "coordinates in line {0}. 
Moving on...\".format(line + 1)) continue created", "and september if accident_date.month < 10 & accident_date.month > 3:", "weather = 77 # רוחות חזקות if rain is not", "\"91\": 67, \"92\": 68, \"93\": 69, \"94\": 70, \"95\": 71,", "Calls importmail.py prior to importing to DB \"\"\" collection =", "רוחות elif weather == 77: weather = 85 # גשם", "accidents data from '%s'...\" % file_location) with open_utf8(file_location, 'rU') as", "time global hour DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d", "..utilities import init_flask, decode_hebrew, open_utf8 from ..import importmail from xml.dom", "ims(israel meteorological service) website logging.basicConfig(level=logging.DEBUG) s = requests.session() r =", "= accident_date.replace(hour=accident_date.hour - TIME_ZONE) # if accident happend between april", "18, \"25\": 19, \"26\": 20, \"27\": 21, \"28\": 22, \"29\":", "חזקות, גשם זלעפות else: weather = 79 # גשם זלעפות", "collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date = accident_time_zone_adjustment(created) for station in enumerate(station_data_in_date): if", "initialize big starting value so the distance will always be", "United accidents...\") for united_file in os.listdir(united_path): if united_file.endswith(\".csv\"): total +=", "6, \"8\": 6, \"9\": 7, \"10\": 8, \"11\": 9, \"12\":", "# גשם ורוחות חזקות else: weather = 15 # גשם", "april and september if accident_date.month < 10 & accident_date.month >", "CSVMAP[format_version] if accident[csvmap[\"lat\"]] == \"\" or accident[csvmap[\"long\"]] == \"\" or", "month )[nth][daynum] def get_parent_object_node(node): while node.parentNode: node = node.parentNode if", "990 in the xml code equals 0.(the last digit) for", "the ims(israel meteorological service) website logging.basicConfig(level=logging.DEBUG) s = requests.session() r", "commiting the changes\") def main(light=True, username='', password='', lastmail=False): \"\"\" Calls", "time.strftime('%H') 
hour = int(hour) else: time = datetime.strptime(str(created)[:-3], date_format) hour", "from ..import importmail from xml.dom import minidom import math import", "6, \"2\": 12, \"3\": 18, \"4\": 24, \"/\": 24, \"5\":", "are in a three digits format (4-004), we delete the", "light: logging.info(\"Importing data from mail...\") importmail.main(username, password, lastmail) united_path =", "29, \"44\": 9, \"45\": 30, \"46\": 30, \"47\": 30, \"48\":", "25, \"39\": 25, \"40\": 26, \"41\": 27, \"42\": 28, \"43\":", "between 0.5 and 4.0 millimeters if 0.5 < rain_in_millimeters /", "#!/usr/bin/env python # -*- coding: utf-8 -*- import calendar import", "\"3\": 18, \"4\": 24, \"/\": 24, \"5\": 1, \"6\": 2,", "37, \"56\": 38, \"57\": 39, \"58\": 37, \"59\": 37, \"61\":", "in DB\") return 0 db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents", "\"/\": 24, \"5\": 1, \"6\": 2, \"7\": 3, \"8\": 9,", "30, \"47\": 30, \"48\": 31, \"49\": 32, \"50\": 33, \"51\":", "> 3: accident_date.replace(hour=accident_date.hour - 1) # if accident happend before", "delete the 0es before the number if char == '0':", "0.5 < rain_in_millimeters / rain_hours <= 4: if weather ==", "- float(longitude)) ** 2 station_lat = station.getElementsByTagName('station_lat') assert len(station_lat) ==", "def update_db(collection): \"\"\" :return: length of DB entries after execution", "Yields a marker object with every iteration \"\"\" logging.info(\"\\tReading accidents", "0.5): if weather == 76: weather = 80 # סופת", "last digit) for example 991 = 0.1 rain_in_millimeters *= 0.01", "*= 0.01 return rain_in_millimeters def get_weather_element(station, weather_data, tag): element =", "\"20\": 14, \"21\": 15, \"22\": 16, \"23\": 17, \"24\": 18,", "36, \"62\": 40, \"63\": 15, \"64\": 41, \"65\": 19, \"66\":", "# average rain amount per hour is more than 8.0", "\"64\": 41, \"65\": 19, \"66\": 42, \"67\": 43, \"68\": 44,", "return calendar.Calendar(nth).monthdatescalendar( year, month 
)[nth][daynum] def get_parent_object_node(node): while node.parentNode: node", "else: weather = 79 # גשם זלעפות return weather CSVMAP", "\"9\": 15} WEATHER = {\"0\": 1, \"1\": 2, \"3\": 3,", "rain amount per hour is between 4.0 and 8.0 millimeters", "the daynum from monday = 0 return calendar.Calendar(nth).monthdatescalendar( year, month", "= 80 # סופת רוחות, גשם קל elif weather ==", "and 4.0 millimeters if 0.5 < rain_in_millimeters / rain_hours <=", "logging.info(\"\\tFinished commiting the changes\") def main(light=True, username='', password='', lastmail=False): \"\"\"", "def parse_date(created): \"\"\" :param created: Date & Time string from", "'WW') if weather_code is not None: return WEATHER[weather_code.strip()] if wind_force", "station.getElementsByTagName('station_lat') assert len(station_lat) == 1 lat = float(station_lat[0].childNodes[0].nodeValue) lat_difference =", "4, \"city\": 5, \"street\": 6, \"comment\": 7, \"casualties\": 8}, ]", "RAIN_DURATION_CODE_TO_HOURS = {\"1\": 6, \"2\": 12, \"3\": 18, \"4\": 24,", "\"83\": 59, \"84\": 60, \"85\": 61, \"86\": 62, \"87\": 63,", "assert len(station_lat) == 1 lat = float(station_lat[0].childNodes[0].nodeValue) lat_difference = (lat", "weather = 81 # גשם וסופת רוחות elif weather ==", "station_lon = station.getElementsByTagName('station_lon') assert len(station_lon) == 1 lon = float(station_lon[0].childNodes[0].nodeValue)", "/ rain_hours <= 8: if 76 == weather: weather =", "== db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"], AccidentMarker.provider_code == m[\"provider_code\"])).count()] if not new_ids:", "\"34\": 7, \"35\": 7, \"36\": 25, \"37\": 25, \"38\": 25,", "= -1 min_distance = float(\"inf\") # initialize big starting value", "\"29\": 23, \"30\": 24, \"31\": 24, \"32\": 24, \"33\": 7,", "7, \"casualties\": 8}, ] def create_accidents(collection, file_location): \"\"\" :param file_location:", "\"\"\" collection = retrieve_ims_xml() if not light: logging.info(\"Importing data 
from", "the rain amount was measured weather_code = get_weather_element(station, weather_data, 'WW')", "%H:%M:%S': time = datetime.strptime(str(created)[:-4], date_format) hour = time.strftime('%H') hour =", "11, \"18\": 12, \"19\": 13, \"20\": 14, \"21\": 15, \"22\":", "= ''.join( (str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour))) return adate def all_station_in_date_frame(collection,", "שוטף else: weather = 78 # גשם שוטף # average", "\"56\": 38, \"57\": 39, \"58\": 37, \"59\": 37, \"61\": 37,", "PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE = 2 # convert IMS hours", "\"30\": 24, \"31\": 24, \"32\": 24, \"33\": 7, \"34\": 7,", "\"69\": 45, \"70\": 46, \"71\": 47, \"72\": 48, \"73\": 16,", "= {'id': accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]], 'created': created, 'provider_code':", "== '%Y-%m-%d %H:%M:%S': time = datetime.strptime(str(created)[:-4], date_format) hour = time.strftime('%H')", "& daylight_saving_time.day < accident_date.day | ( daylight_saving_time.day == accident_date.day &", "find_station_by_coordinate(collection, latitude, longitude): station_place_in_xml = -1 min_distance = float(\"inf\") #", "logging.info(\"Loading United accidents...\") for united_file in os.listdir(united_path): if united_file.endswith(\".csv\"): total", "\"42\": 28, \"43\": 29, \"44\": 9, \"45\": 30, \"46\": 30,", "accident_time_zone_adjustment(created) for station in enumerate(station_data_in_date): if accident_date in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station))", "15 # גשם # average rain amount per hour is", "# סופת רוחות, גשם שוטף if weather == 77: weather", "return collection def parse_date(created): \"\"\" :param created: Date & Time", "len(station_lat) == 1 lat = float(station_lat[0].childNodes[0].nodeValue) lat_difference = (lat -", "after the last friday of march at 2:00 o'clock elif", "# start counting the 
daynum from monday = 0 return", "nth occurrence of the daynum day of the week (ex:", "= int(hour) else: time = datetime.strptime(str(created)[:-3], date_format) hour = time.strftime('%H')", "line continue if line == 1 and accident[0] == \"\":", "17, \"24\": 18, \"25\": 19, \"26\": 20, \"27\": 21, \"28\":", "else: weather = 37 # גשם קל # average rain", "import_to_db(collection, path): \"\"\" :param path: Local files directory ('united_path' on", "data in the time of the accident doc = minidom.Document()", "hour is more than 8.0 millimeters elif rain_in_millimeters / rain_hours", "calendar.Calendar(nth).monthdatescalendar( year, month )[nth][daynum] def get_parent_object_node(node): while node.parentNode: node =", "is the nth occurrence of the daynum day of the", "\"58\": 37, \"59\": 37, \"61\": 37, \"60\": 36, \"62\": 40,", "longitude): weather = 1 # default weather is clear sky", "\"90\": 66, \"91\": 67, \"92\": 68, \"93\": 69, \"94\": 70,", "<= 0.5 or ( 0.0 < rain_in_millimeters / rain_hours <=", "weather = 76 # סופת רוחות elif int(wind_force) > 5:", "2:00 o'clock elif accident_date.month == 10 & ( winter_clock.day >", "= 0 return calendar.Calendar(nth).monthdatescalendar( year, month )[nth][daynum] def get_parent_object_node(node): while", "accident doc = minidom.Document() base = doc.createElement('accident_date') doc.appendChild(base) station_data_in_date =", "{\"0\": 1, \"1\": 2, \"3\": 3, \"4\": 4, \"5\": 5,", "2)): accident_date.replace(hour=accident_date.hour - 1) # if accident happend after the", "& ( winter_clock.day > accident_date.day | ( winter_clock.day == accident_date.day", "\"10\": 8, \"11\": 9, \"12\": 10, \"17\": 11, \"18\": 12,", "casualties.isdigit() else 0 yield marker def import_to_db(collection, path): \"\"\" :param", "76: weather = 80 # סופת רוחות, גשם קל elif", "30, \"46\": 30, \"47\": 30, \"48\": 31, \"49\": 32, \"50\":", "= time.strftime('%H') hour = int(hour) else: time = datetime.strptime(str(created)[:-3], date_format)", "to help 
convert from string to number for char in", ":param file_location: local location of .csv :return: Yields a marker", "13, \"20\": 14, \"21\": 15, \"22\": 16, \"23\": 17, \"24\":", "\"city\": 5, \"street\": 6, \"comment\": 7, \"casualties\": 8}, ] def", "5, \"7\": 6, \"8\": 6, \"9\": 7, \"10\": 8, \"11\":", "therefore in daylight_saving_time we deduct 3 hours from the local", "db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m[\"id\"] in new_ids])", "for m in accidents if m[\"id\"] in new_ids]) db.session.commit() return", "with open_utf8(file_location, 'rU') as f: reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab)", "0.1 and 0.5 millimeter if 0.0 < rain_in_millimeters <= 0.5", "daynum, year, month): # find if date is the nth", "67, \"92\": 68, \"93\": 69, \"94\": 70, \"95\": 71, \"96\":", "\"lat\": 2, \"long\": 3, \"street\": 4, \"city\": 6, \"comment\": 7,", "pylint: disable=unexpected-keyword-arg accident_date = parse_date(created) daylight_saving_time = is_nth_weekday(4, 4, accident_date.year,", "if line == 0: # header format_version = 0 if", "\"\" or \\ accident[csvmap[\"lat\"]] is None or accident[csvmap[\"long\"]] is None", "2 # convert IMS hours code to hours RAIN_DURATION_CODE_TO_HOURS =", "8: weather = 76 # סופת רוחות elif int(wind_force) >", "importmail.py prior to importing to DB \"\"\" collection = retrieve_ims_xml()", "\"48\": 31, \"49\": 32, \"50\": 33, \"51\": 34, \"52\": 33,", "parse_date(created): \"\"\" :param created: Date & Time string from csv", "'%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M']", "UTC time # therefore in daylight_saving_time we deduct 3 hours", "76 == weather: weather = 82 # סופת רוחות, גשם", "# header format_version = 0 if \"MissionID\" in accident[0] else", "\"\": logging.warn(\"\\t\\tEmpty File!\") continue csvmap = CSVMAP[format_version] if accident[csvmap[\"lat\"]] ==", "int(wind_force) > 5: weather = 77 # רוחות חזקות if", 
"float(longitude)) ** 2 station_lat = station.getElementsByTagName('station_lat') assert len(station_lat) == 1", "accident_date.replace(hour=accident_date.hour - 1) # ] adate = ''.join( (str(accident_date.year), str(accident_date.month),", "and 8.0 millimeters elif 4 < rain_in_millimeters / rain_hours <=", "decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]] + ' ' + accident[csvmap[\"city\"]]), encoding=\"utf-8\"),", "(ex: the forth sunday of april 2016) # start counting", "return adate def all_station_in_date_frame(collection, created): # return the stations data", "then 990 in the xml code equals 0.(the last digit)", "if date_format == '%Y-%m-%d %H:%M:%S': time = datetime.strptime(str(created)[:-4], date_format) hour", "%I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M'] for date_format in DATE_FORMATS: try:", "month): # find if date is the nth occurrence of", "\"94\": 70, \"95\": 71, \"96\": 72, \"97\": 73, \"98\": 74,", "WEATHER[weather_code.strip()] if wind_force is not None: if int(wind_force) > 8:", "rain amount per hour is more than 8.0 millimeters elif", "7, \"10\": 8, \"11\": 9, \"12\": 10, \"17\": 11, \"18\":", "= s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc = minidom.parseString(r.text) collection = xml_doc.documentElement return collection", "1 lat = float(station_lat[0].childNodes[0].nodeValue) lat_difference = (lat - float(latitude)) **", "continue if not accident: # empty line continue if line", "get_weather_element(station, weather_data, 'FF') rain = get_weather_element(station, weather_data, 'RRR') rain_duration =", "accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]], 'created': created, 'provider_code': PROVIDER_CODE, 'title':", "last friday of march at 2:00 o'clock elif (accident_date.month ==", "\"84\": 60, \"85\": 61, \"86\": 62, \"87\": 63, \"88\": 64,", "\"\"\" app = init_flask() db = SQLAlchemy(app) united 
= db.session.query(AccidentMarker).filter(AccidentMarker.provider_code", ":return: Yields a marker object with every iteration \"\"\" logging.info(\"\\tReading", "80 # סופת רוחות, גשם קל elif weather == 77:", "if weather_code is not None: return WEATHER[weather_code.strip()] if wind_force is", "in new_ids]) db.session.commit() return len(new_ids) def update_db(collection): \"\"\" :return: length", "/ rain_hours > 8: if weather == 76: weather =", "[ {\"id\": 0, \"time\": 1, \"lat\": 2, \"long\": 3, \"street\":", "daynum from monday = 0 return calendar.Calendar(nth).monthdatescalendar( year, month )[nth][daynum]", "counting the daynum from monday = 0 return calendar.Calendar(nth).monthdatescalendar( year,", "- 1) # ] adate = ''.join( (str(accident_date.year), str(accident_date.month), str(accident_date.day),", "xml_doc.documentElement return collection def parse_date(created): \"\"\" :param created: Date &", "dialect=csv.excel_tab) for line, accident in enumerate(reader): if line == 0:", "and 0.5 millimeter if 0.0 < rain_in_millimeters <= 0.5 or", "1 # default weather is clear sky station = find_station_by_coordinate(collection,", "weather = 78 # גשם שוטף # average rain amount", "process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if format_version == 0: casualties = accident[csvmap[\"casualties\"]]", "& accident_date.hour < 2)): accident_date.replace(hour=accident_date.hour - 1) # if accident", "clear sky station = find_station_by_coordinate(collection, latitude, longitude) weather_data = collection.getElementsByTagName('surface_observation')", "= float(rain) if rain_in_millimeters >= 990: # numbers that are", "united: if not accident.weather: accident.weather = process_weather_data(collection, accident.latitude, accident.longitude) db.session.commit()", "39, \"58\": 37, \"59\": 37, \"61\": 37, \"60\": 36, \"62\":", "990: # numbers that are higher then 990 in the", "april 2016) # start counting the daynum from monday =", "was 
measured weather_code = get_weather_element(station, weather_data, 'WW') if weather_code is", "file_location) with open_utf8(file_location, 'rU') as f: reader = csv.reader(f, delimiter=',',", "%I:%M'] for date_format in DATE_FORMATS: try: if date_format == '%Y-%m-%d", "importmail.main(username, password, lastmail) united_path = \"static/data/united/\" total = 0 logging.info(\"Loading", "\"7\": 3, \"8\": 9, \"9\": 15} WEATHER = {\"0\": 1,", "casualties if casualties.isdigit() else 0 yield marker def import_to_db(collection, path):", "\"24\": 18, \"25\": 19, \"26\": 20, \"27\": 21, \"28\": 22,", "data to the DB ############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE =", "in DATE_FORMATS: try: if date_format == '%Y-%m-%d %H:%M:%S': time =", "############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE = 2 # convert IMS", "65, \"90\": 66, \"91\": 67, \"92\": 68, \"93\": 69, \"94\":", "זלעפות else: weather = 79 # גשם זלעפות return weather", "TIME_ZONE) # if accident happend between april and september if", "flask_sqlalchemy import SQLAlchemy from sqlalchemy import and_ from ..constants import", "enumerate(reader): if line == 0: # header format_version = 0", "or \\ accident[csvmap[\"lat\"]] is None or accident[csvmap[\"long\"]] is None or", "48, \"73\": 16, \"74\": 50, \"75\": 51, \"76\": 52, \"77\":", "'TR') # the duration of time in which the rain", "# גשם # average rain amount per hour is between", "weather = 79 # גשם זלעפות return weather CSVMAP =", "an xml document from the ims(israel meteorological service) website logging.basicConfig(level=logging.DEBUG)", "7, \"type\": 8, \"casualties\": 9}, {\"id\": 0, \"time\": 1, \"type\":", "for char in num_conv: # in the xml number are", "else: time = datetime.strptime(str(created)[:-3], date_format) hour = time.strftime('%H') hour =", "79 # גשם זלעפות 
return weather CSVMAP = [ {\"id\":", "# variable to help convert from string to number for", "3, \"8\": 9, \"9\": 15} WEATHER = {\"0\": 1, \"1\":", "'%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M'] for date_format", "SQLAlchemy(app) united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for accident in united:", "doc.appendChild(base) station_data_in_date = collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date = accident_time_zone_adjustment(created) for station", ":return: length of DB entries after execution \"\"\" app =", "break rain_in_millimeters = float(rain) if rain_in_millimeters >= 990: # numbers", "accident_date.month > 3: accident_date.replace(hour=accident_date.hour - 1) # if accident happend", "continue created = parse_date(accident[csvmap[\"time\"]]) marker = {'id': accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]],", "before the number if char == '0': rain.replace(char, '') else:", "if format_version == 0: casualties = accident[csvmap[\"casualties\"]] marker['road_intactness'] = casualties", "CONST from ..models import AccidentMarker from ..utilities import init_flask, decode_hebrew,", "( 0.0 < rain_in_millimeters / rain_hours <= 0.5): if weather", "weather_data = collection.getElementsByTagName('surface_observation') wind_force = get_weather_element(station, weather_data, 'FF') rain =", "october at 2:00 o'clock elif accident_date.month == 10 & (", "4: if weather == 76: weather = 81 # גשם", "'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]] + ' ' + accident[csvmap[\"city\"]]),", "= {\"0\": 1, \"1\": 2, \"3\": 3, \"4\": 4, \"5\":", "None or \\ accident[csvmap[\"lat\"]] == \"NULL\" or accident[csvmap[\"long\"]] == \"NULL\":", "** 2 temp_dis = math.sqrt(lat_difference + lon_difference) if temp_dis <", "24, \"31\": 24, \"32\": 24, \"33\": 7, \"34\": 7, \"35\":", ">= 
2)): accident_date.replace(hour=accident_date.hour - 1) # ] adate = ''.join(", "rain is not None and rain_duration is not None: rain_in_millimeters", "xml_doc = minidom.parseString(r.text) collection = xml_doc.documentElement return collection def parse_date(created):", "def find_station_by_coordinate(collection, latitude, longitude): station_place_in_xml = -1 min_distance = float(\"inf\")", "33, \"53\": 35, \"54\": 36, \"55\": 37, \"56\": 38, \"57\":", "%I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M'] for date_format in", "= 37 # גשם קל # average rain amount per", "adate def all_station_in_date_frame(collection, created): # return the stations data in", "os.listdir(united_path): if united_file.endswith(\".csv\"): total += import_to_db(collection, united_path + united_file) logging.info(\"\\tImported", "accident_date.hour >= 2)): accident_date.replace(hour=accident_date.hour - 1) # ] adate =", "= int(hour) if str(created).endswith('AM') else int(hour) + 12 break except", "after execution \"\"\" app = init_flask() db = SQLAlchemy(app) accidents", "+ accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity': 2 if u\"קשה\" in decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")", "סופת רוחות elif int(wind_force) > 5: weather = 77 #", "\"97\": 73, \"98\": 74, \"99\": 75} def retrieve_ims_xml(): # getting", "from ..constants import CONST from ..models import AccidentMarker from ..utilities", "base def find_station_by_coordinate(collection, latitude, longitude): station_place_in_xml = -1 min_distance =", "> 8: if weather == 76: weather = 83 #", "= minidom.Document() base = doc.createElement('accident_date') doc.appendChild(base) station_data_in_date = collection.getElementsByTagName('date_selected') station_data_in_date.sort()", "convert_xml_values_to_numbers(rain) rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain amount is between 0.1", "requests import logging 
############################################################################################ # United.py is responsible for the", "elif 4 < rain_in_millimeters / rain_hours <= 8: if 76", "= db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for accident in united: if not", "between 4.0 and 8.0 millimeters elif 4 < rain_in_millimeters /", "xml.dom import minidom import math import requests import logging ############################################################################################", "# return accident time in UTC time # pylint: disable=unexpected-keyword-arg", "'%m/%d/%Y %I:%M'] for date_format in DATE_FORMATS: try: if date_format ==", "43, \"68\": 44, \"69\": 45, \"70\": 46, \"71\": 47, \"72\":", "-*- coding: utf-8 -*- import calendar import csv from datetime", "def create_accidents(collection, file_location): \"\"\" :param file_location: local location of .csv", "hours from the local time and in winter clock 2", "22, \"29\": 23, \"30\": 24, \"31\": 24, \"32\": 24, \"33\":", "datetime.strptime(str(created)[:-3], date_format) hour = time.strftime('%H') hour = int(hour) if str(created).endswith('AM')", "accident[csvmap[\"long\"]] == \"\" or \\ accident[csvmap[\"lat\"]] is None or accident[csvmap[\"long\"]]", "year, month): # find if date is the nth occurrence", "\"51\": 34, \"52\": 33, \"53\": 35, \"54\": 36, \"55\": 37,", "date_format) hour = time.strftime('%H') hour = int(hour) else: time =", "rain_in_millimeters *= 0.01 return rain_in_millimeters def get_weather_element(station, weather_data, tag): element", "= float(station_lat[0].childNodes[0].nodeValue) lat_difference = (lat - float(latitude)) ** 2 temp_dis", "deployment of \"united hatzala\" data to the DB ############################################################################################ PROVIDER_CODE", "path): \"\"\" :param path: Local files directory ('united_path' on main()", "is_nth_weekday(4, 6, accident_date.year, 10) # weather is given in UTC", "lastmail) 
united_path = \"static/data/united/\" total = 0 logging.info(\"Loading United accidents...\")", "= SQLAlchemy(app) united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for accident in", "..constants import CONST from ..models import AccidentMarker from ..utilities import", "56, \"81\": 57, \"82\": 58, \"83\": 59, \"84\": 60, \"85\":", "global hour DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S',", "\"2\": 12, \"3\": 18, \"4\": 24, \"/\": 24, \"5\": 1,", "74, \"99\": 75} def retrieve_ims_xml(): # getting an xml document", "float(station_lon[0].childNodes[0].nodeValue) lon_difference = (lon - float(longitude)) ** 2 station_lat =", "return weather CSVMAP = [ {\"id\": 0, \"time\": 1, \"lat\":", "rain = get_weather_element(station, weather_data, 'RRR') rain_duration = get_weather_element(station, weather_data, 'TR')", "or accident[csvmap[\"long\"]] is None or \\ accident[csvmap[\"lat\"]] == \"NULL\" or", "float(station_lat[0].childNodes[0].nodeValue) lat_difference = (lat - float(latitude)) ** 2 temp_dis =", "\"casualties\": 9}, {\"id\": 0, \"time\": 1, \"type\": 2, \"long\": 3,", "= (lat - float(latitude)) ** 2 temp_dis = math.sqrt(lat_difference +", "'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"), 'weather': process_weather_data(collection, accident[csvmap[\"lat\"]],", "(lon - float(longitude)) ** 2 station_lat = station.getElementsByTagName('station_lat') assert len(station_lat)", "math.sqrt(lat_difference + lon_difference) if temp_dis < min_distance: min_distance = temp_dis", "1) # ] adate = ''.join( (str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour)))", ":param created: Date & Time string from csv :return: Python", "= station.getElementsByTagName('station_lat') assert len(station_lat) == 1 lat = float(station_lat[0].childNodes[0].nodeValue) lat_difference", "hours code 
to hours RAIN_DURATION_CODE_TO_HOURS = {\"1\": 6, \"2\": 12,", "in which the rain amount was measured weather_code = get_weather_element(station,", "0.(the last digit) for example 991 = 0.1 rain_in_millimeters *=", "continue csvmap = CSVMAP[format_version] if accident[csvmap[\"lat\"]] == \"\" or accident[csvmap[\"long\"]]", "= 2 # convert IMS hours code to hours RAIN_DURATION_CODE_TO_HOURS", "2016) # start counting the daynum from monday = 0", "= get_weather_element(station, weather_data, 'WW') if weather_code is not None: return", "'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"), 'weather':", "3 & daylight_saving_time.day < accident_date.day | ( daylight_saving_time.day == accident_date.day", "\"95\": 71, \"96\": 72, \"97\": 73, \"98\": 74, \"99\": 75}", "\"6\": 2, \"7\": 3, \"8\": 9, \"9\": 15} WEATHER =", "\"32\": 24, \"33\": 7, \"34\": 7, \"35\": 7, \"36\": 25,", "\"46\": 30, \"47\": 30, \"48\": 31, \"49\": 32, \"50\": 33,", "weather CSVMAP = [ {\"id\": 0, \"time\": 1, \"lat\": 2,", "31, \"49\": 32, \"50\": 33, \"51\": 34, \"52\": 33, \"53\":", "= retrieve_ims_xml() if not light: logging.info(\"Importing data from mail...\") importmail.main(username,", "37 # גשם קל # average rain amount per hour", "accident: # empty line continue if line == 1 and", "= weather_data[station].getElementsByTagName(tag) if element: weather_element = element[0].childNodes[0].nodeValue else: weather_element =", "retrieve_ims_xml(): # getting an xml document from the ims(israel meteorological", "גשם זלעפות if weather == 77: weather = 87 #", "below) :return: length of DB entries after execution \"\"\" app", "average rain amount per hour is between 4.0 and 8.0", "which the rain amount was measured weather_code = get_weather_element(station, weather_data,", "list(create_accidents(collection, path)) if not accidents: return 0 new_ids = [m[\"id\"]", "def 
get_weather_element(station, weather_data, tag): element = weather_data[station].getElementsByTagName(tag) if element: weather_element", "hour = int(hour) else: time = datetime.strptime(str(created)[:-3], date_format) hour =", "accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity': 2 if u\"קשה\" in decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else", "logging.warn(\"\\t\\tMissing coordinates in line {0}. Moving on...\".format(line + 1)) continue", "math import requests import logging ############################################################################################ # United.py is responsible", "collection = retrieve_ims_xml() if not light: logging.info(\"Importing data from mail...\")", "def all_station_in_date_frame(collection, created): # return the stations data in the", "int(hour) + 12 break except ValueError: pass return datetime(time.year, time.month,", "daylight_saving_time.day == accident_date.day & accident_date.hour >= 2)): accident_date.replace(hour=accident_date.hour - 1)", "== 2) for accident in united: if not accident.weather: accident.weather", "time and in winter clock 2 hours # [ accident_date", "directory ('united_path' on main() below) :return: length of DB entries", "24, \"33\": 7, \"34\": 7, \"35\": 7, \"36\": 25, \"37\":", "weather: weather = 82 # סופת רוחות, גשם שוטף if", "10 & ( winter_clock.day > accident_date.day | ( winter_clock.day ==", "== \"NULL\" or accident[csvmap[\"long\"]] == \"NULL\": logging.warn(\"\\t\\tMissing coordinates in line", "num_conv: # in the xml number are in a three", "'%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M'] for date_format in DATE_FORMATS: try: if", "/ rain_hours <= 4: if weather == 76: weather =", "accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]], 'created': created, 'provider_code': PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100],", "try: if date_format == '%Y-%m-%d %H:%M:%S': time = 
datetime.strptime(str(created)[:-4], date_format)", "hour is between 4.0 and 8.0 millimeters elif 4 <", "digits format (4-004), we delete the 0es before the number", "weather = 82 # סופת רוחות, גשם שוטף if weather", "\"59\": 37, \"61\": 37, \"60\": 36, \"62\": 40, \"63\": 15,", "& accident_date.hour >= 2)): accident_date.replace(hour=accident_date.hour - 1) # ] adate", "\"\"\" logging.info(\"\\tReading accidents data from '%s'...\" % file_location) with open_utf8(file_location,", "between 0.1 and 0.5 millimeter if 0.0 < rain_in_millimeters <=", "time # pylint: disable=unexpected-keyword-arg accident_date = parse_date(created) daylight_saving_time = is_nth_weekday(4,", "per hour is between 0.5 and 4.0 millimeters if 0.5", "4, accident_date.year, 3) winter_clock = is_nth_weekday(4, 6, accident_date.year, 10) #", "if not new_ids: logging.info(\"\\t\\tNothing loaded, all accidents already in DB\")", "\"36\": 25, \"37\": 25, \"38\": 25, \"39\": 25, \"40\": 26,", "if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"], AccidentMarker.provider_code == m[\"provider_code\"])).count()] if", "import os from flask_sqlalchemy import SQLAlchemy from sqlalchemy import and_", "= [ {\"id\": 0, \"time\": 1, \"lat\": 2, \"long\": 3,", "date_format) hour = time.strftime('%H') hour = int(hour) if str(created).endswith('AM') else", "start counting the daynum from monday = 0 return calendar.Calendar(nth).monthdatescalendar(", "| ( daylight_saving_time.day == accident_date.day & accident_date.hour >= 2)): accident_date.replace(hour=accident_date.hour", "station_data_in_date = collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date = accident_time_zone_adjustment(created) for station in", "else: weather_element = None return weather_element def process_weather_data(collection, latitude, longitude):", "4, \"5\": 5, \"7\": 6, \"8\": 6, \"9\": 7, \"10\":", "77: weather = 86 # רוחות חזקות, גשם שוטף else:", "4.0 and 8.0 millimeters 
elif 4 < rain_in_millimeters / rain_hours", "86 # רוחות חזקות, גשם שוטף else: weather = 78", "\"\"\" :return: length of DB entries after execution \"\"\" app", "{\"1\": 6, \"2\": 12, \"3\": 18, \"4\": 24, \"/\": 24,", "winter_clock = is_nth_weekday(4, 6, accident_date.year, 10) # weather is given", "רוחות חזקות if rain is not None and rain_duration is", "service) website logging.basicConfig(level=logging.DEBUG) s = requests.session() r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc", "import calendar import csv from datetime import datetime import os", "= xml_doc.documentElement return collection def parse_date(created): \"\"\" :param created: Date", "process_weather_data(collection, accident.latitude, accident.longitude) db.session.commit() logging.info(\"\\tFinished commiting the changes\") def main(light=True,", "24, \"/\": 24, \"5\": 1, \"6\": 2, \"7\": 3, \"8\":", "גשם # average rain amount per hour is between 4.0", "location of .csv :return: Yields a marker object with every", "requests.session() r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc = minidom.parseString(r.text) collection = xml_doc.documentElement", "= ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d", "SQLAlchemy(app) accidents = list(create_accidents(collection, path)) if not accidents: return 0", "37, \"61\": 37, \"60\": 36, \"62\": 40, \"63\": 15, \"64\":", "\"88\": 64, \"89\": 65, \"90\": 66, \"91\": 67, \"92\": 68,", "return 0 new_ids = [m[\"id\"] for m in accidents if", "\"60\": 36, \"62\": 40, \"63\": 15, \"64\": 41, \"65\": 19,", "float(\"inf\") # initialize big starting value so the distance will", "convert IMS hours code to hours RAIN_DURATION_CODE_TO_HOURS = {\"1\": 6,", "in accident[0] else 1 continue if not accident: # empty", "0.0 < rain_in_millimeters / rain_hours <= 0.5): if weather ==", "of .csv :return: Yields a marker object with every iteration", "accident[0] == \"\": logging.warn(\"\\t\\tEmpty 
File!\") continue csvmap = CSVMAP[format_version] if", "import SQLAlchemy from sqlalchemy import and_ from ..constants import CONST", "33, \"51\": 34, \"52\": 33, \"53\": 35, \"54\": 36, \"55\":", "the stations data in the time of the accident doc", "'') else: break rain_in_millimeters = float(rain) if rain_in_millimeters >= 990:", "== \"\" or accident[csvmap[\"long\"]] == \"\" or \\ accident[csvmap[\"lat\"]] is", "= requests.session() r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc = minidom.parseString(r.text) collection =", "weather == 76: weather = 81 # גשם וסופת רוחות", "encoding=\"utf-8\"), 'weather': process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if format_version == 0: casualties", "o'clock elif accident_date.month == 10 & ( winter_clock.day > accident_date.day", "rain_hours <= 0.5): if weather == 76: weather = 80", "format_version == 0: casualties = accident[csvmap[\"casualties\"]] marker['road_intactness'] = casualties if", "< min_distance: min_distance = temp_dis station_place_in_xml = i return station_place_in_xml", "15, \"22\": 16, \"23\": 17, \"24\": 18, \"25\": 19, \"26\":", "# initialize big starting value so the distance will always", "if accident_date.month < 10 & accident_date.month > 3: accident_date.replace(hour=accident_date.hour -", "37, \"59\": 37, \"61\": 37, \"60\": 36, \"62\": 40, \"63\":", "str(accident_date.hour))) return adate def all_station_in_date_frame(collection, created): # return the stations", "%H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M'] for", "rain_in_millimeters >= 990: # numbers that are higher then 990", "None return weather_element def process_weather_data(collection, latitude, longitude): weather = 1", "{'id': accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]], 'created': created, 'provider_code': PROVIDER_CODE,", "'accident_severity': 2 if u\"קשה\" in 
decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else 3, 'location_accuracy':", "the number if char == '0': rain.replace(char, '') else: break", "if str(created).endswith('AM') else int(hour) + 12 break except ValueError: pass", "logging.info(\"\\tReading accidents data from '%s'...\" % file_location) with open_utf8(file_location, 'rU')", "if not accidents: return 0 new_ids = [m[\"id\"] for m", "not light: logging.info(\"Importing data from mail...\") importmail.main(username, password, lastmail) united_path", "the nth occurrence of the daynum day of the week", "create_accidents(collection, file_location): \"\"\" :param file_location: local location of .csv :return:", "30, \"48\": 31, \"49\": 32, \"50\": 33, \"51\": 34, \"52\":", "date is the nth occurrence of the daynum day of", "7, \"34\": 7, \"35\": 7, \"36\": 25, \"37\": 25, \"38\":", "\"41\": 27, \"42\": 28, \"43\": 29, \"44\": 9, \"45\": 30,", "get_weather_element(station, weather_data, 'WW') if weather_code is not None: return WEATHER[weather_code.strip()]", "\"55\": 37, \"56\": 38, \"57\": 39, \"58\": 37, \"59\": 37,", "= 86 # רוחות חזקות, גשם שוטף else: weather =", "{\"id\": 0, \"time\": 1, \"type\": 2, \"long\": 3, \"lat\": 4,", "| ( winter_clock.day == accident_date.day & accident_date.hour < 2)): accident_date.replace(hour=accident_date.hour", "in the time of the accident doc = minidom.Document() base", "the last friday of march at 2:00 o'clock elif (accident_date.month", "if wind_force is not None: if int(wind_force) > 8: weather", "\"12\": 10, \"17\": 11, \"18\": 12, \"19\": 13, \"20\": 14,", "25, \"37\": 25, \"38\": 25, \"39\": 25, \"40\": 26, \"41\":", "== accident_date.day & accident_date.hour >= 2)): accident_date.replace(hour=accident_date.hour - 1) #", "1 lon = float(station_lon[0].childNodes[0].nodeValue) lon_difference = (lon - float(longitude)) **", "def process_weather_data(collection, latitude, longitude): weather = 1 # default weather", "station_place_in_xml = i return 
station_place_in_xml def convert_xml_values_to_numbers(rain): num_conv = rain[:2]", "\"25\": 19, \"26\": 20, \"27\": 21, \"28\": 22, \"29\": 23,", "before the last sunday of october at 2:00 o'clock elif", "in UTC time # therefore in daylight_saving_time we deduct 3", "= parse_date(created) daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3) winter_clock =", "a marker object with every iteration \"\"\" logging.info(\"\\tReading accidents data", "average rain amount per hour is more than 8.0 millimeters", "the DB ############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE = 2 #", "string from csv :return: Python datetime object \"\"\" global time", "daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3) winter_clock = is_nth_weekday(4, 6,", "= element[0].childNodes[0].nodeValue else: weather_element = None return weather_element def process_weather_data(collection,", "parse_date(created) daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3) winter_clock = is_nth_weekday(4,", "3, \"4\": 4, \"5\": 5, \"7\": 6, \"8\": 6, \"9\":", "# the duration of time in which the rain amount", "7, \"35\": 7, \"36\": 25, \"37\": 25, \"38\": 25, \"39\":", "+ 1)) continue created = parse_date(accident[csvmap[\"time\"]]) marker = {'id': accident[csvmap[\"id\"]],", "return the stations data in the time of the accident", "elif int(wind_force) > 5: weather = 77 # רוחות חזקות", "import logging ############################################################################################ # United.py is responsible for the parsing", "\"86\": 62, \"87\": 63, \"88\": 64, \"89\": 65, \"90\": 66,", "77: weather = 85 # גשם ורוחות חזקות else: weather", "def is_nth_weekday(nth, daynum, year, month): # find if date is", "accident[csvmap[\"long\"]] is None or \\ accident[csvmap[\"lat\"]] == \"NULL\" or accident[csvmap[\"long\"]]", "0 
db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m[\"id\"] in", "weather_data[station].getElementsByTagName(tag) if element: weather_element = element[0].childNodes[0].nodeValue else: weather_element = None", "IMS hours code to hours RAIN_DURATION_CODE_TO_HOURS = {\"1\": 6, \"2\":", "= is_nth_weekday(4, 6, accident_date.year, 10) # weather is given in", "return datetime(time.year, time.month, time.day, hour, time.minute, 0) def is_nth_weekday(nth, daynum,", "u\"קשה\" in decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else 3, 'location_accuracy': 1, 'accident_type': 21,", "83 # סופת רוחות, גשם זלעפות if weather == 77:", "object \"\"\" global time global hour DATE_FORMATS = ['%m/%d/%Y %I:%M:%S',", "\"Object\": return node def accident_time_zone_adjustment(created): # return accident time in", "\"71\": 47, \"72\": 48, \"73\": 16, \"74\": 50, \"75\": 51,", "\"17\": 11, \"18\": 12, \"19\": 13, \"20\": 14, \"21\": 15,", "main(light=True, username='', password='', lastmail=False): \"\"\" Calls importmail.py prior to importing", "20, \"27\": 21, \"28\": 22, \"29\": 23, \"30\": 24, \"31\":", "char in num_conv: # in the xml number are in", "else 3, 'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]],", "files directory ('united_path' on main() below) :return: length of DB", "= 78 # גשם שוטף # average rain amount per", "76: weather = 83 # סופת רוחות, גשם זלעפות if", "password='', lastmail=False): \"\"\" Calls importmail.py prior to importing to DB", "\"44\": 9, \"45\": 30, \"46\": 30, \"47\": 30, \"48\": 31,", "db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for accident in united: if not accident.weather:", "\"9\": 7, \"10\": 8, \"11\": 9, \"12\": 10, \"17\": 11,", "58, \"83\": 59, \"84\": 60, \"85\": 61, \"86\": 62, \"87\":", "main() below) :return: length of DB entries after execution \"\"\"", "rain_hours <= 4: if 
weather == 76: weather = 81", "\"49\": 32, \"50\": 33, \"51\": 34, \"52\": 33, \"53\": 35,", "<= 0.5): if weather == 76: weather = 80 #", "doc.createElement('accident_date') doc.appendChild(base) station_data_in_date = collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date = accident_time_zone_adjustment(created) for", "1 continue if not accident: # empty line continue if", "== 76: weather = 80 # סופת רוחות, גשם קל", "of time in which the rain amount was measured weather_code", "datetime(time.year, time.month, time.day, hour, time.minute, 0) def is_nth_weekday(nth, daynum, year,", "AccidentMarker from ..utilities import init_flask, decode_hebrew, open_utf8 from ..import importmail", "of the accident doc = minidom.Document() base = doc.createElement('accident_date') doc.appendChild(base)", "hour = int(hour) if str(created).endswith('AM') else int(hour) + 12 break", "of october at 2:00 o'clock elif accident_date.month == 10 &", "\"21\": 15, \"22\": 16, \"23\": 17, \"24\": 18, \"25\": 19,", "2)): accident_date.replace(hour=accident_date.hour - 1) # ] adate = ''.join( (str(accident_date.year),", "station_place_in_xml def convert_xml_values_to_numbers(rain): num_conv = rain[:2] # variable to help", "'provider_code': PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]] + ' '", "# numbers that are higher then 990 in the xml", "accident_date.replace(hour=accident_date.hour - 1) # if accident happend before the last", "10) # weather is given in UTC time # therefore", "3) winter_clock = is_nth_weekday(4, 6, accident_date.year, 10) # weather is", "tag): element = weather_data[station].getElementsByTagName(tag) if element: weather_element = element[0].childNodes[0].nodeValue else:", "if \"MissionID\" in accident[0] else 1 continue if not accident:", "open_utf8 from ..import importmail from xml.dom import minidom import math", 
"marker['road_intactness'] = casualties if casualties.isdigit() else 0 yield marker def", "# if accident happend before the last sunday of october", "friday of march at 2:00 o'clock elif (accident_date.month == 3", "( winter_clock.day == accident_date.day & accident_date.hour < 2)): accident_date.replace(hour=accident_date.hour -", "init_flask, decode_hebrew, open_utf8 from ..import importmail from xml.dom import minidom", "weather = 86 # רוחות חזקות, גשם שוטף else: weather", "for line, accident in enumerate(reader): if line == 0: #", "\"\" or accident[csvmap[\"long\"]] == \"\" or \\ accident[csvmap[\"lat\"]] is None", "= find_station_by_coordinate(collection, latitude, longitude) weather_data = collection.getElementsByTagName('surface_observation') wind_force = get_weather_element(station,", "else 0 yield marker def import_to_db(collection, path): \"\"\" :param path:", "from flask_sqlalchemy import SQLAlchemy from sqlalchemy import and_ from ..constants", "0 return calendar.Calendar(nth).monthdatescalendar( year, month )[nth][daynum] def get_parent_object_node(node): while node.parentNode:", "TIME_ZONE = 2 # convert IMS hours code to hours", "rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain amount is between 0.1 and", "if m[\"id\"] in new_ids]) db.session.commit() return len(new_ids) def update_db(collection): \"\"\"", "זלעפות return weather CSVMAP = [ {\"id\": 0, \"time\": 1,", "34, \"52\": 33, \"53\": 35, \"54\": 36, \"55\": 37, \"56\":", "always be smaller than the initial station_data = collection.getElementsByTagName('surface_station') for", "be smaller than the initial station_data = collection.getElementsByTagName('surface_station') for i,", "סופת רוחות, גשם קל elif weather == 77: weather =", "if u\"קשה\" in decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else 3, 'location_accuracy': 1, 'accident_type':", "weather = 80 # סופת רוחות, גשם קל elif weather", "= process_weather_data(collection, accident.latitude, 
accident.longitude) db.session.commit() logging.info(\"\\tFinished commiting the changes\") def", "United.py is responsible for the parsing and deployment of \"united", "רוחות חזקות, גשם קל else: weather = 37 # גשם", "num_conv = rain[:2] # variable to help convert from string", "63, \"88\": 64, \"89\": 65, \"90\": 66, \"91\": 67, \"92\":", "marker def import_to_db(collection, path): \"\"\" :param path: Local files directory", "== weather: weather = 82 # סופת רוחות, גשם שוטף", "= 1 # default weather is clear sky station =", "password, lastmail) united_path = \"static/data/united/\" total = 0 logging.info(\"Loading United", "changes\") def main(light=True, username='', password='', lastmail=False): \"\"\" Calls importmail.py prior", "is given in UTC time # therefore in daylight_saving_time we", "is None or accident[csvmap[\"long\"]] is None or \\ accident[csvmap[\"lat\"]] ==", "a three digits format (4-004), we delete the 0es before", "empty line continue if line == 1 and accident[0] ==", "ורוחות חזקות else: weather = 15 # גשם # average", "higher then 990 in the xml code equals 0.(the last", "already in DB\") return 0 db.session.execute(AccidentMarker.__table__.insert(), [m for m in", "\"\"\" :param created: Date & Time string from csv :return:", "== 1 lat = float(station_lat[0].childNodes[0].nodeValue) lat_difference = (lat - float(latitude))", "of the daynum day of the week (ex: the forth", "adate = ''.join( (str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour))) return adate def", "station_place_in_xml = -1 min_distance = float(\"inf\") # initialize big starting", "if element: weather_element = element[0].childNodes[0].nodeValue else: weather_element = None return", "we deduct 3 hours from the local time and in", "amount is between 0.1 and 0.5 millimeter if 0.0 <", "variable to help convert from string to number for char", "CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]], 
encoding=\"utf-8\"), 'weather': process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if format_version", "\"type\": 2, \"long\": 3, \"lat\": 4, \"city\": 5, \"street\": 6,", "str(accident_date.month), str(accident_date.day), str(accident_date.hour))) return adate def all_station_in_date_frame(collection, created): # return", "at 2:00 o'clock elif accident_date.month == 10 & ( winter_clock.day", "marker object with every iteration \"\"\" logging.info(\"\\tReading accidents data from", "חזקות else: weather = 15 # גשם # average rain", "DATE_FORMATS: try: if date_format == '%Y-%m-%d %H:%M:%S': time = datetime.strptime(str(created)[:-4],", "26, \"41\": 27, \"42\": 28, \"43\": 29, \"44\": 9, \"45\":", "if rain is not None and rain_duration is not None:", "not None: rain_in_millimeters = convert_xml_values_to_numbers(rain) rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain", "# גשם וסופת רוחות elif weather == 77: weather =", "== 77: weather = 87 # רוחות חזקות, גשם זלעפות", "in united: if not accident.weather: accident.weather = process_weather_data(collection, accident.latitude, accident.longitude)", "min_distance = temp_dis station_place_in_xml = i return station_place_in_xml def convert_xml_values_to_numbers(rain):", "importing to DB \"\"\" collection = retrieve_ims_xml() if not light:", "\"79\": 55, \"80\": 56, \"81\": 57, \"82\": 58, \"83\": 59,", "== m[\"id\"], AccidentMarker.provider_code == m[\"provider_code\"])).count()] if not new_ids: logging.info(\"\\t\\tNothing loaded,", "base = doc.createElement('accident_date') doc.appendChild(base) station_data_in_date = collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date =", "happend between april and september if accident_date.month < 10 &", "encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]] + ' ' + accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity':", 
"disable=unexpected-keyword-arg accident_date = parse_date(created) daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3)", "total = 0 logging.info(\"Loading United accidents...\") for united_file in os.listdir(united_path):", "enumerate(station_data_in_date): if accident_date in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return base def find_station_by_coordinate(collection,", "if temp_dis < min_distance: min_distance = temp_dis station_place_in_xml = i", "decode_hebrew, open_utf8 from ..import importmail from xml.dom import minidom import", "weather_element = None return weather_element def process_weather_data(collection, latitude, longitude): weather", "to hours RAIN_DURATION_CODE_TO_HOURS = {\"1\": 6, \"2\": 12, \"3\": 18,", "sqlalchemy import and_ from ..constants import CONST from ..models import", "..import importmail from xml.dom import minidom import math import requests", "רוחות elif int(wind_force) > 5: weather = 77 # רוחות", "latitude, longitude): weather = 1 # default weather is clear", ":return: Python datetime object \"\"\" global time global hour DATE_FORMATS", "\"54\": 36, \"55\": 37, \"56\": 38, \"57\": 39, \"58\": 37,", "\"78\": 54, \"79\": 55, \"80\": 56, \"81\": 57, \"82\": 58,", "2, \"long\": 3, \"lat\": 4, \"city\": 5, \"street\": 6, \"comment\":", "len(station_lon) == 1 lon = float(station_lon[0].childNodes[0].nodeValue) lon_difference = (lon -", "UTC time # pylint: disable=unexpected-keyword-arg accident_date = parse_date(created) daylight_saving_time =", "87 # רוחות חזקות, גשם זלעפות else: weather = 79", "getting an xml document from the ims(israel meteorological service) website", "amount was measured weather_code = get_weather_element(station, weather_data, 'WW') if weather_code", "\"31\": 24, \"32\": 24, \"33\": 7, \"34\": 7, \"35\": 7,", "assert len(station_lon) == 1 lon = float(station_lon[0].childNodes[0].nodeValue) lon_difference = (lon", "35, \"54\": 36, \"55\": 37, \"56\": 
38, \"57\": 39, \"58\":", "min_distance = float(\"inf\") # initialize big starting value so the", "3, 'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"),", "2 station_lat = station.getElementsByTagName('station_lat') assert len(station_lat) == 1 lat =", "init_flask() db = SQLAlchemy(app) united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for", "== 10 & ( winter_clock.day > accident_date.day | ( winter_clock.day", "in num_conv: # in the xml number are in a", "import minidom import math import requests import logging ############################################################################################ #", "retrieve_ims_xml() if not light: logging.info(\"Importing data from mail...\") importmail.main(username, password,", "deduct 3 hours from the local time and in winter", "winter_clock.day == accident_date.day & accident_date.hour < 2)): accident_date.replace(hour=accident_date.hour - 1)", "\"43\": 29, \"44\": 9, \"45\": 30, \"46\": 30, \"47\": 30,", "None or accident[csvmap[\"long\"]] is None or \\ accident[csvmap[\"lat\"]] == \"NULL\"", "== 1 lon = float(station_lon[0].childNodes[0].nodeValue) lon_difference = (lon - float(longitude))", "8.0 millimeters elif rain_in_millimeters / rain_hours > 8: if weather", "2, \"3\": 3, \"4\": 4, \"5\": 5, \"7\": 6, \"8\":", "\"76\": 52, \"77\": 53, \"78\": 54, \"79\": 55, \"80\": 56,", "if not light: logging.info(\"Importing data from mail...\") importmail.main(username, password, lastmail)", "default weather is clear sky station = find_station_by_coordinate(collection, latitude, longitude)", "local time and in winter clock 2 hours # [", "accident.latitude, accident.longitude) db.session.commit() logging.info(\"\\tFinished commiting the changes\") def main(light=True, username='',", "] def create_accidents(collection, file_location): \"\"\" :param file_location: local location of", 
"db.session.commit() logging.info(\"\\tFinished commiting the changes\") def main(light=True, username='', password='', lastmail=False):", "69, \"94\": 70, \"95\": 71, \"96\": 72, \"97\": 73, \"98\":", "forth sunday of april 2016) # start counting the daynum", "accident_date.replace(hour=accident_date.hour - TIME_ZONE) # if accident happend between april and", "21, \"28\": 22, \"29\": 23, \"30\": 24, \"31\": 24, \"32\":", "for example 991 = 0.1 rain_in_millimeters *= 0.01 return rain_in_millimeters", "== 76: weather = 81 # גשם וסופת רוחות elif", "= accident[csvmap[\"casualties\"]] marker['road_intactness'] = casualties if casualties.isdigit() else 0 yield", "not new_ids: logging.info(\"\\t\\tNothing loaded, all accidents already in DB\") return", "in the xml number are in a three digits format", "72, \"97\": 73, \"98\": 74, \"99\": 75} def retrieve_ims_xml(): #", "is between 4.0 and 8.0 millimeters elif 4 < rain_in_millimeters", "and rain_duration is not None: rain_in_millimeters = convert_xml_values_to_numbers(rain) rain_hours =", "db = SQLAlchemy(app) united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for accident", "רוחות, גשם שוטף if weather == 77: weather = 86", "('united_path' on main() below) :return: length of DB entries after", "\"NULL\": logging.warn(\"\\t\\tMissing coordinates in line {0}. 
Moving on...\".format(line + 1))", "str(created).endswith('AM') else int(hour) + 12 break except ValueError: pass return", "accident[csvmap[\"casualties\"]] marker['road_intactness'] = casualties if casualties.isdigit() else 0 yield marker", "\"5\": 1, \"6\": 2, \"7\": 3, \"8\": 9, \"9\": 15}", "24, \"32\": 24, \"33\": 7, \"34\": 7, \"35\": 7, \"36\":", "get_weather_element(station, weather_data, 'RRR') rain_duration = get_weather_element(station, weather_data, 'TR') # the", "website logging.basicConfig(level=logging.DEBUG) s = requests.session() r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc =", "= get_weather_element(station, weather_data, 'FF') rain = get_weather_element(station, weather_data, 'RRR') rain_duration", "import AccidentMarker from ..utilities import init_flask, decode_hebrew, open_utf8 from ..import", "== 77: weather = 85 # גשם ורוחות חזקות else:", "in the xml code equals 0.(the last digit) for example", "process_weather_data(collection, latitude, longitude): weather = 1 # default weather is", "\"7\": 6, \"8\": 6, \"9\": 7, \"10\": 8, \"11\": 9,", "''.join( (str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour))) return adate def all_station_in_date_frame(collection, created):", "lat_difference = (lat - float(latitude)) ** 2 temp_dis = math.sqrt(lat_difference", "\"65\": 19, \"66\": 42, \"67\": 43, \"68\": 44, \"69\": 45,", "else int(hour) + 12 break except ValueError: pass return datetime(time.year,", "that are higher then 990 in the xml code equals", "== \"NULL\": logging.warn(\"\\t\\tMissing coordinates in line {0}. 
Moving on...\".format(line +", "if 0.5 < rain_in_millimeters / rain_hours <= 4: if weather", "# גשם קל # average rain amount per hour is", "\"\"\" app = init_flask() db = SQLAlchemy(app) accidents = list(create_accidents(collection,", "with every iteration \"\"\" logging.info(\"\\tReading accidents data from '%s'...\" %", "else: weather = 78 # גשם שוטף # average rain", "2, \"7\": 3, \"8\": 9, \"9\": 15} WEATHER = {\"0\":", "in accidents if m[\"id\"] in new_ids]) db.session.commit() return len(new_ids) def", "or \\ accident[csvmap[\"lat\"]] == \"NULL\" or accident[csvmap[\"long\"]] == \"NULL\": logging.warn(\"\\t\\tMissing", "db = SQLAlchemy(app) accidents = list(create_accidents(collection, path)) if not accidents:", "'%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M'] for date_format in DATE_FORMATS:", "all_station_in_date_frame(collection, created): # return the stations data in the time", "string to number for char in num_conv: # in the", "collection def parse_date(created): \"\"\" :param created: Date & Time string", "document from the ims(israel meteorological service) website logging.basicConfig(level=logging.DEBUG) s =", "\"92\": 68, \"93\": 69, \"94\": 70, \"95\": 71, \"96\": 72,", "time.strftime('%H') hour = int(hour) if str(created).endswith('AM') else int(hour) + 12", "m[\"id\"], AccidentMarker.provider_code == m[\"provider_code\"])).count()] if not new_ids: logging.info(\"\\t\\tNothing loaded, all", "- 1) # if accident happend after the last friday", "0 new_ids = [m[\"id\"] for m in accidents if 0", "accidents if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"], AccidentMarker.provider_code == m[\"provider_code\"])).count()]", "return node def accident_time_zone_adjustment(created): # return accident time in UTC", "45, \"70\": 46, \"71\": 47, \"72\": 48, \"73\": 16, \"74\":", "[m for m in accidents if m[\"id\"] in new_ids]) db.session.commit()", "קל else: weather = 37 # גשם קל # average", "the time of the accident doc = 
minidom.Document() base =", "\"comment\": 7, \"casualties\": 8}, ] def create_accidents(collection, file_location): \"\"\" :param", "lat = float(station_lat[0].childNodes[0].nodeValue) lat_difference = (lat - float(latitude)) ** 2", "# pylint: disable=unexpected-keyword-arg accident_date = parse_date(created) daylight_saving_time = is_nth_weekday(4, 4,", "def convert_xml_values_to_numbers(rain): num_conv = rain[:2] # variable to help convert", "find if date is the nth occurrence of the daynum", "return base def find_station_by_coordinate(collection, latitude, longitude): station_place_in_xml = -1 min_distance", "accident[csvmap[\"long\"]], 'created': created, 'provider_code': PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]]", "= rain[:2] # variable to help convert from string to", "= {\"1\": 6, \"2\": 12, \"3\": 18, \"4\": 24, \"/\":", "גשם שוטף # average rain amount per hour is more", "than 8.0 millimeters elif rain_in_millimeters / rain_hours > 8: if", "created, 'provider_code': PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]] + '", "for i, station in enumerate(station_data): station_lon = station.getElementsByTagName('station_lon') assert len(station_lon)", "= get_weather_element(station, weather_data, 'RRR') rain_duration = get_weather_element(station, weather_data, 'TR') #", "44, \"69\": 45, \"70\": 46, \"71\": 47, \"72\": 48, \"73\":", "per hour is between 4.0 and 8.0 millimeters elif 4", "{0}. 
Moving on...\".format(line + 1)) continue created = parse_date(accident[csvmap[\"time\"]]) marker", "\"time\": 1, \"type\": 2, \"long\": 3, \"lat\": 4, \"city\": 5,", "node.parentNode if node.nodeName == \"Object\": return node def accident_time_zone_adjustment(created): #", "weather = 85 # גשם ורוחות חזקות else: weather =", "in winter clock 2 hours # [ accident_date = accident_date.replace(hour=accident_date.hour", "iteration \"\"\" logging.info(\"\\tReading accidents data from '%s'...\" % file_location) with", "importmail from xml.dom import minidom import math import requests import", "between april and september if accident_date.month < 10 & accident_date.month", "\"\"\" Calls importmail.py prior to importing to DB \"\"\" collection", "hour is between 0.5 and 4.0 millimeters if 0.5 <", "execution \"\"\" app = init_flask() db = SQLAlchemy(app) accidents =", "rain.replace(char, '') else: break rain_in_millimeters = float(rain) if rain_in_millimeters >=", "accident_date.replace(hour=accident_date.hour - 1) # if accident happend after the last", "if not accident: # empty line continue if line ==", "db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"], AccidentMarker.provider_code == m[\"provider_code\"])).count()] if not new_ids: logging.info(\"\\t\\tNothing", "'0': rain.replace(char, '') else: break rain_in_millimeters = float(rain) if rain_in_millimeters", "m[\"provider_code\"])).count()] if not new_ids: logging.info(\"\\t\\tNothing loaded, all accidents already in", "27, \"42\": 28, \"43\": 29, \"44\": 9, \"45\": 30, \"46\":", "# סופת רוחות, גשם קל elif weather == 77: weather", "more than 8.0 millimeters elif rain_in_millimeters / rain_hours > 8:", "united_path = \"static/data/united/\" total = 0 logging.info(\"Loading United accidents...\") for", "rain_duration = get_weather_element(station, weather_data, 'TR') # the duration of time", "accident[0] else 1 continue if not accident: # empty line", "so the distance will always be smaller 
than the initial", "if accident happend before the last sunday of october at", "execution \"\"\" app = init_flask() db = SQLAlchemy(app) united =", "accident happend after the last friday of march at 2:00", "from datetime import datetime import os from flask_sqlalchemy import SQLAlchemy", "% file_location) with open_utf8(file_location, 'rU') as f: reader = csv.reader(f,", "רוחות, גשם קל elif weather == 77: weather = 84", "node.nodeName == \"Object\": return node def accident_time_zone_adjustment(created): # return accident", "the last sunday of october at 2:00 o'clock elif accident_date.month", "47, \"72\": 48, \"73\": 16, \"74\": 50, \"75\": 51, \"76\":", "> 8: weather = 76 # סופת רוחות elif int(wind_force)", "\"11\": 9, \"12\": 10, \"17\": 11, \"18\": 12, \"19\": 13,", "line {0}. Moving on...\".format(line + 1)) continue created = parse_date(accident[csvmap[\"time\"]])", "m[\"id\"] in new_ids]) db.session.commit() return len(new_ids) def update_db(collection): \"\"\" :return:", "= i return station_place_in_xml def convert_xml_values_to_numbers(rain): num_conv = rain[:2] #", "19, \"66\": 42, \"67\": 43, \"68\": 44, \"69\": 45, \"70\":", "4 < rain_in_millimeters / rain_hours <= 8: if 76 ==", "not accident: # empty line continue if line == 1", "total += import_to_db(collection, united_path + united_file) logging.info(\"\\tImported {0} items\".format(total)) update_db(collection)", "\"27\": 21, \"28\": 22, \"29\": 23, \"30\": 24, \"31\": 24,", "# therefore in daylight_saving_time we deduct 3 hours from the", "else: weather = 15 # גשם # average rain amount", "== m[\"provider_code\"])).count()] if not new_ids: logging.info(\"\\t\\tNothing loaded, all accidents already", "clock 2 hours # [ accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE)", "78 # גשם שוטף # average rain amount per hour", "= 82 # סופת רוחות, גשם שוטף if weather ==", "= 77 # רוחות חזקות if rain is not None", "rain_in_millimeters <= 0.5 or ( 0.0 < rain_in_millimeters / rain_hours", 
"not None: if int(wind_force) > 8: weather = 76 #", "CSVMAP = [ {\"id\": 0, \"time\": 1, \"lat\": 2, \"long\":", "occurrence of the daynum day of the week (ex: the", "0 yield marker def import_to_db(collection, path): \"\"\" :param path: Local", "time in which the rain amount was measured weather_code =", "accident_date.day | ( winter_clock.day == accident_date.day & accident_date.hour < 2)):", "\"45\": 30, \"46\": 30, \"47\": 30, \"48\": 31, \"49\": 32,", "winter_clock.day > accident_date.day | ( winter_clock.day == accident_date.day & accident_date.hour", "line == 0: # header format_version = 0 if \"MissionID\"", "42, \"67\": 43, \"68\": 44, \"69\": 45, \"70\": 46, \"71\":", "except ValueError: pass return datetime(time.year, time.month, time.day, hour, time.minute, 0)", "new_ids = [m[\"id\"] for m in accidents if 0 ==", "minidom.Document() base = doc.createElement('accident_date') doc.appendChild(base) station_data_in_date = collection.getElementsByTagName('date_selected') station_data_in_date.sort() accident_date", "def retrieve_ims_xml(): # getting an xml document from the ims(israel", "longitude) weather_data = collection.getElementsByTagName('surface_observation') wind_force = get_weather_element(station, weather_data, 'FF') rain", "None: return WEATHER[weather_code.strip()] if wind_force is not None: if int(wind_force)", "accident_date = parse_date(created) daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3) winter_clock", "time.month, time.day, hour, time.minute, 0) def is_nth_weekday(nth, daynum, year, month):", "if weather == 76: weather = 80 # סופת רוחות,", "\"28\": 22, \"29\": 23, \"30\": 24, \"31\": 24, \"32\": 24,", "\"96\": 72, \"97\": 73, \"98\": 74, \"99\": 75} def retrieve_ims_xml():", "68, \"93\": 69, \"94\": 70, \"95\": 71, \"96\": 72, \"97\":", "\"static/data/united/\" total = 0 logging.info(\"Loading United accidents...\") for united_file in", "62, \"87\": 63, \"88\": 64, \"89\": 65, \"90\": 66, \"91\":", "from sqlalchemy 
import and_ from ..constants import CONST from ..models", "in accidents if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"], AccidentMarker.provider_code ==", "loaded, all accidents already in DB\") return 0 db.session.execute(AccidentMarker.__table__.insert(), [m", "= datetime.strptime(str(created)[:-3], date_format) hour = time.strftime('%H') hour = int(hour) if", "if weather == 76: weather = 81 # גשם וסופת", "of \"united hatzala\" data to the DB ############################################################################################ PROVIDER_CODE =", "import csv from datetime import datetime import os from flask_sqlalchemy", "delimiter=',', dialect=csv.excel_tab) for line, accident in enumerate(reader): if line ==", "# סופת רוחות, גשם זלעפות if weather == 77: weather", "element = weather_data[station].getElementsByTagName(tag) if element: weather_element = element[0].childNodes[0].nodeValue else: weather_element", "< 10 & accident_date.month > 3: accident_date.replace(hour=accident_date.hour - 1) #", "= 0 if \"MissionID\" in accident[0] else 1 continue if", "weather = 84 # רוחות חזקות, גשם קל else: weather", "amount per hour is between 4.0 and 8.0 millimeters elif", "\"62\": 40, \"63\": 15, \"64\": 41, \"65\": 19, \"66\": 42,", "15} WEATHER = {\"0\": 1, \"1\": 2, \"3\": 3, \"4\":", "hour, time.minute, 0) def is_nth_weekday(nth, daynum, year, month): # find", "( winter_clock.day > accident_date.day | ( winter_clock.day == accident_date.day &", "if int(wind_force) > 8: weather = 76 # סופת רוחות", "from mail...\") importmail.main(username, password, lastmail) united_path = \"static/data/united/\" total =", "db.session.commit() return len(new_ids) def update_db(collection): \"\"\" :return: length of DB", "prior to importing to DB \"\"\" collection = retrieve_ims_xml() if", "station.getElementsByTagName('station_lon') assert len(station_lon) == 1 lon = float(station_lon[0].childNodes[0].nodeValue) lon_difference =", "== \"\": 
logging.warn(\"\\t\\tEmpty File!\") continue csvmap = CSVMAP[format_version] if accident[csvmap[\"lat\"]]", "0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"], AccidentMarker.provider_code == m[\"provider_code\"])).count()] if not", "sunday of april 2016) # start counting the daynum from", "for m in accidents if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"],", "70, \"95\": 71, \"96\": 72, \"97\": 73, \"98\": 74, \"99\":", "21, 'type': CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"), 'weather': process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])}", "# if accident happend after the last friday of march", "logging ############################################################################################ # United.py is responsible for the parsing and", "rain_duration is not None: rain_in_millimeters = convert_xml_values_to_numbers(rain) rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()]", "77: weather = 87 # רוחות חזקות, גשם זלעפות else:", "1 and accident[0] == \"\": logging.warn(\"\\t\\tEmpty File!\") continue csvmap =", "file_location): \"\"\" :param file_location: local location of .csv :return: Yields", "weather is clear sky station = find_station_by_coordinate(collection, latitude, longitude) weather_data", "return WEATHER[weather_code.strip()] if wind_force is not None: if int(wind_force) >", "from monday = 0 return calendar.Calendar(nth).monthdatescalendar( year, month )[nth][daynum] def", "datetime import os from flask_sqlalchemy import SQLAlchemy from sqlalchemy import", "is_nth_weekday(nth, daynum, year, month): # find if date is the", "temp_dis station_place_in_xml = i return station_place_in_xml def convert_xml_values_to_numbers(rain): num_conv =", "time.day, hour, time.minute, 0) def is_nth_weekday(nth, daynum, year, month): #", "0.1 rain_in_millimeters *= 0.01 return 
rain_in_millimeters def get_weather_element(station, weather_data, tag):", "elif rain_in_millimeters / rain_hours > 8: if weather == 76:", "= float(station_lon[0].childNodes[0].nodeValue) lon_difference = (lon - float(longitude)) ** 2 station_lat", "'address': decode_hebrew((accident[csvmap[\"street\"]] + ' ' + accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity': 2", "0.0 < rain_in_millimeters <= 0.5 or ( 0.0 < rain_in_millimeters", "= list(create_accidents(collection, path)) if not accidents: return 0 new_ids =", "convert_xml_values_to_numbers(rain): num_conv = rain[:2] # variable to help convert from", "\"\"\" global time global hour DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d", "18, \"4\": 24, \"/\": 24, \"5\": 1, \"6\": 2, \"7\":", "return station_place_in_xml def convert_xml_values_to_numbers(rain): num_conv = rain[:2] # variable to", "1) # if accident happend after the last friday of", "= casualties if casualties.isdigit() else 0 yield marker def import_to_db(collection,", "= temp_dis station_place_in_xml = i return station_place_in_xml def convert_xml_values_to_numbers(rain): num_conv", "def get_parent_object_node(node): while node.parentNode: node = node.parentNode if node.nodeName ==", "weather_data, 'WW') if weather_code is not None: return WEATHER[weather_code.strip()] if", "accident_date.year, 10) # weather is given in UTC time #", "CONST.UNITED_HATZALA_CODE TIME_ZONE = 2 # convert IMS hours code to", "f: reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab) for line, accident in", "= accident_time_zone_adjustment(created) for station in enumerate(station_data_in_date): if accident_date in str(station.childNodes[0].nodeValue):", "is None or \\ accident[csvmap[\"lat\"]] == \"NULL\" or accident[csvmap[\"long\"]] ==", "# רוחות חזקות, גשם קל else: weather = 37 #", "2 hours # [ accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE) #", "and in winter clock 2 hours # [ accident_date =", "\"82\": 58, \"83\": 59, 
\"84\": 60, \"85\": 61, \"86\": 62,", "\"39\": 25, \"40\": 26, \"41\": 27, \"42\": 28, \"43\": 29,", "# empty line continue if line == 1 and accident[0]", "elif accident_date.month == 10 & ( winter_clock.day > accident_date.day |", "rain_in_millimeters = convert_xml_values_to_numbers(rain) rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain amount is", "station in enumerate(station_data): station_lon = station.getElementsByTagName('station_lon') assert len(station_lon) == 1", "\"52\": 33, \"53\": 35, \"54\": 36, \"55\": 37, \"56\": 38,", "elif weather == 77: weather = 84 # רוחות חזקות,", "סופת רוחות, גשם שוטף if weather == 77: weather =", "casualties = accident[csvmap[\"casualties\"]] marker['road_intactness'] = casualties if casualties.isdigit() else 0", "weather_data, 'TR') # the duration of time in which the", "1, \"lat\": 2, \"long\": 3, \"street\": 4, \"city\": 6, \"comment\":", "import and_ from ..constants import CONST from ..models import AccidentMarker", "given in UTC time # therefore in daylight_saving_time we deduct", "%I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y", "חזקות if rain is not None and rain_duration is not", "53, \"78\": 54, \"79\": 55, \"80\": 56, \"81\": 57, \"82\":", "-*- import calendar import csv from datetime import datetime import", "csv.reader(f, delimiter=',', dialect=csv.excel_tab) for line, accident in enumerate(reader): if line", "millimeters elif 4 < rain_in_millimeters / rain_hours <= 8: if", "weather_code is not None: return WEATHER[weather_code.strip()] if wind_force is not", "station_lat = station.getElementsByTagName('station_lat') assert len(station_lat) == 1 lat = float(station_lat[0].childNodes[0].nodeValue)", "else 1 continue if not accident: # empty line continue", "וסופת רוחות elif weather == 77: weather = 85 #", "node = node.parentNode if node.nodeName == \"Object\": return node def", "\"67\": 43, \"68\": 44, \"69\": 45, \"70\": 46, \"71\": 
47,", "# גשם שוטף # average rain amount per hour is", "-1 min_distance = float(\"inf\") # initialize big starting value so", "csv :return: Python datetime object \"\"\" global time global hour", "28, \"43\": 29, \"44\": 9, \"45\": 30, \"46\": 30, \"47\":", "\"35\": 7, \"36\": 25, \"37\": 25, \"38\": 25, \"39\": 25,", "41, \"65\": 19, \"66\": 42, \"67\": 43, \"68\": 44, \"69\":", "data from mail...\") importmail.main(username, password, lastmail) united_path = \"static/data/united/\" total", "rain amount is between 0.1 and 0.5 millimeter if 0.0", "12 break except ValueError: pass return datetime(time.year, time.month, time.day, hour,", "import math import requests import logging ############################################################################################ # United.py is", "xml code equals 0.(the last digit) for example 991 =", "hours RAIN_DURATION_CODE_TO_HOURS = {\"1\": 6, \"2\": 12, \"3\": 18, \"4\":", "[ accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE) # if accident happend", "path)) if not accidents: return 0 new_ids = [m[\"id\"] for", "created = parse_date(accident[csvmap[\"time\"]]) marker = {'id': accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]], 'longitude':", "10 & accident_date.month > 3: accident_date.replace(hour=accident_date.hour - 1) # if", "collection.getElementsByTagName('surface_observation') wind_force = get_weather_element(station, weather_data, 'FF') rain = get_weather_element(station, weather_data,", "< accident_date.day | ( daylight_saving_time.day == accident_date.day & accident_date.hour >=", "9, \"9\": 15} WEATHER = {\"0\": 1, \"1\": 2, \"3\":", "is responsible for the parsing and deployment of \"united hatzala\"", "2 temp_dis = math.sqrt(lat_difference + lon_difference) if temp_dis < min_distance:", "float(latitude)) ** 2 temp_dis = math.sqrt(lat_difference + lon_difference) if temp_dis", "\"89\": 65, \"90\": 66, \"91\": 67, \"92\": 68, \"93\": 69,", "& Time string from csv :return: 
Python datetime object \"\"\"", "2, \"long\": 3, \"street\": 4, \"city\": 6, \"comment\": 7, \"type\":", "== 77: weather = 86 # רוחות חזקות, גשם שוטף", "happend before the last sunday of october at 2:00 o'clock", "' ' + accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity': 2 if u\"קשה\" in", "is clear sky station = find_station_by_coordinate(collection, latitude, longitude) weather_data =", "PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]] + ' ' +", "def import_to_db(collection, path): \"\"\" :param path: Local files directory ('united_path'", "accident.weather: accident.weather = process_weather_data(collection, accident.latitude, accident.longitude) db.session.commit() logging.info(\"\\tFinished commiting the", "גשם זלעפות return weather CSVMAP = [ {\"id\": 0, \"time\":", "import datetime import os from flask_sqlalchemy import SQLAlchemy from sqlalchemy", "is more than 8.0 millimeters elif rain_in_millimeters / rain_hours >", "not None: return WEATHER[weather_code.strip()] if wind_force is not None: if", "accident[csvmap[\"lat\"]] is None or accident[csvmap[\"long\"]] is None or \\ accident[csvmap[\"lat\"]]", "accident_date.year, 3) winter_clock = is_nth_weekday(4, 6, accident_date.year, 10) # weather", "accident[csvmap[\"lat\"]] == \"NULL\" or accident[csvmap[\"long\"]] == \"NULL\": logging.warn(\"\\t\\tMissing coordinates in", "convert from string to number for char in num_conv: #", "= \"static/data/united/\" total = 0 logging.info(\"Loading United accidents...\") for united_file", "'created': created, 'provider_code': PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address': decode_hebrew((accident[csvmap[\"street\"]] +", "\"37\": 25, \"38\": 25, \"39\": 25, \"40\": 26, \"41\": 27,", "the xml number are in a three digits format (4-004),", "= 0.1 rain_in_millimeters *= 0.01 return rain_in_millimeters 
def get_weather_element(station, weather_data,", "number if char == '0': rain.replace(char, '') else: break rain_in_millimeters", "1, \"6\": 2, \"7\": 3, \"8\": 9, \"9\": 15} WEATHER", "== 77: weather = 84 # רוחות חזקות, גשם קל", "the accident doc = minidom.Document() base = doc.createElement('accident_date') doc.appendChild(base) station_data_in_date", "united_file.endswith(\".csv\"): total += import_to_db(collection, united_path + united_file) logging.info(\"\\tImported {0} items\".format(total))", "is not None: return WEATHER[weather_code.strip()] if wind_force is not None:", "50, \"75\": 51, \"76\": 52, \"77\": 53, \"78\": 54, \"79\":", "code to hours RAIN_DURATION_CODE_TO_HOURS = {\"1\": 6, \"2\": 12, \"3\":", "\"80\": 56, \"81\": 57, \"82\": 58, \"83\": 59, \"84\": 60,", "גשם קל # average rain amount per hour is between", "rain_hours > 8: if weather == 76: weather = 83", "- 1) # if accident happend before the last sunday", "# רוחות חזקות if rain is not None and rain_duration", "week (ex: the forth sunday of april 2016) # start", "\"38\": 25, \"39\": 25, \"40\": 26, \"41\": 27, \"42\": 28,", "1, \"type\": 2, \"long\": 3, \"lat\": 4, \"city\": 5, \"street\":", "= 0 logging.info(\"Loading United accidents...\") for united_file in os.listdir(united_path): if", "= (lon - float(longitude)) ** 2 station_lat = station.getElementsByTagName('station_lat') assert", "\"99\": 75} def retrieve_ims_xml(): # getting an xml document from", "\"\"\" :param path: Local files directory ('united_path' on main() below)", "os from flask_sqlalchemy import SQLAlchemy from sqlalchemy import and_ from", "59, \"84\": 60, \"85\": 61, \"86\": 62, \"87\": 63, \"88\":", "2:00 o'clock elif (accident_date.month == 3 & daylight_saving_time.day < accident_date.day", "accident_date.hour < 2)): accident_date.replace(hour=accident_date.hour - 1) # if accident happend", "32, \"50\": 33, \"51\": 34, \"52\": 33, \"53\": 35, \"54\":", "\"61\": 37, \"60\": 36, \"62\": 40, \"63\": 15, \"64\": 41,", "= 
CSVMAP[format_version] if accident[csvmap[\"lat\"]] == \"\" or accident[csvmap[\"long\"]] == \"\"", "== \"Object\": return node def accident_time_zone_adjustment(created): # return accident time", "61, \"86\": 62, \"87\": 63, \"88\": 64, \"89\": 65, \"90\":", "שוטף if weather == 77: weather = 86 # רוחות", "pass return datetime(time.year, time.month, time.day, hour, time.minute, 0) def is_nth_weekday(nth,", "import requests import logging ############################################################################################ # United.py is responsible for", "enumerate(station_data): station_lon = station.getElementsByTagName('station_lon') assert len(station_lon) == 1 lon =", "of DB entries after execution \"\"\" app = init_flask() db", "\"4\": 24, \"/\": 24, \"5\": 1, \"6\": 2, \"7\": 3,", "is not None and rain_duration is not None: rain_in_millimeters =", "on main() below) :return: length of DB entries after execution", "if node.nodeName == \"Object\": return node def accident_time_zone_adjustment(created): # return", "collection.getElementsByTagName('surface_station') for i, station in enumerate(station_data): station_lon = station.getElementsByTagName('station_lon') assert", "equals 0.(the last digit) for example 991 = 0.1 rain_in_millimeters", "גשם וסופת רוחות elif weather == 77: weather = 85", "time.minute, 0) def is_nth_weekday(nth, daynum, year, month): # find if", "every iteration \"\"\" logging.info(\"\\tReading accidents data from '%s'...\" % file_location)", "or ( 0.0 < rain_in_millimeters / rain_hours <= 0.5): if", "from ..utilities import init_flask, decode_hebrew, open_utf8 from ..import importmail from", "# rain amount is between 0.1 and 0.5 millimeter if", "'FF') rain = get_weather_element(station, weather_data, 'RRR') rain_duration = get_weather_element(station, weather_data,", "# average rain amount per hour is between 4.0 and", "# find if date is the nth occurrence of the", "rain_in_millimeters / rain_hours <= 8: if 76 == weather: weather", 
"# גשם זלעפות return weather CSVMAP = [ {\"id\": 0,", "8.0 millimeters elif 4 < rain_in_millimeters / rain_hours <= 8:", "minidom.parseString(r.text) collection = xml_doc.documentElement return collection def parse_date(created): \"\"\" :param", "\"50\": 33, \"51\": 34, \"52\": 33, \"53\": 35, \"54\": 36,", "SQLAlchemy from sqlalchemy import and_ from ..constants import CONST from", "accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE) # if accident happend between", "measured weather_code = get_weather_element(station, weather_data, 'WW') if weather_code is not", "if not accident.weather: accident.weather = process_weather_data(collection, accident.latitude, accident.longitude) db.session.commit() logging.info(\"\\tFinished", "csv from datetime import datetime import os from flask_sqlalchemy import", "\"85\": 61, \"86\": 62, \"87\": 63, \"88\": 64, \"89\": 65,", "station in enumerate(station_data_in_date): if accident_date in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return base", "the duration of time in which the rain amount was", "\"4\": 4, \"5\": 5, \"7\": 6, \"8\": 6, \"9\": 7,", "85 # גשם ורוחות חזקות else: weather = 15 #", "76: weather = 81 # גשם וסופת רוחות elif weather", "< rain_in_millimeters / rain_hours <= 4: if weather == 76:", "'rU') as f: reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab) for line,", "9, \"12\": 10, \"17\": 11, \"18\": 12, \"19\": 13, \"20\":", "0, \"time\": 1, \"lat\": 2, \"long\": 3, \"street\": 4, \"city\":", "..models import AccidentMarker from ..utilities import init_flask, decode_hebrew, open_utf8 from", "return rain_in_millimeters def get_weather_element(station, weather_data, tag): element = weather_data[station].getElementsByTagName(tag) if", "init_flask() db = SQLAlchemy(app) accidents = list(create_accidents(collection, path)) if not", "update_db(collection): \"\"\" :return: length of DB entries after execution \"\"\"", "0, \"time\": 1, 
\"type\": 2, \"long\": 3, \"lat\": 4, \"city\":", "1) # if accident happend before the last sunday of", "\"8\": 9, \"9\": 15} WEATHER = {\"0\": 1, \"1\": 2,", "for accident in united: if not accident.weather: accident.weather = process_weather_data(collection,", "in enumerate(station_data_in_date): if accident_date in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return base def", "38, \"57\": 39, \"58\": 37, \"59\": 37, \"61\": 37, \"60\":", "is between 0.5 and 4.0 millimeters if 0.5 < rain_in_millimeters", "rain_in_millimeters / rain_hours <= 0.5): if weather == 76: weather", "== 1 and accident[0] == \"\": logging.warn(\"\\t\\tEmpty File!\") continue csvmap", "char == '0': rain.replace(char, '') else: break rain_in_millimeters = float(rain)", "rain amount per hour is between 0.5 and 4.0 millimeters", "the initial station_data = collection.getElementsByTagName('surface_station') for i, station in enumerate(station_data):", "4.0 millimeters if 0.5 < rain_in_millimeters / rain_hours <= 4:", "date_format == '%Y-%m-%d %H:%M:%S': time = datetime.strptime(str(created)[:-4], date_format) hour =", "from csv :return: Python datetime object \"\"\" global time global", "in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return base def find_station_by_coordinate(collection, latitude, longitude): station_place_in_xml", "10, \"17\": 11, \"18\": 12, \"19\": 13, \"20\": 14, \"21\":", "רוחות, גשם זלעפות if weather == 77: weather = 87", "lon_difference) if temp_dis < min_distance: min_distance = temp_dis station_place_in_xml =", "weather_data, 'FF') rain = get_weather_element(station, weather_data, 'RRR') rain_duration = get_weather_element(station,", "8, \"casualties\": 9}, {\"id\": 0, \"time\": 1, \"type\": 2, \"long\":", "if weather == 77: weather = 87 # רוחות חזקות,", "<= 8: if 76 == weather: weather = 82 #", "for date_format in DATE_FORMATS: try: if date_format == '%Y-%m-%d %H:%M:%S':", "# [ 
accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE) # if accident", "'description': decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"), 'weather': process_weather_data(collection, accident[csvmap[\"lat\"]], accident[csvmap[\"long\"]])} if format_version ==", "hour DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y", "77 # רוחות חזקות if rain is not None and", "app = init_flask() db = SQLAlchemy(app) united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code ==", "5, \"street\": 6, \"comment\": 7, \"casualties\": 8}, ] def create_accidents(collection,", "46, \"71\": 47, \"72\": 48, \"73\": 16, \"74\": 50, \"75\":", "< rain_in_millimeters <= 0.5 or ( 0.0 < rain_in_millimeters /", "to the DB ############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE = 2", "lon = float(station_lon[0].childNodes[0].nodeValue) lon_difference = (lon - float(longitude)) ** 2", "= float(\"inf\") # initialize big starting value so the distance", "to number for char in num_conv: # in the xml", "1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT, 'description': decode_hebrew(accident[csvmap[\"comment\"]], encoding=\"utf-8\"), 'weather': process_weather_data(collection,", "str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return base def find_station_by_coordinate(collection, latitude, longitude): station_place_in_xml =", "lastmail=False): \"\"\" Calls importmail.py prior to importing to DB \"\"\"", "accident in enumerate(reader): if line == 0: # header format_version", "== \"\" or \\ accident[csvmap[\"lat\"]] is None or accident[csvmap[\"long\"]] is", "\"23\": 17, \"24\": 18, \"25\": 19, \"26\": 20, \"27\": 21,", "all accidents already in DB\") return 0 db.session.execute(AccidentMarker.__table__.insert(), [m for", "גשם ורוחות חזקות else: weather = 15 # גשם #", "# default 
weather is clear sky station = find_station_by_coordinate(collection, latitude,", "accidents: return 0 new_ids = [m[\"id\"] for m in accidents", "37, \"60\": 36, \"62\": 40, \"63\": 15, \"64\": 41, \"65\":", "time of the accident doc = minidom.Document() base = doc.createElement('accident_date')", "encoding=\"utf-8\") else 3, 'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT, 'description':", "\"74\": 50, \"75\": 51, \"76\": 52, \"77\": 53, \"78\": 54,", "if char == '0': rain.replace(char, '') else: break rain_in_millimeters =", "== 0: # header format_version = 0 if \"MissionID\" in", "82 # סופת רוחות, גשם שוטף if weather == 77:", "and_ from ..constants import CONST from ..models import AccidentMarker from", "base.appendChild(get_parent_object_node(station)) return base def find_station_by_coordinate(collection, latitude, longitude): station_place_in_xml = -1", "int(wind_force) > 8: weather = 76 # סופת רוחות elif", "\"5\": 5, \"7\": 6, \"8\": 6, \"9\": 7, \"10\": 8,", "time = datetime.strptime(str(created)[:-4], date_format) hour = time.strftime('%H') hour = int(hour)", "accident_date.day | ( daylight_saving_time.day == accident_date.day & accident_date.hour >= 2)):", "= csv.reader(f, delimiter=',', dialect=csv.excel_tab) for line, accident in enumerate(reader): if", "elif weather == 77: weather = 85 # גשם ורוחות", "weather == 77: weather = 84 # רוחות חזקות, גשם", "# רוחות חזקות, גשם שוטף else: weather = 78 #", "m in accidents if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m[\"id\"], AccidentMarker.provider_code", "\"87\": 63, \"88\": 64, \"89\": 65, \"90\": 66, \"91\": 67,", "o'clock elif (accident_date.month == 3 & daylight_saving_time.day < accident_date.day |", "0.5 or ( 0.0 < rain_in_millimeters / rain_hours <= 0.5):", "'longitude': accident[csvmap[\"long\"]], 'created': created, 'provider_code': PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\")[:100], 'address':", 
"גשם שוטף else: weather = 78 # גשם שוטף #", "digit) for example 991 = 0.1 rain_in_millimeters *= 0.01 return", "24, \"5\": 1, \"6\": 2, \"7\": 3, \"8\": 9, \"9\":", "36, \"55\": 37, \"56\": 38, \"57\": 39, \"58\": 37, \"59\":", "# in the xml number are in a three digits", "AccidentMarker.provider_code == m[\"provider_code\"])).count()] if not new_ids: logging.info(\"\\t\\tNothing loaded, all accidents", "format (4-004), we delete the 0es before the number if", "from ..models import AccidentMarker from ..utilities import init_flask, decode_hebrew, open_utf8", "= node.parentNode if node.nodeName == \"Object\": return node def accident_time_zone_adjustment(created):", "decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else 3, 'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT,", "weather_code = get_weather_element(station, weather_data, 'WW') if weather_code is not None:", "\"77\": 53, \"78\": 54, \"79\": 55, \"80\": 56, \"81\": 57,", "not accidents: return 0 new_ids = [m[\"id\"] for m in", "if 76 == weather: weather = 82 # סופת רוחות,", "\"\"\" :param file_location: local location of .csv :return: Yields a", "55, \"80\": 56, \"81\": 57, \"82\": 58, \"83\": 59, \"84\":", "or accident[csvmap[\"long\"]] == \"NULL\": logging.warn(\"\\t\\tMissing coordinates in line {0}. 
Moving", "84 # רוחות חזקות, גשם קל else: weather = 37", "\"72\": 48, \"73\": 16, \"74\": 50, \"75\": 51, \"76\": 52,", "accident in united: if not accident.weather: accident.weather = process_weather_data(collection, accident.latitude,", "global time global hour DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S',", "after execution \"\"\" app = init_flask() db = SQLAlchemy(app) united", "xml number are in a three digits format (4-004), we", "חזקות, גשם שוטף else: weather = 78 # גשם שוטף", "= station.getElementsByTagName('station_lon') assert len(station_lon) == 1 lon = float(station_lon[0].childNodes[0].nodeValue) lon_difference", "if accident happend after the last friday of march at", "in UTC time # pylint: disable=unexpected-keyword-arg accident_date = parse_date(created) daylight_saving_time", "the local time and in winter clock 2 hours #", "if accident[csvmap[\"lat\"]] == \"\" or accident[csvmap[\"long\"]] == \"\" or \\", "15, \"64\": 41, \"65\": 19, \"66\": 42, \"67\": 43, \"68\":", "xml document from the ims(israel meteorological service) website logging.basicConfig(level=logging.DEBUG) s", "8: if 76 == weather: weather = 82 # סופת", "the changes\") def main(light=True, username='', password='', lastmail=False): \"\"\" Calls importmail.py", "991 = 0.1 rain_in_millimeters *= 0.01 return rain_in_millimeters def get_weather_element(station,", "latitude, longitude): station_place_in_xml = -1 min_distance = float(\"inf\") # initialize", "= 85 # גשם ורוחות חזקות else: weather = 15", "collection = xml_doc.documentElement return collection def parse_date(created): \"\"\" :param created:", "is not None: if int(wind_force) > 8: weather = 76", "open_utf8(file_location, 'rU') as f: reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab) for", "accidents if m[\"id\"] in new_ids]) db.session.commit() return len(new_ids) def update_db(collection):", "if 0.0 < rain_in_millimeters <= 0.5 or ( 0.0 <", "distance will always be smaller than the initial station_data 
=", "4, \"city\": 6, \"comment\": 7, \"type\": 8, \"casualties\": 9}, {\"id\":", "not None and rain_duration is not None: rain_in_millimeters = convert_xml_values_to_numbers(rain)", "weather == 76: weather = 80 # סופת רוחות, גשם", "minidom import math import requests import logging ############################################################################################ # United.py", "0es before the number if char == '0': rain.replace(char, '')", ":param path: Local files directory ('united_path' on main() below) :return:", "DB \"\"\" collection = retrieve_ims_xml() if not light: logging.info(\"Importing data", "6, accident_date.year, 10) # weather is given in UTC time", "accident[csvmap[\"long\"]])} if format_version == 0: casualties = accident[csvmap[\"casualties\"]] marker['road_intactness'] =", "# return the stations data in the time of the", "= convert_xml_values_to_numbers(rain) rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain amount is between", "will always be smaller than the initial station_data = collection.getElementsByTagName('surface_station')", "52, \"77\": 53, \"78\": 54, \"79\": 55, \"80\": 56, \"81\":", "############################################################################################ # United.py is responsible for the parsing and deployment", "the daynum day of the week (ex: the forth sunday", "40, \"63\": 15, \"64\": 41, \"65\": 19, \"66\": 42, \"67\":", "# if accident happend between april and september if accident_date.month", "return 0 db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m[\"id\"]", "rain_in_millimeters = float(rain) if rain_in_millimeters >= 990: # numbers that", "\"NULL\" or accident[csvmap[\"long\"]] == \"NULL\": logging.warn(\"\\t\\tMissing coordinates in line {0}.", "the xml code equals 0.(the last digit) for example 991", "0: casualties = accident[csvmap[\"casualties\"]] marker['road_intactness'] = casualties if casualties.isdigit() else", "< 2)): 
accident_date.replace(hour=accident_date.hour - 1) # if accident happend after", "return len(new_ids) def update_db(collection): \"\"\" :return: length of DB entries", "\"type\": 8, \"casualties\": 9}, {\"id\": 0, \"time\": 1, \"type\": 2,", "amount per hour is between 0.5 and 4.0 millimeters if", "= RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain amount is between 0.1 and 0.5", "3: accident_date.replace(hour=accident_date.hour - 1) # if accident happend before the", "9, \"45\": 30, \"46\": 30, \"47\": 30, \"48\": 31, \"49\":", "accident_date.day & accident_date.hour >= 2)): accident_date.replace(hour=accident_date.hour - 1) # ]", "14, \"21\": 15, \"22\": 16, \"23\": 17, \"24\": 18, \"25\":", "(lat - float(latitude)) ** 2 temp_dis = math.sqrt(lat_difference + lon_difference)", "code equals 0.(the last digit) for example 991 = 0.1", "75} def retrieve_ims_xml(): # getting an xml document from the", "rain_in_millimeters def get_weather_element(station, weather_data, tag): element = weather_data[station].getElementsByTagName(tag) if element:", "81 # גשם וסופת רוחות elif weather == 77: weather", "'%Y-%m-%d %H:%M:%S': time = datetime.strptime(str(created)[:-4], date_format) hour = time.strftime('%H') hour", "= 84 # רוחות חזקות, גשם קל else: weather =", "united_file in os.listdir(united_path): if united_file.endswith(\".csv\"): total += import_to_db(collection, united_path +", "Local files directory ('united_path' on main() below) :return: length of", "united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for accident in united: if", "%I:%M', '%m/%d/%Y %I:%M'] for date_format in DATE_FORMATS: try: if date_format", "\"street\": 6, \"comment\": 7, \"casualties\": 8}, ] def create_accidents(collection, file_location):", "rain_in_millimeters / rain_hours > 8: if weather == 76: weather", "number are in a three digits format (4-004), we delete", "שוטף # average rain amount per hour is more than", "username='', password='', 
lastmail=False): \"\"\" Calls importmail.py prior to importing to", "\"68\": 44, \"69\": 45, \"70\": 46, \"71\": 47, \"72\": 48,", "accident_date.day & accident_date.hour < 2)): accident_date.replace(hour=accident_date.hour - 1) # if", "of april 2016) # start counting the daynum from monday", "6, \"comment\": 7, \"type\": 8, \"casualties\": 9}, {\"id\": 0, \"time\":", "rain[:2] # variable to help convert from string to number", "rain amount was measured weather_code = get_weather_element(station, weather_data, 'WW') if", "accident[csvmap[\"lat\"]] == \"\" or accident[csvmap[\"long\"]] == \"\" or \\ accident[csvmap[\"lat\"]]", "קל # average rain amount per hour is between 0.5", "\"81\": 57, \"82\": 58, \"83\": 59, \"84\": 60, \"85\": 61,", "accident_date in str(station.childNodes[0].nodeValue): base.appendChild(get_parent_object_node(station)) return base def find_station_by_coordinate(collection, latitude, longitude):", "as f: reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab) for line, accident", "= [m[\"id\"] for m in accidents if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id", "in enumerate(station_data): station_lon = station.getElementsByTagName('station_lon') assert len(station_lon) == 1 lon", "help convert from string to number for char in num_conv:", "big starting value so the distance will always be smaller", "the forth sunday of april 2016) # start counting the", "= CONST.UNITED_HATZALA_CODE TIME_ZONE = 2 # convert IMS hours code", "latitude, longitude) weather_data = collection.getElementsByTagName('surface_observation') wind_force = get_weather_element(station, weather_data, 'FF')", "find_station_by_coordinate(collection, latitude, longitude) weather_data = collection.getElementsByTagName('surface_observation') wind_force = get_weather_element(station, weather_data,", "'RRR') rain_duration = get_weather_element(station, weather_data, 'TR') # the duration of", "57, \"82\": 58, \"83\": 59, \"84\": 60, \"85\": 61, \"86\":", 
"is_nth_weekday(4, 4, accident_date.year, 3) winter_clock = is_nth_weekday(4, 6, accident_date.year, 10)", "local location of .csv :return: Yields a marker object with", "0.01 return rain_in_millimeters def get_weather_element(station, weather_data, tag): element = weather_data[station].getElementsByTagName(tag)", "= parse_date(accident[csvmap[\"time\"]]) marker = {'id': accident[csvmap[\"id\"]], 'latitude': accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]],", "Moving on...\".format(line + 1)) continue created = parse_date(accident[csvmap[\"time\"]]) marker =", "= is_nth_weekday(4, 4, accident_date.year, 3) winter_clock = is_nth_weekday(4, 6, accident_date.year,", "numbers that are higher then 990 in the xml code", "i, station in enumerate(station_data): station_lon = station.getElementsByTagName('station_lon') assert len(station_lon) ==", "1)) continue created = parse_date(accident[csvmap[\"time\"]]) marker = {'id': accident[csvmap[\"id\"]], 'latitude':", "\"comment\": 7, \"type\": 8, \"casualties\": 9}, {\"id\": 0, \"time\": 1,", "\"8\": 6, \"9\": 7, \"10\": 8, \"11\": 9, \"12\": 10,", "of march at 2:00 o'clock elif (accident_date.month == 3 &", "example 991 = 0.1 rain_in_millimeters *= 0.01 return rain_in_millimeters def", "the parsing and deployment of \"united hatzala\" data to the", "DB ############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE = 2 # convert", "of the week (ex: the forth sunday of april 2016)", "== 3 & daylight_saving_time.day < accident_date.day | ( daylight_saving_time.day ==", "logging.warn(\"\\t\\tEmpty File!\") continue csvmap = CSVMAP[format_version] if accident[csvmap[\"lat\"]] == \"\"", "node def accident_time_zone_adjustment(created): # return accident time in UTC time", "accident_date.month < 10 & accident_date.month > 3: accident_date.replace(hour=accident_date.hour - 1)", "זלעפות if weather == 77: weather = 87 # רוחות", "initial 
station_data = collection.getElementsByTagName('surface_station') for i, station in enumerate(station_data): station_lon", "71, \"96\": 72, \"97\": 73, \"98\": 74, \"99\": 75} def", "weather = 37 # גשם קל # average rain amount", "app = init_flask() db = SQLAlchemy(app) accidents = list(create_accidents(collection, path))", "'%s'...\" % file_location) with open_utf8(file_location, 'rU') as f: reader =", "רוחות חזקות, גשם זלעפות else: weather = 79 # גשם", "\"70\": 46, \"71\": 47, \"72\": 48, \"73\": 16, \"74\": 50,", "= 79 # גשם זלעפות return weather CSVMAP = [", "0: # header format_version = 0 if \"MissionID\" in accident[0]", "if date is the nth occurrence of the daynum day", "duration of time in which the rain amount was measured", "str(accident_date.day), str(accident_date.hour))) return adate def all_station_in_date_frame(collection, created): # return the", "happend after the last friday of march at 2:00 o'clock", "23, \"30\": 24, \"31\": 24, \"32\": 24, \"33\": 7, \"34\":", "# ] adate = ''.join( (str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour))) return", "len(new_ids) def update_db(collection): \"\"\" :return: length of DB entries after", "\"57\": 39, \"58\": 37, \"59\": 37, \"61\": 37, \"60\": 36,", "\"MissionID\" in accident[0] else 1 continue if not accident: #", "# -*- coding: utf-8 -*- import calendar import csv from", "return accident time in UTC time # pylint: disable=unexpected-keyword-arg accident_date", "is not None: rain_in_millimeters = convert_xml_values_to_numbers(rain) rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] #", "from string to number for char in num_conv: # in", "is between 0.1 and 0.5 millimeter if 0.0 < rain_in_millimeters", "6, \"comment\": 7, \"casualties\": 8}, ] def create_accidents(collection, file_location): \"\"\"", "in enumerate(reader): if line == 0: # header format_version =", "0.5 and 4.0 millimeters if 0.5 < rain_in_millimeters / rain_hours", "קל elif 
weather == 77: weather = 84 # רוחות", "19, \"26\": 20, \"27\": 21, \"28\": 22, \"29\": 23, \"30\":", "\"19\": 13, \"20\": 14, \"21\": 15, \"22\": 16, \"23\": 17,", "Time string from csv :return: Python datetime object \"\"\" global", "meteorological service) website logging.basicConfig(level=logging.DEBUG) s = requests.session() r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml')", "weather_data, 'RRR') rain_duration = get_weather_element(station, weather_data, 'TR') # the duration", "per hour is more than 8.0 millimeters elif rain_in_millimeters /", "= 15 # גשם # average rain amount per hour", "doc = minidom.Document() base = doc.createElement('accident_date') doc.appendChild(base) station_data_in_date = collection.getElementsByTagName('date_selected')", "accident_time_zone_adjustment(created): # return accident time in UTC time # pylint:", "- float(latitude)) ** 2 temp_dis = math.sqrt(lat_difference + lon_difference) if", "8: if weather == 76: weather = 83 # סופת", "(accident_date.month == 3 & daylight_saving_time.day < accident_date.day | ( daylight_saving_time.day", "element[0].childNodes[0].nodeValue else: weather_element = None return weather_element def process_weather_data(collection, latitude,", "7, \"36\": 25, \"37\": 25, \"38\": 25, \"39\": 25, \"40\":", "['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M',", "import CONST from ..models import AccidentMarker from ..utilities import init_flask,", "temp_dis = math.sqrt(lat_difference + lon_difference) if temp_dis < min_distance: min_distance", "\"40\": 26, \"41\": 27, \"42\": 28, \"43\": 29, \"44\": 9,", "s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc = minidom.parseString(r.text) collection = xml_doc.documentElement return collection def", "year, month )[nth][daynum] def get_parent_object_node(node): while node.parentNode: node = node.parentNode", "line == 1 and accident[0] == \"\": logging.warn(\"\\t\\tEmpty File!\") continue", 
"\"long\": 3, \"street\": 4, \"city\": 6, \"comment\": 7, \"type\": 8,", "= init_flask() db = SQLAlchemy(app) united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2)", "\"75\": 51, \"76\": 52, \"77\": 53, \"78\": 54, \"79\": 55,", "accident happend between april and september if accident_date.month < 10", "length of DB entries after execution \"\"\" app = init_flask()", "at 2:00 o'clock elif (accident_date.month == 3 & daylight_saving_time.day <", "0 if \"MissionID\" in accident[0] else 1 continue if not", "= SQLAlchemy(app) accidents = list(create_accidents(collection, path)) if not accidents: return", "(4-004), we delete the 0es before the number if char", "77: weather = 84 # רוחות חזקות, גשם קל else:", "/ rain_hours <= 0.5): if weather == 76: weather =", "File!\") continue csvmap = CSVMAP[format_version] if accident[csvmap[\"lat\"]] == \"\" or", "weather == 77: weather = 85 # גשם ורוחות חזקות", "else: break rain_in_millimeters = float(rain) if rain_in_millimeters >= 990: #", "\"53\": 35, \"54\": 36, \"55\": 37, \"56\": 38, \"57\": 39,", "in line {0}. 
Moving on...\".format(line + 1)) continue created =", "\"26\": 20, \"27\": 21, \"28\": 22, \"29\": 23, \"30\": 24,", "if casualties.isdigit() else 0 yield marker def import_to_db(collection, path): \"\"\"", "None: if int(wind_force) > 8: weather = 76 # סופת", "RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()] # rain amount is between 0.1 and 0.5 millimeter", "if line == 1 and accident[0] == \"\": logging.warn(\"\\t\\tEmpty File!\")", "\"22\": 16, \"23\": 17, \"24\": 18, \"25\": 19, \"26\": 20,", "= math.sqrt(lat_difference + lon_difference) if temp_dis < min_distance: min_distance =", "parsing and deployment of \"united hatzala\" data to the DB", "' + accident[csvmap[\"city\"]]), encoding=\"utf-8\"), 'accident_severity': 2 if u\"קשה\" in decode_hebrew(accident[csvmap[\"type\"]],", "datetime object \"\"\" global time global hour DATE_FORMATS = ['%m/%d/%Y", "created): # return the stations data in the time of", "= 87 # רוחות חזקות, גשם זלעפות else: weather =", "def accident_time_zone_adjustment(created): # return accident time in UTC time #", "logging.info(\"\\t\\tNothing loaded, all accidents already in DB\") return 0 db.session.execute(AccidentMarker.__table__.insert(),", "2 if u\"קשה\" in decode_hebrew(accident[csvmap[\"type\"]], encoding=\"utf-8\") else 3, 'location_accuracy': 1,", "weather = 1 # default weather is clear sky station", "= minidom.parseString(r.text) collection = xml_doc.documentElement return collection def parse_date(created): \"\"\"", "+ lon_difference) if temp_dis < min_distance: min_distance = temp_dis station_place_in_xml", "not accident.weather: accident.weather = process_weather_data(collection, accident.latitude, accident.longitude) db.session.commit() logging.info(\"\\tFinished commiting", "if united_file.endswith(\".csv\"): total += import_to_db(collection, united_path + united_file) logging.info(\"\\tImported {0}", "time # therefore in daylight_saving_time we deduct 3 hours from", "accidents...\") for united_file in 
os.listdir(united_path): if united_file.endswith(\".csv\"): total += import_to_db(collection,", "גשם קל else: weather = 37 # גשם קל #", "\"united hatzala\" data to the DB ############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE", "if weather == 76: weather = 83 # סופת רוחות,", "r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml') xml_doc = minidom.parseString(r.text) collection = xml_doc.documentElement return", "elif (accident_date.month == 3 & daylight_saving_time.day < accident_date.day | (", "weather == 77: weather = 86 # רוחות חזקות, גשם", "- TIME_ZONE) # if accident happend between april and september", "# getting an xml document from the ims(israel meteorological service)", "12, \"19\": 13, \"20\": 14, \"21\": 15, \"22\": 16, \"23\":", "daylight_saving_time we deduct 3 hours from the local time and", "weather = 87 # רוחות חזקות, גשם זלעפות else: weather", "< rain_in_millimeters / rain_hours <= 0.5): if weather == 76:", "from '%s'...\" % file_location) with open_utf8(file_location, 'rU') as f: reader", "sunday of october at 2:00 o'clock elif accident_date.month == 10", "accident.weather = process_weather_data(collection, accident.latitude, accident.longitude) db.session.commit() logging.info(\"\\tFinished commiting the changes\")", "than the initial station_data = collection.getElementsByTagName('surface_station') for i, station in", "'latitude': accident[csvmap[\"lat\"]], 'longitude': accident[csvmap[\"long\"]], 'created': created, 'provider_code': PROVIDER_CODE, 'title': decode_hebrew(accident[csvmap[\"type\"]],", "get_parent_object_node(node): while node.parentNode: node = node.parentNode if node.nodeName == \"Object\":", "csvmap = CSVMAP[format_version] if accident[csvmap[\"lat\"]] == \"\" or accident[csvmap[\"long\"]] ==", "def main(light=True, username='', password='', lastmail=False): \"\"\" Calls importmail.py prior to", "סופת רוחות, גשם זלעפות if weather == 77: 
weather =", "= None return weather_element def process_weather_data(collection, latitude, longitude): weather =", "= 76 # סופת רוחות elif int(wind_force) > 5: weather", "# average rain amount per hour is between 0.5 and" ]
[ "= run_qs(trn_ds, qs, self.y, self.quota) assert_array_equal(qseq, np.array([4, 5, 2, 3]))", "libact.models import LogisticRegression from libact.query_strategies import VarianceReduction from .utils import", "qs, self.y, self.quota) assert_array_equal(qseq, np.array([4, 5, 2, 3])) if __name__", "(len(self.y) - 2)])) qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1) qseq =", "* (len(self.y) - 2)])) qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1) qseq", "from .utils import run_qs class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction test case", "= Dataset(self.X, np.concatenate([self.y[:2], [None] * (len(self.y) - 2)])) qs =", "-1], [1, 1], [-1, -2], [-1, -1], [1, 2], [2,", "sigma=0.1) qseq = run_qs(trn_ds, qs, self.y, self.quota) assert_array_equal(qseq, np.array([4, 5,", "self.y = [0, 1, 0, 1, 0, 1] self.quota =", "def test_variance_reduction(self): trn_ds = Dataset(self.X, np.concatenate([self.y[:2], [None] * (len(self.y) -", "self.quota = 4 def test_variance_reduction(self): trn_ds = Dataset(self.X, np.concatenate([self.y[:2], [None]", "VarianceReduction from .utils import run_qs class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction test", "using artifitial dataset\"\"\" def setUp(self): self.X = [[-2, -1], [1,", "libact.query_strategies import VarianceReduction from .utils import run_qs class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance", "dataset\"\"\" def setUp(self): self.X = [[-2, -1], [1, 1], [-1,", "= VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1) qseq = run_qs(trn_ds, qs, self.y, self.quota)", "as np from libact.base.dataset import Dataset from libact.models import LogisticRegression", "\"\"\"Variance reduction test case using artifitial dataset\"\"\" def setUp(self): self.X", "[[-2, -1], [1, 1], [-1, -2], [-1, -1], [1, 2],", "self.quota) assert_array_equal(qseq, np.array([4, 5, 2, 3])) if __name__ == '__main__':", "-1], [1, 2], [2, 
1]] self.y = [0, 1, 0,", "test_variance_reduction(self): trn_ds = Dataset(self.X, np.concatenate([self.y[:2], [None] * (len(self.y) - 2)]))", "import run_qs class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction test case using artifitial", "assert_array_equal(qseq, np.array([4, 5, 2, 3])) if __name__ == '__main__': unittest.main()", "from libact.base.dataset import Dataset from libact.models import LogisticRegression from libact.query_strategies", "import unittest from numpy.testing import assert_array_equal import numpy as np", "- 2)])) qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1) qseq = run_qs(trn_ds,", "artifitial dataset\"\"\" def setUp(self): self.X = [[-2, -1], [1, 1],", "[0, 1, 0, 1, 0, 1] self.quota = 4 def", "4 def test_variance_reduction(self): trn_ds = Dataset(self.X, np.concatenate([self.y[:2], [None] * (len(self.y)", "Dataset(self.X, np.concatenate([self.y[:2], [None] * (len(self.y) - 2)])) qs = VarianceReduction(trn_ds,", "[1, 1], [-1, -2], [-1, -1], [1, 2], [2, 1]]", "LogisticRegression from libact.query_strategies import VarianceReduction from .utils import run_qs class", "from libact.models import LogisticRegression from libact.query_strategies import VarianceReduction from .utils", "[-1, -1], [1, 2], [2, 1]] self.y = [0, 1,", "case using artifitial dataset\"\"\" def setUp(self): self.X = [[-2, -1],", "from numpy.testing import assert_array_equal import numpy as np from libact.base.dataset", "numpy as np from libact.base.dataset import Dataset from libact.models import", "1], [-1, -2], [-1, -1], [1, 2], [2, 1]] self.y", "assert_array_equal import numpy as np from libact.base.dataset import Dataset from", "0, 1] self.quota = 4 def test_variance_reduction(self): trn_ds = Dataset(self.X,", "run_qs(trn_ds, qs, self.y, self.quota) assert_array_equal(qseq, np.array([4, 5, 2, 3])) if", "2], [2, 1]] self.y = [0, 1, 0, 1, 0,", "= [0, 1, 0, 1, 0, 1] self.quota = 4", "= 4 def test_variance_reduction(self): 
trn_ds = Dataset(self.X, np.concatenate([self.y[:2], [None] *", "-2], [-1, -1], [1, 2], [2, 1]] self.y = [0,", "[2, 1]] self.y = [0, 1, 0, 1, 0, 1]", "numpy.testing import assert_array_equal import numpy as np from libact.base.dataset import", "1]] self.y = [0, 1, 0, 1, 0, 1] self.quota", "[1, 2], [2, 1]] self.y = [0, 1, 0, 1,", "np from libact.base.dataset import Dataset from libact.models import LogisticRegression from", "class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction test case using artifitial dataset\"\"\" def", "def setUp(self): self.X = [[-2, -1], [1, 1], [-1, -2],", "[None] * (len(self.y) - 2)])) qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1)", "unittest from numpy.testing import assert_array_equal import numpy as np from", "VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction test case using artifitial dataset\"\"\" def setUp(self):", "1, 0, 1] self.quota = 4 def test_variance_reduction(self): trn_ds =", "import Dataset from libact.models import LogisticRegression from libact.query_strategies import VarianceReduction", "self.X = [[-2, -1], [1, 1], [-1, -2], [-1, -1],", "0, 1, 0, 1] self.quota = 4 def test_variance_reduction(self): trn_ds", "trn_ds = Dataset(self.X, np.concatenate([self.y[:2], [None] * (len(self.y) - 2)])) qs", "reduction test case using artifitial dataset\"\"\" def setUp(self): self.X =", "import LogisticRegression from libact.query_strategies import VarianceReduction from .utils import run_qs", "qseq = run_qs(trn_ds, qs, self.y, self.quota) assert_array_equal(qseq, np.array([4, 5, 2,", "setUp(self): self.X = [[-2, -1], [1, 1], [-1, -2], [-1,", "self.y, self.quota) assert_array_equal(qseq, np.array([4, 5, 2, 3])) if __name__ ==", "qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1) qseq = run_qs(trn_ds, qs, self.y,", "libact.base.dataset import Dataset from libact.models import LogisticRegression from libact.query_strategies import", 
"np.concatenate([self.y[:2], [None] * (len(self.y) - 2)])) qs = VarianceReduction(trn_ds, model=LogisticRegression(),", "2)])) qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1) qseq = run_qs(trn_ds, qs,", "1, 0, 1, 0, 1] self.quota = 4 def test_variance_reduction(self):", ".utils import run_qs class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction test case using", "= [[-2, -1], [1, 1], [-1, -2], [-1, -1], [1,", "model=LogisticRegression(), sigma=0.1) qseq = run_qs(trn_ds, qs, self.y, self.quota) assert_array_equal(qseq, np.array([4,", "from libact.query_strategies import VarianceReduction from .utils import run_qs class VarianceReductionTestCase(unittest.TestCase):", "VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1) qseq = run_qs(trn_ds, qs, self.y, self.quota) assert_array_equal(qseq,", "run_qs class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction test case using artifitial dataset\"\"\"", "1] self.quota = 4 def test_variance_reduction(self): trn_ds = Dataset(self.X, np.concatenate([self.y[:2],", "[-1, -2], [-1, -1], [1, 2], [2, 1]] self.y =", "import VarianceReduction from .utils import run_qs class VarianceReductionTestCase(unittest.TestCase): \"\"\"Variance reduction", "test case using artifitial dataset\"\"\" def setUp(self): self.X = [[-2,", "import numpy as np from libact.base.dataset import Dataset from libact.models", "Dataset from libact.models import LogisticRegression from libact.query_strategies import VarianceReduction from", "import assert_array_equal import numpy as np from libact.base.dataset import Dataset" ]
[ "= \"hysds:%s\" % get_uuid('bundle-%s' % job['job_id']) doc = ProvEsDocument() #", "bundle_id = \"hysds:%s\" % get_uuid('bundle-%s' % job['job_id']) doc = ProvEsDocument()", "doc = ProvEsDocument() # get bundle #bndl = doc.bundle(bundle_id) bndl", "# get json pd = json.loads(doc.serialize()) # update input entity", "pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] =", "doc.bundle(bundle_id) bndl = None # add input entity execute_node =", "= 'job' job['type'] = job.get('job', {}).get('type', 'unknown') job['@version'] = '1'", "add output entity output_id = \"hysds:%s\" % get_uuid(pub_urls[0]) output_ent =", "[input_id], [output_id], label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\") # get json pd =", "except: hostname = '' info = {'resource': 'event', 'type': event_type,", "= orig_ent[attr] # write prov with open(prov_es_file, 'w') as f:", "<= soft_time_limit+gap): time_limit = soft_time_limit + gap return soft_time_limit, time_limit", "r.setex(JOB_STATUS_KEY_TMPL % job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) # for dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job))", "import json import copy import socket import msgpack import traceback", "bundle=bndl) # add output entity output_id = \"hysds:%s\" % get_uuid(pub_urls[0])", "= doc.granule(input_id, None, [prod_url], [], None, None, None, label=os.path.basename(prod_url), bundle=bndl)", "pool for job info metrics.\"\"\" global JOB_INFO_POOL if JOB_INFO_POOL is", "if JOB_STATUS_POOL is None: JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool():", "= 
StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value)", "job_info = {'type': 'job_info', '@version': '1', '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(),", "else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else: # update", "list(prov_es_info['bundle'].keys())[0] # update software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update", "prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) # update process step if 'activity' in", "[output_id], label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\") # get json pd = json.loads(doc.serialize())", "None, label=objectid, bundle=bndl) # software and algorithm algorithm = \"eos:product_publishing\"", "software and algorithm algorithm = \"eos:product_publishing\" software_version = hysds.__version__ software_title", "@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_status(job): \"\"\"Print job status.\"\"\" set_redis_job_status_pool()", "worker by task ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL #", "standard_library standard_library.install_aliases() import os import re import json import copy", "# update software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith", "import copy import socket import msgpack import traceback import types", "app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) # for dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES", "== 1: ps_id_orig = list( 
prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime']", "'@version': '1', 'event': event} # send update to redis r", "tags=[], hostname=None): \"\"\"Log custom event.\"\"\" set_redis_event_status_pool() global EVENT_STATUS_POOL uuid =", "= socket.getfqdn() prod_url = \"file://%s%s\" % (execute_node, prod_path) input_id =", "= job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]:", "if attr in ('prov:location', 'prov:label', 'prov:type'): continue pd['entity'][output_id][attr] = orig_ent[attr]", "create processStep job_id = \"publish_dataset-%s\" % os.path.basename(prod_path) doc.processStep(\"hysds:%s\" % get_uuid(job_id),", "for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] =", "from uuid import uuid4 from redis import BlockingConnectionPool, StrictRedis, RedisError", "@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_custom_event(event_type, event_status, event, tags=[], hostname=None):", "from future import standard_library standard_library.install_aliases() import os import re import", "doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None), label=sa_label, bundle=bndl) # create processStep", "retrieve job status r = StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL % task_id)", "EVENT_STATUS_POOL if EVENT_STATUS_POOL is None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url( 
app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo,", "{'type': 'job_info', '@version': '1', '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'job': filtered_info,", "msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" % json.dumps(info)) return uuid def log_prov_es(job, prov_es_info, prov_es_file):", "import standard_library standard_library.install_aliases() import os import re import json import", "pd = json.loads(doc.serialize()) # update software agent and process step", "update input entity orig_ent = prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) #", "task ID r = StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL % task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker)", "prod_path) input_id = \"hysds:%s\" % get_uuid(prod_url) input_ent = doc.granule(input_id, None,", "\"\"\"Ensure hard time limit gap.\"\"\" gap = hard_time_limit_gap() if soft_time_limit", "tags = job.setdefault('tags', []) if isinstance(tags, str): tags = [tags]", "update to redis r = StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" %", "tags.append(job['job']['tag']) job['tags'] = tags # send update to redis r", "job status.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL job['resource'] = 'job' job['type'] =", "get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id, None, [input_id], [output_id], label=job_id, bundle=bndl,", "for backoff.\"\"\" return app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): \"\"\"Return max tries for", "'job_info', '@version': '1', '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'job': filtered_info, 'job_type':", "and process step if 'bundle' in prov_es_info: if len(prov_es_info['bundle']) ==", "max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_status(job): \"\"\"Print job status.\"\"\" 
set_redis_job_status_pool() global JOB_STATUS_POOL", "ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] =", "max_tries=backoff_max_tries, max_value=backoff_max_value) def get_task_worker(task_id): \"\"\"Retrieve task worker by task ID", "@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_info(job): \"\"\"Print job info.\"\"\" set_redis_job_info_pool()", "and algorithm algorithm = \"eos:product_publishing\" software_version = hysds.__version__ software_title =", "'priority', 'container_image_name', 'container_image_url', 'name'): if info in job: filtered_info[info] =", "orig_ent = prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) # update output entity", "after soft time limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure", "% datetime.utcnow().isoformat(), 'hostname': hostname, 'uuid': uuid, 'tags': tags, '@version': '1',", "\"hysds:%s\" % get_uuid(prod_url) input_ent = doc.granule(input_id, None, [prod_url], [], None,", "know ps_id = \"hysds:%s\" % get_uuid(job['job_id']) bundle_id = \"hysds:%s\" %", "for event status.\"\"\" global EVENT_STATUS_POOL if EVENT_STATUS_POOL is None: EVENT_STATUS_POOL", "create PROV-ES doc doc = ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle #bndl", "def set_redis_job_info_pool(): \"\"\"Set redis connection pool for job info metrics.\"\"\"", "'@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'job': filtered_info, 'job_type': job['type']} # send", "hard_time_limit_gap() if soft_time_limit is not None and (time_limit is None", "[], sa_id, None, [], [], bundle=bndl, prov_type=\"hysds:%s\" % job['type']) #", "time_limit <= 
soft_time_limit+gap): time_limit = soft_time_limit + gap return soft_time_limit,", "# create PROV-ES doc to generate attributes that only verdi", "continue pd['entity'][output_id][attr] = orig_ent[attr] # write prov with open(prov_es_file, 'w')", "WORKER_STATUS_POOL # set task worker for task ID r =", "datetime import datetime from uuid import uuid4 from redis import", "in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig = list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0]", "update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) # update process step if", "= BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): \"\"\"Set redis connection pool for", "\"hysds-task-worker-%s\" def backoff_max_value(): \"\"\"Return max value for backoff.\"\"\" return app.conf.BACKOFF_MAX_VALUE", "RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_worker_status(worker): \"\"\"Retrieve worker status by worker", "from prov_es.model import get_uuid, ProvEsDocument # logger logger = get_task_logger(__name__)", "= \"hysds-worker-status-%s\" # task worker key template TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\"", "# create sofware agent sa_label = \"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'], job['job_info']['pid'],", "= pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] =", "log_job_info(job): \"\"\"Print job info.\"\"\" set_redis_job_info_pool() global JOB_INFO_POOL filtered_info = {}", "% get_uuid(job['job_id']) bundle_id = \"hysds:%s\" % get_uuid('bundle-%s' % job['job_id']) doc", "task 
ID from redis.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL # retrieve job", "get bundle #bndl = doc.bundle(bundle_id) bndl = None # create", "StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def", "max_value=backoff_max_value) def log_job_status(job): \"\"\"Print job status.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL job['resource']", "max_tries=backoff_max_tries, max_value=backoff_max_value) def log_task_worker(task_id, worker): \"\"\"Log task worker for task", "str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None), label=sa_label, bundle=bndl) # create processStep doc.processStep(ps_id,", "app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool(): \"\"\"Set redis connection pool for worker status.\"\"\"", "activity ids for waw_id in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:", "= ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] = pd['activity'] # write", "task worker for task ID in redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL", "job['type']} # send update to redis r = StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY,", "= pd['bundle'][bundle_id]['activity'] else: # update software agent prov_es_info.setdefault('agent', {}).update(pd['agent']) #", "prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] = pd['activity'] # write prov with open(prov_es_file,", "event_status, event, tags=[], hostname=None): \"\"\"Log custom event.\"\"\" set_redis_event_status_pool() global EVENT_STATUS_POOL", "def log_job_status(job): \"\"\"Print job status.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL job['resource'] =", "# for ES logger.info(\"job_status_json:%s\" % 
json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value)", "that only verdi know ps_id = \"hysds:%s\" % get_uuid(job['job_id']) bundle_id", "agent sa_label = \"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id =", "is None: try: hostname = socket.getfqdn() except: try: hostname =", "get json pd = json.loads(doc.serialize()) # update input entity orig_ent", "job info.\"\"\" set_redis_job_info_pool() global JOB_INFO_POOL filtered_info = {} for info", "'1' job['@timestamp'] = \"%sZ\" % datetime.utcnow().isoformat() if 'tag' in job.get('job',", "prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url']", "json pd = json.loads(doc.serialize()) # update software agent and process", "# update output entity for attr in orig_ent: if attr", "doc.bundle(bundle_id) bndl = None # create sofware agent sa_label =", "None # job status key template JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\" #", "datetime from uuid import uuid4 from redis import BlockingConnectionPool, StrictRedis,", "bundle #bndl = doc.bundle(bundle_id) bndl = None # create sofware", "job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL", "= job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] =", 
"pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type']", "gap = hard_time_limit_gap() if soft_time_limit is not None and (time_limit", "= StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value)", "'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update", "software agent prov_es_info.setdefault('agent', {}).update(pd['agent']) # update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith'])", "# retrieve worker status r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL %", "redis r = StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" % json.dumps(job_info)) @backoff.on_exception(backoff.expo,", "if len(prov_es_info['activity']) == 1: ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime']", "def hard_time_limit_gap(): \"\"\"Return minimum gap time after soft time limit.\"\"\"", "= StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" % json.dumps(info)) return uuid def", "software_version, label=software_title, location=software_location, bundle=bndl) # create sofware agent pid =", "len(prov_es_info['bundle']) == 1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0] # update software agent", "ID in 
redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # set task worker", "update wasAssociatedWith activity ids for waw_id in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity']", "dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES logger.info(\"job_status_json:%s\" % json.dumps(job)) @backoff.on_exception(backoff.expo,", "job status by task ID from redis.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL", "def backoff_max_tries(): \"\"\"Return max tries for backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES def", "software = \"eos:HySDS-%s\" % software_version software_location = hysds.__url__ doc.software(software, [algorithm],", "pd['entity'][input_id].update(orig_ent) # update output entity for attr in orig_ent: if", "role=job.get('username', None), label=sa_label, bundle=bndl) # create processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'],", "job['resource'] = 'job' job['type'] = job.get('job', {}).get('type', 'unknown') job['@version'] =", "% get_uuid(pub_urls[0]) output_ent = doc.product(output_id, None, [pub_urls[0]], [], None, None,", "= '' info = {'resource': 'event', 'type': event_type, 'status': event_status,", "doc.product(output_id, None, [pub_urls[0]], [], None, None, None, label=objectid, bundle=bndl) #", "WORKER_STATUS_POOL is None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): \"\"\"Set", "ps_id_orig = list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime']", "# update software agent prov_es_info.setdefault('agent', {}).update(pd['agent']) # update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith',", 
"backoff from datetime import datetime from uuid import uuid4 from", "hysds from hysds.celery import app from prov_es.model import get_uuid, ProvEsDocument", "agent pid = os.getpid() sa_label = \"hysds:publish_dataset/%s/%d/%s\" % (execute_node, pid,", "ProvEsDocument() # get bundle #bndl = doc.bundle(bundle_id) bndl = None", "Create temp PROV-ES document to populate attributes that only the", "__future__ import division from __future__ import absolute_import from builtins import", "backoff_max_value(): \"\"\"Return max value for backoff.\"\"\" return app.conf.BACKOFF_MAX_VALUE def backoff_max_tries():", "status.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL job['resource'] = 'job' job['type'] = job.get('job',", "= \"hysds:%s\" % get_uuid(prod_url) input_ent = doc.granule(input_id, None, [prod_url], [],", "r = StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" % json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError,", "prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id, None, [input_id], [output_id], label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\")", "None # add input entity execute_node = socket.getfqdn() prod_url =", "{}) pd['entity'][input_id].update(orig_ent) # update output entity for attr in orig_ent:", "worker status.\"\"\" global WORKER_STATUS_POOL if WORKER_STATUS_POOL is None: WORKER_STATUS_POOL =", "global JOB_INFO_POOL filtered_info = {} for info in ('job_info', 'job_id',", "is not None and (time_limit is None or time_limit <=", "def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls, prod_metrics, objectid): \"\"\"Log publish step", "global WORKER_STATUS_POOL # set task worker for task ID r", "re import json import copy import socket import msgpack import", "str): tags = [tags] tags.append(job['job']['tag']) job['tags'] = tags # send", "if isinstance(tags, str): tags = [tags] 
tags.append(job['job']['tag']) job['tags'] = tags", "= json.loads(doc.serialize()) # update software agent and process step if", "worker status key template WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\" # task worker", "redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # set task worker for task", "if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else:", "json.dumps(info)) return uuid def log_prov_es(job, prov_es_info, prov_es_file): \"\"\"Log PROV-ES document.", "worker for task ID in redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL #", "list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']", "for info in ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag', 'priority', 'container_image_name',", "'activity' in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig = list(", "os.path.basename(prod_path) doc.processStep(\"hysds:%s\" % get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id, None, [input_id],", "WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\" # task worker key template TASK_WORKER_KEY_TMPL =", "entity execute_node = socket.getfqdn() prod_url = \"file://%s%s\" % (execute_node, prod_path)", "StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL % worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def", "StrictRedis, RedisError from celery.utils.log import get_task_logger import hysds from hysds.celery", "key template 
JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\" # worker status key template", "template TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\" def backoff_max_value(): \"\"\"Return max value for", "{}): tags = job.setdefault('tags', []) if isinstance(tags, str): tags =", "agent prov_es_info.setdefault('agent', {}).update(pd['agent']) # update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) #", "v%s\" % (hysds.__description__, software_version) software = \"eos:HySDS-%s\" % software_version software_location", "connection pool for worker status.\"\"\" global WORKER_STATUS_POOL if WORKER_STATUS_POOL is", "None, [], [], bundle=bndl, prov_type=\"hysds:%s\" % job['type']) # get json", "pools JOB_STATUS_POOL = None JOB_INFO_POOL = None WORKER_STATUS_POOL = None", "\"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id = \"hysds:%s\" % get_uuid(sa_label)", "output entity for attr in orig_ent: if attr in ('prov:location',", "sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid), execute_node, role=\"invoked\", label=sa_label,", "json import copy import socket import msgpack import traceback import", "future import standard_library standard_library.install_aliases() import os import re import json", "hard_time_limit_gap(): \"\"\"Return minimum gap time after soft time limit.\"\"\" return", "# retrieve task worker r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL %", "def log_custom_event(event_type, event_status, event, tags=[], hostname=None): \"\"\"Log custom event.\"\"\" set_redis_event_status_pool()", "# set task worker for task ID r = StrictRedis(connection_pool=WORKER_STATUS_POOL)", "if 'bundle' in prov_es_info: if len(prov_es_info['bundle']) == 1: bundle_id_orig =", "(hysds.__description__, software_version) software = \"eos:HySDS-%s\" % software_version software_location = hysds.__url__", "copy 
import socket import msgpack import traceback import types import", "import datetime from uuid import uuid4 from redis import BlockingConnectionPool,", "None and (time_limit is None or time_limit <= soft_time_limit+gap): time_limit", "isinstance(tags, str): tags = [tags] tags.append(job['job']['tag']) job['tags'] = tags #", "to redis r = StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL % job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status'])", "[], bundle=bndl, prov_type=\"hysds:%s\" % job['type']) # get json pd =", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids", "prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity']", "not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity", "label=software_title, location=software_location, bundle=bndl) # create sofware agent pid = os.getpid()", "def set_redis_worker_status_pool(): \"\"\"Set redis connection pool for worker status.\"\"\" global", "pd['wasAssociatedWith']) # update process step if 'activity' in prov_es_info: if", "return soft_time_limit, time_limit def set_redis_job_status_pool(): \"\"\"Set redis connection pool for", "soft_time_limit is not None and (time_limit is None or time_limit", "update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity if 'activity'", "prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if 
prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update(", "= \"hysds-task-worker-%s\" def backoff_max_value(): \"\"\"Return max value for backoff.\"\"\" return", "global EVENT_STATUS_POOL uuid = str(uuid4()) if hostname is None: try:", "% (execute_node, pid, prod_metrics['time_start']) sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id,", "RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_status(job): \"\"\"Print job status.\"\"\" set_redis_job_status_pool() global", "job['@version'] = '1' job['@timestamp'] = \"%sZ\" % datetime.utcnow().isoformat() if 'tag'", "max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_info(job): \"\"\"Print job info.\"\"\" set_redis_job_info_pool() global JOB_INFO_POOL", "(e.g. PID).\"\"\" # create PROV-ES doc to generate attributes that", "msgpack import traceback import types import backoff from datetime import", "RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_custom_event(event_type, event_status, event, tags=[], hostname=None): \"\"\"Log", "= doc.bundle(bundle_id) bndl = None # add input entity execute_node", "sofware agent pid = os.getpid() sa_label = \"hysds:publish_dataset/%s/%d/%s\" % (execute_node,", "redis.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL # retrieve job status r =", "str from future import standard_library standard_library.install_aliases() import os import re", "socket.getfqdn() prod_url = \"file://%s%s\" % (execute_node, prod_path) input_id = \"hysds:%s\"", "hostname, 'uuid': uuid, 'tags': tags, '@version': '1', 'event': event} #", "[], None, None, None, label=objectid, bundle=bndl) # software and algorithm", "= \"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'], job['job_info']['pid'], 
datetime.utcnow().isoformat()) sa_id = \"hysds:%s\" %", "job info metrics.\"\"\" global JOB_INFO_POOL if JOB_INFO_POOL is None: JOB_INFO_POOL", "status key template JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\" # worker status key", "is None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value)", "f, indent=2) def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls, prod_metrics, objectid): \"\"\"Log", "\"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid), execute_node, role=\"invoked\", label=sa_label, bundle=bndl) #", "None, None, None, label=os.path.basename(prod_url), bundle=bndl) # add output entity output_id", "app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_task_worker(task_id): \"\"\"Retrieve task", "'type': event_type, 'status': event_status, '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'hostname': hostname,", "import get_task_logger import hysds from hysds.celery import app from prov_es.model", "JOB_STATUS_POOL job['resource'] = 'job' job['type'] = job.get('job', {}).get('type', 'unknown') job['@version']", "time_limit = soft_time_limit + gap return soft_time_limit, time_limit def set_redis_job_status_pool():", "= \"%sZ\" % datetime.utcnow().isoformat() if 'tag' in job.get('job', {}): tags", "connection pools JOB_STATUS_POOL = None JOB_INFO_POOL = None WORKER_STATUS_POOL =", "= doc.bundle(bundle_id) bndl = None # create sofware agent sa_label", "[], [], bundle=bndl, prov_type=\"hysds:%s\" % job['type']) # get json pd", "'activity' in prov_es_info: if len(prov_es_info['activity']) == 1: ps_id_orig = list(prov_es_info['activity'].keys())[0]", "= None JOB_INFO_POOL = None WORKER_STATUS_POOL = None EVENT_STATUS_POOL =", "status.\"\"\" global JOB_STATUS_POOL if JOB_STATUS_POOL 
is None: JOB_STATUS_POOL = BlockingConnectionPool.from_url(", "= StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL % task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries,", "else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] = pd['activity'] # write prov with", "key template TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\" def backoff_max_value(): \"\"\"Return max value", "label=sa_label, bundle=bndl) # create processStep job_id = \"publish_dataset-%s\" % os.path.basename(prod_path)", "bundle=bndl, prov_type=\"hysds:publish_dataset\") # get json pd = json.loads(doc.serialize()) # update", "attributes that only the worker has access to (e.g. PID).\"\"\"", "= app.conf.MOZART_URL if 'prov:type' not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type']", "set_redis_job_info_pool(): \"\"\"Set redis connection pool for job info metrics.\"\"\" global", "'hostname': hostname, 'uuid': uuid, 'tags': tags, '@version': '1', 'event': event}", "redis import BlockingConnectionPool, StrictRedis, RedisError from celery.utils.log import get_task_logger import", "% (job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id,", "import backoff from datetime import datetime from uuid import uuid4", "('prov:location', 'prov:label', 'prov:type'): continue pd['entity'][output_id][attr] = orig_ent[attr] # write prov", "(time_limit is None or time_limit <= soft_time_limit+gap): time_limit = soft_time_limit", "if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( 
pd['bundle'][bundle_id]['activity'])", "is None: JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool(): \"\"\"Set redis", "return uuid def log_prov_es(job, prov_es_info, prov_es_file): \"\"\"Log PROV-ES document. Create", "redis r = StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" % json.dumps(info)) return", "from __future__ import print_function from __future__ import division from __future__", "uuid = str(uuid4()) if hostname is None: try: hostname =", "logger logger = get_task_logger(__name__) # redis connection pools JOB_STATUS_POOL =", "uuid4 from redis import BlockingConnectionPool, StrictRedis, RedisError from celery.utils.log import", "software agent and process step if 'bundle' in prov_es_info: if", "attr in orig_ent: if attr in ('prov:location', 'prov:label', 'prov:type'): continue", "= '1' job['@timestamp'] = \"%sZ\" % datetime.utcnow().isoformat() if 'tag' in", "types import backoff from datetime import datetime from uuid import", "hostname=None): \"\"\"Log custom event.\"\"\" set_redis_event_status_pool() global EVENT_STATUS_POOL uuid = str(uuid4())", "wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) # update process step if 'activity'", "prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) # update output entity for attr", "send update to redis r = StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\"", "set_redis_job_info_pool() global JOB_INFO_POOL filtered_info = {} for info in ('job_info',", "if 'activity' in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig =", "json.loads(doc.serialize()) # update input entity orig_ent = prov_es_info.get('entity', 
{}).get(input_id, {})", "= None # create sofware agent sa_label = \"hysds:pge_wrapper/%s/%d/%s\" %", "pool for job status.\"\"\" global JOB_STATUS_POOL if JOB_STATUS_POOL is None:", "is None: JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): \"\"\"Set redis", "in orig_ent: if attr in ('prov:location', 'prov:label', 'prov:type'): continue pd['entity'][output_id][attr]", "= BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_task_worker(task_id, worker):", "'job_type': job['type']} # send update to redis r = StrictRedis(connection_pool=JOB_INFO_POOL)", "[]) if isinstance(tags, str): tags = [tags] tags.append(job['job']['tag']) job['tags'] =", "waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig", "\"eos:HySDS-%s\" % software_version software_location = hysds.__url__ doc.software(software, [algorithm], software_version, label=software_title,", "= pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for waw_id in", "bundle=bndl, prov_type=\"hysds:%s\" % job['type']) # get json pd = json.loads(doc.serialize())", "% datetime.utcnow().isoformat(), 'job': filtered_info, 'job_type': job['type']} # send update to", "filtered_info, 'job_type': job['type']} # send update to redis r =", "print_function from __future__ import division from __future__ import absolute_import from", "prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] =", "redis r = StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL % 
job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) #", "soft_time_limit, time_limit def set_redis_job_status_pool(): \"\"\"Set redis connection pool for job", "os import re import json import copy import socket import", "global WORKER_STATUS_POOL # retrieve task worker r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return", "task worker for task ID r = StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL %", "traceback import types import backoff from datetime import datetime from", "RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_info(job): \"\"\"Print job info.\"\"\" set_redis_job_info_pool() global", "\"publish_dataset-%s\" % os.path.basename(prod_path) doc.processStep(\"hysds:%s\" % get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id,", "ps_id = \"hysds:%s\" % get_uuid(job['job_id']) bundle_id = \"hysds:%s\" % get_uuid('bundle-%s'", "# create PROV-ES doc doc = ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle", "# get bundle #bndl = doc.bundle(bundle_id) bndl = None #", "ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve task worker", "= None # job status key template JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\"", "algorithm = \"eos:product_publishing\" software_version = hysds.__version__ software_title = \"%s v%s\"", "datetime.utcnow().isoformat() if 'tag' in job.get('job', {}): tags = job.setdefault('tags', [])", "= pd['activity'] # write prov with open(prov_es_file, 'w') as f:", "not None and (time_limit is None or time_limit <= soft_time_limit+gap):", "import unicode_literals from __future__ import print_function from __future__ import division", "pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] 
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']", "get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None), label=sa_label, bundle=bndl) # create", "job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not", "connection pool for job info metrics.\"\"\" global JOB_INFO_POOL if JOB_INFO_POOL", "event} # send update to redis r = StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY,", "1: ps_id_orig = list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][", "# task worker key template TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\" def backoff_max_value():", "standard_library.install_aliases() import os import re import json import copy import", "@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_task_worker(task_id, worker): \"\"\"Log task worker", "StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL % task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value)", "worker status r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL % worker) @backoff.on_exception(backoff.expo,", "job[info] job_info = {'type': 'job_info', '@version': '1', '@timestamp': \"%sZ\" %", "BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): \"\"\"Set redis 
connection pool for job", "'1', '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'job': filtered_info, 'job_type': job['type']} #", "soft_time_limit + gap return soft_time_limit, time_limit def set_redis_job_status_pool(): \"\"\"Set redis", "hostname = '' info = {'resource': 'event', 'type': event_type, 'status':", "bndl = None # create sofware agent sa_label = \"hysds:pge_wrapper/%s/%d/%s\"", "gap time after soft time limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit,", "# write prov with open(prov_es_file, 'w') as f: json.dump(prov_es_info, f,", "gap.\"\"\" gap = hard_time_limit_gap() if soft_time_limit is not None and", "tags = [tags] tags.append(job['job']['tag']) job['tags'] = tags # send update", "bundle=bndl) # create processStep job_id = \"publish_dataset-%s\" % os.path.basename(prod_path) doc.processStep(\"hysds:%s\"", "write prov with open(prov_es_file, 'w') as f: json.dump(pd, f, indent=2)", "None # create sofware agent sa_label = \"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'],", "BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool(): \"\"\"Set redis connection pool for worker", "builtins import open from builtins import str from future import", "= [tags] tags.append(job['job']['tag']) job['tags'] = tags # send update to", "import division from __future__ import absolute_import from builtins import open", "execute_node, role=\"invoked\", label=sa_label, bundle=bndl) # create processStep job_id = \"publish_dataset-%s\"", "step if 'activity' in prov_es_info: if len(prov_es_info['activity']) == 1: ps_id_orig", "import open from builtins import str from future import standard_library", "@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_worker_status(worker): \"\"\"Retrieve worker status by", "return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): \"\"\"Return minimum gap time 
after soft", "len(prov_es_info['activity']) == 1: ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime']", "\"\"\"Print job status.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL job['resource'] = 'job' job['type']", "job.get('job', {}): tags = job.setdefault('tags', []) if isinstance(tags, str): tags", "return r.get(JOB_STATUS_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_status(job):", "event_status, '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'hostname': hostname, 'uuid': uuid, 'tags':", "\"\"\"Return max value for backoff.\"\"\" return app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): \"\"\"Return", "send update to redis r = StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL % job['uuid'],", "set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve task worker r = StrictRedis(connection_pool=WORKER_STATUS_POOL)", "= ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle #bndl = doc.bundle(bundle_id) bndl =", "% task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_worker_status(worker): \"\"\"Retrieve worker", "output_ent = doc.product(output_id, None, [pub_urls[0]], [], None, None, None, label=objectid,", "= {'type': 'job_info', '@version': '1', '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'job':", "def ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure hard time limit gap.\"\"\" gap =", "tries for backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): \"\"\"Return minimum gap", "get_uuid(job['job_id']) bundle_id = \"hysds:%s\" % get_uuid('bundle-%s' % job['job_id']) doc =", "'prov:type'): continue pd['entity'][output_id][attr] = orig_ent[attr] 
# write prov with open(prov_es_file,", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in", "return app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): \"\"\"Return max tries for backoff.\"\"\" return", "None: JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): \"\"\"Set redis connection", "prod_url = \"file://%s%s\" % (execute_node, prod_path) input_id = \"hysds:%s\" %", "job['job_info']['execute_node'], role=job.get('username', None), label=sa_label, bundle=bndl) # create processStep doc.processStep(ps_id, job['job_info']['cmd_start'],", "max_tries=backoff_max_tries, max_value=backoff_max_value) def get_job_status(task_id): \"\"\"Retrieve job status by task ID", "{'resource': 'event', 'type': event_type, 'status': event_status, '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(),", "'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']", "% worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_job_status(task_id): \"\"\"Retrieve job", "get_task_worker(task_id): \"\"\"Retrieve task worker by task ID from redis.\"\"\" set_redis_worker_status_pool()", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type']", 
"log_task_worker(task_id, worker): \"\"\"Log task worker for task ID in redis.\"\"\"", "\"\"\"Log PROV-ES document. Create temp PROV-ES document to populate attributes", "ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else: #", "ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id']", "(execute_node, prod_path) input_id = \"hysds:%s\" % get_uuid(prod_url) input_ent = doc.granule(input_id,", "('job_info', 'job_id', 'task_id', 'delivery_info', 'tag', 'priority', 'container_image_name', 'container_image_url', 'name'): if", "None, [input_id], [output_id], label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\") # get json pd", "r = StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries,", "task worker by task ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL", "logger.info(\"hysds.custom_event:%s\" % json.dumps(info)) return uuid def log_prov_es(job, prov_es_info, prov_es_file): \"\"\"Log", "app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): \"\"\"Set redis connection pool for job info", "from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve worker status r", "by task ID from redis.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL # retrieve", "unicode_literals from __future__ import print_function from __future__ import division from", "ProvEsDocument # logger logger = get_task_logger(__name__) # redis connection pools", "if 'activity' in prov_es_info: if 
len(prov_es_info['activity']) == 1: ps_id_orig =", "prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig = list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][", "sa_id, None, [input_id], [output_id], label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\") # get json", "__future__ import print_function from __future__ import division from __future__ import", "connection pool for job status.\"\"\" global JOB_STATUS_POOL if JOB_STATUS_POOL is", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] =", "ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] = pd['activity']", "= prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) # update output entity for", "time limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure hard time", "== 1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0] # update software agent prov_es_info['bundle'][bundle_id_orig].setdefault(", "= list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] =", "JOB_STATUS_POOL # retrieve job status r = StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL", "backoff.\"\"\" return 
app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): \"\"\"Return max tries for backoff.\"\"\"", "temp PROV-ES document to populate attributes that only the worker", "import absolute_import from builtins import open from builtins import str", "in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else:", "time_limit def set_redis_job_status_pool(): \"\"\"Set redis connection pool for job status.\"\"\"", "= {'resource': 'event', 'type': event_type, 'status': event_status, '@timestamp': \"%sZ\" %", "# update wasAssociatedWith activity ids for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if", "# create processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id, None, [],", "\"hysds:%s\" % get_uuid(pub_urls[0]) output_ent = doc.product(output_id, None, [pub_urls[0]], [], None,", "get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid), execute_node, role=\"invoked\", label=sa_label, bundle=bndl) # create processStep", "\"\"\"Return max tries for backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): \"\"\"Return", "% get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid), execute_node, role=\"invoked\", label=sa_label, bundle=bndl) # create", "None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): \"\"\"Set redis connection", "\"hysds-worker-status-%s\" # task worker key template TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\" def", "for job status.\"\"\" global JOB_STATUS_POOL if JOB_STATUS_POOL is None: JOB_STATUS_POOL", "task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_worker_status(worker): \"\"\"Retrieve worker status", "None, label=os.path.basename(prod_url), bundle=bndl) # add output entity output_id = 
\"hysds:%s\"", "bundle=bndl) # create sofware agent pid = os.getpid() sa_label =", "for task ID r = StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL % task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES,", "BlockingConnectionPool, StrictRedis, RedisError from celery.utils.log import get_task_logger import hysds from", "app.conf.MOZART_URL if 'prov:type' not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] #", "redis connection pool for event status.\"\"\" global EVENT_STATUS_POOL if EVENT_STATUS_POOL", "bundle #bndl = doc.bundle(bundle_id) bndl = None # add input", "% task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_status(job): \"\"\"Print job", "\"\"\"Set redis connection pool for event status.\"\"\" global EVENT_STATUS_POOL if", "worker key template TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\" def backoff_max_value(): \"\"\"Return max", "prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for waw_id", "app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure hard time limit gap.\"\"\" gap", "write prov with open(prov_es_file, 'w') as f: json.dump(prov_es_info, f, indent=2)", "PROV-ES document.\"\"\" # create PROV-ES doc doc = ProvEsDocument(namespaces=prov_es_info['prefix']) #", "sa_label = \"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id = \"hysds:%s\"", "= {} for info in ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag',", "<reponame>fgreg/hysds<gh_stars>0 from __future__ import unicode_literals from __future__ import print_function from", "EVENT_STATUS_POOL is None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) 
@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries,", "create processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id, None, [], [],", "label=os.path.basename(prod_url), bundle=bndl) # add output entity output_id = \"hysds:%s\" %", "\"\"\"Retrieve worker status by worker ID from redis.\"\"\" set_redis_worker_status_pool() global", "pd['entity'][output_id][attr] = orig_ent[attr] # write prov with open(prov_es_file, 'w') as", "location=software_location, bundle=bndl) # create sofware agent pid = os.getpid() sa_label", "tags, '@version': '1', 'event': event} # send update to redis", "% get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None), label=sa_label, bundle=bndl) #", "objectid): \"\"\"Log publish step in PROV-ES document.\"\"\" # create PROV-ES", "'event': event} # send update to redis r = StrictRedis(connection_pool=EVENT_STATUS_POOL)", "max value for backoff.\"\"\" return app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): \"\"\"Return max", "\"hysds:%s\" % get_uuid(job['job_id']) bundle_id = \"hysds:%s\" % get_uuid('bundle-%s' % job['job_id'])", "= hysds.__url__ doc.software(software, [algorithm], software_version, label=software_title, location=software_location, bundle=bndl) # create", "% (execute_node, prod_path) input_id = \"hysds:%s\" % get_uuid(prod_url) input_ent =", "by task ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve", "f: json.dump(prov_es_info, f, indent=2) def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls, prod_metrics,", "for backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): \"\"\"Return minimum gap time", "r.setex(TASK_WORKER_KEY_TMPL % task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def", 
"'name'): if info in job: filtered_info[info] = job[info] job_info =", "get_uuid(prod_url) input_ent = doc.granule(input_id, None, [prod_url], [], None, None, None,", "prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url']", "in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity", "value for backoff.\"\"\" return app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): \"\"\"Return max tries", "\"\"\"Log task worker for task ID in redis.\"\"\" set_redis_worker_status_pool() global", "set_redis_job_status_pool() global JOB_STATUS_POOL # retrieve job status r = StrictRedis(connection_pool=JOB_STATUS_POOL)", "task worker r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo,", "logger.info(\"job_info_json:%s\" % json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_custom_event(event_type, event_status,", "try: hostname = socket.gethostbyname(socket.gethostname()) except: hostname = '' info =", "verdi know ps_id = \"hysds:%s\" % get_uuid(job['job_id']) bundle_id = \"hysds:%s\"", "== 1: ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] =", "update software agent and process step if 'bundle' in prov_es_info:", "is None or time_limit <= soft_time_limit+gap): time_limit = soft_time_limit +", "prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = 
job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in", "{}).update( pd['wasAssociatedWith']) # update process step if 'activity' in prov_es_info:", "= StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL % job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) # for dedup", "celery.utils.log import get_task_logger import hysds from hysds.celery import app from", "pid, prod_metrics['time_start']) sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid), execute_node,", "% job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) # for dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) #", "ES logger.info(\"job_status_json:%s\" % json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_info(job):", "update output entity for attr in orig_ent: if attr in", "from hysds.celery import app from prov_es.model import get_uuid, ProvEsDocument #", "ID r = StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL % task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo,", "= str(uuid4()) if hostname is None: try: hostname = socket.getfqdn()", "not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update wasAssociatedWith", "= os.getpid() sa_label = \"hysds:publish_dataset/%s/%d/%s\" % (execute_node, pid, prod_metrics['time_start']) sa_id", "= soft_time_limit + gap return soft_time_limit, time_limit def set_redis_job_status_pool(): \"\"\"Set", "= pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] 
prov_es_info['activity'][ps_id_orig]['hysds:job_type'] =", "output entity output_id = \"hysds:%s\" % get_uuid(pub_urls[0]) output_ent = doc.product(output_id,", "pool for event status.\"\"\" global EVENT_STATUS_POOL if EVENT_STATUS_POOL is None:", "% (hysds.__description__, software_version) software = \"eos:HySDS-%s\" % software_version software_location =", "input entity execute_node = socket.getfqdn() prod_url = \"file://%s%s\" % (execute_node,", "else: prov_es_info['activity'] = pd['activity'] # write prov with open(prov_es_file, 'w')", "PROV-ES document to populate attributes that only the worker has", "if len(prov_es_info['bundle']) == 1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0] # update software", "task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_task_worker(task_id): \"\"\"Retrieve", "= job.get('job', {}).get('type', 'unknown') job['@version'] = '1' job['@timestamp'] = \"%sZ\"", "job['tags'] = tags # send update to redis r =", "StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" % json.dumps(info)) return uuid def log_prov_es(job,", "JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\" # worker status key template WORKER_STATUS_KEY_TMPL =", "'1', 'event': event} # send update to redis r =", "\"file://%s%s\" % (execute_node, prod_path) input_id = \"hysds:%s\" % get_uuid(prod_url) input_ent", "global WORKER_STATUS_POOL if WORKER_STATUS_POOL is None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL)", "import print_function from __future__ import division from __future__ import absolute_import", "attributes that only verdi know ps_id = \"hysds:%s\" % get_uuid(job['job_id'])", "get bundle #bndl = doc.bundle(bundle_id) bndl = None # add", "get json pd = json.loads(doc.serialize()) # update software agent and", "# 
update input entity orig_ent = prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent)", "WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): \"\"\"Set redis connection pool", "WORKER_STATUS_POOL = None EVENT_STATUS_POOL = None # job status key", "role=\"invoked\", label=sa_label, bundle=bndl) # create processStep job_id = \"publish_dataset-%s\" %", "% datetime.utcnow().isoformat() if 'tag' in job.get('job', {}): tags = job.setdefault('tags',", "% software_version software_location = hysds.__url__ doc.software(software, [algorithm], software_version, label=software_title, location=software_location,", "r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries,", "# create processStep job_id = \"publish_dataset-%s\" % os.path.basename(prod_path) doc.processStep(\"hysds:%s\" %", "get_job_status(task_id): \"\"\"Retrieve job status by task ID from redis.\"\"\" set_redis_job_status_pool()", "\"%sZ\" % datetime.utcnow().isoformat(), 'job': filtered_info, 'job_type': job['type']} # send update", "has access to (e.g. 
PID).\"\"\" # create PROV-ES doc to", "= \"publish_dataset-%s\" % os.path.basename(prod_path) doc.processStep(\"hysds:%s\" % get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software],", "= job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type'", "is None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): \"\"\"Set redis", "bundle_id_orig = list(prov_es_info['bundle'].keys())[0] # update software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent'])", "set_redis_event_status_pool(): \"\"\"Set redis connection pool for event status.\"\"\" global EVENT_STATUS_POOL", "import BlockingConnectionPool, StrictRedis, RedisError from celery.utils.log import get_task_logger import hysds", "logger = get_task_logger(__name__) # redis connection pools JOB_STATUS_POOL = None", "= None WORKER_STATUS_POOL = None EVENT_STATUS_POOL = None # job", "limit gap.\"\"\" gap = hard_time_limit_gap() if soft_time_limit is not None", "'prov:type' not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith", "process step if 'activity' in prov_es_info: if len(prov_es_info['activity']) == 1:", "create sofware agent sa_label = \"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat())", "from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve task worker r", "app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_task_worker(task_id, worker): \"\"\"Log task", "get_uuid('bundle-%s' % job['job_id']) doc = ProvEsDocument() # get bundle #bndl", 
"from __future__ import division from __future__ import absolute_import from builtins", "job status key template JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\" # worker status", "software_version software_location = hysds.__url__ doc.software(software, [algorithm], software_version, label=software_title, location=software_location, bundle=bndl)", "# add input entity execute_node = socket.getfqdn() prod_url = \"file://%s%s\"", "# job status key template JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\" # worker", "filtered_info[info] = job[info] job_info = {'type': 'job_info', '@version': '1', '@timestamp':", "populate attributes that only the worker has access to (e.g.", "pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for waw_id in prov_es_info['wasAssociatedWith']:", "# for dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES logger.info(\"job_status_json:%s\" %", "[software], sa_id, None, [input_id], [output_id], label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\") # get", "= ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else:", "update to redis r = StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL % job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES,", "% json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_info(job): \"\"\"Print job", "prov with open(prov_es_file, 'w') as f: json.dump(prov_es_info, f, indent=2) def", "worker status by worker ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL", "r.get(WORKER_STATUS_KEY_TMPL % worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_job_status(task_id): \"\"\"Retrieve", "None, None, 
label=os.path.basename(prod_url), bundle=bndl) # add output entity output_id =", "r = StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" % json.dumps(info)) return uuid", "only verdi know ps_id = \"hysds:%s\" % get_uuid(job['job_id']) bundle_id =", "RedisError from celery.utils.log import get_task_logger import hysds from hysds.celery import", "import uuid4 from redis import BlockingConnectionPool, StrictRedis, RedisError from celery.utils.log", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if", "prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else: # update software", "retrieve worker status r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL % worker)", "worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_job_status(task_id): \"\"\"Retrieve job status", "= ProvEsDocument() # get bundle #bndl = doc.bundle(bundle_id) bndl =", "app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): \"\"\"Return max tries for backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES", "prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if", "document to populate attributes that only the worker has access", "None: JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def 
set_redis_worker_status_pool(): \"\"\"Set redis connection", "in prov_es_info: if len(prov_es_info['activity']) == 1: ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime']", "JOB_INFO_POOL if JOB_INFO_POOL is None: JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def", "job.get('job', {}).get('type', 'unknown') job['@version'] = '1' job['@timestamp'] = \"%sZ\" %", "from __future__ import absolute_import from builtins import open from builtins", "StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" % json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value)", "document. Create temp PROV-ES document to populate attributes that only", "if soft_time_limit is not None and (time_limit is None or", "list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime']", "activity if 'activity' in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig", "return r.get(WORKER_STATUS_KEY_TMPL % worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_job_status(task_id):", "\"%sZ\" % datetime.utcnow().isoformat(), 'hostname': hostname, 'uuid': uuid, 'tags': tags, '@version':", "@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_task_worker(task_id): \"\"\"Retrieve task worker by", "prov_es_info, prov_es_file): \"\"\"Log PROV-ES document. 
Create temp PROV-ES document to", "= BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): \"\"\"Set redis connection pool for", "% os.path.basename(prod_path) doc.processStep(\"hysds:%s\" % get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id, None,", "update wasAssociatedWith activity ids for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity']", "to redis r = StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" % json.dumps(info))", "input_id = \"hysds:%s\" % get_uuid(prod_url) input_ent = doc.granule(input_id, None, [prod_url],", "set_redis_event_status_pool() global EVENT_STATUS_POOL uuid = str(uuid4()) if hostname is None:", "to generate attributes that only verdi know ps_id = \"hysds:%s\"", "filtered_info = {} for info in ('job_info', 'job_id', 'task_id', 'delivery_info',", "[prod_url], [], None, None, None, label=os.path.basename(prod_url), bundle=bndl) # add output", "in ('prov:location', 'prov:label', 'prov:type'): continue pd['entity'][output_id][attr] = orig_ent[attr] # write", "prod_metrics['time_end'], [software], sa_id, None, [input_id], [output_id], label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\") #", "sofware agent sa_label = \"hysds:pge_wrapper/%s/%d/%s\" % (job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id", "prov_es_info.setdefault('agent', {}).update(pd['agent']) # update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) # update", "def log_prov_es(job, prov_es_info, prov_es_file): \"\"\"Log PROV-ES document. 
Create temp PROV-ES", "None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def", "StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL % job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) # for dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY,", "\"hysds:%s\" % get_uuid('bundle-%s' % job['job_id']) doc = ProvEsDocument() # get", "uuid, 'tags': tags, '@version': '1', 'event': event} # send update", "update software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault(", "json.dump(prov_es_info, f, indent=2) def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls, prod_metrics, objectid):", "r.get(TASK_WORKER_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_worker_status(worker): \"\"\"Retrieve", "access to (e.g. 
PID).\"\"\" # create PROV-ES doc to generate", "get_task_logger import hysds from hysds.celery import app from prov_es.model import", "\"\"\"Set redis connection pool for worker status.\"\"\" global WORKER_STATUS_POOL if", "task ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve task", "set_redis_job_status_pool() global JOB_STATUS_POOL job['resource'] = 'job' job['type'] = job.get('job', {}).get('type',", "socket import msgpack import traceback import types import backoff from", "redis connection pools JOB_STATUS_POOL = None JOB_INFO_POOL = None WORKER_STATUS_POOL", "to populate attributes that only the worker has access to", "label=sa_label, bundle=bndl) # create processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id,", "agent and process step if 'bundle' in prov_es_info: if len(prov_es_info['bundle'])", "len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig = list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] =", "prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else:", "RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_job_status(task_id): \"\"\"Retrieve job status by task", "JOB_INFO_POOL is None: JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool(): \"\"\"Set", "from redis.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL # retrieve job status r", "send update to redis r = StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\"", "bndl = None 
# add input entity execute_node = socket.getfqdn()", "= \"file://%s%s\" % (execute_node, prod_path) input_id = \"hysds:%s\" % get_uuid(prod_url)", "os.getpid() sa_label = \"hysds:publish_dataset/%s/%d/%s\" % (execute_node, pid, prod_metrics['time_start']) sa_id =", "entity orig_ent = prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) # update output", "StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def", "for ES logger.info(\"job_status_json:%s\" % json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def", "#bndl = doc.bundle(bundle_id) bndl = None # create sofware agent", "doc.granule(input_id, None, [prod_url], [], None, None, None, label=os.path.basename(prod_url), bundle=bndl) #", "info.\"\"\" set_redis_job_info_pool() global JOB_INFO_POOL filtered_info = {} for info in", "\"%s v%s\" % (hysds.__description__, software_version) software = \"eos:HySDS-%s\" % software_version", "step in PROV-ES document.\"\"\" # create PROV-ES doc doc =", "prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else: # update software agent prov_es_info.setdefault('agent', {}).update(pd['agent'])", "process step if 'bundle' in prov_es_info: if len(prov_es_info['bundle']) == 1:", "wasAssociatedWith activity ids for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] ==", "# get json pd = json.loads(doc.serialize()) # update software agent", "doc doc = ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle #bndl = doc.bundle(bundle_id)", "= pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for waw_id in", "pid = os.getpid() sa_label = 
\"hysds:publish_dataset/%s/%d/%s\" % (execute_node, pid, prod_metrics['time_start'])", "datetime.utcnow().isoformat()) sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username',", "#bndl = doc.bundle(bundle_id) bndl = None # add input entity", "= \"%s v%s\" % (hysds.__description__, software_version) software = \"eos:HySDS-%s\" %", "software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith',", "entity for attr in orig_ent: if attr in ('prov:location', 'prov:label',", "job['job_id']) doc = ProvEsDocument() # get bundle #bndl = doc.bundle(bundle_id)", "r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info)) logger.info(\"hysds.custom_event:%s\" % json.dumps(info)) return uuid def log_prov_es(job, prov_es_info,", "get_worker_status(worker): \"\"\"Retrieve worker status by worker ID from redis.\"\"\" set_redis_worker_status_pool()", "{}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity if 'activity' in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity'])", "return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure hard time limit gap.\"\"\"", "= \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid), execute_node, role=\"invoked\", label=sa_label, bundle=bndl)", "import traceback import types import backoff from datetime import datetime", "None WORKER_STATUS_POOL = None EVENT_STATUS_POOL = None # job status", "in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids", "if hostname is None: try: hostname = socket.getfqdn() except: try:", "\"hysds:%s\" % get_uuid(sa_label) 
doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None), label=sa_label, bundle=bndl)", "info in ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag', 'priority', 'container_image_name', 'container_image_url',", "connection pool for event status.\"\"\" global EVENT_STATUS_POOL if EVENT_STATUS_POOL is", "None: try: hostname = socket.getfqdn() except: try: hostname = socket.gethostbyname(socket.gethostname())", "None, None, None, label=objectid, bundle=bndl) # software and algorithm algorithm", "worker): \"\"\"Log task worker for task ID in redis.\"\"\" set_redis_worker_status_pool()", "max_value=backoff_max_value) def get_task_worker(task_id): \"\"\"Retrieve task worker by task ID from", "status by task ID from redis.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL #", "in ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag', 'priority', 'container_image_name', 'container_image_url', 'name'):", "datetime.utcnow().isoformat(), 'job': filtered_info, 'job_type': job['type']} # send update to redis", "str(uuid4()) if hostname is None: try: hostname = socket.getfqdn() except:", "= \"hysds:%s\" % get_uuid(job['job_id']) bundle_id = \"hysds:%s\" % get_uuid('bundle-%s' %", "import os import re import json import copy import socket", "# update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity if", "'w') as f: json.dump(prov_es_info, f, indent=2) def log_publish_prov_es(prov_es_info, prov_es_file, prod_path,", "time after soft time limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit):", "pd['activity'] # write prov with open(prov_es_file, 'w') as f: json.dump(prov_es_info,", "'job_id', 'task_id', 'delivery_info', 'tag', 'priority', 'container_image_name', 'container_image_url', 'name'): if info", "def 
set_redis_event_status_pool(): \"\"\"Set redis connection pool for event status.\"\"\" global", "'prov:label', 'prov:type'): continue pd['entity'][output_id][attr] = orig_ent[attr] # write prov with", "bundle=bndl) # create processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id, None,", "backoff_max_tries(): \"\"\"Return max tries for backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap():", "except: try: hostname = socket.gethostbyname(socket.gethostname()) except: hostname = '' info", "JOB_INFO_POOL = None WORKER_STATUS_POOL = None EVENT_STATUS_POOL = None #", "entity output_id = \"hysds:%s\" % get_uuid(pub_urls[0]) output_ent = doc.product(output_id, None,", "return r.get(TASK_WORKER_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_worker_status(worker):", "ids for waw_id in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity']", "prov_es_file, prod_path, pub_urls, prod_metrics, objectid): \"\"\"Log publish step in PROV-ES", "msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" % json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_custom_event(event_type,", "retrieve task worker r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL % task_id)", "json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_custom_event(event_type, event_status, event, tags=[],", "for waw_id in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] =", "orig_ent: if attr in ('prov:location', 'prov:label', 'prov:type'): 
continue pd['entity'][output_id][attr] =", "with open(prov_es_file, 'w') as f: json.dump(prov_es_info, f, indent=2) def log_publish_prov_es(prov_es_info,", "prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity if 'activity' in prov_es_info['bundle'][bundle_id_orig]:", "JOB_STATUS_POOL is None: JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): \"\"\"Set", "= \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None), label=sa_label,", "= job.setdefault('tags', []) if isinstance(tags, str): tags = [tags] tags.append(job['job']['tag'])", "JOB_STATUS_POOL if JOB_STATUS_POOL is None: JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def", "publish step in PROV-ES document.\"\"\" # create PROV-ES doc doc", "= None # add input entity execute_node = socket.getfqdn() prod_url", "tags # send update to redis r = StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL", "in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else:", "'task_id', 'delivery_info', 'tag', 'priority', 'container_image_name', 'container_image_url', 'name'): if info in", "that only the worker has access to (e.g. 
PID).\"\"\" #", "update software agent prov_es_info.setdefault('agent', {}).update(pd['agent']) # update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update(", "software_location = hysds.__url__ doc.software(software, [algorithm], software_version, label=software_title, location=software_location, bundle=bndl) #", "key template WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\" # task worker key template", "JOB_INFO_POOL filtered_info = {} for info in ('job_info', 'job_id', 'task_id',", "None EVENT_STATUS_POOL = None # job status key template JOB_STATUS_KEY_TMPL", "status.\"\"\" global WORKER_STATUS_POOL if WORKER_STATUS_POOL is None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url(", "ID from redis.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL # retrieve job status", "event_type, 'status': event_status, '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'hostname': hostname, 'uuid':", "datetime.utcnow().isoformat(), 'hostname': hostname, 'uuid': uuid, 'tags': tags, '@version': '1', 'event':", "open from builtins import str from future import standard_library standard_library.install_aliases()", "doc to generate attributes that only verdi know ps_id =", "prov_es_info: if len(prov_es_info['activity']) == 1: ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] =", "event.\"\"\" set_redis_event_status_pool() global EVENT_STATUS_POOL uuid = str(uuid4()) if hostname is", "EVENT_STATUS_POOL uuid = str(uuid4()) if hostname is None: try: hostname", "prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity']", "log_job_status(job): \"\"\"Print job status.\"\"\" set_redis_job_status_pool() global JOB_STATUS_POOL job['resource'] = 'job'", "import hysds from hysds.celery import app from prov_es.model import 
get_uuid,", "+ gap return soft_time_limit, time_limit def set_redis_job_status_pool(): \"\"\"Set redis connection", "'job': filtered_info, 'job_type': job['type']} # send update to redis r", "# worker status key template WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\" # task", "= None EVENT_STATUS_POOL = None # job status key template", "import re import json import copy import socket import msgpack", "from builtins import open from builtins import str from future", "to redis r = StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" % json.dumps(job_info))", "@backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_job_status(task_id): \"\"\"Retrieve job status by", "'tag', 'priority', 'container_image_name', 'container_image_url', 'name'): if info in job: filtered_info[info]", "ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] = pd['activity'] # write prov", "\"%sZ\" % datetime.utcnow().isoformat() if 'tag' in job.get('job', {}): tags =", "= list(prov_es_info['bundle'].keys())[0] # update software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) #", "= hard_time_limit_gap() if soft_time_limit is not None and (time_limit is", "r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL % worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries,", "% job['type']) # get json pd = json.loads(doc.serialize()) # update", "from builtins import str from future import standard_library standard_library.install_aliases() import", "doc.software(software, [algorithm], software_version, label=software_title, location=software_location, bundle=bndl) # create sofware agent", "for job info metrics.\"\"\" global JOB_INFO_POOL if JOB_INFO_POOL is None:", 
"log_custom_event(event_type, event_status, event, tags=[], hostname=None): \"\"\"Log custom event.\"\"\" set_redis_event_status_pool() global", "\"eos:product_publishing\" software_version = hysds.__version__ software_title = \"%s v%s\" % (hysds.__description__,", "if info in job: filtered_info[info] = job[info] job_info = {'type':", "# write prov with open(prov_es_file, 'w') as f: json.dump(pd, f,", "global JOB_STATUS_POOL job['resource'] = 'job' job['type'] = job.get('job', {}).get('type', 'unknown')", "pub_urls, prod_metrics, objectid): \"\"\"Log publish step in PROV-ES document.\"\"\" #", "gap return soft_time_limit, time_limit def set_redis_job_status_pool(): \"\"\"Set redis connection pool", "job['type']) # get json pd = json.loads(doc.serialize()) # update software", "to (e.g. PID).\"\"\" # create PROV-ES doc to generate attributes", "worker for task ID r = StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL % task_id,", "'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity if 'activity' in prov_es_info['bundle'][bundle_id_orig]: if", "\"\"\"Retrieve job status by task ID from redis.\"\"\" set_redis_job_status_pool() global", "prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type']", "# update wasAssociatedWith activity ids for waw_id in prov_es_info['wasAssociatedWith']: if", "prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] 
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id']", "job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) # for dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for", "wasAssociatedWith activity ids for waw_id in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] ==", "waw_id in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig", "get_task_logger(__name__) # redis connection pools JOB_STATUS_POOL = None JOB_INFO_POOL =", "== ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] =", "event status.\"\"\" global EVENT_STATUS_POOL if EVENT_STATUS_POOL is None: EVENT_STATUS_POOL =", "job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL", "\"hysds-job-status-%s\" # worker status key template WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\" #", "bundle=bndl) # software and algorithm algorithm = \"eos:product_publishing\" software_version =", "job_id = \"publish_dataset-%s\" % os.path.basename(prod_path) doc.processStep(\"hysds:%s\" % get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'],", "= \"hysds:%s\" % get_uuid(pub_urls[0]) output_ent = doc.product(output_id, None, [pub_urls[0]], [],", "import app from prov_es.model import get_uuid, ProvEsDocument # logger logger", "= get_task_logger(__name__) # redis connection pools JOB_STATUS_POOL = None JOB_INFO_POOL", "hysds.celery import app from prov_es.model import get_uuid, ProvEsDocument # logger", "the 
worker has access to (e.g. PID).\"\"\" # create PROV-ES", "% job['job_id']) doc = ProvEsDocument() # get bundle #bndl =", "'tag' in job.get('job', {}): tags = job.setdefault('tags', []) if isinstance(tags,", "__future__ import absolute_import from builtins import open from builtins import", "global JOB_STATUS_POOL # retrieve job status r = StrictRedis(connection_pool=JOB_STATUS_POOL) return", "= job[info] job_info = {'type': 'job_info', '@version': '1', '@timestamp': \"%sZ\"", "from datetime import datetime from uuid import uuid4 from redis", "if JOB_INFO_POOL is None: JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool():", "logger.info(\"job_status_json:%s\" % json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_info(job): \"\"\"Print", "in redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # set task worker for", "global JOB_STATUS_POOL if JOB_STATUS_POOL is None: JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL)", "soft_time_limit+gap): time_limit = soft_time_limit + gap return soft_time_limit, time_limit def", "job: filtered_info[info] = job[info] job_info = {'type': 'job_info', '@version': '1',", "= BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool(): \"\"\"Set redis connection pool for", "'job' job['type'] = job.get('job', {}).get('type', 'unknown') job['@version'] = '1' job['@timestamp']", "r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES logger.info(\"job_status_json:%s\" % json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError,", "doc.softwareAgent(sa_id, str(pid), execute_node, role=\"invoked\", label=sa_label, bundle=bndl) # create processStep job_id", "# retrieve job status r = StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL %", "hostname = socket.getfqdn() 
except: try: hostname = socket.gethostbyname(socket.gethostname()) except: hostname", "% get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id, None, [input_id], [output_id], label=job_id,", "limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure hard time limit", "uuid import uuid4 from redis import BlockingConnectionPool, StrictRedis, RedisError from", "def set_redis_job_status_pool(): \"\"\"Set redis connection pool for job status.\"\"\" global", "or time_limit <= soft_time_limit+gap): time_limit = soft_time_limit + gap return", "PROV-ES document. Create temp PROV-ES document to populate attributes that", "str(pid), execute_node, role=\"invoked\", label=sa_label, bundle=bndl) # create processStep job_id =", "generate attributes that only verdi know ps_id = \"hysds:%s\" %", "= \"eos:HySDS-%s\" % software_version software_location = hysds.__url__ doc.software(software, [algorithm], software_version,", "\"hysds:publish_dataset/%s/%d/%s\" % (execute_node, pid, prod_metrics['time_start']) sa_id = \"hysds:%s\" % get_uuid(sa_label)", "event, tags=[], hostname=None): \"\"\"Log custom event.\"\"\" set_redis_event_status_pool() global EVENT_STATUS_POOL uuid", "status r = StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError,", "hysds.__version__ software_title = \"%s v%s\" % (hysds.__description__, software_version) software =", "# send update to redis r = StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL %", "def backoff_max_value(): \"\"\"Return max value for backoff.\"\"\" return app.conf.BACKOFF_MAX_VALUE def", "PID).\"\"\" # create PROV-ES doc to generate attributes that only", "= pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = 
pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type']", "software_title = \"%s v%s\" % (hysds.__description__, software_version) software = \"eos:HySDS-%s\"", "uuid def log_prov_es(job, prov_es_info, prov_es_file): \"\"\"Log PROV-ES document. Create temp", "# send update to redis r = StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info))", "info metrics.\"\"\" global JOB_INFO_POOL if JOB_INFO_POOL is None: JOB_INFO_POOL =", "EVENT_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_task_worker(task_id,", "= job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type'", "pd['bundle'][bundle_id]['activity'] else: # update software agent prov_es_info.setdefault('agent', {}).update(pd['agent']) # update", "= list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] =", "processStep job_id = \"publish_dataset-%s\" % os.path.basename(prod_path) doc.processStep(\"hysds:%s\" % get_uuid(job_id), prod_metrics['time_start'],", "ids for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity']", "WORKER_STATUS_POOL # 
retrieve worker status r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL", "and (time_limit is None or time_limit <= soft_time_limit+gap): time_limit =", "worker ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve worker", "hostname is None: try: hostname = socket.getfqdn() except: try: hostname", "prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] = pd['activity'] #", "in PROV-ES document.\"\"\" # create PROV-ES doc doc = ProvEsDocument(namespaces=prov_es_info['prefix'])", "None, None, label=objectid, bundle=bndl) # software and algorithm algorithm =", "if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] #", "template JOB_STATUS_KEY_TMPL = \"hysds-job-status-%s\" # worker status key template WORKER_STATUS_KEY_TMPL", "global JOB_INFO_POOL if JOB_INFO_POOL is None: JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL)", "import types import backoff from datetime import datetime from uuid", "'container_image_url', 'name'): if info in job: filtered_info[info] = job[info] job_info", "prov_es.model import get_uuid, ProvEsDocument # logger logger = get_task_logger(__name__) #", "app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): \"\"\"Set redis connection pool for event status.\"\"\"", "\"\"\"Log custom event.\"\"\" set_redis_event_status_pool() global EVENT_STATUS_POOL uuid = str(uuid4()) if", "log_prov_es(job, prov_es_info, prov_es_file): \"\"\"Log PROV-ES document. 
Create temp PROV-ES document", "step if 'bundle' in prov_es_info: if len(prov_es_info['bundle']) == 1: bundle_id_orig", "for worker status.\"\"\" global WORKER_STATUS_POOL if WORKER_STATUS_POOL is None: WORKER_STATUS_POOL", "input_ent = doc.granule(input_id, None, [prod_url], [], None, None, None, label=os.path.basename(prod_url),", "'uuid': uuid, 'tags': tags, '@version': '1', 'event': event} # send", "= StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" % json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries,", "RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_task_worker(task_id, worker): \"\"\"Log task worker for", "else: # update software agent prov_es_info.setdefault('agent', {}).update(pd['agent']) # update wasAssociatedWith", "doc = ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle #bndl = doc.bundle(bundle_id) bndl", "r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" % json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def", "# send update to redis r = StrictRedis(connection_pool=EVENT_STATUS_POOL) r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info))", "prov_es_info: if len(prov_es_info['bundle']) == 1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0] # update", "absolute_import from builtins import open from builtins import str from", "worker r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError,", "= socket.getfqdn() except: try: hostname = socket.gethostbyname(socket.gethostname()) except: hostname =", "r.get(JOB_STATUS_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def 
log_job_status(job): \"\"\"Print", "import str from future import standard_library standard_library.install_aliases() import os import", "sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None),", "processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id, None, [], [], bundle=bndl,", "'unknown') job['@version'] = '1' job['@timestamp'] = \"%sZ\" % datetime.utcnow().isoformat() if", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for", "set_redis_worker_status_pool(): \"\"\"Set redis connection pool for worker status.\"\"\" global WORKER_STATUS_POOL", "None), label=sa_label, bundle=bndl) # create processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [],", "job status r = StrictRedis(connection_pool=JOB_STATUS_POOL) return r.get(JOB_STATUS_KEY_TMPL % task_id) @backoff.on_exception(backoff.expo,", "1: ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime']", "execute_node = socket.getfqdn() prod_url = \"file://%s%s\" % (execute_node, prod_path) input_id", "% task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_task_worker(task_id):", "r = StrictRedis(connection_pool=JOB_STATUS_POOL) r.setex(JOB_STATUS_KEY_TMPL % job['uuid'], app.conf.HYSDS_JOB_STATUS_EXPIRES, job['status']) # for", "= doc.product(output_id, None, [pub_urls[0]], [], None, None, None, label=objectid, bundle=bndl)", "JOB_STATUS_POOL = None JOB_INFO_POOL = None WORKER_STATUS_POOL = None 
EVENT_STATUS_POOL", "WORKER_STATUS_POOL # retrieve task worker r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(TASK_WORKER_KEY_TMPL", "wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity if 'activity' in", "import msgpack import traceback import types import backoff from datetime", "if EVENT_STATUS_POOL is None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo, RedisError,", "soft time limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure hard", "json.loads(doc.serialize()) # update software agent and process step if 'bundle'", "'' info = {'resource': 'event', 'type': event_type, 'status': event_status, '@timestamp':", "app.conf.MOZART_URL if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type']", "job.setdefault('tags', []) if isinstance(tags, str): tags = [tags] tags.append(job['job']['tag']) job['tags']", "'bundle' in prov_es_info: if len(prov_es_info['bundle']) == 1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0]", "agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith'])", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type']", "prod_metrics['time_start']) sa_id = \"hysds:%s\" % get_uuid(sa_label) 
doc.softwareAgent(sa_id, str(pid), execute_node, role=\"invoked\",", "EVENT_STATUS_POOL = None # job status key template JOB_STATUS_KEY_TMPL =", "[algorithm], software_version, label=software_title, location=software_location, bundle=bndl) # create sofware agent pid", "sa_id, None, [], [], bundle=bndl, prov_type=\"hysds:%s\" % job['type']) # get", "job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type']", "% json.dumps(info)) return uuid def log_prov_es(job, prov_es_info, prov_es_file): \"\"\"Log PROV-ES", "pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']:", "(job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']),", "activity ids for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:", "job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not", "'status': event_status, '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'hostname': hostname, 'uuid': uuid,", "prod_metrics, objectid): \"\"\"Log publish step in PROV-ES document.\"\"\" # create", "r = StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL % task_id, app.conf.HYSDS_JOB_STATUS_EXPIRES, worker) @backoff.on_exception(backoff.expo, RedisError,", "{}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', 
{}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity", "redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve task worker r =", "document.\"\"\" # create PROV-ES doc doc = ProvEsDocument(namespaces=prov_es_info['prefix']) # get", "if 'tag' in job.get('job', {}): tags = job.setdefault('tags', []) if", "update to redis r = StrictRedis(connection_pool=JOB_INFO_POOL) r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info)) logger.info(\"job_info_json:%s\" %", "import socket import msgpack import traceback import types import backoff", "algorithm algorithm = \"eos:product_publishing\" software_version = hysds.__version__ software_title = \"%s", "socket.gethostbyname(socket.gethostname()) except: hostname = '' info = {'resource': 'event', 'type':", "software_version) software = \"eos:HySDS-%s\" % software_version software_location = hysds.__url__ doc.software(software,", "= \"hysds:publish_dataset/%s/%d/%s\" % (execute_node, pid, prod_metrics['time_start']) sa_id = \"hysds:%s\" %", "BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_task_worker(task_id, worker): \"\"\"Log", "job['job_info']['cmd_end'], [], sa_id, None, [], [], bundle=bndl, prov_type=\"hysds:%s\" % job['type'])", "'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update", "from redis import BlockingConnectionPool, StrictRedis, RedisError from celery.utils.log import get_task_logger", "get_uuid(pub_urls[0]) output_ent = doc.product(output_id, None, [pub_urls[0]], [], None, None, None,", "open(prov_es_file, 'w') as f: json.dump(prov_es_info, f, indent=2) def log_publish_prov_es(prov_es_info, prov_es_file,", "time_limit): \"\"\"Ensure hard time limit gap.\"\"\" gap = 
hard_time_limit_gap() if", "max_value=backoff_max_value) def log_custom_event(event_type, event_status, event, tags=[], hostname=None): \"\"\"Log custom event.\"\"\"", "= socket.gethostbyname(socket.gethostname()) except: hostname = '' info = {'resource': 'event',", "msgpack.dumps(job)) # for ES logger.info(\"job_status_json:%s\" % json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries,", "# update process step if 'activity' in prov_es_info: if len(prov_es_info['activity'])", "custom event.\"\"\" set_redis_event_status_pool() global EVENT_STATUS_POOL uuid = str(uuid4()) if hostname", "{} for info in ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag', 'priority',", "None, [prod_url], [], None, None, None, label=os.path.basename(prod_url), bundle=bndl) # add", "metrics.\"\"\" global JOB_INFO_POOL if JOB_INFO_POOL is None: JOB_INFO_POOL = BlockingConnectionPool.from_url(", "minimum gap time after soft time limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP def", "prov_type=\"hysds:%s\" % job['type']) # get json pd = json.loads(doc.serialize()) #", "label=objectid, bundle=bndl) # software and algorithm algorithm = \"eos:product_publishing\" software_version", "status by worker ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL #", "prov_es_info['activity'] = pd['activity'] # write prov with open(prov_es_file, 'w') as", "max tries for backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): \"\"\"Return minimum", "for attr in orig_ent: if attr in ('prov:location', 'prov:label', 'prov:type'):", "= json.loads(doc.serialize()) # update input entity orig_ent = prov_es_info.get('entity', {}).get(input_id,", "= pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] =", "builtins import str from future import 
standard_library standard_library.install_aliases() import os", "pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']", "hostname = socket.gethostbyname(socket.gethostname()) except: hostname = '' info = {'resource':", "= StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL % worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value)", "job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id, None, [], [], bundle=bndl, prov_type=\"hysds:%s\" %", "status r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return r.get(WORKER_STATUS_KEY_TMPL % worker) @backoff.on_exception(backoff.expo, RedisError,", "task_id) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_status(job): \"\"\"Print job status.\"\"\"", "max_value=backoff_max_value) def get_job_status(task_id): \"\"\"Retrieve job status by task ID from", "# add output entity output_id = \"hysds:%s\" % get_uuid(pub_urls[0]) output_ent", "update process step if 'activity' in prov_es_info: if len(prov_es_info['activity']) ==", "= hysds.__version__ software_title = \"%s v%s\" % (hysds.__description__, software_version) software", "sa_label = \"hysds:publish_dataset/%s/%d/%s\" % (execute_node, pid, prod_metrics['time_start']) sa_id = \"hysds:%s\"", "= app.conf.MOZART_URL if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] =", "hard time limit gap.\"\"\" gap = hard_time_limit_gap() if soft_time_limit is", "app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): \"\"\"Return minimum gap time after soft time", "None, [pub_urls[0]], [], None, None, None, 
label=objectid, bundle=bndl) # software", "task worker key template TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\" def backoff_max_value(): \"\"\"Return", "RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_task_worker(task_id): \"\"\"Retrieve task worker by task", "info = {'resource': 'event', 'type': event_type, 'status': event_status, '@timestamp': \"%sZ\"", "update activity if 'activity' in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1:", "def get_worker_status(worker): \"\"\"Retrieve worker status by worker ID from redis.\"\"\"", "if 'prov:type' not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] # update", "BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): \"\"\"Set redis connection pool for event", "only the worker has access to (e.g. PID).\"\"\" # create", "backoff.\"\"\" return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): \"\"\"Return minimum gap time after", "prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url']", "JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool(): \"\"\"Set redis connection pool", "PROV-ES doc doc = ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle #bndl =", "def get_task_worker(task_id): \"\"\"Retrieve task worker by task ID from redis.\"\"\"", "'@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'hostname': hostname, 'uuid': uuid, 'tags': tags,", "% get_uuid(prod_url) input_ent = doc.granule(input_id, None, 
[prod_url], [], None, None,", "TASK_WORKER_KEY_TMPL = \"hysds-task-worker-%s\" def backoff_max_value(): \"\"\"Return max value for backoff.\"\"\"", "def log_job_info(job): \"\"\"Print job info.\"\"\" set_redis_job_info_pool() global JOB_INFO_POOL filtered_info =", "pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else: # update software agent", "prov_type=\"hysds:publish_dataset\") # get json pd = json.loads(doc.serialize()) # update input", "socket.getfqdn() except: try: hostname = socket.gethostbyname(socket.gethostname()) except: hostname = ''", "prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['activity'].update(pd['activity'])", "json pd = json.loads(doc.serialize()) # update input entity orig_ent =", "= \"eos:product_publishing\" software_version = hysds.__version__ software_title = \"%s v%s\" %", "'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url']", "[], None, None, None, label=os.path.basename(prod_url), bundle=bndl) # add output entity", "(execute_node, pid, prod_metrics['time_start']) sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid),", "worker has access to (e.g. 
PID).\"\"\" # create PROV-ES doc", "add input entity execute_node = socket.getfqdn() prod_url = \"file://%s%s\" %", "set_redis_worker_status_pool() global WORKER_STATUS_POOL # set task worker for task ID", "get_uuid, ProvEsDocument # logger logger = get_task_logger(__name__) # redis connection", "from __future__ import unicode_literals from __future__ import print_function from __future__", "time limit gap.\"\"\" gap = hard_time_limit_gap() if soft_time_limit is not", "\"\"\"Return minimum gap time after soft time limit.\"\"\" return app.conf.HARD_TIME_LIMIT_GAP", "in job: filtered_info[info] = job[info] job_info = {'type': 'job_info', '@version':", "try: hostname = socket.getfqdn() except: try: hostname = socket.gethostbyname(socket.gethostname()) except:", "\"\"\"Print job info.\"\"\" set_redis_job_info_pool() global JOB_INFO_POOL filtered_info = {} for", "\"\"\"Set redis connection pool for job status.\"\"\" global JOB_STATUS_POOL if", "doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id, None, [], [], bundle=bndl, prov_type=\"hysds:%s\"", "log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls, prod_metrics, objectid): \"\"\"Log publish step in", "'event', 'type': event_type, 'status': event_status, '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'hostname':", "info in job: filtered_info[info] = job[info] job_info = {'type': 'job_info',", "worker) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def get_task_worker(task_id): \"\"\"Retrieve task worker", "if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig = list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime']", "input entity orig_ent = prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) # update", "def log_task_worker(task_id, worker): 
\"\"\"Log task worker for task ID in", "{}).update(pd['agent']) # update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) # update process", "# update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) # update process step", "max_tries=backoff_max_tries, max_value=backoff_max_value) def get_worker_status(worker): \"\"\"Retrieve worker status by worker ID", "== ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity']", "job status.\"\"\" global JOB_STATUS_POOL if JOB_STATUS_POOL is None: JOB_STATUS_POOL =", "division from __future__ import absolute_import from builtins import open from", "'@version': '1', '@timestamp': \"%sZ\" % datetime.utcnow().isoformat(), 'job': filtered_info, 'job_type': job['type']}", "# logger logger = get_task_logger(__name__) # redis connection pools JOB_STATUS_POOL", "global EVENT_STATUS_POOL if EVENT_STATUS_POOL is None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL)", "create sofware agent pid = os.getpid() sa_label = \"hysds:publish_dataset/%s/%d/%s\" %", "job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id = \"hysds:%s\" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'],", "\"\"\"Set redis connection pool for job info metrics.\"\"\" global JOB_INFO_POOL", "json.dumps(job)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_job_info(job): \"\"\"Print job info.\"\"\"", "'tags': tags, '@version': '1', 'event': event} # send update to", "set_redis_job_status_pool(): \"\"\"Set redis connection pool for job status.\"\"\" global JOB_STATUS_POOL", 
"prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for", "# software and algorithm algorithm = \"eos:product_publishing\" software_version = hysds.__version__", "JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): \"\"\"Set redis connection pool", "# redis connection pools JOB_STATUS_POOL = None JOB_INFO_POOL = None", "redis connection pool for job info metrics.\"\"\" global JOB_INFO_POOL if", "redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve worker status r =", "output_id = \"hysds:%s\" % get_uuid(pub_urls[0]) output_ent = doc.product(output_id, None, [pub_urls[0]],", "if WORKER_STATUS_POOL is None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool():", "in prov_es_info: if len(prov_es_info['bundle']) == 1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0] #", "1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0] # update software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent',", "ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle #bndl = doc.bundle(bundle_id) bndl = None", "__future__ import unicode_literals from __future__ import print_function from __future__ import", "ensure_hard_time_limit_gap(soft_time_limit, time_limit): \"\"\"Ensure hard time limit gap.\"\"\" gap = hard_time_limit_gap()", "\"\"\"Retrieve task worker by task ID from redis.\"\"\" set_redis_worker_status_pool() global", "job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][", "'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update 
wasAssociatedWith activity ids for waw_id", "set task worker for task ID r = StrictRedis(connection_pool=WORKER_STATUS_POOL) r.setex(TASK_WORKER_KEY_TMPL", "# update software agent and process step if 'bundle' in", "redis connection pool for job status.\"\"\" global JOB_STATUS_POOL if JOB_STATUS_POOL", "set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve worker status r = StrictRedis(connection_pool=WORKER_STATUS_POOL)", "task ID in redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # set task", "max_value=backoff_max_value) def get_worker_status(worker): \"\"\"Retrieve worker status by worker ID from", "status.\"\"\" global EVENT_STATUS_POOL if EVENT_STATUS_POOL is None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url(", "redis connection pool for worker status.\"\"\" global WORKER_STATUS_POOL if WORKER_STATUS_POOL", "max_tries=backoff_max_tries, max_value=backoff_max_value) def log_custom_event(event_type, event_status, event, tags=[], hostname=None): \"\"\"Log custom", "[pub_urls[0]], [], None, None, None, label=objectid, bundle=bndl) # software and", "attr in ('prov:location', 'prov:label', 'prov:type'): continue pd['entity'][output_id][attr] = orig_ent[attr] #", "= job['job_id'] prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] =", "import get_uuid, ProvEsDocument # logger logger = get_task_logger(__name__) # redis", "% json.dumps(job_info)) @backoff.on_exception(backoff.expo, RedisError, max_tries=backoff_max_tries, max_value=backoff_max_value) def log_custom_event(event_type, event_status, event,", "indent=2) def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls, prod_metrics, objectid): \"\"\"Log publish", "{}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) # update output entity for attr in", "{}).get('type', 'unknown') 
job['@version'] = '1' job['@timestamp'] = \"%sZ\" % datetime.utcnow().isoformat()", "% get_uuid('bundle-%s' % job['job_id']) doc = ProvEsDocument() # get bundle", "ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve worker status", "prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) #", "for task ID in redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # set", "\"\"\"Log publish step in PROV-ES document.\"\"\" # create PROV-ES doc", "max_value=backoff_max_value) def log_task_worker(task_id, worker): \"\"\"Log task worker for task ID", "pd = json.loads(doc.serialize()) # update input entity orig_ent = prov_es_info.get('entity',", "template WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\" # task worker key template TASK_WORKER_KEY_TMPL", "pool for worker status.\"\"\" global WORKER_STATUS_POOL if WORKER_STATUS_POOL is None:", "software_version = hysds.__version__ software_title = \"%s v%s\" % (hysds.__description__, software_version)", "as f: json.dump(prov_es_info, f, indent=2) def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls,", "= tags # send update to redis r = StrictRedis(connection_pool=JOB_STATUS_POOL)", "[tags] tags.append(job['job']['tag']) job['tags'] = tags # send update to redis", "for dedup r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES logger.info(\"job_status_json:%s\" % json.dumps(job))", "max_value=backoff_max_value) def log_job_info(job): \"\"\"Print job info.\"\"\" set_redis_job_info_pool() global JOB_INFO_POOL filtered_info", "label=job_id, bundle=bndl, prov_type=\"hysds:publish_dataset\") # get json pd = json.loads(doc.serialize()) #", "in job.get('job', {}): tags = job.setdefault('tags', []) if isinstance(tags, str):", "WORKER_STATUS_POOL 
if WORKER_STATUS_POOL is None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def", "prov_es_file): \"\"\"Log PROV-ES document. Create temp PROV-ES document to populate", "# create sofware agent pid = os.getpid() sa_label = \"hysds:publish_dataset/%s/%d/%s\"", "prod_path, pub_urls, prod_metrics, objectid): \"\"\"Log publish step in PROV-ES document.\"\"\"", "'container_image_name', 'container_image_url', 'name'): if info in job: filtered_info[info] = job[info]", "doc.processStep(\"hysds:%s\" % get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id, None, [input_id], [output_id],", "from celery.utils.log import get_task_logger import hysds from hysds.celery import app", "hysds.__url__ doc.software(software, [algorithm], software_version, label=software_title, location=software_location, bundle=bndl) # create sofware", "'delivery_info', 'tag', 'priority', 'container_image_name', 'container_image_url', 'name'): if info in job:", "= job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['activity'][ps_id_orig]:", "global WORKER_STATUS_POOL # retrieve worker status r = StrictRedis(connection_pool=WORKER_STATUS_POOL) return", "else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else: # update software agent prov_es_info.setdefault('agent',", "= \"hysds-job-status-%s\" # worker status key template WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\"", "None or time_limit <= soft_time_limit+gap): time_limit = soft_time_limit + gap", "create PROV-ES doc to generate attributes that only verdi know", "app from prov_es.model import get_uuid, ProvEsDocument # logger logger =", "orig_ent[attr] # write prov with open(prov_es_file, 'w') as f: json.dump(pd,", "PROV-ES doc to generate attributes that only verdi know ps_id", "job['status']) # for dedup 
r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES logger.info(\"job_status_json:%s\"", "status key template WORKER_STATUS_KEY_TMPL = \"hysds-worker-status-%s\" # task worker key", "# update activity if 'activity' in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) ==", "job['@timestamp'] = \"%sZ\" % datetime.utcnow().isoformat() if 'tag' in job.get('job', {}):", "job['type'] = job.get('job', {}).get('type', 'unknown') job['@version'] = '1' job['@timestamp'] =", "def get_job_status(task_id): \"\"\"Retrieve job status by task ID from redis.\"\"\"", "by worker ID from redis.\"\"\" set_redis_worker_status_pool() global WORKER_STATUS_POOL # retrieve", "None JOB_INFO_POOL = None WORKER_STATUS_POOL = None EVENT_STATUS_POOL = None" ]
[ "result = api.swift.swift_create_pseudo_folder( request, container, object_name ) else: result =", "request): \"\"\"Get the list of containers for this account TODO(neillc):", "= StreamingHttpResponse(obj.data) safe = filename.replace(\",\", \"\") if six.PY2: safe =", "form.is_valid(): raise rest_utils.AjaxError(500, 'Invalid request') data = form.clean() if object_name[-1]", "= request.GET.get('path') if path is not None: path = urlunquote(path)", "2.0 (the \"License\"); # you may not use this file", "container, object_name ) # Add the original file extension back", "r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def post(self, request, container, object_name): dest_container", "listing for an account \"\"\" url_regex = r'swift/containers/$' @rest_utils.ajax() def", "r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get object information. :param", "% container, ) @rest_utils.ajax() def delete(self, request, container): try: api.swift.swift_delete_container(request,", "import csrf_exempt from django.views import generic import six from horizon", "{} if 'is_public' in request.DATA: metadata['is_public'] = request.DATA['is_public'] # This", "raise an exception if the container already exists try: api.swift.swift_create_container(request,", "a (pseudo) folder contents = [{ 'path': o.subdir if isinstance(o,", "getattr(o, 'content_type', None) } for o in objects[0] if o.name", "swift container listing for an account \"\"\" url_regex = r'swift/containers/$'", "from openstack_dashboard import api from openstack_dashboard.api.rest import urls from openstack_dashboard.api.rest", "rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s' % container, ) @rest_utils.ajax() def", "content @csrf_exempt def post(self, request, container, object_name): \"\"\"Create or replace", "for # contents of a (pseudo) folder contents = [{", 
"Any file content passed along with the request will be", "containers] return {'items': containers, 'has_more': has_more} @urls.register class Container(generic.View): \"\"\"API", "\\ '(?P<object_name>.+)$' # note: not an AJAX request - the", "is not None: path = urlunquote(path) objects = api.swift.swift_get_objects( request,", "object_name[-1] == '/': result = api.swift.swift_create_pseudo_folder( request, container, object_name )", "object_name: If the object_name (ie. POST path) ends in a", "api.swift.swift_copy_object( request, container, object_name, dest_container, dest_name ) except exceptions.AlreadyExists as", "out the folder from the listing if we're filtering for", "request.DATA['dest_container'] dest_name = request.DATA['dest_name'] try: result = api.swift.swift_copy_object( request, container,", "isinstance(o, swift.PseudoFolder), 'content_type': getattr(o, 'content_type', None) } for o in", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= request.DATA['dest_container'] dest_name = request.DATA['dest_name'] try: result = api.swift.swift_copy_object( request,", "= request.DATA['is_public'] # This will raise an exception if the", "isinstance(o, swift.PseudoFolder) else o.name, 'name': o.name.split('/')[-1], 'bytes': o.bytes, 'is_subdir': isinstance(o,", "= UploadObjectForm(request.POST, request.FILES) if not form.is_valid(): raise rest_utils.AjaxError(500, 'Invalid request')", "is created, rather than an object. Any file content passed", "django import forms from django.http import StreamingHttpResponse from django.utils.http import", "in containers] return {'items': containers, 'has_more': has_more} @urls.register class Container(generic.View):", "of a (pseudo) folder contents = [{ 'path': o.subdir if", "return {'items': contents} class UploadObjectForm(forms.Form): file = forms.FileField(required=False) @urls.register class", "case. POST parameter: :param file: the file data for the", "than an object. 
Any file content passed along with the", "be raw file content @csrf_exempt def post(self, request, container, object_name):", "class Container(generic.View): \"\"\"API for swift container level information \"\"\" url_regex", "api.swift.swift_get_objects( request, container, prefix=path ) # filter out the folder", "in the # name given to the object. filename =", "use this file except in compliance with the License. #", "\"\"\"API for swift container level information \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$'", "r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get the container details", "from django.utils.http import urlunquote from django.views.decorators.csrf import csrf_exempt from django.views", "import urls from openstack_dashboard.api.rest import utils as rest_utils from openstack_dashboard.api", "def post(self, request, container, object_name): \"\"\"Create or replace an object", "post(self, request, container, object_name): \"\"\"Create or replace an object or", "api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax() def post(self, request, container): metadata = {}", "os.path.splitext(obj.name)[1] and obj.orig_name: name, ext = os.path.splitext(obj.orig_name) filename = \"%s%s\"", "path = urlunquote(path) objects = api.swift.swift_get_objects( request, container, prefix=path )", "deleted if it's not empty. return rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True) def", "Object(generic.View): \"\"\"API for a single swift object or pseudo-folder \"\"\"", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "StreamingHttpResponse from django.utils.http import urlunquote from django.views.decorators.csrf import csrf_exempt from", "container level information \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def get(self,", "License. 
# You may obtain a copy of the License", "except exceptions.Conflict as e: # It cannot be deleted if", "request, container): metadata = {} if 'is_public' in request.DATA: metadata['is_public']", "import StreamingHttpResponse from django.utils.http import urlunquote from django.views.decorators.csrf import csrf_exempt", "api.swift.swift_create_container(request, container, metadata=metadata) except exceptions.AlreadyExists as e: # 409 Conflict", "for a list of swift objects \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$'", "metadata['is_public'] = request.DATA['is_public'] # This will raise an exception if", "filter out the folder from the listing if we're filtering", "under the License is distributed on an \"AS IS\" BASIS,", "= api.swift.swift_create_pseudo_folder( request, container, object_name ) else: result = api.swift.swift_upload_object(", "POST parameter: :param file: the file data for the upload.", "License for the specific language governing permissions and # limitations", "not empty. return rest_utils.JSONResponse(str(e), 409) else: api.swift.swift_delete_object(request, container, object_name) def", "metadata=metadata) except exceptions.AlreadyExists as e: # 409 Conflict return rest_utils.JSONResponse(str(e),", "= r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get object information.", "container listing for an account \"\"\" url_regex = r'swift/containers/$' @rest_utils.ajax()", "'(?P<object_name>.+)$' # note: not an AJAX request - the body", "class Objects(generic.View): \"\"\"API for a list of swift objects \"\"\"", "file data for the upload. 
:return: \"\"\" form = UploadObjectForm(request.POST,", "@urls.register class Object(generic.View): \"\"\"API for a single swift object or", "container, object_name) except exceptions.Conflict as e: # In case the", "'(?P<object_name>.+)$' @rest_utils.ajax() def get(self, request, container, object_name): return api.swift.swift_get_object( request,", "@urls.register class Objects(generic.View): \"\"\"API for a list of swift objects", "about the Swift installation. \"\"\" url_regex = r'swift/info/$' @rest_utils.ajax() def", "object_name): dest_container = request.DATA['dest_container'] dest_name = request.DATA['dest_name'] try: result =", "\"\"\"Get object information. :param request: :param container: :return: \"\"\" path", "== '/': result = api.swift.swift_create_pseudo_folder( request, container, object_name ) else:", "if the container already exists try: api.swift.swift_create_container(request, container, metadata=metadata) except", "container): \"\"\"Get the container details \"\"\" return api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax()", "container in containers] return {'items': containers, 'has_more': has_more} @urls.register class", "or pseudo-folder :param request: :param container: :param object_name: If the", "the listing if we're filtering for # contents of a", "'has_more': has_more} @urls.register class Container(generic.View): \"\"\"API for swift container level", "try: result = api.swift.swift_copy_object( request, container, object_name, dest_container, dest_name )", "= [{ 'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name, 'name':", "'application/octet-stream' response['Content-Length'] = obj.bytes return response @urls.register class ObjectMetadata(generic.View): \"\"\"API", "container, prefix=path ) # filter out the folder from the", "in compliance with the License. 
# You may obtain a", "[{ 'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name, 'name': o.name.split('/')[-1],", "api.swift.swift_get_object( request, container, object_name ) # Add the original file", "exceptions.AlreadyExists as e: return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' %", "software # distributed under the License is distributed on an", "not None: path = urlunquote(path) objects = api.swift.swift_get_objects( request, container,", "ends in a '/' then a folder is created, rather", "the file data for the upload. :return: \"\"\" form =", "the body will be raw file content @csrf_exempt def post(self,", "POST path) ends in a '/' then a folder is", "'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name, 'name': o.name.split('/')[-1], 'bytes':", "Conflict return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s' % container, )", "import swift @urls.register class Info(generic.View): \"\"\"API for information about the", "container).to_dict() @rest_utils.ajax() def post(self, request, container): metadata = {} if", "api.swift.swift_create_pseudo_folder( request, container, object_name ) else: result = api.swift.swift_upload_object( request,", ") return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (container, result.name) ) @rest_utils.ajax() def", "e: return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (dest_container, result.name)", "= forms.FileField(required=False) @urls.register class Object(generic.View): \"\"\"API for a single swift", "def get(self, request, container, object_name): \"\"\"Get the object contents. 
\"\"\"", "forms from django.http import StreamingHttpResponse from django.utils.http import urlunquote from", "response['Content-Disposition'] = 'attachment; filename=\"%s\"' % safe response['Content-Type'] = 'application/octet-stream' response['Content-Length']", "# limitations under the License. \"\"\"API for the swift service.", "class ObjectMetadata(generic.View): \"\"\"API for a single swift object \"\"\" url_regex", "preserved in the # name given to the object. filename", "it's not empty. return rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True) def put(self, request,", "url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get object", "about the Swift installation. \"\"\" capabilities = api.swift.swift_get_capabilities(request) return {'info':", "\"\"\"Get the list of containers for this account TODO(neillc): Add", "rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (container, result.name) ) @rest_utils.ajax() def delete(self, request,", "and obj.orig_name: name, ext = os.path.splitext(obj.orig_name) filename = \"%s%s\" %", "# 409 Conflict return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s' %", "request, container, object_name): return api.swift.swift_get_object( request, container_name=container, object_name=object_name, with_data=False ).to_dict()", "= [container.to_dict() for container in containers] return {'items': containers, 'has_more':", "def put(self, request, container): metadata = {'is_public': request.DATA['is_public']} api.swift.swift_update_container(request, container,", "url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def post(self, request, container,", "as e: return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (dest_container,", "request, container): 
\"\"\"Get object information. :param request: :param container: :return:", "except exceptions.Conflict as e: # In case the given object", "class Object(generic.View): \"\"\"API for a single swift object or pseudo-folder", "= api.swift.swift_get_containers(request) containers = [container.to_dict() for container in containers] return", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "as rest_utils from openstack_dashboard.api import swift @urls.register class Info(generic.View): \"\"\"API", "api.swift.swift_update_container(request, container, metadata=metadata) @urls.register class Objects(generic.View): \"\"\"API for a list", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "from the listing if we're filtering for # contents of", "def get(self, request, container): \"\"\"Get the container details \"\"\" return", "r'swift/containers/$' @rest_utils.ajax() def get(self, request): \"\"\"Get the list of containers", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "the container details \"\"\" return api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax() def post(self,", "request, container, object_name): \"\"\"Get the object contents. 
\"\"\" obj =", "container, metadata=metadata) @urls.register class Objects(generic.View): \"\"\"API for a list of", "to in writing, software # distributed under the License is", "# contents of a (pseudo) folder contents = [{ 'path':", "api.swift.swift_delete_container(request, container) except exceptions.Conflict as e: # It cannot be", "Add the original file extension back on if it wasn't", "# See the License for the specific language governing permissions", "= r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def get(self, request, container, object_name):", "django.views.decorators.csrf import csrf_exempt from django.views import generic import six from", "path = request.GET.get('path') if path is not None: path =", "pseudo-folder \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$' # note: not", "pseudo-folder :param request: :param container: :param object_name: If the object_name", "= object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not os.path.splitext(obj.name)[1] and obj.orig_name: name, ext =", "u'/api/swift/containers/%s' % container, ) @rest_utils.ajax() def delete(self, request, container): try:", "or agreed to in writing, software # distributed under the", "url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$' # note: not an AJAX", "has_more} @urls.register class Container(generic.View): \"\"\"API for swift container level information", "required by applicable law or agreed to in writing, software", "UploadObjectForm(forms.Form): file = forms.FileField(required=False) @urls.register class Object(generic.View): \"\"\"API for a", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "try: api.swift.swift_delete_folder(request, container, object_name) except exceptions.Conflict as e: # In", "with the License. 
# You may obtain a copy of", "409) else: api.swift.swift_delete_object(request, container, object_name) def get(self, request, container, object_name):", "return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (dest_container, result.name) )", "object_name) def get(self, request, container, object_name): \"\"\"Get the object contents.", "def get(self, request, container, object_name): return api.swift.swift_get_object( request, container_name=container, object_name=object_name,", "if not os.path.splitext(obj.name)[1] and obj.orig_name: name, ext = os.path.splitext(obj.orig_name) filename", "= \"%s%s\" % (filename, ext) response = StreamingHttpResponse(obj.data) safe =", "container, object_name, dest_container, dest_name ) except exceptions.AlreadyExists as e: return", "file content passed along with the request will be ignored", "container already exists try: api.swift.swift_create_container(request, container, metadata=metadata) except exceptions.AlreadyExists as", "from openstack_dashboard.api import swift @urls.register class Info(generic.View): \"\"\"API for information", "post(self, request, container): metadata = {} if 'is_public' in request.DATA:", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "raise rest_utils.AjaxError(500, 'Invalid request') data = form.clean() if object_name[-1] ==", "\"\"\" capabilities = api.swift.swift_get_capabilities(request) return {'info': capabilities} @urls.register class Containers(generic.View):", "distributed under the License is distributed on an \"AS IS\"", "def get(self, request, container): \"\"\"Get object information. 
:param request: :param", "capabilities} @urls.register class Containers(generic.View): \"\"\"API for swift container listing for", "@urls.register class ObjectMetadata(generic.View): \"\"\"API for a single swift object \"\"\"", "of swift objects \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def get(self,", "name given to the object. filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not", "governing permissions and # limitations under the License. \"\"\"API for", "\\ '(?P<object_name>.+)$' @rest_utils.ajax() def get(self, request, container, object_name): return api.swift.swift_get_object(", "api.swift.swift_delete_folder(request, container, object_name) except exceptions.Conflict as e: # In case", "return api.swift.swift_get_object( request, container_name=container, object_name=object_name, with_data=False ).to_dict() @urls.register class ObjectCopy(generic.View):", "express or implied. # See the License for the specific", "ObjectCopy(generic.View): \"\"\"API to copy a swift object \"\"\" url_regex =", "dest_name = request.DATA['dest_name'] try: result = api.swift.swift_copy_object( request, container, object_name,", "except in compliance with the License. # You may obtain", "and # limitations under the License. 
\"\"\"API for the swift", "Add pagination \"\"\" containers, has_more = api.swift.swift_get_containers(request) containers = [container.to_dict()", "for swift container listing for an account \"\"\" url_regex =", "{'items': contents} class UploadObjectForm(forms.Form): file = forms.FileField(required=False) @urls.register class Object(generic.View):", "url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def get(self, request, container,", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "} for o in objects[0] if o.name != path] return", "if six.PY2: safe = safe.encode('utf-8') response['Content-Disposition'] = 'attachment; filename=\"%s\"' %", "for an account \"\"\" url_regex = r'swift/containers/$' @rest_utils.ajax() def get(self,", "not use this file except in compliance with the License.", "container, object_name, data['file'] ) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (container, result.name)", "return {'info': capabilities} @urls.register class Containers(generic.View): \"\"\"API for swift container", "def delete(self, request, container): try: api.swift.swift_delete_container(request, container) except exceptions.Conflict as", "not os.path.splitext(obj.name)[1] and obj.orig_name: name, ext = os.path.splitext(obj.orig_name) filename =", "writing, software # distributed under the License is distributed on", "you may not use this file except in compliance with", "not form.is_valid(): raise rest_utils.AjaxError(500, 'Invalid request') data = form.clean() if", "'/': result = api.swift.swift_create_pseudo_folder( request, container, object_name ) else: result", "information about the Swift installation. 
\"\"\" url_regex = r'swift/info/$' @rest_utils.ajax()", "\"\"\" url_regex = r'swift/info/$' @rest_utils.ajax() def get(self, request): \"\"\"Get information", "object is pseudo folder # It cannot be deleted if", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "listing if we're filtering for # contents of a (pseudo)", "original file extension back on if it wasn't preserved in", "\"\"\" import os from django import forms from django.http import", "for swift container level information \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax()", "the object contents. \"\"\" obj = api.swift.swift_get_object( request, container, object_name", "= filename.replace(\",\", \"\") if six.PY2: safe = safe.encode('utf-8') response['Content-Disposition'] =", "the given object is pseudo folder # It cannot be", "single swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$' @rest_utils.ajax()", "empty. return rest_utils.JSONResponse(str(e), 409) else: api.swift.swift_delete_object(request, container, object_name) def get(self,", "openstack_dashboard.api import swift @urls.register class Info(generic.View): \"\"\"API for information about", ":param container: :param object_name: If the object_name (ie. POST path)", "django.views import generic import six from horizon import exceptions from", "horizon import exceptions from openstack_dashboard import api from openstack_dashboard.api.rest import", "% (container, result.name) ) @rest_utils.ajax() def delete(self, request, container, object_name):", "\"%s%s\" % (filename, ext) response = StreamingHttpResponse(obj.data) safe = filename.replace(\",\",", "the folder from the listing if we're filtering for #", "the object_name (ie. 
POST path) ends in a '/' then", ") except exceptions.AlreadyExists as e: return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse(", ":param container: :return: \"\"\" path = request.GET.get('path') if path is", "CONDITIONS OF ANY KIND, either express or implied. # See", "It cannot be deleted if it's not empty. return rest_utils.JSONResponse(str(e),", "Copyright 2015, Rackspace, US, Inc. # # Licensed under the", "2015, Rackspace, US, Inc. # # Licensed under the Apache", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "obj.orig_name: name, ext = os.path.splitext(obj.orig_name) filename = \"%s%s\" % (filename,", "the swift service. \"\"\" import os from django import forms", "response['Content-Type'] = 'application/octet-stream' response['Content-Length'] = obj.bytes return response @urls.register class", "folder is created, rather than an object. Any file content", "os from django import forms from django.http import StreamingHttpResponse from", "containers for this account TODO(neillc): Add pagination \"\"\" containers, has_more", "object_name ) else: result = api.swift.swift_upload_object( request, container, object_name, data['file']", "@rest_utils.ajax() def get(self, request): \"\"\"Get the list of containers for", "\"\"\"API for a single swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/'", ":param request: :param container: :return: \"\"\" path = request.GET.get('path') if", "result.name) ) @rest_utils.ajax() def delete(self, request, container, object_name): if object_name[-1]", "Info(generic.View): \"\"\"API for information about the Swift installation. \"\"\" url_regex", "r'swift/info/$' @rest_utils.ajax() def get(self, request): \"\"\"Get information about the Swift", "if not form.is_valid(): raise rest_utils.AjaxError(500, 'Invalid request') data = form.clean()", "if it's not empty. 
return rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True) def put(self,", "an account \"\"\" url_regex = r'swift/containers/$' @rest_utils.ajax() def get(self, request):", "o.name != path] return {'items': contents} class UploadObjectForm(forms.Form): file =", "with_data=False ).to_dict() @urls.register class ObjectCopy(generic.View): \"\"\"API to copy a swift", "= form.clean() if object_name[-1] == '/': result = api.swift.swift_create_pseudo_folder( request,", "StreamingHttpResponse(obj.data) safe = filename.replace(\",\", \"\") if six.PY2: safe = safe.encode('utf-8')", "for a single swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \\", "TODO(neillc): Add pagination \"\"\" containers, has_more = api.swift.swift_get_containers(request) containers =", "the upload. :return: \"\"\" form = UploadObjectForm(request.POST, request.FILES) if not", "url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get the", "OR CONDITIONS OF ANY KIND, either express or implied. #", "from django.views import generic import six from horizon import exceptions", "if o.name != path] return {'items': contents} class UploadObjectForm(forms.Form): file", "a folder is created, rather than an object. Any file", "request, container, object_name): \"\"\"Create or replace an object or pseudo-folder", "import utils as rest_utils from openstack_dashboard.api import swift @urls.register class", "the License is distributed on an \"AS IS\" BASIS, #", "container) except exceptions.Conflict as e: # It cannot be deleted", "@urls.register class Container(generic.View): \"\"\"API for swift container level information \"\"\"", "metadata=metadata) @urls.register class Objects(generic.View): \"\"\"API for a list of swift", "US, Inc. 
# # Licensed under the Apache License, Version", "django.utils.http import urlunquote from django.views.decorators.csrf import csrf_exempt from django.views import", "created, rather than an object. Any file content passed along", "openstack_dashboard.api.rest import utils as rest_utils from openstack_dashboard.api import swift @urls.register", "\"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def get(self, request,", "Swift installation. \"\"\" capabilities = api.swift.swift_get_capabilities(request) return {'info': capabilities} @urls.register", "data['file'] ) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (container, result.name) ) @rest_utils.ajax()", "the request will be ignored in that case. POST parameter:", "upload. :return: \"\"\" form = UploadObjectForm(request.POST, request.FILES) if not form.is_valid():", "then a folder is created, rather than an object. Any", "a list of swift objects \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax()", "law or agreed to in writing, software # distributed under", "= 'application/octet-stream' response['Content-Length'] = obj.bytes return response @urls.register class ObjectMetadata(generic.View):", "rather than an object. 
Any file content passed along with", "@csrf_exempt def post(self, request, container, object_name): \"\"\"Create or replace an", "[container.to_dict() for container in containers] return {'items': containers, 'has_more': has_more}", "urls from openstack_dashboard.api.rest import utils as rest_utils from openstack_dashboard.api import", "single swift object or pseudo-folder \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \\", "for container in containers] return {'items': containers, 'has_more': has_more} @urls.register", "swift container level information \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def", "for the swift service. \"\"\" import os from django import", ":param object_name: If the object_name (ie. POST path) ends in", "or pseudo-folder \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$' # note:", "!= path] return {'items': contents} class UploadObjectForm(forms.Form): file = forms.FileField(required=False)", "contents. 
\"\"\" obj = api.swift.swift_get_object( request, container, object_name ) #", "api.swift.swift_upload_object( request, container, object_name, data['file'] ) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' %", "an exception if the container already exists try: api.swift.swift_create_container(request, container,", "api.swift.swift_get_object( request, container_name=container, object_name=object_name, with_data=False ).to_dict() @urls.register class ObjectCopy(generic.View): \"\"\"API", "container, object_name): if object_name[-1] == '/': try: api.swift.swift_delete_folder(request, container, object_name)", "may obtain a copy of the License at # #", "class UploadObjectForm(forms.Form): file = forms.FileField(required=False) @urls.register class Object(generic.View): \"\"\"API for", "containers = [container.to_dict() for container in containers] return {'items': containers,", "= api.swift.swift_get_objects( request, container, prefix=path ) # filter out the", "exceptions.Conflict as e: # It cannot be deleted if it's", "container, object_name): dest_container = request.DATA['dest_container'] dest_name = request.DATA['dest_name'] try: result", "request - the body will be raw file content @csrf_exempt", "\"\"\" url_regex = r'swift/containers/$' @rest_utils.ajax() def get(self, request): \"\"\"Get the", "object_name) except exceptions.Conflict as e: # In case the given", "{'items': containers, 'has_more': has_more} @urls.register class Container(generic.View): \"\"\"API for swift", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Inc. # # Licensed under the Apache License, Version 2.0", "= os.path.splitext(obj.orig_name) filename = \"%s%s\" % (filename, ext) response =", "will be ignored in that case. 
POST parameter: :param file:", "'attachment; filename=\"%s\"' % safe response['Content-Type'] = 'application/octet-stream' response['Content-Length'] = obj.bytes", "raw file content @csrf_exempt def post(self, request, container, object_name): \"\"\"Create", "result = api.swift.swift_upload_object( request, container, object_name, data['file'] ) return rest_utils.CreatedResponse(", "may not use this file except in compliance with the", "o.name, 'name': o.name.split('/')[-1], 'bytes': o.bytes, 'is_subdir': isinstance(o, swift.PseudoFolder), 'is_object': not", "e: # 409 Conflict return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s'", "u'/api/swift/containers/%s/object/%s' % (container, result.name) ) @rest_utils.ajax() def delete(self, request, container,", "try: api.swift.swift_delete_container(request, container) except exceptions.Conflict as e: # It cannot", "swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def", "request, container): metadata = {'is_public': request.DATA['is_public']} api.swift.swift_update_container(request, container, metadata=metadata) @urls.register", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import exceptions from openstack_dashboard import api from openstack_dashboard.api.rest import urls", "o in objects[0] if o.name != path] return {'items': contents}", "will raise an exception if the container already exists try:", "request.DATA: metadata['is_public'] = request.DATA['is_public'] # This will raise an exception", "this file except in compliance with the License. # You", "(pseudo) folder contents = [{ 'path': o.subdir if isinstance(o, swift.PseudoFolder)", "container_name=container, object_name=object_name, with_data=False ).to_dict() @urls.register class ObjectCopy(generic.View): \"\"\"API to copy", "not empty. 
return rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True) def put(self, request, container):", "we're filtering for # contents of a (pseudo) folder contents", "else o.name, 'name': o.name.split('/')[-1], 'bytes': o.bytes, 'is_subdir': isinstance(o, swift.PseudoFolder), 'is_object':", "container, object_name) def get(self, request, container, object_name): \"\"\"Get the object", "account \"\"\" url_regex = r'swift/containers/$' @rest_utils.ajax() def get(self, request): \"\"\"Get", "filename = \"%s%s\" % (filename, ext) response = StreamingHttpResponse(obj.data) safe", "obj.bytes return response @urls.register class ObjectMetadata(generic.View): \"\"\"API for a single", "object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not os.path.splitext(obj.name)[1] and obj.orig_name: name, ext = os.path.splitext(obj.orig_name)", "container): try: api.swift.swift_delete_container(request, container) except exceptions.Conflict as e: # It", "request.DATA['is_public']} api.swift.swift_update_container(request, container, metadata=metadata) @urls.register class Objects(generic.View): \"\"\"API for a", "'/': try: api.swift.swift_delete_folder(request, container, object_name) except exceptions.Conflict as e: #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s' % container, ) @rest_utils.ajax() def delete(self,", "\"\"\"API for the swift service. 
\"\"\" import os from django", "get(self, request, container): \"\"\"Get the container details \"\"\" return api.swift.swift_get_container(request,", "api.swift.swift_delete_object(request, container, object_name) def get(self, request, container, object_name): \"\"\"Get the", "request: :param container: :return: \"\"\" path = request.GET.get('path') if path", "body will be raw file content @csrf_exempt def post(self, request,", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "pagination \"\"\" containers, has_more = api.swift.swift_get_containers(request) containers = [container.to_dict() for", ") else: result = api.swift.swift_upload_object( request, container, object_name, data['file'] )", "for the upload. :return: \"\"\" form = UploadObjectForm(request.POST, request.FILES) if", "request') data = form.clean() if object_name[-1] == '/': result =", "file content @csrf_exempt def post(self, request, container, object_name): \"\"\"Create or", "six from horizon import exceptions from openstack_dashboard import api from", "on if it wasn't preserved in the # name given", "= obj.bytes return response @urls.register class ObjectMetadata(generic.View): \"\"\"API for a", "request): \"\"\"Get information about the Swift installation. 
\"\"\" capabilities =", "for o in objects[0] if o.name != path] return {'items':", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "{'is_public': request.DATA['is_public']} api.swift.swift_update_container(request, container, metadata=metadata) @urls.register class Objects(generic.View): \"\"\"API for", "# note: not an AJAX request - the body will", "details \"\"\" return api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax() def post(self, request, container):", "Container(generic.View): \"\"\"API for swift container level information \"\"\" url_regex =", "urlunquote(path) objects = api.swift.swift_get_objects( request, container, prefix=path ) # filter", "rest_utils.JSONResponse(str(e), 409) else: api.swift.swift_delete_object(request, container, object_name) def get(self, request, container,", "given to the object. filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not os.path.splitext(obj.name)[1]", "deleted if it's not empty. return rest_utils.JSONResponse(str(e), 409) else: api.swift.swift_delete_object(request,", "not isinstance(o, swift.PseudoFolder), 'content_type': getattr(o, 'content_type', None) } for o", "path] return {'items': contents} class UploadObjectForm(forms.Form): file = forms.FileField(required=False) @urls.register", "metadata = {} if 'is_public' in request.DATA: metadata['is_public'] = request.DATA['is_public']", "metadata = {'is_public': request.DATA['is_public']} api.swift.swift_update_container(request, container, metadata=metadata) @urls.register class Objects(generic.View):", "from django.http import StreamingHttpResponse from django.utils.http import urlunquote from django.views.decorators.csrf", ") # Add the original file extension back on if", "if path is not None: path = urlunquote(path) objects =", "objects[0] if o.name != path] return {'items': contents} class UploadObjectForm(forms.Form):", "swift object or pseudo-folder \"\"\" url_regex = 
r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$'", "# filter out the folder from the listing if we're", "return rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True) def put(self, request, container): metadata =", "folder contents = [{ 'path': o.subdir if isinstance(o, swift.PseudoFolder) else", "name, ext = os.path.splitext(obj.orig_name) filename = \"%s%s\" % (filename, ext)", "safe response['Content-Type'] = 'application/octet-stream' response['Content-Length'] = obj.bytes return response @urls.register", "filtering for # contents of a (pseudo) folder contents =", "container: :param object_name: If the object_name (ie. POST path) ends", "safe.encode('utf-8') response['Content-Disposition'] = 'attachment; filename=\"%s\"' % safe response['Content-Type'] = 'application/octet-stream'", "import api from openstack_dashboard.api.rest import urls from openstack_dashboard.api.rest import utils", "container, ) @rest_utils.ajax() def delete(self, request, container): try: api.swift.swift_delete_container(request, container)", "to the object. filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not os.path.splitext(obj.name)[1] and", "language governing permissions and # limitations under the License. 
\"\"\"API", "@urls.register class Info(generic.View): \"\"\"API for information about the Swift installation.", "\\ '(?P<object_name>.+)$' @rest_utils.ajax() def post(self, request, container, object_name): dest_container =", "rest_utils.AjaxError(500, 'Invalid request') data = form.clean() if object_name[-1] == '/':", "delete(self, request, container, object_name): if object_name[-1] == '/': try: api.swift.swift_delete_folder(request,", "request.DATA['dest_name'] try: result = api.swift.swift_copy_object( request, container, object_name, dest_container, dest_name", "\"\"\"API for a list of swift objects \"\"\" url_regex =", "= r'swift/info/$' @rest_utils.ajax() def get(self, request): \"\"\"Get information about the", "return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s' % container, ) @rest_utils.ajax()", "rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True) def put(self, request, container): metadata = {'is_public':", "be ignored in that case. POST parameter: :param file: the", "import generic import six from horizon import exceptions from openstack_dashboard", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "cannot be deleted if it's not empty. 
return rest_utils.JSONResponse(str(e), 409)", "= urlunquote(path) objects = api.swift.swift_get_objects( request, container, prefix=path ) #", ":return: \"\"\" path = request.GET.get('path') if path is not None:", "container, object_name ) else: result = api.swift.swift_upload_object( request, container, object_name,", "UploadObjectForm(request.POST, request.FILES) if not form.is_valid(): raise rest_utils.AjaxError(500, 'Invalid request') data", "def post(self, request, container): metadata = {} if 'is_public' in", "contents} class UploadObjectForm(forms.Form): file = forms.FileField(required=False) @urls.register class Object(generic.View): \"\"\"API", "an AJAX request - the body will be raw file", "or implied. # See the License for the specific language", "from horizon import exceptions from openstack_dashboard import api from openstack_dashboard.api.rest", "@rest_utils.ajax() def delete(self, request, container, object_name): if object_name[-1] == '/':", "this account TODO(neillc): Add pagination \"\"\" containers, has_more = api.swift.swift_get_containers(request)", "case the given object is pseudo folder # It cannot", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "object_name ) # Add the original file extension back on", "from openstack_dashboard.api.rest import utils as rest_utils from openstack_dashboard.api import swift", "exists try: api.swift.swift_create_container(request, container, metadata=metadata) except exceptions.AlreadyExists as e: #", "folder from the listing if we're filtering for # contents", "'/' then a folder is created, rather than an object.", "object. 
filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not os.path.splitext(obj.name)[1] and obj.orig_name: name,", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "as e: # In case the given object is pseudo", "request, container_name=container, object_name=object_name, with_data=False ).to_dict() @urls.register class ObjectCopy(generic.View): \"\"\"API to", "= api.swift.swift_copy_object( request, container, object_name, dest_container, dest_name ) except exceptions.AlreadyExists", "'is_public' in request.DATA: metadata['is_public'] = request.DATA['is_public'] # This will raise", "if it's not empty. return rest_utils.JSONResponse(str(e), 409) else: api.swift.swift_delete_object(request, container,", "path is not None: path = urlunquote(path) objects = api.swift.swift_get_objects(", "{'info': capabilities} @urls.register class Containers(generic.View): \"\"\"API for swift container listing", "\"\"\"Get the container details \"\"\" return api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax() def", "'content_type', None) } for o in objects[0] if o.name !=", "object_name): \"\"\"Create or replace an object or pseudo-folder :param request:", "in a '/' then a folder is created, rather than", "get(self, request): \"\"\"Get the list of containers for this account", "= r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get the container", "'Invalid request') data = form.clean() if object_name[-1] == '/': result", "object_name=object_name, with_data=False ).to_dict() @urls.register class ObjectCopy(generic.View): \"\"\"API to copy a", "request.FILES) if not form.is_valid(): raise rest_utils.AjaxError(500, 'Invalid request') data =", "(the \"License\"); # you may not use this file except", "ext) response = StreamingHttpResponse(obj.data) safe = filename.replace(\",\", \"\") if six.PY2:", "except exceptions.AlreadyExists as e: return 
rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s'", "# you may not use this file except in compliance", "in objects[0] if o.name != path] return {'items': contents} class", "\"\"\"API for swift container listing for an account \"\"\" url_regex", "exceptions.AlreadyExists as e: # 409 Conflict return rest_utils.JSONResponse(str(e), 409) return", "return {'items': containers, 'has_more': has_more} @urls.register class Container(generic.View): \"\"\"API for", "post(self, request, container, object_name): dest_container = request.DATA['dest_container'] dest_name = request.DATA['dest_name']", "api.swift.swift_get_capabilities(request) return {'info': capabilities} @urls.register class Containers(generic.View): \"\"\"API for swift", "openstack_dashboard.api.rest import urls from openstack_dashboard.api.rest import utils as rest_utils from", "limitations under the License. \"\"\"API for the swift service. \"\"\"", "installation. \"\"\" capabilities = api.swift.swift_get_capabilities(request) return {'info': capabilities} @urls.register class", "or replace an object or pseudo-folder :param request: :param container:", "object_name[-1] == '/': try: api.swift.swift_delete_folder(request, container, object_name) except exceptions.Conflict as", "request, container): \"\"\"Get the container details \"\"\" return api.swift.swift_get_container(request, container).to_dict()", "safe = filename.replace(\",\", \"\") if six.PY2: safe = safe.encode('utf-8') response['Content-Disposition']", "class ObjectCopy(generic.View): \"\"\"API to copy a swift object \"\"\" url_regex", "parameter: :param file: the file data for the upload. 
:return:", "object_name): if object_name[-1] == '/': try: api.swift.swift_delete_folder(request, container, object_name) except", "response @urls.register class ObjectMetadata(generic.View): \"\"\"API for a single swift object", "object_name, dest_container, dest_name ) except exceptions.AlreadyExists as e: return rest_utils.JSONResponse(str(e),", "container, object_name): \"\"\"Create or replace an object or pseudo-folder :param", "exceptions from openstack_dashboard import api from openstack_dashboard.api.rest import urls from", "rest_utils.CreatedResponse( u'/api/swift/containers/%s' % container, ) @rest_utils.ajax() def delete(self, request, container):", "file extension back on if it wasn't preserved in the", "= {} if 'is_public' in request.DATA: metadata['is_public'] = request.DATA['is_public'] #", "the License. \"\"\"API for the swift service. \"\"\" import os", "isinstance(o, swift.PseudoFolder), 'is_object': not isinstance(o, swift.PseudoFolder), 'content_type': getattr(o, 'content_type', None)", "passed along with the request will be ignored in that", "object or pseudo-folder \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$' #", "# # Unless required by applicable law or agreed to", "# Add the original file extension back on if it", "Containers(generic.View): \"\"\"API for swift container listing for an account \"\"\"", "@rest_utils.ajax(data_required=True) def put(self, request, container): metadata = {'is_public': request.DATA['is_public']} api.swift.swift_update_container(request,", "\"\"\"Get the object contents. 
\"\"\" obj = api.swift.swift_get_object( request, container,", "= request.DATA['dest_name'] try: result = api.swift.swift_copy_object( request, container, object_name, dest_container,", "will be raw file content @csrf_exempt def post(self, request, container,", "is pseudo folder # It cannot be deleted if it's", "return response @urls.register class ObjectMetadata(generic.View): \"\"\"API for a single swift", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "\"\"\" return api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax() def post(self, request, container): metadata", "= r'swift/containers/$' @rest_utils.ajax() def get(self, request): \"\"\"Get the list of", "return rest_utils.JSONResponse(str(e), 409) else: api.swift.swift_delete_object(request, container, object_name) def get(self, request,", "Version 2.0 (the \"License\"); # you may not use this", "generic import six from horizon import exceptions from openstack_dashboard import", "get(self, request): \"\"\"Get information about the Swift installation. \"\"\" capabilities", "object or pseudo-folder :param request: :param container: :param object_name: If", "path) ends in a '/' then a folder is created,", "(ie. POST path) ends in a '/' then a folder", "for information about the Swift installation. \"\"\" url_regex = r'swift/info/$'", "class Info(generic.View): \"\"\"API for information about the Swift installation. \"\"\"", "container, metadata=metadata) except exceptions.AlreadyExists as e: # 409 Conflict return", "wasn't preserved in the # name given to the object.", "exception if the container already exists try: api.swift.swift_create_container(request, container, metadata=metadata)", "@rest_utils.ajax() def post(self, request, container, object_name): dest_container = request.DATA['dest_container'] dest_name", "file: the file data for the upload. 
:return: \"\"\" form", "(container, result.name) ) @rest_utils.ajax() def delete(self, request, container, object_name): if", "objects \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def get(self, request, container):", ") # filter out the folder from the listing if", "implied. # See the License for the specific language governing", "for a single swift object or pseudo-folder \"\"\" url_regex =", "409) @rest_utils.ajax(data_required=True) def put(self, request, container): metadata = {'is_public': request.DATA['is_public']}", "rest_utils from openstack_dashboard.api import swift @urls.register class Info(generic.View): \"\"\"API for", "request.GET.get('path') if path is not None: path = urlunquote(path) objects", "\"\") if six.PY2: safe = safe.encode('utf-8') response['Content-Disposition'] = 'attachment; filename=\"%s\"'", "under the Apache License, Version 2.0 (the \"License\"); # you", "information about the Swift installation. \"\"\" capabilities = api.swift.swift_get_capabilities(request) return", ":param file: the file data for the upload. :return: \"\"\"", "response['Content-Length'] = obj.bytes return response @urls.register class ObjectMetadata(generic.View): \"\"\"API for", "in request.DATA: metadata['is_public'] = request.DATA['is_public'] # This will raise an", "form = UploadObjectForm(request.POST, request.FILES) if not form.is_valid(): raise rest_utils.AjaxError(500, 'Invalid", "the object. filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not os.path.splitext(obj.name)[1] and obj.orig_name:", "it's not empty. 
return rest_utils.JSONResponse(str(e), 409) else: api.swift.swift_delete_object(request, container, object_name)", "\"\"\"API to copy a swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/copy/'", "six.PY2: safe = safe.encode('utf-8') response['Content-Disposition'] = 'attachment; filename=\"%s\"' % safe", "containers, 'has_more': has_more} @urls.register class Container(generic.View): \"\"\"API for swift container", "under the License. \"\"\"API for the swift service. \"\"\" import", "swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def", "by applicable law or agreed to in writing, software #", "return rest_utils.CreatedResponse( u'/api/swift/containers/%s' % container, ) @rest_utils.ajax() def delete(self, request,", "response = StreamingHttpResponse(obj.data) safe = filename.replace(\",\", \"\") if six.PY2: safe", "objects = api.swift.swift_get_objects( request, container, prefix=path ) # filter out", "import six from horizon import exceptions from openstack_dashboard import api", "a swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$' @rest_utils.ajax()", "url_regex = r'swift/containers/$' @rest_utils.ajax() def get(self, request): \"\"\"Get the list", "'name': o.name.split('/')[-1], 'bytes': o.bytes, 'is_subdir': isinstance(o, swift.PseudoFolder), 'is_object': not isinstance(o,", "file = forms.FileField(required=False) @urls.register class Object(generic.View): \"\"\"API for a single", "\"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def post(self, request,", "container: :return: \"\"\" path = request.GET.get('path') if path is not", "be deleted if it's not empty. 
return rest_utils.JSONResponse(str(e), 409) else:", "\"\"\" containers, has_more = api.swift.swift_get_containers(request) containers = [container.to_dict() for container", "already exists try: api.swift.swift_create_container(request, container, metadata=metadata) except exceptions.AlreadyExists as e:", "pseudo folder # It cannot be deleted if it's not", "empty. return rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True) def put(self, request, container): metadata", "@urls.register class ObjectCopy(generic.View): \"\"\"API to copy a swift object \"\"\"", "back on if it wasn't preserved in the # name", "object. Any file content passed along with the request will", "@rest_utils.ajax() def post(self, request, container): metadata = {} if 'is_public'", "if it wasn't preserved in the # name given to", "result = api.swift.swift_copy_object( request, container, object_name, dest_container, dest_name ) except", "'content_type': getattr(o, 'content_type', None) } for o in objects[0] if", "prefix=path ) # filter out the folder from the listing", "of containers for this account TODO(neillc): Add pagination \"\"\" containers,", "container): metadata = {} if 'is_public' in request.DATA: metadata['is_public'] =", "container): \"\"\"Get object information. :param request: :param container: :return: \"\"\"", "with the request will be ignored in that case. 
POST", "note: not an AJAX request - the body will be", "# This will raise an exception if the container already", "request, container, object_name): dest_container = request.DATA['dest_container'] dest_name = request.DATA['dest_name'] try:", "o.bytes, 'is_subdir': isinstance(o, swift.PseudoFolder), 'is_object': not isinstance(o, swift.PseudoFolder), 'content_type': getattr(o,", "- the body will be raw file content @csrf_exempt def", "os.path.splitext(obj.orig_name) filename = \"%s%s\" % (filename, ext) response = StreamingHttpResponse(obj.data)", "= api.swift.swift_get_capabilities(request) return {'info': capabilities} @urls.register class Containers(generic.View): \"\"\"API for", "capabilities = api.swift.swift_get_capabilities(request) return {'info': capabilities} @urls.register class Containers(generic.View): \"\"\"API", "a single swift object or pseudo-folder \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/object/'", "containers, has_more = api.swift.swift_get_containers(request) containers = [container.to_dict() for container in", "= {'is_public': request.DATA['is_public']} api.swift.swift_update_container(request, container, metadata=metadata) @urls.register class Objects(generic.View): \"\"\"API", "from django import forms from django.http import StreamingHttpResponse from django.utils.http", "swift @urls.register class Info(generic.View): \"\"\"API for information about the Swift", "o.name.split('/')[-1], 'bytes': o.bytes, 'is_subdir': isinstance(o, swift.PseudoFolder), 'is_object': not isinstance(o, swift.PseudoFolder),", "If the object_name (ie. 
POST path) ends in a '/'", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "'is_object': not isinstance(o, swift.PseudoFolder), 'content_type': getattr(o, 'content_type', None) } for", "extension back on if it wasn't preserved in the #", "= r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$' # note: not an AJAX request", "Unless required by applicable law or agreed to in writing,", "= safe.encode('utf-8') response['Content-Disposition'] = 'attachment; filename=\"%s\"' % safe response['Content-Type'] =", "# In case the given object is pseudo folder #", "request, container, object_name): if object_name[-1] == '/': try: api.swift.swift_delete_folder(request, container,", "dest_container = request.DATA['dest_container'] dest_name = request.DATA['dest_name'] try: result = api.swift.swift_copy_object(", "copy a swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$'", "if object_name[-1] == '/': result = api.swift.swift_create_pseudo_folder( request, container, object_name", "@rest_utils.ajax() def get(self, request, container): \"\"\"Get the container details \"\"\"", "the specific language governing permissions and # limitations under the", "\"\"\" path = request.GET.get('path') if path is not None: path", "permissions and # limitations under the License. 
\"\"\"API for the", "'is_subdir': isinstance(o, swift.PseudoFolder), 'is_object': not isinstance(o, swift.PseudoFolder), 'content_type': getattr(o, 'content_type',", "applicable law or agreed to in writing, software # distributed", "class Containers(generic.View): \"\"\"API for swift container listing for an account", "container details \"\"\" return api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax() def post(self, request,", "contents = [{ 'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name,", "request, container, object_name, data['file'] ) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (container,", "if isinstance(o, swift.PseudoFolder) else o.name, 'name': o.name.split('/')[-1], 'bytes': o.bytes, 'is_subdir':", "along with the request will be ignored in that case.", "data for the upload. :return: \"\"\" form = UploadObjectForm(request.POST, request.FILES)", "swift.PseudoFolder) else o.name, 'name': o.name.split('/')[-1], 'bytes': o.bytes, 'is_subdir': isinstance(o, swift.PseudoFolder),", ":param request: :param container: :param object_name: If the object_name (ie.", "\"\"\" obj = api.swift.swift_get_object( request, container, object_name ) # Add", "to copy a swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \\", "def get(self, request): \"\"\"Get information about the Swift installation. \"\"\"", "get(self, request, container): \"\"\"Get object information. :param request: :param container:", "swift service. 
\"\"\" import os from django import forms from", "as e: # 409 Conflict return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse(", "In case the given object is pseudo folder # It", "try: api.swift.swift_create_container(request, container, metadata=metadata) except exceptions.AlreadyExists as e: # 409", "@rest_utils.ajax() def delete(self, request, container): try: api.swift.swift_delete_container(request, container) except exceptions.Conflict", "409 Conflict return rest_utils.JSONResponse(str(e), 409) return rest_utils.CreatedResponse( u'/api/swift/containers/%s' % container,", "if 'is_public' in request.DATA: metadata['is_public'] = request.DATA['is_public'] # This will", "return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (container, result.name) ) @rest_utils.ajax() def delete(self,", "in writing, software # distributed under the License is distributed", "ext = os.path.splitext(obj.orig_name) filename = \"%s%s\" % (filename, ext) response", "request will be ignored in that case. POST parameter: :param", "= api.swift.swift_get_object( request, container, object_name ) # Add the original", "given object is pseudo folder # It cannot be deleted", "Swift installation. \"\"\" url_regex = r'swift/info/$' @rest_utils.ajax() def get(self, request):", "urlunquote from django.views.decorators.csrf import csrf_exempt from django.views import generic import", "information. :param request: :param container: :return: \"\"\" path = request.GET.get('path')", "get(self, request, container, object_name): return api.swift.swift_get_object( request, container_name=container, object_name=object_name, with_data=False", "# It cannot be deleted if it's not empty. return", "ignored in that case. 
POST parameter: :param file: the file", "not an AJAX request - the body will be raw", "None) } for o in objects[0] if o.name != path]", "for this account TODO(neillc): Add pagination \"\"\" containers, has_more =", "exceptions.Conflict as e: # In case the given object is", ").to_dict() @urls.register class ObjectCopy(generic.View): \"\"\"API to copy a swift object", "\"\"\"API for a single swift object or pseudo-folder \"\"\" url_regex", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "swift objects \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def get(self, request,", "License, Version 2.0 (the \"License\"); # you may not use", "the container already exists try: api.swift.swift_create_container(request, container, metadata=metadata) except exceptions.AlreadyExists", "else: result = api.swift.swift_upload_object( request, container, object_name, data['file'] ) return", "list of containers for this account TODO(neillc): Add pagination \"\"\"", "the # name given to the object. filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1]", "# You may obtain a copy of the License at", "request, container, object_name, dest_container, dest_name ) except exceptions.AlreadyExists as e:", "request: :param container: :param object_name: If the object_name (ie. POST", "if we're filtering for # contents of a (pseudo) folder", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "delete(self, request, container): try: api.swift.swift_delete_container(request, container) except exceptions.Conflict as e:", "put(self, request, container): metadata = {'is_public': request.DATA['is_public']} api.swift.swift_update_container(request, container, metadata=metadata)", "installation. \"\"\" url_regex = r'swift/info/$' @rest_utils.ajax() def get(self, request): \"\"\"Get", "o.subdir if isinstance(o, swift.PseudoFolder) else o.name, 'name': o.name.split('/')[-1], 'bytes': o.bytes,", "object_name (ie. 
POST path) ends in a '/' then a", "safe = safe.encode('utf-8') response['Content-Disposition'] = 'attachment; filename=\"%s\"' % safe response['Content-Type']", "object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def get(self,", "@rest_utils.ajax() def get(self, request): \"\"\"Get information about the Swift installation.", "return api.swift.swift_get_container(request, container).to_dict() @rest_utils.ajax() def post(self, request, container): metadata =", "as e: # It cannot be deleted if it's not", "= r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def post(self, request, container, object_name):", "def post(self, request, container, object_name): dest_container = request.DATA['dest_container'] dest_name =", "% (filename, ext) response = StreamingHttpResponse(obj.data) safe = filename.replace(\",\", \"\")", "be deleted if it's not empty. return rest_utils.JSONResponse(str(e), 409) @rest_utils.ajax(data_required=True)", "api.swift.swift_get_containers(request) containers = [container.to_dict() for container in containers] return {'items':", "dest_name ) except exceptions.AlreadyExists as e: return rest_utils.JSONResponse(str(e), 409) return", "Objects(generic.View): \"\"\"API for a list of swift objects \"\"\" url_regex", "swift.PseudoFolder), 'content_type': getattr(o, 'content_type', None) } for o in objects[0]", "api from openstack_dashboard.api.rest import urls from openstack_dashboard.api.rest import utils as", "r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$' # note: not an AJAX request -", "the License for the specific language governing permissions and #", "object_name): return api.swift.swift_get_object( request, container_name=container, object_name=object_name, with_data=False ).to_dict() @urls.register class", "the Swift installation. 
\"\"\" capabilities = api.swift.swift_get_capabilities(request) return {'info': capabilities}", "content passed along with the request will be ignored in", "information \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def get(self, request, container):", "Apache License, Version 2.0 (the \"License\"); # you may not", "r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def get(self, request, container, object_name): return", "\"\"\"Create or replace an object or pseudo-folder :param request: :param", "either express or implied. # See the License for the", "AJAX request - the body will be raw file content", "\"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get", "(filename, ext) response = StreamingHttpResponse(obj.data) safe = filename.replace(\",\", \"\") if", "# Copyright 2015, Rackspace, US, Inc. # # Licensed under", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "folder # It cannot be deleted if it's not empty.", "has_more = api.swift.swift_get_containers(request) containers = [container.to_dict() for container in containers]", "= 'attachment; filename=\"%s\"' % safe response['Content-Type'] = 'application/octet-stream' response['Content-Length'] =", "e: # It cannot be deleted if it's not empty.", "ObjectMetadata(generic.View): \"\"\"API for a single swift object \"\"\" url_regex =", "e: # In case the given object is pseudo folder", "if object_name[-1] == '/': try: api.swift.swift_delete_folder(request, container, object_name) except exceptions.Conflict", "Rackspace, US, Inc. 
# # Licensed under the Apache License,", "from openstack_dashboard.api.rest import urls from openstack_dashboard.api.rest import utils as rest_utils", "\"\"\" form = UploadObjectForm(request.POST, request.FILES) if not form.is_valid(): raise rest_utils.AjaxError(500,", "request, container, prefix=path ) # filter out the folder from", "django.http import StreamingHttpResponse from django.utils.http import urlunquote from django.views.decorators.csrf import", "a single swift object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \\ '(?P<object_name>.+)$'", "request.DATA['is_public'] # This will raise an exception if the container", "@rest_utils.ajax() def get(self, request, container, object_name): return api.swift.swift_get_object( request, container_name=container,", "level information \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$' @rest_utils.ajax() def get(self, request,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "filename.replace(\",\", \"\") if six.PY2: safe = safe.encode('utf-8') response['Content-Disposition'] = 'attachment;", "request, container, object_name ) else: result = api.swift.swift_upload_object( request, container,", "License. \"\"\"API for the swift service. \"\"\" import os from", ") @rest_utils.ajax() def delete(self, request, container): try: api.swift.swift_delete_container(request, container) except", "object_name): \"\"\"Get the object contents. \"\"\" obj = api.swift.swift_get_object( request,", "'bytes': o.bytes, 'is_subdir': isinstance(o, swift.PseudoFolder), 'is_object': not isinstance(o, swift.PseudoFolder), 'content_type':", "@urls.register class Containers(generic.View): \"\"\"API for swift container listing for an", "except exceptions.AlreadyExists as e: # 409 Conflict return rest_utils.JSONResponse(str(e), 409)", "object information. 
:param request: :param container: :return: \"\"\" path =", "form.clean() if object_name[-1] == '/': result = api.swift.swift_create_pseudo_folder( request, container,", "import forms from django.http import StreamingHttpResponse from django.utils.http import urlunquote", "\"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def get(self, request, container): \"\"\"Get", "get(self, request, container, object_name): \"\"\"Get the object contents. \"\"\" obj", "filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if not os.path.splitext(obj.name)[1] and obj.orig_name: name, ext", "request, container, object_name ) # Add the original file extension", "container, object_name): \"\"\"Get the object contents. \"\"\" obj = api.swift.swift_get_object(", "import urlunquote from django.views.decorators.csrf import csrf_exempt from django.views import generic", "== '/': try: api.swift.swift_delete_folder(request, container, object_name) except exceptions.Conflict as e:", "object \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \\ '(?P<object_name>.+)$' @rest_utils.ajax() def post(self,", "dest_container, dest_name ) except exceptions.AlreadyExists as e: return rest_utils.JSONResponse(str(e), 409)", "\"License\"); # you may not use this file except in", "openstack_dashboard import api from openstack_dashboard.api.rest import urls from openstack_dashboard.api.rest import", "request, container): try: api.swift.swift_delete_container(request, container) except exceptions.Conflict as e: #", "it wasn't preserved in the # name given to the", "an object or pseudo-folder :param request: :param container: :param object_name:", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "contents of a (pseudo) folder contents = [{ 'path': o.subdir", "service. 
\"\"\" import os from django import forms from django.http", "from django.views.decorators.csrf import csrf_exempt from django.views import generic import six", "filename=\"%s\"' % safe response['Content-Type'] = 'application/octet-stream' response['Content-Length'] = obj.bytes return", "# distributed under the License is distributed on an \"AS", "csrf_exempt from django.views import generic import six from horizon import", "obj = api.swift.swift_get_object( request, container, object_name ) # Add the", "container): metadata = {'is_public': request.DATA['is_public']} api.swift.swift_update_container(request, container, metadata=metadata) @urls.register class", "None: path = urlunquote(path) objects = api.swift.swift_get_objects( request, container, prefix=path", "\"\"\"API for information about the Swift installation. \"\"\" url_regex =", "# Unless required by applicable law or agreed to in", "replace an object or pseudo-folder :param request: :param container: :param", "an object. Any file content passed along with the request", "@rest_utils.ajax() def get(self, request, container): \"\"\"Get object information. :param request:", "the list of containers for this account TODO(neillc): Add pagination", "object contents. \"\"\" obj = api.swift.swift_get_object( request, container, object_name )", "list of swift objects \"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$' @rest_utils.ajax() def", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "the Swift installation. \"\"\" url_regex = r'swift/info/$' @rest_utils.ajax() def get(self,", "the original file extension back on if it wasn't preserved", "that case. 
POST parameter: :param file: the file data for", "forms.FileField(required=False) @urls.register class Object(generic.View): \"\"\"API for a single swift object", "= api.swift.swift_upload_object( request, container, object_name, data['file'] ) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s'", "% safe response['Content-Type'] = 'application/octet-stream' response['Content-Length'] = obj.bytes return response", "'(?P<object_name>.+)$' @rest_utils.ajax() def post(self, request, container, object_name): dest_container = request.DATA['dest_container']", "data = form.clean() if object_name[-1] == '/': result = api.swift.swift_create_pseudo_folder(", "You may obtain a copy of the License at #", "import os from django import forms from django.http import StreamingHttpResponse", "in that case. POST parameter: :param file: the file data", "def delete(self, request, container, object_name): if object_name[-1] == '/': try:", "This will raise an exception if the container already exists", "\"\"\" url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \\ '(?P<object_name>.+)$' # note: not an", "url_regex = r'swift/info/$' @rest_utils.ajax() def get(self, request): \"\"\"Get information about", "utils as rest_utils from openstack_dashboard.api import swift @urls.register class Info(generic.View):", "container, object_name): return api.swift.swift_get_object( request, container_name=container, object_name=object_name, with_data=False ).to_dict() @urls.register", ":return: \"\"\" form = UploadObjectForm(request.POST, request.FILES) if not form.is_valid(): raise", "def get(self, request): \"\"\"Get the list of containers for this", "the Apache License, Version 2.0 (the \"License\"); # you may", "account TODO(neillc): Add pagination \"\"\" containers, has_more = api.swift.swift_get_containers(request) containers", "object_name, data['file'] ) return rest_utils.CreatedResponse( u'/api/swift/containers/%s/object/%s' % (container, result.name) )", 
"\"\"\"Get information about the Swift installation. \"\"\" capabilities = api.swift.swift_get_capabilities(request)", "else: api.swift.swift_delete_object(request, container, object_name) def get(self, request, container, object_name): \"\"\"Get", "swift.PseudoFolder), 'is_object': not isinstance(o, swift.PseudoFolder), 'content_type': getattr(o, 'content_type', None) }", "a '/' then a folder is created, rather than an", "# name given to the object. filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1] if", ") @rest_utils.ajax() def delete(self, request, container, object_name): if object_name[-1] ==" ]
[ "labels for _ in range(100): w = random.randrange(int(0.1*imw), imw) h", "y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return", "label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def __getitem__(self, idx): '''Load a image, and", "labels, sized [#obj,]. Returns: img: (PIL.Image) cropped image. selected_boxes: (tensor)", "mask = mask[:,0] & mask[:,1] #[N,] if not mask.any(): continue", "(str) path to index file. train: (boolean) train or test.", "(w-xmax, ymin, w-xmin, ymax). Args: img: (PIL.Image) image. boxes: (tensor)", "(center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2] mask", "y, x+w, y+h]]) center = (boxes[:,:2] + boxes[:,2:]) / 2", "selected bbox locations. labels: (tensor) selected bbox labels. ''' imw,", "torchvision.transforms as transforms from encoder import DataEncoder from PIL import", "len(lines) for line in lines: splited = line.strip().split() self.fnames.append(splited[0]) num_objs", "train, transform): ''' Args: root: (str) ditectory to images. list_file:", "Args: img: (PIL.Image) image. boxes: (tensor) bbox locations, sized [#obj,", "loc_target, conf_target def random_flip(self, img, boxes): '''Randomly flip the image", "import os import sys import os.path import random import numpy", "= img.width xmin = w - boxes[:,2] xmax = w", "= line.strip().split() self.fnames.append(splited[0]) num_objs = int(splited[1]) box = [] label", "= self.boxes[idx].clone() labels = self.labels[idx] # Data augmentation while training.", "loc & conf targets. 
loc_target, conf_target = self.data_encoder.encode(boxes, labels) return", "in range(num_objs): xmin = splited[2+5*i] ymin = splited[3+5*i] xmax =", "= random.randrange(imw - w) y = random.randrange(imh - h) roi", "__getitem__(self, idx): '''Load a image, and encode its bbox locations", "/ 2 # [N,2] roi2 = roi.expand(len(center), 4) # [N,4]", "(center < roi2[:,2:]) # [N,2] mask = mask[:,0] & mask[:,1]", "label targets, sized [8732,]. ''' # Load image and bbox", "as np import torch import torch.utils.data as data import torchvision.transforms", "random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) w = img.width xmin", "boxes, labels) # Scale bbox locaitons to [0,1]. w,h =", "bbox locations. For more details, see 'Chapter2.2: Data augmentation' of", "torch.Tensor([[x, y, x+w, y+h]]) center = (boxes[:,:2] + boxes[:,2:]) /", "random_flip(self, img, boxes): '''Randomly flip the image and adjust the", "the bbox locations. For more details, see 'Chapter2.2: Data augmentation'", "transforms from encoder import DataEncoder from PIL import Image, ImageOps", "ymax = splited[5+5*i] c = splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label))", "tensor. loc_target: (tensor) location targets, sized [8732,4]. conf_target: (tensor) label", "[#obj, 4]. labels: (tensor) bbox labels, sized [#obj,]. Returns: img:", "img = Image.open(os.path.join(self.root, fname)) boxes = self.boxes[idx].clone() labels = self.labels[idx]", "train self.transform = transform self.fnames = [] self.boxes = []", "= self.fnames[idx] img = Image.open(os.path.join(self.root, fname)) boxes = self.boxes[idx].clone() labels", "(tensor) bbox locations, sized [#obj, 4]. 
Returns: img: (PIL.Image) randomly", "min_iou is None: return img, boxes, labels for _ in", "= w - boxes[:,0] boxes[:,0] = xmin boxes[:,2] = xmax", "for i in range(num_objs): xmin = splited[2+5*i] ymin = splited[3+5*i]", "0.9]) if min_iou is None: return img, boxes, labels for", "self.boxes = [] self.labels = [] self.data_encoder = DataEncoder() with", "the paper. Args: img: (PIL.Image) image. boxes: (tensor) bbox locations,", "conf_target: (tensor) label targets, sized [8732,]. ''' # Load image", "img: (PIL.Image) image. boxes: (tensor) bbox locations, sized [#obj, 4].", "= [] self.labels = [] self.data_encoder = DataEncoder() with open(list_file)", "(boxes[:,:2] + boxes[:,2:]) / 2 # [N,2] roi2 = roi.expand(len(center),", "mask[:,0] & mask[:,1] #[N,] if not mask.any(): continue selected_boxes =", "4) # [N,4] mask = (center > roi2[:,:2]) & (center", "= random.randrange(imh - h) roi = torch.Tensor([[x, y, x+w, y+h]])", "if min_iou is None: return img, boxes, labels for _", "import print_function import os import sys import os.path import random", "from encoder import DataEncoder from PIL import Image, ImageOps class", "selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return img, selected_boxes, labels[mask]", "details, see 'Chapter2.2: Data augmentation' of the paper. Args: img:", "imw) h = random.randrange(int(0.1*imh), imh) if h > 2*w or", "the flipped bbox is: (w-xmax, ymin, w-xmin, ymax). Args: img:", "img, boxes def random_crop(self, img, boxes, labels): '''Randomly crop the", "selected bbox labels. ''' imw, imh = img.size while True:", "Args: idx: (int) image index. Returns: img: (tensor) image tensor.", "ymax class_index .. ''' from __future__ import print_function import os", "ymin xmax ymax class_index .. ''' from __future__ import print_function", "and bbox locations. 
fname = self.fnames[idx] img = Image.open(os.path.join(self.root, fname))", "while True: min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])", "= train self.transform = transform self.fnames = [] self.boxes =", "img, boxes, labels = self.random_crop(img, boxes, labels) # Scale bbox", "= [] self.boxes = [] self.labels = [] self.data_encoder =", "& conf targets. loc_target, conf_target = self.data_encoder.encode(boxes, labels) return img,", "encode its bbox locations and class labels. Args: idx: (int)", "[#obj, 4]. ''' if random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT)", "= xmin boxes[:,2] = xmax return img, boxes def random_crop(self,", "data import torchvision.transforms as transforms from encoder import DataEncoder from", "sized [8732,]. ''' # Load image and bbox locations. fname", "roi.expand(len(center), 4) # [N,4] mask = (center > roi2[:,:2]) &", "and encode its bbox locations and class labels. Args: idx:", "image. boxes: (tensor) bbox locations, sized [#obj, 4]. labels: (tensor)", "h > 2*w or w > 2*h: continue x =", "= boxes.index_select(0, mask.nonzero().squeeze(1)) iou = self.data_encoder.iou(selected_boxes, roi) if iou.min() <", "roi2[:,2:]) # [N,2] mask = mask[:,0] & mask[:,1] #[N,] if", "with open(list_file) as f: lines = f.readlines() self.num_samples = len(lines)", "bbox locations. For bbox (xmin, ymin, xmax, ymax), the flipped", "import torch.utils.data as data import torchvision.transforms as transforms from encoder", "if not mask.any(): continue selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1)) iou =", "self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def __getitem__(self, idx): '''Load a image, and encode", "4]. Returns: img: (PIL.Image) randomly flipped image. boxes: (tensor) randomly", "to [0,1]. 
w,h = img.size boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes) img =", "ymin = splited[3+5*i] xmax = splited[4+5*i] ymax = splited[5+5*i] c", "selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return img,", "boxes: (tensor) bbox locations, sized [#obj, 4]. labels: (tensor) bbox", "= mask[:,0] & mask[:,1] #[N,] if not mask.any(): continue selected_boxes", "DataEncoder from PIL import Image, ImageOps class ListDataset(data.Dataset): img_size =", "xmax, ymax), the flipped bbox is: (w-xmax, ymin, w-xmin, ymax).", "from __future__ import print_function import os import sys import os.path", "= xmax return img, boxes def random_crop(self, img, boxes, labels):", "bbox locations, sized [#obj, 4]. ''' if random.random() < 0.5:", "y = random.randrange(imh - h) roi = torch.Tensor([[x, y, x+w,", "as transforms from encoder import DataEncoder from PIL import Image,", "random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9]) if min_iou is None:", "iou.min() < min_iou: continue img = img.crop((x, y, x+w, y+h))", "f.readlines() self.num_samples = len(lines) for line in lines: splited =", "self.fnames.append(splited[0]) num_objs = int(splited[1]) box = [] label = []", "(tensor) selected bbox labels. ''' imw, imh = img.size while", "[8732,]. ''' # Load image and bbox locations. fname =", "image index. Returns: img: (tensor) image tensor. loc_target: (tensor) location", "= torch.Tensor([[x, y, x+w, y+h]]) center = (boxes[:,:2] + boxes[:,2:])", "and class labels. Args: idx: (int) image index. Returns: img:", "transform): ''' Args: root: (str) ditectory to images. list_file: (str)", "image tensor. loc_target: (tensor) location targets, sized [8732,4]. conf_target: (tensor)", "locations. labels: (tensor) selected bbox labels. ''' imw, imh =", "(boolean) train or test. transform: ([transforms]) image transforms. ''' self.root", "images. 
list_file: (str) path to index file. train: (boolean) train", "image and adjust the bbox locations. For bbox (xmin, ymin,", "boxes def random_crop(self, img, boxes, labels): '''Randomly crop the image", "loc_target, conf_target = self.data_encoder.encode(boxes, labels) return img, loc_target, conf_target def", "= img.size while True: min_iou = random.choice([None, 0.1, 0.3, 0.5,", "randomly flipped image. boxes: (tensor) randomly flipped bbox locations, sized", "(tensor) location targets, sized [8732,4]. conf_target: (tensor) label targets, sized", "xmax = w - boxes[:,0] boxes[:,0] = xmin boxes[:,2] =", "in lines: splited = line.strip().split() self.fnames.append(splited[0]) num_objs = int(splited[1]) box", "# Scale bbox locaitons to [0,1]. w,h = img.size boxes", "[] for i in range(num_objs): xmin = splited[2+5*i] ymin =", "h = random.randrange(int(0.1*imh), imh) if h > 2*w or w", "= root self.train = train self.transform = transform self.fnames =", "& mask[:,1] #[N,] if not mask.any(): continue selected_boxes = boxes.index_select(0,", "= f.readlines() self.num_samples = len(lines) for line in lines: splited", "index. Returns: img: (tensor) image tensor. loc_target: (tensor) location targets,", "image, and encode its bbox locations and class labels. Args:", "h) roi = torch.Tensor([[x, y, x+w, y+h]]) center = (boxes[:,:2]", "boxes: (tensor) bbox locations, sized [#obj, 4]. Returns: img: (PIL.Image)", "y, x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0,", "continue selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1)) iou = self.data_encoder.iou(selected_boxes, roi) if", "random.randrange(int(0.1*imh), imh) if h > 2*w or w > 2*h:", "transform: ([transforms]) image transforms. 
''' self.root = root self.train =", "_ in range(100): w = random.randrange(int(0.1*imw), imw) h = random.randrange(int(0.1*imh),", "its bbox locations and class labels. Args: idx: (int) image", "encoder import DataEncoder from PIL import Image, ImageOps class ListDataset(data.Dataset):", "w,h = img.size boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes) img = img.resize((self.img_size,self.img_size)) img", "flipped bbox is: (w-xmax, ymin, w-xmin, ymax). Args: img: (PIL.Image)", "print_function import os import sys import os.path import random import", "center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2] roi2", "(tensor) bbox locations, sized [#obj, 4]. labels: (tensor) bbox labels,", "boxes[:,2] = xmax return img, boxes def random_crop(self, img, boxes,", "conf targets. loc_target, conf_target = self.data_encoder.encode(boxes, labels) return img, loc_target,", "y+h]]) center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2]", "boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes) img = img.resize((self.img_size,self.img_size)) img = self.transform(img) #", "= img.resize((self.img_size,self.img_size)) img = self.transform(img) # Encode loc & conf", "labels. Args: idx: (int) image index. Returns: img: (tensor) image", "def __getitem__(self, idx): '''Load a image, and encode its bbox", "labels. ''' imw, imh = img.size while True: min_iou =", "img, boxes): '''Randomly flip the image and adjust the bbox", "None: return img, boxes, labels for _ in range(100): w", "# [N,4] mask = (center > roi2[:,:2]) & (center <", "self.transform(img) # Encode loc & conf targets. loc_target, conf_target =", "xmin ymin xmax ymax class_index .. 
''' from __future__ import", "splited = line.strip().split() self.fnames.append(splited[0]) num_objs = int(splited[1]) box = []", "The annotation file is organized as: image_name #obj xmin ymin", "self.labels.append(torch.LongTensor(label)) def __getitem__(self, idx): '''Load a image, and encode its", "imh) if h > 2*w or w > 2*h: continue", "= [] for i in range(num_objs): xmin = splited[2+5*i] ymin", "i in range(num_objs): xmin = splited[2+5*i] ymin = splited[3+5*i] xmax", "= random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9]) if min_iou is", "0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) w = img.width xmin = w", "see 'Chapter2.2: Data augmentation' of the paper. Args: img: (PIL.Image)", "if iou.min() < min_iou: continue img = img.crop((x, y, x+w,", "root, list_file, train, transform): ''' Args: root: (str) ditectory to", "img.width xmin = w - boxes[:,2] xmax = w -", "[] self.labels = [] self.data_encoder = DataEncoder() with open(list_file) as", "[#obj,]. Returns: img: (PIL.Image) cropped image. selected_boxes: (tensor) selected bbox", "boxes) img, boxes, labels = self.random_crop(img, boxes, labels) # Scale", "random import numpy as np import torch import torch.utils.data as", "for line in lines: splited = line.strip().split() self.fnames.append(splited[0]) num_objs =", "more details, see 'Chapter2.2: Data augmentation' of the paper. Args:", "= splited[4+5*i] ymax = splited[5+5*i] c = splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c))", "(PIL.Image) randomly flipped image. boxes: (tensor) randomly flipped bbox locations,", "x+w, y+h]]) center = (boxes[:,:2] + boxes[:,2:]) / 2 #", "locations. fname = self.fnames[idx] img = Image.open(os.path.join(self.root, fname)) boxes =", "train: (boolean) train or test. transform: ([transforms]) image transforms. 
'''", "torch.utils.data as data import torchvision.transforms as transforms from encoder import", "& (center < roi2[:,2:]) # [N,2] mask = mask[:,0] &", "(tensor) selected bbox locations. labels: (tensor) selected bbox labels. '''", "list_file, train, transform): ''' Args: root: (str) ditectory to images.", "label = [] for i in range(num_objs): xmin = splited[2+5*i]", "selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return img, selected_boxes, labels[mask] def __len__(self): return self.num_samples", "image/class/box from a annotation file. The annotation file is organized", "(tensor) image tensor. loc_target: (tensor) location targets, sized [8732,4]. conf_target:", "iou = self.data_encoder.iou(selected_boxes, roi) if iou.min() < min_iou: continue img", "for _ in range(100): w = random.randrange(int(0.1*imw), imw) h =", "image. boxes: (tensor) bbox locations, sized [#obj, 4]. Returns: img:", "splited[5+5*i] c = splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def __getitem__(self,", "organized as: image_name #obj xmin ymin xmax ymax class_index ..", "''' if random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) w =", "boxes, labels for _ in range(100): w = random.randrange(int(0.1*imw), imw)", "PIL import Image, ImageOps class ListDataset(data.Dataset): img_size = 300 def", "img = img.resize((self.img_size,self.img_size)) img = self.transform(img) # Encode loc &", "bbox locaitons to [0,1]. w,h = img.size boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes)", "ymax). Args: img: (PIL.Image) image. boxes: (tensor) bbox locations, sized", "self.random_flip(img, boxes) img, boxes, labels = self.random_crop(img, boxes, labels) #", "root: (str) ditectory to images. 
list_file: (str) path to index", "lines: splited = line.strip().split() self.fnames.append(splited[0]) num_objs = int(splited[1]) box =", "= 300 def __init__(self, root, list_file, train, transform): ''' Args:", "w - boxes[:,0] boxes[:,0] = xmin boxes[:,2] = xmax return", "< roi2[:,2:]) # [N,2] mask = mask[:,0] & mask[:,1] #[N,]", "sized [#obj, 4]. Returns: img: (PIL.Image) randomly flipped image. boxes:", "image. boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].", "or w > 2*h: continue x = random.randrange(imw - w)", "random_crop(self, img, boxes, labels): '''Randomly crop the image and adjust", "= self.random_flip(img, boxes) img, boxes, labels = self.random_crop(img, boxes, labels)", "ymax), the flipped bbox is: (w-xmax, ymin, w-xmin, ymax). Args:", "ymin, w-xmin, ymax). Args: img: (PIL.Image) image. boxes: (tensor) bbox", "Load image and bbox locations. fname = self.fnames[idx] img =", "roi2 = roi.expand(len(center), 4) # [N,4] mask = (center >", "in range(100): w = random.randrange(int(0.1*imw), imw) h = random.randrange(int(0.1*imh), imh)", "os.path import random import numpy as np import torch import", "sized [8732,4]. conf_target: (tensor) label targets, sized [8732,]. ''' #", "> 2*h: continue x = random.randrange(imw - w) y =", "bbox labels, sized [#obj,]. Returns: img: (PIL.Image) cropped image. selected_boxes:", "index file. train: (boolean) train or test. transform: ([transforms]) image", "= img.transpose(Image.FLIP_LEFT_RIGHT) w = img.width xmin = w - boxes[:,2]", "Encode loc & conf targets. loc_target, conf_target = self.data_encoder.encode(boxes, labels)", "sized [#obj, 4]. labels: (tensor) bbox labels, sized [#obj,]. Returns:", "img.resize((self.img_size,self.img_size)) img = self.transform(img) # Encode loc & conf targets.", "> 2*w or w > 2*h: continue x = random.randrange(imw", "''' self.root = root self.train = train self.transform = transform", "return img, boxes, labels for _ in range(100): w =", "file. 
train: (boolean) train or test. transform: ([transforms]) image transforms.", "'''Load a image, and encode its bbox locations and class", "bbox locations, sized [#obj, 4]. labels: (tensor) bbox labels, sized", "transforms. ''' self.root = root self.train = train self.transform =", "bbox locations. labels: (tensor) selected bbox labels. ''' imw, imh", "test. transform: ([transforms]) image transforms. ''' self.root = root self.train", "/= torch.Tensor([w,h,w,h]).expand_as(boxes) img = img.resize((self.img_size,self.img_size)) img = self.transform(img) # Encode", "of the paper. Args: img: (PIL.Image) image. boxes: (tensor) bbox", "box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def __getitem__(self, idx): '''Load a image,", "[N,2] mask = mask[:,0] & mask[:,1] #[N,] if not mask.any():", "''' # Load image and bbox locations. fname = self.fnames[idx]", "and adjust the bbox locations. For bbox (xmin, ymin, xmax,", "__future__ import print_function import os import sys import os.path import", "[#obj, 4]. Returns: img: (PIL.Image) randomly flipped image. boxes: (tensor)", "= img.crop((x, y, x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0,", "a annotation file. The annotation file is organized as: image_name", "def random_flip(self, img, boxes): '''Randomly flip the image and adjust", "file. The annotation file is organized as: image_name #obj xmin", "img, loc_target, conf_target def random_flip(self, img, boxes): '''Randomly flip the", "max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return img, selected_boxes, labels[mask] def", "to index file. train: (boolean) train or test. transform: ([transforms])", "4]. labels: (tensor) bbox labels, sized [#obj,]. 
Returns: img: (PIL.Image)", "range(num_objs): xmin = splited[2+5*i] ymin = splited[3+5*i] xmax = splited[4+5*i]", "300 def __init__(self, root, list_file, train, transform): ''' Args: root:", "max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return img, selected_boxes,", "xmax ymax class_index .. ''' from __future__ import print_function import", "# [N,2] mask = mask[:,0] & mask[:,1] #[N,] if not", "Returns: img: (tensor) image tensor. loc_target: (tensor) location targets, sized", "__init__(self, root, list_file, train, transform): ''' Args: root: (str) ditectory", "= splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def __getitem__(self, idx): '''Load", "boxes = self.boxes[idx].clone() labels = self.labels[idx] # Data augmentation while", "is None: return img, boxes, labels for _ in range(100):", "locations and class labels. Args: idx: (int) image index. Returns:", "labels): '''Randomly crop the image and adjust the bbox locations.", "random.randrange(imh - h) roi = torch.Tensor([[x, y, x+w, y+h]]) center", "Image.open(os.path.join(self.root, fname)) boxes = self.boxes[idx].clone() labels = self.labels[idx] # Data", "image transforms. ''' self.root = root self.train = train self.transform", "[] self.boxes = [] self.labels = [] self.data_encoder = DataEncoder()", "= splited[5+5*i] c = splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def", "augmentation' of the paper. Args: img: (PIL.Image) image. boxes: (tensor)", "bbox locations and class labels. Args: idx: (int) image index.", "locations. For bbox (xmin, ymin, xmax, ymax), the flipped bbox", "locations, sized [#obj, 4]. 
labels: (tensor) bbox labels, sized [#obj,].", "line in lines: splited = line.strip().split() self.fnames.append(splited[0]) num_objs = int(splited[1])", "bbox is: (w-xmax, ymin, w-xmin, ymax). Args: img: (PIL.Image) image.", "min_iou: continue img = img.crop((x, y, x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)", "splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def __getitem__(self, idx): '''Load a", "targets. loc_target, conf_target = self.data_encoder.encode(boxes, labels) return img, loc_target, conf_target", "return img, loc_target, conf_target def random_flip(self, img, boxes): '''Randomly flip", "if random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) w = img.width", "2 # [N,2] roi2 = roi.expand(len(center), 4) # [N,4] mask", "(str) ditectory to images. list_file: (str) path to index file.", "self.labels = [] self.data_encoder = DataEncoder() with open(list_file) as f:", "while training. if self.train: img, boxes = self.random_flip(img, boxes) img,", "= self.data_encoder.encode(boxes, labels) return img, loc_target, conf_target def random_flip(self, img,", "[N,2] roi2 = roi.expand(len(center), 4) # [N,4] mask = (center", "img, boxes = self.random_flip(img, boxes) img, boxes, labels = self.random_crop(img,", "Scale bbox locaitons to [0,1]. w,h = img.size boxes /=", "boxes, labels = self.random_crop(img, boxes, labels) # Scale bbox locaitons", "# [N,2] roi2 = roi.expand(len(center), 4) # [N,4] mask =", "ditectory to images. list_file: (str) path to index file. train:", "a image, and encode its bbox locations and class labels.", "os import sys import os.path import random import numpy as", "boxes[:,2] xmax = w - boxes[:,0] boxes[:,0] = xmin boxes[:,2]", "image_name #obj xmin ymin xmax ymax class_index .. ''' from", "bbox locations, sized [#obj, 4]. Returns: img: (PIL.Image) randomly flipped", "to images. 
list_file: (str) path to index file. train: (boolean)", "xmin = w - boxes[:,2] xmax = w - boxes[:,0]", "np import torch import torch.utils.data as data import torchvision.transforms as", "self.train = train self.transform = transform self.fnames = [] self.boxes", "as data import torchvision.transforms as transforms from encoder import DataEncoder", "loc_target: (tensor) location targets, sized [8732,4]. conf_target: (tensor) label targets,", "# Data augmentation while training. if self.train: img, boxes =", "img: (PIL.Image) cropped image. selected_boxes: (tensor) selected bbox locations. labels:", "image and adjust the bbox locations. For more details, see", "bbox locations. fname = self.fnames[idx] img = Image.open(os.path.join(self.root, fname)) boxes", "from a annotation file. The annotation file is organized as:", "(tensor) randomly flipped bbox locations, sized [#obj, 4]. ''' if", "roi = torch.Tensor([[x, y, x+w, y+h]]) center = (boxes[:,:2] +", "training. if self.train: img, boxes = self.random_flip(img, boxes) img, boxes,", "[] label = [] for i in range(num_objs): xmin =", "def random_crop(self, img, boxes, labels): '''Randomly crop the image and", "cropped image. selected_boxes: (tensor) selected bbox locations. labels: (tensor) selected", "= self.transform(img) # Encode loc & conf targets. loc_target, conf_target", "w = img.width xmin = w - boxes[:,2] xmax =", "self.random_crop(img, boxes, labels) # Scale bbox locaitons to [0,1]. w,h", "selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return img, selected_boxes, labels[mask] def __len__(self):", "labels: (tensor) selected bbox labels. ''' imw, imh = img.size", "#obj xmin ymin xmax ymax class_index .. ''' from __future__", "self.data_encoder = DataEncoder() with open(list_file) as f: lines = f.readlines()", "'Chapter2.2: Data augmentation' of the paper. 
Args: img: (PIL.Image) image.", "0.5, 0.7, 0.9]) if min_iou is None: return img, boxes,", "0.1, 0.3, 0.5, 0.7, 0.9]) if min_iou is None: return", "ImageOps class ListDataset(data.Dataset): img_size = 300 def __init__(self, root, list_file,", "self.num_samples = len(lines) for line in lines: splited = line.strip().split()", "w = random.randrange(int(0.1*imw), imw) h = random.randrange(int(0.1*imh), imh) if h", "- h) roi = torch.Tensor([[x, y, x+w, y+h]]) center =", "mask.nonzero().squeeze(1)) iou = self.data_encoder.iou(selected_boxes, roi) if iou.min() < min_iou: continue", "list_file: (str) path to index file. train: (boolean) train or", "fname = self.fnames[idx] img = Image.open(os.path.join(self.root, fname)) boxes = self.boxes[idx].clone()", "splited[4+5*i] ymax = splited[5+5*i] c = splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box))", "Data augmentation' of the paper. Args: img: (PIL.Image) image. boxes:", "img.size while True: min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7,", "annotation file is organized as: image_name #obj xmin ymin xmax", "self.fnames = [] self.boxes = [] self.labels = [] self.data_encoder", "class ListDataset(data.Dataset): img_size = 300 def __init__(self, root, list_file, train,", "sized [#obj,]. Returns: img: (PIL.Image) cropped image. 
selected_boxes: (tensor) selected", "= Image.open(os.path.join(self.root, fname)) boxes = self.boxes[idx].clone() labels = self.labels[idx] #", "< 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) w = img.width xmin =", "2*h: continue x = random.randrange(imw - w) y = random.randrange(imh", "boxes, labels): '''Randomly crop the image and adjust the bbox", "sys import os.path import random import numpy as np import", "imw, imh = img.size while True: min_iou = random.choice([None, 0.1,", "max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h) return img, selected_boxes, labels[mask] def __len__(self): return", "import torchvision.transforms as transforms from encoder import DataEncoder from PIL", "= DataEncoder() with open(list_file) as f: lines = f.readlines() self.num_samples", "img, boxes, labels): '''Randomly crop the image and adjust the", "continue img = img.crop((x, y, x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0,", "For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:", "conf_target def random_flip(self, img, boxes): '''Randomly flip the image and", "self.labels[idx] # Data augmentation while training. if self.train: img, boxes", "(xmin, ymin, xmax, ymax), the flipped bbox is: (w-xmax, ymin,", "selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1)) iou = self.data_encoder.iou(selected_boxes, roi) if iou.min()", "if self.train: img, boxes = self.random_flip(img, boxes) img, boxes, labels", "root self.train = train self.transform = transform self.fnames = []", "import random import numpy as np import torch import torch.utils.data", "return img, boxes def random_crop(self, img, boxes, labels): '''Randomly crop", "path to index file. train: (boolean) train or test. transform:", "= [] label = [] for i in range(num_objs): xmin", "# Load image and bbox locations. fname = self.fnames[idx] img", "imh = img.size while True: min_iou = random.choice([None, 0.1, 0.3,", "(int) image index. 
Returns: img: (tensor) image tensor. loc_target: (tensor)", "= random.randrange(int(0.1*imh), imh) if h > 2*w or w >", "0.3, 0.5, 0.7, 0.9]) if min_iou is None: return img,", "x = random.randrange(imw - w) y = random.randrange(imh - h)", "min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9]) if min_iou", "> roi2[:,:2]) & (center < roi2[:,2:]) # [N,2] mask =", "- w) y = random.randrange(imh - h) roi = torch.Tensor([[x,", "xmax = splited[4+5*i] ymax = splited[5+5*i] c = splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)])", "(tensor) label targets, sized [8732,]. ''' # Load image and", "image. selected_boxes: (tensor) selected bbox locations. labels: (tensor) selected bbox", "- boxes[:,0] boxes[:,0] = xmin boxes[:,2] = xmax return img,", "mask = (center > roi2[:,:2]) & (center < roi2[:,2:]) #", "adjust the bbox locations. For more details, see 'Chapter2.2: Data", "''' imw, imh = img.size while True: min_iou = random.choice([None,", "and adjust the bbox locations. For more details, see 'Chapter2.2:", "torch import torch.utils.data as data import torchvision.transforms as transforms from", "2*w or w > 2*h: continue x = random.randrange(imw -", "bbox (xmin, ymin, xmax, ymax), the flipped bbox is: (w-xmax,", "(tensor) bbox labels, sized [#obj,]. Returns: img: (PIL.Image) cropped image.", "img.crop((x, y, x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)", "w - boxes[:,2] xmax = w - boxes[:,0] boxes[:,0] =", "4]. ''' if random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) w", "Returns: img: (PIL.Image) cropped image. selected_boxes: (tensor) selected bbox locations.", "as: image_name #obj xmin ymin xmax ymax class_index .. '''", "targets, sized [8732,4]. conf_target: (tensor) label targets, sized [8732,]. '''", "img: (tensor) image tensor. 
loc_target: (tensor) location targets, sized [8732,4].", "labels = self.random_crop(img, boxes, labels) # Scale bbox locaitons to", "'''Load image/class/box from a annotation file. The annotation file is", "img.transpose(Image.FLIP_LEFT_RIGHT) w = img.width xmin = w - boxes[:,2] xmax", "mask.any(): continue selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1)) iou = self.data_encoder.iou(selected_boxes, roi)", "= roi.expand(len(center), 4) # [N,4] mask = (center > roi2[:,:2])", "'''Randomly crop the image and adjust the bbox locations. For", "the bbox locations. For bbox (xmin, ymin, xmax, ymax), the", "img, boxes, labels for _ in range(100): w = random.randrange(int(0.1*imw),", "selected_boxes: (tensor) selected bbox locations. labels: (tensor) selected bbox labels.", "+ boxes[:,2:]) / 2 # [N,2] roi2 = roi.expand(len(center), 4)", "f: lines = f.readlines() self.num_samples = len(lines) for line in", "labels) # Scale bbox locaitons to [0,1]. w,h = img.size", "the image and adjust the bbox locations. For bbox (xmin,", "fname)) boxes = self.boxes[idx].clone() labels = self.labels[idx] # Data augmentation", "as f: lines = f.readlines() self.num_samples = len(lines) for line", "''' from __future__ import print_function import os import sys import", "locations, sized [#obj, 4]. ''' if random.random() < 0.5: img", "adjust the bbox locations. For bbox (xmin, ymin, xmax, ymax),", "([transforms]) image transforms. ''' self.root = root self.train = train", "= random.randrange(int(0.1*imw), imw) h = random.randrange(int(0.1*imh), imh) if h >", "labels) return img, loc_target, conf_target def random_flip(self, img, boxes): '''Randomly", "box = [] label = [] for i in range(num_objs):", "targets, sized [8732,]. ''' # Load image and bbox locations.", "= splited[2+5*i] ymin = splited[3+5*i] xmax = splited[4+5*i] ymax =", "idx): '''Load a image, and encode its bbox locations and", "location targets, sized [8732,4]. 
conf_target: (tensor) label targets, sized [8732,].", "[] self.data_encoder = DataEncoder() with open(list_file) as f: lines =", "= self.labels[idx] # Data augmentation while training. if self.train: img,", "(PIL.Image) cropped image. selected_boxes: (tensor) selected bbox locations. labels: (tensor)", "annotation file. The annotation file is organized as: image_name #obj", "w) y = random.randrange(imh - h) roi = torch.Tensor([[x, y,", "labels = self.labels[idx] # Data augmentation while training. if self.train:", "img.size boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes) img = img.resize((self.img_size,self.img_size)) img = self.transform(img)", "= self.data_encoder.iou(selected_boxes, roi) if iou.min() < min_iou: continue img =", "w-xmin, ymax). Args: img: (PIL.Image) image. boxes: (tensor) bbox locations,", "boxes[:,0] = xmin boxes[:,2] = xmax return img, boxes def", "boxes.index_select(0, mask.nonzero().squeeze(1)) iou = self.data_encoder.iou(selected_boxes, roi) if iou.min() < min_iou:", "Returns: img: (PIL.Image) randomly flipped image. boxes: (tensor) randomly flipped", "paper. Args: img: (PIL.Image) image. boxes: (tensor) bbox locations, sized", "import DataEncoder from PIL import Image, ImageOps class ListDataset(data.Dataset): img_size", "boxes[:,2:]) / 2 # [N,2] roi2 = roi.expand(len(center), 4) #", "crop the image and adjust the bbox locations. For more", "[8732,4]. conf_target: (tensor) label targets, sized [8732,]. ''' # Load", "lines = f.readlines() self.num_samples = len(lines) for line in lines:", "locations, sized [#obj, 4]. Returns: img: (PIL.Image) randomly flipped image.", "(PIL.Image) image. boxes: (tensor) bbox locations, sized [#obj, 4]. Returns:", "= splited[3+5*i] xmax = splited[4+5*i] ymax = splited[5+5*i] c =", "img = self.transform(img) # Encode loc & conf targets. 
loc_target,", "line.strip().split() self.fnames.append(splited[0]) num_objs = int(splited[1]) box = [] label =", "open(list_file) as f: lines = f.readlines() self.num_samples = len(lines) for", "numpy as np import torch import torch.utils.data as data import", "import os.path import random import numpy as np import torch", "0.7, 0.9]) if min_iou is None: return img, boxes, labels", "continue x = random.randrange(imw - w) y = random.randrange(imh -", "not mask.any(): continue selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1)) iou = self.data_encoder.iou(selected_boxes,", "flipped bbox locations, sized [#obj, 4]. ''' if random.random() <", "random.randrange(int(0.1*imw), imw) h = random.randrange(int(0.1*imh), imh) if h > 2*w", "splited[3+5*i] xmax = splited[4+5*i] ymax = splited[5+5*i] c = splited[6+5*i]", "[0,1]. w,h = img.size boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes) img = img.resize((self.img_size,self.img_size))", "import Image, ImageOps class ListDataset(data.Dataset): img_size = 300 def __init__(self,", "True: min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9]) if", "For more details, see 'Chapter2.2: Data augmentation' of the paper.", "import torch import torch.utils.data as data import torchvision.transforms as transforms", "= (center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2]", "random.randrange(imw - w) y = random.randrange(imh - h) roi =", "num_objs = int(splited[1]) box = [] label = [] for", "the image and adjust the bbox locations. For more details,", "boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4]. '''", "locations. For more details, see 'Chapter2.2: Data augmentation' of the", "roi) if iou.min() < min_iou: continue img = img.crop((x, y,", "img = img.crop((x, y, x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)", "Data augmentation while training. if self.train: img, boxes = self.random_flip(img,", "is: (w-xmax, ymin, w-xmin, ymax). 
Args: img: (PIL.Image) image. boxes:", "class labels. Args: idx: (int) image index. Returns: img: (tensor)", "img: (PIL.Image) randomly flipped image. boxes: (tensor) randomly flipped bbox", "self.boxes[idx].clone() labels = self.labels[idx] # Data augmentation while training. if", "or test. transform: ([transforms]) image transforms. ''' self.root = root", "range(100): w = random.randrange(int(0.1*imw), imw) h = random.randrange(int(0.1*imh), imh) if", "[N,4] mask = (center > roi2[:,:2]) & (center < roi2[:,2:])", "< min_iou: continue img = img.crop((x, y, x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0,", "def __init__(self, root, list_file, train, transform): ''' Args: root: (str)", "augmentation while training. if self.train: img, boxes = self.random_flip(img, boxes)", "from PIL import Image, ImageOps class ListDataset(data.Dataset): img_size = 300", "locaitons to [0,1]. w,h = img.size boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes) img", "= (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2] roi2 =", ".. ''' from __future__ import print_function import os import sys", "labels: (tensor) bbox labels, sized [#obj,]. Returns: img: (PIL.Image) cropped", "= transform self.fnames = [] self.boxes = [] self.labels =", "bbox labels. ''' imw, imh = img.size while True: min_iou", "''' Args: root: (str) ditectory to images. list_file: (str) path", "roi2[:,:2]) & (center < roi2[:,2:]) # [N,2] mask = mask[:,0]", "import sys import os.path import random import numpy as np", "self.root = root self.train = train self.transform = transform self.fnames", "idx: (int) image index. Returns: img: (tensor) image tensor. loc_target:", "file is organized as: image_name #obj xmin ymin xmax ymax", "if h > 2*w or w > 2*h: continue x", "'''Randomly flip the image and adjust the bbox locations. 
For", "ymin, xmax, ymax), the flipped bbox is: (w-xmax, ymin, w-xmin,", "self.transform = transform self.fnames = [] self.boxes = [] self.labels", "xmax return img, boxes def random_crop(self, img, boxes, labels): '''Randomly", "image and bbox locations. fname = self.fnames[idx] img = Image.open(os.path.join(self.root,", "boxes = self.random_flip(img, boxes) img, boxes, labels = self.random_crop(img, boxes,", "boxes[:,0] boxes[:,0] = xmin boxes[:,2] = xmax return img, boxes", "= [] self.data_encoder = DataEncoder() with open(list_file) as f: lines", "c = splited[6+5*i] box.append([float(xmin),float(ymin),float(xmax),float(ymax)]) label.append(int(c)) self.boxes.append(torch.Tensor(box)) self.labels.append(torch.LongTensor(label)) def __getitem__(self, idx):", "ListDataset(data.Dataset): img_size = 300 def __init__(self, root, list_file, train, transform):", "self.fnames[idx] img = Image.open(os.path.join(self.root, fname)) boxes = self.boxes[idx].clone() labels =", "- boxes[:,2] xmax = w - boxes[:,0] boxes[:,0] = xmin", "DataEncoder() with open(list_file) as f: lines = f.readlines() self.num_samples =", "randomly flipped bbox locations, sized [#obj, 4]. ''' if random.random()", "xmin boxes[:,2] = xmax return img, boxes def random_crop(self, img,", "= w - boxes[:,2] xmax = w - boxes[:,0] boxes[:,0]", "= len(lines) for line in lines: splited = line.strip().split() self.fnames.append(splited[0])", "xmin = splited[2+5*i] ymin = splited[3+5*i] xmax = splited[4+5*i] ymax", "is organized as: image_name #obj xmin ymin xmax ymax class_index", "#[N,] if not mask.any(): continue selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1)) iou", "self.data_encoder.iou(selected_boxes, roi) if iou.min() < min_iou: continue img = img.crop((x,", "sized [#obj, 4]. ''' if random.random() < 0.5: img =", "(PIL.Image) image. boxes: (tensor) bbox locations, sized [#obj, 4]. 
labels:", "x+w, y+h)) selected_boxes[:,0].add_(-x).clamp_(min=0, max=w) selected_boxes[:,1].add_(-y).clamp_(min=0, max=h) selected_boxes[:,2].add_(-x).clamp_(min=0, max=w) selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)", "Image, ImageOps class ListDataset(data.Dataset): img_size = 300 def __init__(self, root,", "Args: root: (str) ditectory to images. list_file: (str) path to", "class_index .. ''' from __future__ import print_function import os import", "import numpy as np import torch import torch.utils.data as data", "int(splited[1]) box = [] label = [] for i in", "self.data_encoder.encode(boxes, labels) return img, loc_target, conf_target def random_flip(self, img, boxes):", "train or test. transform: ([transforms]) image transforms. ''' self.root =", "flipped image. boxes: (tensor) randomly flipped bbox locations, sized [#obj,", "= self.random_crop(img, boxes, labels) # Scale bbox locaitons to [0,1].", "self.train: img, boxes = self.random_flip(img, boxes) img, boxes, labels =", "= int(splited[1]) box = [] label = [] for i", "img_size = 300 def __init__(self, root, list_file, train, transform): '''", "= img.size boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes) img = img.resize((self.img_size,self.img_size)) img =", "flip the image and adjust the bbox locations. For bbox", "img = img.transpose(Image.FLIP_LEFT_RIGHT) w = img.width xmin = w -", "mask[:,1] #[N,] if not mask.any(): continue selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))", "boxes): '''Randomly flip the image and adjust the bbox locations.", "torch.Tensor([w,h,w,h]).expand_as(boxes) img = img.resize((self.img_size,self.img_size)) img = self.transform(img) # Encode loc", "splited[2+5*i] ymin = splited[3+5*i] xmax = splited[4+5*i] ymax = splited[5+5*i]", "conf_target = self.data_encoder.encode(boxes, labels) return img, loc_target, conf_target def random_flip(self,", "w > 2*h: continue x = random.randrange(imw - w) y", "# Encode loc & conf targets. 
loc_target, conf_target = self.data_encoder.encode(boxes,", "transform self.fnames = [] self.boxes = [] self.labels = []" ]
[ "numpy.ndarray specifying the topology of a device mesh to place", "FnMeta(*shapes): \"\"\"A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.\"\"\" if fn_out: out_shapes", "same structure as the given sa, but sa1 and sa2", "and dims[i+1] as its output dimensions. act: The activation function.", "lingvo.core import builder_layers from lingvo.core import hyperparams from lingvo.core import", "2.0 (the \"License\"); # you may not use this file", "\"\"\"Model builder with commonly used layers. A method in a", "layers. WARNING: The builder pattern is still experimental and we", "return builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name,", "a sequential layer that calls its sub-layer one after another.", "4, 5, 6, 7]) which is a 1d mesh with", "how weight of this layer or those of the sublayers", "body: The sub-layer. fetches: A list of fetch names inside", "to facilitate building these patterns. For example, _Seq() helps to", "return self._Seq( name, self._Linear('linear', idims, odims), self._Bias('bias', odims), self._Activation('act', fn=act))", "The layer name. repeat: Repeat \\*subs this many times in", "the sublayers should ' 'be sharded over device mesh. ')", "experimental and we need to gain experience on when to", "layer. *subs: A list of sub-layers. Returns: The param for", "2, sa, sb, sc) constructs a layer with 6 layers", "layer name. dims: A list of int. i-th layer has", "_Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias layer. 
The bias is", "Returns: A BatchParallel layer which splits the batch and computes", "-> NestedMap{flops, out_shapes}.\"\"\" if fn_out: out_shapes = fn_out(*shapes) if isinstance(out_shapes,", "for ' 'layers built using model builder, set fprop_dtype to", "n, (i, o) in enumerate(zip(dims[:-1], dims[1:])): l += [self._FC('l%03d' %", "input.\"\"\" return builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self,", "returns a single Tensor. # # These methods are designed", "License for the specific language governing permissions and # limitations", "multiple devices. Args: name: This layer's name. sub: The sub-layer.", "Reserved. # # Licensed under the Apache License, Version 2.0", "def _Reshape(self, name, shape): \"\"\"Reshape inputs to the shape provided.\"\"\"", "fn is a very simple python function. This layer can", "fn: A lambda tuple(Tensor) -> tuple(Tensor). fn_out: A lambda tuple(tshape.Shape)", "= [] for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])): l", "i-th layer has dims[i] as its input dimension, and dims[i+1]", "splits the batch and computes the forward pass on multiple", "3], [4, 5, 6, 7]]) which is 2d matrix of", "..., t_n) -> (t1, ..., t_n).\"\"\" return self._Seq(name) def _Arg(self,", "tuple. Args: name: The layer name. *subs: A list of", "the input tuple. Typically, fn is a very simple python", "Params(cls): \"\"\"The params of this layer.\"\"\" p = hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout',", "odims, act='RELU'): \"\"\"Feed-forward fully connected. y = act(matmul(x, w) +", "Args: name: The layer name. 
fn: A lambda tuple(Tensor) ->", "graph.\"\"\" return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self, name):", "l = [] for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])):", "are designed to have minimal knobs. Sub-classes which needs to", "the composed layer. \"\"\" def ConcatTuples(tuples): # tuples is a", "dims[1:])): l += [self._FC('l%03d' % n, i, o, act)] return", "lingvo.core import activations from lingvo.core import builder_layers from lingvo.core import", "') p.Define( 'activation_split_dims_mapping', None, 'Relevant only if device_mesh above is", "fn=fn, fn_meta=FnMeta) def _Save(self, name): \"\"\"Returns a layer from which", "function. This layer can be used for prototyping but we", "act='RELU'): \"\"\"Feed-forward fully connected. y = act(matmul(x, w) + b).\"\"\"", "\"\"\"Picks index-th element. (t_1, ..., t_n) -> (t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name,", "composition method. ###################################################################### def _Rep(self, name, repeat, *subs): r\"\"\"Connects sub-layers", "concatenates their output tuples into one tuple. Args: name: The", "to # be flexible can override these methods with different", "python function. This layer can be used for prototyping but", "layer name. *subs: A list of sub-layers. Returns: The param", "the body layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def _BatchParallel(self, name,", "qdomain=None): \"\"\"Linear layer. y = matmul([..., idims], [idims, odims]).\"\"\" p", "if device_mesh above is not None. 
If not None, it", "tshape.Shape): out_shapes = (out_shapes,) else: out_shapes = shapes if fn_flops:", "to the last dimension of the input.\"\"\" return builder_layers.BiasLayer.Params().Set( name=name,", "def _Conv2D(self, name, filter_shape, filter_stride): \"\"\"Conv2D layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set( name=name,", "noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self, name,", "OF ANY KIND, either express or implied. # See the", "See the License for the specific language governing permissions and", "Typically, fn is a very simple python function. This layer", "_AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out',", "..., fn)(x). We feed the input tuple to all sub-layers", "to in writing, software # distributed under the License is", "it to build complicated layers. \"\"\" import functools from lingvo.core", "# Sub-classes should put some options common to many layers", "None, it ' 'specifies how activation of this layer or", "name, dims, act='RELU'): \"\"\"Multiple layers of feed-forward fully connected. 
Args:", "or agreed to in writing, software # distributed under the", "builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self, name): \"\"\"Print FProp input shape information.\"\"\"", "self._Seq(name, *l) def _Conv2D(self, name, filter_shape, filter_stride): \"\"\"Conv2D layer.\"\"\" return", "layer whose outputs correspond to the activations of fetch points", "hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False, 'Used deterministic dropout or not.') p.Define( 'fprop_dtype',", "2, 3], [4, 5, 6, 7]]) which is 2d matrix", "(t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self, name, *subs): \"\"\"y =", "pass on multiple devices. Args: name: This layer's name. sub:", "' 'specifies how activation of this layer or those of", "Please discuss w/ teammates before using it to build complicated", "not to use. Please discuss w/ teammates before using it", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "*subs): \"\"\"Connects sub-layers sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def _Graph(self,", "import functools from lingvo.core import activations from lingvo.core import builder_layers", "we assume flops == sum of elements in the inputs.", "layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name, body, fetches): \"\"\"Fetches saved activations in", "depend on such a support. The constructed layer is often", "compose multiple layers. # # Sub-classes are discouraged to override", "fetches: A list of fetch names inside the sub-layer body.", "sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have the", "not depend on such a support. The constructed layer is", "in the body sub-layer. E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...),", "# # These methods are designed to have minimal knobs.", "composed layer. 
\"\"\" def ConcatTuples(tuples): # tuples is a list", "not use this file except in compliance with the License.", "i, o, act)] return self._Seq(name, *l) def _Conv2D(self, name, filter_shape,", "odims), self._Bias('bias', odims), self._Activation('act', fn=act)) def _MLP(self, name, dims, act='RELU'):", "_Id(self, name): \"\"\"Identity. (t_1, ..., t_n) -> (t1, ..., t_n).\"\"\"", "you may not use this file except in compliance with", "E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with", "is 2d matrix of 8 ' 'devices.') p.Define( 'weight_split_dims_mapping', None,", "argument being None (e.g., Conv2DLayer), builder should not depend on", "o, act)] return self._Seq(name, *l) def _Conv2D(self, name, filter_shape, filter_stride):", "which will use float32 ' 'activations.') # SPMD partition related", "lambda tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape) ->", "If not None, it ' 'specifies how weight of this", "self._Seq(name) def _Arg(self, name, index): \"\"\"Picks index-th element. (t_1, ...,", "return self._Seq(name, *l) def _Conv2D(self, name, filter_shape, filter_stride): \"\"\"Conv2D layer.\"\"\"", "by a builder takes a tuple of tf.Tensor (one or", "...), _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...), _Output('output', ...)), ['layer1_out',", "...), _Output('output', ...)), ['layer1_out', 'layer2_out']) The layer returns the stack's", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "The layer returns the stack's final output together with intermediate", "from lingvo.core import builder_layers from lingvo.core import hyperparams from lingvo.core", "def _Arg(self, name, index): \"\"\"Picks index-th element. (t_1, ..., t_n)", "A list of sub-layers. 
Returns: The param for the composed", "return activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self, name, idims, odims, act='RELU'): \"\"\"Feed-forward", "_Seq() helps to build a sequential layer that calls its", "single Tensor or tuple(Tensor) to the input tuple. Typically, fn", "single Tensor. # # These methods are designed to have", "sub-layers sequentially and repeat multiple times. E.g., _Rep('foo', 2, sa,", "a builder class constructs a layer param. FProp of a", "*subs): \"\"\"y = (f1, f2, ..., fn)(x). We feed the", "tuple of tf.Tensor (one or more). Even though certain layers", "minimal knobs. Sub-classes which needs to # be flexible can", "sequential layer that calls its sub-layer one after another. TODO(zhifengc):", "float32 ' 'activations.') # SPMD partition related params. p.Define( 'device_mesh',", "activations in the body sub-layer. E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1',", "fetch1, ..., fetchM]. \"\"\" return builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches) def", "a layer with 6 layers sequentially connected: [sa1, sb1, sc1,", "activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self, name, idims, odims, act='RELU'): \"\"\"Feed-forward fully", "return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims,", "to build a sequential layer that calls its sub-layer one", "is a very simple python function. This layer can be", "Adds a more concrete example. \"\"\" @classmethod def Params(cls): \"\"\"The", "name, dims): \"\"\"Batch norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def _LN(self,", "flops = fn_flops(*shapes) else: flops = sum([s.size for s in", "times. 
E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer", "sb, sc) constructs a layer with 6 layers sequentially connected:", "# SPMD partition related params. p.Define( 'device_mesh', None, 'A numpy.ndarray", "shape information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name, keys): \"\"\"Returns a", "'device_mesh', None, 'A numpy.ndarray specifying the topology of a device", "..., inputN, fetch1, ..., fetchM]. \"\"\" return builder_layers.BranchLayer.Params().Set( name=name, body=body,", "returns a tuple of tf.Tensor (one or more). Even though", "(i, o) in enumerate(zip(dims[:-1], dims[1:])): l += [self._FC('l%03d' % n,", "Tensor and returns a single Tensor. # # These methods", "and when not to use. Please discuss w/ teammates before", "functools.reduce(lambda x, y: x + list(y), tuples, []))) return builder_layers.ParallelLayer.Params().Set(", "\\*subs this many times in the compose layer. *subs: A", "list of fetch names inside the sub-layer body. Returns: A", "# Basic nn layers. # # The following method returns", "name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self, name, idims, odims, device_mesh=None,", "to use. To enable bfloat16 activations for ' 'layers built", "this layer.\"\"\" p = hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False, 'Used deterministic dropout", "fn_meta=FnMeta) def _Save(self, name): \"\"\"Returns a layer from which the", "name, keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns a DropoutLayer Params.\"\"\" if self.params.deterministic_dropout: return", "which is 2d matrix of 8 ' 'devices.') p.Define( 'weight_split_dims_mapping',", "simple python function. This layer can be used for prototyping", "all established layers as FnLayer can't be serialized. Args: name:", "its output dimensions. act: The activation function. 
Returns: The param", "and concatenates their output tuples into one tuple. Args: name:", "builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name, fn='RELU'):", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "or more). Even though certain layers support FProp argument being", "using it to build complicated layers. \"\"\" import functools from", "name): \"\"\"Returns a layer from which the activation and gradient", "iterations = [] for i in range(repeat): iterations.append(self._Seq('iter_%03d' % i,", "multiple layers. # # Sub-classes are discouraged to override these", "layer returns the stack's final output together with intermediate activations", "is assumed to be a ' 'single device. Here are", "layer is built.\"\"\" return self._params def __init__(self, params): # Sub-classes", "If not None, it ' 'specifies how activation of this", "\"\"\" def FnMeta(*shapes): \"\"\"A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.\"\"\" if", "file except in compliance with the License. # You may", "output together with intermediate activations from layer1_out and layer2_out. Args:", "# be flexible can override these methods with different options.", "layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self, name, idims, odims, act='RELU'):", "If device_mesh is None, it is assumed to be a", "['layer1_out', 'layer2_out']) The layer returns the stack's final output together", "\"\"\" l = [] for n, (i, o) in enumerate(zip(dims[:-1],", "ConcatTuples(tuples): # tuples is a list of tuples. return tuple(functools.reduce(lambda", "return py_utils.NestedMap( flops=0, out_shapes=tuple( functools.reduce(lambda x, y: x + list(y),", "saved activations in the body sub-layer. 
E.g.: _AddFetches('foo', _Seq( 'stack',", "def _Graph(self, name, input_endpoints, output_endpoints, *signature_sub_param_list): \"\"\"Connects sub-layers into a", "A method in a builder class constructs a layer param.", "return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name, shape):", "of multiple sub-layers connected in certain patterns. We expect to", "name, *subs): \"\"\"y = (f1, f2, ..., fn)(x). We feed", "The bias is added to the last dimension of the", "name. fn: A lambda tuple(Tensor) -> tuple(Tensor). fn_out: A lambda", "a builder takes a tuple of tf.Tensor (one or more)", "from lingvo.core import activations from lingvo.core import builder_layers from lingvo.core", "\"\"\"Identity. (t_1, ..., t_n) -> (t1, ..., t_n).\"\"\" return self._Seq(name)", "name=name, keys=keys) ########################################################################### # Basic nn layers. # # The", "established layers as FnLayer can't be serialized. Args: name: The", "inputs. Returns: The param for the composed layer. \"\"\" def", "False, 'Used deterministic dropout or not.') p.Define( 'fprop_dtype', None, 'Activations", "feed the input tuple to all sub-layers and concatenates their", "to have a few methods to facilitate building these patterns.", "+= [self._FC('l%03d' % n, i, o, act)] return self._Seq(name, *l)", "*signature_sub_param_list): \"\"\"Connects sub-layers into a data flow graph.\"\"\" return builder_layers.GraphLayer.Params().Set(", "A BatchParallel layer which splits the batch and computes the", "fetch points in the sub-layer body. [input1, input2, ..., inputN,", "WARNING: The builder pattern is still experimental and we need", "list of int. i-th layer has dims[i] as its input", "forward pass on multiple devices. Args: name: This layer's name.", "computes the forward pass on multiple devices. 
\"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name,", "and returns a single Tensor. # # These methods are", "KIND, either express or implied. # See the License for", "+ list(y), tuples, [])) def ConcatMeta(tuples): return py_utils.NestedMap( flops=0, out_shapes=tuple(", "% i, *[p.Copy() for p in subs])) return self._Seq(name, *iterations)", "8 devices, ' 'np.array([[0, 1, 2, 3], [4, 5, 6,", "i, *[p.Copy() for p in subs])) return self._Seq(name, *iterations) def", "pyformat: disable return self._Seq( name, self._Linear('linear', idims, odims), self._Bias('bias', odims),", "The param for the composed layer. \"\"\" def FnMeta(*shapes): \"\"\"A", "fn_flops: flops = fn_flops(*shapes) else: flops = sum([s.size for s", "(the \"License\"); # you may not use this file except", "y = matmul([..., idims], [idims, odims]).\"\"\" p = builder_layers.LinearLayer.Params() p.name", "be used for prototyping but we advice to implement the", "bfloat16 activations for ' 'layers built using model builder, set", "activation function. Returns: The param for the composed layer. \"\"\"", "None. If not None, it ' 'specifies how activation of", "# These methods are designed to have minimal knobs. Sub-classes", "layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype)", "_Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...),", "2, 3, 4, 5, 6, 7]) which is a 1d", "fn. If None, we assume flops == sum of elements", "propagated to layers that support ' 'bfloat16 activations. 
Default is", "# # Unless required by applicable law or agreed to", "will be propagated to layers that support ' 'bfloat16 activations.", "def _AddFetches(self, name, body, fetches): \"\"\"Fetches saved activations in the", "'devices.') p.Define( 'weight_split_dims_mapping', None, 'Relevant only if device_mesh above is", "_Seq(self, name, *subs): \"\"\"Connects sub-layers sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs))", "more) and returns a tuple of tf.Tensor (one or more).", "' 'activations.') # SPMD partition related params. p.Define( 'device_mesh', None,", "to be a ' 'single device. Here are some examples:", "_Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...), _Output('output', ...)),", "Returns: A layer whose outputs correspond to the activations of", "The param for the composed layer. \"\"\" def ConcatTuples(tuples): #", "activations from lingvo.core import builder_layers from lingvo.core import hyperparams from", "_Activation(self, name, fn='RELU'): \"\"\"Activation layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self,", "over device mesh. ') p.Define( 'activation_split_dims_mapping', None, 'Relevant only if", "sub-layer. Returns: A BatchParallel layer which splits the batch and", "= odims p.fprop_dtype = self.params.fprop_dtype p.device_mesh = device_mesh p.weight_split_dims_mapping =", "implied. # See the License for the specific language governing", "[])) def ConcatMeta(tuples): return py_utils.NestedMap( flops=0, out_shapes=tuple( functools.reduce(lambda x, y:", "FProp takes a single # Tensor and returns a single", "Args: name: This layer's name. body: The sub-layer. 
fetches: A", "import tshape class Base: \"\"\"Model builder with commonly used layers.", "layer which splits the batch and computes the forward pass", "builder_layers.LinearLayer.Params() p.name = name p.input_dims = idims p.output_dims = odims", "Returns: The param for the composed layer. \"\"\" l =", "name p.input_dims = idims p.output_dims = odims p.fprop_dtype = self.params.fprop_dtype", "fn_out(*shapes) if isinstance(out_shapes, tshape.Shape): out_shapes = (out_shapes,) else: out_shapes =", "'specifies how activation of this layer or those of the", "'Relevant only if device_mesh above is not None. If not", "layer with 6 layers sequentially connected: [sa1, sb1, sc1, sa2,", "args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ########################################################################### # Basic nn layers.", "library to build composite layers. WARNING: The builder pattern is", "fprop_dtype=self.params.fprop_dtype) def _Linear(self, name, idims, odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear", "device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias layer. The bias is added to the", "flops=0, out_shapes=tuple( functools.reduce(lambda x, y: x + list(y), tuples, [])))", "import py_utils from lingvo.core import tshape class Base: \"\"\"Model builder", "# Tensor and returns a single Tensor. # # These", "name, keys): \"\"\"Returns a NestedMap with keys from fprop args.\"\"\"", "class constructs a layer param. FProp of a layer constructed", "odims), self._Activation('act', fn=act)) def _MLP(self, name, dims, act='RELU'): \"\"\"Multiple layers", "that support ' 'bfloat16 activations. Default is None, which will", "Unless required by applicable law or agreed to in writing,", "name, body): \"\"\"Forces rematerialization on FProp of the body layer.\"\"\"", "the topology of a device mesh to place the '", "sub-layers. Returns: The param for the composed layer. 
\"\"\" def", "' 'specifies how weight of this layer or those of", "import builder_layers from lingvo.core import hyperparams from lingvo.core import layers", "the specific language governing permissions and # limitations under the", "dims, act='RELU'): \"\"\"Multiple layers of feed-forward fully connected. Args: name:", "Here are some examples: ' 'np.array([0, 1, 2, 3, 4,", "Conv2DLayer), builder should not depend on such a support. The", "# limitations under the License. # ============================================================================== \"\"\"A library to", "[] for i in range(repeat): iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for", "sub-class of BaseLayer for all established layers as FnLayer can't", "This layer can be used for prototyping but we advice", "py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def _Save(self, name): \"\"\"Returns", "name: The layer name. fn: A lambda tuple(Tensor) -> tuple(Tensor).", "calls its sub-layer one after another. TODO(zhifengc): Adds a more", "\"\"\"The params of this layer.\"\"\" p = hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False,", "should not depend on such a support. The constructed layer", "layer. \"\"\" def ConcatTuples(tuples): # tuples is a list of", "============================================================================== \"\"\"A library to build composite layers. WARNING: The builder", "layer. \"\"\" def FnMeta(*shapes): \"\"\"A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.\"\"\"", "= shapes if fn_flops: flops = fn_flops(*shapes) else: flops =", "same weight. Args: name: The layer name. 
repeat: Repeat \\*subs", "all sub-layers and concatenates their output tuples into one tuple.", "a tuple of tf.Tensor (one or more) and returns a", "return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self, name, fn,", "= act(matmul(x, w) + b).\"\"\" # pyformat: disable return self._Seq(", "' 'layers built using model builder, set fprop_dtype to '", "lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.\"\"\" if fn_out: out_shapes = fn_out(*shapes)", "-> estimated flops of fn. If None, we assume flops", "layer.\"\"\" p = hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False, 'Used deterministic dropout or", "builder can override _BN() to tune the decay option. ###########################################################################", "the body sub-layer. E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out',", "weight. Args: name: The layer name. repeat: Repeat \\*subs this", "None, it ' 'specifies how weight of this layer or", "sa2, sb2, sc2]. sa1 and sa2 have the same structure", "\"\"\"y = fn(x). Applies a fn: tuple(Tensor) -> a single", "the stack's final output together with intermediate activations from layer1_out", "layers. # # The following method returns a layer param,", "param for the composed layer. \"\"\" iterations = [] for", "with 8 devices, ' 'np.array([[0, 1, 2, 3], [4, 5,", "used for prototyping but we advice to implement the logic", "method in a builder class constructs a layer param. FProp", "building these patterns. 
For example, _Seq() helps to build a", "' 'devices.') p.Define( 'weight_split_dims_mapping', None, 'Relevant only if device_mesh above", "return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name, body, fetches): \"\"\"Fetches saved activations", "fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns a DropoutLayer Params.\"\"\"", "\"\"\"Splits the batch and compute the forward pass on multiple", "sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def _Graph(self, name, input_endpoints, output_endpoints,", "builder_layers from lingvo.core import hyperparams from lingvo.core import layers from", "name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name, fn='RELU'): \"\"\"Activation", "###################################################################### # Layers to compose multiple layers. # # Sub-classes", "builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ########################################################################### # Basic nn layers. # #", "out_shapes = (out_shapes,) else: out_shapes = shapes if fn_flops: flops", "For example, _Seq() helps to build a sequential layer that", "The following method returns a layer param, whose FProp takes", "and computes the forward pass on multiple devices. \"\"\" return", "sub-class builder can override _BN() to tune the decay option.", "You may obtain a copy of the License at #", "built using model builder, set fprop_dtype to ' 'tf.bfloat16, which", "index): \"\"\"Picks index-th element. 
(t_1, ..., t_n) -> (t_{index},).\"\"\" return", "the same structure as the given sa, but sa1 and", "o) in enumerate(zip(dims[:-1], dims[1:])): l += [self._FC('l%03d' % n, i,", "\"\"\"Activation layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self, name, idims, odims,", "\"\"\"A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.\"\"\" if fn_out: out_shapes =", "..., t_n).\"\"\" return self._Seq(name) def _Arg(self, name, index): \"\"\"Picks index-th", "of the input.\"\"\" return builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping)", "py_utils.NestedMap( flops=0, out_shapes=tuple( functools.reduce(lambda x, y: x + list(y), tuples,", "the input tuple to all sub-layers and concatenates their output", "norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def _LN(self, name, dims, use_fused_layernorm=False):", "' 'be sharded over device mesh. ') p.Define( 'activation_split_dims_mapping', None,", "name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name, shape): \"\"\"Reshape inputs", "the same weight. Args: name: The layer name. repeat: Repeat", "for the composed layer. \"\"\" l = [] for n,", "function. Returns: The param for the composed layer. \"\"\" l", "FProp of a layer constructed by a builder takes a", "python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved.", "sublayers should ' 'be sharded over device mesh. ') p.Define(", "noise_shape_broadcast_dims=None): \"\"\"Returns a DropoutLayer Params.\"\"\" if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name,", "in enumerate(zip(dims[:-1], dims[1:])): l += [self._FC('l%03d' % n, i, o,", "is still experimental and we need to gain experience on", "Args: name: This layer's name. 
sub: The sub-layer. Returns: A", "permissions and # limitations under the License. # ============================================================================== \"\"\"A", "constructed layer is often a composition of multiple sub-layers connected", "Tensor or tuple(Tensor) to the input tuple. Typically, fn is", "related params. p.Define( 'device_mesh', None, 'A numpy.ndarray specifying the topology", "fn, fn_out=None, fn_flops=None): \"\"\"y = fn(x). Applies a fn: tuple(Tensor)", "layer. \"\"\" iterations = [] for i in range(repeat): iterations.append(self._Seq('iter_%03d'", "multiple sub-layers connected in certain patterns. We expect to have", "takes a single # Tensor and returns a single Tensor.", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "def _Activation(self, name, fn='RELU'): \"\"\"Activation layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn, name=name) def", "layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None):", "Returns: The param for the composed layer. \"\"\" def ConcatTuples(tuples):", "py_utils from lingvo.core import tshape class Base: \"\"\"Model builder with", "sc) constructs a layer with 6 layers sequentially connected: [sa1,", "(one or more) and returns a tuple of tf.Tensor (one", "functools from lingvo.core import activations from lingvo.core import builder_layers from", "input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self, name): \"\"\"Identity. (t_1, ..., t_n)", "with commonly used layers. A method in a builder class", "(t_1, ..., t_n) -> (t1, ..., t_n).\"\"\" return self._Seq(name) def", "a device mesh to place the ' 'computations onto. 
If", "x, y: x + list(y), tuples, [])) def ConcatMeta(tuples): return", "'stack', _Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...), _Output('output',", "This layer's name. body: The sub-layer. fetches: A list of", "range(repeat): iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs])) return", "\"\"\"Forces rematerialization on FProp of the body layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set(", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "tuples, [])) def ConcatMeta(tuples): return py_utils.NestedMap( flops=0, out_shapes=tuple( functools.reduce(lambda x,", "tune the decay option. ########################################################################### def _BN(self, name, dims): \"\"\"Batch", "License. # You may obtain a copy of the License", "to build complicated layers. \"\"\" import functools from lingvo.core import", "[idims, odims]).\"\"\" p = builder_layers.LinearLayer.Params() p.name = name p.input_dims =", "6, 7]]) which is 2d matrix of 8 ' 'devices.')", "weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear layer. y = matmul([..., idims], [idims, odims]).\"\"\"", "8 ' 'devices.') p.Define( 'weight_split_dims_mapping', None, 'Relevant only if device_mesh", "fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name, fn='RELU'): \"\"\"Activation layer.\"\"\" return", "a layer param. FProp of a layer constructed by a", "layer can be used for prototyping but we advice to", "dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name, fn='RELU'): \"\"\"Activation layer.\"\"\"", "governing permissions and # limitations under the License. 
# ==============================================================================", "sa1 and sa2 do not share the same weight. Args:", "a layer constructed by a builder takes a tuple of", "should ' 'be sharded over device mesh. ') p.Define( 'activation_split_dims_mapping',", "the given sa, but sa1 and sa2 do not share", "of fetch points in the sub-layer body. [input1, input2, ...,", "on when to use and when not to use. Please", "examples: ' 'np.array([0, 1, 2, 3, 4, 5, 6, 7])", "idims, odims, act='RELU'): \"\"\"Feed-forward fully connected. y = act(matmul(x, w)", "TensorFlow Authors. All Rights Reserved. # # Licensed under the", "odims]).\"\"\" p = builder_layers.LinearLayer.Params() p.name = name p.input_dims = idims", "layer param, whose FProp takes a single # Tensor and", "it ' 'specifies how activation of this layer or those", "of a layer constructed by a builder takes a tuple", "those of the sublayers ' 'should be sharded over device", "sc2]. sa1 and sa2 have the same structure as the", "methods to facilitate building these patterns. For example, _Seq() helps", "def _Seq(self, name, *subs): \"\"\"Connects sub-layers sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set( name=name,", "2020 The TensorFlow Authors. All Rights Reserved. # # Licensed", "These methods are designed to have minimal knobs. Sub-classes which", "sub-layers. Returns: The param for the composed layer. \"\"\" iterations", "def _CreateNestedMap(self, name, keys): \"\"\"Returns a NestedMap with keys from", "layers in __init__. self._params = params.Copy() ###################################################################### # Layers to", "FnLayer can't be serialized. Args: name: The layer name. fn:", "to build composite layers. WARNING: The builder pattern is still", "odims p.fprop_dtype = self.params.fprop_dtype p.device_mesh = device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping", "one after another. TODO(zhifengc): Adds a more concrete example. 
\"\"\"", "the decay option. ########################################################################### def _BN(self, name, dims): \"\"\"Batch norm.\"\"\"", "self._Linear('linear', idims, odims), self._Bias('bias', odims), self._Activation('act', fn=act)) def _MLP(self, name,", "are discouraged to override these composition method. ###################################################################### def _Rep(self,", "composed layer. \"\"\" def FnMeta(*shapes): \"\"\"A lambda tuple(tshape.Shape) -> NestedMap{flops,", "name, input_endpoints, output_endpoints, *signature_sub_param_list): \"\"\"Connects sub-layers into a data flow", "sa, sb, sc) constructs a layer with 6 layers sequentially", "knobs. Sub-classes which needs to # be flexible can override", "before using it to build complicated layers. \"\"\" import functools", "weight of this layer or those of the sublayers should", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "\"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self, name): \"\"\"Print FProp input", "# pyformat: disable return self._Seq( name, self._Linear('linear', idims, odims), self._Bias('bias',", "1d mesh with 8 devices, ' 'np.array([[0, 1, 2, 3],", "\"\"\"Returns a layer from which the activation and gradient can", "language governing permissions and # limitations under the License. #", "advice to implement the logic as a sub-class of BaseLayer", "required by applicable law or agreed to in writing, software", "params upon which this layer is built.\"\"\" return self._params def", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "The activation function. 
Returns: The param for the composed layer.", "layer is often a composition of multiple sub-layers connected in", "\"\"\"Multiple layers of feed-forward fully connected. Args: name: The layer", "[input1, input2, ..., inputN, fetch1, ..., fetchM]. \"\"\" return builder_layers.BranchLayer.Params().Set(", "\"\"\"Print FProp input shape information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name,", "option. ########################################################################### def _BN(self, name, dims): \"\"\"Batch norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name,", "discouraged to override these composition method. ###################################################################### def _Rep(self, name,", "BatchParallel layer which splits the batch and computes the forward", "agreed to in writing, software # distributed under the License", "import activations from lingvo.core import builder_layers from lingvo.core import hyperparams", "\"\"\"Connects sub-layers into a data flow graph.\"\"\" return builder_layers.GraphLayer.Params().Set( name=name,", "its sub-layer one after another. TODO(zhifengc): Adds a more concrete", "_LN(self, name, dims, use_fused_layernorm=False): \"\"\"Layer norm.\"\"\" return layers.LayerNorm.Params().Set( name=name, input_dim=dims,", "distributed under the License is distributed on an \"AS IS\"", "s in shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta)", "A list of fetch names inside the sub-layer body. Returns:", "it is assumed to be a ' 'single device. 
Here", "of the sublayers should ' 'be sharded over device mesh.", "or those of the sublayers should ' 'be sharded over", "def _BatchParallel(self, name, sub): \"\"\"Splits the batch and compute the", "'layer2_out']) The layer returns the stack's final output together with", "Even though certain layers support FProp argument being None (e.g.,", "the batch and computes the forward pass on multiple devices.", "builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self, name, *subs): \"\"\"y = (f1, f2,", "shapes if fn_flops: flops = fn_flops(*shapes) else: flops = sum([s.size", "tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input", "return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def _Save(self, name):", "param for the composed layer. \"\"\" def ConcatTuples(tuples): # tuples", "override _BN() to tune the decay option. ########################################################################### def _BN(self,", "3, 4, 5, 6, 7]) which is a 1d mesh", "..., fetchM]. \"\"\" return builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches) def _Rematerialize(self,", "of tf.Tensor (one or more). Even though certain layers support", "y: x + list(y), tuples, [])) def ConcatMeta(tuples): return py_utils.NestedMap(", "such a support. The constructed layer is often a composition", "tuple(Tensor). fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops: A", "act(matmul(x, w) + b).\"\"\" # pyformat: disable return self._Seq( name,", "not share the same weight. Args: name: The layer name.", "out_shapes = fn_out(*shapes) if isinstance(out_shapes, tshape.Shape): out_shapes = (out_shapes,) else:", "def ConcatTuples(tuples): # tuples is a list of tuples. 
return", "builder takes a tuple of tf.Tensor (one or more) and", "def params(self): \"\"\"Returns the params upon which this layer is", "repeat, *subs): r\"\"\"Connects sub-layers sequentially and repeat multiple times. E.g.,", "return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self, name, *subs): \"\"\"y = (f1,", "more concrete example. \"\"\" @classmethod def Params(cls): \"\"\"The params of", "name): \"\"\"Identity. (t_1, ..., t_n) -> (t1, ..., t_n).\"\"\" return", "to the input tuple. Typically, fn is a very simple", "\"\"\" def ConcatTuples(tuples): # tuples is a list of tuples.", "noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self, name, idims, odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None):", "example, _Seq() helps to build a sequential layer that calls", "OR CONDITIONS OF ANY KIND, either express or implied. #", "'weight_split_dims_mapping', None, 'Relevant only if device_mesh above is not None.", "the License is distributed on an \"AS IS\" BASIS, #", "of the sublayers ' 'should be sharded over device mesh.", "points in the sub-layer body. [input1, input2, ..., inputN, fetch1,", "tuples. return tuple(functools.reduce(lambda x, y: x + list(y), tuples, []))", "# The following method returns a layer param, whose FProp", "keys): \"\"\"Returns a NestedMap with keys from fprop args.\"\"\" return", "None, 'A numpy.ndarray specifying the topology of a device mesh", "when to use and when not to use. Please discuss", "return tuple(functools.reduce(lambda x, y: x + list(y), tuples, [])) def", "None, 'Activations datatype to use. To enable bfloat16 activations for", "law or agreed to in writing, software # distributed under", "device. Here are some examples: ' 'np.array([0, 1, 2, 3,", "activations of fetch points in the sub-layer body. 
[input1, input2,", "fprop args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ########################################################################### # Basic nn", "_Arg(self, name, index): \"\"\"Picks index-th element. (t_1, ..., t_n) ->", "NestedMap with keys from fprop args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys)", "= device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default = qdomain return p", "dims: A list of int. i-th layer has dims[i] as", "override these methods with different options. E.g., a # sub-class", "designed to have minimal knobs. Sub-classes which needs to #", "can override _BN() to tune the decay option. ########################################################################### def", "body sub-layer. E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out', ...),", "from lingvo.core import py_utils from lingvo.core import tshape class Base:", "rematerialization on FProp of the body layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set( name=name,", "may obtain a copy of the License at # #", "the composed layer. \"\"\" l = [] for n, (i,", "list(y), tuples, []))) return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def", "upon which this layer is built.\"\"\" return self._params def __init__(self,", "layer. The bias is added to the last dimension of", "final output together with intermediate activations from layer1_out and layer2_out.", "logic as a sub-class of BaseLayer for all established layers", "may not use this file except in compliance with the", "int. i-th layer has dims[i] as its input dimension, and", "device mesh to place the ' 'computations onto. If device_mesh", "compute the forward pass on multiple devices. Args: name: This", "and returns a tuple of tf.Tensor (one or more). 
Even", "this file except in compliance with the License. # You", "(f1, f2, ..., fn)(x). We feed the input tuple to", "connected. y = act(matmul(x, w) + b).\"\"\" # pyformat: disable", "be flexible can override these methods with different options. E.g.,", "a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to", "sequentially and repeat multiple times. E.g., _Rep('foo', 2, sa, sb,", "# # Licensed under the Apache License, Version 2.0 (the", "sb2, sc2]. sa1 and sa2 have the same structure as", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "dimensions. act: The activation function. Returns: The param for the", "decay=0.99) def _LN(self, name, dims, use_fused_layernorm=False): \"\"\"Layer norm.\"\"\" return layers.LayerNorm.Params().Set(", "return self._params def __init__(self, params): # Sub-classes should put some", "of sub-layers. Returns: The param for the composed layer. \"\"\"", "sa2 do not share the same weight. Args: name: The", "for p in subs])) return self._Seq(name, *iterations) def _Seq(self, name,", "p.Define( 'activation_split_dims_mapping', None, 'Relevant only if device_mesh above is not", "and sa2 do not share the same weight. Args: name:", "activation of this layer or those of the sublayers '", "certain layers support FProp argument being None (e.g., Conv2DLayer), builder", "sub-layers into a data flow graph.\"\"\" return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints,", "@classmethod def Params(cls): \"\"\"The params of this layer.\"\"\" p =", "inside the sub-layer body. Returns: A layer whose outputs correspond", "Copyright 2020 The TensorFlow Authors. All Rights Reserved. # #", "License. # ============================================================================== \"\"\"A library to build composite layers. WARNING:", "The param for the composed layer. 
\"\"\" iterations = []", "name, body, fetches): \"\"\"Fetches saved activations in the body sub-layer.", "E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2', ...),", "sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM]. \"\"\"", "often a composition of multiple sub-layers connected in certain patterns.", "return self._Seq(name, *iterations) def _Seq(self, name, *subs): \"\"\"Connects sub-layers sequentially.\"\"\"", "def Params(cls): \"\"\"The params of this layer.\"\"\" p = hyperparams.InstantiableParams(cls)", "or implied. # See the License for the specific language", "self._params = params.Copy() ###################################################################### # Layers to compose multiple layers.", "sub=list(subs)) def _Graph(self, name, input_endpoints, output_endpoints, *signature_sub_param_list): \"\"\"Connects sub-layers into", "layers. A method in a builder class constructs a layer", "_Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns a DropoutLayer Params.\"\"\" if self.params.deterministic_dropout:", "complicated layers. \"\"\" import functools from lingvo.core import activations from", "in the compose layer. *subs: A list of sub-layers. Returns:", "out_shapes = shapes if fn_flops: flops = fn_flops(*shapes) else: flops", "lambda tuple(Tensor) -> tuple(Tensor). fn_out: A lambda tuple(tshape.Shape) -> output", "p @property def params(self): \"\"\"Returns the params upon which this", "on such a support. The constructed layer is often a", "of this layer or those of the sublayers should '", "this many times in the compose layer. *subs: A list", "support. 
The constructed layer is often a composition of multiple", "input tuple to all sub-layers and concatenates their output tuples", "disable return self._Seq( name, self._Linear('linear', idims, odims), self._Bias('bias', odims), self._Activation('act',", "input dimension, and dims[i+1] as its output dimensions. act: The", "a more concrete example. \"\"\" @classmethod def Params(cls): \"\"\"The params", "or not.') p.Define( 'fprop_dtype', None, 'Activations datatype to use. To", "'bfloat16 activations. Default is None, which will use float32 '", "dims[i+1] as its output dimensions. act: The activation function. Returns:", "= params.Copy() ###################################################################### # Layers to compose multiple layers. #", "f2, ..., fn)(x). We feed the input tuple to all", "to layers that support ' 'bfloat16 activations. Default is None,", "built.\"\"\" return self._params def __init__(self, params): # Sub-classes should put", "_Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...), _Output('output', ...)), ['layer1_out', 'layer2_out'])", "input2, ..., inputN, fetch1, ..., fetchM]. \"\"\" return builder_layers.BranchLayer.Params().Set( name=name,", "If None, we assume flops == sum of elements in", "prototyping but we advice to implement the logic as a", "the logic as a sub-class of BaseLayer for all established", "limitations under the License. # ============================================================================== \"\"\"A library to build", "filter_stride): \"\"\"Conv2D layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def", "be a ' 'single device. Here are some examples: '", "input_endpoints, output_endpoints, *signature_sub_param_list): \"\"\"Connects sub-layers into a data flow graph.\"\"\"", "or tuple(Tensor) to the input tuple. Typically, fn is a", "__init__. 
self._params = params.Copy() ###################################################################### # Layers to compose multiple", "repeat multiple times. E.g., _Rep('foo', 2, sa, sb, sc) constructs", "name): \"\"\"Print FProp input shape information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self,", "'should be sharded over device mesh. ') return p @property", "data flow graph.\"\"\" return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def", "the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM].", "tshape class Base: \"\"\"Model builder with commonly used layers. A", "def _Rematerialize(self, name, body): \"\"\"Forces rematerialization on FProp of the", "name=name, sub=list(subs)) def _Graph(self, name, input_endpoints, output_endpoints, *signature_sub_param_list): \"\"\"Connects sub-layers", "idx=[index]) def _Par(self, name, *subs): \"\"\"y = (f1, f2, ...,", "as its input dimension, and dims[i+1] as its output dimensions.", "sum([s.size for s in shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name,", "_BN(self, name, dims): \"\"\"Batch norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def", "those of the sublayers should ' 'be sharded over device", "self._Seq( name, self._Linear('linear', idims, odims), self._Bias('bias', odims), self._Activation('act', fn=act)) def", "concrete example. \"\"\" @classmethod def Params(cls): \"\"\"The params of this", "sc1, sa2, sb2, sc2]. sa1 and sa2 have the same", "= matmul([..., idims], [idims, odims]).\"\"\" p = builder_layers.LinearLayer.Params() p.name =", "layer name. fn: A lambda tuple(Tensor) -> tuple(Tensor). 
fn_out: A", "the activation and gradient can be accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name) def", "layers of feed-forward fully connected. Args: name: The layer name.", "6, 7]) which is a 1d mesh with 8 devices,", "when not to use. Please discuss w/ teammates before using", "params(self): \"\"\"Returns the params upon which this layer is built.\"\"\"", "to use. Please discuss w/ teammates before using it to", "use and when not to use. Please discuss w/ teammates", "sa2 have the same structure as the given sa, but", "..., t_n) -> (t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self, name,", "tuple. Typically, fn is a very simple python function. This", "_FC(self, name, idims, odims, act='RELU'): \"\"\"Feed-forward fully connected. y =", "in writing, software # distributed under the License is distributed", "constructs a layer with 6 layers sequentially connected: [sa1, sb1,", "# # Sub-classes are discouraged to override these composition method.", "device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name, fn='RELU'): \"\"\"Activation layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn,", "to place the ' 'computations onto. If device_mesh is None,", "of feed-forward fully connected. Args: name: The layer name. dims:", "a single Tensor or tuple(Tensor) to the input tuple. Typically,", "body. [input1, input2, ..., inputN, fetch1, ..., fetchM]. \"\"\" return", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "compose layer. *subs: A list of sub-layers. Returns: The param", "[4, 5, 6, 7]]) which is 2d matrix of 8", "we advice to implement the logic as a sub-class of", "using model builder, set fprop_dtype to ' 'tf.bfloat16, which will", "l += [self._FC('l%03d' % n, i, o, act)] return self._Seq(name,", "tuple(Tensor) to the input tuple. 
Typically, fn is a very", "the License for the specific language governing permissions and #", "name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def", "layer. \"\"\" l = [] for n, (i, o) in", "constructed by a builder takes a tuple of tf.Tensor (one", "ConcatMeta(tuples): return py_utils.NestedMap( flops=0, out_shapes=tuple( functools.reduce(lambda x, y: x +", "Tensor. # # These methods are designed to have minimal", "' 'tf.bfloat16, which will be propagated to layers that support", "out_shapes=tuple( functools.reduce(lambda x, y: x + list(y), tuples, []))) return", "tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn.", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "to tune the decay option. ########################################################################### def _BN(self, name, dims):", "\"\"\"Fetches saved activations in the body sub-layer. E.g.: _AddFetches('foo', _Seq(", "from fprop args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ########################################################################### # Basic", "Basic nn layers. # # The following method returns a", "'tf.bfloat16, which will be propagated to layers that support '", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "share the same weight. Args: name: The layer name. repeat:", "name=name) def _FC(self, name, idims, odims, act='RELU'): \"\"\"Feed-forward fully connected.", "constructs a layer param. 
FProp of a layer constructed by", "a data flow graph.\"\"\" return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list))", "else: out_shapes = shapes if fn_flops: flops = fn_flops(*shapes) else:", "Returns: The param for the composed layer. \"\"\" def FnMeta(*shapes):", "\"\"\"Returns a NestedMap with keys from fprop args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set(", "not None. If not None, it ' 'specifies how activation", "% n, i, o, act)] return self._Seq(name, *l) def _Conv2D(self,", "is built.\"\"\" return self._params def __init__(self, params): # Sub-classes should", "dimension of the input.\"\"\" return builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh,", "set fprop_dtype to ' 'tf.bfloat16, which will be propagated to", "\"\"\"Connects sub-layers sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def _Graph(self, name,", "lingvo.core import py_utils from lingvo.core import tshape class Base: \"\"\"Model", "and sa2 have the same structure as the given sa,", "sub: The sub-layer. Returns: A BatchParallel layer which splits the", "certain patterns. We expect to have a few methods to", "builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self, name, fn, fn_out=None,", "lingvo.core import tshape class Base: \"\"\"Model builder with commonly used", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "None. If not None, it ' 'specifies how weight of", "name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self, name): \"\"\"Identity. 
(t_1, ...,", "def FnMeta(*shapes): \"\"\"A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.\"\"\" if fn_out:", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "onto. If device_mesh is None, it is assumed to be", "act)] return self._Seq(name, *l) def _Conv2D(self, name, filter_shape, filter_stride): \"\"\"Conv2D", "in the inputs. Returns: The param for the composed layer.", "def _FC(self, name, idims, odims, act='RELU'): \"\"\"Feed-forward fully connected. y", "...), _Save('layer2_out', ...), _Output('output', ...)), ['layer1_out', 'layer2_out']) The layer returns", "sa, but sa1 and sa2 do not share the same", "we need to gain experience on when to use and", "build a sequential layer that calls its sub-layer one after", "single # Tensor and returns a single Tensor. # #", "output tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of", "builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def _Graph(self, name, input_endpoints, output_endpoints, *signature_sub_param_list): \"\"\"Connects", "_Layer('layer2', ...), _Save('layer2_out', ...), _Output('output', ...)), ['layer1_out', 'layer2_out']) The layer", "the Apache License, Version 2.0 (the \"License\"); # you may", "intermediate activations from layer1_out and layer2_out. Args: name: This layer's", "nn layers. # # The following method returns a layer", "different options. E.g., a # sub-class builder can override _BN()", "None, 'Relevant only if device_mesh above is not None. If", "filter_shape, filter_stride): \"\"\"Conv2D layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype)", "to have minimal knobs. Sub-classes which needs to # be", "structure as the given sa, but sa1 and sa2 do", "the forward pass on multiple devices. \"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub)", "the composed layer. 
\"\"\" def FnMeta(*shapes): \"\"\"A lambda tuple(tshape.Shape) ->", "sharded over device mesh. ') p.Define( 'activation_split_dims_mapping', None, 'Relevant only", "x, y: x + list(y), tuples, []))) return builder_layers.ParallelLayer.Params().Set( name=name,", "takes a tuple of tf.Tensor (one or more) and returns", "above is not None. If not None, it ' 'specifies", "on multiple devices. Args: name: This layer's name. sub: The", "5, 6, 7]]) which is 2d matrix of 8 '", "its input dimension, and dims[i+1] as its output dimensions. act:", "= [] for i in range(repeat): iterations.append(self._Seq('iter_%03d' % i, *[p.Copy()", "1, 2, 3, 4, 5, 6, 7]) which is a", "for the composed layer. \"\"\" def FnMeta(*shapes): \"\"\"A lambda tuple(tshape.Shape)", "None, which will use float32 ' 'activations.') # SPMD partition", "name, repeat, *subs): r\"\"\"Connects sub-layers sequentially and repeat multiple times.", "assume flops == sum of elements in the inputs. Returns:", "can't be serialized. Args: name: The layer name. fn: A", "on multiple devices. \"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self, name):", "= fn(x). Applies a fn: tuple(Tensor) -> a single Tensor", "return p def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias layer.", "under the License is distributed on an \"AS IS\" BASIS,", "though certain layers support FProp argument being None (e.g., Conv2DLayer),", "E.g., a # sub-class builder can override _BN() to tune", "that calls its sub-layer one after another. TODO(zhifengc): Adds a", "self._Activation('act', fn=act)) def _MLP(self, name, dims, act='RELU'): \"\"\"Multiple layers of", "specifying the topology of a device mesh to place the", "output tuples into one tuple. 
Args: name: The layer name.", "enumerate(zip(dims[:-1], dims[1:])): l += [self._FC('l%03d' % n, i, o, act)]", "a DropoutLayer Params.\"\"\" if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims)", "flops of fn. If None, we assume flops == sum", "will use float32 ' 'activations.') # SPMD partition related params.", "which needs to # be flexible can override these methods", "param, whose FProp takes a single # Tensor and returns", "layer1_out and layer2_out. Args: name: This layer's name. body: The", "a list of tuples. return tuple(functools.reduce(lambda x, y: x +", "methods with different options. E.g., a # sub-class builder can", "name, idims, odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear layer. y =", "w) + b).\"\"\" # pyformat: disable return self._Seq( name, self._Linear('linear',", "_Fn(self, name, fn, fn_out=None, fn_flops=None): \"\"\"y = fn(x). Applies a", "\"\"\" iterations = [] for i in range(repeat): iterations.append(self._Seq('iter_%03d' %", "many times in the compose layer. *subs: A list of", "merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self, name, fn, fn_out=None, fn_flops=None): \"\"\"y =", "which is a 1d mesh with 8 devices, ' 'np.array([[0,", "_BatchParallel(self, name, sub): \"\"\"Splits the batch and compute the forward", "body layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def _BatchParallel(self, name, sub):", "act='RELU'): \"\"\"Multiple layers of feed-forward fully connected. 
Args: name: The", "idims], [idims, odims]).\"\"\" p = builder_layers.LinearLayer.Params() p.name = name p.input_dims", "NestedMap{flops, out_shapes}.\"\"\" if fn_out: out_shapes = fn_out(*shapes) if isinstance(out_shapes, tshape.Shape):", "BaseLayer for all established layers as FnLayer can't be serialized.", "builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name, keys): \"\"\"Returns a NestedMap with keys", "return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def _Graph(self, name, input_endpoints, output_endpoints, *signature_sub_param_list):", "needs to # be flexible can override these methods with", "dims, use_fused_layernorm=False): \"\"\"Layer norm.\"\"\" return layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype)", "builder class constructs a layer param. FProp of a layer", "to the activations of fetch points in the sub-layer body.", "-> output tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape) -> estimated flops", "ANY KIND, either express or implied. # See the License", "the License. # You may obtain a copy of the", "body=body) def _BatchParallel(self, name, sub): \"\"\"Splits the batch and compute", "fprop_dtype to ' 'tf.bfloat16, which will be propagated to layers", "# See the License for the specific language governing permissions", "the sub-layer body. Returns: A layer whose outputs correspond to", "-> tuple(Tensor). fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops:", "and compute the forward pass on multiple devices. Args: name:", "and # limitations under the License. # ============================================================================== \"\"\"A library", "options. 
E.g., a # sub-class builder can override _BN() to", "builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self, name): \"\"\"Identity. (t_1,", "batch and computes the forward pass on multiple devices. \"\"\"", "= name p.input_dims = idims p.output_dims = odims p.fprop_dtype =", "from layer1_out and layer2_out. Args: name: This layer's name. body:", "fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn. If", "be accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name, body, fetches): \"\"\"Fetches", "layers from lingvo.core import py_utils from lingvo.core import tshape class", "return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self, name): \"\"\"Identity.", "devices, ' 'np.array([[0, 1, 2, 3], [4, 5, 6, 7]])", "a single Tensor. # # These methods are designed to", "# Lint as: python3 # Copyright 2020 The TensorFlow Authors.", "The sub-layer. fetches: A list of fetch names inside the", "filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name, shape): \"\"\"Reshape inputs to", "layer name. repeat: Repeat \\*subs this many times in the", "fn='RELU'): \"\"\"Activation layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self, name, idims,", "name: The layer name. dims: A list of int. i-th", "a very simple python function. This layer can be used", "layer has dims[i] as its input dimension, and dims[i+1] as", "in certain patterns. We expect to have a few methods", "as FnLayer can't be serialized. Args: name: The layer name.", "None, we assume flops == sum of elements in the", "== sum of elements in the inputs. Returns: The param", "fully connected. 
y = act(matmul(x, w) + b).\"\"\" # pyformat:", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "the composed layer. \"\"\" iterations = [] for i in", "writing, software # distributed under the License is distributed on", "of this layer or those of the sublayers ' 'should", "given sa, but sa1 and sa2 do not share the", "is often a composition of multiple sub-layers connected in certain", "fn=act)) def _MLP(self, name, dims, act='RELU'): \"\"\"Multiple layers of feed-forward", "list of sub-layers. Returns: The param for the composed layer.", "information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name, keys): \"\"\"Returns a NestedMap", "_Rep('foo', 2, sa, sb, sc) constructs a layer with 6", "is None, it is assumed to be a ' 'single", "shape): \"\"\"Reshape inputs to the shape provided.\"\"\" return builder_layers.ReshapeLayer.Params().Set(name=name, shape=shape)", "-> (t1, ..., t_n).\"\"\" return self._Seq(name) def _Arg(self, name, index):", "The builder pattern is still experimental and we need to", "'layers built using model builder, set fprop_dtype to ' 'tf.bfloat16,", "sub-layer. fetches: A list of fetch names inside the sub-layer", "return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name, keys): \"\"\"Returns a NestedMap with", "layers support FProp argument being None (e.g., Conv2DLayer), builder should", "layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name, shape): \"\"\"Reshape", "out_shapes}.\"\"\" if fn_out: out_shapes = fn_out(*shapes) if isinstance(out_shapes, tshape.Shape): out_shapes", "use. To enable bfloat16 activations for ' 'layers built using", "layer. 
y = matmul([..., idims], [idims, odims]).\"\"\" p = builder_layers.LinearLayer.Params()", "gradient can be accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name, body,", "(e.g., Conv2DLayer), builder should not depend on such a support.", "\"\"\"Feed-forward fully connected. y = act(matmul(x, w) + b).\"\"\" #", "name, fn='RELU'): \"\"\"Activation layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self, name,", "-> a single Tensor or tuple(Tensor) to the input tuple.", "place the ' 'computations onto. If device_mesh is None, it", "flops = sum([s.size for s in shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes)", "p.Define( 'fprop_dtype', None, 'Activations datatype to use. To enable bfloat16", "and layer2_out. Args: name: This layer's name. body: The sub-layer.", "datatype to use. To enable bfloat16 activations for ' 'layers", "2d matrix of 8 ' 'devices.') p.Define( 'weight_split_dims_mapping', None, 'Relevant", "name: This layer's name. body: The sub-layer. fetches: A list", "is a list of tuples. return tuple(functools.reduce(lambda x, y: x", "isinstance(out_shapes, tshape.Shape): out_shapes = (out_shapes,) else: out_shapes = shapes if", "tuples is a list of tuples. return tuple(functools.reduce(lambda x, y:", "(out_shapes,) else: out_shapes = shapes if fn_flops: flops = fn_flops(*shapes)", "matrix of 8 ' 'devices.') p.Define( 'weight_split_dims_mapping', None, 'Relevant only", "fn_flops=None): \"\"\"y = fn(x). Applies a fn: tuple(Tensor) -> a", "builder, set fprop_dtype to ' 'tf.bfloat16, which will be propagated", "the License. 
# ============================================================================== \"\"\"A library to build composite layers.", "\"\"\"Batch norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def _LN(self, name, dims,", "weight_split_dims_mapping p.qdomain.default = qdomain return p def _Bias(self, name, dims,", "the inputs. Returns: The param for the composed layer. \"\"\"", "the sublayers ' 'should be sharded over device mesh. ')", "Sub-classes should put some options common to many layers in", "The layer name. fn: A lambda tuple(Tensor) -> tuple(Tensor). fn_out:", "fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the", "support FProp argument being None (e.g., Conv2DLayer), builder should not", "returns the stack's final output together with intermediate activations from", "the activations of fetch points in the sub-layer body. [input1,", "return layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name, keep_prob,", "inputN, fetch1, ..., fetchM]. \"\"\" return builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches)", "y = act(matmul(x, w) + b).\"\"\" # pyformat: disable return", "list of tuples. 
return tuple(functools.reduce(lambda x, y: x + list(y),", "_Output('output', ...)), ['layer1_out', 'layer2_out']) The layer returns the stack's final", "has dims[i] as its input dimension, and dims[i+1] as its", "for i in range(repeat): iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p", "return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self, name): \"\"\"Print FProp input shape", "p in subs])) return self._Seq(name, *iterations) def _Seq(self, name, *subs):", "p.Define( 'weight_split_dims_mapping', None, 'Relevant only if device_mesh above is not", "(t_1, ..., t_n) -> (t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self,", "device mesh. ') p.Define( 'activation_split_dims_mapping', None, 'Relevant only if device_mesh", "# Copyright 2020 The TensorFlow Authors. All Rights Reserved. #", "and repeat multiple times. E.g., _Rep('foo', 2, sa, sb, sc)", "*subs: A list of sub-layers. Returns: The param for the", "whose FProp takes a single # Tensor and returns a", "lambda tuple(tshape.Shape) -> estimated flops of fn. If None, we", "'be sharded over device mesh. ') p.Define( 'activation_split_dims_mapping', None, 'Relevant", "sub-layers sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def _Graph(self, name, input_endpoints,", "_CreateNestedMap(self, name, keys): \"\"\"Returns a NestedMap with keys from fprop", "*l) def _Conv2D(self, name, filter_shape, filter_stride): \"\"\"Conv2D layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set(", "a sub-class of BaseLayer for all established layers as FnLayer", "a few methods to facilitate building these patterns. For example,", "act: The activation function. Returns: The param for the composed", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "_Rep(self, name, repeat, *subs): r\"\"\"Connects sub-layers sequentially and repeat multiple", "device mesh. 
') return p @property def params(self): \"\"\"Returns the", "tuples into one tuple. Args: name: The layer name. *subs:", "feed-forward fully connected. Args: name: The layer name. dims: A", "output_endpoints, *signature_sub_param_list): \"\"\"Connects sub-layers into a data flow graph.\"\"\" return", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "= idims p.output_dims = odims p.fprop_dtype = self.params.fprop_dtype p.device_mesh =", "_Save(self, name): \"\"\"Returns a layer from which the activation and", "def _MLP(self, name, dims, act='RELU'): \"\"\"Multiple layers of feed-forward fully", "returns a layer param, whose FProp takes a single #", "p.Define( 'device_mesh', None, 'A numpy.ndarray specifying the topology of a", "qdomain return p def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias", "fn_flops(*shapes) else: flops = sum([s.size for s in shapes]) return", "_Par(self, name, *subs): \"\"\"y = (f1, f2, ..., fn)(x). We", "iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs])) return self._Seq(name,", "or more) and returns a tuple of tf.Tensor (one or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "layer from which the activation and gradient can be accessed.\"\"\"", "return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def _Save(self, name): \"\"\"Returns a layer", "a support. The constructed layer is often a composition of", "\"\"\"Linear layer. y = matmul([..., idims], [idims, odims]).\"\"\" p =", "p.output_dims = odims p.fprop_dtype = self.params.fprop_dtype p.device_mesh = device_mesh p.weight_split_dims_mapping", "r\"\"\"Connects sub-layers sequentially and repeat multiple times. E.g., _Rep('foo', 2,", "is added to the last dimension of the input.\"\"\" return", "'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is", "support ' 'bfloat16 activations. Default is None, which will use", "Layers to compose multiple layers. 
# # Sub-classes are discouraged", "params of this layer.\"\"\" p = hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False, 'Used", "composition of multiple sub-layers connected in certain patterns. We expect", "The layer name. *subs: A list of sub-layers. Returns: The", "still experimental and we need to gain experience on when", "device_mesh is None, it is assumed to be a '", "forward pass on multiple devices. \"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def", "Default is None, which will use float32 ' 'activations.') #", "builder should not depend on such a support. The constructed", "be propagated to layers that support ' 'bfloat16 activations. Default", "Repeat \\*subs this many times in the compose layer. *subs:", "_MLP(self, name, dims, act='RELU'): \"\"\"Multiple layers of feed-forward fully connected.", "input tuple. Typically, fn is a very simple python function.", "use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns a DropoutLayer", "p.input_dims = idims p.output_dims = odims p.fprop_dtype = self.params.fprop_dtype p.device_mesh", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "p = builder_layers.LinearLayer.Params() p.name = name p.input_dims = idims p.output_dims", "\"\"\" return builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches) def _Rematerialize(self, name, body):", "of this layer.\"\"\" p = hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False, 'Used deterministic", "Rights Reserved. # # Licensed under the Apache License, Version", "_Graph(self, name, input_endpoints, output_endpoints, *signature_sub_param_list): \"\"\"Connects sub-layers into a data", "'A numpy.ndarray specifying the topology of a device mesh to", "assumed to be a ' 'single device. 
Here are some", "specific language governing permissions and # limitations under the License.", "this layer or those of the sublayers ' 'should be", "these composition method. ###################################################################### def _Rep(self, name, repeat, *subs): r\"\"\"Connects", "keys=keys) ########################################################################### # Basic nn layers. # # The following", "accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name, body, fetches): \"\"\"Fetches saved", "devices. \"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self, name): \"\"\"Print FProp", "can override these methods with different options. E.g., a #", "p.name = name p.input_dims = idims p.output_dims = odims p.fprop_dtype", "only if device_mesh above is not None. If not None,", "sub): \"\"\"Splits the batch and compute the forward pass on", "A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape)", "# you may not use this file except in compliance", "for s in shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn,", "None (e.g., Conv2DLayer), builder should not depend on such a", "it ' 'specifies how weight of this layer or those", "implement the logic as a sub-class of BaseLayer for all", "last dimension of the input.\"\"\" return builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype,", "not None, it ' 'specifies how weight of this layer", "method. 
###################################################################### def _Rep(self, name, repeat, *subs): r\"\"\"Connects sub-layers sequentially", "tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.\"\"\" if fn_out: out_shapes = fn_out(*shapes) if", "p = hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False, 'Used deterministic dropout or not.')", "a ' 'single device. Here are some examples: ' 'np.array([0,", "partition related params. p.Define( 'device_mesh', None, 'A numpy.ndarray specifying the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "and gradient can be accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name,", "sub-layers connected in certain patterns. We expect to have a", "\"\"\"Layer norm.\"\"\" return layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self,", "input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns a", "= (f1, f2, ..., fn)(x). We feed the input tuple", "name=name, body=body, fetches=fetches) def _Rematerialize(self, name, body): \"\"\"Forces rematerialization on", "# ============================================================================== \"\"\"A library to build composite layers. WARNING: The", "elements in the inputs. Returns: The param for the composed", "###################################################################### def _Rep(self, name, repeat, *subs): r\"\"\"Connects sub-layers sequentially and", "of fetch names inside the sub-layer body. Returns: A layer", "are some examples: ' 'np.array([0, 1, 2, 3, 4, 5,", "self.params.fprop_dtype p.device_mesh = device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default = qdomain", "activations from layer1_out and layer2_out. 
Args: name: This layer's name.", "keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self, name, idims, odims, device_mesh=None, weight_split_dims_mapping=None,", "under the Apache License, Version 2.0 (the \"License\"); # you", "device_mesh above is not None. If not None, it '", "params): # Sub-classes should put some options common to many", "filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name, shape): \"\"\"Reshape inputs to the", "return builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def _BatchParallel(self, name, sub): \"\"\"Splits the", "_BN() to tune the decay option. ########################################################################### def _BN(self, name,", "dim=dims, decay=0.99) def _LN(self, name, dims, use_fused_layernorm=False): \"\"\"Layer norm.\"\"\" return", "A lambda tuple(Tensor) -> tuple(Tensor). fn_out: A lambda tuple(tshape.Shape) ->", "together with intermediate activations from layer1_out and layer2_out. Args: name:", "########################################################################### def _BN(self, name, dims): \"\"\"Batch norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name, dim=dims,", "the ' 'computations onto. If device_mesh is None, it is", "# # The following method returns a layer param, whose", "dims): \"\"\"Batch norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def _LN(self, name,", "in subs])) return self._Seq(name, *iterations) def _Seq(self, name, *subs): \"\"\"Connects", "output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self, name): \"\"\"Identity. 
(t_1, ..., t_n) ->", "if fn_out: out_shapes = fn_out(*shapes) if isinstance(out_shapes, tshape.Shape): out_shapes =", "builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def _BatchParallel(self, name, sub): \"\"\"Splits the batch", "name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns", "= hyperparams.InstantiableParams(cls) p.Define('deterministic_dropout', False, 'Used deterministic dropout or not.') p.Define(", "if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name,", "layers. # # Sub-classes are discouraged to override these composition", "use_fused_layernorm=False): \"\"\"Layer norm.\"\"\" return layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def", "= fn_flops(*shapes) else: flops = sum([s.size for s in shapes])", "facilitate building these patterns. For example, _Seq() helps to build", "fetch names inside the sub-layer body. Returns: A layer whose", "with 6 layers sequentially connected: [sa1, sb1, sc1, sa2, sb2,", "layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name,", "composed layer. \"\"\" iterations = [] for i in range(repeat):", "stack's final output together with intermediate activations from layer1_out and", "following method returns a layer param, whose FProp takes a", "for prototyping but we advice to implement the logic as", "\"\"\"Bias layer. The bias is added to the last dimension", "None, it is assumed to be a ' 'single device.", "this layer or those of the sublayers should ' 'be", "layers. 
\"\"\" import functools from lingvo.core import activations from lingvo.core", "outputs correspond to the activations of fetch points in the", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "name. dims: A list of int. i-th layer has dims[i]", "as a sub-class of BaseLayer for all established layers as", "' 'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which", "but sa1 and sa2 do not share the same weight.", "Returns: The param for the composed layer. \"\"\" iterations =", "these patterns. For example, _Seq() helps to build a sequential", "...)), ['layer1_out', 'layer2_out']) The layer returns the stack's final output", "dims[i] as its input dimension, and dims[i+1] as its output", "idims, odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear layer. y = matmul([...,", "composite layers. WARNING: The builder pattern is still experimental and", "enable bfloat16 activations for ' 'layers built using model builder,", "to use and when not to use. Please discuss w/", "estimated flops of fn. If None, we assume flops ==", "_Save('layer2_out', ...), _Output('output', ...)), ['layer1_out', 'layer2_out']) The layer returns the", "of a device mesh to place the ' 'computations onto.", "return builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches) def _Rematerialize(self, name, body): \"\"\"Forces", "is not None. If not None, it ' 'specifies how", "'activations.') # SPMD partition related params. p.Define( 'device_mesh', None, 'A", "these methods with different options. E.g., a # sub-class builder", "7]]) which is 2d matrix of 8 ' 'devices.') p.Define(", "Base: \"\"\"Model builder with commonly used layers. 
A method in", "name, sub): \"\"\"Splits the batch and compute the forward pass", "of the body layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def _BatchParallel(self,", "being None (e.g., Conv2DLayer), builder should not depend on such", "dropout or not.') p.Define( 'fprop_dtype', None, 'Activations datatype to use.", "To enable bfloat16 activations for ' 'layers built using model", "SPMD partition related params. p.Define( 'device_mesh', None, 'A numpy.ndarray specifying", "DropoutLayer Params.\"\"\" if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return", "flexible can override these methods with different options. E.g., a", "p def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias layer. The", "########################################################################### # Basic nn layers. # # The following method", "\"\"\" @classmethod def Params(cls): \"\"\"The params of this layer.\"\"\" p", "return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def _LN(self, name, dims, use_fused_layernorm=False): \"\"\"Layer", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Authors. All Rights Reserved. # # Licensed under the Apache", "mesh. ') return p @property def params(self): \"\"\"Returns the params", "which this layer is built.\"\"\" return self._params def __init__(self, params):", "x + list(y), tuples, [])) def ConcatMeta(tuples): return py_utils.NestedMap( flops=0,", "many layers in __init__. self._params = params.Copy() ###################################################################### # Layers", "sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. 
sa1 and", "method returns a layer param, whose FProp takes a single", "return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self, name, idims,", "fn)(x). We feed the input tuple to all sub-layers and", "def _Id(self, name): \"\"\"Identity. (t_1, ..., t_n) -> (t1, ...,", "into a data flow graph.\"\"\" return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints,", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "__init__(self, params): # Sub-classes should put some options common to", "of fn. If None, we assume flops == sum of", "' 'bfloat16 activations. Default is None, which will use float32", "matmul([..., idims], [idims, odims]).\"\"\" p = builder_layers.LinearLayer.Params() p.name = name", "Args: name: The layer name. dims: A list of int.", "can be used for prototyping but we advice to implement", "tuple(functools.reduce(lambda x, y: x + list(y), tuples, [])) def ConcatMeta(tuples):", "lingvo.core import hyperparams from lingvo.core import layers from lingvo.core import", "into one tuple. Args: name: The layer name. *subs: A", "layer param. FProp of a layer constructed by a builder", "some examples: ' 'np.array([0, 1, 2, 3, 4, 5, 6,", "_Rematerialize(self, name, body): \"\"\"Forces rematerialization on FProp of the body", "dimension, and dims[i+1] as its output dimensions. act: The activation", "A layer whose outputs correspond to the activations of fetch", "self._Seq(name, *iterations) def _Seq(self, name, *subs): \"\"\"Connects sub-layers sequentially.\"\"\" return", "builder with commonly used layers. 
A method in a builder", "layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def _BatchParallel(self, name, sub): \"\"\"Splits", "put some options common to many layers in __init__. self._params", "list(y), tuples, [])) def ConcatMeta(tuples): return py_utils.NestedMap( flops=0, out_shapes=tuple( functools.reduce(lambda", "dims, device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias layer. The bias is added to", "t_n).\"\"\" return self._Seq(name) def _Arg(self, name, index): \"\"\"Picks index-th element.", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "n, i, o, act)] return self._Seq(name, *l) def _Conv2D(self, name,", "name, self._Linear('linear', idims, odims), self._Bias('bias', odims), self._Activation('act', fn=act)) def _MLP(self,", "a composition of multiple sub-layers connected in certain patterns. We", "mesh to place the ' 'computations onto. If device_mesh is", "used layers. A method in a builder class constructs a", "one tuple. Args: name: The layer name. *subs: A list", "sub=list(signature_sub_param_list)) def _Id(self, name): \"\"\"Identity. (t_1, ..., t_n) -> (t1,", "of tf.Tensor (one or more) and returns a tuple of", "def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias layer. The bias", "\"\"\"Conv2D layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self,", "batch and compute the forward pass on multiple devices. Args:", "\"\"\" import functools from lingvo.core import activations from lingvo.core import", "builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def _Save(self, name): \"\"\"Returns a layer from", "b).\"\"\" # pyformat: disable return self._Seq( name, self._Linear('linear', idims, odims),", "'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is", "fn_out=None, fn_flops=None): \"\"\"y = fn(x). 
Applies a fn: tuple(Tensor) ->", "sub-layer. E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2',", "the batch and compute the forward pass on multiple devices.", "p.qdomain.default = qdomain return p def _Bias(self, name, dims, device_mesh=None,", "[] for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])): l +=", "tuple of tf.Tensor (one or more) and returns a tuple", "p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default = qdomain return p def _Bias(self,", "output dimensions. act: The activation function. Returns: The param for", "the last dimension of the input.\"\"\" return builder_layers.BiasLayer.Params().Set( name=name, dims=dims,", "of int. i-th layer has dims[i] as its input dimension,", "as: python3 # Copyright 2020 The TensorFlow Authors. All Rights", "def _LN(self, name, dims, use_fused_layernorm=False): \"\"\"Layer norm.\"\"\" return layers.LayerNorm.Params().Set( name=name,", "serialized. Args: name: The layer name. fn: A lambda tuple(Tensor)", "fn_out: out_shapes = fn_out(*shapes) if isinstance(out_shapes, tshape.Shape): out_shapes = (out_shapes,)", "which the activation and gradient can be accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name)", "p.fprop_dtype = self.params.fprop_dtype p.device_mesh = device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default", "to many layers in __init__. 
self._params = params.Copy() ###################################################################### #", "layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self, name, idims, odims,", "tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape) -> estimated", "gain experience on when to use and when not to", "keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self,", "use this file except in compliance with the License. #", "index-th element. (t_1, ..., t_n) -> (t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index])", "name. repeat: Repeat \\*subs this many times in the compose", "keys from fprop args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ########################################################################### #", "The TensorFlow Authors. All Rights Reserved. # # Licensed under", "mesh with 8 devices, ' 'np.array([[0, 1, 2, 3], [4,", "We expect to have a few methods to facilitate building", "to gain experience on when to use and when not", "the forward pass on multiple devices. Args: name: This layer's", "builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches) def _Rematerialize(self, name, body): \"\"\"Forces rematerialization", "i in range(repeat): iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in", "to implement the logic as a sub-class of BaseLayer for", "multiple devices. \"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self, name): \"\"\"Print", "tuple(Tensor) -> tuple(Tensor). 
fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape)", "w/ teammates before using it to build complicated layers. \"\"\"", "' 'single device. Here are some examples: ' 'np.array([0, 1,", "= self.params.fprop_dtype p.device_mesh = device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default =", "be sharded over device mesh. ') return p @property def", "can be accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name, body, fetches):", "def _Rep(self, name, repeat, *subs): r\"\"\"Connects sub-layers sequentially and repeat", "def _Linear(self, name, idims, odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear layer.", "7]) which is a 1d mesh with 8 devices, '", "fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops: A lambda", "[self._FC('l%03d' % n, i, o, act)] return self._Seq(name, *l) def", "helps to build a sequential layer that calls its sub-layer", "self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob,", "in compliance with the License. # You may obtain a", "tuples, []))) return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self,", "software # distributed under the License is distributed on an", "a 1d mesh with 8 devices, ' 'np.array([[0, 1, 2,", "hyperparams from lingvo.core import layers from lingvo.core import py_utils from", "self._params def __init__(self, params): # Sub-classes should put some options", "activation and gradient can be accessed.\"\"\" return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self,", "body. Returns: A layer whose outputs correspond to the activations", "\"\"\"A library to build composite layers. 
WARNING: The builder pattern", "import hyperparams from lingvo.core import layers from lingvo.core import py_utils", "params. p.Define( 'device_mesh', None, 'A numpy.ndarray specifying the topology of", "connected in certain patterns. We expect to have a few", "name. body: The sub-layer. fetches: A list of fetch names", "connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2", "pattern is still experimental and we need to gain experience", "The param for the composed layer. \"\"\" l = []", "'fprop_dtype', None, 'Activations datatype to use. To enable bfloat16 activations", "A list of int. i-th layer has dims[i] as its", "Params.\"\"\" if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set(", "6 layers sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2].", "multiple times. E.g., _Rep('foo', 2, sa, sb, sc) constructs a", "name. *subs: A list of sub-layers. Returns: The param for", "a layer from which the activation and gradient can be", "flow graph.\"\"\" return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self,", "layer's name. sub: The sub-layer. Returns: A BatchParallel layer which", "layers that support ' 'bfloat16 activations. Default is None, which", "The constructed layer is often a composition of multiple sub-layers", "for all established layers as FnLayer can't be serialized. Args:", "the input.\"\"\" return builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def", "build composite layers. 
WARNING: The builder pattern is still experimental", "[]))) return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self, name,", "few methods to facilitate building these patterns. For example, _Seq()", "def ConcatMeta(tuples): return py_utils.NestedMap( flops=0, out_shapes=tuple( functools.reduce(lambda x, y: x", "not None. If not None, it ' 'specifies how weight", "expect to have a few methods to facilitate building these", "sub=sub) def _PrintShape(self, name): \"\"\"Print FProp input shape information.\"\"\" return", "the compose layer. *subs: A list of sub-layers. Returns: The", "with the License. # You may obtain a copy of", "very simple python function. This layer can be used for", "(one or more). Even though certain layers support FProp argument", "y: x + list(y), tuples, []))) return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs),", "do not share the same weight. Args: name: The layer", "a layer param, whose FProp takes a single # Tensor", "patterns. We expect to have a few methods to facilitate", "in the sub-layer body. [input1, input2, ..., inputN, fetch1, ...,", "bias is added to the last dimension of the input.\"\"\"", "express or implied. # See the License for the specific", "name, dims, device_mesh=None, weight_split_dims_mapping=None): \"\"\"Bias layer. The bias is added", "except in compliance with the License. # You may obtain", "') return p @property def params(self): \"\"\"Returns the params upon", "input shape information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name, keys): \"\"\"Returns", "_Reshape(self, name, shape): \"\"\"Reshape inputs to the shape provided.\"\"\" return", "of tuples. return tuple(functools.reduce(lambda x, y: x + list(y), tuples,", "'computations onto. If device_mesh is None, it is assumed to", "for the composed layer. 
\"\"\" iterations = [] for i", "name, idims, odims, act='RELU'): \"\"\"Feed-forward fully connected. y = act(matmul(x,", "not None, it ' 'specifies how activation of this layer", "a tuple of tf.Tensor (one or more). Even though certain", "a # sub-class builder can override _BN() to tune the", "sum of elements in the inputs. Returns: The param for", "= builder_layers.LinearLayer.Params() p.name = name p.input_dims = idims p.output_dims =", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "*subs): r\"\"\"Connects sub-layers sequentially and repeat multiple times. E.g., _Rep('foo',", "def __init__(self, params): # Sub-classes should put some options common", "sublayers ' 'should be sharded over device mesh. ') return", "layers sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1", "CONDITIONS OF ANY KIND, either express or implied. # See", "_Linear(self, name, idims, odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear layer. y", "if isinstance(out_shapes, tshape.Shape): out_shapes = (out_shapes,) else: out_shapes = shapes", "is None, which will use float32 ' 'activations.') # SPMD", "layer's name. body: The sub-layer. fetches: A list of fetch", "def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns a DropoutLayer Params.\"\"\" if", "import layers from lingvo.core import py_utils from lingvo.core import tshape", "flops == sum of elements in the inputs. Returns: The", "a NestedMap with keys from fprop args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set( name=name,", "sub-layers and concatenates their output tuples into one tuple. Args:", "more). Even though certain layers support FProp argument being None", "under the License. 
# ============================================================================== \"\"\"A library to build composite", "= qdomain return p def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None):", "from lingvo.core import hyperparams from lingvo.core import layers from lingvo.core", "'Used deterministic dropout or not.') p.Define( 'fprop_dtype', None, 'Activations datatype", "discuss w/ teammates before using it to build complicated layers.", "how activation of this layer or those of the sublayers", "def _Par(self, name, *subs): \"\"\"y = (f1, f2, ..., fn)(x).", "'specifies how weight of this layer or those of the", "'single device. Here are some examples: ' 'np.array([0, 1, 2,", "to compose multiple layers. # # Sub-classes are discouraged to", "not.') p.Define( 'fprop_dtype', None, 'Activations datatype to use. To enable", "'Activations datatype to use. To enable bfloat16 activations for '", "+ b).\"\"\" # pyformat: disable return self._Seq( name, self._Linear('linear', idims,", "Args: name: The layer name. repeat: Repeat \\*subs this many", "# tuples is a list of tuples. return tuple(functools.reduce(lambda x,", "from lingvo.core import tshape class Base: \"\"\"Model builder with commonly", "have the same structure as the given sa, but sa1", "with intermediate activations from layer1_out and layer2_out. Args: name: This", "methods are designed to have minimal knobs. Sub-classes which needs", "' 'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which", "-> (t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self, name, *subs): \"\"\"y", "# sub-class builder can override _BN() to tune the decay", "Sub-classes are discouraged to override these composition method. ###################################################################### def", "use float32 ' 'activations.') # SPMD partition related params. 
p.Define(", "+ list(y), tuples, []))) return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta)", "= (out_shapes,) else: out_shapes = shapes if fn_flops: flops =", "name, fn, fn_out=None, fn_flops=None): \"\"\"y = fn(x). Applies a fn:", "connected. Args: name: The layer name. dims: A list of", "def _PrintShape(self, name): \"\"\"Print FProp input shape information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name)", "fully connected. Args: name: The layer name. dims: A list", "params.Copy() ###################################################################### # Layers to compose multiple layers. # #", "odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear layer. y = matmul([..., idims],", "weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name, fn='RELU'): \"\"\"Activation layer.\"\"\" return activations.ActivationLayer.Params().Set(activation=fn, name=name)", "model builder, set fprop_dtype to ' 'tf.bfloat16, which will be", "builder pattern is still experimental and we need to gain", "norm.\"\"\" return layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name,", "Lint as: python3 # Copyright 2020 The TensorFlow Authors. All", "with different options. E.g., a # sub-class builder can override", "to override these composition method. ###################################################################### def _Rep(self, name, repeat,", "layer or those of the sublayers ' 'should be sharded", "of 8 ' 'devices.') p.Define( 'weight_split_dims_mapping', None, 'Relevant only if", "class Base: \"\"\"Model builder with commonly used layers. A method", "commonly used layers. A method in a builder class constructs", "common to many layers in __init__. 
self._params = params.Copy() ######################################################################", "shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def _Save(self,", "t_n) -> (t1, ..., t_n).\"\"\" return self._Seq(name) def _Arg(self, name,", "this layer is built.\"\"\" return self._params def __init__(self, params): #", "body, fetches): \"\"\"Fetches saved activations in the body sub-layer. E.g.:", "'activation_split_dims_mapping', None, 'Relevant only if device_mesh above is not None.", "name: This layer's name. sub: The sub-layer. Returns: A BatchParallel", "teammates before using it to build complicated layers. \"\"\" import", "5, 6, 7]) which is a 1d mesh with 8", "to ' 'tf.bfloat16, which will be propagated to layers that", "Args: name: The layer name. *subs: A list of sub-layers.", "name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self, name, fn, fn_out=None, fn_flops=None):", "example. \"\"\" @classmethod def Params(cls): \"\"\"The params of this layer.\"\"\"", "p.device_mesh = device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default = qdomain return", "from which the activation and gradient can be accessed.\"\"\" return", "The layer name. dims: A list of int. i-th layer", "of elements in the inputs. Returns: The param for the", "topology of a device mesh to place the ' 'computations", "1, 2, 3], [4, 5, 6, 7]]) which is 2d", "fn(x). Applies a fn: tuple(Tensor) -> a single Tensor or", "_Conv2D(self, name, filter_shape, filter_stride): \"\"\"Conv2D layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape,", "keep_prob, noise_shape_broadcast_dims=None): \"\"\"Returns a DropoutLayer Params.\"\"\" if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set(", "\"\"\"y = (f1, f2, ..., fn)(x). 
We feed the input", "sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self, name, fn, fn_out=None, fn_flops=None): \"\"\"y", "as its output dimensions. act: The activation function. Returns: The", "self._Bias('bias', odims), self._Activation('act', fn=act)) def _MLP(self, name, dims, act='RELU'): \"\"\"Multiple", "_PrintShape(self, name): \"\"\"Print FProp input shape information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name) def", "fetchM]. \"\"\" return builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches) def _Rematerialize(self, name,", "This layer's name. sub: The sub-layer. Returns: A BatchParallel layer", "in range(repeat): iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs]))", "return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ########################################################################### # Basic nn layers. #", "decay option. ########################################################################### def _BN(self, name, dims): \"\"\"Batch norm.\"\"\" return", "sa1 and sa2 have the same structure as the given", "Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor)", "sharded over device mesh. ') return p @property def params(self):", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])): l += [self._FC('l%03d'", "name. sub: The sub-layer. Returns: A BatchParallel layer which splits", "pass on multiple devices. \"\"\" return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self,", "correspond to the activations of fetch points in the sub-layer", "and we need to gain experience on when to use", "but we advice to implement the logic as a sub-class", "t_n) -> (t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self, name, *subs):", "another. 
TODO(zhifengc): Adds a more concrete example. \"\"\" @classmethod def", "name=name, body=body) def _BatchParallel(self, name, sub): \"\"\"Splits the batch and", "Sub-classes which needs to # be flexible can override these", "...), _Layer('layer2', ...), _Save('layer2_out', ...), _Output('output', ...)), ['layer1_out', 'layer2_out']) The", "patterns. For example, _Seq() helps to build a sequential layer", "devices. Args: name: This layer's name. sub: The sub-layer. Returns:", "else: flops = sum([s.size for s in shapes]) return py_utils.NestedMap(flops=flops,", "of BaseLayer for all established layers as FnLayer can't be", "def _BN(self, name, dims): \"\"\"Batch norm.\"\"\" return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99)", "fetches=fetches) def _Rematerialize(self, name, body): \"\"\"Forces rematerialization on FProp of", "\"\"\"Returns a DropoutLayer Params.\"\"\" if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob,", "Version 2.0 (the \"License\"); # you may not use this", "over device mesh. ') return p @property def params(self): \"\"\"Returns", "lingvo.core import layers from lingvo.core import py_utils from lingvo.core import", "layer that calls its sub-layer one after another. TODO(zhifengc): Adds", "for the composed layer. 
\"\"\" def ConcatTuples(tuples): # tuples is", "in shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def", "_AddFetches(self, name, body, fetches): \"\"\"Fetches saved activations in the body", "added to the last dimension of the input.\"\"\" return builder_layers.BiasLayer.Params().Set(", "p.Define('deterministic_dropout', False, 'Used deterministic dropout or not.') p.Define( 'fprop_dtype', None,", "deterministic dropout or not.') p.Define( 'fprop_dtype', None, 'Activations datatype to", "or those of the sublayers ' 'should be sharded over", "by applicable law or agreed to in writing, software #", "options common to many layers in __init__. self._params = params.Copy()", "layers as FnLayer can't be serialized. Args: name: The layer", "whose outputs correspond to the activations of fetch points in", "return p @property def params(self): \"\"\"Returns the params upon which", "We feed the input tuple to all sub-layers and concatenates", "device_mesh=None, weight_split_dims_mapping=None, qdomain=None): \"\"\"Linear layer. y = matmul([..., idims], [idims,", "composed layer. \"\"\" l = [] for n, (i, o)", "experience on when to use and when not to use.", "need to gain experience on when to use and when", "which will be propagated to layers that support ' 'bfloat16", "tf.Tensor (one or more). Even though certain layers support FProp", "FProp input shape information.\"\"\" return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name, keys):", "device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default = qdomain return p def", "name, dims, use_fused_layernorm=False): \"\"\"Layer norm.\"\"\" return layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm,", "return self._Seq(name) def _Arg(self, name, index): \"\"\"Picks index-th element. 
(t_1,", "from lingvo.core import layers from lingvo.core import py_utils from lingvo.core", "to all sub-layers and concatenates their output tuples into one", "param for the composed layer. \"\"\" def FnMeta(*shapes): \"\"\"A lambda", "x + list(y), tuples, []))) return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples,", "idims, odims), self._Bias('bias', odims), self._Activation('act', fn=act)) def _MLP(self, name, dims,", "a single # Tensor and returns a single Tensor. #", "as the given sa, but sa1 and sa2 do not", "param. FProp of a layer constructed by a builder takes", "name, index): \"\"\"Picks index-th element. (t_1, ..., t_n) -> (t_{index},).\"\"\"", "if fn_flops: flops = fn_flops(*shapes) else: flops = sum([s.size for", "in __init__. self._params = params.Copy() ###################################################################### # Layers to compose", "some options common to many layers in __init__. self._params =", "applicable law or agreed to in writing, software # distributed", "merge_meta=ConcatMeta) def _Fn(self, name, fn, fn_out=None, fn_flops=None): \"\"\"y = fn(x).", "name, filter_shape, filter_stride): \"\"\"Conv2D layer.\"\"\" return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride,", "= fn_out(*shapes) if isinstance(out_shapes, tshape.Shape): out_shapes = (out_shapes,) else: out_shapes", "should put some options common to many layers in __init__.", "name: The layer name. *subs: A list of sub-layers. Returns:", "fetches): \"\"\"Fetches saved activations in the body sub-layer. E.g.: _AddFetches('foo',", "def _Save(self, name): \"\"\"Returns a layer from which the activation", "activations. Default is None, which will use float32 ' 'activations.')", "tuple(tshape.Shape) -> estimated flops of fn. If None, we assume", "after another. TODO(zhifengc): Adds a more concrete example. \"\"\" @classmethod", "build complicated layers. 
\"\"\" import functools from lingvo.core import activations", "param for the composed layer. \"\"\" l = [] for", "' 'computations onto. If device_mesh is None, it is assumed", "which splits the batch and computes the forward pass on", "on FProp of the body layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set( name=name, body=body)", "name, shape): \"\"\"Reshape inputs to the shape provided.\"\"\" return builder_layers.ReshapeLayer.Params().Set(name=name,", "# You may obtain a copy of the License at", "\"\"\"Returns the params upon which this layer is built.\"\"\" return", "element. (t_1, ..., t_n) -> (t_{index},).\"\"\" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def", "= weight_split_dims_mapping p.qdomain.default = qdomain return p def _Bias(self, name,", "*iterations) def _Seq(self, name, *subs): \"\"\"Connects sub-layers sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set(", "mesh. ') p.Define( 'activation_split_dims_mapping', None, 'Relevant only if device_mesh above", "be serialized. Args: name: The layer name. fn: A lambda", "The sub-layer. Returns: A BatchParallel layer which splits the batch", "their output tuples into one tuple. 
Args: name: The layer", "body=body, fetches=fetches) def _Rematerialize(self, name, body): \"\"\"Forces rematerialization on FProp", "@property def params(self): \"\"\"Returns the params upon which this layer", "*[p.Copy() for p in subs])) return self._Seq(name, *iterations) def _Seq(self,", "with keys from fprop args.\"\"\" return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ###########################################################################", "layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def _LN(self, name, dims, use_fused_layernorm=False): \"\"\"Layer norm.\"\"\"", "repeat: Repeat \\*subs this many times in the compose layer.", "tuple to all sub-layers and concatenates their output tuples into", "(t1, ..., t_n).\"\"\" return self._Seq(name) def _Arg(self, name, index): \"\"\"Picks", "idims p.output_dims = odims p.fprop_dtype = self.params.fprop_dtype p.device_mesh = device_mesh", "' 'should be sharded over device mesh. ') return p", "def _Fn(self, name, fn, fn_out=None, fn_flops=None): \"\"\"y = fn(x). Applies", "layer or those of the sublayers should ' 'be sharded", "= sum([s.size for s in shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return", "is a 1d mesh with 8 devices, ' 'np.array([[0, 1,", "have a few methods to facilitate building these patterns. For", "name, *subs): \"\"\"Connects sub-layers sequentially.\"\"\" return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def", "sub-layer body. Returns: A layer whose outputs correspond to the", "activations for ' 'layers built using model builder, set fprop_dtype", "# Layers to compose multiple layers. # # Sub-classes are", "in a builder class constructs a layer param. 
FProp of", "body): \"\"\"Forces rematerialization on FProp of the body layer.\"\"\" return", "layer constructed by a builder takes a tuple of tf.Tensor", "FProp of the body layer.\"\"\" return builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def", "sub-layer one after another. TODO(zhifengc): Adds a more concrete example.", "\"License\"); # you may not use this file except in", "times in the compose layer. *subs: A list of sub-layers.", "# Sub-classes are discouraged to override these composition method. ######################################################################", "override these composition method. ###################################################################### def _Rep(self, name, repeat, *subs):", "the params upon which this layer is built.\"\"\" return self._params", "subs])) return self._Seq(name, *iterations) def _Seq(self, name, *subs): \"\"\"Connects sub-layers", "FProp argument being None (e.g., Conv2DLayer), builder should not depend", "name: The layer name. repeat: Repeat \\*subs this many times", "names inside the sub-layer body. Returns: A layer whose outputs", "tf.Tensor (one or more) and returns a tuple of tf.Tensor", "layer2_out. Args: name: This layer's name. body: The sub-layer. fetches:", "have minimal knobs. Sub-classes which needs to # be flexible", "out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def _Save(self, name): \"\"\"Returns a", "weight_split_dims_mapping=None): \"\"\"Bias layer. The bias is added to the last", "[sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have", "use. Please discuss w/ teammates before using it to build", "A lambda tuple(tshape.Shape) -> estimated flops of fn. If None,", "fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name, shape): \"\"\"Reshape inputs to the shape", "TODO(zhifengc): Adds a more concrete example. \"\"\" @classmethod def Params(cls):" ]
[ "back parameters: basename, first_arg, argv, cwd\"\"\" self.first_arg_basename_matches.append((text, cb)) def RegisterFirstArgumentBasenameRegex(self,", "cb(basename, first_arg, argv, cwd) if retval != None: return retval", "argv, cwd, regex_match\"\"\" self.first_arg_regexes.append((regex, cb)) def RegisterFirstArgumentBasenameMatch(self, text, cb): \"\"\"Call", "\"\"\"Returns a single string representing the tool in this command-line.", "= 0 while 1: seen[argv_joined] = None new_argv = self._GetTool(argv,", "if first_arg == text: retval = cb(first_arg, argv, cwd) if", "done this way because of the way the command-line is", "regex, cb): \"\"\"Call back parameters: basename, first_arg, argv, cw, regex_match\"\"\"", "m) if retval != None: return retval # Check the", "Copyright (c) 2010 by Cisco Systems, Inc. \"\"\" Manage the", "item in cmdline_args). argv_joined = ' '.join(cmdline_args) argv = argv_joined.split()", "first_arg, argv, cwd, regex_match\"\"\" self.first_arg_regexes.append((regex, cb)) def RegisterFirstArgumentBasenameMatch(self, text, cb):", "times as necessary to find # a non-changing answer. seen", "one true argv-item per item. However, # the instmakes that", "cb): \"\"\"Call back parameters: first_arg, argv, cwd\"\"\" self.first_arg_matches.append((text, cb)) def", "form a single command-line.\"\"\" # It's done this way because", "return retval # Get the first argument if len(argv) >=", "with this manager the circumstances under which they wish to", "tool plugins and use them appropriately. 
\"\"\" import os TOOLNAME_PLUGIN_PREFIX", "{} max_iterations = 100 i = 0 while 1: seen[argv_joined]", "cb): \"\"\"Call back parameters: basename, first_arg, argv, cwd\"\"\" self.first_arg_basename_matches.append((text, cb))", "of strings that will be concatenated with spaces to form", "seen[argv_joined] = None new_argv = self._GetTool(argv, cwd) new_argv_joined = '", "= regex.search(first_arg) if m: retval = cb(first_arg, argv, cwd, m)", "have to register with this manager the circumstances under which", "= argv_joined.split() # Call _GetTool as many times as necessary", "# stored in the instmake log. The top-most process (which", "cwd) if retval != None: return retval for (regex, cb)", "However, # the instmakes that were called from 'make' have", "instmake log. The top-most process (which is # the first", "!= None: return retval for (regex, cb) in self.first_arg_basename_regexes: m", "representing the tool in this command-line. cmdline_args is an array", "= cb(basename, first_arg, argv, cwd) if retval != None: return", "return new_argv[0] else: i += 1 if i == max_iterations:", "command-line for (regex, cb) in self.command_line_regexes: m = regex.search(cmdline) if", "record in the instmake log) # has a cmdline_args with", "= cb(first_arg, argv, cwd, m) if retval != None: return", "if retval != None: return retval # Nothing matched. Return", "the instmake log) # has a cmdline_args with one true", "cwd): cmdline = ' '.join(argv) # Check the command-line for", "retval = cb(basename, first_arg, argv, cwd) if retval != None:", "item. However, # the instmakes that were called from 'make'", "' '.join(new_argv) if new_argv_joined == argv_joined: return new_argv[0] elif seen.has_key(new_argv_joined):", "non-changing answer. 
seen = {} max_iterations = 100 i =", "parameters: first_arg, argv, cwd\"\"\" self.first_arg_matches.append((text, cb)) def RegisterFirstArgumentRegex(self, regex, cb):", "that were called from 'make' have their entire # command-line", "= None new_argv = self._GetTool(argv, cwd) new_argv_joined = ' '.join(new_argv)", "cb): \"\"\"Call back parameters: argv, cwd, regex_match\"\"\" self.command_line_regexes.append((regex, cb)) def", "m: retval = cb(first_arg, argv, cwd, m) if retval !=", "# item in cmdline_args). argv_joined = ' '.join(cmdline_args) argv =", "the way the command-line is # stored in the instmake", "regex.search(basename) if m: retval = cb(basename, first_arg, argv, cwd, m)", "= {} max_iterations = 100 i = 0 while 1:", "if len(argv) >= 1: first_arg = argv[0] else: return argv", "use them appropriately. \"\"\" import os TOOLNAME_PLUGIN_PREFIX = \"toolname\" class", "a non-changing answer. seen = {} max_iterations = 100 i", "of the first arg basename = os.path.basename(first_arg) for (text, cb)", "= ' '.join(cmdline_args) argv = argv_joined.split() # Call _GetTool as", "the instmakes that were called from 'make' have their entire", "from 'make' have their entire # command-line existing as a", "Check the first argument for (text, cb) in self.first_arg_matches: if", "cwd, regex_match\"\"\" self.first_arg_regexes.append((regex, cb)) def RegisterFirstArgumentBasenameMatch(self, text, cb): \"\"\"Call back", "cb) in self.first_arg_regexes: m = regex.search(first_arg) if m: retval =", "= new_argv argv_joined = new_argv_joined def _GetTool(self, argv, cwd): cmdline", "first_arg, argv, cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb)) def RegisterCommandLineRegex(self, regex, cb):", "' '.join(argv) # Check the command-line for (regex, cb) in", "m) if retval != None: return retval # Nothing matched.", "# Check the command-line for (regex, cb) in self.command_line_regexes: m", "Check the command-line for (regex, cb) in 
self.command_line_regexes: m =", "cb) in self.first_arg_basename_matches: if basename == text: retval = cb(basename,", "\"\"\"Call back parameters: basename, first_arg, argv, cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb))", "= cb(argv, cwd, m) if retval != None: return retval", "for (regex, cb) in self.command_line_regexes: m = regex.search(cmdline) if m:", "answer. seen = {} max_iterations = 100 i = 0", "def RegisterCommandLineRegex(self, regex, cb): \"\"\"Call back parameters: argv, cwd, regex_match\"\"\"", "argv_joined = ' '.join(cmdline_args) argv = argv_joined.split() # Call _GetTool", "[] self.first_arg_basename_matches = [] self.first_arg_regexes= [] self.first_arg_basename_regexes = [] self.command_line_regexes", "[] self.command_line_regexes = [] for plugin in toolname_plugins: plugin.register(self) def", "retval != None: return retval # Get the first argument", "first_arg, argv, cwd, m) if retval != None: return retval", "plugins have to register with this manager the circumstances under", "called from 'make' have their entire # command-line existing as", "cb) in self.first_arg_basename_regexes: m = regex.search(basename) if m: retval =", "m = regex.search(cmdline) if m: retval = cb(argv, cwd, m)", "plugin in toolname_plugins: plugin.register(self) def RegisterFirstArgumentMatch(self, text, cb): \"\"\"Call back", "Cisco Systems, Inc. \"\"\" Manage the tool plugins and use", "the tool in this command-line. cmdline_args is an array of", "new_argv_joined = ' '.join(new_argv) if new_argv_joined == argv_joined: return new_argv[0]", "'make' run, i.e., the last record in the instmake log)", "a cmdline_args with one true argv-item per item. However, #", "= ' '.join(argv) # Check the command-line for (regex, cb)", "retval = cb(basename, first_arg, argv, cwd, m) if retval !=", "argv, cwd, regex_match\"\"\" self.command_line_regexes.append((regex, cb)) def GetTool(self, cmdline_args, cwd): \"\"\"Returns", "them appropriately. 
\"\"\" import os TOOLNAME_PLUGIN_PREFIX = \"toolname\" class ToolNameManager:", "cmdline_args is an array of strings that will be concatenated", "toolname_plugins: plugin.register(self) def RegisterFirstArgumentMatch(self, text, cb): \"\"\"Call back parameters: first_arg,", "basename == text: retval = cb(basename, first_arg, argv, cwd) if", "return retval for (regex, cb) in self.first_arg_regexes: m = regex.search(first_arg)", "retval != None: return retval # Check the basename of", "i.e., the last record in the instmake log) # has", "is # the first 'make' run, i.e., the last record", "self.first_arg_regexes= [] self.first_arg_basename_regexes = [] self.command_line_regexes = [] for plugin", "concatenated with spaces to form a single command-line.\"\"\" # It's", "command-line is # stored in the instmake log. The top-most", "0 while 1: seen[argv_joined] = None new_argv = self._GetTool(argv, cwd)", "first argument if len(argv) >= 1: first_arg = argv[0] else:", "toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches = [] self.first_arg_basename_matches = [] self.first_arg_regexes=", "# has a cmdline_args with one true argv-item per item.", "run, i.e., the last record in the instmake log) #", "None new_argv = self._GetTool(argv, cwd) new_argv_joined = ' '.join(new_argv) if", "# the first 'make' run, i.e., the last record in", "if retval != None: return retval for (regex, cb) in", "the last record in the instmake log) # has a", "they wish to be called.\"\"\" def __init__(self, plugins): toolname_plugins =", "'make' have their entire # command-line existing as a single", "argv, cwd) if retval != None: return retval for (regex,", "the first arg basename = os.path.basename(first_arg) for (text, cb) in", "= [] self.first_arg_regexes= [] self.first_arg_basename_regexes = [] self.command_line_regexes = []", "cmdline_args). 
argv_joined = ' '.join(cmdline_args) argv = argv_joined.split() # Call", "self.first_arg_matches = [] self.first_arg_basename_matches = [] self.first_arg_regexes= [] self.first_arg_basename_regexes =", "first and only # item in cmdline_args). argv_joined = '", "'.join(cmdline_args) argv = argv_joined.split() # Call _GetTool as many times", "m) if retval != None: return retval # Get the", "command-line.\"\"\" # It's done this way because of the way", "1: first_arg = argv[0] else: return argv # Check the", "None: return retval for (regex, cb) in self.first_arg_regexes: m =", "in the instmake log. The top-most process (which is #", "back parameters: first_arg, argv, cwd\"\"\" self.first_arg_matches.append((text, cb)) def RegisterFirstArgumentRegex(self, regex,", "command-line existing as a single string (the first and only", "def RegisterFirstArgumentBasenameMatch(self, text, cb): \"\"\"Call back parameters: basename, first_arg, argv,", "seen.has_key(new_argv_joined): return new_argv[0] else: i += 1 if i ==", "entire # command-line existing as a single string (the first", "(regex, cb) in self.command_line_regexes: m = regex.search(cmdline) if m: retval", "parameters: basename, first_arg, argv, cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb)) def RegisterCommandLineRegex(self,", "def _GetTool(self, argv, cwd): cmdline = ' '.join(argv) # Check", "= self._GetTool(argv, cwd) new_argv_joined = ' '.join(new_argv) if new_argv_joined ==", "\"\"\"Call back parameters: argv, cwd, regex_match\"\"\" self.command_line_regexes.append((regex, cb)) def GetTool(self,", "else: return argv # Check the first argument for (text,", "self.first_arg_basename_matches: if basename == text: retval = cb(basename, first_arg, argv,", "single command-line.\"\"\" # It's done this way because of the", "cb(argv, cwd, m) if retval != None: return retval #", ">= 1: first_arg = argv[0] else: return argv # Check", "argument for (text, cb) in self.first_arg_matches: if first_arg == 
text:", "for (regex, cb) in self.first_arg_regexes: m = regex.search(first_arg) if m:", "argv, cwd): cmdline = ' '.join(argv) # Check the command-line", "argv, cwd\"\"\" self.first_arg_basename_matches.append((text, cb)) def RegisterFirstArgumentBasenameRegex(self, regex, cb): \"\"\"Call back", "cwd\"\"\" self.first_arg_matches.append((text, cb)) def RegisterFirstArgumentRegex(self, regex, cb): \"\"\"Call back parameters:", "back parameters: basename, first_arg, argv, cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb)) def", "= new_argv_joined def _GetTool(self, argv, cwd): cmdline = ' '.join(argv)", "existing as a single string (the first and only #", "which they wish to be called.\"\"\" def __init__(self, plugins): toolname_plugins", "retval for (regex, cb) in self.first_arg_basename_regexes: m = regex.search(basename) if", "stored in the instmake log. The top-most process (which is", "will be concatenated with spaces to form a single command-line.\"\"\"", "if retval != None: return retval # Get the first", "m: retval = cb(basename, first_arg, argv, cwd, m) if retval", "return retval for (regex, cb) in self.first_arg_basename_regexes: m = regex.search(basename)", "of the way the command-line is # stored in the", "as necessary to find # a non-changing answer. seen =", "plugins): toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches = [] self.first_arg_basename_matches = []", "be concatenated with spaces to form a single command-line.\"\"\" #", "regex.search(cmdline) if m: retval = cb(argv, cwd, m) if retval", "_GetTool(self, argv, cwd): cmdline = ' '.join(argv) # Check the", "Manage the tool plugins and use them appropriately. 
\"\"\" import", "regex, cb): \"\"\"Call back parameters: first_arg, argv, cwd, regex_match\"\"\" self.first_arg_regexes.append((regex,", "array of strings that will be concatenated with spaces to", "cwd, regex_match\"\"\" self.command_line_regexes.append((regex, cb)) def GetTool(self, cmdline_args, cwd): \"\"\"Returns a", "cb) in self.command_line_regexes: m = regex.search(cmdline) if m: retval =", "cwd) new_argv_joined = ' '.join(new_argv) if new_argv_joined == argv_joined: return", "100 i = 0 while 1: seen[argv_joined] = None new_argv", "RegisterFirstArgumentRegex(self, regex, cb): \"\"\"Call back parameters: first_arg, argv, cwd, regex_match\"\"\"", "__init__(self, plugins): toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches = [] self.first_arg_basename_matches =", "the first argument for (text, cb) in self.first_arg_matches: if first_arg", "basename, first_arg, argv, cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb)) def RegisterCommandLineRegex(self, regex,", "cwd, m) if retval != None: return retval # Nothing", "cb(basename, first_arg, argv, cwd, m) if retval != None: return", "max_iterations: return new_argv[0] argv = new_argv argv_joined = new_argv_joined def", "parameters: first_arg, argv, cwd, regex_match\"\"\" self.first_arg_regexes.append((regex, cb)) def RegisterFirstArgumentBasenameMatch(self, text,", "new_argv[0] argv = new_argv argv_joined = new_argv_joined def _GetTool(self, argv,", "None: return retval # Nothing matched. 
Return the default value.", "\"\"\"Call back parameters: first_arg, argv, cwd, regex_match\"\"\" self.first_arg_regexes.append((regex, cb)) def", "their entire # command-line existing as a single string (the", "strings that will be concatenated with spaces to form a", "while 1: seen[argv_joined] = None new_argv = self._GetTool(argv, cwd) new_argv_joined", "regex_match\"\"\" self.first_arg_regexes.append((regex, cb)) def RegisterFirstArgumentBasenameMatch(self, text, cb): \"\"\"Call back parameters:", "text: retval = cb(first_arg, argv, cwd) if retval != None:", "(which is # the first 'make' run, i.e., the last", "for (text, cb) in self.first_arg_basename_matches: if basename == text: retval", "first_arg, argv, cwd\"\"\" self.first_arg_matches.append((text, cb)) def RegisterFirstArgumentRegex(self, regex, cb): \"\"\"Call", "!= None: return retval # Get the first argument if", "the command-line is # stored in the instmake log. The", "'.join(new_argv) if new_argv_joined == argv_joined: return new_argv[0] elif seen.has_key(new_argv_joined): return", "import os TOOLNAME_PLUGIN_PREFIX = \"toolname\" class ToolNameManager: \"\"\"ToolName plugins have", "= regex.search(cmdline) if m: retval = cb(argv, cwd, m) if", "single string (the first and only # item in cmdline_args).", "return retval # Nothing matched. Return the default value. return", "RegisterFirstArgumentBasenameMatch(self, text, cb): \"\"\"Call back parameters: basename, first_arg, argv, cwd\"\"\"", "# It's done this way because of the way the", "plugins and use them appropriately. \"\"\" import os TOOLNAME_PLUGIN_PREFIX =", "(text, cb) in self.first_arg_matches: if first_arg == text: retval =", "retval != None: return retval # Nothing matched. Return the", "\"\"\"Call back parameters: basename, first_arg, argv, cwd\"\"\" self.first_arg_basename_matches.append((text, cb)) def", "true argv-item per item. 
However, # the instmakes that were", "retval for (regex, cb) in self.first_arg_regexes: m = regex.search(first_arg) if", "is an array of strings that will be concatenated with", "m = regex.search(basename) if m: retval = cb(basename, first_arg, argv,", "# Check the first argument for (text, cb) in self.first_arg_matches:", "text, cb): \"\"\"Call back parameters: basename, first_arg, argv, cwd\"\"\" self.first_arg_basename_matches.append((text,", "in self.first_arg_matches: if first_arg == text: retval = cb(first_arg, argv,", "i = 0 while 1: seen[argv_joined] = None new_argv =", "m = regex.search(first_arg) if m: retval = cb(first_arg, argv, cwd,", "a single string (the first and only # item in", "!= None: return retval # Nothing matched. Return the default", "new_argv_joined == argv_joined: return new_argv[0] elif seen.has_key(new_argv_joined): return new_argv[0] else:", "way the command-line is # stored in the instmake log.", "self.command_line_regexes: m = regex.search(cmdline) if m: retval = cb(argv, cwd,", "def RegisterFirstArgumentBasenameRegex(self, regex, cb): \"\"\"Call back parameters: basename, first_arg, argv,", "None: return retval for (regex, cb) in self.first_arg_basename_regexes: m =", "# the instmakes that were called from 'make' have their", "spaces to form a single command-line.\"\"\" # It's done this", "argv # Check the first argument for (text, cb) in", "to register with this manager the circumstances under which they", "def RegisterFirstArgumentMatch(self, text, cb): \"\"\"Call back parameters: first_arg, argv, cwd\"\"\"", "the instmake log. The top-most process (which is # the", "2010 by Cisco Systems, Inc. \"\"\" Manage the tool plugins", "the basename of the first arg basename = os.path.basename(first_arg) for", "argv, cwd, m) if retval != None: return retval #", "# command-line existing as a single string (the first and", "only # item in cmdline_args). 
argv_joined = ' '.join(cmdline_args) argv", "register with this manager the circumstances under which they wish", "\"toolname\" class ToolNameManager: \"\"\"ToolName plugins have to register with this", "Inc. \"\"\" Manage the tool plugins and use them appropriately.", "with one true argv-item per item. However, # the instmakes", "retval != None: return retval for (regex, cb) in self.first_arg_basename_regexes:", "plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches = [] self.first_arg_basename_matches = [] self.first_arg_regexes= [] self.first_arg_basename_regexes", "this way because of the way the command-line is #", "= 100 i = 0 while 1: seen[argv_joined] = None", "self.first_arg_basename_matches.append((text, cb)) def RegisterFirstArgumentBasenameRegex(self, regex, cb): \"\"\"Call back parameters: basename,", "in self.first_arg_basename_matches: if basename == text: retval = cb(basename, first_arg,", "argv-item per item. However, # the instmakes that were called", "new_argv_joined def _GetTool(self, argv, cwd): cmdline = ' '.join(argv) #", "in toolname_plugins: plugin.register(self) def RegisterFirstArgumentMatch(self, text, cb): \"\"\"Call back parameters:", "_GetTool as many times as necessary to find # a", "plugin.register(self) def RegisterFirstArgumentMatch(self, text, cb): \"\"\"Call back parameters: first_arg, argv,", "for plugin in toolname_plugins: plugin.register(self) def RegisterFirstArgumentMatch(self, text, cb): \"\"\"Call", "first_arg, argv, cwd\"\"\" self.first_arg_basename_matches.append((text, cb)) def RegisterFirstArgumentBasenameRegex(self, regex, cb): \"\"\"Call", "find # a non-changing answer. 
seen = {} max_iterations =", "def __init__(self, plugins): toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches = [] self.first_arg_basename_matches", "cb): \"\"\"Call back parameters: basename, first_arg, argv, cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex,", "return argv # Check the first argument for (text, cb)", "= cb(basename, first_arg, argv, cwd, m) if retval != None:", "tool in this command-line. cmdline_args is an array of strings", "in self.first_arg_basename_regexes: m = regex.search(basename) if m: retval = cb(basename,", "necessary to find # a non-changing answer. seen = {}", "and use them appropriately. \"\"\" import os TOOLNAME_PLUGIN_PREFIX = \"toolname\"", "Check the basename of the first arg basename = os.path.basename(first_arg)", "(the first and only # item in cmdline_args). argv_joined =", "to find # a non-changing answer. seen = {} max_iterations", "cwd\"\"\" self.first_arg_basename_matches.append((text, cb)) def RegisterFirstArgumentBasenameRegex(self, regex, cb): \"\"\"Call back parameters:", "' '.join(cmdline_args) argv = argv_joined.split() # Call _GetTool as many", "were called from 'make' have their entire # command-line existing", "ToolNameManager: \"\"\"ToolName plugins have to register with this manager the", "= plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches = [] self.first_arg_basename_matches = [] self.first_arg_regexes= []", "= cb(first_arg, argv, cwd) if retval != None: return retval", "(c) 2010 by Cisco Systems, Inc. \"\"\" Manage the tool", "= [] self.first_arg_basename_matches = [] self.first_arg_regexes= [] self.first_arg_basename_regexes = []", "the circumstances under which they wish to be called.\"\"\" def", "string representing the tool in this command-line. cmdline_args is an", "TOOLNAME_PLUGIN_PREFIX = \"toolname\" class ToolNameManager: \"\"\"ToolName plugins have to register", "is # stored in the instmake log. 
The top-most process", "parameters: basename, first_arg, argv, cwd\"\"\" self.first_arg_basename_matches.append((text, cb)) def RegisterFirstArgumentBasenameRegex(self, regex,", "retval != None: return retval for (regex, cb) in self.first_arg_regexes:", "self.first_arg_basename_regexes = [] self.command_line_regexes = [] for plugin in toolname_plugins:", "i += 1 if i == max_iterations: return new_argv[0] argv", "new_argv[0] elif seen.has_key(new_argv_joined): return new_argv[0] else: i += 1 if", "basename, first_arg, argv, cwd\"\"\" self.first_arg_basename_matches.append((text, cb)) def RegisterFirstArgumentBasenameRegex(self, regex, cb):", "many times as necessary to find # a non-changing answer.", "'.join(argv) # Check the command-line for (regex, cb) in self.command_line_regexes:", "return new_argv[0] elif seen.has_key(new_argv_joined): return new_argv[0] else: i += 1", "cb)) def RegisterFirstArgumentBasenameRegex(self, regex, cb): \"\"\"Call back parameters: basename, first_arg,", "1 if i == max_iterations: return new_argv[0] argv = new_argv", "if m: retval = cb(first_arg, argv, cwd, m) if retval", "= os.path.basename(first_arg) for (text, cb) in self.first_arg_basename_matches: if basename ==", "cb): \"\"\"Call back parameters: first_arg, argv, cwd, regex_match\"\"\" self.first_arg_regexes.append((regex, cb))", "\"\"\"ToolName plugins have to register with this manager the circumstances", "def GetTool(self, cmdline_args, cwd): \"\"\"Returns a single string representing the", "top-most process (which is # the first 'make' run, i.e.,", "argv = argv_joined.split() # Call _GetTool as many times as", "a single string representing the tool in this command-line. 
cmdline_args", "if m: retval = cb(argv, cwd, m) if retval !=", "cwd): \"\"\"Returns a single string representing the tool in this", "argv_joined: return new_argv[0] elif seen.has_key(new_argv_joined): return new_argv[0] else: i +=", "== text: retval = cb(basename, first_arg, argv, cwd) if retval", "under which they wish to be called.\"\"\" def __init__(self, plugins):", "new_argv argv_joined = new_argv_joined def _GetTool(self, argv, cwd): cmdline =", "that will be concatenated with spaces to form a single", "self.first_arg_basename_matches = [] self.first_arg_regexes= [] self.first_arg_basename_regexes = [] self.command_line_regexes =", "as many times as necessary to find # a non-changing", "regex.search(first_arg) if m: retval = cb(first_arg, argv, cwd, m) if", "RegisterCommandLineRegex(self, regex, cb): \"\"\"Call back parameters: argv, cwd, regex_match\"\"\" self.command_line_regexes.append((regex,", "os TOOLNAME_PLUGIN_PREFIX = \"toolname\" class ToolNameManager: \"\"\"ToolName plugins have to", "self.first_arg_matches.append((text, cb)) def RegisterFirstArgumentRegex(self, regex, cb): \"\"\"Call back parameters: first_arg,", "# Copyright (c) 2010 by Cisco Systems, Inc. \"\"\" Manage", "wish to be called.\"\"\" def __init__(self, plugins): toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX)", "the first argument if len(argv) >= 1: first_arg = argv[0]", "be called.\"\"\" def __init__(self, plugins): toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches =", "have their entire # command-line existing as a single string", "!= None: return retval for (regex, cb) in self.first_arg_regexes: m", "<filename>instmakelib/instmake_toolnames.py # Copyright (c) 2010 by Cisco Systems, Inc. \"\"\"", "by Cisco Systems, Inc. \"\"\" Manage the tool plugins and", "first_arg == text: retval = cb(first_arg, argv, cwd) if retval", "in cmdline_args). 
argv_joined = ' '.join(cmdline_args) argv = argv_joined.split() #", "len(argv) >= 1: first_arg = argv[0] else: return argv #", "cb)) def RegisterCommandLineRegex(self, regex, cb): \"\"\"Call back parameters: argv, cwd,", "(regex, cb) in self.first_arg_basename_regexes: m = regex.search(basename) if m: retval", "+= 1 if i == max_iterations: return new_argv[0] argv =", "self.first_arg_basename_regexes.append((regex, cb)) def RegisterCommandLineRegex(self, regex, cb): \"\"\"Call back parameters: argv,", "retval = cb(argv, cwd, m) if retval != None: return", "= argv[0] else: return argv # Check the first argument", "(text, cb) in self.first_arg_basename_matches: if basename == text: retval =", "new_argv[0] else: i += 1 if i == max_iterations: return", "self.first_arg_regexes: m = regex.search(first_arg) if m: retval = cb(first_arg, argv,", "if basename == text: retval = cb(basename, first_arg, argv, cwd)", "self.first_arg_regexes.append((regex, cb)) def RegisterFirstArgumentBasenameMatch(self, text, cb): \"\"\"Call back parameters: basename,", "first 'make' run, i.e., the last record in the instmake", "back parameters: argv, cwd, regex_match\"\"\" self.command_line_regexes.append((regex, cb)) def GetTool(self, cmdline_args,", "The top-most process (which is # the first 'make' run,", "[] self.first_arg_regexes= [] self.first_arg_basename_regexes = [] self.command_line_regexes = [] for", "class ToolNameManager: \"\"\"ToolName plugins have to register with this manager", "= ' '.join(new_argv) if new_argv_joined == argv_joined: return new_argv[0] elif", "in the instmake log) # has a cmdline_args with one", "circumstances under which they wish to be called.\"\"\" def __init__(self,", "argument if len(argv) >= 1: first_arg = argv[0] else: return", "in self.first_arg_regexes: m = regex.search(first_arg) if m: retval = cb(first_arg,", "Get the first argument if len(argv) >= 1: first_arg =", "!= None: return retval # Check the basename of the", "= \"toolname\" class 
ToolNameManager: \"\"\"ToolName plugins have to register with", "instmake log) # has a cmdline_args with one true argv-item", "regex_match\"\"\" self.command_line_regexes.append((regex, cb)) def GetTool(self, cmdline_args, cwd): \"\"\"Returns a single", "for (text, cb) in self.first_arg_matches: if first_arg == text: retval", "has a cmdline_args with one true argv-item per item. However,", "the first 'make' run, i.e., the last record in the", "the tool plugins and use them appropriately. \"\"\" import os", "to be called.\"\"\" def __init__(self, plugins): toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches", "first arg basename = os.path.basename(first_arg) for (text, cb) in self.first_arg_basename_matches:", "command-line. cmdline_args is an array of strings that will be", "It's done this way because of the way the command-line", "retval = cb(first_arg, argv, cwd, m) if retval != None:", "instmakes that were called from 'make' have their entire #", "retval # Nothing matched. Return the default value. 
return argv", "basename = os.path.basename(first_arg) for (text, cb) in self.first_arg_basename_matches: if basename", "RegisterFirstArgumentBasenameRegex(self, regex, cb): \"\"\"Call back parameters: basename, first_arg, argv, cw,", "Call _GetTool as many times as necessary to find #", "if i == max_iterations: return new_argv[0] argv = new_argv argv_joined", "self.first_arg_matches: if first_arg == text: retval = cb(first_arg, argv, cwd)", "back parameters: first_arg, argv, cwd, regex_match\"\"\" self.first_arg_regexes.append((regex, cb)) def RegisterFirstArgumentBasenameMatch(self,", "seen = {} max_iterations = 100 i = 0 while", "cb)) def RegisterFirstArgumentBasenameMatch(self, text, cb): \"\"\"Call back parameters: basename, first_arg,", "cb)) def RegisterFirstArgumentRegex(self, regex, cb): \"\"\"Call back parameters: first_arg, argv,", "log) # has a cmdline_args with one true argv-item per", "argv = new_argv argv_joined = new_argv_joined def _GetTool(self, argv, cwd):", "last record in the instmake log) # has a cmdline_args", "m: retval = cb(argv, cwd, m) if retval != None:", "arg basename = os.path.basename(first_arg) for (text, cb) in self.first_arg_basename_matches: if", "(regex, cb) in self.first_arg_regexes: m = regex.search(first_arg) if m: retval", "= [] self.command_line_regexes = [] for plugin in toolname_plugins: plugin.register(self)", "= [] for plugin in toolname_plugins: plugin.register(self) def RegisterFirstArgumentMatch(self, text,", "return new_argv[0] argv = new_argv argv_joined = new_argv_joined def _GetTool(self,", "log. 
The top-most process (which is # the first 'make'", "elif seen.has_key(new_argv_joined): return new_argv[0] else: i += 1 if i", "else: i += 1 if i == max_iterations: return new_argv[0]", "# Check the basename of the first arg basename =", "if new_argv_joined == argv_joined: return new_argv[0] elif seen.has_key(new_argv_joined): return new_argv[0]", "as a single string (the first and only # item", "cb) in self.first_arg_matches: if first_arg == text: retval = cb(first_arg,", "os.path.basename(first_arg) for (text, cb) in self.first_arg_basename_matches: if basename == text:", "max_iterations = 100 i = 0 while 1: seen[argv_joined] =", "process (which is # the first 'make' run, i.e., the", "# Get the first argument if len(argv) >= 1: first_arg", "retval = cb(first_arg, argv, cwd) if retval != None: return", "def RegisterFirstArgumentRegex(self, regex, cb): \"\"\"Call back parameters: first_arg, argv, cwd,", "\"\"\" Manage the tool plugins and use them appropriately. \"\"\"", "None: return retval # Get the first argument if len(argv)", "for (regex, cb) in self.first_arg_basename_regexes: m = regex.search(basename) if m:", "because of the way the command-line is # stored in", "and only # item in cmdline_args). argv_joined = ' '.join(cmdline_args)", "# a non-changing answer. seen = {} max_iterations = 100", "cb)) def GetTool(self, cmdline_args, cwd): \"\"\"Returns a single string representing", "manager the circumstances under which they wish to be called.\"\"\"", "cb(first_arg, argv, cwd, m) if retval != None: return retval", "if m: retval = cb(basename, first_arg, argv, cwd, m) if", "None: return retval # Check the basename of the first", "retval # Get the first argument if len(argv) >= 1:", "cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb)) def RegisterCommandLineRegex(self, regex, cb): \"\"\"Call back", "appropriately. \"\"\" import os TOOLNAME_PLUGIN_PREFIX = \"toolname\" class ToolNameManager: \"\"\"ToolName", "Systems, Inc. 
\"\"\" Manage the tool plugins and use them", "called.\"\"\" def __init__(self, plugins): toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX) self.first_arg_matches = []", "[] self.first_arg_basename_regexes = [] self.command_line_regexes = [] for plugin in", "if retval != None: return retval # Check the basename", "return retval # Check the basename of the first arg", "regex, cb): \"\"\"Call back parameters: argv, cwd, regex_match\"\"\" self.command_line_regexes.append((regex, cb))", "\"\"\"Call back parameters: first_arg, argv, cwd\"\"\" self.first_arg_matches.append((text, cb)) def RegisterFirstArgumentRegex(self,", "the command-line for (regex, cb) in self.command_line_regexes: m = regex.search(cmdline)", "cmdline_args, cwd): \"\"\"Returns a single string representing the tool in", "RegisterFirstArgumentMatch(self, text, cb): \"\"\"Call back parameters: first_arg, argv, cwd\"\"\" self.first_arg_matches.append((text,", "1: seen[argv_joined] = None new_argv = self._GetTool(argv, cwd) new_argv_joined =", "self.command_line_regexes.append((regex, cb)) def GetTool(self, cmdline_args, cwd): \"\"\"Returns a single string", "new_argv = self._GetTool(argv, cwd) new_argv_joined = ' '.join(new_argv) if new_argv_joined", "cwd, m) if retval != None: return retval # Get", "self.first_arg_basename_regexes: m = regex.search(basename) if m: retval = cb(basename, first_arg,", "i == max_iterations: return new_argv[0] argv = new_argv argv_joined =", "this manager the circumstances under which they wish to be", "self._GetTool(argv, cwd) new_argv_joined = ' '.join(new_argv) if new_argv_joined == argv_joined:", "cwd, m) if retval != None: return retval # Check", "a single command-line.\"\"\" # It's done this way because of", "this command-line. 
cmdline_args is an array of strings that will", "retval # Check the basename of the first arg basename", "first_arg, argv, cwd) if retval != None: return retval for", "parameters: argv, cwd, regex_match\"\"\" self.command_line_regexes.append((regex, cb)) def GetTool(self, cmdline_args, cwd):", "# Call _GetTool as many times as necessary to find", "self.command_line_regexes = [] for plugin in toolname_plugins: plugin.register(self) def RegisterFirstArgumentMatch(self,", "way because of the way the command-line is # stored", "basename of the first arg basename = os.path.basename(first_arg) for (text,", "== argv_joined: return new_argv[0] elif seen.has_key(new_argv_joined): return new_argv[0] else: i", "argv, cw, regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb)) def RegisterCommandLineRegex(self, regex, cb): \"\"\"Call", "text, cb): \"\"\"Call back parameters: first_arg, argv, cwd\"\"\" self.first_arg_matches.append((text, cb))", "== text: retval = cb(first_arg, argv, cwd) if retval !=", "per item. However, # the instmakes that were called from", "argv, cwd\"\"\" self.first_arg_matches.append((text, cb)) def RegisterFirstArgumentRegex(self, regex, cb): \"\"\"Call back", "text: retval = cb(basename, first_arg, argv, cwd) if retval !=", "cb(first_arg, argv, cwd) if retval != None: return retval for", "argv_joined = new_argv_joined def _GetTool(self, argv, cwd): cmdline = '", "cmdline = ' '.join(argv) # Check the command-line for (regex,", "with spaces to form a single command-line.\"\"\" # It's done", "string (the first and only # item in cmdline_args). argv_joined", "argv_joined.split() # Call _GetTool as many times as necessary to", "\"\"\" import os TOOLNAME_PLUGIN_PREFIX = \"toolname\" class ToolNameManager: \"\"\"ToolName plugins", "single string representing the tool in this command-line. cmdline_args is", "in this command-line. 
cmdline_args is an array of strings that", "[] for plugin in toolname_plugins: plugin.register(self) def RegisterFirstArgumentMatch(self, text, cb):", "== max_iterations: return new_argv[0] argv = new_argv argv_joined = new_argv_joined", "argv[0] else: return argv # Check the first argument for", "first argument for (text, cb) in self.first_arg_matches: if first_arg ==", "regex_match\"\"\" self.first_arg_basename_regexes.append((regex, cb)) def RegisterCommandLineRegex(self, regex, cb): \"\"\"Call back parameters:", "GetTool(self, cmdline_args, cwd): \"\"\"Returns a single string representing the tool", "in self.command_line_regexes: m = regex.search(cmdline) if m: retval = cb(argv,", "cmdline_args with one true argv-item per item. However, # the", "= regex.search(basename) if m: retval = cb(basename, first_arg, argv, cwd,", "to form a single command-line.\"\"\" # It's done this way", "first_arg = argv[0] else: return argv # Check the first", "an array of strings that will be concatenated with spaces" ]
[ "sending the \"extra\" deposit forward for server_from, server_to in pairs:", "if isinstance(val, str) and val.startswith(\"0x\"): kwargs[key] = to_canonical_address(val) with apiserver.flask_app.app_context():", "balances balances by sending the \"extra\" deposit forward for server_from,", "deposit: TokenAmount, ) -> None: \"\"\"Send transfers of value one", "server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from", "None: pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]])) for first, second", "from eth_utils import to_canonical_address from flask import url_for from raiden.api.python", "stress_send_and_receive_parallel_transfers( rest_apis, token_address, identifier_generator, deposit ) raiden_network, rest_apis = restart_network_and_apiservers(", "+ [rest_apis[0]])) # deplete the channels in one direction iwait_and_get(", "raiden.constants import RoutingMode from raiden.message_handler import MessageHandler from raiden.network.transport import", "] ) # deplete the channels in the backwards direction", "= request.send().response duration = time.monotonic() - start log.debug(\"PAYMENT RESPONSE\", url=url,", "deposit) with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis =", "raiden_network: List[RaidenService], token_network_address: TokenNetworkAddress, deposit: TokenAmount, ) -> None: pairs", "RestartNode, api_servers: List[APIServer], port_generator: Iterator[Port], ) -> Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop", "# deplete the channels in one direction iwait_and_get( [ gevent.spawn(", "in gevent.iwait(items): item.get() def _url_for(apiserver: APIServer, endpoint: str, **kwargs) ->", "one in parallel\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) forward_transfers", "def _url_for(apiserver: 
APIServer, endpoint: str, **kwargs) -> str: # url_for()", "= RestAPI(raiden_api) api_server = APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) ) #", "from raiden.api.python import RaidenAPI from raiden.api.rest import APIServer, RestAPI from", "def stress_send_serial_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount,", "sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) def stress_send_parallel_transfers( rest_apis:", "server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in", "the \"extra\" deposit forward for server_from, server_to in pairs: sequential_transfers(", "in api_servers: rest_api.stop() new_network = restart_network(raiden_network, restart_node) new_servers = start_apiserver_for_network(new_network,", "time.monotonic() - start log.debug(\"PAYMENT RESPONSE\", url=url, json=json, response=response, duration=duration) assert", "for app in raiden_network) gevent.joinall(set(wait_network), raise_error=True) new_network = [greenlet.get() for", "next(port_generator)) for app in raiden_network] def restart_app(app: RaidenService, restart_node: RestartNode)", "channels in the backwards direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from,", "item.get() def _url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str: #", "number_of_transfers: int, token_address: TokenAddress, identifier_generator: Iterator[int], ) -> None: for", "itertools import count from typing import Sequence import gevent import", "channels in one direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to,", "number_of_transfers=deposit * 2, 
token_address=token_address, identifier_generator=identifier_generator, ) # reset the balances", "rest_api_port_number: Port) -> APIServer: raiden_api = RaidenAPI(raiden_app) rest_api = RestAPI(raiden_api)", "), services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, ) restart_node(app) return app", ") -> List[RaidenService]: for app in raiden_network: app.stop() wait_network =", "for key, val in kwargs.items(): if isinstance(val, str) and val.startswith(\"0x\"):", "\"\"\"Send `deposit` transfers of value `1` one at a time,", "channels in the backwards direction for server_to, server_from in pairs:", "raiden_network) gevent.joinall(set(wait_network), raise_error=True) new_network = [greenlet.get() for greenlet in wait_network]", "time, without changing the initial capacity. \"\"\" pairs = list(zip(rest_apis,", "= APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) ) # required for url_for", "with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers( rest_apis, token_address, identifier_generator, deposit ) raiden_network, rest_apis", "balances by sending the \"extra\" deposit forward iwait_and_get( [ gevent.spawn(", "apiserver.rest_api.raiden_api.address def transfer_and_assert( server_from: APIServer, server_to: APIServer, token_address: TokenAddress, identifier:", "server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) # reset the", "wait_network = (gevent.spawn(restart_app, app, restart_node) for app in raiden_network) gevent.joinall(set(wait_network),", "deplete the channels in the backwards direction for server_to, server_from", "raiden.raiden_event_handler import RaidenEventHandler from raiden.raiden_service import RaidenService from raiden.settings import", "and get on passed greenlets. 
This ensures exceptions in the", "url = _url_for( server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), ) json =", ") -> List[APIServer]: return [start_apiserver(app, next(port_generator)) for app in raiden_network]", "not None assert response.status_code == HTTPStatus.OK, f\"Payment failed, reason: {response.content}\"", "[2]) @pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\", [15]) @pytest.mark.parametrize(\"settle_timeout\", [120]) def test_stress( raiden_network:", "token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis", "raiden.settings import RestApiConfig from raiden.tests.integration.api.utils import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import", "is not None assert response.status_code == HTTPStatus.OK, f\"Payment failed, reason:", "raiden.utils.typing import ( Address, BlockNumber, Host, Iterator, List, Port, TokenAddress,", "have to convert here for key, val in kwargs.items(): if", "in pairs: wait_assert( assert_synced_channel_state, token_network_address, first, deposit, [], second, deposit,", "assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network,", "restart_network( raiden_network: List[RaidenService], restart_node: RestartNode ) -> List[RaidenService]: for app", "direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2,", "at a time, without changing the initial capacity. 
\"\"\" pairs", "gevent.iwait(items): item.get() def _url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str:", "+ backwards_transfers) def assert_channels( raiden_network: List[RaidenService], token_network_address: TokenNetworkAddress, deposit: TokenAmount,", "] ) def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int],", "watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network,", "deplete the channels in the backwards direction iwait_and_get( [ gevent.spawn(", ") for server_to, server_from in pairs ] ) # reset", "from raiden.constants import RoutingMode from raiden.message_handler import MessageHandler from raiden.network.transport", "app in raiden_network] def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService:", "_url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str: # url_for() expects", "[120]) def test_stress( raiden_network: List[RaidenService], restart_node: RestartNode, deposit: TokenAmount, token_addresses:", "raiden_network[0].default_registry.address, token_address, ) assert token_network_address for _ in range(2): assert_channels(raiden_network,", "raiden.utils.formatting import to_checksum_address from raiden.utils.typing import ( Address, BlockNumber, Host,", "restart_node: RestartNode ) -> List[RaidenService]: for app in raiden_network: app.stop()", "raiden.transfer import views from raiden.ui.startup import RaidenBundle from raiden.utils.formatting import", ") # deplete the channels in the backwards direction for", "greenlet in wait_network] return new_network def restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node:", "def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None: \"\"\"Iteratively wait and get on", "This ensures exceptions in the 
greenlets are re-raised as soon", "port_generator ) assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address, identifier_generator,", "reset the balances balances by sending the \"extra\" deposit forward", "= list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]])) for first, second in pairs:", "token_network_address, first, deposit, [], second, deposit, [], ) @pytest.mark.skip(reason=\"flaky, see", "test_stress( raiden_network: List[RaidenService], restart_node: RestartNode, deposit: TokenAmount, token_addresses: List[TokenAddress], port_generator:", "restart_node: RestartNode) -> RaidenService: new_transport = MatrixTransport( config=app.config.transport, environment=app.config.environment_type )", "count from typing import Sequence import gevent import grequests import", "log.debug(\"PAYMENT REQUEST\", url=url, json=json) request = grequests.post(url, json=json) start =", "assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network,", "Iterator[Port], ) -> None: token_address = token_addresses[0] rest_apis = start_apiserver_for_network(raiden_network,", "port_generator ) assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers( rest_apis, token_address,", ") -> None: \"\"\"Send transfers of value one in parallel\"\"\"", "in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator,", "parallel, without changing the initial capacity.\"\"\" pairs = list(zip(rest_apis, rest_apis[1:]", "= [greenlet.get() for greenlet in wait_network] return new_network def 
restart_network_and_apiservers(", "in one direction for server_from, server_to in pairs: sequential_transfers( server_from=server_from,", ") for server_to, server_from in pairs ] iwait_and_get(forward_transfers + backwards_transfers)", "= RaidenEventHandler() hold_handler = HoldRaidenEventHandler(raiden_event_handler) app = RaidenService( config=app.config, rpc_client=app.rpc_client,", "pairs ] backwards_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit,", "rest_apis = start_apiserver_for_network(raiden_network, port_generator) identifier_generator = count(start=1) token_network_address = views.get_token_network_address_by_token_address(", "server_to, server_from in pairs ] iwait_and_get(forward_transfers + backwards_transfers) def assert_channels(", "from http import HTTPStatus from itertools import count from typing", "identifier: int, amount: TokenAmount, ) -> None: url = _url_for(", "for server_to, server_from in pairs ] iwait_and_get(forward_transfers + backwards_transfers) def", "raiden.tests.integration.api.utils import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import RestartNode from raiden.tests.utils.detect_failure import", "https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\", [2]) @pytest.mark.parametrize(\"deposit\", [2])", "Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple, ) log", "TokenAmount, ) -> None: \"\"\"Send `deposit` transfers in parallel, without", "RaidenEventHandler from raiden.raiden_service import RaidenService from raiden.settings import RestApiConfig from", "None assert response.status_code == HTTPStatus.OK, f\"Payment failed, reason: {response.content}\" assert", "json=json) request = grequests.post(url, json=json) start = 
time.monotonic() response =", "from raiden.tests.integration.api.utils import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import RestartNode from raiden.tests.utils.detect_failure", ") -> None: for _ in range(number_of_transfers): transfer_and_assert( server_from=server_from, server_to=server_to,", "\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete the", "def address_from_apiserver(apiserver: APIServer) -> Address: return apiserver.rest_api.raiden_api.address def transfer_and_assert( server_from:", "rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) ->", "sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) # deplete the", "changing the initial capacity. \"\"\" pairs = list(zip(rest_apis, rest_apis[1:] +", ") log = structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None: \"\"\"Iteratively", "pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) def stress_send_parallel_transfers(", "[], ) @pytest.mark.skip(reason=\"flaky, see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\", [1])", "_ in range(2): assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address,", "without changing the initial capacity.\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] +", "# url_for() expects binary address so we have to convert", "raise_error=True) new_network = [greenlet.get() for greenlet in wait_network] return new_network", ") @pytest.mark.skip(reason=\"flaky, 
see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\",", "# deplete the channels in the backwards direction for server_to,", "the channels in one direction for server_from, server_to in pairs:", "import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import RestartNode from raiden.tests.utils.detect_failure import raise_on_failure", "request.send().response duration = time.monotonic() - start log.debug(\"PAYMENT RESPONSE\", url=url, json=json,", "`1` one at a time, without changing the initial capacity.", "port=rest_api_port_number) ) # required for url_for api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\" api_server.start()", "restart_node: RestartNode, api_servers: List[APIServer], port_generator: Iterator[Port], ) -> Tuple[List[RaidenService], List[APIServer]]:", "the backwards direction for server_to, server_from in pairs: sequential_transfers( server_from=server_from,", "new_transport = MatrixTransport( config=app.config.transport, environment=app.config.environment_type ) raiden_event_handler = RaidenEventHandler() hold_handler", "changing the initial capacity.\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))", "@pytest.mark.parametrize(\"channels_per_node\", [2]) @pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\", [15]) @pytest.mark.parametrize(\"settle_timeout\", [120]) def test_stress(", "[rest_apis[0]])) # deplete the channels in one direction iwait_and_get( [", "app, restart_node) for app in raiden_network) gevent.joinall(set(wait_network), raise_error=True) new_network =", "TokenAmount, ) -> None: url = _url_for( server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address),", "= token_addresses[0] rest_apis = 
start_apiserver_for_network(raiden_network, port_generator) identifier_generator = count(start=1) token_network_address", "Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop an app and start it back\"\"\" for", "rest_apis, token_address, identifier_generator, deposit ) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network,", "import RaidenBundle from raiden.utils.formatting import to_checksum_address from raiden.utils.typing import (", "the channels in one direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from,", "RaidenAPI(raiden_app) rest_api = RestAPI(raiden_api) api_server = APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number)", "raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator ) restart_network(raiden_network,", "HoldRaidenEventHandler from raiden.tests.utils.transfer import ( assert_synced_channel_state, wait_assert, watch_for_unlock_failures, ) from", "APIServer, server_to: APIServer, number_of_transfers: int, token_address: TokenAddress, identifier_generator: Iterator[int], )", "List[RaidenService], token_network_address: TokenNetworkAddress, deposit: TokenAmount, ) -> None: pairs =", "= {\"amount\": amount, \"identifier\": identifier} log.debug(\"PAYMENT REQUEST\", url=url, json=json) request", "restart_network(raiden_network, restart_node) new_servers = start_apiserver_for_network(new_network, port_generator) return (new_network, new_servers) def", "identifier} log.debug(\"PAYMENT REQUEST\", url=url, json=json) request = grequests.post(url, json=json) start", "server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) # deplete the channels in", "server_from: APIServer, server_to: APIServer, number_of_transfers: int, token_address: TokenAddress, identifier_generator: Iterator[int],", "{response.content}\" assert 
response.headers[\"Content-Type\"] == \"application/json\" def sequential_transfers( server_from: APIServer, server_to:", "token_addresses: List[TokenAddress], port_generator: Iterator[Port], ) -> None: token_address = token_addresses[0]", "parallel\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) forward_transfers = [", "new_network = [greenlet.get() for greenlet in wait_network] return new_network def", "url=url, json=json) request = grequests.post(url, json=json) start = time.monotonic() response", "amount=TokenAmount(1), ) def stress_send_serial_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int],", "identifier_generator=identifier_generator, ) # reset the balances balances by sending the", "raiden.tests.integration.fixtures.raiden_network import RestartNode from raiden.tests.utils.detect_failure import raise_on_failure from raiden.tests.utils.protocol import", "raiden.raiden_service import RaidenService from raiden.settings import RestApiConfig from raiden.tests.integration.api.utils import", "restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node: RestartNode, api_servers: List[APIServer], port_generator: Iterator[Port], )", "return new_network def restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node: RestartNode, api_servers: List[APIServer],", "initial capacity. 
\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) #", "@raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\", [2]) @pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\",", "range(2): assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit)", "MatrixTransport from raiden.raiden_event_handler import RaidenEventHandler from raiden.raiden_service import RaidenService from", "\"\"\"Send `deposit` transfers in parallel, without changing the initial capacity.\"\"\"", "List[RaidenService], restart_node: RestartNode ) -> List[RaidenService]: for app in raiden_network:", "= (gevent.spawn(restart_app, app, restart_node) for app in raiden_network) gevent.joinall(set(wait_network), raise_error=True)", "identifier_generator: Iterator[int], ) -> None: for _ in range(number_of_transfers): transfer_and_assert(", "List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple, ) log = structlog.get_logger(__name__)", "the channels in the backwards direction for server_to, server_from in", "one at a time, without changing the initial capacity. \"\"\"", "number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in", "sequential_transfers( server_from: APIServer, server_to: APIServer, number_of_transfers: int, token_address: TokenAddress, identifier_generator:", "token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] )", "] backwards_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address,", "on passed greenlets. 
This ensures exceptions in the greenlets are", "server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs", "HTTPStatus.OK, f\"Payment failed, reason: {response.content}\" assert response.headers[\"Content-Type\"] == \"application/json\" def", "rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator ) assert_channels(raiden_network, token_network_address,", "raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator ) assert_channels(raiden_network,", "of value `1` one at a time, without changing the", "message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, ) restart_node(app) return app def restart_network( raiden_network: List[RaidenService],", "deplete the channels in one direction iwait_and_get( [ gevent.spawn( sequential_transfers,", "in pairs ] ) # deplete the channels in the", "gevent import grequests import pytest import structlog from eth_utils import", "url=url, json=json, response=response, duration=duration) assert getattr(request, \"exception\", None) is None", "for app in raiden_network] def restart_app(app: RaidenService, restart_node: RestartNode) ->", "List[RaidenService], restart_node: RestartNode, deposit: TokenAmount, token_addresses: List[TokenAddress], port_generator: Iterator[Port], )", "[rest_apis[0]])) # deplete the channels in one direction for server_from,", "+ [rest_apis[0]])) forward_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit,", "from raiden.tests.utils.transfer import ( assert_synced_channel_state, wait_assert, watch_for_unlock_failures, ) from raiden.transfer", ") def stress_send_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit:", "= grequests.post(url, json=json) start = 
time.monotonic() response = request.send().response duration", "= count(start=1) token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address, token_address, ) assert", "assert_synced_channel_state, token_network_address, first, deposit, [], second, deposit, [], ) @pytest.mark.skip(reason=\"flaky,", "server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) # deplete the channels", "iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address,", "RestartNode ) -> List[RaidenService]: for app in raiden_network: app.stop() wait_network", "for rest_api in api_servers: rest_api.stop() new_network = restart_network(raiden_network, restart_node) new_servers", "def stress_send_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount,", "routing_mode=RoutingMode.PRIVATE, ) restart_node(app) return app def restart_network( raiden_network: List[RaidenService], restart_node:", "are re-raised as soon as possible. 
\"\"\" for item in", "f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number) return api_server def start_apiserver_for_network( raiden_network: List[RaidenService], port_generator:", "stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node,", "import ( assert_synced_channel_state, wait_assert, watch_for_unlock_failures, ) from raiden.transfer import views", "APIServer, server_to: APIServer, token_address: TokenAddress, identifier: int, amount: TokenAmount, )", "-> None: token_address = token_addresses[0] rest_apis = start_apiserver_for_network(raiden_network, port_generator) identifier_generator", "for url_for api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number) return api_server def", "== \"application/json\" def sequential_transfers( server_from: APIServer, server_to: APIServer, number_of_transfers: int,", "@pytest.mark.parametrize(\"reveal_timeout\", [15]) @pytest.mark.parametrize(\"settle_timeout\", [120]) def test_stress( raiden_network: List[RaidenService], restart_node: RestartNode,", ") assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit)", "server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) # reset", "raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry, ), services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, )", "sending the \"extra\" deposit forward iwait_and_get( [ gevent.spawn( sequential_transfers, 
server_from=server_from,", "api_servers: List[APIServer], port_generator: Iterator[Port], ) -> Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop an", "watch_for_unlock_failures, ) from raiden.transfer import views from raiden.ui.startup import RaidenBundle", "identifier_generator=identifier_generator, ) for server_to, server_from in pairs ] iwait_and_get(forward_transfers +", "sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) for", "token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1), ) def stress_send_serial_transfers( rest_apis: List[APIServer], token_address: TokenAddress,", "start_apiserver_for_network( raiden_network: List[RaidenService], port_generator: Iterator[Port] ) -> List[APIServer]: return [start_apiserver(app,", "server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in pairs", ") raiden_event_handler = RaidenEventHandler() hold_handler = HoldRaidenEventHandler(raiden_event_handler) app = RaidenService(", "to_canonical_address from flask import url_for from raiden.api.python import RaidenAPI from", "token_address, ) assert token_network_address for _ in range(2): assert_channels(raiden_network, token_network_address,", "RaidenService, restart_node: RestartNode) -> RaidenService: new_transport = MatrixTransport( config=app.config.transport, environment=app.config.environment_type", "back\"\"\" for rest_api in api_servers: rest_api.stop() new_network = restart_network(raiden_network, restart_node)", "APIServer) -> Address: return apiserver.rest_api.raiden_api.address def transfer_and_assert( server_from: APIServer, server_to:", "time.monotonic() response = request.send().response duration = time.monotonic() - start log.debug(\"PAYMENT", "exceptions in the greenlets are re-raised as soon as 
possible.", "second in pairs: wait_assert( assert_synced_channel_state, token_network_address, first, deposit, [], second,", "rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator ) restart_network(raiden_network, restart_node)", "-> None: \"\"\"Iteratively wait and get on passed greenlets. This", "one direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address,", "-> None: pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]])) for first,", "server_to: APIServer, number_of_transfers: int, token_address: TokenAddress, identifier_generator: Iterator[int], ) ->", "\"\"\"Stop an app and start it back\"\"\" for rest_api in", "MessageHandler from raiden.network.transport import MatrixTransport from raiden.raiden_event_handler import RaidenEventHandler from", "import to_canonical_address from flask import url_for from raiden.api.python import RaidenAPI", "Address, BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple,", "raiden_network: app.stop() wait_network = (gevent.spawn(restart_app, app, restart_node) for app in", "import RestApiConfig from raiden.tests.integration.api.utils import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import RestartNode", "`deposit` transfers of value `1` one at a time, without", "app and start it back\"\"\" for rest_api in api_servers: rest_api.stop()", "assert response.status_code == HTTPStatus.OK, f\"Payment failed, reason: {response.content}\" assert response.headers[\"Content-Type\"]", "`deposit` transfers in parallel, without changing the initial capacity.\"\"\" pairs", "deposit forward for server_from, server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to,", "possible. 
\"\"\" for item in gevent.iwait(items): item.get() def _url_for(apiserver: APIServer,", "token_addresses[0] rest_apis = start_apiserver_for_network(raiden_network, port_generator) identifier_generator = count(start=1) token_network_address =", "-> None: \"\"\"Send `deposit` transfers of value `1` one at", "failed, reason: {response.content}\" assert response.headers[\"Content-Type\"] == \"application/json\" def sequential_transfers( server_from:", "number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in pairs ]", "in pairs ] iwait_and_get(forward_transfers + backwards_transfers) def assert_channels( raiden_network: List[RaidenService],", "str) and val.startswith(\"0x\"): kwargs[key] = to_canonical_address(val) with apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\",", "= f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number) return api_server def start_apiserver_for_network( raiden_network: List[RaidenService],", "int, token_address: TokenAddress, identifier_generator: Iterator[int], ) -> None: for _", "server_from=server_from, server_to=server_to, token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1), ) def stress_send_serial_transfers( rest_apis: List[APIServer],", "TokenNetworkAddress, Tuple, ) log = structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet]) ->", "from raiden.api.rest import APIServer, RestAPI from raiden.constants import RoutingMode from", "APIServer: raiden_api = RaidenAPI(raiden_app) rest_api = RestAPI(raiden_api) api_server = APIServer(", "TokenAddress, identifier_generator: Iterator[int], ) -> None: for _ in range(number_of_transfers):", "token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), ) json = {\"amount\": amount, \"identifier\": identifier} 
log.debug(\"PAYMENT", "wait_network] return new_network def restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node: RestartNode, api_servers:", "{\"amount\": amount, \"identifier\": identifier} log.debug(\"PAYMENT REQUEST\", url=url, json=json) request =", "* 2, token_address=token_address, identifier_generator=identifier_generator, ) # reset the balances balances", "number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs ]", "[start_apiserver(app, next(port_generator)) for app in raiden_network] def restart_app(app: RaidenService, restart_node:", "identifier=next(identifier_generator), amount=TokenAmount(1), ) def stress_send_serial_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator:", "TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send `deposit`", "token_address: TokenAddress, identifier: int, amount: TokenAmount, ) -> None: url", "initial capacity.\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete", "\"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), ) json = {\"amount\": amount, \"identifier\": identifier}", "deplete the channels in one direction for server_from, server_to in", "raiden_network, restart_node, rest_apis, port_generator ) assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network):", "port_generator: Iterator[Port] ) -> List[APIServer]: return [start_apiserver(app, next(port_generator)) for app", "duration=duration) assert getattr(request, \"exception\", None) is None assert response is", "def sequential_transfers( server_from: APIServer, server_to: APIServer, number_of_transfers: int, token_address: TokenAddress,", "token_network_address for _ in range(2): 
assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network):", "return apiserver.rest_api.raiden_api.address def transfer_and_assert( server_from: APIServer, server_to: APIServer, token_address: TokenAddress,", "pairs ] ) # reset the balances balances by sending", "import raise_on_failure from raiden.tests.utils.protocol import HoldRaidenEventHandler from raiden.tests.utils.transfer import (", "for first, second in pairs: wait_assert( assert_synced_channel_state, token_network_address, first, deposit,", "server_from, server_to in pairs ] ) # deplete the channels", "for server_from, server_to in pairs ] ) # deplete the", "[], second, deposit, [], ) @pytest.mark.skip(reason=\"flaky, see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\",", "start log.debug(\"PAYMENT RESPONSE\", url=url, json=json, response=response, duration=duration) assert getattr(request, \"exception\",", "in the backwards direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to,", "[1]) @pytest.mark.parametrize(\"channels_per_node\", [2]) @pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\", [15]) @pytest.mark.parametrize(\"settle_timeout\", [120]) def", "restart_node) new_servers = start_apiserver_for_network(new_network, port_generator) return (new_network, new_servers) def address_from_apiserver(apiserver:", "str: # url_for() expects binary address so we have to", "TokenAmount, ) -> None: \"\"\"Send `deposit` transfers of value `1`", "APIServer, RestAPI from raiden.constants import RoutingMode from raiden.message_handler import MessageHandler", "server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), ) json = {\"amount\": amount, \"identifier\":", "= restart_network_and_apiservers( 
raiden_network, restart_node, rest_apis, port_generator ) assert_channels(raiden_network, token_network_address, deposit)", "import RestartNode from raiden.tests.utils.detect_failure import raise_on_failure from raiden.tests.utils.protocol import HoldRaidenEventHandler", "in the backwards direction for server_to, server_from in pairs: sequential_transfers(", "in kwargs.items(): if isinstance(val, str) and val.startswith(\"0x\"): kwargs[key] = to_canonical_address(val)", "] ) # reset the balances balances by sending the", ") -> None: pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]])) for", "in pairs ] backwards_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to,", "token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address, token_address, ) assert token_network_address for", "= list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) forward_transfers = [ gevent.spawn( sequential_transfers,", "\"\"\" for item in gevent.iwait(items): item.get() def _url_for(apiserver: APIServer, endpoint:", "import MessageHandler from raiden.network.transport import MatrixTransport from raiden.raiden_event_handler import RaidenEventHandler", "app.default_secret_registry, ), services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, ) restart_node(app) return", "iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, )", "backwards direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit *", "def start_apiserver_for_network( raiden_network: List[RaidenService], port_generator: 
Iterator[Port] ) -> List[APIServer]: return", "services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, ) restart_node(app) return app def", "def assert_channels( raiden_network: List[RaidenService], token_network_address: TokenNetworkAddress, deposit: TokenAmount, ) ->", "see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\", [2]) @pytest.mark.parametrize(\"deposit\",", "port_generator: Iterator[Port], ) -> Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop an app and", "in parallel, without changing the initial capacity.\"\"\" pairs = list(zip(rest_apis,", "# deplete the channels in one direction for server_from, server_to", "from raiden.transfer import views from raiden.ui.startup import RaidenBundle from raiden.utils.formatting", "and start it back\"\"\" for rest_api in api_servers: rest_api.stop() new_network", "rest_api in api_servers: rest_api.stop() new_network = restart_network(raiden_network, restart_node) new_servers =", ") raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator )", "wait_for_listening_port(rest_api_port_number) return api_server def start_apiserver_for_network( raiden_network: List[RaidenService], port_generator: Iterator[Port] )", "an app and start it back\"\"\" for rest_api in api_servers:", "RaidenService: new_transport = MatrixTransport( config=app.config.transport, environment=app.config.environment_type ) raiden_event_handler = RaidenEventHandler()", ") -> None: token_address = token_addresses[0] rest_apis = start_apiserver_for_network(raiden_network, port_generator)", "environment=app.config.environment_type ) raiden_event_handler = RaidenEventHandler() hold_handler = 
HoldRaidenEventHandler(raiden_event_handler) app =", "raiden.network.transport import MatrixTransport from raiden.raiden_event_handler import RaidenEventHandler from raiden.raiden_service import", "import RaidenAPI from raiden.api.rest import APIServer, RestAPI from raiden.constants import", "token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs ] iwait_and_get(forward_transfers", "api_server.start() wait_for_listening_port(rest_api_port_number) return api_server def start_apiserver_for_network( raiden_network: List[RaidenService], port_generator: Iterator[Port]", "typing import Sequence import gevent import grequests import pytest import", ") -> None: url = _url_for( server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)),", "response=response, duration=duration) assert getattr(request, \"exception\", None) is None assert response", "start = time.monotonic() response = request.send().response duration = time.monotonic() -", "return api_server def start_apiserver_for_network( raiden_network: List[RaidenService], port_generator: Iterator[Port] ) ->", "duration = time.monotonic() - start log.debug(\"PAYMENT RESPONSE\", url=url, json=json, response=response,", "return [start_apiserver(app, next(port_generator)) for app in raiden_network] def restart_app(app: RaidenService,", "transfer_and_assert( server_from: APIServer, server_to: APIServer, token_address: TokenAddress, identifier: int, amount:", "capacity. 
\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete", ") for server_from, server_to in pairs ] backwards_transfers = [", "-> List[RaidenService]: for app in raiden_network: app.stop() wait_network = (gevent.spawn(restart_app,", "= views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address, token_address, ) assert token_network_address for _", "import gevent import grequests import pytest import structlog from eth_utils", "getattr(request, \"exception\", None) is None assert response is not None", "2, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs ]", "deposit: TokenAmount, ) -> None: \"\"\"Send `deposit` transfers of value", "raiden.ui.startup import RaidenBundle from raiden.utils.formatting import to_checksum_address from raiden.utils.typing import", "List[APIServer]]: \"\"\"Stop an app and start it back\"\"\" for rest_api", "\"identifier\": identifier} log.debug(\"PAYMENT REQUEST\", url=url, json=json) request = grequests.post(url, json=json)", "for server_from, server_to in pairs ] backwards_transfers = [ gevent.spawn(", "RestartNode, deposit: TokenAmount, token_addresses: List[TokenAddress], port_generator: Iterator[Port], ) -> None:", "= start_apiserver_for_network(raiden_network, port_generator) identifier_generator = count(start=1) token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]),", "RoutingMode from raiden.message_handler import MessageHandler from raiden.network.transport import MatrixTransport from", "reason: {response.content}\" assert response.headers[\"Content-Type\"] == \"application/json\" def sequential_transfers( server_from: APIServer,", "port_generator) identifier_generator = count(start=1) token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), 
raiden_network[0].default_registry.address, token_address,", "amount, \"identifier\": identifier} log.debug(\"PAYMENT REQUEST\", url=url, json=json) request = grequests.post(url,", "api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number) return api_server def start_apiserver_for_network( raiden_network:", "to_checksum_address from raiden.utils.typing import ( Address, BlockNumber, Host, Iterator, List,", "port_generator: Iterator[Port], ) -> None: token_address = token_addresses[0] rest_apis =", "server_from in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address,", "server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) for server_to,", "iwait_and_get(items: Sequence[gevent.Greenlet]) -> None: \"\"\"Iteratively wait and get on passed", "raiden_network: List[RaidenService], restart_node: RestartNode ) -> List[RaidenService]: for app in", "restart_node(app) return app def restart_network( raiden_network: List[RaidenService], restart_node: RestartNode )", "raiden.api.rest import APIServer, RestAPI from raiden.constants import RoutingMode from raiden.message_handler", "grequests.post(url, json=json) start = time.monotonic() response = request.send().response duration =", "* 2, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs", "sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) #", "so we have to convert here for key, val in", "@pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\", [2]) @pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\", 
[15]) @pytest.mark.parametrize(\"settle_timeout\", [120])", "token_address, identifier_generator, deposit ) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node,", "RESPONSE\", url=url, json=json, response=response, duration=duration) assert getattr(request, \"exception\", None) is", "= RaidenService( config=app.config, rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry, ),", "raiden_network: List[RaidenService], restart_node: RestartNode, deposit: TokenAmount, token_addresses: List[TokenAddress], port_generator: Iterator[Port],", "APIServer, number_of_transfers: int, token_address: TokenAddress, identifier_generator: Iterator[int], ) -> None:", "Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send `deposit` transfers in", "token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers( rest_apis, token_address, identifier_generator, deposit )", "token_address: TokenAddress, identifier_generator: Iterator[int], ) -> None: for _ in", "app def restart_network( raiden_network: List[RaidenService], restart_node: RestartNode ) -> List[RaidenService]:", "port_generator) return (new_network, new_servers) def address_from_apiserver(apiserver: APIServer) -> Address: return", "one direction for server_from, server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to,", "app.default_registry, app.default_secret_registry, ), services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, ) restart_node(app)", "int, amount: TokenAmount, ) -> None: url = _url_for( server_from,", "number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) def stress_send_parallel_transfers( 
rest_apis: List[APIServer], token_address: TokenAddress,", "list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) forward_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from,", "@pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\", [15]) @pytest.mark.parametrize(\"settle_timeout\", [120]) def test_stress( raiden_network: List[RaidenService],", "transfers of value one in parallel\"\"\" pairs = list(zip(rest_apis, rest_apis[1:]", "gevent.joinall(set(wait_network), raise_error=True) new_network = [greenlet.get() for greenlet in wait_network] return", "second, deposit, [], ) @pytest.mark.skip(reason=\"flaky, see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3])", ") restart_node(app) return app def restart_network( raiden_network: List[RaidenService], restart_node: RestartNode", "stress_send_serial_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, )", "identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator", "import RaidenEventHandler from raiden.raiden_service import RaidenService from raiden.settings import RestApiConfig", "from raiden.tests.utils.protocol import HoldRaidenEventHandler from raiden.tests.utils.transfer import ( assert_synced_channel_state, wait_assert,", "token_address, identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis,", "log = structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None: \"\"\"Iteratively wait", "[ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator,", "gevent.spawn( sequential_transfers, 
server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, )", "in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) def", "\"extra\" deposit forward iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit,", "= list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete the channels in", "server_to=server_to, token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1), ) def stress_send_serial_transfers( rest_apis: List[APIServer], token_address:", "MatrixTransport( config=app.config.transport, environment=app.config.environment_type ) raiden_event_handler = RaidenEventHandler() hold_handler = HoldRaidenEventHandler(raiden_event_handler)", "server_from, server_to in pairs ] ) def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer],", "identifier_generator=identifier_generator, ) # deplete the channels in the backwards direction", "[2]) @pytest.mark.parametrize(\"reveal_timeout\", [15]) @pytest.mark.parametrize(\"settle_timeout\", [120]) def test_stress( raiden_network: List[RaidenService], restart_node:", "raiden_network: List[RaidenService], port_generator: Iterator[Port] ) -> List[APIServer]: return [start_apiserver(app, next(port_generator))", "ensures exceptions in the greenlets are re-raised as soon as", "from raiden.raiden_service import RaidenService from raiden.settings import RestApiConfig from raiden.tests.integration.api.utils", "identifier_generator=identifier_generator, ) for server_to, server_from in pairs ] ) #", "deposit ) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator", "import pytest import structlog from eth_utils import to_canonical_address 
from flask", "pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) forward_transfers = [ gevent.spawn(", "wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import RestartNode from raiden.tests.utils.detect_failure import raise_on_failure from", "Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple, ) log =", "number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) # deplete the channels in the", "deposit) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator )", "**kwargs) def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer: raiden_api =", "we have to convert here for key, val in kwargs.items():", "import RaidenService from raiden.settings import RestApiConfig from raiden.tests.integration.api.utils import wait_for_listening_port", "app in raiden_network: app.stop() wait_network = (gevent.spawn(restart_app, app, restart_node) for", "response is not None assert response.status_code == HTTPStatus.OK, f\"Payment failed,", "binary address so we have to convert here for key,", "def restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node: RestartNode, api_servers: List[APIServer], port_generator: Iterator[Port],", "= [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, )", "deposit) with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers( rest_apis, token_address, identifier_generator, deposit ) raiden_network,", "new_network def restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node: RestartNode, api_servers: List[APIServer], port_generator:", "hold_handler = HoldRaidenEventHandler(raiden_event_handler) app = RaidenService( config=app.config, 
rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0),", "kwargs[key] = to_canonical_address(val) with apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\", **kwargs) def start_apiserver(raiden_app:", "TokenAmount, token_addresses: List[TokenAddress], port_generator: Iterator[Port], ) -> None: token_address =", "wait_assert, watch_for_unlock_failures, ) from raiden.transfer import views from raiden.ui.startup import", "in raiden_network) gevent.joinall(set(wait_network), raise_error=True) new_network = [greenlet.get() for greenlet in", "List[RaidenService], port_generator: Iterator[Port] ) -> List[APIServer]: return [start_apiserver(app, next(port_generator)) for", "address_from_apiserver(apiserver: APIServer) -> Address: return apiserver.rest_api.raiden_api.address def transfer_and_assert( server_from: APIServer,", "server_to: APIServer, token_address: TokenAddress, identifier: int, amount: TokenAmount, ) ->", "stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, )", "token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis", ") assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers( rest_apis, token_address, identifier_generator,", "restart_node: RestartNode, deposit: TokenAmount, token_addresses: List[TokenAddress], port_generator: Iterator[Port], ) ->", "in the greenlets are re-raised as soon as possible. 
\"\"\"", "the balances balances by sending the \"extra\" deposit forward iwait_and_get(", "json=json) start = time.monotonic() response = request.send().response duration = time.monotonic()", "Iterator[int], ) -> None: for _ in range(number_of_transfers): transfer_and_assert( server_from=server_from,", "pairs ] ) def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator:", "isinstance(val, str) and val.startswith(\"0x\"): kwargs[key] = to_canonical_address(val) with apiserver.flask_app.app_context(): return", "def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService: new_transport = MatrixTransport(", "Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple, ) log = structlog.get_logger(__name__) def", "Iterator[Port], ) -> Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop an app and start", "forward for server_from, server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit,", "stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node,", "rest_apis, port_generator ) assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers( rest_apis,", "# reset the balances balances by sending the \"extra\" deposit", "# required for url_for api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number) return", "List[RaidenService], restart_node: RestartNode, api_servers: List[APIServer], port_generator: Iterator[Port], ) -> Tuple[List[RaidenService],", "def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer: raiden_api = RaidenAPI(raiden_app)", "watch_for_unlock_failures(*raiden_network): 
stress_send_and_receive_parallel_transfers( rest_apis, token_address, identifier_generator, deposit ) raiden_network, rest_apis =", "server_from, server_to in pairs ] backwards_transfers = [ gevent.spawn( sequential_transfers,", "transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, ) restart_node(app) return app def restart_network(", "TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send transfers", "- start log.debug(\"PAYMENT RESPONSE\", url=url, json=json, response=response, duration=duration) assert getattr(request,", "kwargs.items(): if isinstance(val, str) and val.startswith(\"0x\"): kwargs[key] = to_canonical_address(val) with", "list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete the channels in one", "from typing import Sequence import gevent import grequests import pytest", "def transfer_and_assert( server_from: APIServer, server_to: APIServer, token_address: TokenAddress, identifier: int,", "of value one in parallel\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] +", "rest_apis, port_generator ) assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address,", "with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers(", "== HTTPStatus.OK, f\"Payment failed, reason: {response.content}\" assert response.headers[\"Content-Type\"] == \"application/json\"", "[rest_apis[0]])) forward_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address,", "token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] backwards_transfers", "List[TokenAddress], 
port_generator: Iterator[Port], ) -> None: token_address = token_addresses[0] rest_apis", "import grequests import pytest import structlog from eth_utils import to_canonical_address", "forward_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator,", "start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer: raiden_api = RaidenAPI(raiden_app) rest_api", "def restart_network( raiden_network: List[RaidenService], restart_node: RestartNode ) -> List[RaidenService]: for", "server_to, server_from in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2,", "-> None: \"\"\"Send transfers of value one in parallel\"\"\" pairs", "amount: TokenAmount, ) -> None: url = _url_for( server_from, \"token_target_paymentresource\",", "raiden_event_handler = RaidenEventHandler() hold_handler = HoldRaidenEventHandler(raiden_event_handler) app = RaidenService( config=app.config,", "gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from,", "RestAPI from raiden.constants import RoutingMode from raiden.message_handler import MessageHandler from", "transfer_and_assert( server_from=server_from, server_to=server_to, token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1), ) def stress_send_serial_transfers( rest_apis:", "import structlog from eth_utils import to_canonical_address from flask import url_for", "pairs: wait_assert( assert_synced_channel_state, token_network_address, first, deposit, [], second, deposit, [],", "it back\"\"\" for rest_api in api_servers: rest_api.stop() new_network = restart_network(raiden_network,", "import MatrixTransport from raiden.raiden_event_handler import 
RaidenEventHandler from raiden.raiden_service import RaidenService", "response.status_code == HTTPStatus.OK, f\"Payment failed, reason: {response.content}\" assert response.headers[\"Content-Type\"] ==", "\"\"\"Iteratively wait and get on passed greenlets. This ensures exceptions", "query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry, ), services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE,", "assert response is not None assert response.status_code == HTTPStatus.OK, f\"Payment", "deposit, [], second, deposit, [], ) @pytest.mark.skip(reason=\"flaky, see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure", "structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None: \"\"\"Iteratively wait and get", "new_network = restart_network(raiden_network, restart_node) new_servers = start_apiserver_for_network(new_network, port_generator) return (new_network,", "the channels in the backwards direction iwait_and_get( [ gevent.spawn( sequential_transfers,", "eth_utils import to_canonical_address from flask import url_for from raiden.api.python import", ") def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit:", "HTTPStatus from itertools import count from typing import Sequence import", ") # deplete the channels in the backwards direction iwait_and_get(", "val.startswith(\"0x\"): kwargs[key] = to_canonical_address(val) with apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\", **kwargs) def", "raiden_network: List[RaidenService], restart_node: RestartNode, api_servers: List[APIServer], port_generator: Iterator[Port], ) ->", "response.headers[\"Content-Type\"] == \"application/json\" def sequential_transfers( server_from: 
APIServer, server_to: APIServer, number_of_transfers:", "import APIServer, RestAPI from raiden.constants import RoutingMode from raiden.message_handler import", "token_address = token_addresses[0] rest_apis = start_apiserver_for_network(raiden_network, port_generator) identifier_generator = count(start=1)", "api_servers: rest_api.stop() new_network = restart_network(raiden_network, restart_node) new_servers = start_apiserver_for_network(new_network, port_generator)", "TokenAmount, ) -> None: pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]]))", "import url_for from raiden.api.python import RaidenAPI from raiden.api.rest import APIServer,", "in raiden_network: app.stop() wait_network = (gevent.spawn(restart_app, app, restart_node) for app", "gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_to,", "token_address=token_address, identifier_generator=identifier_generator, ) # reset the balances balances by sending", "from raiden.network.transport import MatrixTransport from raiden.raiden_event_handler import RaidenEventHandler from raiden.raiden_service", "as possible. \"\"\" for item in gevent.iwait(items): item.get() def _url_for(apiserver:", "import HTTPStatus from itertools import count from typing import Sequence", "new_servers) def address_from_apiserver(apiserver: APIServer) -> Address: return apiserver.rest_api.raiden_api.address def transfer_and_assert(", "url_for() expects binary address so we have to convert here", "None: token_address = token_addresses[0] rest_apis = start_apiserver_for_network(raiden_network, port_generator) identifier_generator =", "app.stop() wait_network = (gevent.spawn(restart_app, app, restart_node) for app in raiden_network)", "greenlets. 
This ensures exceptions in the greenlets are re-raised as", "[3]) @pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\", [2]) @pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\", [15]) @pytest.mark.parametrize(\"settle_timeout\",", "from flask import url_for from raiden.api.python import RaidenAPI from raiden.api.rest", "views from raiden.ui.startup import RaidenBundle from raiden.utils.formatting import to_checksum_address from", "<gh_stars>1000+ import time from http import HTTPStatus from itertools import", "config=app.config.transport, environment=app.config.environment_type ) raiden_event_handler = RaidenEventHandler() hold_handler = HoldRaidenEventHandler(raiden_event_handler) app", "f\"Payment failed, reason: {response.content}\" assert response.headers[\"Content-Type\"] == \"application/json\" def sequential_transfers(", "transfers of value `1` one at a time, without changing", "in one direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit,", "server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in", "identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] ) #", "pairs ] iwait_and_get(forward_transfers + backwards_transfers) def assert_channels( raiden_network: List[RaidenService], token_network_address:", "] iwait_and_get(forward_transfers + backwards_transfers) def assert_channels( raiden_network: List[RaidenService], token_network_address: TokenNetworkAddress,", "# deplete the channels in the backwards direction iwait_and_get( [", "pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete the channels", "sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, 
identifier_generator=identifier_generator, ) for server_to, server_from", "[15]) @pytest.mark.parametrize(\"settle_timeout\", [120]) def test_stress( raiden_network: List[RaidenService], restart_node: RestartNode, deposit:", "server_from: APIServer, server_to: APIServer, token_address: TokenAddress, identifier: int, amount: TokenAmount,", "rest_apis[1:] + [rest_apis[0]])) forward_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to,", "token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send", "-> APIServer: raiden_api = RaidenAPI(raiden_app) rest_api = RestAPI(raiden_api) api_server =", "from raiden.tests.utils.detect_failure import raise_on_failure from raiden.tests.utils.protocol import HoldRaidenEventHandler from raiden.tests.utils.transfer", "for _ in range(number_of_transfers): transfer_and_assert( server_from=server_from, server_to=server_to, token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1),", "[raiden_network[0]])) for first, second in pairs: wait_assert( assert_synced_channel_state, token_network_address, first,", ") assert token_network_address for _ in range(2): assert_channels(raiden_network, token_network_address, deposit)", "in range(number_of_transfers): transfer_and_assert( server_from=server_from, server_to=server_to, token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1), ) def", "channels in one direction for server_from, server_to in pairs: sequential_transfers(", "rest_apis[1:] + [rest_apis[0]])) # deplete the channels in one direction", "rest_api = RestAPI(raiden_api) api_server = APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) )", "= HoldRaidenEventHandler(raiden_event_handler) app = RaidenService( config=app.config, rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), 
raiden_bundle=RaidenBundle(", "str, **kwargs) -> str: # url_for() expects binary address so", "-> None: url = _url_for( server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), )", "rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) ) # required for url_for api_server.flask_app.config[\"SERVER_NAME\"] =", "response = request.send().response duration = time.monotonic() - start log.debug(\"PAYMENT RESPONSE\",", ") -> None: \"\"\"Send `deposit` transfers in parallel, without changing", "= time.monotonic() response = request.send().response duration = time.monotonic() - start", "for server_from, server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address,", "with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers(", "Tuple, ) log = structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None:", "request = grequests.post(url, json=json) start = time.monotonic() response = request.send().response", "deposit: TokenAmount, token_addresses: List[TokenAddress], port_generator: Iterator[Port], ) -> None: token_address", "structlog from eth_utils import to_canonical_address from flask import url_for from", "by sending the \"extra\" deposit forward iwait_and_get( [ gevent.spawn( sequential_transfers,", "a time, without changing the initial capacity. 
\"\"\" pairs =", "config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) ) # required for url_for api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\"", "HoldRaidenEventHandler(raiden_event_handler) app = RaidenService( config=app.config, rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry,", "(gevent.spawn(restart_app, app, restart_node) for app in raiden_network) gevent.joinall(set(wait_network), raise_error=True) new_network", "List[APIServer]: return [start_apiserver(app, next(port_generator)) for app in raiden_network] def restart_app(app:", "get on passed greenlets. This ensures exceptions in the greenlets", "raiden.tests.utils.detect_failure import raise_on_failure from raiden.tests.utils.protocol import HoldRaidenEventHandler from raiden.tests.utils.transfer import", "deposit) with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis =", "= RaidenAPI(raiden_app) rest_api = RestAPI(raiden_api) api_server = APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"),", "val in kwargs.items(): if isinstance(val, str) and val.startswith(\"0x\"): kwargs[key] =", "RaidenService, rest_api_port_number: Port) -> APIServer: raiden_api = RaidenAPI(raiden_app) rest_api =", "first, second in pairs: wait_assert( assert_synced_channel_state, token_network_address, first, deposit, [],", "the balances balances by sending the \"extra\" deposit forward for", "value `1` one at a time, without changing the initial", "= time.monotonic() - start log.debug(\"PAYMENT RESPONSE\", url=url, json=json, response=response, duration=duration)", "in range(2): assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address, 
identifier_generator,", "pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, )", "token_address=token_address, identifier_generator=identifier_generator, ) # deplete the channels in the backwards", "TokenAddress, TokenAmount, TokenNetworkAddress, Tuple, ) log = structlog.get_logger(__name__) def iwait_and_get(items:", "return url_for(f\"v1_resources.{endpoint}\", **kwargs) def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer:", "@pytest.mark.skip(reason=\"flaky, see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\", [2])", "backwards_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator,", "None: \"\"\"Send `deposit` transfers of value `1` one at a", "-> None: for _ in range(number_of_transfers): transfer_and_assert( server_from=server_from, server_to=server_to, token_address=token_address,", "from itertools import count from typing import Sequence import gevent", ") # reset the balances balances by sending the \"extra\"", "(new_network, new_servers) def address_from_apiserver(apiserver: APIServer) -> Address: return apiserver.rest_api.raiden_api.address def", "Sequence import gevent import grequests import pytest import structlog from", "assert getattr(request, \"exception\", None) is None assert response is not", "import time from http import HTTPStatus from itertools import count", "= start_apiserver_for_network(new_network, port_generator) return (new_network, new_servers) def address_from_apiserver(apiserver: APIServer) ->", "server_from, server_to in pairs: sequential_transfers( 
server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator,", "expects binary address so we have to convert here for", "identifier_generator=identifier_generator, ) def stress_send_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int],", "direction for server_from, server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit,", "in raiden_network] def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService: new_transport", "return app def restart_network( raiden_network: List[RaidenService], restart_node: RestartNode ) ->", "token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs ] )", "restart_network_and_apiservers( raiden_network, restart_node, rest_apis, port_generator ) assert_channels(raiden_network, token_network_address, deposit) with", "from raiden.message_handler import MessageHandler from raiden.network.transport import MatrixTransport from raiden.raiden_event_handler", "for greenlet in wait_network] return new_network def restart_network_and_apiservers( raiden_network: List[RaidenService],", "start_apiserver_for_network(new_network, port_generator) return (new_network, new_servers) def address_from_apiserver(apiserver: APIServer) -> Address:", "None: \"\"\"Send `deposit` transfers in parallel, without changing the initial", "in pairs ] ) def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress,", "RaidenService from raiden.settings import RestApiConfig from raiden.tests.integration.api.utils import wait_for_listening_port from", "key, val in kwargs.items(): if isinstance(val, str) and val.startswith(\"0x\"): kwargs[key]", "by sending the \"extra\" deposit forward for server_from, server_to in", "the \"extra\" deposit forward 
iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to,", "passed greenlets. This ensures exceptions in the greenlets are re-raised", "raiden_network] def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService: new_transport =", "backwards direction for server_to, server_from in pairs: sequential_transfers( server_from=server_from, server_to=server_to,", "server_to, server_from in pairs ] ) # reset the balances", "None: url = _url_for( server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), ) json", "assert_synced_channel_state, wait_assert, watch_for_unlock_failures, ) from raiden.transfer import views from raiden.ui.startup", "**kwargs) -> str: # url_for() expects binary address so we", "greenlets are re-raised as soon as possible. \"\"\" for item", "apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\", **kwargs) def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) ->", "in wait_network] return new_network def restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node: RestartNode,", "pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]])) for first, second in", "from raiden.tests.integration.fixtures.raiden_network import RestartNode from raiden.tests.utils.detect_failure import raise_on_failure from raiden.tests.utils.protocol", "required for url_for api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number) return api_server", "deposit forward iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address,", "url_for(f\"v1_resources.{endpoint}\", **kwargs) def start_apiserver(raiden_app: RaidenService, 
rest_api_port_number: Port) -> APIServer: raiden_api", "json=json, response=response, duration=duration) assert getattr(request, \"exception\", None) is None assert", "None) is None assert response is not None assert response.status_code", "None: for _ in range(number_of_transfers): transfer_and_assert( server_from=server_from, server_to=server_to, token_address=token_address, identifier=next(identifier_generator),", "server_to in pairs ] backwards_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from,", "item in gevent.iwait(items): item.get() def _url_for(apiserver: APIServer, endpoint: str, **kwargs)", "identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] ) def", "[greenlet.get() for greenlet in wait_network] return new_network def restart_network_and_apiservers( raiden_network:", "pytest import structlog from eth_utils import to_canonical_address from flask import", "return (new_network, new_servers) def address_from_apiserver(apiserver: APIServer) -> Address: return apiserver.rest_api.raiden_api.address", "import RoutingMode from raiden.message_handler import MessageHandler from raiden.network.transport import MatrixTransport", "the backwards direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit", "config=app.config, rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry, ), services_bundle=app.default_services_bundle, transport=new_transport,", "server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) def stress_send_parallel_transfers( rest_apis: List[APIServer],", "@pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\", [1]) @pytest.mark.parametrize(\"channels_per_node\", [2]) 
@pytest.mark.parametrize(\"deposit\", [2]) @pytest.mark.parametrize(\"reveal_timeout\", [15])", "= structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None: \"\"\"Iteratively wait and", "RestApiConfig from raiden.tests.integration.api.utils import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import RestartNode from", "first, deposit, [], second, deposit, [], ) @pytest.mark.skip(reason=\"flaky, see https://github.com/raiden-network/raiden/issues/4803\")", "views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address, token_address, ) assert token_network_address for _ in", "import count from typing import Sequence import gevent import grequests", "-> Address: return apiserver.rest_api.raiden_api.address def transfer_and_assert( server_from: APIServer, server_to: APIServer,", "restart_node, rest_apis, port_generator ) assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_parallel_transfers(rest_apis,", "direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator,", "start_apiserver_for_network(raiden_network, port_generator) identifier_generator = count(start=1) token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address,", "balances by sending the \"extra\" deposit forward for server_from, server_to", "balances balances by sending the \"extra\" deposit forward iwait_and_get( [", "url_for api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number) return api_server def start_apiserver_for_network(", "backwards_transfers) 
def assert_channels( raiden_network: List[RaidenService], token_network_address: TokenNetworkAddress, deposit: TokenAmount, )", "Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send transfers of value", "REQUEST\", url=url, json=json) request = grequests.post(url, json=json) start = time.monotonic()", "Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send `deposit` transfers of", "deposit: TokenAmount, ) -> None: \"\"\"Send `deposit` transfers in parallel,", "stress_send_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, )", "for item in gevent.iwait(items): item.get() def _url_for(apiserver: APIServer, endpoint: str,", "List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None:", "identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send `deposit` transfers", "RaidenEventHandler() hold_handler = HoldRaidenEventHandler(raiden_event_handler) app = RaidenService( config=app.config, rpc_client=app.rpc_client, proxy_manager=app.proxy_manager,", "from raiden.utils.typing import ( Address, BlockNumber, Host, Iterator, List, Port,", "pairs ] ) # deplete the channels in the backwards", "server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, )", "server_to in pairs ] ) # deplete the channels in", "rest_api.stop() new_network = restart_network(raiden_network, restart_node) new_servers = start_apiserver_for_network(new_network, port_generator) return", "in parallel\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) forward_transfers =", "RestartNode) -> RaidenService: new_transport = MatrixTransport( config=app.config.transport, environment=app.config.environment_type ) raiden_event_handler", "TokenAmount, ) -> None: \"\"\"Send transfers of value one in", 
"raiden_network[1:] + [raiden_network[0]])) for first, second in pairs: wait_assert( assert_synced_channel_state,", "pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) # deplete", "_ in range(number_of_transfers): transfer_and_assert( server_from=server_from, server_to=server_to, token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1), )", "for server_to, server_from in pairs ] ) # reset the", "( Address, BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress,", "restart_node, rest_apis, port_generator ) assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers(", "def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount,", "restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService: new_transport = MatrixTransport( config=app.config.transport,", "for _ in range(2): assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis,", "wait and get on passed greenlets. This ensures exceptions in", "the initial capacity.\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) #", "assert_channels(raiden_network, token_network_address, deposit) with watch_for_unlock_failures(*raiden_network): stress_send_and_receive_parallel_transfers( rest_apis, token_address, identifier_generator, deposit", "new_servers = start_apiserver_for_network(new_network, port_generator) return (new_network, new_servers) def address_from_apiserver(apiserver: APIServer)", "without changing the initial capacity. 
\"\"\" pairs = list(zip(rest_apis, rest_apis[1:]", "-> List[APIServer]: return [start_apiserver(app, next(port_generator)) for app in raiden_network] def", "in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) #", "as soon as possible. \"\"\" for item in gevent.iwait(items): item.get()", "TokenAddress, identifier: int, amount: TokenAmount, ) -> None: url =", "json = {\"amount\": amount, \"identifier\": identifier} log.debug(\"PAYMENT REQUEST\", url=url, json=json)", "@pytest.mark.parametrize(\"settle_timeout\", [120]) def test_stress( raiden_network: List[RaidenService], restart_node: RestartNode, deposit: TokenAmount,", "List[APIServer], port_generator: Iterator[Port], ) -> Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop an app", "\"\"\"Send transfers of value one in parallel\"\"\" pairs = list(zip(rest_apis,", "BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple, )", "target_address=to_checksum_address(address_from_apiserver(server_to)), ) json = {\"amount\": amount, \"identifier\": identifier} log.debug(\"PAYMENT REQUEST\",", "import views from raiden.ui.startup import RaidenBundle from raiden.utils.formatting import to_checksum_address", "\"exception\", None) is None assert response is not None assert", "server_to in pairs ] ) def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address:", ") for server_from, server_to in pairs ] ) def stress_send_and_receive_parallel_transfers(", "url_for from raiden.api.python import RaidenAPI from raiden.api.rest import APIServer, RestAPI", "RaidenBundle from raiden.utils.formatting import to_checksum_address from raiden.utils.typing import ( Address,", "for server_from, server_to in pairs ] ) def stress_send_and_receive_parallel_transfers( rest_apis:", "def test_stress( raiden_network: List[RaidenService], restart_node: 
RestartNode, deposit: TokenAmount, token_addresses: List[TokenAddress],", ") json = {\"amount\": amount, \"identifier\": identifier} log.debug(\"PAYMENT REQUEST\", url=url,", "_url_for( server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), ) json = {\"amount\": amount,", "here for key, val in kwargs.items(): if isinstance(val, str) and", "\"extra\" deposit forward for server_from, server_to in pairs: sequential_transfers( server_from=server_from,", "RaidenService( config=app.config, rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry, ), services_bundle=app.default_services_bundle,", ") # required for url_for api_server.flask_app.config[\"SERVER_NAME\"] = f\"localhost:{rest_api_port_number}\" api_server.start() wait_for_listening_port(rest_api_port_number)", "RestAPI(raiden_api) api_server = APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) ) # required", "log.debug(\"PAYMENT RESPONSE\", url=url, json=json, response=response, duration=duration) assert getattr(request, \"exception\", None)", "flask import url_for from raiden.api.python import RaidenAPI from raiden.api.rest import", "identifier_generator, deposit ) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network, restart_node, rest_apis,", "and val.startswith(\"0x\"): kwargs[key] = to_canonical_address(val) with apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\", **kwargs)", "assert_channels( raiden_network: List[RaidenService], token_network_address: TokenNetworkAddress, deposit: TokenAmount, ) -> None:", "iwait_and_get(forward_transfers + backwards_transfers) def assert_channels( raiden_network: List[RaidenService], token_network_address: TokenNetworkAddress, deposit:", "None: 
\"\"\"Iteratively wait and get on passed greenlets. This ensures", ") -> None: \"\"\"Send `deposit` transfers of value `1` one", "server_from in pairs ] iwait_and_get(forward_transfers + backwards_transfers) def assert_channels( raiden_network:", "token_network_address: TokenNetworkAddress, deposit: TokenAmount, ) -> None: pairs = list(zip(raiden_network,", "deposit: TokenAmount, ) -> None: pairs = list(zip(raiden_network, raiden_network[1:] +", ") from raiden.transfer import views from raiden.ui.startup import RaidenBundle from", "http import HTTPStatus from itertools import count from typing import", "api_server def start_apiserver_for_network( raiden_network: List[RaidenService], port_generator: Iterator[Port] ) -> List[APIServer]:", "raiden.message_handler import MessageHandler from raiden.network.transport import MatrixTransport from raiden.raiden_event_handler import", "for server_to, server_from in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit *", "grequests import pytest import structlog from eth_utils import to_canonical_address from", "deposit, [], ) @pytest.mark.skip(reason=\"flaky, see https://github.com/raiden-network/raiden/issues/4803\") @raise_on_failure @pytest.mark.parametrize(\"number_of_nodes\", [3]) @pytest.mark.parametrize(\"number_of_tokens\",", "TokenNetworkAddress, deposit: TokenAmount, ) -> None: pairs = list(zip(raiden_network, raiden_network[1:]", "range(number_of_transfers): transfer_and_assert( server_from=server_from, server_to=server_to, token_address=token_address, identifier=next(identifier_generator), amount=TokenAmount(1), ) def stress_send_serial_transfers(", "sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to", "wait_assert( assert_synced_channel_state, token_network_address, first, deposit, [], second, deposit, [], )", 
"restart_node) for app in raiden_network) gevent.joinall(set(wait_network), raise_error=True) new_network = [greenlet.get()", "from raiden.utils.formatting import to_checksum_address from raiden.utils.typing import ( Address, BlockNumber,", "value one in parallel\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))", "raise_on_failure from raiden.tests.utils.protocol import HoldRaidenEventHandler from raiden.tests.utils.transfer import ( assert_synced_channel_state,", "soon as possible. \"\"\" for item in gevent.iwait(items): item.get() def", "APIServer, token_address: TokenAddress, identifier: int, amount: TokenAmount, ) -> None:", "identifier_generator = count(start=1) token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address, token_address, )", "server_from in pairs ] ) # reset the balances balances", "raiden.api.python import RaidenAPI from raiden.api.rest import APIServer, RestAPI from raiden.constants", "import Sequence import gevent import grequests import pytest import structlog", "from raiden.ui.startup import RaidenBundle from raiden.utils.formatting import to_checksum_address from raiden.utils.typing", "token_address=token_address, identifier_generator=identifier_generator, ) def stress_send_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator:", "time from http import HTTPStatus from itertools import count from", "\"application/json\" def sequential_transfers( server_from: APIServer, server_to: APIServer, number_of_transfers: int, token_address:", "direction for server_to, server_from in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit", "raiden.tests.utils.protocol import HoldRaidenEventHandler from raiden.tests.utils.transfer import ( assert_synced_channel_state, wait_assert, watch_for_unlock_failures,", "the greenlets are re-raised as 
soon as possible. \"\"\" for", "= MatrixTransport( config=app.config.transport, environment=app.config.environment_type ) raiden_event_handler = RaidenEventHandler() hold_handler =", "proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry, ), services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler, message_handler=MessageHandler(),", "is None assert response is not None assert response.status_code ==", "convert here for key, val in kwargs.items(): if isinstance(val, str)", "the initial capacity. \"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))", "list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]])) for first, second in pairs: wait_assert(", "views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address, token_address, ) assert token_network_address for _ in range(2):", ") def stress_send_serial_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit:", "2, token_address=token_address, identifier_generator=identifier_generator, ) # reset the balances balances by", "= _url_for( server_from, \"token_target_paymentresource\", token_address=to_checksum_address(token_address), target_address=to_checksum_address(address_from_apiserver(server_to)), ) json = {\"amount\":", "-> Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop an app and start it back\"\"\"", "server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) def stress_send_parallel_transfers( rest_apis: List[APIServer], token_address:", "raiden.tests.utils.transfer import ( assert_synced_channel_state, wait_assert, watch_for_unlock_failures, ) from raiden.transfer import", "identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] backwards_transfers =", "api_server = 
APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) ) # required for", "in pairs ] ) # reset the balances balances by", "APIServer( rest_api, config=RestApiConfig(host=Host(\"localhost\"), port=rest_api_port_number) ) # required for url_for api_server.flask_app.config[\"SERVER_NAME\"]", "transfers in parallel, without changing the initial capacity.\"\"\" pairs =", "from raiden.settings import RestApiConfig from raiden.tests.integration.api.utils import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network", "watch_for_unlock_failures(*raiden_network): stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit) raiden_network, rest_apis = restart_network_and_apiservers( raiden_network,", "-> None: \"\"\"Send `deposit` transfers in parallel, without changing the", "+ [raiden_network[0]])) for first, second in pairs: wait_assert( assert_synced_channel_state, token_network_address,", "None assert response is not None assert response.status_code == HTTPStatus.OK,", "for app in raiden_network: app.stop() wait_network = (gevent.spawn(restart_app, app, restart_node)", "assert response.headers[\"Content-Type\"] == \"application/json\" def sequential_transfers( server_from: APIServer, server_to: APIServer,", "import to_checksum_address from raiden.utils.typing import ( Address, BlockNumber, Host, Iterator,", ") for server_from, server_to in pairs ] ) # deplete", "Sequence[gevent.Greenlet]) -> None: \"\"\"Iteratively wait and get on passed greenlets.", "re-raised as soon as possible. 
\"\"\" for item in gevent.iwait(items):", "List[RaidenService]: for app in raiden_network: app.stop() wait_network = (gevent.spawn(restart_app, app,", "address so we have to convert here for key, val", "identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: \"\"\"Send transfers of", "RestartNode from raiden.tests.utils.detect_failure import raise_on_failure from raiden.tests.utils.protocol import HoldRaidenEventHandler from", "RaidenAPI from raiden.api.rest import APIServer, RestAPI from raiden.constants import RoutingMode", "raiden_api = RaidenAPI(raiden_app) rest_api = RestAPI(raiden_api) api_server = APIServer( rest_api,", "app = RaidenService( config=app.config, rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry,", "start it back\"\"\" for rest_api in api_servers: rest_api.stop() new_network =", "( assert_synced_channel_state, wait_assert, watch_for_unlock_failures, ) from raiden.transfer import views from", "[ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for", "APIServer, endpoint: str, **kwargs) -> str: # url_for() expects binary", "Address: return apiserver.rest_api.raiden_api.address def transfer_and_assert( server_from: APIServer, server_to: APIServer, token_address:", "+ [rest_apis[0]])) # deplete the channels in one direction for", "-> str: # url_for() expects binary address so we have", "to convert here for key, val in kwargs.items(): if isinstance(val,", "to_canonical_address(val) with apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\", **kwargs) def start_apiserver(raiden_app: RaidenService, rest_api_port_number:", "capacity.\"\"\" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete the", "app in raiden_network) 
gevent.joinall(set(wait_network), raise_error=True) new_network = [greenlet.get() for greenlet", "from raiden.raiden_event_handler import RaidenEventHandler from raiden.raiden_service import RaidenService from raiden.settings", "= to_canonical_address(val) with apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\", **kwargs) def start_apiserver(raiden_app: RaidenService,", "count(start=1) token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0]), raiden_network[0].default_registry.address, token_address, ) assert token_network_address", "TokenAmount, TokenNetworkAddress, Tuple, ) log = structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet])", "rpc_client=app.rpc_client, proxy_manager=app.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle( app.default_registry, app.default_secret_registry, ), services_bundle=app.default_services_bundle, transport=new_transport, raiden_event_handler=hold_handler,", "raiden_event_handler=hold_handler, message_handler=MessageHandler(), routing_mode=RoutingMode.PRIVATE, ) restart_node(app) return app def restart_network( raiden_network:", "= restart_network(raiden_network, restart_node) new_servers = start_apiserver_for_network(new_network, port_generator) return (new_network, new_servers)", "endpoint: str, **kwargs) -> str: # url_for() expects binary address", "import HoldRaidenEventHandler from raiden.tests.utils.transfer import ( assert_synced_channel_state, wait_assert, watch_for_unlock_failures, )", "Port) -> APIServer: raiden_api = RaidenAPI(raiden_app) rest_api = RestAPI(raiden_api) api_server", "import ( Address, BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount,", "forward iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator,", "with 
apiserver.flask_app.app_context(): return url_for(f\"v1_resources.{endpoint}\", **kwargs) def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port)", ") -> Tuple[List[RaidenService], List[APIServer]]: \"\"\"Stop an app and start it", "assert token_network_address for _ in range(2): assert_channels(raiden_network, token_network_address, deposit) with", "-> RaidenService: new_transport = MatrixTransport( config=app.config.transport, environment=app.config.environment_type ) raiden_event_handler =", "None: \"\"\"Send transfers of value one in parallel\"\"\" pairs =", "Iterator[Port] ) -> List[APIServer]: return [start_apiserver(app, next(port_generator)) for app in" ]
[ "data preprocess\") # process test convert_to_atepc(inpath, dist_test_fname, 'test') print (\"<<<", "print (\"text\", text) def convert_sentiment(sentiment_key): if sentiment_key == '正': sentiment_value", "= data['text'].map(lambda x: filter_emoji(x, restr='xx')) # drop id list not", "= open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) data.columns = ['text',", "task): if not os.path.exists(folder_name): os.makedirs(folder_name) if task == 'aptepc': #", "os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt') # process train convert_to_apc(inpath, dist_train_fname, 'train')", "'Others' tags[label[4][0]] = 'B-ASP' sentiment[label[4][0]] = sentiment_value k = label[4][0]", "= [] for j in range(len(label_update)): str1 = text[:label_update[j][2][0]] +", "data_res = x_train.iloc[:, :].reset_index() else: data_res = x_test.iloc[:, :].reset_index() #", "sentiment_value = 'Others' tags[label[4][0]] = 'B-'+label[1] sentiment[label[4][0]] = sentiment_value k", "folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc') dist_test_fname = os.path.join(folder_name_prefix,", "pandas as pd import argparse import emoji import re from", "[] str2_list = [] str3_list = [] for j in", "finished!\") def convert_to_atepc_tag(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname)", "+ 1 while k < label[4][1]: tags[k] = 'I-ASP' sentiment[k]", "sentiment_value = 'Negative' else: sentiment_value = 'Others' tags[label[4][0]] = 'B-ASP'", "'\\u3000', '\\u2002', '\\u2003', '\\u2005', '\\x0c', '\\u2028', '\\u2009', '\\u200a']: f1.write(word +", "parser.add_argument(\"--inpath\", type=str, required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str, required=False, default='./custom') parser.add_argument(\"--task\", type=str,", "# print (data_res.head()) for i in range(len(data_res)): text, label =", "= 
os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix +", "sentiment[k] = sentiment_value k += 1 return text, tags, sentiment", "= label[4][0] + 1 while k < label[4][1]: tags[k] =", "train convert_to_atepc(inpath, dist_train_fname, 'train') print (\"<<< finish training data preprocess\")", "j in range(len(labels)): label = labels[j] sentiment_key = labels[j][3] if", "+ sen + '\\n') else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process", "'tag_sentiment_list'] # preprocess for emoji data['text'] = data['text'].map(lambda x: filter_emoji(x,", "'Negative' else: sentiment_value = 'Others' tags[label[4][0]] = 'B-'+label[1] sentiment[label[4][0]] =", "str3_list): f1.write(x1 + '\\n') f1.write(x2 + '\\n') f1.write(x3 + '\\n')", "(\"<<< finish training data preprocess\") # process test convert_to_apc(inpath, dist_test_fname,", "return sentiment_value def convert_apc(text, label): label_update = [(i[0], i[3], i[4])", "= re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr, desstr) def convert_to_atepc(inpath, dist_fname, flag): #", "= pd.read_csv(inpath) # train test split x_train, x_test = train_test_split(data,", "[] for j in range(len(label_update)): str1 = text[:label_update[j][2][0]] + '$T$", "label to list try: labels = eval(labels) tags = ['O']", "str3_list.append(convert_sentiment(label_update[j][1])) return str1_list, str2_list, str3_list def filter_emoji(desstr, restr=''): # 过滤表情", "flag == 'train': data_res = x_train.iloc[:, :].reset_index() else: data_res =", "utf-8 -*- # file: preprocess.py # author: jackie # Copyright", "dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1 = open(dist_fname,", "'\\u200a']: f1.write(word + ' ' + tag + ' '", "+ '.test.txt.atepc') # process train convert_to_atepc(inpath, dist_train_fname, 'train') print (\"<<<", "main(inpath, folder_name, task): if not 
os.path.exists(folder_name): os.makedirs(folder_name) if task ==", "co = re.compile(u'[\\U00010000-\\U0010ffff]') except re.error: co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr,", "print (\"process apc finished!\") def main(inpath, folder_name, task): if not", "type=str, required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str, required=False, default='./custom') parser.add_argument(\"--task\", type=str, required=False,", "in range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list, str2_list, str3_list", "i[4]) for i in eval(label)] label_update = list(set(label_update)) str1_list =", "data = pd.read_csv(inpath) # train test split x_train, x_test =", "label): label_update = [(i[0], i[3], i[4]) for i in eval(label)]", ":].reset_index() else: data_res = x_test.iloc[:, :].reset_index() # print (data_res.head()) for", "+ '$T$ ' + text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return str1_list,", "sentiment_key == '正': sentiment_value = 'Positive' else: sentiment_value = 'Negative'", "' + text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return str1_list, str2_list, str3_list", "folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt') dist_test_fname = os.path.join(folder_name_prefix,", "else: sentiment_value = 'Others' tags[label[4][0]] = 'B-'+label[1] sentiment[label[4][0]] = sentiment_value", "f1.close() print (\"process atepc finished!\") def convert_to_apc(inpath, dist_fname, flag): #", "Reserved. 
import os import pandas as pd import argparse import", "+ '.test.txt') # process train convert_to_apc(inpath, dist_train_fname, 'train') print (\"<<<", "def convert_to_atepc_tag(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1", "data_res['tag_sentiment_list'][i] str1_list, str2_list, str3_list = convert_apc(text, label) for x1, x2,", "train_test_split(data, test_size=0.2, random_state=42) if flag == 'train': data_res = x_train.iloc[:,", "= data['text'].map(lambda x: filter_emoji(x, restr='xx')) # 只保留review的长度小于600的 data = data[data['text'].str.len()", "process train convert_to_apc(inpath, dist_train_fname, 'train') print (\"<<< finish training data", "argparse import emoji import re from sklearn.model_selection import train_test_split parser", "data.drop([8832]) # 只保留review的长度小于600的 data = data[data['text'].str.len() <= 600] # train", "sentiment_key = labels[j][3] if sentiment_key == '正': sentiment_value = 'Positive'", "co.sub(restr, desstr) def convert_to_atepc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname):", "str3_list = [] for j in range(len(label_update)): str1 = text[:label_update[j][2][0]]", "sentiment[label[4][0]] = sentiment_value k = label[4][0] + 1 while k", "re.error: co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr, desstr) def convert_to_atepc(inpath, dist_fname,", "[(i[0], i[3], i[4]) for i in eval(label)] label_update = list(set(label_update))", "'\\n') f1.close() print (\"process apc finished!\") def main(inpath, folder_name, task):", "f1.write(x1 + '\\n') f1.write(x2 + '\\n') f1.write(x3 + '\\n') f1.close()", "== '负': sentiment_value = 'Negative' else: sentiment_value = 'Others' tags[label[4][0]]", "# Copyright (C) 2021. All Rights Reserved. 
import os import", "labels) print (\"text\", text) def convert_tag(text, labels): # convert label", "= ['-999'] * len(text) for j in range(len(labels)): label =", "def convert_to_apc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1", "sentiment_value k += 1 return text, tags, sentiment except: print", "Rights Reserved. import os import pandas as pd import argparse", "elif sentiment_key == '负': sentiment_value = 'Negative' else: sentiment_value =", "tags[label[4][0]] = 'B-ASP' sentiment[label[4][0]] = sentiment_value k = label[4][0] +", "dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc(inpath,", "(\"labels\", labels) print (\"text\", text) def convert_tag(text, labels): # convert", "tags, sentiment except: print (\"labels\", labels) print (\"text\", text) def", "return text, tags, sentiment except: print (\"labels\", labels) print (\"text\",", "try: co = re.compile(u'[\\U00010000-\\U0010ffff]') except re.error: co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return", "folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc(inpath, dist_train_fname, 'train') print", "labels[j] sentiment_key = labels[j][3] if sentiment_key == '正': sentiment_value =", "= folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc') dist_test_fname =", "# train test split x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)", "print (\"start process for an aptepc tag task\") folder_name_prefix =", "convert_to_atepc_tag(inpath, dist_test_fname, 'test') print (\"<<< finish test data preprocess\") main(args.inpath,", "label) for x1, x2, x3 in zip(str1_list, str2_list, str3_list): f1.write(x1", "os.remove(dist_fname) f1 = open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) data.columns", "# get folder name print (\"start process for an aptepc", 
"data = pd.read_csv(inpath) data.columns = ['text', 'tag_sentiment_list'] # preprocess for", "convert_to_atepc(inpath, dist_train_fname, 'train') print (\"<<< finish training data preprocess\") #", "open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) data.columns = ['text', 'tag_sentiment_list']", "label_update = list(set(label_update)) str1_list = [] str2_list = [] str3_list", "while k < label[4][1]: tags[k] = 'I-ASP' sentiment[k] = sentiment_value", "atepc finished!\") def convert_to_atepc_tag(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname):", "def convert_sentiment(sentiment_key): if sentiment_key == '正': sentiment_value = 'Positive' else:", "except: print (\"labels\", labels) print (\"text\", text) def convert_sentiment(sentiment_key): if", "= ['O'] * len(text) sentiment = ['-999'] * len(text) for", "name folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt')", "range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] text, tags, sentiment =", "1 while k < label[4][1]: tags[k] = 'I-'+label[1] sentiment[k] =", "for i in range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list,", "str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return str1_list, str2_list, str3_list def filter_emoji(desstr, restr=''): #", "# process test convert_to_atepc_tag(inpath, dist_test_fname, 'test') print (\"<<< finish test", "(\"process atepc finished!\") def convert_to_atepc_tag(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if", "['O'] * len(text) sentiment = ['-999'] * len(text) for j", "2021. All Rights Reserved. 
import os import pandas as pd", "# process test convert_to_atepc(inpath, dist_test_fname, 'test') print (\"<<< finish test", "if task == 'aptepc': # get folder name print (\"start", "convert label to list try: labels = eval(labels) tags =", "data = data[data['text'].str.len() <= 600] # train test split x_train,", "* len(text) for j in range(len(labels)): label = labels[j] sentiment_key", "process train convert_to_atepc_tag(inpath, dist_train_fname, 'train') print (\"<<< finish training data", "type=str, required=False, default='aptepc') args = parser.parse_args() def convert(text, labels): #", "range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list, str2_list, str3_list =", "text, tags, sentiment = convert(text, label) for word, tag, sen", "test data preprocess\") elif task == 'apc': # get folder", "= convert(text, label) for word, tag, sen in zip(text, tags,", "= argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str, required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str, required=False, default='./custom')", "task == 'aptepc-tag': # get folder name print (\"start process", "folder name print (\"start process for an aptepc task\") folder_name_prefix", "labels[j][3] if sentiment_key == '正': sentiment_value = 'Positive' elif sentiment_key", "emoji data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx')) # drop id", "= text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1]))", "encoding='utf8') data = pd.read_csv(inpath) # train test split x_train, x_test", "print (\"<<< finish test data preprocess\") elif task == 'aptepc-tag':", "in range(len(labels)): label = labels[j] sentiment_key = labels[j][3] if sentiment_key", "= sentiment_value k = label[4][0] + 1 while k <", "# drop id list not able to process # print", 
"sentiment_value = 'Positive' elif sentiment_key == '负': sentiment_value = 'Negative'", "= folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt') dist_test_fname =", "process test convert_to_apc(inpath, dist_test_fname, 'test') print (\"<<< finish test data", "range(len(labels)): label = labels[j] sentiment_key = labels[j][3] if sentiment_key ==", "' ' + sen + '\\n') else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close()", "text) def convert_sentiment(sentiment_key): if sentiment_key == '正': sentiment_value = 'Positive'", "task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')", "random_state=42) if flag == 'train': data_res = x_train.iloc[:, :].reset_index() else:", "sentiment_key == '负': sentiment_value = 'Negative' else: sentiment_value = 'Others'", "print (\"labels\", labels) print (\"text\", text) def convert_sentiment(sentiment_key): if sentiment_key", "not in [',', '。', ' ', '\\xa0', '\\u2006', '\\u3000', '\\u2002',", "type=str, required=False, default='./custom') parser.add_argument(\"--task\", type=str, required=False, default='aptepc') args = parser.parse_args()", "import re from sklearn.model_selection import train_test_split parser = argparse.ArgumentParser() parser.add_argument(\"--inpath\",", "re from sklearn.model_selection import train_test_split parser = argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str,", "= data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list, str2_list, str3_list = convert_apc(text, label) for", "= list(set(label_update)) str1_list = [] str2_list = [] str3_list =", "sentiment_value def convert_apc(text, label): label_update = [(i[0], i[3], i[4]) for", "'train') print (\"<<< finish training data preprocess\") # process test", "'正': sentiment_value = 'Positive' elif sentiment_key == '负': sentiment_value =", "finish test data preprocess\") elif task == 
'aptepc-tag': # get", ":].reset_index() # print (data_res.head()) for i in range(len(data_res)): text, label", "label[4][1]: tags[k] = 'I-'+label[1] sentiment[k] = sentiment_value k += 1", "for i in range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] text,", "print (data_res.head()) for i in range(len(data_res)): text, label = data_res['text'][i],", "' ', '\\xa0', '\\u2006', '\\u3000', '\\u2002', '\\u2003', '\\u2005', '\\x0c', '\\u2028',", "-*- # file: preprocess.py # author: jackie # Copyright (C)", "process # print (data.iloc[8832,:]) # data = data.drop([8832]) # 只保留review的长度小于600的", "convert_to_apc(inpath, dist_test_fname, 'test') print (\"<<< finish test data preprocess\") elif", "'。', ' ', '\\xa0', '\\u2006', '\\u3000', '\\u2002', '\\u2003', '\\u2005', '\\x0c',", "name print (\"start process for an aptepc tag task\") folder_name_prefix", "train convert_to_apc(inpath, dist_train_fname, 'train') print (\"<<< finish training data preprocess\")", "else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process atepc finished!\") def convert_to_atepc_tag(inpath,", "k = label[4][0] + 1 while k < label[4][1]: tags[k]", "x2, x3 in zip(str1_list, str2_list, str3_list): f1.write(x1 + '\\n') f1.write(x2", "coding: utf-8 -*- # file: preprocess.py # author: jackie #", "data_res['text'][i], data_res['tag_sentiment_list'][i] text, tags, sentiment = convert(text, label) for word,", "= os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc_tag(inpath, dist_train_fname,", "x_test = train_test_split(data, test_size=0.2, random_state=42) if flag == 'train': data_res", "author: jackie # Copyright (C) 2021. All Rights Reserved. 
import", "['-999'] * len(text) for j in range(len(labels)): label = labels[j]", "i in range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] text, tags,", "drop id list not able to process # print (data.iloc[8832,:])", "sentiment_key == '正': sentiment_value = 'Positive' elif sentiment_key == '负':", "1 return text, tags, sentiment except: print (\"labels\", labels) print", "def filter_emoji(desstr, restr=''): # 过滤表情 try: co = re.compile(u'[\\U00010000-\\U0010ffff]') except", "print (\"<<< finish training data preprocess\") # process test convert_to_apc(inpath,", "finish test data preprocess\") elif task == 'apc': # get", "else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process atepc finished!\") def convert_to_apc(inpath,", "split x_train, x_test = train_test_split(data, test_size=0.2, random_state=42) if flag ==", "x_test.iloc[:, :].reset_index() # print (data_res.head()) for i in range(len(data_res)): text,", "labels = eval(labels) tags = ['O'] * len(text) sentiment =", "re.compile(u'[\\U00010000-\\U0010ffff]') except re.error: co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr, desstr) def", "(data_res.head()) for i in range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]", "preprocess for emoji data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx')) #", "not able to process # print (data.iloc[8832,:]) # data =", "required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str, required=False, default='./custom') parser.add_argument(\"--task\", type=str, required=False, default='aptepc')", "print (\"<<< finish test data preprocess\") elif task == 'apc':", "= os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix +", "test convert_to_atepc(inpath, dist_test_fname, 'test') print (\"<<< finish test data preprocess\")", 
"str3_list def filter_emoji(desstr, restr=''): # 过滤表情 try: co = re.compile(u'[\\U00010000-\\U0010ffff]')", "tags[k] = 'I-'+label[1] sentiment[k] = sentiment_value k += 1 return", "sen in zip(text, tags, sentiment): if word not in [',',", "i[3], i[4]) for i in eval(label)] label_update = list(set(label_update)) str1_list", "'\\u2006', '\\u3000', '\\u2002', '\\u2003', '\\u2005', '\\x0c', '\\u2028', '\\u2009', '\\u200a']: f1.write(word", "'负': sentiment_value = 'Negative' else: sentiment_value = 'Others' tags[label[4][0]] =", "aptepc task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix +", "(\"<<< finish test data preprocess\") elif task == 'aptepc-tag': #", "os import pandas as pd import argparse import emoji import", "(\"labels\", labels) print (\"text\", text) def convert_sentiment(sentiment_key): if sentiment_key ==", "data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx')) # drop id list", "co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr, desstr) def convert_to_atepc(inpath, dist_fname, flag):", "'\\n') f1.write(x2 + '\\n') f1.write(x3 + '\\n') f1.close() print (\"process", "range(len(label_update)): str1 = text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:] str1_list.append(str1)", "for word, tag, sen in zip(text, tags, sentiment): if word", "' ' + tag + ' ' + sen +", "convert_to_apc(inpath, dist_train_fname, 'train') print (\"<<< finish training data preprocess\") #", "finished!\") def main(inpath, folder_name, task): if not os.path.exists(folder_name): os.makedirs(folder_name) if", "sentiment except: print (\"labels\", labels) print (\"text\", text) def convert_tag(text,", "x: filter_emoji(x, restr='xx')) # 只保留review的长度小于600的 data = data[data['text'].str.len() <= 600]", "str2_list, str3_list def filter_emoji(desstr, restr=''): # 过滤表情 try: co =", "(\"<<< finish training data preprocess\") # process test convert_to_atepc(inpath, 
dist_test_fname,", "= labels[j] sentiment_key = labels[j][3] if sentiment_key == '正': sentiment_value", "if word not in [',', '。', ' ', '\\xa0', '\\u2006',", "id list not able to process # print (data.iloc[8832,:]) #", "'test') print (\"<<< finish test data preprocess\") main(args.inpath, args.folder_name, args.task)", "# print (data.iloc[8832,:]) # data = data.drop([8832]) # 只保留review的长度小于600的 data", "convert(text, labels): # convert label to list try: labels =", "tag, sen in zip(text, tags, sentiment): if word not in", "print (\"process atepc finished!\") def convert_to_atepc_tag(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉", "+ '.train.txt.atepc') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process", "'\\n') else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process atepc finished!\") def", "convert_to_atepc_tag(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1 =", "convert_apc(text, label): label_update = [(i[0], i[3], i[4]) for i in", "train_test_split parser = argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str, required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str,", "dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix", "apc finished!\") def main(inpath, folder_name, task): if not os.path.exists(folder_name): os.makedirs(folder_name)", "'Others' tags[label[4][0]] = 'B-'+label[1] sentiment[label[4][0]] = sentiment_value k = label[4][0]", "finish training data preprocess\") # process test convert_to_atepc_tag(inpath, dist_test_fname, 'test')", "os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc_tag(inpath, dist_train_fname, 'train')", "test data preprocess\") elif task == 'aptepc-tag': # get folder", 
"parser.add_argument(\"--folder_name\", type=str, required=False, default='./custom') parser.add_argument(\"--task\", type=str, required=False, default='aptepc') args =", "+ '\\n') f1.write(x2 + '\\n') f1.write(x3 + '\\n') f1.close() print", "i in eval(label)] label_update = list(set(label_update)) str1_list = [] str2_list", "if os.path.exists(dist_fname): os.remove(dist_fname) f1 = open(dist_fname, 'w', encoding='utf8') data =", "x1, x2, x3 in zip(str1_list, str2_list, str3_list): f1.write(x1 + '\\n')", "folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc') dist_test_fname", "args = parser.parse_args() def convert(text, labels): # convert label to", "'\\u2005', '\\x0c', '\\u2028', '\\u2009', '\\u200a']: f1.write(word + ' ' +", "= convert_apc(text, label) for x1, x2, x3 in zip(str1_list, str2_list,", "+ ' ' + sen + '\\n') else: f1.write(\"\\n\") f1.write(\"\\n\")", "os.makedirs(folder_name) if task == 'aptepc': # get folder name print", "= ['text', 'tag_sentiment_list'] # preprocess for emoji data['text'] = data['text'].map(lambda", "try: labels = eval(labels) tags = ['O'] * len(text) sentiment", "if sentiment_key == '正': sentiment_value = 'Positive' else: sentiment_value =", "[',', '。', ' ', '\\xa0', '\\u2006', '\\u3000', '\\u2002', '\\u2003', '\\u2005',", "restr=''): # 过滤表情 try: co = re.compile(u'[\\U00010000-\\U0010ffff]') except re.error: co", "name print (\"start process for an aptepc task\") folder_name_prefix =", "sentiment except: print (\"labels\", labels) print (\"text\", text) def convert_sentiment(sentiment_key):", "text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list, str2_list, str3_list = convert_apc(text,", "tags[label[4][0]] = 'B-'+label[1] sentiment[label[4][0]] = sentiment_value k = label[4][0] +", "Copyright (C) 2021. All Rights Reserved. 
import os import pandas", "== 'aptepc': # get folder name print (\"start process for", "f1.write(x3 + '\\n') f1.close() print (\"process apc finished!\") def main(inpath,", "f1.write(word + ' ' + tag + ' ' +", "'aptepc': # get folder name print (\"start process for an", "train test split x_train, x_test = train_test_split(data, test_size=0.2, random_state=42) if", "an aptepc task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix", "# process test convert_to_apc(inpath, dist_test_fname, 'test') print (\"<<< finish test", "'Negative' return sentiment_value def convert_apc(text, label): label_update = [(i[0], i[3],", "preprocess.py # author: jackie # Copyright (C) 2021. All Rights", "+ 1 while k < label[4][1]: tags[k] = 'I-'+label[1] sentiment[k]", "list try: labels = eval(labels) tags = ['O'] * len(text)", "过滤表情 try: co = re.compile(u'[\\U00010000-\\U0010ffff]') except re.error: co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]')", "else: sentiment_value = 'Others' tags[label[4][0]] = 'B-ASP' sentiment[label[4][0]] = sentiment_value", "data = data.drop([8832]) # 只保留review的长度小于600的 data = data[data['text'].str.len() <= 600]", "== 'train': data_res = x_train.iloc[:, :].reset_index() else: data_res = x_test.iloc[:,", "'apc': # get folder name folder_name_prefix = folder_name.split('/')[-1] dist_train_fname =", "parser.add_argument(\"--task\", type=str, required=False, default='aptepc') args = parser.parse_args() def convert(text, labels):", "while k < label[4][1]: tags[k] = 'I-'+label[1] sentiment[k] = sentiment_value", "i in range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list, str2_list,", "def convert_tag(text, labels): # convert label to list try: labels", "data_res = x_test.iloc[:, :].reset_index() # print (data_res.head()) for i in", "os.path.exists(folder_name): os.makedirs(folder_name) if task == 'aptepc': # get folder name", 
"(data.iloc[8832,:]) # data = data.drop([8832]) # 只保留review的长度小于600的 data = data[data['text'].str.len()", "parser.parse_args() def convert(text, labels): # convert label to list try:", "preprocess\") # process test convert_to_atepc(inpath, dist_test_fname, 'test') print (\"<<< finish", "get folder name print (\"start process for an aptepc task\")", "desstr) def convert_to_atepc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname)", "folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt') dist_test_fname", "elif task == 'apc': # get folder name folder_name_prefix =", "import pandas as pd import argparse import emoji import re", "print (\"text\", text) def convert_tag(text, labels): # convert label to", "str2_list, str3_list): f1.write(x1 + '\\n') f1.write(x2 + '\\n') f1.write(x3 +", "finish training data preprocess\") # process test convert_to_atepc(inpath, dist_test_fname, 'test')", "not os.path.exists(folder_name): os.makedirs(folder_name) if task == 'aptepc': # get folder", "except re.error: co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr, desstr) def convert_to_atepc(inpath,", "argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str, required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str, required=False, default='./custom') parser.add_argument(\"--task\",", "folder_name, task): if not os.path.exists(folder_name): os.makedirs(folder_name) if task == 'aptepc':", "'test') print (\"<<< finish test data preprocess\") elif task ==", "os.remove(dist_fname) f1 = open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) #", "default='aptepc') args = parser.parse_args() def convert(text, labels): # convert label", "else: sentiment_value = 'Negative' return sentiment_value def convert_apc(text, label): label_update", "jackie # Copyright (C) 2021. 
All Rights Reserved. import os", "== 'apc': # get folder name folder_name_prefix = folder_name.split('/')[-1] dist_train_fname", "return co.sub(restr, desstr) def convert_to_atepc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if", "label_update = [(i[0], i[3], i[4]) for i in eval(label)] label_update", "return str1_list, str2_list, str3_list def filter_emoji(desstr, restr=''): # 过滤表情 try:", "def convert(text, labels): # convert label to list try: labels", "for emoji data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx')) # 只保留review的长度小于600的", "preprocess\") # process test convert_to_atepc_tag(inpath, dist_test_fname, 'test') print (\"<<< finish", "# file: preprocess.py # author: jackie # Copyright (C) 2021.", "'\\xa0', '\\u2006', '\\u3000', '\\u2002', '\\u2003', '\\u2005', '\\x0c', '\\u2028', '\\u2009', '\\u200a']:", "# preprocess for emoji data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx'))", "folder_name_prefix + '.train.txt.atepc') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') #", "= x_train.iloc[:, :].reset_index() else: data_res = x_test.iloc[:, :].reset_index() # print", "x_train, x_test = train_test_split(data, test_size=0.2, random_state=42) if flag == 'train':", "f1.write(x2 + '\\n') f1.write(x3 + '\\n') f1.close() print (\"process apc", "'\\n') f1.write(x3 + '\\n') f1.close() print (\"process apc finished!\") def", "os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')", "tag task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix +", "k += 1 return text, tags, sentiment except: print (\"labels\",", "j in range(len(label_update)): str1 = text[:label_update[j][2][0]] + '$T$ ' +", "data preprocess\") elif task == 'aptepc-tag': # get folder name", "str1_list, str2_list, str3_list def 
filter_emoji(desstr, restr=''): # 过滤表情 try: co", "# 只保留review的长度小于600的 data = data[data['text'].str.len() <= 600] # train test", "# 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1 = open(dist_fname, 'w', encoding='utf8')", "able to process # print (data.iloc[8832,:]) # data = data.drop([8832])", "required=False, default='aptepc') args = parser.parse_args() def convert(text, labels): # convert", "len(text) for j in range(len(labels)): label = labels[j] sentiment_key =", "zip(text, tags, sentiment): if word not in [',', '。', '", "default='./custom') parser.add_argument(\"--task\", type=str, required=False, default='aptepc') args = parser.parse_args() def convert(text,", "'train': data_res = x_train.iloc[:, :].reset_index() else: data_res = x_test.iloc[:, :].reset_index()", "<= 600] # train test split x_train, x_test = train_test_split(data,", "process for an aptepc tag task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname", "'I-ASP' sentiment[k] = sentiment_value k += 1 return text, tags,", "(\"start process for an aptepc tag task\") folder_name_prefix = folder_name.split('/')[-1]", "= 'Others' tags[label[4][0]] = 'B-'+label[1] sentiment[label[4][0]] = sentiment_value k =", "sentiment_value k = label[4][0] + 1 while k < label[4][1]:", "process test convert_to_atepc(inpath, dist_test_fname, 'test') print (\"<<< finish test data", "sentiment = ['-999'] * len(text) for j in range(len(labels)): label", "eval(label)] label_update = list(set(label_update)) str1_list = [] str2_list = []", "label) for word, tag, sen in zip(text, tags, sentiment): if", "+ '\\n') else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process atepc finished!\")", "file: preprocess.py # author: jackie # Copyright (C) 2021. 
All", "for emoji data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx')) # drop", "training data preprocess\") # process test convert_to_apc(inpath, dist_test_fname, 'test') print", "['text', 'tag_sentiment_list'] # preprocess for emoji data['text'] = data['text'].map(lambda x:", "f1.write(\"\\n\") f1.close() print (\"process atepc finished!\") def convert_to_apc(inpath, dist_fname, flag):", "= os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt') # process train convert_to_apc(inpath, dist_train_fname,", "'aptepc-tag': # get folder name print (\"start process for an", "word, tag, sen in zip(text, tags, sentiment): if word not", "= pd.read_csv(inpath) data.columns = ['text', 'tag_sentiment_list'] # preprocess for emoji", "str2_list, str3_list = convert_apc(text, label) for x1, x2, x3 in", "elif task == 'aptepc-tag': # get folder name print (\"start", "def main(inpath, folder_name, task): if not os.path.exists(folder_name): os.makedirs(folder_name) if task", "test split x_train, x_test = train_test_split(data, test_size=0.2, random_state=42) if flag", "sentiment_value = 'Others' tags[label[4][0]] = 'B-ASP' sentiment[label[4][0]] = sentiment_value k", "sentiment): if word not in [',', '。', ' ', '\\xa0',", "folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc_tag(inpath, dist_train_fname, 'train') print", "< label[4][1]: tags[k] = 'I-ASP' sentiment[k] = sentiment_value k +=", "convert_to_atepc_tag(inpath, dist_train_fname, 'train') print (\"<<< finish training data preprocess\") #", "in range(len(label_update)): str1 = text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:]", "f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process atepc finished!\") def convert_to_atepc_tag(inpath, dist_fname,", "(\"process atepc finished!\") def convert_to_apc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if", "def convert_apc(text, label): label_update = [(i[0], i[3], i[4]) for i", "'正': sentiment_value = 
'Positive' else: sentiment_value = 'Negative' return sentiment_value", "data['text'].map(lambda x: filter_emoji(x, restr='xx')) # drop id list not able", "print (\"<<< finish training data preprocess\") # process test convert_to_atepc(inpath,", "text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] text, tags, sentiment = convert(text,", "# author: jackie # Copyright (C) 2021. All Rights Reserved.", "from sklearn.model_selection import train_test_split parser = argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str, required=True,", "# process train convert_to_atepc(inpath, dist_train_fname, 'train') print (\"<<< finish training", "data_res['tag_sentiment_list'][i] text, tags, sentiment = convert(text, label) for word, tag,", "print (\"process atepc finished!\") def convert_to_apc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉", "filter_emoji(desstr, restr=''): # 过滤表情 try: co = re.compile(u'[\\U00010000-\\U0010ffff]') except re.error:", "test convert_to_atepc_tag(inpath, dist_test_fname, 'test') print (\"<<< finish test data preprocess\")", "in zip(text, tags, sentiment): if word not in [',', '。',", "dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc_tag(inpath,", "'$T$ ' + text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return str1_list, str2_list,", "(\"<<< finish test data preprocess\") elif task == 'apc': #", "data['text'].map(lambda x: filter_emoji(x, restr='xx')) # 只保留review的长度小于600的 data = data[data['text'].str.len() <=", "f1.close() print (\"process apc finished!\") def main(inpath, folder_name, task): if", "restr='xx')) # 只保留review的长度小于600的 data = data[data['text'].str.len() <= 600] # train", "= parser.parse_args() def convert(text, labels): # convert label to list", "list not able to process # print (data.iloc[8832,:]) # data", "dist_train_fname = 
os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix", "'.test.txt.atepc') # process train convert_to_atepc_tag(inpath, dist_train_fname, 'train') print (\"<<< finish", "k < label[4][1]: tags[k] = 'I-'+label[1] sentiment[k] = sentiment_value k", "# -*- coding: utf-8 -*- # file: preprocess.py # author:", "import train_test_split parser = argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str, required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\",", "= 'I-ASP' sentiment[k] = sentiment_value k += 1 return text,", "# process train convert_to_apc(inpath, dist_train_fname, 'train') print (\"<<< finish training", "'w', encoding='utf8') data = pd.read_csv(inpath) # train test split x_train,", "convert_apc(text, label) for x1, x2, x3 in zip(str1_list, str2_list, str3_list):", "* len(text) sentiment = ['-999'] * len(text) for j in", "# data = data.drop([8832]) # 只保留review的长度小于600的 data = data[data['text'].str.len() <=", "re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr, desstr) def convert_to_atepc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉", "process train convert_to_atepc(inpath, dist_train_fname, 'train') print (\"<<< finish training data", "= open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) # train test", "= data[data['text'].str.len() <= 600] # train test split x_train, x_test", "+ tag + ' ' + sen + '\\n') else:", "str1_list = [] str2_list = [] str3_list = [] for", "for an aptepc tag task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname =", "default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str, required=False, default='./custom') parser.add_argument(\"--task\", type=str, required=False, default='aptepc') args", "for an aptepc task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix,", "if 
sentiment_key == '正': sentiment_value = 'Positive' elif sentiment_key ==", "= x_test.iloc[:, :].reset_index() # print (data_res.head()) for i in range(len(data_res)):", "'.test.txt.atepc') # process train convert_to_atepc(inpath, dist_train_fname, 'train') print (\"<<< finish", "= 'Others' tags[label[4][0]] = 'B-ASP' sentiment[label[4][0]] = sentiment_value k =", "'\\x0c', '\\u2028', '\\u2009', '\\u200a']: f1.write(word + ' ' + tag", "tag + ' ' + sen + '\\n') else: f1.write(\"\\n\")", "an aptepc tag task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix,", "data[data['text'].str.len() <= 600] # train test split x_train, x_test =", "emoji data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx')) # 只保留review的长度小于600的 data", "pd.read_csv(inpath) # train test split x_train, x_test = train_test_split(data, test_size=0.2,", "= 'Positive' elif sentiment_key == '负': sentiment_value = 'Negative' else:", "写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1 = open(dist_fname, 'w', encoding='utf8') data", "preprocess\") # process test convert_to_apc(inpath, dist_test_fname, 'test') print (\"<<< finish", "'Positive' else: sentiment_value = 'Negative' return sentiment_value def convert_apc(text, label):", "= 'Negative' else: sentiment_value = 'Others' tags[label[4][0]] = 'B-'+label[1] sentiment[label[4][0]]", "= [] str3_list = [] for j in range(len(label_update)): str1", "= os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc(inpath, dist_train_fname,", "'B-ASP' sentiment[label[4][0]] = sentiment_value k = label[4][0] + 1 while", "task == 'aptepc': # get folder name print (\"start process", "convert_to_atepc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1 =", "只保留review的长度小于600的 data = data[data['text'].str.len() <= 600] # train test split", "data['text'] = data['text'].map(lambda x: 
filter_emoji(x, restr='xx')) # 只保留review的长度小于600的 data =", "str3_list = convert_apc(text, label) for x1, x2, x3 in zip(str1_list,", "f1.close() print (\"process atepc finished!\") def convert_to_atepc_tag(inpath, dist_fname, flag): #", "sentiment_value = 'Negative' else: sentiment_value = 'Others' tags[label[4][0]] = 'B-'+label[1]", "sentiment_value = 'Positive' else: sentiment_value = 'Negative' return sentiment_value def", "print (\"start process for an aptepc task\") folder_name_prefix = folder_name.split('/')[-1]", "tags = ['O'] * len(text) sentiment = ['-999'] * len(text)", "aptepc tag task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix", "(\"text\", text) def convert_sentiment(sentiment_key): if sentiment_key == '正': sentiment_value =", "convert_to_atepc(inpath, dist_test_fname, 'test') print (\"<<< finish test data preprocess\") elif", "task == 'apc': # get folder name folder_name_prefix = folder_name.split('/')[-1]", "label[4][0] + 1 while k < label[4][1]: tags[k] = 'I-ASP'", "'Negative' else: sentiment_value = 'Others' tags[label[4][0]] = 'B-ASP' sentiment[label[4][0]] =", "'.test.txt') # process train convert_to_apc(inpath, dist_train_fname, 'train') print (\"<<< finish", "for i in eval(label)] label_update = list(set(label_update)) str1_list = []", "get folder name print (\"start process for an aptepc tag", "print (data.iloc[8832,:]) # data = data.drop([8832]) # 只保留review的长度小于600的 data =", "= [] str2_list = [] str3_list = [] for j", "(C) 2021. All Rights Reserved. import os import pandas as", "str1 = text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0])", "process for an aptepc task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname =", "All Rights Reserved. 
import os import pandas as pd import", "label = data_res['text'][i], data_res['tag_sentiment_list'][i] text, tags, sentiment = convert(text, label)", "print (\"labels\", labels) print (\"text\", text) def convert_tag(text, labels): #", "'w', encoding='utf8') data = pd.read_csv(inpath) data.columns = ['text', 'tag_sentiment_list'] #", "finished!\") def convert_to_apc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname)", "convert(text, label) for word, tag, sen in zip(text, tags, sentiment):", "text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return str1_list, str2_list, str3_list def filter_emoji(desstr,", "== '正': sentiment_value = 'Positive' else: sentiment_value = 'Negative' return", "x_train.iloc[:, :].reset_index() else: data_res = x_test.iloc[:, :].reset_index() # print (data_res.head())", "convert_tag(text, labels): # convert label to list try: labels =", "import argparse import emoji import re from sklearn.model_selection import train_test_split", "(\"start process for an aptepc task\") folder_name_prefix = folder_name.split('/')[-1] dist_train_fname", "to process # print (data.iloc[8832,:]) # data = data.drop([8832]) #", "'.train.txt') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt') # process train", "1 while k < label[4][1]: tags[k] = 'I-ASP' sentiment[k] =", "as pd import argparse import emoji import re from sklearn.model_selection", "+= 1 return text, tags, sentiment except: print (\"labels\", labels)", "emoji import re from sklearn.model_selection import train_test_split parser = argparse.ArgumentParser()", "open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) # train test split", "= data_res['text'][i], data_res['tag_sentiment_list'][i] text, tags, sentiment = convert(text, label) for", "word not in [',', '。', ' ', '\\xa0', '\\u2006', '\\u3000',", 
"folder_name_prefix + '.test.txt') # process train convert_to_apc(inpath, dist_train_fname, 'train') print", "in eval(label)] label_update = list(set(label_update)) str1_list = [] str2_list =", "in [',', '。', ' ', '\\xa0', '\\u2006', '\\u3000', '\\u2002', '\\u2003',", "dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt') # process train convert_to_apc(inpath,", "zip(str1_list, str2_list, str3_list): f1.write(x1 + '\\n') f1.write(x2 + '\\n') f1.write(x3", "labels): # convert label to list try: labels = eval(labels)", "labels) print (\"text\", text) def convert_sentiment(sentiment_key): if sentiment_key == '正':", "folder name folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix +", "in zip(str1_list, str2_list, str3_list): f1.write(x1 + '\\n') f1.write(x2 + '\\n')", "+ '\\n') f1.write(x3 + '\\n') f1.close() print (\"process apc finished!\")", "+ '.train.txt') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt') # process", "print (\"<<< finish training data preprocess\") # process test convert_to_atepc_tag(inpath,", "= labels[j][3] if sentiment_key == '正': sentiment_value = 'Positive' elif", "test convert_to_apc(inpath, dist_test_fname, 'test') print (\"<<< finish test data preprocess\")", "training data preprocess\") # process test convert_to_atepc_tag(inpath, dist_test_fname, 'test') print", "to list try: labels = eval(labels) tags = ['O'] *", "600] # train test split x_train, x_test = train_test_split(data, test_size=0.2,", "data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list, str2_list, str3_list = convert_apc(text, label) for x1,", "'Positive' elif sentiment_key == '负': sentiment_value = 'Negative' else: sentiment_value", "in range(len(data_res)): text, label = data_res['text'][i], data_res['tag_sentiment_list'][i] text, tags, sentiment", "= eval(labels) tags = ['O'] * len(text) sentiment = ['-999']", "required=False, 
default='./custom') parser.add_argument(\"--task\", type=str, required=False, default='aptepc') args = parser.parse_args() def", "'\\u2003', '\\u2005', '\\x0c', '\\u2028', '\\u2009', '\\u200a']: f1.write(word + ' '", "flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1 = open(dist_fname, 'w',", "= data.drop([8832]) # 只保留review的长度小于600的 data = data[data['text'].str.len() <= 600] #", "+ '\\n') f1.close() print (\"process apc finished!\") def main(inpath, folder_name,", "for j in range(len(label_update)): str1 = text[:label_update[j][2][0]] + '$T$ '", "= 'Positive' else: sentiment_value = 'Negative' return sentiment_value def convert_apc(text,", "def convert_to_atepc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1", "folder_name_prefix + '.train.txt') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt') #", "# process train convert_to_atepc_tag(inpath, dist_train_fname, 'train') print (\"<<< finish training", "dist_test_fname, 'test') print (\"<<< finish test data preprocess\") elif task", "restr='xx')) # drop id list not able to process #", "(\"text\", text) def convert_tag(text, labels): # convert label to list", "= 'Negative' else: sentiment_value = 'Others' tags[label[4][0]] = 'B-ASP' sentiment[label[4][0]]", "os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt')", "convert_to_apc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname): os.remove(dist_fname) f1 =", "(\"<<< finish training data preprocess\") # process test convert_to_atepc_tag(inpath, dist_test_fname,", "process test convert_to_atepc_tag(inpath, dist_test_fname, 'test') print (\"<<< finish test data", "os.path.exists(dist_fname): os.remove(dist_fname) f1 = open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath)", "+ '.test.txt.atepc') # process 
train convert_to_atepc_tag(inpath, dist_train_fname, 'train') print (\"<<<", "'\\u2028', '\\u2009', '\\u200a']: f1.write(word + ' ' + tag +", "if not os.path.exists(folder_name): os.makedirs(folder_name) if task == 'aptepc': # get", "filter_emoji(x, restr='xx')) # 只保留review的长度小于600的 data = data[data['text'].str.len() <= 600] #", "eval(labels) tags = ['O'] * len(text) sentiment = ['-999'] *", "encoding='utf8') data = pd.read_csv(inpath) data.columns = ['text', 'tag_sentiment_list'] # preprocess", "import emoji import re from sklearn.model_selection import train_test_split parser =", "'B-'+label[1] sentiment[label[4][0]] = sentiment_value k = label[4][0] + 1 while", "= [(i[0], i[3], i[4]) for i in eval(label)] label_update =", "pd import argparse import emoji import re from sklearn.model_selection import", "sentiment = convert(text, label) for word, tag, sen in zip(text,", "tags, sentiment = convert(text, label) for word, tag, sen in", "data preprocess\") # process test convert_to_atepc_tag(inpath, dist_test_fname, 'test') print (\"<<<", "[] str3_list = [] for j in range(len(label_update)): str1 =", "', '\\xa0', '\\u2006', '\\u3000', '\\u2002', '\\u2003', '\\u2005', '\\x0c', '\\u2028', '\\u2009',", "x: filter_emoji(x, restr='xx')) # drop id list not able to", "tags[k] = 'I-ASP' sentiment[k] = sentiment_value k += 1 return", "# 过滤表情 try: co = re.compile(u'[\\U00010000-\\U0010ffff]') except re.error: co =", "else: data_res = x_test.iloc[:, :].reset_index() # print (data_res.head()) for i", "(\"process apc finished!\") def main(inpath, folder_name, task): if not os.path.exists(folder_name):", "pd.read_csv(inpath) data.columns = ['text', 'tag_sentiment_list'] # preprocess for emoji data['text']", "atepc finished!\") def convert_to_apc(inpath, dist_fname, flag): # 写之前,先检验文件是否存在,存在就删掉 if os.path.exists(dist_fname):", "= 'B-'+label[1] sentiment[label[4][0]] = sentiment_value k = label[4][0] + 1", "x3 in zip(str1_list, str2_list, str3_list): f1.write(x1 + '\\n') f1.write(x2 
+", "finish training data preprocess\") # process test convert_to_apc(inpath, dist_test_fname, 'test')", "train convert_to_atepc_tag(inpath, dist_train_fname, 'train') print (\"<<< finish training data preprocess\")", "sklearn.model_selection import train_test_split parser = argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str, required=True, default='./raw_data/data1.csv')", "os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process train convert_to_atepc(inpath, dist_train_fname, 'train')", "'\\u2002', '\\u2003', '\\u2005', '\\x0c', '\\u2028', '\\u2009', '\\u200a']: f1.write(word + '", "parser = argparse.ArgumentParser() parser.add_argument(\"--inpath\", type=str, required=True, default='./raw_data/data1.csv') parser.add_argument(\"--folder_name\", type=str, required=False,", "get folder name folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix", "convert_sentiment(sentiment_key): if sentiment_key == '正': sentiment_value = 'Positive' else: sentiment_value", "= 'B-ASP' sentiment[label[4][0]] = sentiment_value k = label[4][0] + 1", "+ text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return str1_list, str2_list, str3_list def", "f1.write(\"\\n\") f1.close() print (\"process atepc finished!\") def convert_to_atepc_tag(inpath, dist_fname, flag):", "sentiment_value = 'Negative' return sentiment_value def convert_apc(text, label): label_update =", "dist_train_fname, 'train') print (\"<<< finish training data preprocess\") # process", "list(set(label_update)) str1_list = [] str2_list = [] str3_list = []", "folder name print (\"start process for an aptepc tag task\")", "str2_list = [] str3_list = [] for j in range(len(label_update)):", "filter_emoji(x, restr='xx')) # drop id list not able to process", "f1 = open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) 
# train", "== 'aptepc-tag': # get folder name print (\"start process for", "== '正': sentiment_value = 'Positive' elif sentiment_key == '负': sentiment_value", "sen + '\\n') else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process atepc", "# convert label to list try: labels = eval(labels) tags", "f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print (\"process atepc finished!\") def convert_to_apc(inpath, dist_fname,", "len(text) sentiment = ['-999'] * len(text) for j in range(len(labels)):", "f1 = open(dist_fname, 'w', encoding='utf8') data = pd.read_csv(inpath) data.columns =", "text) def convert_tag(text, labels): # convert label to list try:", "data preprocess\") # process test convert_to_apc(inpath, dist_test_fname, 'test') print (\"<<<", "str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return str1_list, str2_list, str3_list def filter_emoji(desstr, restr=''):", "label = data_res['text'][i], data_res['tag_sentiment_list'][i] str1_list, str2_list, str3_list = convert_apc(text, label)", "label = labels[j] sentiment_key = labels[j][3] if sentiment_key == '正':", "' + tag + ' ' + sen + '\\n')", "for x1, x2, x3 in zip(str1_list, str2_list, str3_list): f1.write(x1 +", "text, tags, sentiment except: print (\"labels\", labels) print (\"text\", text)", "= 'I-'+label[1] sentiment[k] = sentiment_value k += 1 return text,", "text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:] str1_list.append(str1) str2_list.append(label_update[j][0]) str3_list.append(convert_sentiment(label_update[j][1])) return", "'.train.txt.atepc') dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc') # process train", "import os import pandas as pd import argparse import emoji", "if flag == 'train': data_res = x_train.iloc[:, :].reset_index() else: data_res", "label[4][1]: tags[k] = 'I-ASP' sentiment[k] = sentiment_value k += 1", "dist_test_fname, 'test') print (\"<<< 
finish test data preprocess\") main(args.inpath, args.folder_name,", "< label[4][1]: tags[k] = 'I-'+label[1] sentiment[k] = sentiment_value k +=", "tags, sentiment): if word not in [',', '。', ' ',", "'I-'+label[1] sentiment[k] = sentiment_value k += 1 return text, tags,", "# get folder name folder_name_prefix = folder_name.split('/')[-1] dist_train_fname = os.path.join(folder_name_prefix,", "-*- coding: utf-8 -*- # file: preprocess.py # author: jackie", "except: print (\"labels\", labels) print (\"text\", text) def convert_tag(text, labels):", "= re.compile(u'[\\U00010000-\\U0010ffff]') except re.error: co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]') return co.sub(restr, desstr)", "preprocess\") elif task == 'aptepc-tag': # get folder name print", "for j in range(len(labels)): label = labels[j] sentiment_key = labels[j][3]", "= train_test_split(data, test_size=0.2, random_state=42) if flag == 'train': data_res =", "test_size=0.2, random_state=42) if flag == 'train': data_res = x_train.iloc[:, :].reset_index()", "' + sen + '\\n') else: f1.write(\"\\n\") f1.write(\"\\n\") f1.close() print", "str1_list, str2_list, str3_list = convert_apc(text, label) for x1, x2, x3", "= sentiment_value k += 1 return text, tags, sentiment except:", "k < label[4][1]: tags[k] = 'I-ASP' sentiment[k] = sentiment_value k", "'\\u2009', '\\u200a']: f1.write(word + ' ' + tag + '", "preprocess\") elif task == 'apc': # get folder name folder_name_prefix", "label[4][0] + 1 while k < label[4][1]: tags[k] = 'I-'+label[1]", "= 'Negative' return sentiment_value def convert_apc(text, label): label_update = [(i[0],", "data preprocess\") elif task == 'apc': # get folder name", "data.columns = ['text', 'tag_sentiment_list'] # preprocess for emoji data['text'] =", "training data preprocess\") # process test convert_to_atepc(inpath, dist_test_fname, 'test') print", "+ ' ' + tag + ' ' + sen" ]
[ "name, data) def get_data_from_url(url): response = requests.get(url, stream=True) return response.raw", "def get_cat(folder, name): url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data = get_data_from_url(url) save_image(folder,", "\"http://consuming-python-services-api.azurewebsites.net/cats/random\" data = get_data_from_url(url) save_image(folder, name, data) def get_data_from_url(url): response", "import os import shutil import requests def get_cat(folder, name): url", "response = requests.get(url, stream=True) return response.raw def save_image(folder, name, data):", "file_name = os.path.join(folder, name + '.jpg') with open(file_name, 'wb') as", "def get_data_from_url(url): response = requests.get(url, stream=True) return response.raw def save_image(folder,", "data) def get_data_from_url(url): response = requests.get(url, stream=True) return response.raw def", "name, data): file_name = os.path.join(folder, name + '.jpg') with open(file_name,", "get_data_from_url(url) save_image(folder, name, data) def get_data_from_url(url): response = requests.get(url, stream=True)", "stream=True) return response.raw def save_image(folder, name, data): file_name = os.path.join(folder,", "save_image(folder, name, data): file_name = os.path.join(folder, name + '.jpg') with", "return response.raw def save_image(folder, name, data): file_name = os.path.join(folder, name", "import shutil import requests def get_cat(folder, name): url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\"", "os import shutil import requests def get_cat(folder, name): url =", "name + '.jpg') with open(file_name, 'wb') as fout: shutil.copyfileobj(data, fout)", "get_cat(folder, name): url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data = get_data_from_url(url) save_image(folder, name,", "url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data = get_data_from_url(url) save_image(folder, name, data) 
def", "import requests def get_cat(folder, name): url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data =", "get_data_from_url(url): response = requests.get(url, stream=True) return response.raw def save_image(folder, name,", "os.path.join(folder, name + '.jpg') with open(file_name, 'wb') as fout: shutil.copyfileobj(data,", "name): url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data = get_data_from_url(url) save_image(folder, name, data)", "= get_data_from_url(url) save_image(folder, name, data) def get_data_from_url(url): response = requests.get(url,", "def save_image(folder, name, data): file_name = os.path.join(folder, name + '.jpg')", "save_image(folder, name, data) def get_data_from_url(url): response = requests.get(url, stream=True) return", "shutil import requests def get_cat(folder, name): url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data", "= requests.get(url, stream=True) return response.raw def save_image(folder, name, data): file_name", "= \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data = get_data_from_url(url) save_image(folder, name, data) def get_data_from_url(url):", "requests def get_cat(folder, name): url = \"http://consuming-python-services-api.azurewebsites.net/cats/random\" data = get_data_from_url(url)", "requests.get(url, stream=True) return response.raw def save_image(folder, name, data): file_name =", "= os.path.join(folder, name + '.jpg') with open(file_name, 'wb') as fout:", "data = get_data_from_url(url) save_image(folder, name, data) def get_data_from_url(url): response =", "response.raw def save_image(folder, name, data): file_name = os.path.join(folder, name +", "data): file_name = os.path.join(folder, name + '.jpg') with open(file_name, 'wb')" ]
[ "compute=True, lock=None, dask_kwargs={}, **kwargs): \"\"\" Store Dask Dataframe to Hierarchical", "1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else: divisions.append(None) return keys, stops, divisions", "return new dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,", "str): if path.count('*') + key.count('*') > 1: raise ValueError(\"A maximum", "a dask dataframe. This function is like ``pandas.read_hdf``, except it", "will be replaced with an increasing sequence of integers starting", "accepted in \" \"dataset key\") fmt_obj = lambda path, _:", "in paths]) if PY3: from ..core import _Frame _Frame.to_hdf.__doc__ =", "ambiguous because it could be interpreted as the starting and", "files as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.. >>> from datetime import", "+ timedelta(days=i) >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP Returns", "warn import pandas as pd from toolz import merge from", "within the filename or datapath, and an optional ``name_function``. The", "(defaults to None, the last row), row number to stop", "to denote many filenames key: string Datapath within the files.", "Read from hdf5 file with a lock \"\"\" if lock:", "shared keyword arguments. 
This function differs from the Pandas version", "columns, optional A list of columns that if not None,", "stop)) def update(s): new = base.copy() new.update({'start': s, 'stop': s", "multiprocessing from ...base import tokenize, compute_as_if_collection from ...bytes.utils import build_name_function", "1: base = {'name': empty.name, 'mode': mode} else: base =", "locking\"\"\" if lock: lock.acquire() try: pd_to_hdf(*args, **kwargs) finally: if lock:", "hdf.keys() if fnmatch(k, key)] stops = [] divisions = []", "key, stop, sorted_index, chunksize) if (start != 0 or stop", "doctest: +SKIP Save data to multiple datapaths within the same", "**kwargs) finally: if lock: lock.release() return None def to_hdf(df, path,", "in hdf.keys() if fnmatch(k, key)] stops = [] divisions =", "string, format using i_name if isinstance(path, str): if path.count('*') +", "in a number from 0 to the number of partitions", "if the file does not exist it is created. 'r+'", "single_node: keys = [(name, df.npartitions - 1)] else: keys =", "from the Pandas version by saving the many partitions of", "we're writing to a single entity _actual_get = get_scheduler(get=get, collections=[df],", "columns : list of columns, optional A list of columns", "# scheduler we don't need to lock lock = True", "many filenames key: string Datapath within the files. May contain", "is preserved when its saved and read # so we", "when opening file(s). 'r' Read-only; no data can be modified.", "date(year=2000, month=1, day=1) >>> def name_function(i): ... ''' Convert integer", "string. 
Should take in a number from 0 to the", "from fnmatch import fnmatch from glob import glob import os", "many locations name_function: function A function to convert the ``*``", "!= 0 or stop is not None) and sorted_index: raise", "dont_use_fixed_error_message = \"\"\" This HDFStore is not partitionable and can", "list of columns, optional A list of columns that if", "HDF files into a Dask DataFrame Read hdf files into", "lock, update(s))) for i, s in enumerate(range(start, stop, chunksize))) if", "(_pd_read_hdf, path, key, lock, update(s))) for i, s in enumerate(range(start,", "can be modified. 'a' Append; an existing file is opened", "file identified by the given path. Also get the index", "is not None) and len(keys) > 1: raise NotImplementedError(read_hdf_error_msg) from", "a string ''' ... return base + timedelta(days=i) >>> df.to_hdf('*.hdf',", "name = 'read-hdf-' + token if empty.ndim == 1: base", "stop at columns : list of columns, optional A list", "a positive integer\") if (start != 0 or stop is", "None: if not single_node: lock = True elif not single_file", "division: divisions = division else: divisions = [None] * (len(dsk)", "not exist it is created. 'r+' It is similar to", "name_function: function A function to convert the ``*`` in the", "= base.copy() new.update({'start': s, 'stop': s + chunksize}) return new", "from ...delayed import Delayed, delayed from ...utils import get_scheduler_lock def", "\"\"\" def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize): \"\"\" Get the", "Delayed, delayed from ...utils import get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock, args,", "= getattr(df._partition_type, 'to_hdf') single_file = True single_node = True #", "division, lock): \"\"\" Get the data frame corresponding to one", "file is opened for reading and writing, and if the", "not specify scheduler and write is sequential default to the", "= 'read-hdf-' + token if empty.ndim == 1: base =", "can only be use monolithically with pandas. 
In the future", "- 1 if single_node else 0 task = (_link, (name,", "replaced with an increasing sequence of integers starting from ``0``", "hdf: keys = [k for k in hdf.keys() if fnmatch(k,", "task = (_link, (name, link_dep), task) dsk[(name, i)] = task", "read_hdf. \"\"\" def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize): \"\"\" Get", "or not the input hdf files have a sorted index", "= tokenize((path, os.path.getmtime(path), key, start, stop, empty, chunksize, division)) name", "datapath, and an optional ``name_function``. The asterix will be replaced", "(len(dsk) + 1) return new_dd_object(dsk, name, empty, divisions) keys, stops,", "None, will limit the return columns (default is None) chunksize", "timedelta >>> base = date(year=2000, month=1, day=1) >>> def name_function(i):", "from toolz import merge from .io import _link from ...base", "the last row of data for each matched key. \"\"\"", "the same file: >>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP Save", "and return a string. (see examples below) compute: bool Whether", "elif stop > storer.nrows: raise ValueError(\"Stop keyword exceeds dataset number", "paths]) if PY3: from ..core import _Frame _Frame.to_hdf.__doc__ = to_hdf.__doc__", "naming scheme. This writes files as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..", "stop, sorted_index, chunksize): \"\"\" Get the \"keys\" or group identifiers", "# doctest: +SKIP Load multiple files >>> dd.read_hdf('myfile.*.hdf5', '/x') #", "doctest: +SKIP \"\"\" if lock is True: lock = get_scheduler_lock()", "Format (HDF) files This is a parallel version of the", "optional Option to specify whether or not the input hdf", "integers. 
This function only supports the Pandas ``'table'`` format, not", "concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock) for k,", "else: stops.append(stop) if sorted_index: division = [storer.read_column('index', start=start, stop=start +", "_pd_to_hdf(pd_to_hdf, lock, args, kwargs=None): \"\"\" A wrapper function around pd_to_hdf", "between partitions name_function \" \"must preserve the order of its", "files have a sorted index (default is False). lock :", "pandas. In the future when creating HDFStores use the ``format='table'``", "if stop is None: stops.append(storer.nrows) elif stop > storer.nrows: raise", "token = tokenize((path, os.path.getmtime(path), key, start, stop, empty, chunksize, division))", "columns=None, chunksize=int(1e6), sorted_index=False, lock=None, mode='a'): \"\"\" Read a single hdf", "= 'a' if single_node: kwargs2['append'] = True filenames = []", "name_function(i) task = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, i), fmt_obj(path, i_name),", "def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize): \"\"\" Get the \"keys\"", "index per file, or starting and stopping index of the", "if start >= stop: raise ValueError(\"Start row number ({}) is", "storer = hdf.get_storer(k) if storer.format_type != 'table': raise TypeError(dont_use_fixed_error_message) if", "to the number of partitions and return a string. (see", "of columns, optional A list of columns that if not", "parallel version of the Pandas function of the same name.", "information about shared keyword arguments. 
This function differs from the", "+SKIP Save data to multiple files: >>> df.to_hdf('output-*.hdf', '/data') #", "Data to a single file >>> df.to_hdf('output.hdf', '/data') # doctest:", "else: divisions.append(None) return keys, stops, divisions def one_path_one_key(path, key, start,", "if lock: lock = get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format': 'table', 'mode':", "!= 0 or stop is not None) and len(paths) >", "``multiprocessing.Lock`` or ``SerializableLock`` will be used depending on your scheduler", "contain wildcards. key : group identifier in the store. Can", "0), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs) kwargs2 = kwargs.copy() if", "this returns a ``dask.Delayed`` value. lock: Lock, optional Lock to", "stop keywords are not supported when reading from more than", "lock.acquire() try: pd_to_hdf(*args, **kwargs) finally: if lock: lock.release() return None", "of calling ``name_function`` on each of those integers. This function", "supports the Pandas ``'table'`` format, not the more specialized ``'fixed'``", "_read_single_hdf(path, key, start=0, stop=None, columns=None, chunksize=int(1e6), sorted_index=False, lock=None, mode='a'): \"\"\"", "required. See dask.utils.get_scheduler_lock for more information about lock selection. 
**other:", "name_function=name_function) # doctest: +SKIP Returns ------- None: if compute ==", "single_file: kwargs2['mode'] = 'a' if single_node: kwargs2['append'] = True filenames", "key, stop, sorted_index, chunksize): \"\"\" Get the \"keys\" or group", "# doctest: +SKIP Save data to multiple datapaths within the", "DataFrame in parallel, either to many files, or to many", "args, kwargs=None): \"\"\" A wrapper function around pd_to_hdf that enables", "'*' in key: single_node = False if 'format' in kwargs", "= get_scheduler(get=get, collections=[df], scheduler=scheduler) if lock is None: if not", "doctest: +SKIP Save data to multiple files, using the multiprocessing", "not contain any wildcards). \"\"\" empty = pd.read_hdf(path, key, mode=mode,", "keys, stops, divisions def one_path_one_key(path, key, start, stop, columns, chunksize,", "lock: lock.release() return None def to_hdf(df, path, key, mode='a', append=False,", "and _actual_get is not multiprocessing.get: # if we're writing to", "import uuid from warnings import warn import pandas as pd", "lock, kwargs): \"\"\" Read from hdf5 file with a lock", "else '/' + key if isinstance(pattern, str): paths = sorted(glob(pattern))", "glob import glob import os import uuid from warnings import", "parallelized\"\"\" read_hdf_error_msg = \"\"\" The start and stop keywords are", "i_name if isinstance(path, str): if path.count('*') + key.count('*') > 1:", "# so we enforce name_function to maintain the order of", "scheduler = 'single-threaded' # handle lock default based on whether", "ValueError(\"A maximum of one asterisk is accepted in file \"", "Save Data to a single file >>> df.to_hdf('output.hdf', '/data') #", "or from multiple keys from the same file. 
Parameters ----------", "config.get('get', None) and scheduler is None and not config.get('scheduler', None)", "not None) and sorted_index: raise ValueError(\"When assuming pre-partitioned data, data", "= storer.read_column('index', start=storer.nrows - 1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else: divisions.append(None)", "depending on your scheduler if a lock is required. See", "dsk[(name, i)] = task dsk = merge(df.dask, dsk) if single_file", "({})\".format(storer.nrows)) else: stops.append(stop) if sorted_index: division = [storer.read_column('index', start=start, stop=start", "is like ``pandas.read_hdf``, except it can read from a single", "is ambiguous because it could be interpreted as the starting", "in kwargs and kwargs['format'] not in ['t', 'table']: raise ValueError(\"Dask", "... ''' Convert integer 0 to n to a string", "empty, chunksize, division)) name = 'read-hdf-' + token if empty.ndim", "\" \"row number ({})\".format(start, stop)) def update(s): new = base.copy()", "not None: empty = empty[columns] token = tokenize((path, os.path.getmtime(path), key,", "use monolithically with pandas. In the future when creating HDFStores", "doctest: +SKIP Specify custom naming scheme. This writes files as", "i_name: path.replace('*', i_name) if '*' in path: single_file = False", "in zip(keys, stops, divisions)]) def _pd_read_hdf(path, key, lock, kwargs): \"\"\"", "[] for i in range(0,df.npartitions): i_name = name_function(i) filenames.append(fmt_obj(path, i_name))", "None) and len(keys) > 1: raise NotImplementedError(read_hdf_error_msg) from ..multi import", "'mode': mode} if start >= stop: raise ValueError(\"Start row number", "i), (_pd_read_hdf, path, key, lock, update(s))) for i, s in", "(default is 1000000). 
sorted_index : boolean, optional Option to specify", "key if key.startswith('/') else '/' + key if isinstance(pattern, str):", "list File pattern (string), buffer to read from, or list", "__future__ import absolute_import, division, print_function from fnmatch import fnmatch from", "files, using the multiprocessing scheduler: >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') #", "# handle lock default based on whether we're writing to", "Path to a target filename. May contain a ``*`` to", "to maintain the order of its input. if not (single_file", "are not supported when reading from more than one file/dataset.", "uses the hdf file identified by the given path. Also", "is opened for reading and writing, and if the file", "Hierarchical Data Format (HDF) files This is a parallel version", "get=get, scheduler=scheduler, **dask_kwargs) return filenames else: return delayed([Delayed(k, dsk) for", "lock.release() return None def to_hdf(df, path, key, mode='a', append=False, get=None,", "start, stop, empty, chunksize, division)) name = 'read-hdf-' + token", "HDFStores use the ``format='table'`` option to ensure that your dataset", "None) and sorted_index: raise ValueError(\"When assuming pre-partitioned data, data must", "within the same file. You may specify this parallelism with", "lock, [(df._name, 0), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs) kwargs2 =", "for k in keys]) dont_use_fixed_error_message = \"\"\" This HDFStore is", "'table']: raise ValueError(\"Dask only support 'table' format in hdf files.\")", "= {'name': empty.name, 'mode': mode} else: base = {'columns': empty.columns,", "= kwargs.copy() if single_file: kwargs2['mode'] = 'a' if single_node: kwargs2['append']", "hdf.get_storer(k) if storer.format_type != 'table': raise TypeError(dont_use_fixed_error_message) if stop is", "a string. (see examples below) compute: bool Whether or not", "string. 
(see examples below) compute: bool Whether or not to", "If False then this returns a ``dask.Delayed`` value. lock: Lock,", "lock=None, mode='a'): \"\"\" Read a single hdf file into a", "not in ['t', 'table']: raise ValueError(\"Dask only support 'table' format", "specify scheduler and write is sequential default to the #", "key.startswith('/') else '/' + key if isinstance(pattern, str): paths =", "fnmatch(k, key)] stops = [] divisions = [] for k", "of the last row of data for each matched key.", "_actual_get is not multiprocessing.get: # if we're writing to multiple", "within the same file: >>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP", "contain wildcards start : optional, integer (defaults to 0), row", "Datapath within the files. May contain a ``*`` to denote", "data frame corresponding to one path and one key (which", "single_file: link_dep = i - 1 if single_node else 0", "'/data-*') # doctest: +SKIP Save data to multiple files: >>>", "TypeError(dont_use_fixed_error_message) if stop is None: stops.append(storer.nrows) elif stop > storer.nrows:", "issues. By default a ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock`` will be", "to multiple datapaths within the same file: >>> df.to_hdf('output.hdf', '/data-*')", "of its input. if not (single_file and single_node): formatted_names =", "\" \"dataset key\") fmt_obj = lambda path, _: path if", "Get the data frame corresponding to one path and one", "is not None: empty = empty[columns] token = tokenize((path, os.path.getmtime(path),", "fmt_obj = lambda path, _: path if '*' in key:", "future when creating HDFStores use the ``format='table'`` option to ensure", "return None def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None,", "{'a', 'r', 'r+'}, default 'a'. 
Mode to use when opening", "return keys, stops, divisions def one_path_one_key(path, key, start, stop, columns,", "start=start, stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode) for path in", "or ``SerializableLock`` will be used depending on your scheduler if", "in key: single_node = False if 'format' in kwargs and", "assuming pre-partitioned data, data must be \" \"read in its", "empty = empty[columns] token = tokenize((path, os.path.getmtime(path), key, start, stop,", "group identifiers which match the given key, which can contain", "lock.release() return result def read_hdf(pattern, key, start=0, stop=None, columns=None, chunksize=1000000,", "os import uuid from warnings import warn import pandas as", "= (_pd_to_hdf, pd_to_hdf, lock, [(df._name, 0), fmt_obj(path, i_name), key.replace('*', i_name)],", "read # so we enforce name_function to maintain the order", "the given key, which can contain wildcards. This uses the", "This function differs from the Pandas version by saving the", "divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize) if (start !=", "in its entirety using the same chunksizes\") from ..multi import", "or not to execute immediately. If False then this returns", "start at stop : optional, integer (defaults to None, the", "'mode': mode} else: base = {'columns': empty.columns, 'mode': mode} if", "an increasing sequence of integers starting from ``0`` or with", "the Pandas function of the same name. Please see the", "= get_scheduler_lock() key = key if key.startswith('/') else '/' +", ": optional, integer (defaults to None, the last row), row", "bool Whether or not to execute immediately. 
If False then", "\"\"\" empty = pd.read_hdf(path, key, mode=mode, stop=0) if columns is", "and not config.get('get', None) and scheduler is None and not", "lambda path, _: path if '*' in key: single_node =", "print_function from fnmatch import fnmatch from glob import glob import", "(defaults to 0), row number to start at stop :", "doctest: +SKIP Returns ------- None: if compute == True delayed", "dsk = merge(df.dask, dsk) if single_file and single_node: keys =", "will limit the return columns (default is None) chunksize :", "locations name_function: function A function to convert the ``*`` in", "as the starting and stopping index per file, or starting", "mode not in ('a', 'w', 'r+'): raise ValueError(\"Mode must be", "lock to prevent concurrency issues (default is True). mode :", "kwargs.update({'format': 'table', 'mode': mode, 'append': append}) dsk = dict() i_name", "the same name. Please see the Pandas docstring for more", "new_dd_object from ... import config, multiprocessing from ...base import tokenize,", "the same chunksizes\") from ..multi import concat return concat([_read_single_hdf(path, key,", "files This is a parallel version of the Pandas function", "import fnmatch from glob import glob import os import uuid", "the same file. Parameters ---------- pattern : string, list File", "read from a single large file, or from multiple files,", "= sorted(glob(pattern)) else: paths = pattern if (start != 0", "to multiple files: >>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP Save", "integers starting from ``0`` or with the result of calling", "stop, columns, chunksize, division, lock): \"\"\" Get the data frame", "path in paths]) if PY3: from ..core import _Frame _Frame.to_hdf.__doc__", "-------- Save Data to a single file >>> df.to_hdf('output.hdf', '/data')", "lock = get_scheduler_lock() key = key if key.startswith('/') else '/'", "except it can read from a single large file, or", "``dask.Delayed`` value. 
lock: Lock, optional Lock to use to prevent", "one_path_one_key(path, key, start, stop, columns, chunksize, division, lock): \"\"\" Get", "'a' if single_node: kwargs2['append'] = True filenames = [] for", "sorted_index : boolean, optional Option to specify whether or not", "number to stop at columns : list of columns, optional", "get_keys_stops_divisions(path, key, stop, sorted_index, chunksize): \"\"\" Get the \"keys\" or", "'stop': s + chunksize}) return new dsk = dict(((name, i),", "function differs from the Pandas version by saving the many", "partition (default is 1000000). sorted_index : boolean, optional Option to", "if formatted_names != sorted(formatted_names): warn(\"To preserve order between partitions name_function", "i_name) if '*' in path: single_file = False else: if", "name_function \" \"must preserve the order of its input\") #", "scheduler is None and not config.get('scheduler', None) and single_node and", "empty[columns] token = tokenize((path, os.path.getmtime(path), key, start, stop, empty, chunksize,", "True elif not single_file and _actual_get is not multiprocessing.get: #", "dataset key\") fmt_obj = lambda path, i_name: path.replace('*', i_name) if", "# doctest: +SKIP Returns ------- None: if compute == True", "result def read_hdf(pattern, key, start=0, stop=None, columns=None, chunksize=1000000, sorted_index=False, lock=True,", "input. if not (single_file and single_node): formatted_names = [name_function(i) for", "pd.read_hdf(path, key, mode=mode, stop=0) if columns is not None: empty", "per partition (default is 1000000). 
sorted_index : boolean, optional Option", "in range(1, df.npartitions): i_name = name_function(i) task = (_pd_to_hdf, pd_to_hdf,", "= name_function(0) dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, 0),", "storer.nrows: raise ValueError(\"Stop keyword exceeds dataset number \" \"of rows", "if lock is True: lock = get_scheduler_lock() key = key", "of 'a', 'w' or 'r+'\") if name_function is None: name_function", "return filenames else: return delayed([Delayed(k, dsk) for k in keys])", "'/data') # doctest: +SKIP Save data to multiple files, using", "in \" \"dataset key\") fmt_obj = lambda path, _: path", "to stop \" \"row number ({})\".format(start, stop)) def update(s): new", "df, scheduler=scheduler) kwargs.update({'format': 'table', 'mode': mode, 'append': append}) dsk =", "'read-hdf-' + token if empty.ndim == 1: base = {'name':", "True # if path is string, format using i_name if", "file >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP Load multiple files", "that if not None, will limit the return columns (default", "entirety using the same chunksizes\") from ..multi import concat return", "as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.. >>> from datetime import date,", "but the file must already exist. Returns ------- dask.DataFrame Examples", "a single hdf file into a dask.dataframe. Used for each", "get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format': 'table', 'mode': mode, 'append': append}) dsk", "if compute == False See Also -------- read_hdf: to_parquet: \"\"\"", "file. 
You may specify this parallelism with an asterix ``*``", "maximum of one asterisk is accepted in file \" \"path", "keywords are not supported when reading from more than one", "hdf5 file with a lock \"\"\" if lock: lock.acquire() try:", "_: path if '*' in key: single_node = False if", "from ..multi import concat return concat([one_path_one_key(path, k, start, s, columns,", "input hdf files have a sorted index (default is False).", "finally: if lock: lock.release() return None def to_hdf(df, path, key,", "This function is like ``pandas.read_hdf``, except it can read from", "else: base = {'columns': empty.columns, 'mode': mode} if start >=", "k in keys: storer = hdf.get_storer(k) if storer.format_type != 'table':", "0 or stop is not None) and len(paths) > 1:", "to multiple files with the multiprocessing # scheduler we don't", "sorted_index: division = [storer.read_column('index', start=start, stop=start + 1)[0] for start", "stops, divisions)]) def _pd_read_hdf(path, key, lock, kwargs): \"\"\" Read from", ">>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP Save data to multiple", "stops = [] divisions = [] for k in keys:", "a number from 0 to the number of partitions and", "# if we're writing to multiple files with the multiprocessing", "fmt_obj(path, i_name), key.replace('*', i_name)], kwargs) kwargs2 = kwargs.copy() if single_file:", "index of the last row of data for each matched", "multiprocessing scheduler: >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP Specify", "ValueError(\"Mode must be one of 'a', 'w' or 'r+'\") if", "a parallel version of the Pandas function of the same", "Append; an existing file is opened for reading and writing,", "== True delayed value: if compute == False See Also", "sorted_index=sorted_index, lock=lock, mode=mode) for path in paths]) if PY3: from", "wildcards. key : group identifier in the store. 
Can contain", "using the multiprocessing scheduler: >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest:", "in range(df.npartitions)] if compute: compute_as_if_collection(DataFrame, dsk, keys, get=get, scheduler=scheduler, **dask_kwargs)", "key, start, stop, empty, chunksize, division)) name = 'read-hdf-' +", "the input hdf files have a sorted index (default is", "= True filenames = [] for i in range(0,df.npartitions): i_name", "if fnmatch(k, key)] stops = [] divisions = [] for", "task) dsk[(name, i)] = task dsk = merge(df.dask, dsk) if", "entity _actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler) if lock is None:", "last row), row number to stop at columns : list", "= key if key.startswith('/') else '/' + key if isinstance(pattern,", "an asterix ``*`` within the filename or datapath, and an", "'r+'): raise ValueError(\"Mode must be one of 'a', 'w' or", "if compute: compute_as_if_collection(DataFrame, dsk, keys, get=get, scheduler=scheduler, **dask_kwargs) return filenames", "single large file, or from multiple files, or from multiple", "positive integer, optional Maximal number of rows per partition (default", "i, s in enumerate(range(start, stop, chunksize))) if division: divisions =", "df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP Save data to multiple files:", "dataframe. 
This function is like ``pandas.read_hdf``, except it can read", "stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode) for path in paths])", "to specify whether or not the input hdf files have", "'/x') # doctest: +SKIP Load multiple datasets >>> dd.read_hdf('myfile.1.hdf5', '/*')", "or stop is not None) and len(keys) > 1: raise", "{'columns': empty.columns, 'mode': mode} if start >= stop: raise ValueError(\"Start", "did not specify scheduler and write is sequential default to", "in path: single_file = False else: if key.count('*') > 1:", "its saved and read # so we enforce name_function to", "i_name), key.replace('*', i_name)], kwargs2) if single_file: link_dep = i -", "of columns that if not None, will limit the return", "= task dsk = merge(df.dask, dsk) if single_file and single_node:", "... import config, multiprocessing from ...base import tokenize, compute_as_if_collection from", "single hdf file into a dask.dataframe. Used for each file", "collections=[df], scheduler=scheduler) if lock is None: if not single_node: lock", "Used for each file in read_hdf. \"\"\" def get_keys_stops_divisions(path, key,", "keys: storer = hdf.get_storer(k) if storer.format_type != 'table': raise TypeError(dont_use_fixed_error_message)", "list of file paths. Can contain wildcards. key : group", "-------- Load single file >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP", "mode='a'): \"\"\" Read HDF files into a Dask DataFrame Read", "increasing sequence of integers starting from ``0`` or with the", "k in hdf.keys() if fnmatch(k, key)] stops = [] divisions", "default 'a'. Mode to use when opening file(s). 
'r' Read-only;", "is above or equal to stop \" \"row number ({})\".format(start,", "and single_node and single_file): scheduler = 'single-threaded' # handle lock", "[(df._name, i), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs2) if single_file: link_dep", "<= 0: raise ValueError(\"Chunksize must be a positive integer\") if", "new dsk = dict(((name, i), (_pd_read_hdf, path, key, lock, update(s)))", "of rows per partition (default is 1000000). sorted_index : boolean,", "if key.startswith('/') else '/' + key if isinstance(pattern, str): paths", "def name_function(i): ... ''' Convert integer 0 to n to", "with an increasing sequence of integers starting from ``0`` or", "dask_kwargs={}, **kwargs): \"\"\" Store Dask Dataframe to Hierarchical Data Format", "single_node): formatted_names = [name_function(i) for i in range(df.npartitions)] if formatted_names", "specialized ``'fixed'`` format. Parameters ---------- path: string Path to a", "is created. 'r+' It is similar to 'a', but the", "wildcards. This uses the hdf file identified by the given", ">>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x')", "get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None): \"\"\" A wrapper function", "rows per partition (default is 1000000). sorted_index : boolean, optional", "filenames = [] for i in range(0,df.npartitions): i_name = name_function(i)", "key.count('*') > 1: raise ValueError(\"A maximum of one asterisk is", "config.get('scheduler', None) and single_node and single_file): scheduler = 'single-threaded' #", "one file/dataset. 
The combination is ambiguous because it could be", "boolean, optional Option to use a lock to prevent concurrency", "a lock \"\"\" if lock: lock.acquire() try: result = pd.read_hdf(path,", "a ``*`` to denote many filenames key: string Datapath within", "\"\"\" The start and stop keywords are not supported when", "in keys: storer = hdf.get_storer(k) if storer.format_type != 'table': raise", "to a single file >>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP", "in parallel, either to many files, or to many datasets", "'/data', scheduler='processes') # doctest: +SKIP Specify custom naming scheme. This", "ValueError(\"Chunksize must be a positive integer\") if (start != 0", "a ``dask.Delayed`` value. lock: Lock, optional Lock to use to", "keys, get=get, scheduler=scheduler, **dask_kwargs) return filenames else: return delayed([Delayed(k, dsk)", ">>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP Specify custom naming", "Data Format (HDF) files This is a parallel version of", "pd_to_hdf, lock, [(df._name, i), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs2) if", "chunksize))) if division: divisions = division else: divisions = [None]", "'/x') # doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest:", "optional, integer (defaults to None, the last row), row number", "Save data to multiple files, using the multiprocessing scheduler: >>>", "preserve the order of its input\") # If user did", "raise ValueError(\"Start row number ({}) is above or equal to", "be used depending on your scheduler if a lock is", "''' ... 
return base + timedelta(days=i) >>> df.to_hdf('*.hdf', '/data', name_function=name_function)", "May contain a ``*`` to denote many filenames key: string", "dsk) for k in keys]) dont_use_fixed_error_message = \"\"\" This HDFStore", "chunksize : positive integer, optional Maximal number of rows per", "task dsk = merge(df.dask, dsk) if single_file and single_node: keys", "chunksize)] division_end = storer.read_column('index', start=storer.nrows - 1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division)", "or stop is not None) and sorted_index: raise ValueError(\"When assuming", "This writes files as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.. >>> from", "== False See Also -------- read_hdf: to_parquet: \"\"\" name =", "task = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, i), fmt_obj(path, i_name), key.replace('*',", "in hdf files.\") if mode not in ('a', 'w', 'r+'):", "not single_file and _actual_get is not multiprocessing.get: # if we're", "This HDFStore is not partitionable and can only be use", "for i in range(df.npartitions)] if compute: compute_as_if_collection(DataFrame, dsk, keys, get=get,", "columns is not None: empty = empty[columns] token = tokenize((path,", "divisions = [None] * (len(dsk) + 1) return new_dd_object(dsk, name,", "mode} else: base = {'columns': empty.columns, 'mode': mode} if start", "frame corresponding to one path and one key (which should", "lock: lock.acquire() try: result = pd.read_hdf(path, key, **kwargs) finally: if", "options to a string. Should take in a number from", "``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock`` will be used depending on your", "optional Option to use a lock to prevent concurrency issues", "stop \" \"row number ({})\".format(start, stop)) def update(s): new =", "the file does not exist it is created. 
'r+' It", "columns=None, chunksize=1000000, sorted_index=False, lock=True, mode='a'): \"\"\" Read HDF files into", "pd_to_hdf(*args, **kwargs) finally: if lock: lock.release() return None def to_hdf(df,", "format, not the more specialized ``'fixed'`` format. Parameters ---------- path:", "to a string ''' ... return base + timedelta(days=i) >>>", "writing to a single entity _actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler)", "start=0, stop=None, columns=None, chunksize=1000000, sorted_index=False, lock=True, mode='a'): \"\"\" Read HDF", "kwargs=None): \"\"\" A wrapper function around pd_to_hdf that enables locking\"\"\"", "NotImplementedError(read_hdf_error_msg) if chunksize <= 0: raise ValueError(\"Chunksize must be a", "for each file in read_hdf. \"\"\" def get_keys_stops_divisions(path, key, stop,", "concurrency issues (default is True). mode : {'a', 'r', 'r+'},", "be replaced with an increasing sequence of integers starting from", "file, or starting and stopping index of the global dataset.\"\"\"", "columns, chunksize, d, lock) for k, s, d in zip(keys,", "start : optional, integer (defaults to 0), row number to", "partitionable and can only be use monolithically with pandas. In", "group identifier in the store. Can contain wildcards start :", "NotImplementedError(read_hdf_error_msg) from ..multi import concat return concat([one_path_one_key(path, k, start, s,", "merge from .io import _link from ...base import get_scheduler from", "scheduler. otherwise let the _get method choose the scheduler if", "name_function(0) dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, 0), fmt_obj(path,", "import pandas as pd from toolz import merge from .io", "is not partitionable and can only be use monolithically with", "base = {'columns': empty.columns, 'mode': mode} if start >= stop:", "s, d in zip(keys, stops, divisions)]) def _pd_read_hdf(path, key, lock,", "'r', 'r+'}, default 'a'. 
Mode to use when opening file(s).", "kwargs2 = kwargs.copy() if single_file: kwargs2['mode'] = 'a' if single_node:", "- 1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else: divisions.append(None) return keys, stops,", "False if lock: lock = get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format': 'table',", "fnmatch import fnmatch from glob import glob import os import", "If user did not specify scheduler and write is sequential", "and read # so we enforce name_function to maintain the", ">>> def name_function(i): ... ''' Convert integer 0 to n", "can be parallelized\"\"\" read_hdf_error_msg = \"\"\" The start and stop", "(_link, (name, link_dep), task) dsk[(name, i)] = task dsk =", "range(0, storer.nrows, chunksize)] division_end = storer.read_column('index', start=storer.nrows - 1, stop=storer.nrows)[0]", "lock : boolean, optional Option to use a lock to", "key, mode=mode, stop=0) if columns is not None: empty =", "------- None: if compute == True delayed value: if compute", "single_file and _actual_get is not multiprocessing.get: # if we're writing", "if '*' in path: single_file = False else: if key.count('*')", "a Dask DataFrame in parallel, either to many files, or", "build_name_function from ...compatibility import PY3 from ...delayed import Delayed, delayed", "division)) name = 'read-hdf-' + token if empty.ndim == 1:", "isinstance(pattern, str): paths = sorted(glob(pattern)) else: paths = pattern if", "a string. Should take in a number from 0 to", "+SKIP Save data to multiple datapaths within the same file:", "update(s): new = base.copy() new.update({'start': s, 'stop': s + chunksize})", "lock = get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format': 'table', 'mode': mode, 'append':", "The combination is ambiguous because it could be interpreted as", "supported when reading from more than one file/dataset. 
The combination", "the starting and stopping index per file, or starting and", "in range(0, storer.nrows, chunksize)] division_end = storer.read_column('index', start=storer.nrows - 1,", "if we're writing to multiple files with the multiprocessing #", "could be interpreted as the starting and stopping index per", "is None: name_function = build_name_function(df.npartitions - 1) # we guarantee", "dask.DataFrame Examples -------- Load single file >>> dd.read_hdf('myfile.1.hdf5', '/x') #", "returns a ``dask.Delayed`` value. lock: Lock, optional Lock to use", "config, multiprocessing from ...base import tokenize, compute_as_if_collection from ...bytes.utils import", "- 1) # we guarantee partition order is preserved when", "'r' Read-only; no data can be modified. 'a' Append; an", "``format='table'`` option to ensure that your dataset can be parallelized\"\"\"", "absolute_import, division, print_function from fnmatch import fnmatch from glob import", "'table', 'mode': mode, 'append': append}) dsk = dict() i_name =", "is None and not config.get('get', None) and scheduler is None", "lock = True else: lock = False if lock: lock", "optional Maximal number of rows per partition (default is 1000000).", "pattern (string), buffer to read from, or list of file", "None and not config.get('get', None) and scheduler is None and", "\"of rows ({})\".format(storer.nrows)) else: stops.append(stop) if sorted_index: division = [storer.read_column('index',", "it can read from a single large file, or from", "of those integers. 
This function only supports the Pandas ``'table'``", ">>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP Save data to multiple", "!= 0 or stop is not None) and len(keys) >", "None: name_function = build_name_function(df.npartitions - 1) # we guarantee partition", "build_name_function(df.npartitions - 1) # we guarantee partition order is preserved", "large file, or from multiple files, or from multiple keys", "False else: if key.count('*') > 1: raise ValueError(\"A maximum of", "data to multiple files: >>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP", "more detailed information about shared keyword arguments. This function differs", "data to multiple files, using the multiprocessing scheduler: >>> df.to_hdf('output-*.hdf',", "single file >>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP Save data", "start=storer.nrows - 1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else: divisions.append(None) return keys,", "a lock is required. See dask.utils.get_scheduler_lock for more information about", "By default a ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock`` will be used", "0 or stop is not None) and len(keys) > 1:", "``'fixed'`` format. 
Parameters ---------- path: string Path to a target", "file >>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP Save data to", "(single_file and single_node): formatted_names = [name_function(i) for i in range(df.npartitions)]", "Returns ------- None: if compute == True delayed value: if", "and len(paths) > 1: raise NotImplementedError(read_hdf_error_msg) if chunksize <= 0:", ": boolean, optional Option to specify whether or not the", "combination is ambiguous because it could be interpreted as the", "of its input\") # If user did not specify scheduler", "== 1: base = {'name': empty.name, 'mode': mode} else: base", "1) return new_dd_object(dsk, name, empty, divisions) keys, stops, divisions =", "if lock: lock.release() return None def to_hdf(df, path, key, mode='a',", "0 or stop is not None) and sorted_index: raise ValueError(\"When", "raise ValueError(\"A maximum of one asterisk is accepted in \"", "and can only be use monolithically with pandas. In the", "whether we're writing to a single entity _actual_get = get_scheduler(get=get,", "doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP Load", "or to many datasets within the same file. You may", "into a Dask DataFrame Read hdf files into a dask", "_actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler) if lock is None: if", "= lambda path, _: path if '*' in key: single_node", "False). lock : boolean, optional Option to use a lock", "will be used depending on your scheduler if a lock", "None) and scheduler is None and not config.get('scheduler', None) and", "same file: >>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP Save data", "* (len(dsk) + 1) return new_dd_object(dsk, name, empty, divisions) keys,", "keyword arguments. 
This function differs from the Pandas version by", "chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode) for path in paths]) if PY3:", "range(1, df.npartitions): i_name = name_function(i) task = (_pd_to_hdf, pd_to_hdf, lock,", "with an asterix ``*`` within the filename or datapath, and", "...delayed import Delayed, delayed from ...utils import get_scheduler_lock def _pd_to_hdf(pd_to_hdf,", "lambda path, i_name: path.replace('*', i_name) if '*' in path: single_file", "because it could be interpreted as the starting and stopping", "and sorted_index: raise ValueError(\"When assuming pre-partitioned data, data must be", "'a'. Mode to use when opening file(s). 'r' Read-only; no", "and write is sequential default to the # sequential scheduler.", "hdf files into a dask dataframe. This function is like", "Mode to use when opening file(s). 'r' Read-only; no data", "1 if single_node else 0 task = (_link, (name, link_dep),", "'r+' It is similar to 'a', but the file must", "to 'a', but the file must already exist. Returns -------", "Save data to multiple files: >>> df.to_hdf('output-*.hdf', '/data') # doctest:", "raise ValueError(\"Stop keyword exceeds dataset number \" \"of rows ({})\".format(storer.nrows))", "use to prevent concurrency issues. By default a ``threading.Lock``, ``multiprocessing.Lock``", "and single_file): scheduler = 'single-threaded' # handle lock default based", "string Datapath within the files. May contain a ``*`` to", "ensure that your dataset can be parallelized\"\"\" read_hdf_error_msg = \"\"\"", "either to many files, or to many datasets within the", "name_function(i): ... ''' Convert integer 0 to n to a", "kwargs): \"\"\" Read from hdf5 file with a lock \"\"\"", "single_node else 0 task = (_link, (name, link_dep), task) dsk[(name,", "key. \"\"\" with pd.HDFStore(path, mode=mode) as hdf: keys = [k", "denote many filenames key: string Datapath within the files. 
May", "and single_node): formatted_names = [name_function(i) for i in range(df.npartitions)] if", "def _read_single_hdf(path, key, start=0, stop=None, columns=None, chunksize=int(1e6), sorted_index=False, lock=None, mode='a'):", "must be one of 'a', 'w' or 'r+'\") if name_function", "**kwargs) finally: if lock: lock.release() return result def read_hdf(pattern, key,", "Lock, optional Lock to use to prevent concurrency issues. By", "compute_as_if_collection from ...bytes.utils import build_name_function from ...compatibility import PY3 from", "isinstance(path, str): if path.count('*') + key.count('*') > 1: raise ValueError(\"A", "parallelism with an asterix ``*`` within the filename or datapath,", "the order of its input\") # If user did not", "= True single_node = True # if path is string,", "of integers starting from ``0`` or with the result of", "= [storer.read_column('index', start=start, stop=start + 1)[0] for start in range(0,", "stop, sorted_index, chunksize) if (start != 0 or stop is", "empty = pd.read_hdf(path, key, mode=mode, stop=0) if columns is not", "name_function is None: name_function = build_name_function(df.npartitions - 1) # we", "1000000). sorted_index : boolean, optional Option to specify whether or", "= dict() i_name = name_function(0) dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf,", "the Pandas ``'table'`` format, not the more specialized ``'fixed'`` format.", "filename or datapath, and an optional ``name_function``. The asterix will", "paths. Can contain wildcards. key : group identifier in the", "import get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None): \"\"\" A wrapper", "multiple files with the multiprocessing # scheduler we don't need", "buffer to read from, or list of file paths. Can", "paths = sorted(glob(pattern)) else: paths = pattern if (start !=", "i - 1 if single_node else 0 task = (_link,", "day=1) >>> def name_function(i): ... 
''' Convert integer 0 to", "= [] for i in range(0,df.npartitions): i_name = name_function(i) filenames.append(fmt_obj(path,", "is None and not config.get('scheduler', None) and single_node and single_file):", "number ({}) is above or equal to stop \" \"row", "+ uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type, 'to_hdf') single_file = True single_node", "'/' + key if isinstance(pattern, str): paths = sorted(glob(pattern)) else:", "kwargs) kwargs2 = kwargs.copy() if single_file: kwargs2['mode'] = 'a' if", "key.replace('*', i_name)], kwargs2) if single_file: link_dep = i - 1", "information about lock selection. **other: See pandas.to_hdf for more information", "order of its input\") # If user did not specify", "writing to multiple files with the multiprocessing # scheduler we", "= 'to-hdf-' + uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type, 'to_hdf') single_file =", ">= stop: raise ValueError(\"Start row number ({}) is above or", "path: single_file = False else: if key.count('*') > 1: raise", "be use monolithically with pandas. In the future when creating", "files, or to many datasets within the same file. You", "to a target filename. May contain a ``*`` to denote", "one asterisk is accepted in file \" \"path and dataset", "stop=start + 1)[0] for start in range(0, storer.nrows, chunksize)] division_end", "chunksize, division)) name = 'read-hdf-' + token if empty.ndim ==", "guarantee partition order is preserved when its saved and read", "to the # sequential scheduler. otherwise let the _get method", "name_function = build_name_function(df.npartitions - 1) # we guarantee partition order", "Dataframe to Hierarchical Data Format (HDF) files This is a", "raise ValueError(\"Mode must be one of 'a', 'w' or 'r+'\")", "existing file is opened for reading and writing, and if", "from the same file. 
Parameters ---------- pattern : string, list", "lock.acquire() try: result = pd.read_hdf(path, key, **kwargs) finally: if lock:", "maximum of one asterisk is accepted in \" \"dataset key\")", "divisions.append(division) else: divisions.append(None) return keys, stops, divisions def one_path_one_key(path, key,", "for reading and writing, and if the file does not", "chunksizes\") from ..multi import concat return concat([_read_single_hdf(path, key, start=start, stop=stop,", "append}) dsk = dict() i_name = name_function(0) dsk[(name, 0)] =", "can contain wildcards. This uses the hdf file identified by", "``*`` in the above options to a string. Should take", "to use to prevent concurrency issues. By default a ``threading.Lock``,", "\"\"\" Get the \"keys\" or group identifiers which match the", "See dask.utils.get_scheduler_lock for more information about lock selection. **other: See", "if chunksize <= 0: raise ValueError(\"Chunksize must be a positive", "scheduler and write is sequential default to the # sequential", "Examples -------- Load single file >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest:", "optional, integer (defaults to 0), row number to start at", "modified. 'a' Append; an existing file is opened for reading", "the files. May contain a ``*`` to denote many locations", "more than one file/dataset. The combination is ambiguous because it", "name_function=None, compute=True, lock=None, dask_kwargs={}, **kwargs): \"\"\" Store Dask Dataframe to", "not the more specialized ``'fixed'`` format. 
Parameters ---------- path: string", "A wrapper function around pd_to_hdf that enables locking\"\"\" if lock:", "keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize) if", "if not (single_file and single_node): formatted_names = [name_function(i) for i", "if lock: lock.acquire() try: result = pd.read_hdf(path, key, **kwargs) finally:", "This is a parallel version of the Pandas function of", "using the same chunksizes\") from ..multi import concat return concat([_read_single_hdf(path,", "if name_function is None: name_function = build_name_function(df.npartitions - 1) #", "be interpreted as the starting and stopping index per file,", "sorted index (default is False). lock : boolean, optional Option", "else: keys = [(name, i) for i in range(df.npartitions)] if", "format. Parameters ---------- path: string Path to a target filename.", "+ key.count('*') > 1: raise ValueError(\"A maximum of one asterisk", "lock: lock.release() return result def read_hdf(pattern, key, start=0, stop=None, columns=None,", "i_name = name_function(i) filenames.append(fmt_obj(path, i_name)) for i in range(1, df.npartitions):", "fmt_obj(path, i_name), key.replace('*', i_name)], kwargs2) if single_file: link_dep = i", "dask.dataframe. Used for each file in read_hdf. \"\"\" def get_keys_stops_divisions(path,", "(HDF) files This is a parallel version of the Pandas", "file/dataset. The combination is ambiguous because it could be interpreted", "Option to use a lock to prevent concurrency issues (default", "raise TypeError(dont_use_fixed_error_message) if stop is None: stops.append(storer.nrows) elif stop >", "a ``*`` to denote many locations name_function: function A function", "'table': raise TypeError(dont_use_fixed_error_message) if stop is None: stops.append(storer.nrows) elif stop", "is not None) and sorted_index: raise ValueError(\"When assuming pre-partitioned data,", "matched key. 
\"\"\" with pd.HDFStore(path, mode=mode) as hdf: keys =", "not the input hdf files have a sorted index (default", "index of the global dataset.\"\"\" def _read_single_hdf(path, key, start=0, stop=None,", "\"keys\" or group identifiers which match the given key, which", "``name_function``. The asterix will be replaced with an increasing sequence", "use a lock to prevent concurrency issues (default is True).", "with pandas. In the future when creating HDFStores use the", "scheduler: >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP Specify custom", "hdf file identified by the given path. Also get the", "= hdf.get_storer(k) if storer.format_type != 'table': raise TypeError(dont_use_fixed_error_message) if stop", "data, data must be \" \"read in its entirety using", "'single-threaded' # handle lock default based on whether we're writing", "for i in range(0,df.npartitions): i_name = name_function(i) filenames.append(fmt_obj(path, i_name)) for", "file must already exist. Returns ------- dask.DataFrame Examples -------- Load", "pattern : string, list File pattern (string), buffer to read", "starting and stopping index per file, or starting and stopping", "is 1000000). sorted_index : boolean, optional Option to specify whether", "format using i_name if isinstance(path, str): if path.count('*') + key.count('*')", "created. 'r+' It is similar to 'a', but the file", "mode=mode) for path in paths]) if PY3: from ..core import", "creating HDFStores use the ``format='table'`` option to ensure that your", "(which should not contain any wildcards). \"\"\" empty = pd.read_hdf(path,", "None) and single_node and single_file): scheduler = 'single-threaded' # handle", "in ['t', 'table']: raise ValueError(\"Dask only support 'table' format in", "if single_file and single_node: keys = [(name, df.npartitions - 1)]", "token if empty.ndim == 1: base = {'name': empty.name, 'mode':", "only be use monolithically with pandas. 
In the future when", "chunksize}) return new dsk = dict(((name, i), (_pd_read_hdf, path, key,", "-------- read_hdf: to_parquet: \"\"\" name = 'to-hdf-' + uuid.uuid1().hex pd_to_hdf", "[storer.read_column('index', start=start, stop=start + 1)[0] for start in range(0, storer.nrows,", "if compute == True delayed value: if compute == False", "``0`` or with the result of calling ``name_function`` on each", "corresponding to one path and one key (which should not", "each matched key. \"\"\" with pd.HDFStore(path, mode=mode) as hdf: keys", "= True elif not single_file and _actual_get is not multiprocessing.get:", "a dask.dataframe. Used for each file in read_hdf. \"\"\" def", "is a parallel version of the Pandas function of the", "chunksize=1000000, sorted_index=False, lock=True, mode='a'): \"\"\" Read HDF files into a", "'append': append}) dsk = dict() i_name = name_function(0) dsk[(name, 0)]", "above or equal to stop \" \"row number ({})\".format(start, stop))", "tokenize((path, os.path.getmtime(path), key, start, stop, empty, chunksize, division)) name =", "+SKIP Load multiple datasets >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP", "[None] * (len(dsk) + 1) return new_dd_object(dsk, name, empty, divisions)", "toolz import merge from .io import _link from ...base import", "multiple keys from the same file. Parameters ---------- pattern :", "be parallelized\"\"\" read_hdf_error_msg = \"\"\" The start and stop keywords", "of the global dataset.\"\"\" def _read_single_hdf(path, key, start=0, stop=None, columns=None,", "value. lock: Lock, optional Lock to use to prevent concurrency", "integer, optional Maximal number of rows per partition (default is", "in range(df.npartitions)] if formatted_names != sorted(formatted_names): warn(\"To preserve order between", "to 0), row number to start at stop : optional,", "integer 0 to n to a string ''' ... return", "one path and one key (which should not contain any", "string ''' ... 
return base + timedelta(days=i) >>> df.to_hdf('*.hdf', '/data',", "preserve order between partitions name_function \" \"must preserve the order", "get_scheduler_lock() key = key if key.startswith('/') else '/' + key", "> 1: raise NotImplementedError(read_hdf_error_msg) from ..multi import concat return concat([one_path_one_key(path,", "base = date(year=2000, month=1, day=1) >>> def name_function(i): ... '''", "raise NotImplementedError(read_hdf_error_msg) from ..multi import concat return concat([one_path_one_key(path, k, start,", "from ..core import DataFrame, new_dd_object from ... import config, multiprocessing", "Read HDF files into a Dask DataFrame Read hdf files", "one asterisk is accepted in \" \"dataset key\") fmt_obj =", "the _get method choose the scheduler if (get is None", "those integers. This function only supports the Pandas ``'table'`` format,", "its input. if not (single_file and single_node): formatted_names = [name_function(i)", "enforce name_function to maintain the order of its input. if", "from ...utils import get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None): \"\"\"", "``*`` to denote many locations name_function: function A function to", "divisions)]) def _pd_read_hdf(path, key, lock, kwargs): \"\"\" Read from hdf5", "``SerializableLock`` will be used depending on your scheduler if a", "sorted(formatted_names): warn(\"To preserve order between partitions name_function \" \"must preserve", "multiprocessing.get: # if we're writing to multiple files with the", "{'name': empty.name, 'mode': mode} else: base = {'columns': empty.columns, 'mode':", "pandas.to_hdf for more information Examples -------- Save Data to a", "'*' in path: single_file = False else: if key.count('*') >", "fnmatch from glob import glob import os import uuid from", "None def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None, name_function=None,", "in read_hdf. 
\"\"\" def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize): \"\"\"", "contain a ``*`` to denote many filenames key: string Datapath", "read from, or list of file paths. Can contain wildcards.", "Pandas function of the same name. Please see the Pandas", "above options to a string. Should take in a number", "get=None, scheduler=None, name_function=None, compute=True, lock=None, dask_kwargs={}, **kwargs): \"\"\" Store Dask", "not single_node: lock = True elif not single_file and _actual_get", "= name_function(i) task = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, i), fmt_obj(path,", "Load multiple datasets >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP \"\"\"", "dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP \"\"\" if lock is True:", "or starting and stopping index of the global dataset.\"\"\" def", "start=start, stop=start + 1)[0] for start in range(0, storer.nrows, chunksize)]", "division else: divisions = [None] * (len(dsk) + 1) return", "Dask DataFrame Read hdf files into a dask dataframe. This", "\"row number ({})\".format(start, stop)) def update(s): new = base.copy() new.update({'start':", "<filename>dask/dataframe/io/hdf.py from __future__ import absolute_import, division, print_function from fnmatch import", "i in range(1, df.npartitions): i_name = name_function(i) task = (_pd_to_hdf,", "mode='a', append=False, get=None, scheduler=None, name_function=None, compute=True, lock=None, dask_kwargs={}, **kwargs): \"\"\"", "the number of partitions and return a string. (see examples", "we don't need to lock lock = True else: lock", "dsk = dict() i_name = name_function(0) dsk[(name, 0)] = (_pd_to_hdf,", "def update(s): new = base.copy() new.update({'start': s, 'stop': s +", "(name, link_dep), task) dsk[(name, i)] = task dsk = merge(df.dask,", "does not exist it is created. 'r+' It is similar", "key\") fmt_obj = lambda path, _: path if '*' in", ": {'a', 'r', 'r+'}, default 'a'. 
Mode to use when", "datetime import date, timedelta >>> base = date(year=2000, month=1, day=1)", "dsk, keys, get=get, scheduler=scheduler, **dask_kwargs) return filenames else: return delayed([Delayed(k,", "Pandas ``'table'`` format, not the more specialized ``'fixed'`` format. Parameters", "not None, will limit the return columns (default is None)", "dsk) if single_file and single_node: keys = [(name, df.npartitions -", "delayed value: if compute == False See Also -------- read_hdf:", "= [None] * (len(dsk) + 1) return new_dd_object(dsk, name, empty,", "else: divisions = [None] * (len(dsk) + 1) return new_dd_object(dsk,", "if key.count('*') > 1: raise ValueError(\"A maximum of one asterisk", "for i in range(1, df.npartitions): i_name = name_function(i) task =", "paths = pattern if (start != 0 or stop is", "_pd_read_hdf(path, key, lock, kwargs): \"\"\" Read from hdf5 file with", "if not single_node: lock = True elif not single_file and", "chunksize): \"\"\" Get the \"keys\" or group identifiers which match", "multiple files, using the multiprocessing scheduler: >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes')", "...base import tokenize, compute_as_if_collection from ...bytes.utils import build_name_function from ...compatibility", "partitions and return a string. (see examples below) compute: bool", "file, or from multiple files, or from multiple keys from", "None and not config.get('scheduler', None) and single_node and single_file): scheduler", "when its saved and read # so we enforce name_function", "n to a string ''' ... return base + timedelta(days=i)", "string Path to a target filename. May contain a ``*``", "many partitions of a Dask DataFrame in parallel, either to", "kwargs2) if single_file: link_dep = i - 1 if single_node", "Specify custom naming scheme. 
This writes files as '2000-01-01.hdf', '2000-01-02.hdf',", "name, empty, divisions) keys, stops, divisions = get_keys_stops_divisions(path, key, stop,", "# we guarantee partition order is preserved when its saved", "path, _: path if '*' in key: single_node = False", "key, start=start, stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode) for path", "+SKIP Save data to multiple files, using the multiprocessing scheduler:", "mode, 'append': append}) dsk = dict() i_name = name_function(0) dsk[(name,", "dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, 0), fmt_obj(path, i_name),", "Returns ------- dask.DataFrame Examples -------- Load single file >>> dd.read_hdf('myfile.1.hdf5',", "within the files. May contain a ``*`` to denote many", "import absolute_import, division, print_function from fnmatch import fnmatch from glob", "hdf files.\") if mode not in ('a', 'w', 'r+'): raise", "else 0 task = (_link, (name, link_dep), task) dsk[(name, i)]", "else: paths = pattern if (start != 0 or stop", "the filename or datapath, and an optional ``name_function``. 
The asterix", "specify this parallelism with an asterix ``*`` within the filename", "columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode) for path in paths]) if", "stops, divisions def one_path_one_key(path, key, start, stop, columns, chunksize, division,", "new.update({'start': s, 'stop': s + chunksize}) return new dsk =", "...bytes.utils import build_name_function from ...compatibility import PY3 from ...delayed import", "from warnings import warn import pandas as pd from toolz", "Please see the Pandas docstring for more detailed information about", "file: >>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP Save data to", "!= 'table': raise TypeError(dont_use_fixed_error_message) if stop is None: stops.append(storer.nrows) elif", "i_name)], kwargs) kwargs2 = kwargs.copy() if single_file: kwargs2['mode'] = 'a'", "lock is None: if not single_node: lock = True elif", "of one asterisk is accepted in file \" \"path and", "k in keys]) dont_use_fixed_error_message = \"\"\" This HDFStore is not", "or from multiple files, or from multiple keys from the", "0 task = (_link, (name, link_dep), task) dsk[(name, i)] =", "(get is None and not config.get('get', None) and scheduler is", "This function only supports the Pandas ``'table'`` format, not the", "stops.append(stop) if sorted_index: division = [storer.read_column('index', start=start, stop=start + 1)[0]", "the scheduler if (get is None and not config.get('get', None)", "import concat return concat([_read_single_hdf(path, key, start=start, stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index,", "path, i_name: path.replace('*', i_name) if '*' in path: single_file =", "or datapath, and an optional ``name_function``. The asterix will be", "pd_to_hdf = getattr(df._partition_type, 'to_hdf') single_file = True single_node = True", "import DataFrame, new_dd_object from ... 
import config, multiprocessing from ...base", "is string, format using i_name if isinstance(path, str): if path.count('*')", "stop=0) if columns is not None: empty = empty[columns] token", "around pd_to_hdf that enables locking\"\"\" if lock: lock.acquire() try: pd_to_hdf(*args,", "else: lock = False if lock: lock = get_scheduler_lock(get, df,", "rows ({})\".format(storer.nrows)) else: stops.append(stop) if sorted_index: division = [storer.read_column('index', start=start,", "and one key (which should not contain any wildcards). \"\"\"", "len(keys) > 1: raise NotImplementedError(read_hdf_error_msg) from ..multi import concat return", "columns (default is None) chunksize : positive integer, optional Maximal", "return delayed([Delayed(k, dsk) for k in keys]) dont_use_fixed_error_message = \"\"\"", "integer\") if (start != 0 or stop is not None)", "(start != 0 or stop is not None) and sorted_index:", "1)[0] for start in range(0, storer.nrows, chunksize)] division_end = storer.read_column('index',", "to_parquet: \"\"\" name = 'to-hdf-' + uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type,", "order of its input. if not (single_file and single_node): formatted_names", "# doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP", "'a', but the file must already exist. 
Returns ------- dask.DataFrame", "not None) and len(paths) > 1: raise NotImplementedError(read_hdf_error_msg) if chunksize", "(_pd_to_hdf, pd_to_hdf, lock, [(df._name, i), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs2)", "path and one key (which should not contain any wildcards).", "update(s))) for i, s in enumerate(range(start, stop, chunksize))) if division:", "def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock): \"\"\"", "1) # we guarantee partition order is preserved when its", "limit the return columns (default is None) chunksize : positive", "stops.append(storer.nrows) elif stop > storer.nrows: raise ValueError(\"Stop keyword exceeds dataset", "accepted in file \" \"path and dataset key\") fmt_obj =", "function is like ``pandas.read_hdf``, except it can read from a", "else: return delayed([Delayed(k, dsk) for k in keys]) dont_use_fixed_error_message =", "data for each matched key. \"\"\" with pd.HDFStore(path, mode=mode) as", "lock lock = True else: lock = False if lock:", "of the Pandas function of the same name. Please see", "i_name), key.replace('*', i_name)], kwargs) kwargs2 = kwargs.copy() if single_file: kwargs2['mode']", "about shared keyword arguments. 
This function differs from the Pandas", "import os import uuid from warnings import warn import pandas", "in ('a', 'w', 'r+'): raise ValueError(\"Mode must be one of", "dict(((name, i), (_pd_read_hdf, path, key, lock, update(s))) for i, s", "os.path.getmtime(path), key, start, stop, empty, chunksize, division)) name = 'read-hdf-'", "pre-partitioned data, data must be \" \"read in its entirety", "with pd.HDFStore(path, mode=mode) as hdf: keys = [k for k", "positive integer\") if (start != 0 or stop is not", "i_name)) for i in range(1, df.npartitions): i_name = name_function(i) task", "number ({})\".format(start, stop)) def update(s): new = base.copy() new.update({'start': s,", "None: empty = empty[columns] token = tokenize((path, os.path.getmtime(path), key, start,", "from __future__ import absolute_import, division, print_function from fnmatch import fnmatch", "pd.read_hdf(path, key, **kwargs) finally: if lock: lock.release() return result def", "it could be interpreted as the starting and stopping index", "default to the # sequential scheduler. otherwise let the _get", "this parallelism with an asterix ``*`` within the filename or", "order between partitions name_function \" \"must preserve the order of", "multiple files >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5',", "immediately. If False then this returns a ``dask.Delayed`` value. lock:", "and if the file does not exist it is created.", "number \" \"of rows ({})\".format(storer.nrows)) else: stops.append(stop) if sorted_index: division", "docstring for more detailed information about shared keyword arguments. This", "result of calling ``name_function`` on each of those integers. This", "target filename. 
May contain a ``*`` to denote many filenames", "key, start=0, stop=None, columns=None, chunksize=1000000, sorted_index=False, lock=True, mode='a'): \"\"\" Read", "\"\"\" if lock: lock.acquire() try: result = pd.read_hdf(path, key, **kwargs)", ">>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP Returns ------- None:", "compute: compute_as_if_collection(DataFrame, dsk, keys, get=get, scheduler=scheduler, **dask_kwargs) return filenames else:", "= pd.read_hdf(path, key, **kwargs) finally: if lock: lock.release() return result", "in keys]) dont_use_fixed_error_message = \"\"\" This HDFStore is not partitionable", "we enforce name_function to maintain the order of its input.", "if sorted_index: division = [storer.read_column('index', start=start, stop=start + 1)[0] for", "\" \"must preserve the order of its input\") # If", "if lock: lock.release() return result def read_hdf(pattern, key, start=0, stop=None,", "= (_pd_to_hdf, pd_to_hdf, lock, [(df._name, i), fmt_obj(path, i_name), key.replace('*', i_name)],", "same file. You may specify this parallelism with an asterix", "[] divisions = [] for k in keys: storer =", "single_file = False else: if key.count('*') > 1: raise ValueError(\"A", "empty.name, 'mode': mode} else: base = {'columns': empty.columns, 'mode': mode}", "to convert the ``*`` in the above options to a", "This uses the hdf file identified by the given path.", "return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock) for", "if isinstance(pattern, str): paths = sorted(glob(pattern)) else: paths = pattern", "the Pandas version by saving the many partitions of a", "identified by the given path. 
Also get the index of", "in enumerate(range(start, stop, chunksize))) if division: divisions = division else:", "str): paths = sorted(glob(pattern)) else: paths = pattern if (start", "dict() i_name = name_function(0) dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,", "by saving the many partitions of a Dask DataFrame in", "with the multiprocessing # scheduler we don't need to lock", "``name_function`` on each of those integers. This function only supports", "pandas as pd from toolz import merge from .io import", "is sequential default to the # sequential scheduler. otherwise let", "\"path and dataset key\") fmt_obj = lambda path, i_name: path.replace('*',", "Can contain wildcards. key : group identifier in the store.", "concat return concat([_read_single_hdf(path, key, start=start, stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock,", "your dataset can be parallelized\"\"\" read_hdf_error_msg = \"\"\" The start", "= division else: divisions = [None] * (len(dsk) + 1)", "uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type, 'to_hdf') single_file = True single_node =", "glob import os import uuid from warnings import warn import", "lock is required. See dask.utils.get_scheduler_lock for more information about lock", "writing, and if the file does not exist it is", "import get_scheduler from ..core import DataFrame, new_dd_object from ... import", "as pd from toolz import merge from .io import _link", "**dask_kwargs) return filenames else: return delayed([Delayed(k, dsk) for k in", "at columns : list of columns, optional A list of", "a single entity _actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler) if lock", "that enables locking\"\"\" if lock: lock.acquire() try: pd_to_hdf(*args, **kwargs) finally:", "on your scheduler if a lock is required. 
See dask.utils.get_scheduler_lock", "= name_function(i) filenames.append(fmt_obj(path, i_name)) for i in range(1, df.npartitions): i_name", "scheduler we don't need to lock lock = True else:", "and single_node: keys = [(name, df.npartitions - 1)] else: keys", "of file paths. Can contain wildcards. key : group identifier", "stop is None: stops.append(storer.nrows) elif stop > storer.nrows: raise ValueError(\"Stop", "its entirety using the same chunksizes\") from ..multi import concat", "into a dask.dataframe. Used for each file in read_hdf. \"\"\"", "files, or from multiple keys from the same file. Parameters", "from ...bytes.utils import build_name_function from ...compatibility import PY3 from ...delayed", "= [] for k in keys: storer = hdf.get_storer(k) if", "0 to the number of partitions and return a string.", "sorted_index, chunksize) if (start != 0 or stop is not", "'/x') # doctest: +SKIP Load multiple files >>> dd.read_hdf('myfile.*.hdf5', '/x')", "path, key, mode='a', append=False, get=None, scheduler=None, name_function=None, compute=True, lock=None, dask_kwargs={},", "append=False, get=None, scheduler=None, name_function=None, compute=True, lock=None, dask_kwargs={}, **kwargs): \"\"\" Store", "You may specify this parallelism with an asterix ``*`` within", "divisions) keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize)", "the multiprocessing scheduler: >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP", "\" \"path and dataset key\") fmt_obj = lambda path, i_name:", "try: result = pd.read_hdf(path, key, **kwargs) finally: if lock: lock.release()", "'/*') # doctest: +SKIP \"\"\" if lock is True: lock", "information Examples -------- Save Data to a single file >>>", "from more than one file/dataset. 
The combination is ambiguous because", "\"\"\" Read from hdf5 file with a lock \"\"\" if", "warn(\"To preserve order between partitions name_function \" \"must preserve the", "= (_link, (name, link_dep), task) dsk[(name, i)] = task dsk", "import date, timedelta >>> base = date(year=2000, month=1, day=1) >>>", "is similar to 'a', but the file must already exist.", "import concat return concat([one_path_one_key(path, k, start, s, columns, chunksize, d,", "in the above options to a string. Should take in", "dask dataframe. This function is like ``pandas.read_hdf``, except it can", "'myfile.2.hdf5'], '/x') # doctest: +SKIP Load multiple datasets >>> dd.read_hdf('myfile.1.hdf5',", "sequence of integers starting from ``0`` or with the result", "None) and len(paths) > 1: raise NotImplementedError(read_hdf_error_msg) if chunksize <=", "need to lock lock = True else: lock = False", "keys = [(name, df.npartitions - 1)] else: keys = [(name,", "[k for k in hdf.keys() if fnmatch(k, key)] stops =", "choose the scheduler if (get is None and not config.get('get',", "selection. **other: See pandas.to_hdf for more information Examples -------- Save", "'table' format in hdf files.\") if mode not in ('a',", "match the given key, which can contain wildcards. This uses", "key: single_node = False if 'format' in kwargs and kwargs['format']", "denote many locations name_function: function A function to convert the", "the store. Can contain wildcards start : optional, integer (defaults", "key)] stops = [] divisions = [] for k in", "Also -------- read_hdf: to_parquet: \"\"\" name = 'to-hdf-' + uuid.uuid1().hex", "s, columns, chunksize, d, lock) for k, s, d in", "Save data to multiple datapaths within the same file: >>>", "etc.. 
>>> from datetime import date, timedelta >>> base =", "...base import get_scheduler from ..core import DataFrame, new_dd_object from ...", "lock=None, dask_kwargs={}, **kwargs): \"\"\" Store Dask Dataframe to Hierarchical Data", "is accepted in \" \"dataset key\") fmt_obj = lambda path,", "not config.get('get', None) and scheduler is None and not config.get('scheduler',", "base.copy() new.update({'start': s, 'stop': s + chunksize}) return new dsk", "lock: lock.acquire() try: pd_to_hdf(*args, **kwargs) finally: if lock: lock.release() return", "\" \"read in its entirety using the same chunksizes\") from", "'w', 'r+'): raise ValueError(\"Mode must be one of 'a', 'w'", "dsk = dict(((name, i), (_pd_read_hdf, path, key, lock, update(s))) for", "each file in read_hdf. \"\"\" def get_keys_stops_divisions(path, key, stop, sorted_index,", "be \" \"read in its entirety using the same chunksizes\")", "from ... import config, multiprocessing from ...base import tokenize, compute_as_if_collection", "get_keys_stops_divisions(path, key, stop, sorted_index, chunksize) if (start != 0 or", "more information about lock selection. 
**other: See pandas.to_hdf for more", "import glob import os import uuid from warnings import warn", "lock = True elif not single_file and _actual_get is not", "not partitionable and can only be use monolithically with pandas.", "empty, divisions) keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index,", "equal to stop \" \"row number ({})\".format(start, stop)) def update(s):", "= [(name, df.npartitions - 1)] else: keys = [(name, i)", "range(df.npartitions)] if formatted_names != sorted(formatted_names): warn(\"To preserve order between partitions", "key, start=0, stop=None, columns=None, chunksize=int(1e6), sorted_index=False, lock=None, mode='a'): \"\"\" Read", "delayed from ...utils import get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):", "chunksize, d, lock) for k, s, d in zip(keys, stops,", "row number to stop at columns : list of columns,", "!= sorted(formatted_names): warn(\"To preserve order between partitions name_function \" \"must", "from multiple keys from the same file. Parameters ---------- pattern", "None: stops.append(storer.nrows) elif stop > storer.nrows: raise ValueError(\"Stop keyword exceeds", "multiple files, or from multiple keys from the same file.", "function to convert the ``*`` in the above options to", "at stop : optional, integer (defaults to None, the last", "row), row number to stop at columns : list of", "= [name_function(i) for i in range(df.npartitions)] if formatted_names != sorted(formatted_names):", "is True). mode : {'a', 'r', 'r+'}, default 'a'. Mode", "stop is not None) and len(keys) > 1: raise NotImplementedError(read_hdf_error_msg)", "enumerate(range(start, stop, chunksize))) if division: divisions = division else: divisions", "based on whether we're writing to a single entity _actual_get", "by the given path. Also get the index of the", "lock selection. 
**other: See pandas.to_hdf for more information Examples --------", "k, s, d in zip(keys, stops, divisions)]) def _pd_read_hdf(path, key,", "single_file and single_node: keys = [(name, df.npartitions - 1)] else:", "empty.columns, 'mode': mode} if start >= stop: raise ValueError(\"Start row", "kwargs2['append'] = True filenames = [] for i in range(0,df.npartitions):", "hdf file into a dask.dataframe. Used for each file in", "can read from a single large file, or from multiple", "\"\"\" Get the data frame corresponding to one path and", "False then this returns a ``dask.Delayed`` value. lock: Lock, optional", "asterisk is accepted in file \" \"path and dataset key\")", "DataFrame, new_dd_object from ... import config, multiprocessing from ...base import", "start in range(0, storer.nrows, chunksize)] division_end = storer.read_column('index', start=storer.nrows -", "doctest: +SKIP Load multiple datasets >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest:", "the data frame corresponding to one path and one key", "is True: lock = get_scheduler_lock() key = key if key.startswith('/')", "wildcards). 
\"\"\" empty = pd.read_hdf(path, key, mode=mode, stop=0) if columns", "wrapper function around pd_to_hdf that enables locking\"\"\" if lock: lock.acquire()", "starting and stopping index of the global dataset.\"\"\" def _read_single_hdf(path,", "True single_node = True # if path is string, format", "new_dd_object(dsk, name, empty, divisions) keys, stops, divisions = get_keys_stops_divisions(path, key,", "lock is True: lock = get_scheduler_lock() key = key if", "else: if key.count('*') > 1: raise ValueError(\"A maximum of one", "range(0,df.npartitions): i_name = name_function(i) filenames.append(fmt_obj(path, i_name)) for i in range(1,", "import PY3 from ...delayed import Delayed, delayed from ...utils import", "import warn import pandas as pd from toolz import merge", "storer.format_type != 'table': raise TypeError(dont_use_fixed_error_message) if stop is None: stops.append(storer.nrows)", "None) chunksize : positive integer, optional Maximal number of rows", "detailed information about shared keyword arguments. This function differs from", "path is string, format using i_name if isinstance(path, str): if", "must be \" \"read in its entirety using the same", "mode=mode, stop=0) if columns is not None: empty = empty[columns]", "\"\"\" Read HDF files into a Dask DataFrame Read hdf", "a single file >>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP Save", "maintain the order of its input. if not (single_file and", "False See Also -------- read_hdf: to_parquet: \"\"\" name = 'to-hdf-'", "if single_file: link_dep = i - 1 if single_node else", "compute_as_if_collection(DataFrame, dsk, keys, get=get, scheduler=scheduler, **dask_kwargs) return filenames else: return", "``*`` to denote many filenames key: string Datapath within the", "(see examples below) compute: bool Whether or not to execute", "if lock is None: if not single_node: lock = True", "scheme. This writes files as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.. 
>>>", "a single large file, or from multiple files, or from", "a ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock`` will be used depending on", "lock, args, kwargs=None): \"\"\" A wrapper function around pd_to_hdf that", "The asterix will be replaced with an increasing sequence of", "the given path. Also get the index of the last", "and kwargs['format'] not in ['t', 'table']: raise ValueError(\"Dask only support", "(start != 0 or stop is not None) and len(keys)", "datasets >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP \"\"\" if lock", "k, start, s, columns, chunksize, d, lock) for k, s,", "from .io import _link from ...base import get_scheduler from ..core", "custom naming scheme. This writes files as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf',", "True). mode : {'a', 'r', 'r+'}, default 'a'. Mode to", "+SKIP Specify custom naming scheme. This writes files as '2000-01-01.hdf',", "be a positive integer\") if (start != 0 or stop", "date, timedelta >>> base = date(year=2000, month=1, day=1) >>> def", "contain wildcards. This uses the hdf file identified by the", "get the index of the last row of data for", "+SKIP \"\"\" if lock is True: lock = get_scheduler_lock() key", "0)] = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, 0), fmt_obj(path, i_name), key.replace('*',", "lock): \"\"\" Get the data frame corresponding to one path", "to denote many locations name_function: function A function to convert", "(_pd_to_hdf, pd_to_hdf, lock, [(df._name, 0), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs)", "result = pd.read_hdf(path, key, **kwargs) finally: if lock: lock.release() return", "Should take in a number from 0 to the number", "\"dataset key\") fmt_obj = lambda path, _: path if '*'", "kwargs and kwargs['format'] not in ['t', 'table']: raise ValueError(\"Dask only", "'format' in kwargs and kwargs['format'] not in ['t', 'table']: raise", "and an optional ``name_function``. 
The asterix will be replaced with", "if single_file: kwargs2['mode'] = 'a' if single_node: kwargs2['append'] = True", "function of the same name. Please see the Pandas docstring", "ValueError(\"Dask only support 'table' format in hdf files.\") if mode", "if (start != 0 or stop is not None) and", "'w' or 'r+'\") if name_function is None: name_function = build_name_function(df.npartitions", "user did not specify scheduler and write is sequential default", "keys = [(name, i) for i in range(df.npartitions)] if compute:", "and scheduler is None and not config.get('scheduler', None) and single_node", "stop=None, columns=None, chunksize=1000000, sorted_index=False, lock=True, mode='a'): \"\"\" Read HDF files", "function A function to convert the ``*`` in the above", "# doctest: +SKIP Save data to multiple files, using the", "monolithically with pandas. In the future when creating HDFStores use", "base + timedelta(days=i) >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP", "number from 0 to the number of partitions and return", "or equal to stop \" \"row number ({})\".format(start, stop)) def", "data can be modified. 'a' Append; an existing file is", "integer (defaults to None, the last row), row number to", "path if '*' in key: single_node = False if 'format'", "list of columns that if not None, will limit the", "each of those integers. This function only supports the Pandas", "0: raise ValueError(\"Chunksize must be a positive integer\") if (start", "df.npartitions - 1)] else: keys = [(name, i) for i", "[(df._name, 0), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs) kwargs2 = kwargs.copy()", "your scheduler if a lock is required. See dask.utils.get_scheduler_lock for", "lock default based on whether we're writing to a single", "chunksize) if (start != 0 or stop is not None)", "\"\"\" Read a single hdf file into a dask.dataframe. 
Used", "identifiers which match the given key, which can contain wildcards.", "_link from ...base import get_scheduler from ..core import DataFrame, new_dd_object", "then this returns a ``dask.Delayed`` value. lock: Lock, optional Lock", "not to execute immediately. If False then this returns a", "See Also -------- read_hdf: to_parquet: \"\"\" name = 'to-hdf-' +", "not config.get('scheduler', None) and single_node and single_file): scheduler = 'single-threaded'", "Read a single hdf file into a dask.dataframe. Used for", "zip(keys, stops, divisions)]) def _pd_read_hdf(path, key, lock, kwargs): \"\"\" Read", "the above options to a string. Should take in a", "Also get the index of the last row of data", "division.append(division_end) divisions.append(division) else: divisions.append(None) return keys, stops, divisions def one_path_one_key(path,", "1)] else: keys = [(name, i) for i in range(df.npartitions)]", "writes files as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.. >>> from datetime", "chunksize=int(1e6), sorted_index=False, lock=None, mode='a'): \"\"\" Read a single hdf file", "import _link from ...base import get_scheduler from ..core import DataFrame,", "used depending on your scheduler if a lock is required.", "mode : {'a', 'r', 'r+'}, default 'a'. Mode to use", ": boolean, optional Option to use a lock to prevent", "dataset number \" \"of rows ({})\".format(storer.nrows)) else: stops.append(stop) if sorted_index:", "...utils import get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None): \"\"\" A", "'to_hdf') single_file = True single_node = True # if path", "if not None, will limit the return columns (default is", "'/data') # doctest: +SKIP Save data to multiple datapaths within", "= date(year=2000, month=1, day=1) >>> def name_function(i): ... 
''' Convert", "per file, or starting and stopping index of the global", "path, key, lock, update(s))) for i, s in enumerate(range(start, stop,", "get_scheduler from ..core import DataFrame, new_dd_object from ... import config,", "stop : optional, integer (defaults to None, the last row),", "d in zip(keys, stops, divisions)]) def _pd_read_hdf(path, key, lock, kwargs):", "\"\"\" with pd.HDFStore(path, mode=mode) as hdf: keys = [k for", "file \" \"path and dataset key\") fmt_obj = lambda path,", "File pattern (string), buffer to read from, or list of", "row number to start at stop : optional, integer (defaults", "'a' Append; an existing file is opened for reading and", ".io import _link from ...base import get_scheduler from ..core import", "df.to_hdf('output.hdf', '/data') # doctest: +SKIP Save data to multiple datapaths", "import config, multiprocessing from ...base import tokenize, compute_as_if_collection from ...bytes.utils", "True else: lock = False if lock: lock = get_scheduler_lock(get,", "See pandas.to_hdf for more information Examples -------- Save Data to", "version by saving the many partitions of a Dask DataFrame", "'to-hdf-' + uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type, 'to_hdf') single_file = True", "read_hdf: to_parquet: \"\"\" name = 'to-hdf-' + uuid.uuid1().hex pd_to_hdf =", "partition order is preserved when its saved and read #", "dataset can be parallelized\"\"\" read_hdf_error_msg = \"\"\" The start and", "for more information about lock selection. 
**other: See pandas.to_hdf for", "get_scheduler(get=get, collections=[df], scheduler=scheduler) if lock is None: if not single_node:", "to None, the last row), row number to stop at", "True delayed value: if compute == False See Also --------", "multiple files: >>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP Save data", "lock: lock = get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format': 'table', 'mode': mode,", "import Delayed, delayed from ...utils import get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock,", "file paths. Can contain wildcards. key : group identifier in", "[(name, i) for i in range(df.npartitions)] if compute: compute_as_if_collection(DataFrame, dsk,", "path.count('*') + key.count('*') > 1: raise ValueError(\"A maximum of one", "as hdf: keys = [k for k in hdf.keys() if", "1: raise ValueError(\"A maximum of one asterisk is accepted in", "if division: divisions = division else: divisions = [None] *", "many files, or to many datasets within the same file.", "not (single_file and single_node): formatted_names = [name_function(i) for i in", "sequential default to the # sequential scheduler. otherwise let the", "dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') #", "if '*' in key: single_node = False if 'format' in", "not supported when reading from more than one file/dataset. 
The", "partitions of a Dask DataFrame in parallel, either to many", "> 1: raise ValueError(\"A maximum of one asterisk is accepted", "stopping index of the global dataset.\"\"\" def _read_single_hdf(path, key, start=0,", "and stopping index of the global dataset.\"\"\" def _read_single_hdf(path, key,", "division_end = storer.read_column('index', start=storer.nrows - 1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else:", "asterisk is accepted in \" \"dataset key\") fmt_obj = lambda", "for path in paths]) if PY3: from ..core import _Frame", "chunksize, division, lock): \"\"\" Get the data frame corresponding to", "is not multiprocessing.get: # if we're writing to multiple files", "Maximal number of rows per partition (default is 1000000). sorted_index", "multiple datasets >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP \"\"\" if", "ValueError(\"Stop keyword exceeds dataset number \" \"of rows ({})\".format(storer.nrows)) else:", "in the store. Can contain wildcards start : optional, integer", "doctest: +SKIP Save data to multiple files: >>> df.to_hdf('output-*.hdf', '/data')", "**kwargs): \"\"\" Store Dask Dataframe to Hierarchical Data Format (HDF)", "empty.ndim == 1: base = {'name': empty.name, 'mode': mode} else:", "if 'format' in kwargs and kwargs['format'] not in ['t', 'table']:", "read_hdf(pattern, key, start=0, stop=None, columns=None, chunksize=1000000, sorted_index=False, lock=True, mode='a'): \"\"\"", "pd_to_hdf that enables locking\"\"\" if lock: lock.acquire() try: pd_to_hdf(*args, **kwargs)", "like ``pandas.read_hdf``, except it can read from a single large", "lock: Lock, optional Lock to use to prevent concurrency issues.", "ValueError(\"Start row number ({}) is above or equal to stop", "filename. 
May contain a ``*`` to denote many filenames key:", "saved and read # so we enforce name_function to maintain", "= \"\"\" The start and stop keywords are not supported", "from ...base import tokenize, compute_as_if_collection from ...bytes.utils import build_name_function from", "to one path and one key (which should not contain", "HDFStore is not partitionable and can only be use monolithically", ">>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP Save data to multiple", "Load multiple files >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP >>>", "pattern if (start != 0 or stop is not None)", "\"read in its entirety using the same chunksizes\") from ..multi", "examples below) compute: bool Whether or not to execute immediately.", "i in range(df.npartitions)] if compute: compute_as_if_collection(DataFrame, dsk, keys, get=get, scheduler=scheduler,", ">>> from datetime import date, timedelta >>> base = date(year=2000,", "saving the many partitions of a Dask DataFrame in parallel,", "Examples -------- Save Data to a single file >>> df.to_hdf('output.hdf',", "Read-only; no data can be modified. 'a' Append; an existing", "['t', 'table']: raise ValueError(\"Dask only support 'table' format in hdf", "optional ``name_function``. The asterix will be replaced with an increasing", "the order of its input. if not (single_file and single_node):", "...compatibility import PY3 from ...delayed import Delayed, delayed from ...utils", "= i - 1 if single_node else 0 task =", "Load single file >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP Load", "from ``0`` or with the result of calling ``name_function`` on", "``*`` within the filename or datapath, and an optional ``name_function``.", "the # sequential scheduler. 
otherwise let the _get method choose", "start, s, columns, chunksize, d, lock) for k, s, d", "the index of the last row of data for each", "not multiprocessing.get: # if we're writing to multiple files with", "or with the result of calling ``name_function`` on each of", "i), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs2) if single_file: link_dep =", "use when opening file(s). 'r' Read-only; no data can be", "It is similar to 'a', but the file must already", "name_function to maintain the order of its input. if not", "multiprocessing # scheduler we don't need to lock lock =", "exist. Returns ------- dask.DataFrame Examples -------- Load single file >>>", "than one file/dataset. The combination is ambiguous because it could", "key, which can contain wildcards. This uses the hdf file", "from hdf5 file with a lock \"\"\" if lock: lock.acquire()", "Read hdf files into a dask dataframe. This function is", "if columns is not None: empty = empty[columns] token =", "if single_node else 0 task = (_link, (name, link_dep), task)", "reading and writing, and if the file does not exist", "key\") fmt_obj = lambda path, i_name: path.replace('*', i_name) if '*'", "finally: if lock: lock.release() return result def read_hdf(pattern, key, start=0,", "kwargs2['mode'] = 'a' if single_node: kwargs2['append'] = True filenames =", ": optional, integer (defaults to 0), row number to start", "compute == False See Also -------- read_hdf: to_parquet: \"\"\" name", "no data can be modified. 'a' Append; an existing file", "warnings import warn import pandas as pd from toolz import", "return base + timedelta(days=i) >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest:", "s in enumerate(range(start, stop, chunksize))) if division: divisions = division", "**other: See pandas.to_hdf for more information Examples -------- Save Data", "reading from more than one file/dataset. 
The combination is ambiguous", "to ensure that your dataset can be parallelized\"\"\" read_hdf_error_msg =", "lock=True, mode='a'): \"\"\" Read HDF files into a Dask DataFrame", "in file \" \"path and dataset key\") fmt_obj = lambda", "+ token if empty.ndim == 1: base = {'name': empty.name,", "boolean, optional Option to specify whether or not the input", "# doctest: +SKIP Specify custom naming scheme. This writes files", "..multi import concat return concat([_read_single_hdf(path, key, start=start, stop=stop, columns=columns, chunksize=chunksize,", "the global dataset.\"\"\" def _read_single_hdf(path, key, start=0, stop=None, columns=None, chunksize=int(1e6),", "({})\".format(start, stop)) def update(s): new = base.copy() new.update({'start': s, 'stop':", "= False if 'format' in kwargs and kwargs['format'] not in", "def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None, name_function=None, compute=True,", "(default is True). mode : {'a', 'r', 'r+'}, default 'a'.", "The start and stop keywords are not supported when reading", "d, lock) for k, s, d in zip(keys, stops, divisions)])", "and stopping index per file, or starting and stopping index", "not in ('a', 'w', 'r+'): raise ValueError(\"Mode must be one", "sorted_index: raise ValueError(\"When assuming pre-partitioned data, data must be \"", "ValueError(\"When assuming pre-partitioned data, data must be \" \"read in", "lock, [(df._name, i), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs2) if single_file:", "with a lock \"\"\" if lock: lock.acquire() try: result =", "only support 'table' format in hdf files.\") if mode not", "to a single entity _actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler) if", "raise NotImplementedError(read_hdf_error_msg) if chunksize <= 0: raise ValueError(\"Chunksize must be", "data must be \" \"read in its entirety using the", "to a string. Should take in a number from 0", "below) compute: bool Whether or not to execute immediately. 
If", "parallel, either to many files, or to many datasets within", "# doctest: +SKIP Load multiple datasets >>> dd.read_hdf('myfile.1.hdf5', '/*') #", "[name_function(i) for i in range(df.npartitions)] if formatted_names != sorted(formatted_names): warn(\"To", "storer.nrows, chunksize)] division_end = storer.read_column('index', start=storer.nrows - 1, stop=storer.nrows)[0] division.append(division_end)", "last row of data for each matched key. \"\"\" with", "multiple datapaths within the same file: >>> df.to_hdf('output.hdf', '/data-*') #", "store. Can contain wildcards start : optional, integer (defaults to", "to Hierarchical Data Format (HDF) files This is a parallel", "single file >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP Load multiple", "datapaths within the same file: >>> df.to_hdf('output.hdf', '/data-*') # doctest:", "# doctest: +SKIP Save data to multiple files: >>> df.to_hdf('output-*.hdf',", "= empty[columns] token = tokenize((path, os.path.getmtime(path), key, start, stop, empty,", "to use a lock to prevent concurrency issues (default is", "stop, chunksize))) if division: divisions = division else: divisions =", "Get the \"keys\" or group identifiers which match the given", "scheduler if (get is None and not config.get('get', None) and", "i)] = task dsk = merge(df.dask, dsk) if single_file and", "files into a Dask DataFrame Read hdf files into a", ": list of columns, optional A list of columns that", "concat([_read_single_hdf(path, key, start=start, stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode) for", "files into a dask dataframe. This function is like ``pandas.read_hdf``,", "ValueError(\"A maximum of one asterisk is accepted in \" \"dataset", "single_node: lock = True elif not single_file and _actual_get is", "of a Dask DataFrame in parallel, either to many files,", "write is sequential default to the # sequential scheduler. otherwise", "or list of file paths. Can contain wildcards. 
key :", "divisions = division else: divisions = [None] * (len(dsk) +", "is None: if not single_node: lock = True elif not", "kwargs.copy() if single_file: kwargs2['mode'] = 'a' if single_node: kwargs2['append'] =", "files: >>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP Save data to", "not None) and len(keys) > 1: raise NotImplementedError(read_hdf_error_msg) from ..multi", "raise ValueError(\"When assuming pre-partitioned data, data must be \" \"read", "try: pd_to_hdf(*args, **kwargs) finally: if lock: lock.release() return None def", "'2000-01-03.hdf', etc.. >>> from datetime import date, timedelta >>> base", "sequential scheduler. otherwise let the _get method choose the scheduler", "storer.read_column('index', start=storer.nrows - 1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else: divisions.append(None) return", "into a dask dataframe. This function is like ``pandas.read_hdf``, except", "to many datasets within the same file. You may specify", "read_hdf_error_msg = \"\"\" The start and stop keywords are not", "have a sorted index (default is False). lock : boolean,", "key = key if key.startswith('/') else '/' + key if", "differs from the Pandas version by saving the many partitions", "> storer.nrows: raise ValueError(\"Stop keyword exceeds dataset number \" \"of", "= True else: lock = False if lock: lock =", "from ...compatibility import PY3 from ...delayed import Delayed, delayed from", "be modified. 'a' Append; an existing file is opened for", "should not contain any wildcards). \"\"\" empty = pd.read_hdf(path, key,", "s + chunksize}) return new dsk = dict(((name, i), (_pd_read_hdf,", "method choose the scheduler if (get is None and not", "the more specialized ``'fixed'`` format. 
Parameters ---------- path: string Path", "keyword exceeds dataset number \" \"of rows ({})\".format(storer.nrows)) else: stops.append(stop)", "compute == True delayed value: if compute == False See", "one of 'a', 'w' or 'r+'\") if name_function is None:", "In the future when creating HDFStores use the ``format='table'`` option", "mode='a'): \"\"\" Read a single hdf file into a dask.dataframe.", "mode=mode) as hdf: keys = [k for k in hdf.keys()", "for more detailed information about shared keyword arguments. This function", "name. Please see the Pandas docstring for more detailed information", "= \"\"\" This HDFStore is not partitionable and can only", "'2000-01-02.hdf', '2000-01-03.hdf', etc.. >>> from datetime import date, timedelta >>>", "0 to n to a string ''' ... return base", "if empty.ndim == 1: base = {'name': empty.name, 'mode': mode}", "file with a lock \"\"\" if lock: lock.acquire() try: result", "divisions def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):", "link_dep), task) dsk[(name, i)] = task dsk = merge(df.dask, dsk)", "files. May contain a ``*`` to denote many locations name_function:", "(default is False). lock : boolean, optional Option to use", "and len(keys) > 1: raise NotImplementedError(read_hdf_error_msg) from ..multi import concat", "('a', 'w', 'r+'): raise ValueError(\"Mode must be one of 'a',", "key if isinstance(pattern, str): paths = sorted(glob(pattern)) else: paths =", "lock=lock, mode=mode) for path in paths]) if PY3: from ..core", "value: if compute == False See Also -------- read_hdf: to_parquet:", "to execute immediately. If False then this returns a ``dask.Delayed``", "merge(df.dask, dsk) if single_file and single_node: keys = [(name, df.npartitions", "start and stop keywords are not supported when reading from", "already exist. 
Returns ------- dask.DataFrame Examples -------- Load single file", "row number ({}) is above or equal to stop \"", "(start != 0 or stop is not None) and len(paths)", "to read from, or list of file paths. Can contain", "single_node = True # if path is string, format using", "number of partitions and return a string. (see examples below)", "return new_dd_object(dsk, name, empty, divisions) keys, stops, divisions = get_keys_stops_divisions(path,", ">>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP Load multiple files >>>", "keys = [k for k in hdf.keys() if fnmatch(k, key)]", "= get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format': 'table', 'mode': mode, 'append': append})", "pd from toolz import merge from .io import _link from", ": group identifier in the store. Can contain wildcards start", "return a string. (see examples below) compute: bool Whether or", "file into a dask.dataframe. Used for each file in read_hdf.", "i in range(0,df.npartitions): i_name = name_function(i) filenames.append(fmt_obj(path, i_name)) for i", "timedelta(days=i) >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP Returns -------", "key: string Datapath within the files. May contain a ``*``", "Dask Dataframe to Hierarchical Data Format (HDF) files This is", "function around pd_to_hdf that enables locking\"\"\" if lock: lock.acquire() try:", "delayed([Delayed(k, dsk) for k in keys]) dont_use_fixed_error_message = \"\"\" This", "for i in range(df.npartitions)] if formatted_names != sorted(formatted_names): warn(\"To preserve", "we're writing to multiple files with the multiprocessing # scheduler", "format in hdf files.\") if mode not in ('a', 'w',", "stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else: divisions.append(None) return keys, stops, divisions def", "the ``format='table'`` option to ensure that your dataset can be", "issues (default is True). 
mode : {'a', 'r', 'r+'}, default", "single_node: kwargs2['append'] = True filenames = [] for i in", "must already exist. Returns ------- dask.DataFrame Examples -------- Load single", "stop: raise ValueError(\"Start row number ({}) is above or equal", "name = 'to-hdf-' + uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type, 'to_hdf') single_file", "from glob import glob import os import uuid from warnings", "pd_to_hdf, lock, [(df._name, 0), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs) kwargs2", "= False if lock: lock = get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format':", "dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP Load multiple files >>> dd.read_hdf('myfile.*.hdf5',", "exist it is created. 'r+' It is similar to 'a',", "global dataset.\"\"\" def _read_single_hdf(path, key, start=0, stop=None, columns=None, chunksize=int(1e6), sorted_index=False,", "Lock to use to prevent concurrency issues. By default a", "must be a positive integer\") if (start != 0 or", "# if path is string, format using i_name if isinstance(path,", "to start at stop : optional, integer (defaults to None,", "number to start at stop : optional, integer (defaults to", "A function to convert the ``*`` in the above options", "to lock lock = True else: lock = False if", "= [k for k in hdf.keys() if fnmatch(k, key)] stops", "convert the ``*`` in the above options to a string.", "optional A list of columns that if not None, will", "df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP Returns ------- None: if", "PY3 from ...delayed import Delayed, delayed from ...utils import get_scheduler_lock", "import merge from .io import _link from ...base import get_scheduler", "scheduler=scheduler) kwargs.update({'format': 'table', 'mode': mode, 'append': append}) dsk = dict()", "or 'r+'\") if name_function is None: name_function = build_name_function(df.npartitions -", "[(name, df.npartitions - 1)] else: keys = [(name, i) for", "about lock 
selection. **other: See pandas.to_hdf for more information Examples", "given key, which can contain wildcards. This uses the hdf", "stop > storer.nrows: raise ValueError(\"Stop keyword exceeds dataset number \"", "if isinstance(path, str): if path.count('*') + key.count('*') > 1: raise", "path. Also get the index of the last row of", "many datasets within the same file. You may specify this", "key, **kwargs) finally: if lock: lock.release() return result def read_hdf(pattern,", "a target filename. May contain a ``*`` to denote many", "\"\"\" name = 'to-hdf-' + uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type, 'to_hdf')", "any wildcards). \"\"\" empty = pd.read_hdf(path, key, mode=mode, stop=0) if", "len(paths) > 1: raise NotImplementedError(read_hdf_error_msg) if chunksize <= 0: raise", "may specify this parallelism with an asterix ``*`` within the", "a Dask DataFrame Read hdf files into a dask dataframe.", "to many files, or to many datasets within the same", "from, or list of file paths. Can contain wildcards. key", "we guarantee partition order is preserved when its saved and", "pd.HDFStore(path, mode=mode) as hdf: keys = [k for k in", "data to multiple datapaths within the same file: >>> df.to_hdf('output.hdf',", "# sequential scheduler. otherwise let the _get method choose the", "'r+'}, default 'a'. Mode to use when opening file(s). 'r'", "to prevent concurrency issues (default is True). mode : {'a',", "interpreted as the starting and stopping index per file, or", "path.replace('*', i_name) if '*' in path: single_file = False else:", "sorted_index=False, lock=None, mode='a'): \"\"\" Read a single hdf file into", "integer (defaults to 0), row number to start at stop", "'a', 'w' or 'r+'\") if name_function is None: name_function =", "stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize) if (start", "- 1)] else: keys = [(name, i) for i in", "calling ``name_function`` on each of those integers. 
This function only", "let the _get method choose the scheduler if (get is", "+SKIP Load multiple files >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP", "doctest: +SKIP Load multiple files >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest:", "key, start, stop, columns, chunksize, division, lock): \"\"\" Get the", "+ 1)[0] for start in range(0, storer.nrows, chunksize)] division_end =", "..core import DataFrame, new_dd_object from ... import config, multiprocessing from", "single_node = False if 'format' in kwargs and kwargs['format'] not", "''' Convert integer 0 to n to a string '''", "Parameters ---------- pattern : string, list File pattern (string), buffer", "file(s). 'r' Read-only; no data can be modified. 'a' Append;", "key : group identifier in the store. Can contain wildcards", "if path.count('*') + key.count('*') > 1: raise ValueError(\"A maximum of", "None, the last row), row number to stop at columns", "Whether or not to execute immediately. If False then this", "key, mode='a', append=False, get=None, scheduler=None, name_function=None, compute=True, lock=None, dask_kwargs={}, **kwargs):", "key (which should not contain any wildcards). \"\"\" empty =", "DataFrame Read hdf files into a dask dataframe. This function", "is False). lock : boolean, optional Option to use a", "for i, s in enumerate(range(start, stop, chunksize))) if division: divisions", "and writing, and if the file does not exist it", "of the same name. Please see the Pandas docstring for", "execute immediately. If False then this returns a ``dask.Delayed`` value.", "same file. 
Parameters ---------- pattern : string, list File pattern", "division, print_function from fnmatch import fnmatch from glob import glob", "False if 'format' in kwargs and kwargs['format'] not in ['t',", "= pattern if (start != 0 or stop is not", "handle lock default based on whether we're writing to a", "May contain a ``*`` to denote many locations name_function: function", "fmt_obj = lambda path, i_name: path.replace('*', i_name) if '*' in", "with the result of calling ``name_function`` on each of those", "link_dep = i - 1 if single_node else 0 task", "'2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.. >>> from datetime import date, timedelta", "default a ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock`` will be used depending", "more specialized ``'fixed'`` format. Parameters ---------- path: string Path to", "filenames key: string Datapath within the files. May contain a", "kwargs['format'] not in ['t', 'table']: raise ValueError(\"Dask only support 'table'", "> 1: raise NotImplementedError(read_hdf_error_msg) if chunksize <= 0: raise ValueError(\"Chunksize", "the multiprocessing # scheduler we don't need to lock lock", "(string), buffer to read from, or list of file paths.", "use the ``format='table'`` option to ensure that your dataset can", "to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None, name_function=None, compute=True, lock=None,", "and stop keywords are not supported when reading from more", "mode} if start >= stop: raise ValueError(\"Start row number ({})", "sorted_index, chunksize): \"\"\" Get the \"keys\" or group identifiers which", "the same file. You may specify this parallelism with an", "the result of calling ``name_function`` on each of those integers.", "version of the Pandas function of the same name. 
Please", "raise ValueError(\"Chunksize must be a positive integer\") if (start !=", "[] for k in keys: storer = hdf.get_storer(k) if storer.format_type", "scheduler=scheduler, **dask_kwargs) return filenames else: return delayed([Delayed(k, dsk) for k", "using i_name if isinstance(path, str): if path.count('*') + key.count('*') >", "Pandas version by saving the many partitions of a Dask", "from multiple files, or from multiple keys from the same", "same chunksizes\") from ..multi import concat return concat([_read_single_hdf(path, key, start=start,", "------- dask.DataFrame Examples -------- Load single file >>> dd.read_hdf('myfile.1.hdf5', '/x')", "files >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'],", ": positive integer, optional Maximal number of rows per partition", "is required. See dask.utils.get_scheduler_lock for more information about lock selection.", "= [(name, i) for i in range(df.npartitions)] if compute: compute_as_if_collection(DataFrame,", "def read_hdf(pattern, key, start=0, stop=None, columns=None, chunksize=1000000, sorted_index=False, lock=True, mode='a'):", "filenames.append(fmt_obj(path, i_name)) for i in range(1, df.npartitions): i_name = name_function(i)", "= 'single-threaded' # handle lock default based on whether we're", "path: string Path to a target filename. May contain a", "'mode': mode, 'append': append}) dsk = dict() i_name = name_function(0)", "key.replace('*', i_name)], kwargs) kwargs2 = kwargs.copy() if single_file: kwargs2['mode'] =", "to n to a string ''' ... return base +", "\"\"\" Store Dask Dataframe to Hierarchical Data Format (HDF) files", "whether or not the input hdf files have a sorted", "to prevent concurrency issues. 
By default a ``threading.Lock``, ``multiprocessing.Lock`` or", "string, list File pattern (string), buffer to read from, or", "\"must preserve the order of its input\") # If user", "if (get is None and not config.get('get', None) and scheduler", "division = [storer.read_column('index', start=start, stop=start + 1)[0] for start in", "concurrency issues. By default a ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock`` will", "Can contain wildcards start : optional, integer (defaults to 0),", ">>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP Load multiple datasets", "A list of columns that if not None, will limit", "# If user did not specify scheduler and write is", "when reading from more than one file/dataset. The combination is", "= build_name_function(df.npartitions - 1) # we guarantee partition order is", "i_name = name_function(i) task = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, i),", "single_file): scheduler = 'single-threaded' # handle lock default based on", "None: if compute == True delayed value: if compute ==", "of data for each matched key. \"\"\" with pd.HDFStore(path, mode=mode)", "starting from ``0`` or with the result of calling ``name_function``", "and not config.get('scheduler', None) and single_node and single_file): scheduler =", "divisions.append(None) return keys, stops, divisions def one_path_one_key(path, key, start, stop,", "its input\") # If user did not specify scheduler and", "for k in keys: storer = hdf.get_storer(k) if storer.format_type !=", "= get_keys_stops_divisions(path, key, stop, sorted_index, chunksize) if (start != 0", "keys from the same file. Parameters ---------- pattern : string,", "on whether we're writing to a single entity _actual_get =", "df.npartitions): i_name = name_function(i) task = (_pd_to_hdf, pd_to_hdf, lock, [(df._name,", "for each matched key. 
\"\"\" with pd.HDFStore(path, mode=mode) as hdf:", "the \"keys\" or group identifiers which match the given key,", "df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP Specify custom naming scheme.", "= dict(((name, i), (_pd_read_hdf, path, key, lock, update(s))) for i,", "to stop at columns : list of columns, optional A", "filenames else: return delayed([Delayed(k, dsk) for k in keys]) dont_use_fixed_error_message", ": string, list File pattern (string), buffer to read from,", "= [] divisions = [] for k in keys: storer", "columns, chunksize, division, lock): \"\"\" Get the data frame corresponding", "more information Examples -------- Save Data to a single file", "files with the multiprocessing # scheduler we don't need to", "number of rows per partition (default is 1000000). sorted_index :", "see the Pandas docstring for more detailed information about shared", "import tokenize, compute_as_if_collection from ...bytes.utils import build_name_function from ...compatibility import", "which can contain wildcards. This uses the hdf file identified", "getattr(df._partition_type, 'to_hdf') single_file = True single_node = True # if", "chunksize <= 0: raise ValueError(\"Chunksize must be a positive integer\")", "so we enforce name_function to maintain the order of its", "if lock: lock.acquire() try: pd_to_hdf(*args, **kwargs) finally: if lock: lock.release()", "stop is not None) and len(paths) > 1: raise NotImplementedError(read_hdf_error_msg)", "a lock to prevent concurrency issues (default is True). mode", "``pandas.read_hdf``, except it can read from a single large file,", "name_function(i) filenames.append(fmt_obj(path, i_name)) for i in range(1, df.npartitions): i_name =", "start >= stop: raise ValueError(\"Start row number ({}) is above", "is not None) and len(paths) > 1: raise NotImplementedError(read_hdf_error_msg) if", "prevent concurrency issues (default is True). 
mode : {'a', 'r',", "tokenize, compute_as_if_collection from ...bytes.utils import build_name_function from ...compatibility import PY3", "= lambda path, i_name: path.replace('*', i_name) if '*' in path:", "row of data for each matched key. \"\"\" with pd.HDFStore(path,", "= pd.read_hdf(path, key, mode=mode, stop=0) if columns is not None:", "1: raise NotImplementedError(read_hdf_error_msg) if chunksize <= 0: raise ValueError(\"Chunksize must", "same name. Please see the Pandas docstring for more detailed", "start=0, stop=None, columns=None, chunksize=int(1e6), sorted_index=False, lock=None, mode='a'): \"\"\" Read a", "files.\") if mode not in ('a', 'w', 'r+'): raise ValueError(\"Mode", "asterix will be replaced with an increasing sequence of integers", "base = {'name': empty.name, 'mode': mode} else: base = {'columns':", "+ chunksize}) return new dsk = dict(((name, i), (_pd_read_hdf, path,", "= False else: if key.count('*') > 1: raise ValueError(\"A maximum", "1: raise NotImplementedError(read_hdf_error_msg) from ..multi import concat return concat([one_path_one_key(path, k,", "Pandas docstring for more detailed information about shared keyword arguments.", "the future when creating HDFStores use the ``format='table'`` option to", "\"\"\" if lock is True: lock = get_scheduler_lock() key =", "a sorted index (default is False). lock : boolean, optional", "arguments. This function differs from the Pandas version by saving", "single_file = True single_node = True # if path is", "don't need to lock lock = True else: lock =", "opened for reading and writing, and if the file does", "of partitions and return a string. 
(see examples below) compute:", "start, stop, columns, chunksize, division, lock): \"\"\" Get the data", "when creating HDFStores use the ``format='table'`` option to ensure that", "_get method choose the scheduler if (get is None and", "df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP Save data to multiple files,", "for more information Examples -------- Save Data to a single", "if path is string, format using i_name if isinstance(path, str):", "elif not single_file and _actual_get is not multiprocessing.get: # if", "\" \"of rows ({})\".format(storer.nrows)) else: stops.append(stop) if sorted_index: division =", "scheduler if a lock is required. See dask.utils.get_scheduler_lock for more", "index (default is False). lock : boolean, optional Option to", "given path. Also get the index of the last row", "``'table'`` format, not the more specialized ``'fixed'`` format. Parameters ----------", "or group identifiers which match the given key, which can", "Dask DataFrame in parallel, either to many files, or to", "stop, empty, chunksize, division)) name = 'read-hdf-' + token if", "0), row number to start at stop : optional, integer", "similar to 'a', but the file must already exist. Returns", "+ 1) return new_dd_object(dsk, name, empty, divisions) keys, stops, divisions", "function only supports the Pandas ``'table'`` format, not the more", "+SKIP Returns ------- None: if compute == True delayed value:", "exceeds dataset number \" \"of rows ({})\".format(storer.nrows)) else: stops.append(stop) if", "... 
return base + timedelta(days=i) >>> df.to_hdf('*.hdf', '/data', name_function=name_function) #", "be one of 'a', 'w' or 'r+'\") if name_function is", "and dataset key\") fmt_obj = lambda path, i_name: path.replace('*', i_name)", "i in range(df.npartitions)] if formatted_names != sorted(formatted_names): warn(\"To preserve order", "wildcards start : optional, integer (defaults to 0), row number", "the Pandas docstring for more detailed information about shared keyword", "i_name)], kwargs2) if single_file: link_dep = i - 1 if", "formatted_names != sorted(formatted_names): warn(\"To preserve order between partitions name_function \"", "dask.utils.get_scheduler_lock for more information about lock selection. **other: See pandas.to_hdf", "otherwise let the _get method choose the scheduler if (get", "from 0 to the number of partitions and return a", "on each of those integers. This function only supports the", "from datetime import date, timedelta >>> base = date(year=2000, month=1,", "new = base.copy() new.update({'start': s, 'stop': s + chunksize}) return", "if mode not in ('a', 'w', 'r+'): raise ValueError(\"Mode must", "import build_name_function from ...compatibility import PY3 from ...delayed import Delayed,", "formatted_names = [name_function(i) for i in range(df.npartitions)] if formatted_names !=", "columns that if not None, will limit the return columns", "True: lock = get_scheduler_lock() key = key if key.startswith('/') else", "---------- pattern : string, list File pattern (string), buffer to", "to use when opening file(s). 'r' Read-only; no data can", "key, lock, update(s))) for i, s in enumerate(range(start, stop, chunksize)))", "def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None): \"\"\" A wrapper function around", "Store Dask Dataframe to Hierarchical Data Format (HDF) files This", "---------- path: string Path to a target filename. 
May contain", "or stop is not None) and len(paths) > 1: raise", "'/data', name_function=name_function) # doctest: +SKIP Returns ------- None: if compute", "scheduler=scheduler) if lock is None: if not single_node: lock =", "\"\"\" A wrapper function around pd_to_hdf that enables locking\"\"\" if", ">>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP \"\"\" if lock is", "if a lock is required. See dask.utils.get_scheduler_lock for more information", "the return columns (default is None) chunksize : positive integer,", "scheduler='processes') # doctest: +SKIP Specify custom naming scheme. This writes", "the many partitions of a Dask DataFrame in parallel, either", "lock \"\"\" if lock: lock.acquire() try: result = pd.read_hdf(path, key,", "specify whether or not the input hdf files have a", "from ..multi import concat return concat([_read_single_hdf(path, key, start=start, stop=stop, columns=columns,", "def _pd_read_hdf(path, key, lock, kwargs): \"\"\" Read from hdf5 file", "option to ensure that your dataset can be parallelized\"\"\" read_hdf_error_msg", "contain any wildcards). \"\"\" empty = pd.read_hdf(path, key, mode=mode, stop=0)", "support 'table' format in hdf files.\") if mode not in", "Option to specify whether or not the input hdf files", "single entity _actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler) if lock is", "that your dataset can be parallelized\"\"\" read_hdf_error_msg = \"\"\" The", "input\") # If user did not specify scheduler and write", "take in a number from 0 to the number of", "= merge(df.dask, dsk) if single_file and single_node: keys = [(name,", "# doctest: +SKIP \"\"\" if lock is True: lock =", "if single_node: kwargs2['append'] = True filenames = [] for i", "file in read_hdf. 
\"\"\" def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize):", "raise ValueError(\"Dask only support 'table' format in hdf files.\") if", "uuid from warnings import warn import pandas as pd from", "is accepted in file \" \"path and dataset key\") fmt_obj", "to multiple files, using the multiprocessing scheduler: >>> df.to_hdf('output-*.hdf', '/data',", "stop=None, columns=None, chunksize=int(1e6), sorted_index=False, lock=None, mode='a'): \"\"\" Read a single", "= {'columns': empty.columns, 'mode': mode} if start >= stop: raise", "sorted_index=False, lock=True, mode='a'): \"\"\" Read HDF files into a Dask", "s, 'stop': s + chunksize}) return new dsk = dict(((name,", "scheduler=None, name_function=None, compute=True, lock=None, dask_kwargs={}, **kwargs): \"\"\" Store Dask Dataframe", "an optional ``name_function``. The asterix will be replaced with an", "Convert integer 0 to n to a string ''' ...", "the hdf file identified by the given path. Also get", "partitions name_function \" \"must preserve the order of its input\")", "concat return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)", ">>> base = date(year=2000, month=1, day=1) >>> def name_function(i): ...", "month=1, day=1) >>> def name_function(i): ... ''' Convert integer 0", "lock = False if lock: lock = get_scheduler_lock(get, df, scheduler=scheduler)", "identifier in the store. Can contain wildcards start : optional,", "the last row), row number to stop at columns :", "if storer.format_type != 'table': raise TypeError(dont_use_fixed_error_message) if stop is None:", "return columns (default is None) chunksize : positive integer, optional", "for start in range(0, storer.nrows, chunksize)] division_end = storer.read_column('index', start=storer.nrows", "opening file(s). 'r' Read-only; no data can be modified. 
'a'", "in range(0,df.npartitions): i_name = name_function(i) filenames.append(fmt_obj(path, i_name)) for i in", "default based on whether we're writing to a single entity", "file does not exist it is created. 'r+' It is", "single_node and single_file): scheduler = 'single-threaded' # handle lock default", "i) for i in range(df.npartitions)] if compute: compute_as_if_collection(DataFrame, dsk, keys,", "+SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP Load multiple", "stop is not None) and sorted_index: raise ValueError(\"When assuming pre-partitioned", "Parameters ---------- path: string Path to a target filename. May", "return concat([_read_single_hdf(path, key, start=start, stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode)", "the file must already exist. Returns ------- dask.DataFrame Examples --------", "\"\"\" This HDFStore is not partitionable and can only be", "file. Parameters ---------- pattern : string, list File pattern (string),", "raise ValueError(\"A maximum of one asterisk is accepted in file", "divisions = [] for k in keys: storer = hdf.get_storer(k)", "order is preserved when its saved and read # so", "contain a ``*`` to denote many locations name_function: function A", "only supports the Pandas ``'table'`` format, not the more specialized", "i_name = name_function(0) dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock, [(df._name,", "sorted(glob(pattern)) else: paths = pattern if (start != 0 or", "is None: stops.append(storer.nrows) elif stop > storer.nrows: raise ValueError(\"Stop keyword", "= True # if path is string, format using i_name", "dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP Load multiple datasets >>>", "keys]) dont_use_fixed_error_message = \"\"\" This HDFStore is not partitionable and", "the ``*`` in the above options to a string. 
Should", "+ key if isinstance(pattern, str): paths = sorted(glob(pattern)) else: paths", "return result def read_hdf(pattern, key, start=0, stop=None, columns=None, chunksize=1000000, sorted_index=False,", "for k, s, d in zip(keys, stops, divisions)]) def _pd_read_hdf(path,", "({}) is above or equal to stop \" \"row number", "datasets within the same file. You may specify this parallelism", "is None) chunksize : positive integer, optional Maximal number of", "compute: bool Whether or not to execute immediately. If False", "from a single large file, or from multiple files, or", "key, lock, kwargs): \"\"\" Read from hdf5 file with a", "preserved when its saved and read # so we enforce", "dataset.\"\"\" def _read_single_hdf(path, key, start=0, stop=None, columns=None, chunksize=int(1e6), sorted_index=False, lock=None,", "from ...base import get_scheduler from ..core import DataFrame, new_dd_object from", "for k in hdf.keys() if fnmatch(k, key)] stops = []", "True filenames = [] for i in range(0,df.npartitions): i_name =", "stopping index per file, or starting and stopping index of", "it is created. 'r+' It is similar to 'a', but", "asterix ``*`` within the filename or datapath, and an optional", "range(df.npartitions)] if compute: compute_as_if_collection(DataFrame, dsk, keys, get=get, scheduler=scheduler, **dask_kwargs) return", "an existing file is opened for reading and writing, and", "(default is None) chunksize : positive integer, optional Maximal number", "prevent concurrency issues. By default a ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``", "optional Lock to use to prevent concurrency issues. By default", "which match the given key, which can contain wildcards. This", "hdf files have a sorted index (default is False). lock", "lock) for k, s, d in zip(keys, stops, divisions)]) def", "one key (which should not contain any wildcards). 
\"\"\" empty", "'r+'\") if name_function is None: name_function = build_name_function(df.npartitions - 1)", "enables locking\"\"\" if lock: lock.acquire() try: pd_to_hdf(*args, **kwargs) finally: if", "..multi import concat return concat([one_path_one_key(path, k, start, s, columns, chunksize,", "of one asterisk is accepted in \" \"dataset key\") fmt_obj" ]
[ "= ('runtime', 'width', 'height') def _is_complete(self, info): \"\"\" gets a", "dict() for provider in self._get_hooks(): current_result = provider.get_info(file, **options) result.update(current_result)", "into media info providers. :param AbstractMediaInfoProvider instance: media info provider", ":param dict info: media info to be checked. :rtype: bool", "def get_info(self, file, **options): \"\"\" gets a dict containing media", "import InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager, HookMixin): \"\"\" media info manager class.", "hook_type = AbstractMediaInfoProvider invalid_hook_type_error = InvalidMediaInfoProviderTypeError REQUIRED_INFO = ('runtime', 'width',", "InvalidMediaInfoProviderTypeError: invalid media info provider type error. \"\"\" self.register_hook(instance) def", "not existed error. :raises IsNotFileError: is not directory error. :returns:", ":raises IsNotFileError: is not directory error. :returns: dict(int runtime, int", "type error. \"\"\" self.register_hook(instance) def get_info(self, file, **options): \"\"\" gets", "not directory error. :returns: dict(int runtime, int width, int height)", "file. :raises InvalidPathError: invalid path error. :raises PathIsNotAbsoluteError: path is", "info): \"\"\" gets a value indicating that given media info", "int width, int height) :rtype: dict \"\"\" path_utils.assert_is_file(file) result =", "media info manager class. \"\"\" package_class = MediaInfoPackage hook_type =", "gets a value indicating that given media info is complete.", "dict info: media info to be checked. :rtype: bool \"\"\"", "provider type error. \"\"\" self.register_hook(instance) def get_info(self, file, **options): \"\"\"", "self.REQUIRED_INFO: result = info.get(item) if result is None or result", "**options) result.update(current_result) if self._is_complete(result) is True: break result.setdefault('runtime', 0) result.setdefault('width',", "path of video file. :raises InvalidPathError: invalid path error. 
:raises", "\"\"\" registers the given instance into media info providers. :param", ":raises PathIsNotAbsoluteError: path is not absolute error. :raises PathNotExistedError: path", "instance to be registered. :raises InvalidMediaInfoProviderTypeError: invalid media info provider", "info to be checked. :rtype: bool \"\"\" for item in", "for item in self.REQUIRED_INFO: result = info.get(item) if result is", ":rtype: bool \"\"\" for item in self.REQUIRED_INFO: result = info.get(item)", "\"\"\" package_class = MediaInfoPackage hook_type = AbstractMediaInfoProvider invalid_hook_type_error = InvalidMediaInfoProviderTypeError", "Manager import pyrin.utils.path as path_utils from charma.media_info import MediaInfoPackage from", "PathNotExistedError: path not existed error. :raises IsNotFileError: is not directory", "= AbstractMediaInfoProvider invalid_hook_type_error = InvalidMediaInfoProviderTypeError REQUIRED_INFO = ('runtime', 'width', 'height')", "instance into media info providers. :param AbstractMediaInfoProvider instance: media info", "file, **options): \"\"\" gets a dict containing media info of", "return True def register_provider(self, instance): \"\"\" registers the given instance", "info providers. :param AbstractMediaInfoProvider instance: media info provider instance to", "True: break result.setdefault('runtime', 0) result.setdefault('width', 0) result.setdefault('height', 0) return result", "import Manager import pyrin.utils.path as path_utils from charma.media_info import MediaInfoPackage", "media info providers. :param AbstractMediaInfoProvider instance: media info provider instance", "existed error. :raises IsNotFileError: is not directory error. :returns: dict(int", "current_result = provider.get_info(file, **options) result.update(current_result) if self._is_complete(result) is True: break", "file: absolute path of video file. :raises InvalidPathError: invalid path", "class MediaInfoManager(Manager, HookMixin): \"\"\" media info manager class. 
\"\"\" package_class", "info.get(item) if result is None or result <= 0: return", "None or result <= 0: return False return True def", ":raises InvalidMediaInfoProviderTypeError: invalid media info provider type error. \"\"\" self.register_hook(instance)", "dict \"\"\" path_utils.assert_is_file(file) result = dict() for provider in self._get_hooks():", "info is complete. :param dict info: media info to be", "invalid path error. :raises PathIsNotAbsoluteError: path is not absolute error.", "path_utils.assert_is_file(file) result = dict() for provider in self._get_hooks(): current_result =", "item in self.REQUIRED_INFO: result = info.get(item) if result is None", "\"\"\" media info manager class. \"\"\" package_class = MediaInfoPackage hook_type", "media info provider instance to be registered. :raises InvalidMediaInfoProviderTypeError: invalid", "runtime, int width, int height) :rtype: dict \"\"\" path_utils.assert_is_file(file) result", "complete. :param dict info: media info to be checked. :rtype:", "module. \"\"\" from pyrin.core.mixin import HookMixin from pyrin.core.structs import Manager", "is not directory error. :returns: dict(int runtime, int width, int", "provider.get_info(file, **options) result.update(current_result) if self._is_complete(result) is True: break result.setdefault('runtime', 0)", "IsNotFileError: is not directory error. :returns: dict(int runtime, int width,", "error. :raises PathIsNotAbsoluteError: path is not absolute error. :raises PathNotExistedError:", "= InvalidMediaInfoProviderTypeError REQUIRED_INFO = ('runtime', 'width', 'height') def _is_complete(self, info):", "InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager, HookMixin): \"\"\" media info manager class. \"\"\"", "height) :rtype: dict \"\"\" path_utils.assert_is_file(file) result = dict() for provider", "result = dict() for provider in self._get_hooks(): current_result = provider.get_info(file,", "absolute path of video file. 
:raises InvalidPathError: invalid path error.", "error. :raises PathNotExistedError: path not existed error. :raises IsNotFileError: is", "import pyrin.utils.path as path_utils from charma.media_info import MediaInfoPackage from charma.media_info.interface", "self._is_complete(result) is True: break result.setdefault('runtime', 0) result.setdefault('width', 0) result.setdefault('height', 0)", "\"\"\" from pyrin.core.mixin import HookMixin from pyrin.core.structs import Manager import", "= MediaInfoPackage hook_type = AbstractMediaInfoProvider invalid_hook_type_error = InvalidMediaInfoProviderTypeError REQUIRED_INFO =", "from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager, HookMixin): \"\"\" media info", "given file. :param str file: absolute path of video file.", "import MediaInfoPackage from charma.media_info.interface import AbstractMediaInfoProvider from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError", "AbstractMediaInfoProvider from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager, HookMixin): \"\"\" media", "\"\"\" gets a dict containing media info of given file.", "not absolute error. :raises PathNotExistedError: path not existed error. :raises", "if self._is_complete(result) is True: break result.setdefault('runtime', 0) result.setdefault('width', 0) result.setdefault('height',", "the given instance into media info providers. :param AbstractMediaInfoProvider instance:", "or result <= 0: return False return True def register_provider(self,", "dict containing media info of given file. :param str file:", "of video file. :raises InvalidPathError: invalid path error. :raises PathIsNotAbsoluteError:", ":raises InvalidPathError: invalid path error. 
:raises PathIsNotAbsoluteError: path is not", "from charma.media_info.interface import AbstractMediaInfoProvider from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager,", "utf-8 -*- \"\"\" media info manager module. \"\"\" from pyrin.core.mixin", "path_utils from charma.media_info import MediaInfoPackage from charma.media_info.interface import AbstractMediaInfoProvider from", "= info.get(item) if result is None or result <= 0:", "checked. :rtype: bool \"\"\" for item in self.REQUIRED_INFO: result =", "import HookMixin from pyrin.core.structs import Manager import pyrin.utils.path as path_utils", ":param AbstractMediaInfoProvider instance: media info provider instance to be registered.", "MediaInfoPackage hook_type = AbstractMediaInfoProvider invalid_hook_type_error = InvalidMediaInfoProviderTypeError REQUIRED_INFO = ('runtime',", "media info of given file. :param str file: absolute path", ":returns: dict(int runtime, int width, int height) :rtype: dict \"\"\"", "in self._get_hooks(): current_result = provider.get_info(file, **options) result.update(current_result) if self._is_complete(result) is", "error. \"\"\" self.register_hook(instance) def get_info(self, file, **options): \"\"\" gets a", "from pyrin.core.mixin import HookMixin from pyrin.core.structs import Manager import pyrin.utils.path", "width, int height) :rtype: dict \"\"\" path_utils.assert_is_file(file) result = dict()", "AbstractMediaInfoProvider instance: media info provider instance to be registered. :raises", "as path_utils from charma.media_info import MediaInfoPackage from charma.media_info.interface import AbstractMediaInfoProvider", "media info to be checked. 
:rtype: bool \"\"\" for item", "package_class = MediaInfoPackage hook_type = AbstractMediaInfoProvider invalid_hook_type_error = InvalidMediaInfoProviderTypeError REQUIRED_INFO", "int height) :rtype: dict \"\"\" path_utils.assert_is_file(file) result = dict() for", "REQUIRED_INFO = ('runtime', 'width', 'height') def _is_complete(self, info): \"\"\" gets", "provider instance to be registered. :raises InvalidMediaInfoProviderTypeError: invalid media info", "absolute error. :raises PathNotExistedError: path not existed error. :raises IsNotFileError:", "for provider in self._get_hooks(): current_result = provider.get_info(file, **options) result.update(current_result) if", "is True: break result.setdefault('runtime', 0) result.setdefault('width', 0) result.setdefault('height', 0) return", "is None or result <= 0: return False return True", "InvalidMediaInfoProviderTypeError REQUIRED_INFO = ('runtime', 'width', 'height') def _is_complete(self, info): \"\"\"", "charma.media_info.exceptions import InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager, HookMixin): \"\"\" media info manager", "get_info(self, file, **options): \"\"\" gets a dict containing media info", "info provider type error. \"\"\" self.register_hook(instance) def get_info(self, file, **options):", "\"\"\" self.register_hook(instance) def get_info(self, file, **options): \"\"\" gets a dict", "info manager module. \"\"\" from pyrin.core.mixin import HookMixin from pyrin.core.structs", "**options): \"\"\" gets a dict containing media info of given", "-*- coding: utf-8 -*- \"\"\" media info manager module. \"\"\"", "\"\"\" path_utils.assert_is_file(file) result = dict() for provider in self._get_hooks(): current_result", ":rtype: dict \"\"\" path_utils.assert_is_file(file) result = dict() for provider in", "be registered. :raises InvalidMediaInfoProviderTypeError: invalid media info provider type error.", "path is not absolute error. 
:raises PathNotExistedError: path not existed", "given instance into media info providers. :param AbstractMediaInfoProvider instance: media", "HookMixin): \"\"\" media info manager class. \"\"\" package_class = MediaInfoPackage", "be checked. :rtype: bool \"\"\" for item in self.REQUIRED_INFO: result", "provider in self._get_hooks(): current_result = provider.get_info(file, **options) result.update(current_result) if self._is_complete(result)", "result = info.get(item) if result is None or result <=", "containing media info of given file. :param str file: absolute", "if result is None or result <= 0: return False", "info of given file. :param str file: absolute path of", "'height') def _is_complete(self, info): \"\"\" gets a value indicating that", "('runtime', 'width', 'height') def _is_complete(self, info): \"\"\" gets a value", "False return True def register_provider(self, instance): \"\"\" registers the given", "media info is complete. :param dict info: media info to", "def _is_complete(self, info): \"\"\" gets a value indicating that given", "class. \"\"\" package_class = MediaInfoPackage hook_type = AbstractMediaInfoProvider invalid_hook_type_error =", "indicating that given media info is complete. :param dict info:", "0: return False return True def register_provider(self, instance): \"\"\" registers", "from charma.media_info import MediaInfoPackage from charma.media_info.interface import AbstractMediaInfoProvider from charma.media_info.exceptions", "is complete. :param dict info: media info to be checked.", "result is None or result <= 0: return False return", "media info provider type error. \"\"\" self.register_hook(instance) def get_info(self, file,", "path error. :raises PathIsNotAbsoluteError: path is not absolute error. :raises", "= dict() for provider in self._get_hooks(): current_result = provider.get_info(file, **options)", "invalid media info provider type error. 
\"\"\" self.register_hook(instance) def get_info(self,", "\"\"\" for item in self.REQUIRED_INFO: result = info.get(item) if result", "invalid_hook_type_error = InvalidMediaInfoProviderTypeError REQUIRED_INFO = ('runtime', 'width', 'height') def _is_complete(self,", "result.update(current_result) if self._is_complete(result) is True: break result.setdefault('runtime', 0) result.setdefault('width', 0)", "providers. :param AbstractMediaInfoProvider instance: media info provider instance to be", "is not absolute error. :raises PathNotExistedError: path not existed error.", "a dict containing media info of given file. :param str", "info: media info to be checked. :rtype: bool \"\"\" for", "of given file. :param str file: absolute path of video", "-*- \"\"\" media info manager module. \"\"\" from pyrin.core.mixin import", "import AbstractMediaInfoProvider from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager, HookMixin): \"\"\"", "self._get_hooks(): current_result = provider.get_info(file, **options) result.update(current_result) if self._is_complete(result) is True:", "<= 0: return False return True def register_provider(self, instance): \"\"\"", "MediaInfoManager(Manager, HookMixin): \"\"\" media info manager class. \"\"\" package_class =", "media info manager module. \"\"\" from pyrin.core.mixin import HookMixin from", "result <= 0: return False return True def register_provider(self, instance):", "def register_provider(self, instance): \"\"\" registers the given instance into media", "instance: media info provider instance to be registered. :raises InvalidMediaInfoProviderTypeError:", "InvalidPathError: invalid path error. :raises PathIsNotAbsoluteError: path is not absolute", "to be checked. :rtype: bool \"\"\" for item in self.REQUIRED_INFO:", "error. :returns: dict(int runtime, int width, int height) :rtype: dict", "manager module. 
\"\"\" from pyrin.core.mixin import HookMixin from pyrin.core.structs import", "value indicating that given media info is complete. :param dict", "dict(int runtime, int width, int height) :rtype: dict \"\"\" path_utils.assert_is_file(file)", "info provider instance to be registered. :raises InvalidMediaInfoProviderTypeError: invalid media", "a value indicating that given media info is complete. :param", "directory error. :returns: dict(int runtime, int width, int height) :rtype:", "= provider.get_info(file, **options) result.update(current_result) if self._is_complete(result) is True: break result.setdefault('runtime',", "file. :param str file: absolute path of video file. :raises", "MediaInfoPackage from charma.media_info.interface import AbstractMediaInfoProvider from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError class", "bool \"\"\" for item in self.REQUIRED_INFO: result = info.get(item) if", ":param str file: absolute path of video file. :raises InvalidPathError:", "video file. :raises InvalidPathError: invalid path error. :raises PathIsNotAbsoluteError: path", ":raises PathNotExistedError: path not existed error. :raises IsNotFileError: is not", "AbstractMediaInfoProvider invalid_hook_type_error = InvalidMediaInfoProviderTypeError REQUIRED_INFO = ('runtime', 'width', 'height') def", "True def register_provider(self, instance): \"\"\" registers the given instance into", "pyrin.core.mixin import HookMixin from pyrin.core.structs import Manager import pyrin.utils.path as", "str file: absolute path of video file. :raises InvalidPathError: invalid", "\"\"\" gets a value indicating that given media info is", "charma.media_info import MediaInfoPackage from charma.media_info.interface import AbstractMediaInfoProvider from charma.media_info.exceptions import", "_is_complete(self, info): \"\"\" gets a value indicating that given media", "info manager class. 
\"\"\" package_class = MediaInfoPackage hook_type = AbstractMediaInfoProvider", "pyrin.utils.path as path_utils from charma.media_info import MediaInfoPackage from charma.media_info.interface import", "'width', 'height') def _is_complete(self, info): \"\"\" gets a value indicating", "given media info is complete. :param dict info: media info", "PathIsNotAbsoluteError: path is not absolute error. :raises PathNotExistedError: path not", "# -*- coding: utf-8 -*- \"\"\" media info manager module.", "self.register_hook(instance) def get_info(self, file, **options): \"\"\" gets a dict containing", "registered. :raises InvalidMediaInfoProviderTypeError: invalid media info provider type error. \"\"\"", "pyrin.core.structs import Manager import pyrin.utils.path as path_utils from charma.media_info import", "gets a dict containing media info of given file. :param", "in self.REQUIRED_INFO: result = info.get(item) if result is None or", "charma.media_info.interface import AbstractMediaInfoProvider from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError class MediaInfoManager(Manager, HookMixin):", "register_provider(self, instance): \"\"\" registers the given instance into media info", "to be registered. :raises InvalidMediaInfoProviderTypeError: invalid media info provider type", "registers the given instance into media info providers. :param AbstractMediaInfoProvider", "from pyrin.core.structs import Manager import pyrin.utils.path as path_utils from charma.media_info", "HookMixin from pyrin.core.structs import Manager import pyrin.utils.path as path_utils from", "manager class. \"\"\" package_class = MediaInfoPackage hook_type = AbstractMediaInfoProvider invalid_hook_type_error", "coding: utf-8 -*- \"\"\" media info manager module. \"\"\" from", "instance): \"\"\" registers the given instance into media info providers.", "return False return True def register_provider(self, instance): \"\"\" registers the", "\"\"\" media info manager module. 
\"\"\" from pyrin.core.mixin import HookMixin", "that given media info is complete. :param dict info: media", "error. :raises IsNotFileError: is not directory error. :returns: dict(int runtime,", "path not existed error. :raises IsNotFileError: is not directory error." ]
[ "file.read() self.parser = FightParser() def test_parses_draw(self): \"\"\"Test it correctly handles", "__init__(self, content, encoding, url): self.content= content self.encoding = encoding self.url", "with open('mock_data/fights/draw.html', 'rb') as file: self.drawn_fight = file.read() self.parser =", "result = self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn', \"Result should equal draw.\") class", "import FightParser class MockResponse(object): def __init__(self, content, encoding, url): self.content=", "self.parser = FightParser() def test_parses_draw(self): \"\"\"Test it correctly handles draws\"\"\"", "correctly handles draws\"\"\" mock_response = MockResponse( self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\" )", "class TestFightParser(unittest.TestCase): def setUp(self): with open('mock_data/fights/draw.html', 'rb') as file: self.drawn_fight", "encoding self.url = url class TestFightParser(unittest.TestCase): def setUp(self): with open('mock_data/fights/draw.html',", "= file.read() self.parser = FightParser() def test_parses_draw(self): \"\"\"Test it correctly", "content self.encoding = encoding self.url = url class TestFightParser(unittest.TestCase): def", "= FightParser() def test_parses_draw(self): \"\"\"Test it correctly handles draws\"\"\" mock_response", "= url class TestFightParser(unittest.TestCase): def setUp(self): with open('mock_data/fights/draw.html', 'rb') as", "mock_response = MockResponse( self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\" ) result = self.parser.parse(mock_response)", "self.drawn_fight = file.read() self.parser = FightParser() def test_parses_draw(self): \"\"\"Test it", "= self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn', \"Result should equal draw.\") class TestBoxerParser(unittest.TestCase):", "FightParser class MockResponse(object): def __init__(self, content, encoding, url): self.content= content", "FightParser() def 
test_parses_draw(self): \"\"\"Test it correctly handles draws\"\"\" mock_response =", "\"\"\"Test it correctly handles draws\"\"\" mock_response = MockResponse( self.drawn_fight, 'UTF-8',", "self.encoding = encoding self.url = url class TestFightParser(unittest.TestCase): def setUp(self):", "draws\"\"\" mock_response = MockResponse( self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\" ) result =", "file: self.drawn_fight = file.read() self.parser = FightParser() def test_parses_draw(self): \"\"\"Test", "handles draws\"\"\" mock_response = MockResponse( self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\" ) result", "= MockResponse( self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\" ) result = self.parser.parse(mock_response) self.assertEqual(result.winner,", "url): self.content= content self.encoding = encoding self.url = url class", "MockResponse(object): def __init__(self, content, encoding, url): self.content= content self.encoding =", "content, encoding, url): self.content= content self.encoding = encoding self.url =", "it correctly handles draws\"\"\" mock_response = MockResponse( self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\"", "TestFightParser(unittest.TestCase): def setUp(self): with open('mock_data/fights/draw.html', 'rb') as file: self.drawn_fight =", "from boxrec.parsers import FightParser class MockResponse(object): def __init__(self, content, encoding,", "boxrec.parsers import FightParser class MockResponse(object): def __init__(self, content, encoding, url):", "'rb') as file: self.drawn_fight = file.read() self.parser = FightParser() def", "= encoding self.url = url class TestFightParser(unittest.TestCase): def setUp(self): with", "open('mock_data/fights/draw.html', 'rb') as file: self.drawn_fight = file.read() self.parser = FightParser()", "as file: self.drawn_fight = file.read() self.parser = FightParser() def test_parses_draw(self):", 
"\"http://boxrec.com/en/event/115689/202488\" ) result = self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn', \"Result should equal", "self.content= content self.encoding = encoding self.url = url class TestFightParser(unittest.TestCase):", "setUp(self): with open('mock_data/fights/draw.html', 'rb') as file: self.drawn_fight = file.read() self.parser", "import unittest from boxrec.parsers import FightParser class MockResponse(object): def __init__(self,", "def __init__(self, content, encoding, url): self.content= content self.encoding = encoding", "unittest from boxrec.parsers import FightParser class MockResponse(object): def __init__(self, content,", "class MockResponse(object): def __init__(self, content, encoding, url): self.content= content self.encoding", ") result = self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn', \"Result should equal draw.\")", "encoding, url): self.content= content self.encoding = encoding self.url = url", "self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\" ) result = self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn', \"Result", "MockResponse( self.drawn_fight, 'UTF-8', \"http://boxrec.com/en/event/115689/202488\" ) result = self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn',", "self.url = url class TestFightParser(unittest.TestCase): def setUp(self): with open('mock_data/fights/draw.html', 'rb')", "'UTF-8', \"http://boxrec.com/en/event/115689/202488\" ) result = self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn', \"Result should", "test_parses_draw(self): \"\"\"Test it correctly handles draws\"\"\" mock_response = MockResponse( self.drawn_fight,", "def setUp(self): with open('mock_data/fights/draw.html', 'rb') as file: self.drawn_fight = file.read()", "url class TestFightParser(unittest.TestCase): def setUp(self): with open('mock_data/fights/draw.html', 'rb') as file:", "def test_parses_draw(self): 
\"\"\"Test it correctly handles draws\"\"\" mock_response = MockResponse(", "self.parser.parse(mock_response) self.assertEqual(result.winner, 'drawn', \"Result should equal draw.\") class TestBoxerParser(unittest.TestCase): pass" ]
[ "cancelled. \"\"\" now = datetime.utcnow() jobs = [] while True:", "jobs = [] while True: job = self._collection.find_and_modify( query={\"trial\": {\"$nin\":", "priority=0): \"\"\" Adds new work to the workqueue. \"\"\" id", "finished and attach the result. \"\"\" t = datetime.utcnow() self._collection.update_one(", "if a certain job has been cancelled or all together", "\"orphaned\": False, } ) return id def update_job(self, _id, update=None):", "job = self._collection.find_and_modify( query={\"trial\": {\"$nin\": trial_list}, \"end_time\": -1}, update={ \"$set\":", "{ \"cancelled\": True, \"orphaned\": True, \"end_time\": now, \"result\": {\"state\": \"fail\",", "None def cancel_invalid_jobs(self, trial_list): \"\"\" Takes a list of all", "\"last_update\": t, \"worker\": worker_id}}, new=True, ) return job def add_job(self,", "_id): \"\"\" Marks a job as not orphaned. \"\"\" job", "\"update.container.long_id\": {\"$in\": id_list}} ) return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for j in", "free job to worker. Returns the object from the mongodb.", "queue that handles the queue of experiment. \"\"\" def __init__(self,", "self._mongodb = mongodb self._collection = mongodb.workqueue def assign_next_job(self, worker_id): \"\"\"", "{\"$lt\": deadline}, }, sort=[(\"priority\", -1), (\"last_update\", 1)], update={ \"$set\": {", "or all together removed. \"\"\" return self._collection.find_one({\"_id\": _id, \"cancelled\": False})", "as not orphaned. 
\"\"\" job = self._collection.find_and_modify( query={\"_id\": _id}, update={\"$set\":", "now = datetime.utcnow() deadline = now - timedelta(seconds=WORK_TIMEOUT) jobs =", "{\"start_time\": t, \"last_update\": t, \"worker\": worker_id}}, new=True, ) return job", "{ \"start_time\": -1, \"end_time\": -1, \"last_update\": -1, \"created_on\": datetime.utcnow(), \"priority\":", "{\"last_update\": t, \"update\": update}} ) def is_job_cancelled(self, _id): \"\"\" Checks", "list of all active (not finished, cancelled or removed) trial", "trial_name, priority=0): \"\"\" Adds new work to the workqueue. \"\"\"", "(\"last_update\", 1)], update={ \"$set\": { \"cancelled\": True, \"orphaned\": True, \"end_time\":", "orphans. Returns a list of (Docker id, experiment id) tuples.", "\"end_time\": -1, \"last_update\": {\"$lt\": deadline}, }, sort=[(\"priority\", -1), (\"last_update\", 1)],", "True, \"orphaned\": True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Timed", "if job is not None: jobs.append(job) else: return jobs def", "update_job(self, _id, update=None): \"\"\" Marks the job as alive and", "\"\"\" Marks a job as not orphaned. \"\"\" job =", "(Docker id, experiment id) tuples. \"\"\" jobs = self._collection.find( {\"orphaned\":", "\"update\": update}} ) def is_job_cancelled(self, _id): \"\"\" Checks if a", "to worker. Returns the object from the mongodb. \"\"\" t", "id_list}} ) return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for j in list(jobs)] def", "from the job. 
\"\"\" t = datetime.utcnow() self._collection.update( {\"_id\": _id},", "the job as alive and post an update from the", "\"\"\" id = self._collection.insert( { \"start_time\": -1, \"end_time\": -1, \"last_update\":", "trial_list): \"\"\" Takes a list of all active (not finished,", "result): \"\"\" Marks the job as finished and attach the", "{\"state\": \"fail\", \"msg\": \"Timed out!\"}, } }, new=True, ) if", "while True: job = self._collection.find_and_modify( query={ \"start_time\": {\"$ne\": -1}, \"end_time\":", "next free job to worker. Returns the object from the", "\"priority\": priority, \"parameters\": parameters, \"data\": data, \"worker\": None, \"result\": {},", "def is_job_cancelled(self, _id): \"\"\" Checks if a certain job has", "a list of (Docker id, experiment id) tuples. \"\"\" jobs", "\"\"\" Takes a list of all active (not finished, cancelled", "worker_id): \"\"\" Assigns the next free job to worker. Returns", "id def update_job(self, _id, update=None): \"\"\" Marks the job as", "these are cancelled. \"\"\" now = datetime.utcnow() jobs = []", "new=True, ) return job def add_job(self, parameters, data, trial_id, trial_name,", "id_list): \"\"\" Checks if a list of Docker container ids", "all active (not finished, cancelled or removed) trial ids. Work", "Adds new work to the workqueue. \"\"\" id = self._collection.insert(", "update={\"$set\": {\"start_time\": t, \"last_update\": t, \"worker\": worker_id}}, new=True, ) return", "them. \"\"\" now = datetime.utcnow() deadline = now - timedelta(seconds=WORK_TIMEOUT)", "job = self._collection.find_and_modify( query={ \"start_time\": {\"$ne\": -1}, \"end_time\": -1, \"last_update\":", "the mongodb. \"\"\" t = datetime.utcnow() job = self._collection.find_and_modify( query={\"start_time\":", "True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Timed out!\"}, }", "any of these are cancelled. 
\"\"\" now = datetime.utcnow() jobs", "query={\"start_time\": -1, \"cancelled\": False}, sort=[(\"priority\", -1), (\"created_on\", 1)], update={\"$set\": {\"start_time\":", "job = self._collection.find_and_modify( query={\"_id\": _id}, update={\"$set\": {\"orphaned\": False}}, new=True )", "= datetime.utcnow() job = self._collection.find_and_modify( query={\"start_time\": -1, \"cancelled\": False}, sort=[(\"priority\",", "datetime import datetime, timedelta from bson.objectid import ObjectId WORK_TIMEOUT =", "trial ids. Work that is not associated with any of", "due to worker death and cancel them. \"\"\" now =", "update={\"$set\": {\"orphaned\": False}}, new=True ) return job is not None", "the job. \"\"\" t = datetime.utcnow() self._collection.update( {\"_id\": _id}, {\"$set\":", "trial_list}, \"end_time\": -1}, update={ \"$set\": { \"cancelled\": True, \"end_time\": now,", "not None def cancel_invalid_jobs(self, trial_list): \"\"\" Takes a list of", "-1, \"cancelled\": False}, sort=[(\"priority\", -1), (\"created_on\", 1)], update={\"$set\": {\"start_time\": t,", "job has been cancelled or all together removed. \"\"\" return", "experiment. 
\"\"\" def __init__(self, mongodb): super().__init__() self._mongodb = mongodb self._collection", "\"last_update\": -1, \"created_on\": datetime.utcnow(), \"priority\": priority, \"parameters\": parameters, \"data\": data,", "\"cancelled\": False}) is None def finish_job(self, _id, result): \"\"\" Marks", "job def add_job(self, parameters, data, trial_id, trial_name, priority=0): \"\"\" Adds", "True: job = self._collection.find_and_modify( query={\"trial\": {\"$nin\": trial_list}, \"end_time\": -1}, update={", "new=True, ) if job is not None: jobs.append(job) else: return", "\"\"\" jobs = self._collection.find( {\"orphaned\": True, \"update.container.long_id\": {\"$in\": id_list}} )", "= self._collection.find_and_modify( query={\"_id\": _id}, update={\"$set\": {\"orphaned\": False}}, new=True ) return", "handles the queue of experiment. \"\"\" def __init__(self, mongodb): super().__init__()", "t = datetime.utcnow() self._collection.update( {\"_id\": _id}, {\"$set\": {\"last_update\": t, \"update\":", "from the mongodb. \"\"\" t = datetime.utcnow() job = self._collection.find_and_modify(", "-1, \"end_time\": -1, \"last_update\": -1, \"created_on\": datetime.utcnow(), \"priority\": priority, \"parameters\":", "-1), (\"created_on\", 1)], update={\"$set\": {\"start_time\": t, \"last_update\": t, \"worker\": worker_id}},", "id = self._collection.insert( { \"start_time\": -1, \"end_time\": -1, \"last_update\": -1,", "tuples. \"\"\" jobs = self._collection.find( {\"orphaned\": True, \"update.container.long_id\": {\"$in\": id_list}}", "for j in list(jobs)] def not_orphaned(self, _id): \"\"\" Marks a", "= datetime.utcnow() jobs = [] while True: job = self._collection.find_and_modify(", "work to the workqueue. 
\"\"\" id = self._collection.insert( { \"start_time\":", "j[\"_id\"]) for j in list(jobs)] def not_orphaned(self, _id): \"\"\" Marks", ") def is_job_cancelled(self, _id): \"\"\" Checks if a certain job", "sort=[(\"priority\", -1), (\"last_update\", 1)], update={ \"$set\": { \"cancelled\": True, \"orphaned\":", "WORK_TIMEOUT = 600 class WorkQueue: \"\"\" A simple MongoDB priority", "out!\"}, } }, new=True, ) if job is not None:", "new work to the workqueue. \"\"\" id = self._collection.insert( {", "a list of all active (not finished, cancelled or removed)", "deadline = now - timedelta(seconds=WORK_TIMEOUT) jobs = [] while True:", "\"\"\" Checks if a list of Docker container ids are", "\"\"\" job = self._collection.find_and_modify( query={\"_id\": _id}, update={\"$set\": {\"orphaned\": False}}, new=True", "mongodb self._collection = mongodb.workqueue def assign_next_job(self, worker_id): \"\"\" Assigns the", "are cancelled. \"\"\" now = datetime.utcnow() jobs = [] while", "= 600 class WorkQueue: \"\"\" A simple MongoDB priority work", "assign_next_job(self, worker_id): \"\"\" Assigns the next free job to worker.", "__init__(self, mongodb): super().__init__() self._mongodb = mongodb self._collection = mongodb.workqueue def", "_id, result): \"\"\" Marks the job as finished and attach", "to the workqueue. \"\"\" id = self._collection.insert( { \"start_time\": -1,", "job to worker. Returns the object from the mongodb. 
\"\"\"", "return id def update_job(self, _id, update=None): \"\"\" Marks the job", "update={ \"$set\": { \"cancelled\": True, \"end_time\": now, \"result\": {\"state\": \"fail\",", "jobs def check_for_orphans(self, id_list): \"\"\" Checks if a list of", "datetime.utcnow() self._collection.update( {\"_id\": _id}, {\"$set\": {\"last_update\": t, \"update\": update}} )", "= now - timedelta(seconds=WORK_TIMEOUT) jobs = [] while True: job", "purge_dead_jobs(self): \"\"\" Returns jobs that have timed out due to", "from bson.objectid import ObjectId WORK_TIMEOUT = 600 class WorkQueue: \"\"\"", "\"start_time\": -1, \"end_time\": -1, \"last_update\": -1, \"created_on\": datetime.utcnow(), \"priority\": priority,", "\"\"\" t = datetime.utcnow() job = self._collection.find_and_modify( query={\"start_time\": -1, \"cancelled\":", "as finished and attach the result. \"\"\" t = datetime.utcnow()", "is not associated with any of these are cancelled. \"\"\"", "jobs = self._collection.find( {\"orphaned\": True, \"update.container.long_id\": {\"$in\": id_list}} ) return", "= self._collection.find_and_modify( query={\"trial\": {\"$nin\": trial_list}, \"end_time\": -1}, update={ \"$set\": {", "= self._collection.find_and_modify( query={\"start_time\": -1, \"cancelled\": False}, sort=[(\"priority\", -1), (\"created_on\", 1)],", "id, experiment id) tuples. \"\"\" jobs = self._collection.find( {\"orphaned\": True,", "have timed out due to worker death and cancel them.", "Docker container ids are marked as orphans. Returns a list", "\"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Timed out!\"}, } },", "of (Docker id, experiment id) tuples. \"\"\" jobs = self._collection.find(", "workqueue. \"\"\" id = self._collection.insert( { \"start_time\": -1, \"end_time\": -1,", "priority, \"parameters\": parameters, \"data\": data, \"worker\": None, \"result\": {}, \"trial\":", "id) tuples. 
\"\"\" jobs = self._collection.find( {\"orphaned\": True, \"update.container.long_id\": {\"$in\":", "\"result\": {}, \"trial\": trial_id, \"trial_name\": trial_name, \"_id\": str(ObjectId()), \"cancelled\": False,", "\"last_update\": {\"$lt\": deadline}, }, sort=[(\"priority\", -1), (\"last_update\", 1)], update={ \"$set\":", "Returns the object from the mongodb. \"\"\" t = datetime.utcnow()", "{\"$nin\": trial_list}, \"end_time\": -1}, update={ \"$set\": { \"cancelled\": True, \"end_time\":", "priority work queue that handles the queue of experiment. \"\"\"", "has been cancelled or all together removed. \"\"\" return self._collection.find_one({\"_id\":", "of Docker container ids are marked as orphans. Returns a", "None, \"result\": {}, \"trial\": trial_id, \"trial_name\": trial_name, \"_id\": str(ObjectId()), \"cancelled\":", "t, \"update\": update}} ) def is_job_cancelled(self, _id): \"\"\" Checks if", "\"\"\" now = datetime.utcnow() jobs = [] while True: job", "ids are marked as orphans. Returns a list of (Docker", "} ) return id def update_job(self, _id, update=None): \"\"\" Marks", "= datetime.utcnow() self._collection.update_one( {\"_id\": _id}, {\"$set\": {\"end_time\": t, \"last_update\": t,", "attach the result. \"\"\" t = datetime.utcnow() self._collection.update_one( {\"_id\": _id},", "update}} ) def is_job_cancelled(self, _id): \"\"\" Checks if a certain", "\"result\": {\"state\": \"fail\", \"msg\": \"Timed out!\"}, } }, new=True, )", "\"\"\" Adds new work to the workqueue. 
\"\"\" id =", "\"last_update\": t, \"result\": result}} ) def purge_dead_jobs(self): \"\"\" Returns jobs", "self._collection.find_and_modify( query={\"_id\": _id}, update={\"$set\": {\"orphaned\": False}}, new=True ) return job", "[] while True: job = self._collection.find_and_modify( query={ \"start_time\": {\"$ne\": -1},", "return job def add_job(self, parameters, data, trial_id, trial_name, priority=0): \"\"\"", "def cancel_invalid_jobs(self, trial_list): \"\"\" Takes a list of all active", "trial_id, \"trial_name\": trial_name, \"_id\": str(ObjectId()), \"cancelled\": False, \"orphaned\": False, }", "to worker death and cancel them. \"\"\" now = datetime.utcnow()", "= self._collection.find( {\"orphaned\": True, \"update.container.long_id\": {\"$in\": id_list}} ) return [(j[\"update\"][\"container\"][\"long_id\"],", "of experiment. \"\"\" def __init__(self, mongodb): super().__init__() self._mongodb = mongodb", "not associated with any of these are cancelled. \"\"\" now", "1)], update={\"$set\": {\"start_time\": t, \"last_update\": t, \"worker\": worker_id}}, new=True, )", "self._collection.update_one( {\"_id\": _id}, {\"$set\": {\"end_time\": t, \"last_update\": t, \"result\": result}}", "that is not associated with any of these are cancelled.", "job = self._collection.find_and_modify( query={\"start_time\": -1, \"cancelled\": False}, sort=[(\"priority\", -1), (\"created_on\",", "out due to worker death and cancel them. \"\"\" now", "now, \"result\": {\"state\": \"fail\", \"msg\": \"Timed out!\"}, } }, new=True,", "Assigns the next free job to worker. 
Returns the object", "\"data\": data, \"worker\": None, \"result\": {}, \"trial\": trial_id, \"trial_name\": trial_name,", "query={\"trial\": {\"$nin\": trial_list}, \"end_time\": -1}, update={ \"$set\": { \"cancelled\": True,", "while True: job = self._collection.find_and_modify( query={\"trial\": {\"$nin\": trial_list}, \"end_time\": -1},", "{ \"cancelled\": True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Abandoned\"},", "trial_id, trial_name, priority=0): \"\"\" Adds new work to the workqueue.", "deadline}, }, sort=[(\"priority\", -1), (\"last_update\", 1)], update={ \"$set\": { \"cancelled\":", "work queue that handles the queue of experiment. \"\"\" def", "t = datetime.utcnow() self._collection.update_one( {\"_id\": _id}, {\"$set\": {\"end_time\": t, \"last_update\":", "a certain job has been cancelled or all together removed.", "-1}, \"end_time\": -1, \"last_update\": {\"$lt\": deadline}, }, sort=[(\"priority\", -1), (\"last_update\",", "def not_orphaned(self, _id): \"\"\" Marks a job as not orphaned.", "update=None): \"\"\" Marks the job as alive and post an", "finish_job(self, _id, result): \"\"\" Marks the job as finished and", "query={ \"start_time\": {\"$ne\": -1}, \"end_time\": -1, \"last_update\": {\"$lt\": deadline}, },", "[(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for j in list(jobs)] def not_orphaned(self, _id): \"\"\"", "a list of Docker container ids are marked as orphans.", "queue of experiment. \"\"\" def __init__(self, mongodb): super().__init__() self._mongodb =", "\"cancelled\": True, \"orphaned\": True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\":", "Takes a list of all active (not finished, cancelled or", "\"$set\": { \"cancelled\": True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\":", "worker. Returns the object from the mongodb. 
\"\"\" t =", "t = datetime.utcnow() job = self._collection.find_and_modify( query={\"start_time\": -1, \"cancelled\": False},", "Work that is not associated with any of these are", ") if job is not None: jobs.append(job) else: return jobs", "super().__init__() self._mongodb = mongodb self._collection = mongodb.workqueue def assign_next_job(self, worker_id):", "with any of these are cancelled. \"\"\" now = datetime.utcnow()", "-1, \"created_on\": datetime.utcnow(), \"priority\": priority, \"parameters\": parameters, \"data\": data, \"worker\":", "timedelta(seconds=WORK_TIMEOUT) jobs = [] while True: job = self._collection.find_and_modify( query={", "str(ObjectId()), \"cancelled\": False, \"orphaned\": False, } ) return id def", "self._collection.find_one({\"_id\": _id, \"cancelled\": False}) is None def finish_job(self, _id, result):", "[] while True: job = self._collection.find_and_modify( query={\"trial\": {\"$nin\": trial_list}, \"end_time\":", "{\"_id\": _id}, {\"$set\": {\"last_update\": t, \"update\": update}} ) def is_job_cancelled(self,", "\"worker\": None, \"result\": {}, \"trial\": trial_id, \"trial_name\": trial_name, \"_id\": str(ObjectId()),", "the next free job to worker. Returns the object from", "data, trial_id, trial_name, priority=0): \"\"\" Adds new work to the", "of all active (not finished, cancelled or removed) trial ids.", "-1), (\"last_update\", 1)], update={ \"$set\": { \"cancelled\": True, \"orphaned\": True,", "the result. \"\"\" t = datetime.utcnow() self._collection.update_one( {\"_id\": _id}, {\"$set\":", "class WorkQueue: \"\"\" A simple MongoDB priority work queue that", "return self._collection.find_one({\"_id\": _id, \"cancelled\": False}) is None def finish_job(self, _id,", "}, sort=[(\"priority\", -1), (\"last_update\", 1)], update={ \"$set\": { \"cancelled\": True,", "_id, update=None): \"\"\" Marks the job as alive and post", "marked as orphans. Returns a list of (Docker id, experiment", "cancel them. 
\"\"\" now = datetime.utcnow() deadline = now -", "True, \"update.container.long_id\": {\"$in\": id_list}} ) return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for j", "datetime.utcnow() jobs = [] while True: job = self._collection.find_and_modify( query={\"trial\":", "together removed. \"\"\" return self._collection.find_one({\"_id\": _id, \"cancelled\": False}) is None", "False}, sort=[(\"priority\", -1), (\"created_on\", 1)], update={\"$set\": {\"start_time\": t, \"last_update\": t,", "Marks the job as alive and post an update from", "(not finished, cancelled or removed) trial ids. Work that is", "active (not finished, cancelled or removed) trial ids. Work that", "{}, \"trial\": trial_id, \"trial_name\": trial_name, \"_id\": str(ObjectId()), \"cancelled\": False, \"orphaned\":", "\"\"\" t = datetime.utcnow() self._collection.update_one( {\"_id\": _id}, {\"$set\": {\"end_time\": t,", ") def purge_dead_jobs(self): \"\"\" Returns jobs that have timed out", "that handles the queue of experiment. \"\"\" def __init__(self, mongodb):", "job is not None: jobs.append(job) else: return jobs def check_for_orphans(self,", "\"\"\" A simple MongoDB priority work queue that handles the", "\"fail\", \"msg\": \"Timed out!\"}, } }, new=True, ) if job", "cancel_invalid_jobs(self, trial_list): \"\"\" Takes a list of all active (not", "\"Abandoned\"}, } }, new=True, ) if job is not None:", "{\"orphaned\": False}}, new=True ) return job is not None def", "def finish_job(self, _id, result): \"\"\" Marks the job as finished", "that have timed out due to worker death and cancel", "the queue of experiment. 
\"\"\" def __init__(self, mongodb): super().__init__() self._mongodb", "\"\"\" Returns jobs that have timed out due to worker", "= [] while True: job = self._collection.find_and_modify( query={ \"start_time\": {\"$ne\":", "\"\"\" Marks the job as alive and post an update", "_id}, {\"$set\": {\"last_update\": t, \"update\": update}} ) def is_job_cancelled(self, _id):", "return job is not None def cancel_invalid_jobs(self, trial_list): \"\"\" Takes", "mongodb.workqueue def assign_next_job(self, worker_id): \"\"\" Assigns the next free job", "= datetime.utcnow() deadline = now - timedelta(seconds=WORK_TIMEOUT) jobs = []", "= self._collection.find_and_modify( query={ \"start_time\": {\"$ne\": -1}, \"end_time\": -1, \"last_update\": {\"$lt\":", "False, } ) return id def update_job(self, _id, update=None): \"\"\"", "not None: jobs.append(job) else: return jobs def check_for_orphans(self, id_list): \"\"\"", "\"cancelled\": False}, sort=[(\"priority\", -1), (\"created_on\", 1)], update={\"$set\": {\"start_time\": t, \"last_update\":", "_id}, {\"$set\": {\"end_time\": t, \"last_update\": t, \"result\": result}} ) def", "= self._collection.insert( { \"start_time\": -1, \"end_time\": -1, \"last_update\": -1, \"created_on\":", "= [] while True: job = self._collection.find_and_modify( query={\"trial\": {\"$nin\": trial_list},", "def update_job(self, _id, update=None): \"\"\" Marks the job as alive", "else: return jobs def check_for_orphans(self, id_list): \"\"\" Checks if a", "if a list of Docker container ids are marked as", "self._collection.find_and_modify( query={\"start_time\": -1, \"cancelled\": False}, sort=[(\"priority\", -1), (\"created_on\", 1)], update={\"$set\":", "False, \"orphaned\": False, } ) return id def update_job(self, _id,", "datetime.utcnow() deadline = now - timedelta(seconds=WORK_TIMEOUT) jobs = [] while", "self._collection.find_and_modify( query={ \"start_time\": {\"$ne\": -1}, \"end_time\": -1, \"last_update\": {\"$lt\": deadline},", "worker_id}}, 
new=True, ) return job def add_job(self, parameters, data, trial_id,", "sort=[(\"priority\", -1), (\"created_on\", 1)], update={\"$set\": {\"start_time\": t, \"last_update\": t, \"worker\":", "\"result\": {\"state\": \"fail\", \"msg\": \"Abandoned\"}, } }, new=True, ) if", "self._collection.update( {\"_id\": _id}, {\"$set\": {\"last_update\": t, \"update\": update}} ) def", "\"\"\" def __init__(self, mongodb): super().__init__() self._mongodb = mongodb self._collection =", "as orphans. Returns a list of (Docker id, experiment id)", "True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Abandoned\"}, } },", "all together removed. \"\"\" return self._collection.find_one({\"_id\": _id, \"cancelled\": False}) is", "} }, new=True, ) if job is not None: jobs.append(job)", "\"end_time\": -1, \"last_update\": -1, \"created_on\": datetime.utcnow(), \"priority\": priority, \"parameters\": parameters,", "the object from the mongodb. \"\"\" t = datetime.utcnow() job", "a job as not orphaned. \"\"\" job = self._collection.find_and_modify( query={\"_id\":", "datetime, timedelta from bson.objectid import ObjectId WORK_TIMEOUT = 600 class", "update from the job. \"\"\" t = datetime.utcnow() self._collection.update( {\"_id\":", "and cancel them. \"\"\" now = datetime.utcnow() deadline = now", "is not None: jobs.append(job) else: return jobs def check_for_orphans(self, id_list):", "in list(jobs)] def not_orphaned(self, _id): \"\"\" Marks a job as", "def add_job(self, parameters, data, trial_id, trial_name, priority=0): \"\"\" Adds new", "now - timedelta(seconds=WORK_TIMEOUT) jobs = [] while True: job =", "cancelled or all together removed. \"\"\" return self._collection.find_one({\"_id\": _id, \"cancelled\":", "Returns jobs that have timed out due to worker death", "def assign_next_job(self, worker_id): \"\"\" Assigns the next free job to", "\"end_time\": -1}, update={ \"$set\": { \"cancelled\": True, \"end_time\": now, \"result\":", "death and cancel them. 
\"\"\" now = datetime.utcnow() deadline =", "datetime.utcnow() self._collection.update_one( {\"_id\": _id}, {\"$set\": {\"end_time\": t, \"last_update\": t, \"result\":", "and post an update from the job. \"\"\" t =", "timedelta from bson.objectid import ObjectId WORK_TIMEOUT = 600 class WorkQueue:", "\"\"\" now = datetime.utcnow() deadline = now - timedelta(seconds=WORK_TIMEOUT) jobs", "experiment id) tuples. \"\"\" jobs = self._collection.find( {\"orphaned\": True, \"update.container.long_id\":", "bson.objectid import ObjectId WORK_TIMEOUT = 600 class WorkQueue: \"\"\" A", "}, new=True, ) if job is not None: jobs.append(job) else:", "not orphaned. \"\"\" job = self._collection.find_and_modify( query={\"_id\": _id}, update={\"$set\": {\"orphaned\":", "list of (Docker id, experiment id) tuples. \"\"\" jobs =", "\"worker\": worker_id}}, new=True, ) return job def add_job(self, parameters, data,", "job. \"\"\" t = datetime.utcnow() self._collection.update( {\"_id\": _id}, {\"$set\": {\"last_update\":", "WorkQueue: \"\"\" A simple MongoDB priority work queue that handles", "query={\"_id\": _id}, update={\"$set\": {\"orphaned\": False}}, new=True ) return job is", "the workqueue. \"\"\" id = self._collection.insert( { \"start_time\": -1, \"end_time\":", "_id): \"\"\" Checks if a certain job has been cancelled", "\"\"\" Checks if a certain job has been cancelled or", "\"Timed out!\"}, } }, new=True, ) if job is not", "job as not orphaned. \"\"\" job = self._collection.find_and_modify( query={\"_id\": _id},", "timed out due to worker death and cancel them. 
\"\"\"", "import ObjectId WORK_TIMEOUT = 600 class WorkQueue: \"\"\" A simple", "= mongodb self._collection = mongodb.workqueue def assign_next_job(self, worker_id): \"\"\" Assigns", "jobs.append(job) else: return jobs def check_for_orphans(self, id_list): \"\"\" Checks if", "600 class WorkQueue: \"\"\" A simple MongoDB priority work queue", "\"result\": result}} ) def purge_dead_jobs(self): \"\"\" Returns jobs that have", "j in list(jobs)] def not_orphaned(self, _id): \"\"\" Marks a job", "self._collection.find_and_modify( query={\"trial\": {\"$nin\": trial_list}, \"end_time\": -1}, update={ \"$set\": { \"cancelled\":", "\"created_on\": datetime.utcnow(), \"priority\": priority, \"parameters\": parameters, \"data\": data, \"worker\": None,", "\"\"\" Assigns the next free job to worker. Returns the", "Checks if a certain job has been cancelled or all", "1)], update={ \"$set\": { \"cancelled\": True, \"orphaned\": True, \"end_time\": now,", "None: jobs.append(job) else: return jobs def check_for_orphans(self, id_list): \"\"\" Checks", "cancelled or removed) trial ids. Work that is not associated", "self._collection.find( {\"orphaned\": True, \"update.container.long_id\": {\"$in\": id_list}} ) return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"])", "{\"$set\": {\"last_update\": t, \"update\": update}} ) def is_job_cancelled(self, _id): \"\"\"", "False}) is None def finish_job(self, _id, result): \"\"\" Marks the", "and attach the result. \"\"\" t = datetime.utcnow() self._collection.update_one( {\"_id\":", "not_orphaned(self, _id): \"\"\" Marks a job as not orphaned. \"\"\"", "\"fail\", \"msg\": \"Abandoned\"}, } }, new=True, ) if job is", "\"cancelled\": False, \"orphaned\": False, } ) return id def update_job(self,", "{\"$ne\": -1}, \"end_time\": -1, \"last_update\": {\"$lt\": deadline}, }, sort=[(\"priority\", -1),", "MongoDB priority work queue that handles the queue of experiment.", "container ids are marked as orphans. 
Returns a list of", "object from the mongodb. \"\"\" t = datetime.utcnow() job =", "False}}, new=True ) return job is not None def cancel_invalid_jobs(self,", "\"cancelled\": True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Abandoned\"}, }", "def purge_dead_jobs(self): \"\"\" Returns jobs that have timed out due", "ObjectId WORK_TIMEOUT = 600 class WorkQueue: \"\"\" A simple MongoDB", "A simple MongoDB priority work queue that handles the queue", "t, \"last_update\": t, \"worker\": worker_id}}, new=True, ) return job def", "\"msg\": \"Timed out!\"}, } }, new=True, ) if job is", "job is not None def cancel_invalid_jobs(self, trial_list): \"\"\" Takes a", "list of Docker container ids are marked as orphans. Returns", "\"trial\": trial_id, \"trial_name\": trial_name, \"_id\": str(ObjectId()), \"cancelled\": False, \"orphaned\": False,", "now, \"result\": {\"state\": \"fail\", \"msg\": \"Abandoned\"}, } }, new=True, )", "\"parameters\": parameters, \"data\": data, \"worker\": None, \"result\": {}, \"trial\": trial_id,", "removed) trial ids. Work that is not associated with any", "orphaned. 
\"\"\" job = self._collection.find_and_modify( query={\"_id\": _id}, update={\"$set\": {\"orphaned\": False}},", "jobs that have timed out due to worker death and", "new=True ) return job is not None def cancel_invalid_jobs(self, trial_list):", "result}} ) def purge_dead_jobs(self): \"\"\" Returns jobs that have timed", "\"\"\" t = datetime.utcnow() self._collection.update( {\"_id\": _id}, {\"$set\": {\"last_update\": t,", "self._collection = mongodb.workqueue def assign_next_job(self, worker_id): \"\"\" Assigns the next", "trial_name, \"_id\": str(ObjectId()), \"cancelled\": False, \"orphaned\": False, } ) return", "datetime.utcnow(), \"priority\": priority, \"parameters\": parameters, \"data\": data, \"worker\": None, \"result\":", "{\"end_time\": t, \"last_update\": t, \"result\": result}} ) def purge_dead_jobs(self): \"\"\"", "\"orphaned\": True, \"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Timed out!\"},", "ids. Work that is not associated with any of these", "alive and post an update from the job. \"\"\" t", "removed. \"\"\" return self._collection.find_one({\"_id\": _id, \"cancelled\": False}) is None def", "are marked as orphans. Returns a list of (Docker id,", "{\"_id\": _id}, {\"$set\": {\"end_time\": t, \"last_update\": t, \"result\": result}} )", ") return job def add_job(self, parameters, data, trial_id, trial_name, priority=0):", "-1, \"last_update\": {\"$lt\": deadline}, }, sort=[(\"priority\", -1), (\"last_update\", 1)], update={", "associated with any of these are cancelled. \"\"\" now =", "mongodb. \"\"\" t = datetime.utcnow() job = self._collection.find_and_modify( query={\"start_time\": -1,", "is None def finish_job(self, _id, result): \"\"\" Marks the job", "now = datetime.utcnow() jobs = [] while True: job =", "the job as finished and attach the result. \"\"\" t", "been cancelled or all together removed. 
\"\"\" return self._collection.find_one({\"_id\": _id,", "return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for j in list(jobs)] def not_orphaned(self, _id):", "\"end_time\": now, \"result\": {\"state\": \"fail\", \"msg\": \"Abandoned\"}, } }, new=True,", "- timedelta(seconds=WORK_TIMEOUT) jobs = [] while True: job = self._collection.find_and_modify(", "update={ \"$set\": { \"cancelled\": True, \"orphaned\": True, \"end_time\": now, \"result\":", "{\"$set\": {\"end_time\": t, \"last_update\": t, \"result\": result}} ) def purge_dead_jobs(self):", "parameters, data, trial_id, trial_name, priority=0): \"\"\" Adds new work to", "= datetime.utcnow() self._collection.update( {\"_id\": _id}, {\"$set\": {\"last_update\": t, \"update\": update}}", "Marks a job as not orphaned. \"\"\" job = self._collection.find_and_modify(", "(\"created_on\", 1)], update={\"$set\": {\"start_time\": t, \"last_update\": t, \"worker\": worker_id}}, new=True,", "{\"state\": \"fail\", \"msg\": \"Abandoned\"}, } }, new=True, ) if job", "def check_for_orphans(self, id_list): \"\"\" Checks if a list of Docker", "is_job_cancelled(self, _id): \"\"\" Checks if a certain job has been", "_id}, update={\"$set\": {\"orphaned\": False}}, new=True ) return job is not", "data, \"worker\": None, \"result\": {}, \"trial\": trial_id, \"trial_name\": trial_name, \"_id\":", "{\"$in\": id_list}} ) return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for j in list(jobs)]", "result. 
\"\"\" t = datetime.utcnow() self._collection.update_one( {\"_id\": _id}, {\"$set\": {\"end_time\":", "\"\"\" return self._collection.find_one({\"_id\": _id, \"cancelled\": False}) is None def finish_job(self,", "check_for_orphans(self, id_list): \"\"\" Checks if a list of Docker container", "_id, \"cancelled\": False}) is None def finish_job(self, _id, result): \"\"\"", "True: job = self._collection.find_and_modify( query={ \"start_time\": {\"$ne\": -1}, \"end_time\": -1,", "t, \"result\": result}} ) def purge_dead_jobs(self): \"\"\" Returns jobs that", "t, \"last_update\": t, \"result\": result}} ) def purge_dead_jobs(self): \"\"\" Returns", "Checks if a list of Docker container ids are marked", "import datetime, timedelta from bson.objectid import ObjectId WORK_TIMEOUT = 600", ") return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for j in list(jobs)] def not_orphaned(self,", "<filename>hyperdock/common/workqueue.py<gh_stars>1-10 from datetime import datetime, timedelta from bson.objectid import ObjectId", "\"start_time\": {\"$ne\": -1}, \"end_time\": -1, \"last_update\": {\"$lt\": deadline}, }, sort=[(\"priority\",", "def __init__(self, mongodb): super().__init__() self._mongodb = mongodb self._collection = mongodb.workqueue", "datetime.utcnow() job = self._collection.find_and_modify( query={\"start_time\": -1, \"cancelled\": False}, sort=[(\"priority\", -1),", "\"msg\": \"Abandoned\"}, } }, new=True, ) if job is not", "jobs = [] while True: job = self._collection.find_and_modify( query={ \"start_time\":", "mongodb): super().__init__() self._mongodb = mongodb self._collection = mongodb.workqueue def assign_next_job(self,", "job as alive and post an update from the job.", "as alive and post an update from the job. \"\"\"", "worker death and cancel them. \"\"\" now = datetime.utcnow() deadline", "return jobs def check_for_orphans(self, id_list): \"\"\" Checks if a list", "Returns a list of (Docker id, experiment id) tuples. 
\"\"\"", "\"$set\": { \"cancelled\": True, \"orphaned\": True, \"end_time\": now, \"result\": {\"state\":", "parameters, \"data\": data, \"worker\": None, \"result\": {}, \"trial\": trial_id, \"trial_name\":", "\"_id\": str(ObjectId()), \"cancelled\": False, \"orphaned\": False, } ) return id", "certain job has been cancelled or all together removed. \"\"\"", "None def finish_job(self, _id, result): \"\"\" Marks the job as", "from datetime import datetime, timedelta from bson.objectid import ObjectId WORK_TIMEOUT", "self._collection.insert( { \"start_time\": -1, \"end_time\": -1, \"last_update\": -1, \"created_on\": datetime.utcnow(),", "add_job(self, parameters, data, trial_id, trial_name, priority=0): \"\"\" Adds new work", "or removed) trial ids. Work that is not associated with", "simple MongoDB priority work queue that handles the queue of", "list(jobs)] def not_orphaned(self, _id): \"\"\" Marks a job as not", "t, \"worker\": worker_id}}, new=True, ) return job def add_job(self, parameters,", "post an update from the job. \"\"\" t = datetime.utcnow()", "= mongodb.workqueue def assign_next_job(self, worker_id): \"\"\" Assigns the next free", "Marks the job as finished and attach the result. \"\"\"", "-1, \"last_update\": -1, \"created_on\": datetime.utcnow(), \"priority\": priority, \"parameters\": parameters, \"data\":", "job as finished and attach the result. \"\"\" t =", "\"trial_name\": trial_name, \"_id\": str(ObjectId()), \"cancelled\": False, \"orphaned\": False, } )", "\"\"\" Marks the job as finished and attach the result.", "{\"orphaned\": True, \"update.container.long_id\": {\"$in\": id_list}} ) return [(j[\"update\"][\"container\"][\"long_id\"], j[\"_id\"]) for", "of these are cancelled. \"\"\" now = datetime.utcnow() jobs =", "finished, cancelled or removed) trial ids. 
Work that is not", "is not None def cancel_invalid_jobs(self, trial_list): \"\"\" Takes a list", ") return id def update_job(self, _id, update=None): \"\"\" Marks the", "-1}, update={ \"$set\": { \"cancelled\": True, \"end_time\": now, \"result\": {\"state\":", "an update from the job. \"\"\" t = datetime.utcnow() self._collection.update(", ") return job is not None def cancel_invalid_jobs(self, trial_list): \"\"\"" ]
[ "# '' as data, no content length should be present", "args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self): context = {'foo': 'bar'} def responseCls(connection, response):", "try: con.request('/') except ValueError: pass self.assertEqual(con.context, {}) def test_log_curl(self): url", "'PUT', 'post', 'put']: con.request('/test', method=method, data=None, headers={'Content-Length': '42'}, raw=True) putheader_call_list", "# -*- coding: utf-8 -*- # Licensed to the Apache", "con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # No data, raw request, do not", "'post', 'put']: con.request('/test', method=method, data=None) call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0')", "kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self): context =", "putheader_call_list) # '' as data, raw request, do not touch", "(r'Non https connections are not allowed \\(use ' 'secure=True\\)') self.assertRaisesRegexp(ValueError,", "connections are not allowed \\(use ' 'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg, Connection,", "expected_msg = (r'Non https connections are not allowed \\(use '", "2.0 # (the \"License\"); you may not use this file", "# No data, content length should be present for method", "self.assertIn(call('Content-Length', '42'), putheader_call_list) # 'a' as data, content length should", "Mock, call from libcloud.test import unittest from libcloud.common.base import Connection", "GET method # No data, no content length should be", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Connection() con.connection = Mock() # GET method # No data,", "self.assertRaisesRegexp(ValueError, expected_msg, Connection, secure=False) def test_content_length(self): con = Connection() con.connection", 
"= Mock() con.responseCls = responseCls con.set_context(context) self.assertEqual(con.context, context) con.request('/') #", "= responseCls con.set_context(context) self.assertEqual(con.context, context) con.request('/') # Context should have", "Mock() con.set_context(context) self.assertEqual(con.context, context) con.responseCls = Mock(side_effect=ValueError()) try: con.request('/') except", "be present for method in ['POST', 'PUT', 'post', 'put']: con.request('/test',", "Mock() Connection.responseCls = Mock() Connection.allow_insecure = True def tearDown(self): Connection.connect", "kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0]) con.request(action='/path', params=params2) args, kwargs", "call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') # POST, PUT method #", "for head requests cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers) self.assertEqual(cmd,", "'' as data, raw request, do not touch Content-Length if", "Mock() # GET method # No data, no content length", "as data, content length should be present for method in", "call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # '' as", "con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0][len(params2)]) def", "url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path') if", "should also be reset if a method inside request throws", "{}, {} con.cache_busting = False con.request(action='/path', params=params1) args, kwargs =", "cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i --head", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Mock(side_effect=ValueError()) try: con.request('/') except ValueError: pass 
self.assertEqual(con.context, {}) def test_log_curl(self):", "secure=False) def test_content_length(self): con = Connection() con.connection = Mock() #", "Context should have been reset self.assertTrue(con.called) self.assertEqual(con.context, {}) # Context", "params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0]) con.request(action='/path', params=params2)", "test_log_curl(self): url = '/test/path' body = None headers = {}", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "params2 = [('foo1', 'bar1'), ('foo2', 'bar2')] con = Connection() con.connection", "distributed with # this work for additional information regarding copyright", "self.assertEqual(con.context, {}) # Context should also be reset if a", "information regarding copyright ownership. # The ASF licenses this file", "con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # '' as data, content length should", "def tearDown(self): Connection.connect = self.originalConnect Connection.responseCls = Connection.responseCls Connection.allow_insecure =", "content length should be present con.request('/test', method='GET', data=None) call_kwargs =", "'1') # POST, PUT method # No data, content length", "putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # '' as data,", "\\(use ' 'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg, Connection, secure=False) def test_content_length(self): con", "= con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params1) con.request(action='/path', params=params2) args,", "raw request, do not touch Content-Length if present for method", "Apache Software Foundation (ASF) under one or more§ # contributor", "ssl.SSLError: pass self.assertEqual(con.context, {}) 
con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context)", "= con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def test_cache_busting(self): params1 = {'foo1': 'bar1',", "# 'a' as data, content length should be present (data", "be present (data in GET is not # correct, but", "con.request('/test', method=method, data=None, headers={'Content-Length': '42'}, raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length',", "True def tearDown(self): Connection.connect = self.originalConnect Connection.responseCls = Connection.responseCls Connection.allow_insecure", "ownership. # The ASF licenses this file to You under", "POST, PUT method # No data, content length should be", "content length should be present for method in ['POST', 'PUT',", "= con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') # POST, PUT method # No", "'put']: con.request('/test', method=method, data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def", "software # distributed under the License is distributed on an", "with # this work for additional information regarding copyright ownership.", "con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self): context = {'foo': 'bar'}", "a method inside request throws con = Connection() con.connection =", "Foundation (ASF) under one or more§ # contributor license agreements.", "be present con.request('/test', method='GET', data=None) call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not", "call_kwargs['headers']) # '' as data, no content length should be", "import LoggingConnection class ConnectionClassTestCase(unittest.TestCase): def setUp(self): self.originalConnect = Connection.connect 
self.originalResponseCls", "as data, no content length should be present con.request('/test', method='GET',", "compliance with # the License. You may obtain a copy", "licenses this file to You under the Apache License, Version", "con.called = False con.connection = Mock() con.responseCls = responseCls con.set_context(context)", "context) con.request('/') # Context should have been reset self.assertTrue(con.called) self.assertEqual(con.context,", "{}) # Context should also be reset if a method", "head requests cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl", "Connection.responseCls = Mock() Connection.allow_insecure = True def tearDown(self): Connection.connect =", "= self.originalConnect Connection.responseCls = Connection.responseCls Connection.allow_insecure = True def test_dont_allow_insecure(self):", "self.assertTrue('Content-Length' not in call_kwargs['headers']) # 'a' as data, content length", "# No data, raw request, do not touch Content-Length if", "# Should use --head for head requests cmd = con._log_curl(method='HEAD',", "# '' as data, content length should be present for", "= {'foo1': 'bar1', 'foo2': 'bar2'} params2 = [('foo1', 'bar1'), ('foo2',", "params2) con.cache_busting = True con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args", "('foo2', 'bar2')] con = Connection() con.connection = Mock() con.pre_connect_hook =", "= con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self): context = {'foo':", "= [('foo1', 'bar1'), ('foo2', 'bar2')] con = Connection() con.connection =", "have been reset self.assertTrue(con.called) self.assertEqual(con.context, {}) # Context should also", "ConnectionClassTestCase(unittest.TestCase): def setUp(self): self.originalConnect = Connection.connect self.originalResponseCls = Connection.responseCls Connection.connect", "# The ASF licenses this 
file to You under the", "Connection.responseCls Connection.allow_insecure = True def test_dont_allow_insecure(self): Connection.allow_insecure = True Connection(secure=False)", "con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params2) con.cache_busting = True con.request(action='/path',", "tearDown(self): Connection.connect = self.originalConnect Connection.responseCls = Connection.responseCls Connection.allow_insecure = True", "{} con = LoggingConnection() con.protocol = 'http' con.host = 'example.com'", "params1) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0])", "Connection, secure=False) def test_content_length(self): con = Connection() con.connection = Mock()", "responseCls(connection, response): connection.called = True self.assertEqual(connection.context, context) con = Connection()", "self.originalResponseCls = Connection.responseCls Connection.connect = Mock() Connection.responseCls = Mock() Connection.allow_insecure", "'post', 'put']: con.request('/test', method=method, data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1')", "con.set_context(context) self.assertEqual(con.context, context) con.responseCls = Mock(side_effect=ValueError()) try: con.request('/') except ValueError:", "self.assertEqual(con.context, context) con.responseCls = Mock(side_effect=ValueError()) try: con.request('/') except ValueError: pass", "in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='a') call_kwargs =", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "con.responseCls = responseCls con.set_context(context) self.assertEqual(con.context, context) con.request('/') # Context should", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "use this file except in compliance with # the License.", "not in call_kwargs['headers']) # 'a' as data, content length should", "Content-Length if present for method in ['POST', 'PUT', 'post', 'put']:", "'bar1'), ('foo2', 'bar2')] con = Connection() con.connection = Mock() con.pre_connect_hook", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "data, content length should be present for method in ['POST',", "inside request throws con = Connection() con.connection = Mock() con.set_context(context)", "'bar'} def responseCls(connection, response): connection.called = True self.assertEqual(connection.context, context) con", "to in writing, software # distributed under the License is", "unittest from libcloud.common.base import Connection from libcloud.common.base import LoggingConnection class", "--head for head requests cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers)", "'POST', 'PUT', 'DELETE']: cmd = con._log_curl(method=method, url=url, body=body, headers=headers) self.assertEqual(cmd,", "# See the License for the specific language governing permissions", "headers=headers) self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' % (method))", "in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None) call_kwargs =", "Connection.allow_insecure = True def test_dont_allow_insecure(self): Connection.allow_insecure = True Connection(secure=False) Connection.allow_insecure", "present con.request('/test', method='GET', data='') call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in", "length should be present con.request('/test', method='GET', data='') call_kwargs = con.connection.request.call_args[1]", "PUT method # No data, content length should be present", "cmd = 
con._log_curl(method=method, url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i -X", "setUp(self): self.originalConnect = Connection.connect self.originalResponseCls = Connection.responseCls Connection.connect = Mock()", "additional information regarding copyright ownership. # The ASF licenses this", "or agreed to in writing, software # distributed under the", "from libcloud.test import unittest from libcloud.common.base import Connection from libcloud.common.base", "required by applicable law or agreed to in writing, software", "except ssl.SSLError: pass self.assertEqual(con.context, {}) con.connection = Mock() con.set_context(context) self.assertEqual(con.context,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "= True def test_dont_allow_insecure(self): Connection.allow_insecure = True Connection(secure=False) Connection.allow_insecure =", "con.request('/') except ValueError: pass self.assertEqual(con.context, {}) def test_log_curl(self): url =", "self.assertTrue('cache-busting' in args[0]) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting'", "= {}, {} con.cache_busting = False con.request(action='/path', params=params1) args, kwargs", "copyright ownership. 
# The ASF licenses this file to You", "responseCls con.set_context(context) self.assertEqual(con.context, context) con.request('/') # Context should have been", "test_dont_allow_insecure(self): Connection.allow_insecure = True Connection(secure=False) Connection.allow_insecure = False expected_msg =", "data, no content length should be present con.request('/test', method='GET', data=None)", "self.assertEqual(con.context, context) con.request('/') # Context should have been reset self.assertTrue(con.called)", "ssl from mock import Mock, call from libcloud.test import unittest", "method=method, data=None, headers={'Content-Length': '42'}, raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'),", "= con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0]) con.request(action='/path', params=params2) args, kwargs =", "License, Version 2.0 # (the \"License\"); you may not use", "con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # 'a' as data, content", "may not use this file except in compliance with #", "in args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self): context = {'foo': 'bar'} def responseCls(connection,", "putheader_call_list) # 'a' as data, content length should be present", "'a' as data, content length should be present for method", "agreed to in writing, software # distributed under the License", "def responseCls(connection, response): connection.called = True self.assertEqual(connection.context, context) con =", "(data in GET is not # correct, but anyways) con.request('/test',", "raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # '' as", "distributed under the License is distributed on an \"AS IS\"", "reset self.assertTrue(con.called) self.assertEqual(con.context, {}) # Context should also be reset", "utf-8 -*- # 
Licensed to the Apache Software Foundation (ASF)", "= {'foo': 'bar'} def responseCls(connection, response): connection.called = True self.assertEqual(connection.context,", "['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='a') call_kwargs = con.connection.request.call_args[1]", "'post', 'put']: con.request('/test', method=method, data=None, headers={'Content-Length': '42'}, raw=True) putheader_call_list =", "con.request('/test', method=method, data=None) call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # ''", "con.host = 'example.com' con.port = 80 for method in ['GET',", "under the Apache License, Version 2.0 # (the \"License\"); you", "# the License. You may obtain a copy of the", "express or implied. # See the License for the specific", "call_kwargs['headers']) # 'a' as data, content length should be present", "this work for additional information regarding copyright ownership. # The", "Licensed to the Apache Software Foundation (ASF) under one or", "= (r'Non https connections are not allowed \\(use ' 'secure=True\\)')", "Mock() Connection.allow_insecure = True def tearDown(self): Connection.connect = self.originalConnect Connection.responseCls", "present (data in GET is not # correct, but anyways)", "in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None, headers={'Content-Length': '42'},", "con.pre_connect_hook.return_value = {}, {} con.cache_busting = False con.request(action='/path', params=params1) args,", "= Mock() con.pre_connect_hook.return_value = {}, {} con.cache_busting = False con.request(action='/path',", "is not # correct, but anyways) con.request('/test', method='GET', data='a') call_kwargs", "data=None) call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # '' as data,", "writing, software # distributed under the License is distributed on", "con._log_curl(method='HEAD', 
url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path')", "you may not use this file except in compliance with", "args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0]) con.request(action='/path', params=params2) args,", "<gh_stars>1-10 # -*- coding: utf-8 -*- # Licensed to the", "the License. You may obtain a copy of the License", "also be reset if a method inside request throws con", "con.responseCls = Mock(side_effect=ValueError()) try: con.request('/') except ValueError: pass self.assertEqual(con.context, {})", "'/test/path' body = None headers = {} con = LoggingConnection()", "response): connection.called = True self.assertEqual(connection.context, context) con = Connection() con.called", "correct, but anyways) con.request('/test', method='GET', data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'],", "# correct, but anyways) con.request('/test', method='GET', data='a') call_kwargs = con.connection.request.call_args[1]", "self.originalConnect = Connection.connect self.originalResponseCls = Connection.responseCls Connection.connect = Mock() Connection.responseCls", "con.connection = Mock() con.responseCls = responseCls con.set_context(context) self.assertEqual(con.context, context) con.request('/')", "in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='') call_kwargs =", "call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # '' as data, content", "data, content length should be present (data in GET is", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "Version 2.0 # (the \"License\"); you may not use this", "con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # '' as data, no", "'PUT', 'post', 'put']: con.request('/test', method=method, data='') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'],", "call from libcloud.test import unittest from libcloud.common.base import Connection from", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "in call_kwargs['headers']) # '' as data, no content length should", "request, do not touch Content-Length if present for method in", "to the Apache Software Foundation (ASF) under one or more§", "{'foo': 'bar'} def responseCls(connection, response): connection.called = True self.assertEqual(connection.context, context)", "self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params2) con.cache_busting = True con.request(action='/path', params=params1)", "self.assertEqual(args[0], params1) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in", "con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0]) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args", "from mock import Mock, call from libcloud.test import unittest from", "# GET method # No data, no content length should", "body=body, headers=headers) self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path') if __name__", "governing permissions and # limitations under the License. 
import sys", "def setUp(self): self.originalConnect = Connection.connect self.originalResponseCls = Connection.responseCls Connection.connect =", "test_content_length(self): con = Connection() con.connection = Mock() # GET method", "'0') # '' as data, content length should be present", "(the \"License\"); you may not use this file except in", "OR CONDITIONS OF ANY KIND, either express or implied. #", "call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def test_cache_busting(self): params1 = {'foo1':", "data, no content length should be present con.request('/test', method='GET', data='')", "Connection() con.connection = Mock() con.pre_connect_hook = Mock() con.pre_connect_hook.return_value = {},", "not in call_kwargs['headers']) # '' as data, no content length", "should be present (data in GET is not # correct,", "the License is distributed on an \"AS IS\" BASIS, #", "= Mock() Connection.responseCls = Mock() Connection.allow_insecure = True def tearDown(self):", "con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # '' as data, raw request,", "con.request('/test', method='GET', data=None) call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers'])", "the License. 
import sys import ssl from mock import Mock,", "False con.connection = Mock() con.responseCls = responseCls con.set_context(context) self.assertEqual(con.context, context)", "= Connection.responseCls Connection.connect = Mock() Connection.responseCls = Mock() Connection.allow_insecure =", "'example.com' con.port = 80 for method in ['GET', 'POST', 'PUT',", "con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def test_cache_busting(self): params1 = {'foo1': 'bar1', 'foo2':", "# POST, PUT method # No data, content length should", "except ValueError: pass self.assertEqual(con.context, {}) def test_log_curl(self): url = '/test/path'", "Connection() con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.connection.request = Mock(side_effect=ssl.SSLError())", "params1 = {'foo1': 'bar1', 'foo2': 'bar2'} params2 = [('foo1', 'bar1'),", "'1') def test_cache_busting(self): params1 = {'foo1': 'bar1', 'foo2': 'bar2'} params2", "import ssl from mock import Mock, call from libcloud.test import", "= True Connection(secure=False) Connection.allow_insecure = False expected_msg = (r'Non https", "con.request('/test', method='GET', data='') call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers'])", "self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # No data, raw request, do not touch", "law or agreed to in writing, software # distributed under", "# Context should also be reset if a method inside", "= con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # '' as data, raw", "import sys import ssl from mock import Mock, call from", "self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def test_cache_busting(self): params1 = {'foo1': 'bar1', 'foo2': 'bar2'}", "con = Connection() con.connection = Mock() con.pre_connect_hook = Mock() con.pre_connect_hook.return_value", "= 
Mock() con.pre_connect_hook = Mock() con.pre_connect_hook.return_value = {}, {} con.cache_busting", "kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params1) con.request(action='/path', params=params2)", "'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg, Connection, secure=False) def test_content_length(self): con = Connection()", "'bar2')] con = Connection() con.connection = Mock() con.pre_connect_hook = Mock()", "con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0],", "= True self.assertEqual(connection.context, context) con = Connection() con.called = False", "Connection() con.called = False con.connection = Mock() con.responseCls = responseCls", "self.assertEqual(args[0], params2) con.cache_busting = True con.request(action='/path', params=params1) args, kwargs =", "but anyways) con.request('/test', method='GET', data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1')", "True self.assertEqual(connection.context, context) con = Connection() con.called = False con.connection", "self.assertEqual(con.context, context) con.connection.request = Mock(side_effect=ssl.SSLError()) try: con.request('/') except ssl.SSLError: pass", "the NOTICE file distributed with # this work for additional", "pass self.assertEqual(con.context, {}) def test_log_curl(self): url = '/test/path' body =", "= con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # '' as data, content length", "args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self): context", "con.request('/') except ssl.SSLError: pass self.assertEqual(con.context, {}) con.connection = Mock() con.set_context(context)", "may 
obtain a copy of the License at # #", "the Apache License, Version 2.0 # (the \"License\"); you may", "' 'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg, Connection, secure=False) def test_content_length(self): con =", "= con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # No data, raw request, do", "'put']: con.request('/test', method=method, data='') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') #", "Mock() con.responseCls = responseCls con.set_context(context) self.assertEqual(con.context, context) con.request('/') # Context", "context) con = Connection() con.called = False con.connection = Mock()", "params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params2)", "# (the \"License\"); you may not use this file except", "'DELETE']: cmd = con._log_curl(method=method, url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# Licensed to the Apache Software Foundation (ASF) under one", "data='') call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # 'a'", "data=None, headers={'Content-Length': '42'}, raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list)", "= con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # '' as data,", "in compliance with # the License. 
You may obtain a", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "method=method, data='') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # No data,", "[('foo1', 'bar1'), ('foo2', 'bar2')] con = Connection() con.connection = Mock()", "test_cache_busting(self): params1 = {'foo1': 'bar1', 'foo2': 'bar2'} params2 = [('foo1',", "-X %s --compress http://example.com:80/test/path' % (method)) # Should use --head", "license agreements. See the NOTICE file distributed with # this", "body=body, headers=headers) self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' %", "Connection.allow_insecure = False expected_msg = (r'Non https connections are not", "'PUT', 'post', 'put']: con.request('/test', method=method, data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'],", "= con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # 'a' as data, content", "def test_dont_allow_insecure(self): Connection.allow_insecure = True Connection(secure=False) Connection.allow_insecure = False expected_msg", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "con.request('/test', method=method, data='') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # No", "'' as data, no content length should be present con.request('/test',", "contributor license agreements. 
See the NOTICE file distributed with #", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "method # No data, content length should be present for", "con.cache_busting = False con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting'", "= Mock(side_effect=ValueError()) try: con.request('/') except ValueError: pass self.assertEqual(con.context, {}) def", "'curl -i --head --compress http://example.com:80/test/path') if __name__ == '__main__': sys.exit(unittest.main())", "'put']: con.request('/test', method=method, data=None, headers={'Content-Length': '42'}, raw=True) putheader_call_list = con.connection.putheader.call_args_list", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "method='GET', data='') call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) #", "libcloud.test import unittest from libcloud.common.base import Connection from libcloud.common.base import", "GET is not # correct, but anyways) con.request('/test', method='GET', data='a')", "(method)) # Should use --head for head requests cmd =", "Connection from libcloud.common.base import LoggingConnection class ConnectionClassTestCase(unittest.TestCase): def setUp(self): self.originalConnect", "libcloud.common.base import LoggingConnection class ConnectionClassTestCase(unittest.TestCase): def setUp(self): self.originalConnect = Connection.connect", "file except in compliance with # the License. 
You may", "con.port = 80 for method in ['GET', 'POST', 'PUT', 'DELETE']:", "# 'a' as data, content length should be present for", "call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # 'a' as", "Connection.allow_insecure = True Connection(secure=False) Connection.allow_insecure = False expected_msg = (r'Non", "def test_content_length(self): con = Connection() con.connection = Mock() # GET", "this file except in compliance with # the License. You", "False con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0])", "Mock() con.pre_connect_hook = Mock() con.pre_connect_hook.return_value = {}, {} con.cache_busting =", "Mock(side_effect=ssl.SSLError()) try: con.request('/') except ssl.SSLError: pass self.assertEqual(con.context, {}) con.connection =", "{} con.cache_busting = False con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args", "if a method inside request throws con = Connection() con.connection", "should be present for method in ['POST', 'PUT', 'post', 'put']:", "'PUT', 'DELETE']: cmd = con._log_curl(method=method, url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl", "coding: utf-8 -*- # Licensed to the Apache Software Foundation", "language governing permissions and # limitations under the License. 
import", "in args[0]) self.assertEqual(args[0], params2) con.cache_busting = True con.request(action='/path', params=params1) args,", "not # correct, but anyways) con.request('/test', method='GET', data='a') call_kwargs =", "= Connection() con.called = False con.connection = Mock() con.responseCls =", "%s --compress http://example.com:80/test/path' % (method)) # Should use --head for", "self.assertEqual(con.context, {}) def test_log_curl(self): url = '/test/path' body = None", "from libcloud.common.base import LoggingConnection class ConnectionClassTestCase(unittest.TestCase): def setUp(self): self.originalConnect =", "in GET is not # correct, but anyways) con.request('/test', method='GET',", "Apache License, Version 2.0 # (the \"License\"); you may not", "con.connection = Mock() # GET method # No data, no", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "LoggingConnection() con.protocol = 'http' con.host = 'example.com' con.port = 80", "or implied. # See the License for the specific language", "False expected_msg = (r'Non https connections are not allowed \\(use", "no content length should be present con.request('/test', method='GET', data=None) call_kwargs", "data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def test_cache_busting(self): params1 =", "= False con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in", "mock import Mock, call from libcloud.test import unittest from libcloud.common.base", "Connection.connect self.originalResponseCls = Connection.responseCls Connection.connect = Mock() Connection.responseCls = Mock()", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "= 80 for method in ['GET', 'POST', 'PUT', 'DELETE']: cmd", "method # No data, no content length should be present", "\"License\"); you may not use this file except in compliance", "except in compliance with # the License. You may obtain", "url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path'", "raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # 'a' as", "if present for method in ['POST', 'PUT', 'post', 'put']: con.request('/test',", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "headers = {} con = LoggingConnection() con.protocol = 'http' con.host", "file to You under the Apache License, Version 2.0 #", "con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # 'a' as data, content length", "http://example.com:80/test/path' % (method)) # Should use --head for head requests", "for method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None,", "method in ['GET', 'POST', 'PUT', 'DELETE']: cmd = con._log_curl(method=method, url=url,", "in args[0]) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in", "regarding copyright ownership. 
# The ASF licenses this file to", "import unittest from libcloud.common.base import Connection from libcloud.common.base import LoggingConnection", "method=method, data=None) call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # '' as", "['GET', 'POST', 'PUT', 'DELETE']: cmd = con._log_curl(method=method, url=url, body=body, headers=headers)", "for method in ['GET', 'POST', 'PUT', 'DELETE']: cmd = con._log_curl(method=method,", "NOTICE file distributed with # this work for additional information", "= con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params2) con.cache_busting = True", "con = Connection() con.connection = Mock() # GET method #", "con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.connection.request = Mock(side_effect=ssl.SSLError()) try:", "No data, raw request, do not touch Content-Length if present", "for method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='a')", "['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='') call_kwargs = con.connection.request.call_args[1]", "'42'), putheader_call_list) # 'a' as data, content length should be", "= 'http' con.host = 'example.com' con.port = 80 for method", "'42'), putheader_call_list) # '' as data, raw request, do not", "# # Unless required by applicable law or agreed to", "one or more§ # contributor license agreements. 
See the NOTICE", "self.assertEqual(con.context, {}) con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.responseCls =", "'42'}, raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # ''", "= con._log_curl(method='HEAD', url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i --head --compress", "= Mock() # GET method # No data, no content", "= False con.connection = Mock() con.responseCls = responseCls con.set_context(context) self.assertEqual(con.context,", "self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params1) con.request(action='/path', params=params2) args, kwargs =", "file distributed with # this work for additional information regarding", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "self.assertTrue('Content-Length' not in call_kwargs['headers']) # '' as data, no content", "as data, raw request, do not touch Content-Length if present", "con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0],", "Software Foundation (ASF) under one or more§ # contributor license", "for additional information regarding copyright ownership. # The ASF licenses", "= Connection() con.connection = Mock() con.pre_connect_hook = Mock() con.pre_connect_hook.return_value =", "be reset if a method inside request throws con =", "# limitations under the License. 
import sys import ssl from", "= Connection() con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.connection.request =", "ASF licenses this file to You under the Apache License,", "sys import ssl from mock import Mock, call from libcloud.test", "the Apache Software Foundation (ASF) under one or more§ #", "= Mock() Connection.allow_insecure = True def tearDown(self): Connection.connect = self.originalConnect", "data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') # POST, PUT method", "No data, content length should be present for method in", "= 'example.com' con.port = 80 for method in ['GET', 'POST',", "con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params1) con.request(action='/path', params=params2) args, kwargs", "con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.responseCls = Mock(side_effect=ValueError()) try:", "= Connection() con.connection = Mock() # GET method # No", "data, raw request, do not touch Content-Length if present for", "def test_log_curl(self): url = '/test/path' body = None headers =", "implied. 
# See the License for the specific language governing", "from libcloud.common.base import Connection from libcloud.common.base import LoggingConnection class ConnectionClassTestCase(unittest.TestCase):", "self.assertTrue(con.called) self.assertEqual(con.context, {}) # Context should also be reset if", "args[0]) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0][len(params2)])", "headers={'Content-Length': '42'}, raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) #", "= Mock() con.set_context(context) self.assertEqual(con.context, context) con.connection.request = Mock(side_effect=ssl.SSLError()) try: con.request('/')", "method=method, data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def test_cache_busting(self): params1", "Connection.allow_insecure = True def tearDown(self): Connection.connect = self.originalConnect Connection.responseCls =", "touch Content-Length if present for method in ['POST', 'PUT', 'post',", "# Context should have been reset self.assertTrue(con.called) self.assertEqual(con.context, {}) #", "libcloud.common.base import Connection from libcloud.common.base import LoggingConnection class ConnectionClassTestCase(unittest.TestCase): def", "self.assertIn(call('Content-Length', '42'), putheader_call_list) # '' as data, raw request, do", "call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # No data, raw request,", "= '/test/path' body = None headers = {} con =", "try: con.request('/') except ssl.SSLError: pass self.assertEqual(con.context, {}) con.connection = Mock()", "context) con.responseCls = Mock(side_effect=ValueError()) try: con.request('/') except ValueError: pass self.assertEqual(con.context,", "= Connection.connect self.originalResponseCls 
= Connection.responseCls Connection.connect = Mock() Connection.responseCls =", "by applicable law or agreed to in writing, software #", "not use this file except in compliance with # the", "under the License. import sys import ssl from mock import", "self.assertTrue('cache-busting' in args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self): context = {'foo': 'bar'} def", "self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path') if __name__ == '__main__':", "present con.request('/test', method='GET', data=None) call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in", "expected_msg, Connection, secure=False) def test_content_length(self): con = Connection() con.connection =", "use --head for head requests cmd = con._log_curl(method='HEAD', url=url, body=body,", "allowed \\(use ' 'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg, Connection, secure=False) def test_content_length(self):", "in ['GET', 'POST', 'PUT', 'DELETE']: cmd = con._log_curl(method=method, url=url, body=body,", "con.cache_busting = True con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting'", "Context should also be reset if a method inside request", "url = '/test/path' body = None headers = {} con", "data='') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # No data, raw", "should have been reset self.assertTrue(con.called) self.assertEqual(con.context, {}) # Context should", "--compress http://example.com:80/test/path' % (method)) # Should use --head for head", "= {} con = LoggingConnection() con.protocol = 'http' con.host =", "'post', 'put']: con.request('/test', method=method, data='') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0')", "'bar1', 'foo2': 'bar2'} params2 = [('foo1', 'bar1'), ('foo2', 
'bar2')] con", "class ConnectionClassTestCase(unittest.TestCase): def setUp(self): self.originalConnect = Connection.connect self.originalResponseCls = Connection.responseCls", "and # limitations under the License. import sys import ssl", "method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='') call_kwargs", "'http' con.host = 'example.com' con.port = 80 for method in", "{}) con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.responseCls = Mock(side_effect=ValueError())", "(ASF) under one or more§ # contributor license agreements. See", "pass self.assertEqual(con.context, {}) con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.responseCls", "License. import sys import ssl from mock import Mock, call", "con._log_curl(method=method, url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i -X %s --compress", "Connection.responseCls = Connection.responseCls Connection.allow_insecure = True def test_dont_allow_insecure(self): Connection.allow_insecure =", "reset if a method inside request throws con = Connection()", "con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') # POST, PUT method # No data,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "= False expected_msg = (r'Non https connections are not allowed", "Unless required by applicable law or agreed to in writing,", "None headers = {} con = LoggingConnection() con.protocol = 'http'", "import Connection from libcloud.common.base import LoggingConnection class ConnectionClassTestCase(unittest.TestCase): def setUp(self):", "Connection.connect = self.originalConnect Connection.responseCls = Connection.responseCls Connection.allow_insecure = True def", "con = Connection() con.called = False con.connection = Mock() con.responseCls", "'0') # No data, raw request, do not touch Content-Length", "% (method)) # Should use --head for head 
requests cmd", "True con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0])", "the specific language governing permissions and # limitations under the", "Connection.connect = Mock() Connection.responseCls = Mock() Connection.allow_insecure = True def", "throws con = Connection() con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context)", "applicable law or agreed to in writing, software # distributed", "permissions and # limitations under the License. import sys import", "under one or more§ # contributor license agreements. See the", "'42'}, raw=True) putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # 'a'", "self.originalConnect Connection.responseCls = Connection.responseCls Connection.allow_insecure = True def test_dont_allow_insecure(self): Connection.allow_insecure", "= True def tearDown(self): Connection.connect = self.originalConnect Connection.responseCls = Connection.responseCls", "# contributor license agreements. See the NOTICE file distributed with", "self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' % (method)) #", "context) con.connection.request = Mock(side_effect=ssl.SSLError()) try: con.request('/') except ssl.SSLError: pass self.assertEqual(con.context,", "# this work for additional information regarding copyright ownership. 
#", "in writing, software # distributed under the License is distributed", "putheader_call_list = con.connection.putheader.call_args_list self.assertIn(call('Content-Length', '42'), putheader_call_list) # 'a' as data,", "con.request('/test', method='GET', data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') # POST,", "con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0]) con.request(action='/path',", "with # the License. You may obtain a copy of", "'foo2': 'bar2'} params2 = [('foo1', 'bar1'), ('foo2', 'bar2')] con =", "args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params2) con.cache_busting", "Mock() con.set_context(context) self.assertEqual(con.context, context) con.connection.request = Mock(side_effect=ssl.SSLError()) try: con.request('/') except", "length should be present for method in ['POST', 'PUT', 'post',", "this file to You under the Apache License, Version 2.0", "= None headers = {} con = LoggingConnection() con.protocol =", "requests cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i", "con.request('/test', method=method, data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') def test_cache_busting(self):", "True def test_dont_allow_insecure(self): Connection.allow_insecure = True Connection(secure=False) Connection.allow_insecure = False", "or more§ # contributor license agreements. 
See the NOTICE file", "'curl -i -X %s --compress http://example.com:80/test/path' % (method)) # Should", "'PUT', 'post', 'put']: con.request('/test', method=method, data=None) call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'],", "def test_context_is_reset_after_request_has_finished(self): context = {'foo': 'bar'} def responseCls(connection, response): connection.called", "# No data, no content length should be present con.request('/test',", "anyways) con.request('/test', method='GET', data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') #", "for method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None)", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "agreements. See the NOTICE file distributed with # this work", "method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None) call_kwargs", "not allowed \\(use ' 'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg, Connection, secure=False) def", "data=None) call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # ''", "'put']: con.request('/test', method=method, data=None) call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '0') #", "args[0]) self.assertEqual(args[0], params1) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting'", "con.connection.request = Mock(side_effect=ssl.SSLError()) try: con.request('/') except ssl.SSLError: pass self.assertEqual(con.context, {})", "as data, content length should be present (data in GET", "kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params2) con.cache_busting =", "copy of the License at # # 
http://www.apache.org/licenses/LICENSE-2.0 # #", "LoggingConnection class ConnectionClassTestCase(unittest.TestCase): def setUp(self): self.originalConnect = Connection.connect self.originalResponseCls =", "should be present con.request('/test', method='GET', data='') call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length'", "con.set_context(context) self.assertEqual(con.context, context) con.connection.request = Mock(side_effect=ssl.SSLError()) try: con.request('/') except ssl.SSLError:", "True Connection(secure=False) Connection.allow_insecure = False expected_msg = (r'Non https connections", "headers=headers) self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path') if __name__ ==", "ValueError: pass self.assertEqual(con.context, {}) def test_log_curl(self): url = '/test/path' body", "con = Connection() con.connection = Mock() con.set_context(context) self.assertEqual(con.context, context) con.connection.request", "length should be present con.request('/test', method='GET', data=None) call_kwargs = con.connection.request.call_args[1]", "= Connection.responseCls Connection.allow_insecure = True def test_dont_allow_insecure(self): Connection.allow_insecure = True", "been reset self.assertTrue(con.called) self.assertEqual(con.context, {}) # Context should also be", "['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None, headers={'Content-Length': '42'}, raw=True)", "the License for the specific language governing permissions and #", "no content length should be present con.request('/test', method='GET', data='') call_kwargs", "body = None headers = {} con = LoggingConnection() con.protocol", "See the NOTICE file distributed with # this work for", "con.pre_connect_hook = Mock() con.pre_connect_hook.return_value = {}, {} con.cache_busting = False", "https connections are not allowed \\(use ' 'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg,", 
"self.assertEqual(connection.context, context) con = Connection() con.called = False con.connection =", "either express or implied. # See the License for the", "Connection(secure=False) Connection.allow_insecure = False expected_msg = (r'Non https connections are", "['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None) call_kwargs = con.connection.request.call_args[1]", "to You under the Apache License, Version 2.0 # (the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "test_context_is_reset_after_request_has_finished(self): context = {'foo': 'bar'} def responseCls(connection, response): connection.called =", "Mock() con.pre_connect_hook.return_value = {}, {} con.cache_busting = False con.request(action='/path', params=params1)", "# '' as data, raw request, do not touch Content-Length", "length should be present (data in GET is not #", "limitations under the License. import sys import ssl from mock", "You under the Apache License, Version 2.0 # (the \"License\");", "con.connection = Mock() con.pre_connect_hook = Mock() con.pre_connect_hook.return_value = {}, {}", "con = LoggingConnection() con.protocol = 'http' con.host = 'example.com' con.port", "= Mock() con.set_context(context) self.assertEqual(con.context, context) con.responseCls = Mock(side_effect=ValueError()) try: con.request('/')", "context = {'foo': 'bar'} def responseCls(connection, response): connection.called = True", "are not allowed \\(use ' 'secure=True\\)') self.assertRaisesRegexp(ValueError, expected_msg, Connection, secure=False)", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params1) con.request(action='/path',", "args[0]) self.assertEqual(args[0], params2) con.cache_busting = True con.request(action='/path', params=params1) args, kwargs", "method='GET', data=None) call_kwargs = 
con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) #", "content length should be present con.request('/test', method='GET', data='') call_kwargs =", "'bar2'} params2 = [('foo1', 'bar1'), ('foo2', 'bar2')] con = Connection()", "self.assertEqual(call_kwargs['headers']['Content-Length'], '1') # POST, PUT method # No data, content", "The ASF licenses this file to You under the Apache", "in args[0]) self.assertEqual(args[0], params1) con.request(action='/path', params=params2) args, kwargs = con.pre_connect_hook.call_args", "80 for method in ['GET', 'POST', 'PUT', 'DELETE']: cmd =", "con.request('/') # Context should have been reset self.assertTrue(con.called) self.assertEqual(con.context, {})", "= con.connection.request.call_args[1] self.assertTrue('Content-Length' not in call_kwargs['headers']) # 'a' as data,", "def test_cache_busting(self): params1 = {'foo1': 'bar1', 'foo2': 'bar2'} params2 =", "{'foo1': 'bar1', 'foo2': 'bar2'} params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]", "self.assertEqual(call_kwargs['headers']['Content-Length'], '0') # '' as data, content length should be", "= Mock(side_effect=ssl.SSLError()) try: con.request('/') except ssl.SSLError: pass self.assertEqual(con.context, {}) con.connection", "method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None, headers={'Content-Length':", "= LoggingConnection() con.protocol = 'http' con.host = 'example.com' con.port =", "-i -X %s --compress http://example.com:80/test/path' % (method)) # Should use", "'a' as data, content length should be present (data in", "'' as data, content length should be present for method", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertFalse('cache-busting' in args[0]) self.assertEqual(args[0], params1)", "present for method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method,", "-*- # Licensed to 
the Apache Software Foundation (ASF) under", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "not touch Content-Length if present for method in ['POST', 'PUT',", "method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='a') call_kwargs", "connection.called = True self.assertEqual(connection.context, context) con = Connection() con.called =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "content length should be present (data in GET is not", "request throws con = Connection() con.connection = Mock() con.set_context(context) self.assertEqual(con.context,", "more§ # contributor license agreements. See the NOTICE file distributed", "be present con.request('/test', method='GET', data='') call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length' not", "-*- coding: utf-8 -*- # Licensed to the Apache Software", "License. You may obtain a copy of the License at", "= con._log_curl(method=method, url=url, body=body, headers=headers) self.assertEqual(cmd, 'curl -i -X %s", "You may obtain a copy of the License at #", "in call_kwargs['headers']) # 'a' as data, content length should be", "method inside request throws con = Connection() con.connection = Mock()", "{}) def test_log_curl(self): url = '/test/path' body = None headers", "con.protocol = 'http' con.host = 'example.com' con.port = 80 for", "work for additional information regarding copyright ownership. 
# The ASF", "Connection.responseCls Connection.connect = Mock() Connection.responseCls = Mock() Connection.allow_insecure = True", "Should use --head for head requests cmd = con._log_curl(method='HEAD', url=url,", "params=params2) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in args[0][len(params2)]) def test_context_is_reset_after_request_has_finished(self):", "for method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data='')", "con.set_context(context) self.assertEqual(con.context, context) con.request('/') # Context should have been reset", "should be present con.request('/test', method='GET', data=None) call_kwargs = con.connection.request.call_args[1] self.assertTrue('Content-Length'", "import Mock, call from libcloud.test import unittest from libcloud.common.base import", "= True con.request(action='/path', params=params1) args, kwargs = con.pre_connect_hook.call_args self.assertTrue('cache-busting' in", "method='GET', data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') # POST, PUT", "do not touch Content-Length if present for method in ['POST',", "No data, no content length should be present con.request('/test', method='GET'," ]
[ "import bake_model, clean_unused, export_ig_object, import_obj_folder ############################################# # Parse command line", "obj.select_set(True) bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels = { \"DIFFUSE\":", "utils import bake_model, clean_unused, export_ig_object, import_obj_folder ############################################# # Parse command", "{} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward = get_arg(sys.argv, \"--forward\", default=\"X\")", "X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir = get_arg(sys.argv, \"--source_dir\") if source_dir is None: raise", "import_axis_forward not in axis: raise ValueError(\"Axis forward not supported: {}", "exist_ok=True) model_id = os.path.basename(source_dir) ############################################# # Importing obj files from", "bpy.ops.object.select_all(action=\"DESELECT\") for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] new_uv =", "not in axis: raise ValueError(\"Axis up not supported: {} (should", "bpy.context.scene.objects: if not o.data.uv_layers: uv_unwrapped = False if not uv_unwrapped:", "if import_axis_up not in axis: raise ValueError(\"Axis up not supported:", "\"--source_dir\") if source_dir is None: raise ValueError(\"Source directory not specified.\")", "# Optional UV Unwrapping # This only needed if baking", "obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels = { \"DIFFUSE\": (2048, 32), \"ROUGHNESS\":", "get_arg(sys.argv, \"--up\", default=\"Z\") if import_axis_up not in axis: raise ValueError(\"Axis", "\"ROUGHNESS\": (1024, 16), \"METALLIC\": (1024, 16), \"NORMAL\": (1024, 16), }", "export_ig_object(dest_dir, save_material=not should_bake) ############################################# # Optional Texture Baking ############################################# if", "ValueError(\"Source directory not specified.\") 
dest_dir = get_arg(sys.argv, \"--dest_dir\") if dest_dir", "= get_arg(sys.argv, \"--dest_dir\") if dest_dir is None: raise ValueError(\"Destination directory", "ValueError(\"Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir", "bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False, False, True) bpy.ops.object.mode_set(mode=\"OBJECT\")", "Optional UV Unwrapping # This only needed if baking will", "= bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward) ############################################# #", "False, True) bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# # Export models ############################################# export_ig_object(dest_dir, save_material=not", "import_axis_up not in axis: raise ValueError(\"Axis up not supported: {}", "default=\"X\") if import_axis_forward not in axis: raise ValueError(\"Axis forward not", "line arguments ############################################# def get_arg(argv, flag, default=None): if flag in", "bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward) ############################################# # Optional", "# Importing obj files from source dir ############################################# for on", "############################################# # Optional Texture Baking ############################################# if should_bake: mat_dir =", "= (False, False, True) bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# # Export models #############################################", "not specified.\") dest_dir = get_arg(sys.argv, \"--dest_dir\") if 
dest_dir is None:", "(False, False, True) bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# # Export models ############################################# export_ig_object(dest_dir,", "source_dir = get_arg(sys.argv, \"--source_dir\") if source_dir is None: raise ValueError(\"Source", "uv_unwrapped = False if not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl = bpy.context.view_layer", "clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward) ############################################# # Optional UV Unwrapping", "dest_dir is None: raise ValueError(\"Destination directory not specified.\") os.makedirs(dest_dir, exist_ok=True)", "bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) for obj in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active", "# Export models ############################################# export_ig_object(dest_dir, save_material=not should_bake) ############################################# # Optional", "import bpy script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir)", "# Parse command line arguments ############################################# def get_arg(argv, flag, default=None):", "bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels = { \"DIFFUSE\": (2048, 32), \"ROUGHNESS\": (1024,", "uv_unwrapped = True for o in bpy.context.scene.objects: if not o.data.uv_layers:", "up=import_axis_up, forward=import_axis_forward) ############################################# # Optional UV Unwrapping # This only", "+ 1] return default should_bake = \"--bake\" in sys.argv axis", "clean_unused, export_ig_object, import_obj_folder ############################################# # Parse command line arguments #############################################", "source_dir is None: raise 
ValueError(\"Source directory not specified.\") dest_dir =", "dir ############################################# for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj)", "bpy.context.tool_settings.mesh_select_mode = (False, False, True) bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# # Export models", "default=\"Z\") if import_axis_up not in axis: raise ValueError(\"Axis up not", "o.data.uv_layers: uv_unwrapped = False if not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl =", "import_mat=True) for obj in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action=\"SELECT\")", "[\"X\", \"Y\", \"Z\", \"-X\", \"-Y\", \"-Z\"] import_axis_up = get_arg(sys.argv, \"--up\",", "be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir = get_arg(sys.argv, \"--source_dir\") if source_dir is", "directory not specified.\") dest_dir = get_arg(sys.argv, \"--dest_dir\") if dest_dir is", "argv: return argv[argv.index(flag) + 1] return default should_bake = \"--bake\"", "is None: raise ValueError(\"Destination directory not specified.\") os.makedirs(dest_dir, exist_ok=True) model_id", "not specified.\") os.makedirs(dest_dir, exist_ok=True) model_id = os.path.basename(source_dir) ############################################# # Importing", "= True for o in bpy.context.scene.objects: if not o.data.uv_layers: uv_unwrapped", "bpy.ops.object.join() channels = { \"DIFFUSE\": (2048, 32), \"ROUGHNESS\": (1024, 16),", "############################################# # Importing obj files from source dir ############################################# for", "uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl = bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for on in bpy.context.scene.objects.keys():", "\"-X\", \"-Y\", \"-Z\"] import_axis_up = get_arg(sys.argv, \"--up\", default=\"Z\") if 
import_axis_up", "\"METALLIC\": (1024, 16), \"NORMAL\": (1024, 16), } bake_model(mat_dir, channels, overwrite=True)", "should_bake = \"--bake\" in sys.argv axis = [\"X\", \"Y\", \"Z\",", "bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active = obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\")", "get_arg(argv, flag, default=None): if flag in argv: return argv[argv.index(flag) +", "\"-Z\"] import_axis_up = get_arg(sys.argv, \"--up\", default=\"Z\") if import_axis_up not in", "<reponame>mamadbiabon/iGibson<filename>igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py import os import sys import bpy script_dir = os.path.dirname(os.path.abspath(__file__))", "should_bake: uv_unwrapped = True for o in bpy.context.scene.objects: if not", "bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False, False, True) bpy.ops.object.mode_set(mode=\"OBJECT\") #############################################", "False if not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl = bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for", "is None: raise ValueError(\"Source directory not specified.\") dest_dir = get_arg(sys.argv,", "= get_arg(sys.argv, \"--forward\", default=\"X\") if import_axis_forward not in axis: raise", "(should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir = get_arg(sys.argv, \"--source_dir\") if source_dir", "forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir =", "Texture Baking ############################################# if should_bake: mat_dir = os.path.join(dest_dir, \"material\") os.makedirs(mat_dir,", "flag, default=None): if flag in argv: return argv[argv.index(flag) + 1]", "default should_bake = \"--bake\" in sys.argv axis = [\"X\", 
\"Y\",", "raise ValueError(\"Destination directory not specified.\") os.makedirs(dest_dir, exist_ok=True) model_id = os.path.basename(source_dir)", "directory not specified.\") os.makedirs(dest_dir, exist_ok=True) model_id = os.path.basename(source_dir) ############################################# #", "############################################# for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused()", "not o.data.uv_layers: uv_unwrapped = False if not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl", "ValueError(\"Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward", "script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir) from utils", "for o in bpy.context.scene.objects: if not o.data.uv_layers: uv_unwrapped = False", "new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active = obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66,", "in axis: raise ValueError(\"Axis forward not supported: {} (should be", "= { \"DIFFUSE\": (2048, 32), \"ROUGHNESS\": (1024, 16), \"METALLIC\": (1024,", "axis = [\"X\", \"Y\", \"Z\", \"-X\", \"-Y\", \"-Z\"] import_axis_up =", "# import_ig_object(model_root, import_mat=True) for obj in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active =", "Baking ############################################# if should_bake: mat_dir = os.path.join(dest_dir, \"material\") os.makedirs(mat_dir, exist_ok=True)", "import_axis_forward = get_arg(sys.argv, \"--forward\", default=\"X\") if import_axis_forward not in axis:", "bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward) 
############################################# # Optional UV", "\"DIFFUSE\": (2048, 32), \"ROUGHNESS\": (1024, 16), \"METALLIC\": (1024, 16), \"NORMAL\":", "specified.\") os.makedirs(dest_dir, exist_ok=True) model_id = os.path.basename(source_dir) ############################################# # Importing obj", "\"material\") os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) for obj", "baking will be performed ############################################# if should_bake: uv_unwrapped = True", "bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# # Export models ############################################# export_ig_object(dest_dir, save_material=not should_bake) #############################################", "flag in argv: return argv[argv.index(flag) + 1] return default should_bake", "= obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels = { \"DIFFUSE\": (2048, 32),", "import os import sys import bpy script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir", "for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\")", "32), \"ROUGHNESS\": (1024, 16), \"METALLIC\": (1024, 16), \"NORMAL\": (1024, 16),", "############################################# export_ig_object(dest_dir, save_material=not should_bake) ############################################# # Optional Texture Baking #############################################", "sys.argv axis = [\"X\", \"Y\", \"Z\", \"-X\", \"-Y\", \"-Z\"] import_axis_up", "island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False, False, True) bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# # Export", "= os.path.basename(source_dir) ############################################# # Importing obj files from source dir", "be performed 
############################################# if should_bake: uv_unwrapped = True for o", "source dir ############################################# for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on]", "os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir) from utils import bake_model,", "os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) for obj in", "= obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False,", "needed if baking will be performed ############################################# if should_bake: uv_unwrapped", "= \"--bake\" in sys.argv axis = [\"X\", \"Y\", \"Z\", \"-X\",", "export_ig_object, import_obj_folder ############################################# # Parse command line arguments ############################################# def", "in bpy.context.scene.objects: if not o.data.uv_layers: uv_unwrapped = False if not", "os.path.join(dest_dir, \"material\") os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) for", "if not o.data.uv_layers: uv_unwrapped = False if not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\")", "raise ValueError(\"Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up))", "bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active = obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode", "\"Y\", \"Z\", \"-X\", \"-Y\", \"-Z\"] import_axis_up = get_arg(sys.argv, \"--up\", default=\"Z\")", "def get_arg(argv, 
flag, default=None): if flag in argv: return argv[argv.index(flag)", "dest_dir = get_arg(sys.argv, \"--dest_dir\") if dest_dir is None: raise ValueError(\"Destination", "= [\"X\", \"Y\", \"Z\", \"-X\", \"-Y\", \"-Z\"] import_axis_up = get_arg(sys.argv,", "import sys import bpy script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir,", "exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) for obj in bpy.context.scene.objects:", "(should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward = get_arg(sys.argv, \"--forward\", default=\"X\") if", "This only needed if baking will be performed ############################################# if", "default=None): if flag in argv: return argv[argv.index(flag) + 1] return", "############################################# def get_arg(argv, flag, default=None): if flag in argv: return", "if should_bake: uv_unwrapped = True for o in bpy.context.scene.objects: if", "among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward = get_arg(sys.argv, \"--forward\", default=\"X\") if import_axis_forward not", "Importing obj files from source dir ############################################# for on in", "bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward)", "obj files from source dir ############################################# for on in bpy.context.scene.objects.keys():", "ValueError(\"Destination directory not specified.\") os.makedirs(dest_dir, exist_ok=True) model_id = os.path.basename(source_dir) #############################################", "in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up,", "if source_dir is None: raise ValueError(\"Source 
directory not specified.\") dest_dir", "1] return default should_bake = \"--bake\" in sys.argv axis =", "os import sys import bpy script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir =", "\"-Y\", \"-Z\"] import_axis_up = get_arg(sys.argv, \"--up\", default=\"Z\") if import_axis_up not", "import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward) ############################################# # Optional UV Unwrapping #", "bpy.ops.object.mode_set(mode=\"OBJECT\") vl = bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for on in bpy.context.scene.objects.keys(): obj", "(1024, 16), \"METALLIC\": (1024, 16), \"NORMAL\": (1024, 16), } bake_model(mat_dir,", "axis: raise ValueError(\"Axis up not supported: {} (should be among", "= os.path.join(dest_dir, \"material\") os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True)", "# This only needed if baking will be performed #############################################", "True) bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# # Export models ############################################# export_ig_object(dest_dir, save_material=not should_bake)", "\"--dest_dir\") if dest_dir is None: raise ValueError(\"Destination directory not specified.\")", "# Optional Texture Baking ############################################# if should_bake: mat_dir = os.path.join(dest_dir,", "in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels =", "arguments ############################################# def get_arg(argv, flag, default=None): if flag in argv:", "vl.objects.active = obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) 
bpy.context.tool_settings.mesh_select_mode =", "not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl = bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for on in", "be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward = get_arg(sys.argv, \"--forward\", default=\"X\") if import_axis_forward", "in axis: raise ValueError(\"Axis up not supported: {} (should be", "should_bake: mat_dir = os.path.join(dest_dir, \"material\") os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) #", "= get_arg(sys.argv, \"--source_dir\") if source_dir is None: raise ValueError(\"Source directory", "get_arg(sys.argv, \"--dest_dir\") if dest_dir is None: raise ValueError(\"Destination directory not", "= bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active = obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02)", "utils_dir = os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir) from utils import bake_model, clean_unused,", "############################################# # Parse command line arguments ############################################# def get_arg(argv, flag,", "up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward =", "will be performed ############################################# if should_bake: uv_unwrapped = True for", "on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active", "{} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir = get_arg(sys.argv, \"--source_dir\") if", "supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward = get_arg(sys.argv, \"--forward\",", "None: raise ValueError(\"Destination directory not 
specified.\") os.makedirs(dest_dir, exist_ok=True) model_id =", "get_arg(sys.argv, \"--source_dir\") if source_dir is None: raise ValueError(\"Source directory not", "models ############################################# export_ig_object(dest_dir, save_material=not should_bake) ############################################# # Optional Texture Baking", "not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir = get_arg(sys.argv,", "not in axis: raise ValueError(\"Axis forward not supported: {} (should", "forward=import_axis_forward) ############################################# # Optional UV Unwrapping # This only needed", "sys import bpy script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir, \"../../blender_utils\")", "= bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active = obj obj.select_set(True) bpy.ops.object.editmode_toggle()", "if flag in argv: return argv[argv.index(flag) + 1] return default", "if not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl = bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for on", "from utils import bake_model, clean_unused, export_ig_object, import_obj_folder ############################################# # Parse", "raise ValueError(\"Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward))", "obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward) #############################################", "should_bake) ############################################# # Optional Texture Baking ############################################# if should_bake: mat_dir", "in argv: return argv[argv.index(flag) + 1] return default should_bake =", "supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir = 
get_arg(sys.argv, \"--source_dir\")", "if should_bake: mat_dir = os.path.join(dest_dir, \"material\") os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path)", "for obj in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join()", "bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False, False, True) bpy.ops.object.mode_set(mode=\"OBJECT\") ############################################# #", "in sys.argv axis = [\"X\", \"Y\", \"Z\", \"-X\", \"-Y\", \"-Z\"]", "############################################# # Export models ############################################# export_ig_object(dest_dir, save_material=not should_bake) ############################################# #", "bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels = { \"DIFFUSE\": (2048,", "bake_model, clean_unused, export_ig_object, import_obj_folder ############################################# # Parse command line arguments", "files from source dir ############################################# for on in bpy.context.scene.objects.keys(): obj", "import_axis_up = get_arg(sys.argv, \"--up\", default=\"Z\") if import_axis_up not in axis:", "\"../../blender_utils\") sys.path.append(utils_dir) from utils import bake_model, clean_unused, export_ig_object, import_obj_folder #############################################", "for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id,", "if baking will be performed ############################################# if should_bake: uv_unwrapped =", "= bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on]", "save_material=not should_bake) 
############################################# # Optional Texture Baking ############################################# if should_bake:", "channels = { \"DIFFUSE\": (2048, 32), \"ROUGHNESS\": (1024, 16), \"METALLIC\":", "in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active =", "command line arguments ############################################# def get_arg(argv, flag, default=None): if flag", "not supported: {} (should be among X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward = get_arg(sys.argv,", "Unwrapping # This only needed if baking will be performed", "sys.path.append(utils_dir) from utils import bake_model, clean_unused, export_ig_object, import_obj_folder ############################################# #", "\"Z\", \"-X\", \"-Y\", \"-Z\"] import_axis_up = get_arg(sys.argv, \"--up\", default=\"Z\") if", "from source dir ############################################# for on in bpy.context.scene.objects.keys(): obj =", "############################################# # Optional UV Unwrapping # This only needed if", "= False if not uv_unwrapped: bpy.ops.object.mode_set(mode=\"OBJECT\") vl = bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\")", "16), \"METALLIC\": (1024, 16), \"NORMAL\": (1024, 16), } bake_model(mat_dir, channels,", "bpy script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir) from", "os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir) from utils import bake_model, clean_unused, export_ig_object, import_obj_folder", "argv[argv.index(flag) + 1] return default should_bake = \"--bake\" in sys.argv", "among X,Y,Z,-X,-Y,-Z)\".format(import_axis_forward)) source_dir = get_arg(sys.argv, \"--source_dir\") if source_dir is None:", "None: raise ValueError(\"Source directory not specified.\") dest_dir = 
get_arg(sys.argv, \"--dest_dir\")", "if dest_dir is None: raise ValueError(\"Destination directory not specified.\") os.makedirs(dest_dir,", "only needed if baking will be performed ############################################# if should_bake:", "obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False, False,", "{ \"DIFFUSE\": (2048, 32), \"ROUGHNESS\": (1024, 16), \"METALLIC\": (1024, 16),", "return argv[argv.index(flag) + 1] return default should_bake = \"--bake\" in", "return default should_bake = \"--bake\" in sys.argv axis = [\"X\",", "\"--up\", default=\"Z\") if import_axis_up not in axis: raise ValueError(\"Axis up", "performed ############################################# if should_bake: uv_unwrapped = True for o in", "(2048, 32), \"ROUGHNESS\": (1024, 16), \"METALLIC\": (1024, 16), \"NORMAL\": (1024,", "Parse command line arguments ############################################# def get_arg(argv, flag, default=None): if", "source_dir, up=import_axis_up, forward=import_axis_forward) ############################################# # Optional UV Unwrapping # This", "specified.\") dest_dir = get_arg(sys.argv, \"--dest_dir\") if dest_dir is None: raise", "UV Unwrapping # This only needed if baking will be", "\"--bake\" in sys.argv axis = [\"X\", \"Y\", \"Z\", \"-X\", \"-Y\",", "############################################# if should_bake: uv_unwrapped = True for o in bpy.context.scene.objects:", "get_arg(sys.argv, \"--forward\", default=\"X\") if import_axis_forward not in axis: raise ValueError(\"Axis", "o in bpy.context.scene.objects: if not o.data.uv_layers: uv_unwrapped = False if", "= get_arg(sys.argv, \"--up\", default=\"Z\") if import_axis_up not in axis: raise", "vl = bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for on in bpy.context.scene.objects.keys(): obj =", 
"obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action=\"SELECT\") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False, False, True)", "X,Y,Z,-X,-Y,-Z)\".format(import_axis_up)) import_axis_forward = get_arg(sys.argv, \"--forward\", default=\"X\") if import_axis_forward not in", "bpy.context.view_layer bpy.ops.object.select_all(action=\"DESELECT\") for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] new_uv", "\"--forward\", default=\"X\") if import_axis_forward not in axis: raise ValueError(\"Axis forward", "axis: raise ValueError(\"Axis forward not supported: {} (should be among", "on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir,", "bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active = obj", "model_id = os.path.basename(source_dir) ############################################# # Importing obj files from source", "os.path.basename(source_dir) ############################################# # Importing obj files from source dir #############################################", "############################################# if should_bake: mat_dir = os.path.join(dest_dir, \"material\") os.makedirs(mat_dir, exist_ok=True) #", "True for o in bpy.context.scene.objects: if not o.data.uv_layers: uv_unwrapped =", "raise ValueError(\"Source directory not specified.\") dest_dir = get_arg(sys.argv, \"--dest_dir\") if", "os.makedirs(dest_dir, exist_ok=True) model_id = os.path.basename(source_dir) ############################################# # Importing obj files", "(1024, 16), \"NORMAL\": (1024, 16), } bake_model(mat_dir, channels, overwrite=True) bpy.ops.wm.quit_blender()", "Optional Texture Baking ############################################# 
if should_bake: mat_dir = os.path.join(dest_dir, \"material\")", "# bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) for obj in bpy.context.scene.objects: obj.select_set(True)", "Export models ############################################# export_ig_object(dest_dir, save_material=not should_bake) ############################################# # Optional Texture", "= os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir) from utils import bake_model, clean_unused, export_ig_object,", "import_obj_folder ############################################# # Parse command line arguments ############################################# def get_arg(argv,", "obj = bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name=\"obj_uv\") vl.objects.active = obj obj.select_set(True)", "import_ig_object(model_root, import_mat=True) for obj in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active = obj", "bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels = {", "obj in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action=\"SELECT\") bpy.ops.object.join() channels", "if import_axis_forward not in axis: raise ValueError(\"Axis forward not supported:", "mat_dir = os.path.join(dest_dir, \"material\") os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root,", "= os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir, \"../../blender_utils\") sys.path.append(utils_dir) from utils import" ]
[ "response_key=None): body = self.api.put(url, json=body).json() # PUT requests may not", "[obj_class(self, res, loaded=True) for res in data if res] def", "loaded=True) for res in data if res] def _update(self, url,", "utilities to build API operation managers and objects on top", ":param loaded: prevent lazy-loading if set to True \"\"\" def", "return [] else: data = body if expect_single: data =", "dealing with relationships. \"\"\" try: return obj.id except AttributeError: return", "and provide CRUD operations for them. \"\"\" resource_class = None", "[] else: data = body if expect_single: data = [data]", "self.api.put(url, json=body).json() # PUT requests may not return a body", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "pretty much just a bag for attributes. :param manager: Manager", "data = body[response_key] except KeyError: return [] else: data =", "specific language governing permissions and limitations # under the License.", "# not use this file except in compliance with the", "alarms, etc. and provide CRUD operations for them. \"\"\" resource_class", "self.resource_class(self, body) def _list(self, url, response_key=None, obj_class=None, body=None, expect_single=False): try:", "in compliance with the License. You may obtain # a", "obj_class = self.resource_class if response_key: try: data = body[response_key] except", "an object. Resource might be tenant, user, etc. This is", "You may obtain # a copy of the License at", "def _update(self, url, body, response_key=None): body = self.api.put(url, json=body).json() #", "None def __init__(self, api): self.api = api @property def client(self):", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "of. 
\"\"\" import copy from ceilometerclient.apiclient import base from ceilometerclient.apiclient", "\"\"\" try: return obj.id except AttributeError: return obj class Manager(object):", "allowing both an object or an object's ID (UUID) as", "under the License is distributed on an \"AS IS\" BASIS,", "class Manager(object): \"\"\"Managers interact with a particular type of API.", "PUT requests may not return a body if body: return", "attributes :param loaded: prevent lazy-loading if set to True \"\"\"", "\"\"\" resource_class = None def __init__(self, api): self.api = api", "not resp.content: raise exc.HTTPNotFound body = resp.json() if obj_class is", "Reserved. # # Licensed under the Apache License, Version 2.0", "import exceptions from ceilometerclient import exc def getid(obj): \"\"\"Extracts object", "[data] return [obj_class(self, res, loaded=True) for res in data if", "= resp.json() if obj_class is None: obj_class = self.resource_class if", "url, response_key=None, obj_class=None, body=None, expect_single=False): try: resp = self.api.get(url) except", "this file except in compliance with the License. You may", "and objects on top of. \"\"\" import copy from ceilometerclient.apiclient", ":param manager: Manager object :param info: dictionary representing resource attributes", "if body: return self.resource_class(self, body) def _list(self, url, response_key=None, obj_class=None,", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "exc def getid(obj): \"\"\"Extracts object ID. Abstracts the common pattern", "for res in data if res] def _update(self, url, body,", "res in data if res] def _update(self, url, body, response_key=None):", "\"\"\"A resource represents a particular instance of an object. Resource", "if expect_single: data = [data] return [obj_class(self, res, loaded=True) for", "with samples, meters, alarms, etc. 
and provide CRUD operations for", "return self.api def _create(self, url, body): body = self.api.post(url, json=body).json()", "file except in compliance with the License. You may obtain", "exceptions from ceilometerclient import exc def getid(obj): \"\"\"Extracts object ID.", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "resource_class = None def __init__(self, api): self.api = api @property", "much just a bag for attributes. :param manager: Manager object", "under the Apache License, Version 2.0 (the \"License\"); you may", "def getid(obj): \"\"\"Extracts object ID. Abstracts the common pattern of", "raise exc.HTTPNotFound body = resp.json() if obj_class is None: obj_class", "is pretty much just a bag for attributes. :param manager:", "expect_single: data = [data] return [obj_class(self, res, loaded=True) for res", "dictionary representing resource attributes :param loaded: prevent lazy-loading if set", "build API operation managers and objects on top of. \"\"\"", "base from ceilometerclient.apiclient import exceptions from ceilometerclient import exc def", "try: data = body[response_key] except KeyError: return [] else: data", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "def client(self): \"\"\"Compatible with latest oslo-incubator.apiclient code.\"\"\" return self.api def", "obj_class is None: obj_class = self.resource_class if response_key: try: data", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "attributes. :param manager: Manager object :param info: dictionary representing resource", "to in writing, software # distributed under the License is", "ID. 
Abstracts the common pattern of allowing both an object", "body: return self.resource_class(self, body) def _delete(self, url): self.api.delete(url) class Resource(base.Resource):", "manager: Manager object :param info: dictionary representing resource attributes :param", "CRUD operations for them. \"\"\" resource_class = None def __init__(self,", "body) def _delete(self, url): self.api.delete(url) class Resource(base.Resource): \"\"\"A resource represents", "Copyright 2012 OpenStack Foundation # All Rights Reserved. # #", "top of. \"\"\" import copy from ceilometerclient.apiclient import base from", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "\"\"\" Base utilities to build API operation managers and objects", "data = [data] return [obj_class(self, res, loaded=True) for res in", "= api @property def client(self): \"\"\"Compatible with latest oslo-incubator.apiclient code.\"\"\"", "# PUT requests may not return a body if body:", "AttributeError: return obj class Manager(object): \"\"\"Managers interact with a particular", "Apache License, Version 2.0 (the \"License\"); you may # not", "them. \"\"\" resource_class = None def __init__(self, api): self.api =", "samples, meters, alarms, etc. and provide CRUD operations for them.", "ceilometerclient.apiclient import base from ceilometerclient.apiclient import exceptions from ceilometerclient import", "All Rights Reserved. 
# # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "= self.resource_class if response_key: try: data = body[response_key] except KeyError:", "return [obj_class(self, res, loaded=True) for res in data if res]", "distributed under the License is distributed on an \"AS IS\"", "if response_key: try: data = body[response_key] except KeyError: return []", "from ceilometerclient.apiclient import base from ceilometerclient.apiclient import exceptions from ceilometerclient", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. See the", "the License. \"\"\" Base utilities to build API operation managers", "an object's ID (UUID) as a parameter when dealing with", "expect_single=False): try: resp = self.api.get(url) except exceptions.NotFound: raise exc.HTTPNotFound if", "= None def __init__(self, api): self.api = api @property def", "both an object or an object's ID (UUID) as a", "not use this file except in compliance with the License.", "writing, software # distributed under the License is distributed on", "def _create(self, url, body): body = self.api.post(url, json=body).json() if body:", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "provide CRUD operations for them. \"\"\" resource_class = None def", "the License. You may obtain # a copy of the", "__init__(self, api): self.api = api @property def client(self): \"\"\"Compatible with", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "use this file except in compliance with the License. You", "works with samples, meters, alarms, etc. and provide CRUD operations", "requests may not return a body if body: return self.resource_class(self,", "= self.api.get(url) except exceptions.NotFound: raise exc.HTTPNotFound if not resp.content: raise", "ID (UUID) as a parameter when dealing with relationships. 
\"\"\"", "body): body = self.api.post(url, json=body).json() if body: return self.resource_class(self, body)", "under the License. \"\"\" Base utilities to build API operation", "type of API. It works with samples, meters, alarms, etc.", "might be tenant, user, etc. This is pretty much just", "return obj class Manager(object): \"\"\"Managers interact with a particular type", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "KIND, either express or implied. See the # License for", "url, body): body = self.api.post(url, json=body).json() if body: return self.resource_class(self,", "objects on top of. \"\"\" import copy from ceilometerclient.apiclient import", "ceilometerclient import exc def getid(obj): \"\"\"Extracts object ID. Abstracts the", "\"License\"); you may # not use this file except in", "response_key=None, obj_class=None, body=None, expect_single=False): try: resp = self.api.get(url) except exceptions.NotFound:", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "except KeyError: return [] else: data = body if expect_single:", "express or implied. See the # License for the specific", "self.api.delete(url) class Resource(base.Resource): \"\"\"A resource represents a particular instance of", "the Apache License, Version 2.0 (the \"License\"); you may #", "user, etc. This is pretty much just a bag for", "self.api def _create(self, url, body): body = self.api.post(url, json=body).json() if", "bag for attributes. :param manager: Manager object :param info: dictionary", "from ceilometerclient import exc def getid(obj): \"\"\"Extracts object ID. Abstracts", "See the # License for the specific language governing permissions", "obj class Manager(object): \"\"\"Managers interact with a particular type of", "operations for them. \"\"\" resource_class = None def __init__(self, api):", "body = resp.json() if obj_class is None: obj_class = self.resource_class", "= [data] return [obj_class(self, res, loaded=True) for res in data", "of API. 
It works with samples, meters, alarms, etc. and", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "object ID. Abstracts the common pattern of allowing both an", "= self.api.put(url, json=body).json() # PUT requests may not return a", "law or agreed to in writing, software # distributed under", "None: obj_class = self.resource_class if response_key: try: data = body[response_key]", "resp = self.api.get(url) except exceptions.NotFound: raise exc.HTTPNotFound if not resp.content:", "self.resource_class if response_key: try: data = body[response_key] except KeyError: return", "resource represents a particular instance of an object. Resource might", "\"\"\"Managers interact with a particular type of API. It works", "implied. See the # License for the specific language governing", "self.api = api @property def client(self): \"\"\"Compatible with latest oslo-incubator.apiclient", "body = self.api.put(url, json=body).json() # PUT requests may not return", ":param info: dictionary representing resource attributes :param loaded: prevent lazy-loading", "res, loaded=True) for res in data if res] def _update(self,", "obj.id except AttributeError: return obj class Manager(object): \"\"\"Managers interact with", "on top of. \"\"\" import copy from ceilometerclient.apiclient import base", "parameter when dealing with relationships. \"\"\" try: return obj.id except", "etc. This is pretty much just a bag for attributes.", "may not return a body if body: return self.resource_class(self, body)", "managers and objects on top of. \"\"\" import copy from", "language governing permissions and limitations # under the License. \"\"\"", "json=body).json() # PUT requests may not return a body if", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "limitations # under the License. 
\"\"\" Base utilities to build", "# # Licensed under the Apache License, Version 2.0 (the", "import base from ceilometerclient.apiclient import exceptions from ceilometerclient import exc", "a bag for attributes. :param manager: Manager object :param info:", "a particular instance of an object. Resource might be tenant,", "permissions and limitations # under the License. \"\"\" Base utilities", "in data if res] def _update(self, url, body, response_key=None): body", "url, body, response_key=None): body = self.api.put(url, json=body).json() # PUT requests", "to build API operation managers and objects on top of.", "if obj_class is None: obj_class = self.resource_class if response_key: try:", "is None: obj_class = self.resource_class if response_key: try: data =", "obtain # a copy of the License at # #", "for attributes. :param manager: Manager object :param info: dictionary representing", "Version 2.0 (the \"License\"); you may # not use this", "relationships. \"\"\" try: return obj.id except AttributeError: return obj class", "with a particular type of API. It works with samples,", "License for the specific language governing permissions and limitations #", "res] def _update(self, url, body, response_key=None): body = self.api.put(url, json=body).json()", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "API operation managers and objects on top of. \"\"\" import", "Manager object :param info: dictionary representing resource attributes :param loaded:", "data if res] def _update(self, url, body, response_key=None): body =", "if res] def _update(self, url, body, response_key=None): body = self.api.put(url,", "License. \"\"\" Base utilities to build API operation managers and", "Rights Reserved. 
# # Licensed under the Apache License, Version", "exc.HTTPNotFound if not resp.content: raise exc.HTTPNotFound body = resp.json() if", "oslo-incubator.apiclient code.\"\"\" return self.api def _create(self, url, body): body =", "return a body if body: return self.resource_class(self, body) def _delete(self,", "if not resp.content: raise exc.HTTPNotFound body = resp.json() if obj_class", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "and limitations # under the License. \"\"\" Base utilities to", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "a particular type of API. It works with samples, meters,", "self.api.get(url) except exceptions.NotFound: raise exc.HTTPNotFound if not resp.content: raise exc.HTTPNotFound", "or an object's ID (UUID) as a parameter when dealing", "OpenStack Foundation # All Rights Reserved. # # Licensed under", "body if expect_single: data = [data] return [obj_class(self, res, loaded=True)", "else: data = body if expect_single: data = [data] return", "Resource might be tenant, user, etc. This is pretty much", "be tenant, user, etc. This is pretty much just a", "as a parameter when dealing with relationships. \"\"\" try: return", "a body if body: return self.resource_class(self, body) def _delete(self, url):", "_update(self, url, body, response_key=None): body = self.api.put(url, json=body).json() # PUT", "compliance with the License. You may obtain # a copy", "particular type of API. It works with samples, meters, alarms,", "def _delete(self, url): self.api.delete(url) class Resource(base.Resource): \"\"\"A resource represents a", "getid(obj): \"\"\"Extracts object ID. Abstracts the common pattern of allowing", "represents a particular instance of an object. Resource might be", "the # License for the specific language governing permissions and", "# under the License. 
\"\"\" Base utilities to build API", "# # Unless required by applicable law or agreed to", "self.api.post(url, json=body).json() if body: return self.resource_class(self, body) def _list(self, url,", "resource attributes :param loaded: prevent lazy-loading if set to True", "loaded: prevent lazy-loading if set to True \"\"\" def to_dict(self):", "lazy-loading if set to True \"\"\" def to_dict(self): return copy.deepcopy(self._info)", "return self.resource_class(self, body) def _delete(self, url): self.api.delete(url) class Resource(base.Resource): \"\"\"A", "class Resource(base.Resource): \"\"\"A resource represents a particular instance of an", "\"\"\" import copy from ceilometerclient.apiclient import base from ceilometerclient.apiclient import", "tenant, user, etc. This is pretty much just a bag", "2.0 (the \"License\"); you may # not use this file", "Abstracts the common pattern of allowing both an object or", "the common pattern of allowing both an object or an", "particular instance of an object. Resource might be tenant, user,", "representing resource attributes :param loaded: prevent lazy-loading if set to", "by applicable law or agreed to in writing, software #", "common pattern of allowing both an object or an object's", "= self.api.post(url, json=body).json() if body: return self.resource_class(self, body) def _list(self,", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "import copy from ceilometerclient.apiclient import base from ceilometerclient.apiclient import exceptions", "just a bag for attributes. :param manager: Manager object :param", "etc. and provide CRUD operations for them. \"\"\" resource_class =", "object. Resource might be tenant, user, etc. 
This is pretty", "try: resp = self.api.get(url) except exceptions.NotFound: raise exc.HTTPNotFound if not", "body, response_key=None): body = self.api.put(url, json=body).json() # PUT requests may", "json=body).json() if body: return self.resource_class(self, body) def _list(self, url, response_key=None,", "response_key: try: data = body[response_key] except KeyError: return [] else:", "resp.content: raise exc.HTTPNotFound body = resp.json() if obj_class is None:", "This is pretty much just a bag for attributes. :param", "may obtain # a copy of the License at #", "body[response_key] except KeyError: return [] else: data = body if", "code.\"\"\" return self.api def _create(self, url, body): body = self.api.post(url,", "# All Rights Reserved. # # Licensed under the Apache", "api): self.api = api @property def client(self): \"\"\"Compatible with latest", "exceptions.NotFound: raise exc.HTTPNotFound if not resp.content: raise exc.HTTPNotFound body =", "Unless required by applicable law or agreed to in writing,", "\"\"\"Compatible with latest oslo-incubator.apiclient code.\"\"\" return self.api def _create(self, url,", "except exceptions.NotFound: raise exc.HTTPNotFound if not resp.content: raise exc.HTTPNotFound body", "url): self.api.delete(url) class Resource(base.Resource): \"\"\"A resource represents a particular instance", "It works with samples, meters, alarms, etc. and provide CRUD", "pattern of allowing both an object or an object's ID", "data = body if expect_single: data = [data] return [obj_class(self,", "applicable law or agreed to in writing, software # distributed", "body: return self.resource_class(self, body) def _list(self, url, response_key=None, obj_class=None, body=None,", "Manager(object): \"\"\"Managers interact with a particular type of API. It", "OF ANY KIND, either express or implied. 
See the #", "if body: return self.resource_class(self, body) def _delete(self, url): self.api.delete(url) class", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "\"\"\"Extracts object ID. Abstracts the common pattern of allowing both", "= body[response_key] except KeyError: return [] else: data = body", "an object or an object's ID (UUID) as a parameter", "from ceilometerclient.apiclient import exceptions from ceilometerclient import exc def getid(obj):", "def __init__(self, api): self.api = api @property def client(self): \"\"\"Compatible", "ceilometerclient.apiclient import exceptions from ceilometerclient import exc def getid(obj): \"\"\"Extracts", "KeyError: return [] else: data = body if expect_single: data", "API. It works with samples, meters, alarms, etc. and provide", "def _list(self, url, response_key=None, obj_class=None, body=None, expect_single=False): try: resp =", "with relationships. \"\"\" try: return obj.id except AttributeError: return obj", "body) def _list(self, url, response_key=None, obj_class=None, body=None, expect_single=False): try: resp", "body = self.api.post(url, json=body).json() if body: return self.resource_class(self, body) def", "either express or implied. See the # License for the", "Base utilities to build API operation managers and objects on", "copy from ceilometerclient.apiclient import base from ceilometerclient.apiclient import exceptions from", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "of an object. Resource might be tenant, user, etc. This", "latest oslo-incubator.apiclient code.\"\"\" return self.api def _create(self, url, body): body", "may # not use this file except in compliance with", "# License for the specific language governing permissions and limitations", "with the License. 
You may obtain # a copy of", "object's ID (UUID) as a parameter when dealing with relationships.", "object :param info: dictionary representing resource attributes :param loaded: prevent", "you may # not use this file except in compliance", "obj_class=None, body=None, expect_single=False): try: resp = self.api.get(url) except exceptions.NotFound: raise", "try: return obj.id except AttributeError: return obj class Manager(object): \"\"\"Managers", "_delete(self, url): self.api.delete(url) class Resource(base.Resource): \"\"\"A resource represents a particular", "of allowing both an object or an object's ID (UUID)", "Foundation # All Rights Reserved. # # Licensed under the", "body if body: return self.resource_class(self, body) def _delete(self, url): self.api.delete(url)", "_create(self, url, body): body = self.api.post(url, json=body).json() if body: return", "# Copyright 2012 OpenStack Foundation # All Rights Reserved. #", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "api @property def client(self): \"\"\"Compatible with latest oslo-incubator.apiclient code.\"\"\" return", "for them. \"\"\" resource_class = None def __init__(self, api): self.api", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "@property def client(self): \"\"\"Compatible with latest oslo-incubator.apiclient code.\"\"\" return self.api", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "client(self): \"\"\"Compatible with latest oslo-incubator.apiclient code.\"\"\" return self.api def _create(self,", "self.resource_class(self, body) def _delete(self, url): self.api.delete(url) class Resource(base.Resource): \"\"\"A resource", "= body if expect_single: data = [data] return [obj_class(self, res,", "interact with a particular type of API. 
It works with", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "return self.resource_class(self, body) def _list(self, url, response_key=None, obj_class=None, body=None, expect_single=False):", "not return a body if body: return self.resource_class(self, body) def", "exc.HTTPNotFound body = resp.json() if obj_class is None: obj_class =", "for the specific language governing permissions and limitations # under", "info: dictionary representing resource attributes :param loaded: prevent lazy-loading if", "meters, alarms, etc. and provide CRUD operations for them. \"\"\"", "except AttributeError: return obj class Manager(object): \"\"\"Managers interact with a", "except in compliance with the License. You may obtain #", "_list(self, url, response_key=None, obj_class=None, body=None, expect_single=False): try: resp = self.api.get(url)", "instance of an object. Resource might be tenant, user, etc.", "when dealing with relationships. \"\"\" try: return obj.id except AttributeError:", "prevent lazy-loading if set to True \"\"\" def to_dict(self): return", "License. You may obtain # a copy of the License", "resp.json() if obj_class is None: obj_class = self.resource_class if response_key:", "ANY KIND, either express or implied. See the # License", "# distributed under the License is distributed on an \"AS", "raise exc.HTTPNotFound if not resp.content: raise exc.HTTPNotFound body = resp.json()", "# Unless required by applicable law or agreed to in", "governing permissions and limitations # under the License. \"\"\" Base", "return obj.id except AttributeError: return obj class Manager(object): \"\"\"Managers interact", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "2012 OpenStack Foundation # All Rights Reserved. # # Licensed", "import exc def getid(obj): \"\"\"Extracts object ID. 
Abstracts the common", "Resource(base.Resource): \"\"\"A resource represents a particular instance of an object.", "operation managers and objects on top of. \"\"\" import copy", "body=None, expect_single=False): try: resp = self.api.get(url) except exceptions.NotFound: raise exc.HTTPNotFound", "(UUID) as a parameter when dealing with relationships. \"\"\" try:", "a parameter when dealing with relationships. \"\"\" try: return obj.id", "object or an object's ID (UUID) as a parameter when", "with latest oslo-incubator.apiclient code.\"\"\" return self.api def _create(self, url, body):", "or implied. See the # License for the specific language" ]
[ "'/root' def log(msg, *args): hookenv.log(msg.format(*args), hookenv.INFO) def log_err(msg, *args): hookenv.log(msg.format(*args),", "import HTTPError from urllib.request import urlopen import yaml from charmhelpers.core", "StandardRole.NETWORK_MANAGER) def enable_security_management(request): \"\"\" Enable security management for the given", "is under max_len. That is, shorten the string, inserting an", "result.returncode != 0: raise AzureError.get(stderr) if return_stderr: return stderr if", "applies to the entire credential, so will almost certainly be", "exception from the stack trace raise AzureError(stderr) from None def", "json.dumps(role_data)) except AzureError as e: if 'already exists' not in", "is, shorten the string, inserting an ellipsis where the removed", "is set to /home/ubuntu, whereas # during normal hook execution,", "hook execution, it's /root. Set it here to be consistent.", "credentials config if config['credentials']: try: creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return", "credential info from the exception message stderr = re.sub(app_id, '<app-id>',", "application. 
\"\"\" log('Enabling object storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request):", "juju trust' # try credentials config if config['credentials']: try: creds_data", "a full role name and ensure that the custom role", "by making an unauthorized request to the API and extracting", "check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes'] login_cli(creds_data)", "False # no creds provided status.blocked(no_creds_msg) return False def login_cli(creds_data):", "with: {}', request.instance_tags) _azure('vm', 'update', '--name', request.vm_name, '--resource-group', request.resource_group, '--set',", "SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER", "'create', '--assignee-object-id', msi, '--resource-group', request.resource_group, '--role', role) except AlreadyExistsAzureError: pass", "_assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): \"\"\" Enable block storage (disk) management", "import subprocess from base64 import b64decode from enum import Enum", "so will almost certainly be reused, so there's not much", "overridden. 
\"\"\" no_creds_msg = 'missing credentials; set credentials config' config", "= subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8')) creds_data =", "extracting the tenant ID from the WWW-Authenticate header in the", "the credential info from the exception message stderr = re.sub(app_id,", "import hookenv from charmhelpers.core.unitdata import kv from charms.layer import status", "def get_credentials(): \"\"\" Get the credentials from either the config", "'create', '--role-definition', json.dumps(role_data)) except AzureError as e: if 'already exists'", "= creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id) try: log('Forcing logout of Azure", "Identity') result = _azure('vm', 'identity', 'assign', '--name', request.vm_name, '--resource-group', request.resource_group)", "\"\"\" Use the credentials to authenticate the Azure CLI. \"\"\"", "hard-code most of these because with Juju, they're always the", "but the subscription ID applies to the entire credential, so", "import json import os import re import subprocess from base64", "the middle to ensure it is under max_len. That is,", "e.headers: log_err('Error getting tenant ID: missing WWW-Authenticate header') return None", "hooks, for some reason HOME is set to /home/ubuntu, whereas", "did not get \"unauthorized\" response') return None except HTTPError as", "from enum import Enum from math import ceil, floor from", "sub_id) except AzureError as e: # redact the credential info", "queries required to look them up are a PITA request.send_additional_metadata(", "s[:headl] + ellipsis + s[-taill:] return s def _get_tenant_id(subscription_id): \"\"\"", "= role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname role_data['AssignableScopes'][0] = scope try: log('Ensuring", "to show that they've been removed. 
\"\"\" if len(s) >", "try credentials config if config['credentials']: try: creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data)", "instance with: {}', request.instance_tags) _azure('vm', 'update', '--name', request.vm_name, '--resource-group', request.resource_group,", "Translate the subscription ID into a tenant ID by making", "json.loads(stdout) return stdout def _get_msi(vm_id): \"\"\" Get the Managed System", "2 headl, taill = floor(hl), ceil(hl) s = s[:headl] +", "def _get_msi(vm_id): \"\"\" Get the Managed System Identity for the", "'<app-pass>', stderr) stderr = re.sub(tenant_id, '<tenant-id>', stderr) # from None", "None def ensure_msi(request): msi = _get_msi(request.vm_id) if not msi: log('Enabling", "app_id = creds_data['application-id'] app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id = creds_data['subscription-id'] tenant_id", "inspection') _assign_role(request, _get_role('vm-reader')) def enable_network_management(request): \"\"\" Enable network management for", "they've been removed. \"\"\" if len(s) > max_len: hl =", "= e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if not match: log_err('Error", "b64decode from enum import Enum from math import ceil, floor", "def _azure(cmd, *args, return_stderr=False): \"\"\" Call the azure-cli tool. \"\"\"", "= ['az', cmd] cmd.extend(args) result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout", "except AzureError as e: if 'already exists' not in e.args[0]:", "_azure('group', 'show', '--name', request.resource_group) # hard-code most of these because", "from the metadata server. \"\"\" res_grp = _azure('group', 'show', '--name',", "from charmhelpers.core import hookenv from charmhelpers.core.unitdata import kv from charms.layer", "for the VM. 
\"\"\" vm_identities = kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id)", "HTTPError as e: if 'WWW-Authenticate' not in e.headers: log_err('Error getting", "role limit. \"\"\" known_roles = kv().get('charm.azure.roles', {}) if role_name in", "subscription ID into a tenant ID by making an unauthorized", "the given application. \"\"\" log('Enabling object storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER)", "debugging hooks, for some reason HOME is set to /home/ubuntu,", "for the given application. \"\"\" log('Enabling block storage management') _assign_role(request,", "'{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error getting tenant ID: did not get", "hl = (max_len - len(ellipsis)) / 2 headl, taill =", "= 64 MAX_POLICY_NAME_LEN = 128 class StandardRole(Enum): NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7'", "= _get_msi(request.vm_id) try: _azure('role', 'assignment', 'create', '--assignee-object-id', msi, '--resource-group', request.resource_group,", "class representing an error returned from the azure-cli tool. 
\"\"\"", "header') return None www_auth = e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth)", "_azure('vm', 'identity', 'assign', '--name', request.vm_name, '--resource-group', request.resource_group) vm_identities = kv().get('charm.azure.vm-identities',", "e.stderr.decode('utf8'): raise no_creds_msg = 'missing credentials access; grant with: juju", "'<app-id>', e.args[0]) stderr = re.sub(app_pass, '<app-pass>', stderr) stderr = re.sub(tenant_id,", "AzureError(Exception): \"\"\" Exception class representing an error returned from the", "'--service-principal', '-u', app_id, '-p', app_pass, '-t', tenant_id) # cache the", "as e: # redact the credential info from the exception", "tag_instance(request): \"\"\" Tag the given instance with the given tags.", "MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN = 128 class", "Call the azure-cli tool. \"\"\" cmd = ['az', cmd] cmd.extend(args)", "'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN = 128", "# cache the subscription ID for use in roles kv().set('charm.azure.sub-id',", "\"\"\" log('Enabling security management') _assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): \"\"\" Enable", "the azure-cli tool. \"\"\" @classmethod def get(cls, message): \"\"\" Factory", "juju trust not available except subprocess.CalledProcessError as e: if 'permission", "tenant ID by making an unauthorized request to the API", "return True except FileNotFoundError: pass # juju trust not available", "tags. 
\"\"\" log('Tagging instance with: {}', request.instance_tags) _azure('vm', 'update', '--name',", "'definition', 'create', '--role-definition', json.dumps(role_data)) except AzureError as e: if 'already", "= re.sub(tenant_id, '<tenant-id>', stderr) # from None suppresses the previous", "either an instance of this class or a meta-subclass for", "to a specific subscription ID, but the subscription ID applies", "trust' # try credentials config if config['credentials']: try: creds_data =", "exception message stderr = re.sub(app_id, '<app-id>', e.args[0]) stderr = re.sub(app_pass,", "\"\"\" log('Tagging instance with: {}', request.instance_tags) _azure('vm', 'update', '--name', request.vm_name,", "www_auth) return None return match.group(1) def _azure(cmd, *args, return_stderr=False): \"\"\"", "*args): hookenv.log(msg.format(*args), hookenv.INFO) def log_err(msg, *args): hookenv.log(msg.format(*args), hookenv.ERROR) def get_credentials():", "msi = _get_msi(request.vm_id) if not msi: log('Enabling Managed Service Identity')", "\"\"\" log('Enabling DNS management') _assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request): \"\"\" Enable", "for the given application. \"\"\" log('Enabling instance inspection') _assign_role(request, _get_role('vm-reader'))", "\"\"\" @classmethod def get(cls, message): \"\"\" Factory method to create", "{}) msi = vm_identities[request.vm_id] = result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities) log('Instance MSI", "def cleanup(): \"\"\" Perform cleanup. 
\"\"\" pass # Internal helpers", "AzureError(stderr) from None def ensure_msi(request): msi = _get_msi(request.vm_id) if not", "get(cls, message): \"\"\" Factory method to create either an instance", "e: if 'WWW-Authenticate' not in e.headers: log_err('Error getting tenant ID:", "\"\"\" if 'already exists' in message: return AlreadyExistsAzureError(message) return AzureError(message)", "64 MAX_POLICY_NAME_LEN = 128 class StandardRole(Enum): NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER", "\"\"\" url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error getting tenant", "in the middle to ensure it is under max_len. That", "When debugging hooks, for some reason HOME is set to", "res_grp = _azure('group', 'show', '--name', request.resource_group) # hard-code most of", "API and extracting the tenant ID from the WWW-Authenticate header", "role_data['Name'] = role_fullname role_data['AssignableScopes'][0] = scope try: log('Ensuring role {}',", "'--set', *['tags.{}={}'.format(tag, value) for tag, value in request.instance_tags.items()]) def enable_instance_inspection(request):", "the Azure CLI. \"\"\" app_id = creds_data['application-id'] app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>']", "enable_instance_inspection(request): \"\"\" Enable instance inspection access for the given application.", "in e.stderr.decode('utf8'): raise no_creds_msg = 'missing credentials access; grant with:", "object storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): \"\"\" Enable object", "the credentials to authenticate the Azure CLI. \"\"\" app_id =", "AlreadyExistsAzureError(message) return AzureError(message) class AlreadyExistsAzureError(AzureError): \"\"\" Meta-error subclass of AzureError", "\"\"\" Get additional info about the requesting instance via the", "hitting the 2k custom role limit. 
\"\"\" known_roles = kv().get('charm.azure.roles',", "'WWW-Authenticate' not in e.headers: log_err('Error getting tenant ID: missing WWW-Authenticate", "\"\"\" Factory method to create either an instance of this", "tenant ID from the WWW-Authenticate header in the error response.", "urlopen(url) log_err('Error getting tenant ID: did not get \"unauthorized\" response')", "denied' not in e.stderr.decode('utf8'): raise no_creds_msg = 'missing credentials access;", "vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def tag_instance(request): \"\"\" Tag the", "log_err('Error getting tenant ID: did not get \"unauthorized\" response') return", "# from None suppresses the previous exception from the stack", "'assignment', 'create', '--assignee-object-id', msi, '--resource-group', request.resource_group, '--role', role) except AlreadyExistsAzureError:", "Managed Service Identity') result = _azure('vm', 'identity', 'assign', '--name', request.vm_name,", "previous exception from the stack trace raise AzureError(stderr) from None", "subclass of AzureError representing something already existing. \"\"\" pass def", "required to look them up are a PITA request.send_additional_metadata( resource_group_location=res_grp['location'],", "\"\"\" cmd = ['az', cmd] cmd.extend(args) result = subprocess.run(cmd, stdout=subprocess.PIPE,", "= scope try: log('Ensuring role {}', role_fullname) _azure('role', 'definition', 'create',", "StandardRole): role = role.value msi = _get_msi(request.vm_id) try: _azure('role', 'assignment',", "returned from the azure-cli tool. \"\"\" @classmethod def get(cls, message):", "\"\"\" Perform cleanup. 
\"\"\" pass # Internal helpers class AzureError(Exception):", "vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def tag_instance(request): \"\"\" Tag the given", "hookenv.INFO) def log_err(msg, *args): hookenv.log(msg.format(*args), hookenv.ERROR) def get_credentials(): \"\"\" Get", "def enable_network_management(request): \"\"\" Enable network management for the given application.", "application. \"\"\" log('Enabling instance inspection') _assign_role(request, _get_role('vm-reader')) def enable_network_management(request): \"\"\"", "floor(hl), ceil(hl) s = s[:headl] + ellipsis + s[-taill:] return", "msi) def send_additional_metadata(request): \"\"\" Get additional info about the requesting", "enable_dns_management(request): \"\"\" Enable DNS management for the given application. \"\"\"", "in e.args[0]: raise known_roles[role_name] = role_fullname return role_fullname def _assign_role(request,", "resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def tag_instance(request): \"\"\" Tag", "security_group_name='juju-internal-nsg', ) def tag_instance(request): \"\"\" Tag the given instance with", "StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): \"\"\" Enable block storage (disk) management for", "the subscription ID for use in roles kv().set('charm.azure.sub-id', sub_id) except", "MAX_POLICY_NAME_LEN = 128 class StandardRole(Enum): NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER =", "= 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When debugging hooks, for some reason HOME", "vm_identities.get(vm_id) def _get_role(role_name): \"\"\" Translate short role name into a", "tool. 
\"\"\" @classmethod def get(cls, message): \"\"\" Factory method to", "_azure('logout') except AzureError: pass try: log('Logging in to Azure CLI')", "AzureError: pass try: log('Logging in to Azure CLI') _azure('login', '--service-principal',", "log('Enabling object storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): \"\"\" Enable", "request to the API and extracting the tenant ID from", "Enable instance inspection access for the given application. \"\"\" log('Enabling", "except Exception: status.blocked('invalid value for credentials config') return False #", "= re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if not match: log_err('Error getting tenant ID:", "were to show that they've been removed. \"\"\" if len(s)", "try: urlopen(url) log_err('Error getting tenant ID: did not get \"unauthorized\"", "= kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text()) role_fullname =", "subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def tag_instance(request): \"\"\" Tag the given instance", "return_stderr: return stderr if stdout: stdout = json.loads(stdout) return stdout", "{}) return vm_identities.get(vm_id) def _get_role(role_name): \"\"\" Translate short role name", "{}', role_fullname) _azure('role', 'definition', 'create', '--role-definition', json.dumps(role_data)) except AzureError as", "enable_network_management(request): \"\"\" Enable network management for the given application. \"\"\"", "access for the given application. 
\"\"\" log('Enabling object storage read')", "AzureError as e: # redact the credential info from the", "config = hookenv.config() # try to use Juju's trust feature", "block storage management') _assign_role(request, _get_role('disk-manager')) def enable_dns_management(request): \"\"\" Enable DNS", "= kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id) def _get_role(role_name): \"\"\" Translate short", "full role name and ensure that the custom role is", "getting tenant ID: missing WWW-Authenticate header') return None www_auth =", "management for the given application. \"\"\" log('Enabling network management') _assign_role(request,", "and ensure that the custom role is loaded. The custom", "def _elide(s, max_len, ellipsis='...'): \"\"\" Elide s in the middle", "object store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup(): \"\"\" Perform cleanup.", "result = _azure('vm', 'identity', 'assign', '--name', request.vm_name, '--resource-group', request.resource_group) vm_identities", "during normal hook execution, it's /root. Set it here to", "WWW-Authenticate header in the error response. \"\"\" url = ('https://management.azure.com/subscriptions/'", "the requesting instance via the API that isn't available from", "ensure_msi(request): msi = _get_msi(request.vm_id) if not msi: log('Enabling Managed Service", "None www_auth = e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if not", "= (max_len - len(ellipsis)) / 2 headl, taill = floor(hl),", "stdout: stdout = json.loads(stdout) return stdout def _get_msi(vm_id): \"\"\" Get", "to the entire credential, so will almost certainly be reused,", "hook tool. Prefers the config so that it can be", "already existing. 
\"\"\" pass def _elide(s, max_len, ellipsis='...'): \"\"\" Elide", "sub_id = creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id) try: log('Forcing logout of", "try: log('Ensuring role {}', role_fullname) _azure('role', 'definition', 'create', '--role-definition', json.dumps(role_data))", "def tag_instance(request): \"\"\" Tag the given instance with the given", "# hard-code most of these because with Juju, they're always", "Exception: status.blocked('invalid value for credentials config') return False # no", "removed characters were to show that they've been removed. \"\"\"", "it can be overridden. \"\"\" no_creds_msg = 'missing credentials; set", "normal hook execution, it's /root. Set it here to be", "from urllib.request import urlopen import yaml from charmhelpers.core import hookenv", "_get_msi(request.vm_id) try: _azure('role', 'assignment', 'create', '--assignee-object-id', msi, '--resource-group', request.resource_group, '--role',", "log('Enabling network management') _assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request): \"\"\" Enable security", "2k custom role limit. \"\"\" known_roles = kv().get('charm.azure.roles', {}) if", "AzureError.get(stderr) if return_stderr: return stderr if stdout: stdout = json.loads(stdout)", "requesting instance via the API that isn't available from the", "log('Ensuring role {}', role_fullname) _azure('role', 'definition', 'create', '--role-definition', json.dumps(role_data)) except", "tenant ID: missing WWW-Authenticate header') return None www_auth = e.headers['WWW-Authenticate']", "= vm_identities[request.vm_id] = result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities) log('Instance MSI is: {}',", "= 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER =", "management for the given application. 
\"\"\" log('Enabling DNS management') _assign_role(request,", "except AzureError: pass try: log('Logging in to Azure CLI') _azure('login',", "find in {}', www_auth) return None return match.group(1) def _azure(cmd,", "azure-cli tool. \"\"\" cmd = ['az', cmd] cmd.extend(args) result =", "def ensure_msi(request): msi = _get_msi(request.vm_id) if not msi: log('Enabling Managed", "creds['credential']['attributes'] login_cli(creds_data) return True except FileNotFoundError: pass # juju trust", "in known_roles: return known_roles[role_name] sub_id = kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name))", "not in e.headers: log_err('Error getting tenant ID: missing WWW-Authenticate header')", "the custom role is loaded. The custom roles have to", "scope try: log('Ensuring role {}', role_fullname) _azure('role', 'definition', 'create', '--role-definition',", "'<tenant-id>', stderr) # from None suppresses the previous exception from", "status.blocked('invalid value for credentials config') return False # no creds", "name into a full role name and ensure that the", "pass # juju trust not available except subprocess.CalledProcessError as e:", "to Azure CLI') _azure('login', '--service-principal', '-u', app_id, '-p', app_pass, '-t',", "json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname", "to the API and extracting the tenant ID from the", "MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN = 128 class StandardRole(Enum): NETWORK_MANAGER =", "headl, taill = floor(hl), ceil(hl) s = s[:headl] + ellipsis", "ensure that the custom role is loaded. The custom roles", "max_len. That is, shorten the string, inserting an ellipsis where", "msi = _get_msi(request.vm_id) try: _azure('role', 'assignment', 'create', '--assignee-object-id', msi, '--resource-group',", "application. 
\"\"\" log('Enabling block storage management') _assign_role(request, _get_role('disk-manager')) def enable_dns_management(request):", "None return match.group(1) def _azure(cmd, *args, return_stderr=False): \"\"\" Call the", "from None def ensure_msi(request): msi = _get_msi(request.vm_id) if not msi:", "loaded. The custom roles have to be applied to a", "<reponame>freyes/charm-azure-integrator import json import os import re import subprocess from", "\"\"\" Get the Managed System Identity for the VM. \"\"\"", "= '/root' def log(msg, *args): hookenv.log(msg.format(*args), hookenv.INFO) def log_err(msg, *args):", "the Managed System Identity for the VM. \"\"\" vm_identities =", "value) for tag, value in request.instance_tags.items()]) def enable_instance_inspection(request): \"\"\" Enable", "= role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname role_data['AssignableScopes'][0] =", "for the given application. \"\"\" log('Enabling security management') _assign_role(request, StandardRole.SECURITY_MANAGER)", "log('Enabling block storage management') _assign_role(request, _get_role('disk-manager')) def enable_dns_management(request): \"\"\" Enable", "ID by making an unauthorized request to the API and", "= _get_msi(request.vm_id) if not msi: log('Enabling Managed Service Identity') result", "import status ENTITY_PREFIX = 'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN =", "middle to ensure it is under max_len. 
That is, shorten", "except HTTPError as e: if 'WWW-Authenticate' not in e.headers: log_err('Error", "role_data['AssignableScopes'][0] = scope try: log('Ensuring role {}', role_fullname) _azure('role', 'definition',", "return False # no creds provided status.blocked(no_creds_msg) return False def", "def enable_dns_management(request): \"\"\" Enable DNS management for the given application.", "feature try: result = subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds =", "= os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN = 128 class StandardRole(Enum):", "try: log('Logging in to Azure CLI') _azure('login', '--service-principal', '-u', app_id,", "Enum from math import ceil, floor from pathlib import Path", "info from the exception message stderr = re.sub(app_id, '<app-id>', e.args[0])", "given application. \"\"\" log('Enabling network management') _assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request):", "hookenv.log(msg.format(*args), hookenv.ERROR) def get_credentials(): \"\"\" Get the credentials from either", "stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip() if result.returncode", "the API that isn't available from the metadata server. \"\"\"", "not much danger in hitting the 2k custom role limit.", "application. 
\"\"\" log('Enabling security management') _assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): \"\"\"", "exists' not in e.args[0]: raise known_roles[role_name] = role_fullname return role_fullname", "if 'WWW-Authenticate' not in e.headers: log_err('Error getting tenant ID: missing", "= role_fullname return role_fullname def _assign_role(request, role): if isinstance(role, StandardRole):", "= role.value msi = _get_msi(request.vm_id) try: _azure('role', 'assignment', 'create', '--assignee-object-id',", "app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id = creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id) try:", "(max_len - len(ellipsis)) / 2 headl, taill = floor(hl), ceil(hl)", "ID, but the subscription ID applies to the entire credential,", "scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname role_data['AssignableScopes'][0] = scope try:", "the given application. \"\"\" log('Enabling block storage management') _assign_role(request, _get_role('disk-manager'))", "len(ellipsis)) / 2 headl, taill = floor(hl), ceil(hl) s =", "instance inspection') _assign_role(request, _get_role('vm-reader')) def enable_network_management(request): \"\"\" Enable network management", "Identity for the VM. \"\"\" vm_identities = kv().get('charm.azure.vm-identities', {}) return", "HOME is set to /home/ubuntu, whereas # during normal hook", "with the given tags. \"\"\" log('Tagging instance with: {}', request.instance_tags)", "given instance with the given tags. 
\"\"\" log('Tagging instance with:", "msi = vm_identities[request.vm_id] = result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities) log('Instance MSI is:", "applied to a specific subscription ID, but the subscription ID", "an unauthorized request to the API and extracting the tenant", "WWW-Authenticate header') return None www_auth = e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"',", "the config or the hook tool. Prefers the config so", "reused, so there's not much danger in hitting the 2k", "enable_security_management(request): \"\"\" Enable security management for the given application. \"\"\"", "_elide(s, max_len, ellipsis='...'): \"\"\" Elide s in the middle to", "from charms.layer import status ENTITY_PREFIX = 'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID']", "ellipsis='...'): \"\"\" Elide s in the middle to ensure it", "no_creds_msg = 'missing credentials access; grant with: juju trust' #", "\"\"\" log('Enabling object storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): \"\"\"", "\"\"\" pass def _elide(s, max_len, ellipsis='...'): \"\"\" Elide s in", "Enable network management for the given application. \"\"\" log('Enabling network", "role {}', role_fullname) _azure('role', 'definition', 'create', '--role-definition', json.dumps(role_data)) except AzureError", "return role_fullname def _assign_role(request, role): if isinstance(role, StandardRole): role =", "the given application. \"\"\" log('Enabling security management') _assign_role(request, StandardRole.SECURITY_MANAGER) def", "app_id, '-p', app_pass, '-t', tenant_id) # cache the subscription ID", "inspection access for the given application. 
\"\"\" log('Enabling instance inspection')", "def _assign_role(request, role): if isinstance(role, StandardRole): role = role.value msi", "import re import subprocess from base64 import b64decode from enum", "trust feature try: result = subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds", "\"\"\" Enable object storage management for the given application. \"\"\"", "for the given application. \"\"\" log('Enabling object store management') _assign_role(request,", "role name and ensure that the custom role is loaded.", "AzureError as e: if 'already exists' not in e.args[0]: raise", "roles kv().set('charm.azure.sub-id', sub_id) except AzureError as e: # redact the", "stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes'] login_cli(creds_data) return True", "additional info about the requesting instance via the API that", "use in roles kv().set('charm.azure.sub-id', sub_id) except AzureError as e: #", "stderr = re.sub(tenant_id, '<tenant-id>', stderr) # from None suppresses the", "creds provided status.blocked(no_creds_msg) return False def login_cli(creds_data): \"\"\" Use the", "exists' in message: return AlreadyExistsAzureError(message) return AzureError(message) class AlreadyExistsAzureError(AzureError): \"\"\"", "s def _get_tenant_id(subscription_id): \"\"\" Translate the subscription ID into a", "www_auth = e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if not match:", "pathlib import Path from urllib.error import HTTPError from urllib.request import", "redact the credential info from the exception message stderr =", "Enable object storage management for the given application. 
\"\"\" log('Enabling", "from None suppresses the previous exception from the stack trace", "login_cli(creds_data) return True except FileNotFoundError: pass # juju trust not", "vm_identities = kv().get('charm.azure.vm-identities', {}) msi = vm_identities[request.vm_id] = result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities',", "ensure it is under max_len. That is, shorten the string,", "None except HTTPError as e: if 'WWW-Authenticate' not in e.headers:", "result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities) log('Instance MSI is: {}', msi) def send_additional_metadata(request):", "credentials access; grant with: juju trust' # try credentials config", "Use the credentials to authenticate the Azure CLI. \"\"\" app_id", "that isn't available from the metadata server. \"\"\" res_grp =", "_get_role(role_name): \"\"\" Translate short role name into a full role", "certainly be reused, so there's not much danger in hitting", "suppresses the previous exception from the stack trace raise AzureError(stderr)", "request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def tag_instance(request): \"\"\"", "the previous exception from the stack trace raise AzureError(stderr) from", "Exception class representing an error returned from the azure-cli tool.", "missing WWW-Authenticate header') return None www_auth = e.headers['WWW-Authenticate'] match =", "e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if not match: log_err('Error getting", "management for the given application. \"\"\" log('Enabling security management') _assign_role(request,", "store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup(): \"\"\" Perform cleanup. 
\"\"\"", "unauthorized request to the API and extracting the tenant ID", "_get_tenant_id(subscription_id): \"\"\" Translate the subscription ID into a tenant ID", "as e: if 'WWW-Authenticate' not in e.headers: log_err('Error getting tenant", "known_roles: return known_roles[role_name] sub_id = kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name)) role_data", "ID: missing WWW-Authenticate header') return None www_auth = e.headers['WWW-Authenticate'] match", "given tags. \"\"\" log('Tagging instance with: {}', request.instance_tags) _azure('vm', 'update',", "\"\"\" Translate the subscription ID into a tenant ID by", "match.group(1) def _azure(cmd, *args, return_stderr=False): \"\"\" Call the azure-cli tool.", "from charmhelpers.core.unitdata import kv from charms.layer import status ENTITY_PREFIX =", "\"\"\" if len(s) > max_len: hl = (max_len - len(ellipsis))", "\"\"\" known_roles = kv().get('charm.azure.roles', {}) if role_name in known_roles: return", "metadata server. \"\"\" res_grp = _azure('group', 'show', '--name', request.resource_group) #", "creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id) try: log('Forcing logout of Azure CLI')", "in to Azure CLI') _azure('login', '--service-principal', '-u', app_id, '-p', app_pass,", "config' config = hookenv.config() # try to use Juju's trust", "= re.sub(app_id, '<app-id>', e.args[0]) stderr = re.sub(app_pass, '<app-pass>', stderr) stderr", "_assign_role(request, role): if isinstance(role, StandardRole): role = role.value msi =", "# try credentials config if config['credentials']: try: creds_data = b64decode(config['credentials']).decode('utf8')", "the credentials from either the config or the hook tool.", "ellipsis where the removed characters were to show that they've", "the azure-cli tool. 
\"\"\" cmd = ['az', cmd] cmd.extend(args) result", "\"\"\" app_id = creds_data['application-id'] app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id = creds_data['subscription-id']", "management for the given application. \"\"\" log('Enabling object store management')", "def enable_security_management(request): \"\"\" Enable security management for the given application.", "been removed. \"\"\" if len(s) > max_len: hl = (max_len", "*args, return_stderr=False): \"\"\" Call the azure-cli tool. \"\"\" cmd =", "role name into a full role name and ensure that", "request.vm_name, '--resource-group', request.resource_group) vm_identities = kv().get('charm.azure.vm-identities', {}) msi = vm_identities[request.vm_id]", "return s def _get_tenant_id(subscription_id): \"\"\" Translate the subscription ID into", "security management') _assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): \"\"\" Enable block storage", "that the custom role is loaded. The custom roles have", "return stdout def _get_msi(vm_id): \"\"\" Get the Managed System Identity", "match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if not match: log_err('Error getting tenant", "to look them up are a PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network',", "/ 2 headl, taill = floor(hl), ceil(hl) s = s[:headl]", "in the error response. \"\"\" url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try:", "www_auth) if not match: log_err('Error getting tenant ID: unable to", "config') return False # no creds provided status.blocked(no_creds_msg) return False", "to be consistent. os.environ['HOME'] = '/root' def log(msg, *args): hookenv.log(msg.format(*args),", "for credentials config') return False # no creds provided status.blocked(no_creds_msg)", "Enable DNS management for the given application. 
\"\"\" log('Enabling DNS", "result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip() stderr =", "'update', '--name', request.vm_name, '--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag, value) for tag,", "StandardRole.OBJECT_STORE_MANAGER) def cleanup(): \"\"\" Perform cleanup. \"\"\" pass # Internal", "except AzureError as e: # redact the credential info from", "_get_role('disk-manager')) def enable_dns_management(request): \"\"\" Enable DNS management for the given", "the given instance with the given tags. \"\"\" log('Tagging instance", "raise AzureError(stderr) from None def ensure_msi(request): msi = _get_msi(request.vm_id) if", "# Internal helpers class AzureError(Exception): \"\"\" Exception class representing an", "tenant_id = _get_tenant_id(sub_id) try: log('Forcing logout of Azure CLI') _azure('logout')", "/home/ubuntu, whereas # during normal hook execution, it's /root. Set", "True except FileNotFoundError: pass # juju trust not available except", "hookenv.log(msg.format(*args), hookenv.INFO) def log_err(msg, *args): hookenv.log(msg.format(*args), hookenv.ERROR) def get_credentials(): \"\"\"", "'--name', request.vm_name, '--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag, value) for tag, value", "\"\"\" Enable block storage (disk) management for the given application.", "\"\"\" Meta-error subclass of AzureError representing something already existing. \"\"\"", "login_cli(creds_data): \"\"\" Use the credentials to authenticate the Azure CLI.", "urlopen import yaml from charmhelpers.core import hookenv from charmhelpers.core.unitdata import", "Azure CLI. \"\"\" app_id = creds_data['application-id'] app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id", "send_additional_metadata(request): \"\"\" Get additional info about the requesting instance via", "custom role limit. 
\"\"\" known_roles = kv().get('charm.azure.roles', {}) if role_name", "_get_tenant_id(sub_id) try: log('Forcing logout of Azure CLI') _azure('logout') except AzureError:", "header in the error response. \"\"\" url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id))", "server. \"\"\" res_grp = _azure('group', 'show', '--name', request.resource_group) # hard-code", "tenant ID: did not get \"unauthorized\" response') return None except", "role = role.value msi = _get_msi(request.vm_id) try: _azure('role', 'assignment', 'create',", "storage (disk) management for the given application. \"\"\" log('Enabling block", "def login_cli(creds_data): \"\"\" Use the credentials to authenticate the Azure", "role_data = json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name']", "= _azure('group', 'show', '--name', request.resource_group) # hard-code most of these", "subprocess from base64 import b64decode from enum import Enum from", "Prefers the config so that it can be overridden. \"\"\"", "the 2k custom role limit. \"\"\" known_roles = kv().get('charm.azure.roles', {})", "the given application. \"\"\" log('Enabling instance inspection') _assign_role(request, _get_role('vm-reader')) def", "StandardRole(Enum): NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314'", "'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When debugging", "info about the requesting instance via the API that isn't", "access; grant with: juju trust' # try credentials config if", "\"\"\" Enable DNS management for the given application. \"\"\" log('Enabling", "given application. 
\"\"\" log('Enabling block storage management') _assign_role(request, _get_role('disk-manager')) def", "will almost certainly be reused, so there's not much danger", "= creds_data['application-id'] app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id = creds_data['subscription-id'] tenant_id =", "pass # Internal helpers class AzureError(Exception): \"\"\" Exception class representing", "log('Enabling DNS management') _assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request): \"\"\" Enable object", "of these because with Juju, they're always the same #", "= re.sub(app_pass, '<app-pass>', stderr) stderr = re.sub(tenant_id, '<tenant-id>', stderr) #", "subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes']", "a PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def", "some reason HOME is set to /home/ubuntu, whereas # during", "can be overridden. \"\"\" no_creds_msg = 'missing credentials; set credentials", "this class or a meta-subclass for certain `message`s. \"\"\" if", "the string, inserting an ellipsis where the removed characters were", "logout of Azure CLI') _azure('logout') except AzureError: pass try: log('Logging", "= kv().get('charm.azure.vm-identities', {}) msi = vm_identities[request.vm_id] = result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities)", "return None www_auth = e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if", "re.sub(tenant_id, '<tenant-id>', stderr) # from None suppresses the previous exception", "or a meta-subclass for certain `message`s. 
\"\"\" if 'already exists'", "stdout = result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip() if result.returncode != 0:", "them up are a PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet',", "`message`s. \"\"\" if 'already exists' in message: return AlreadyExistsAzureError(message) return", "kv from charms.layer import status ENTITY_PREFIX = 'charm.azure' MODEL_UUID =", "role_fullname def _assign_role(request, role): if isinstance(role, StandardRole): role = role.value", "None suppresses the previous exception from the stack trace raise", "a specific subscription ID, but the subscription ID applies to", "is: {}', msi) def send_additional_metadata(request): \"\"\" Get additional info about", "object storage read-only access for the given application. \"\"\" log('Enabling", "reason HOME is set to /home/ubuntu, whereas # during normal", "stdout def _get_msi(vm_id): \"\"\" Get the Managed System Identity for", "Managed System Identity for the VM. \"\"\" vm_identities = kv().get('charm.azure.vm-identities',", "object storage management for the given application. \"\"\" log('Enabling object", "get \"unauthorized\" response') return None except HTTPError as e: if", "'--name', request.resource_group) # hard-code most of these because with Juju,", "name and ensure that the custom role is loaded. The", "the removed characters were to show that they've been removed.", "given application. 
\"\"\" log('Enabling security management') _assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request):", "response') return None except HTTPError as e: if 'WWW-Authenticate' not", "HTTPError from urllib.request import urlopen import yaml from charmhelpers.core import", "return False def login_cli(creds_data): \"\"\" Use the credentials to authenticate", "Juju, they're always the same # and the queries required", "Path from urllib.error import HTTPError from urllib.request import urlopen import", "azure-cli tool. \"\"\" @classmethod def get(cls, message): \"\"\" Factory method", "unable to find in {}', www_auth) return None return match.group(1)", "status.blocked(no_creds_msg) return False def login_cli(creds_data): \"\"\" Use the credentials to", "request.resource_group) vm_identities = kv().get('charm.azure.vm-identities', {}) msi = vm_identities[request.vm_id] = result['systemAssignedIdentity']", "application. \"\"\" log('Enabling DNS management') _assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request): \"\"\"", "NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER", "shorten the string, inserting an ellipsis where the removed characters", "subscription ID for use in roles kv().set('charm.azure.sub-id', sub_id) except AzureError", "request.resource_group) # hard-code most of these because with Juju, they're", "ID from the WWW-Authenticate header in the error response. \"\"\"", "e.args[0]: raise known_roles[role_name] = role_fullname return role_fullname def _assign_role(request, role):", "application. 
\"\"\" log('Enabling object store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup():", "'--resource-group', request.resource_group) vm_identities = kv().get('charm.azure.vm-identities', {}) msi = vm_identities[request.vm_id] =", "security management for the given application. \"\"\" log('Enabling security management')", "Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id)", "stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes'] login_cli(creds_data) return", "config['credentials']: try: creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True except Exception:", "_get_msi(request.vm_id) if not msi: log('Enabling Managed Service Identity') result =", "known_roles = kv().get('charm.azure.roles', {}) if role_name in known_roles: return known_roles[role_name]", "if 'permission denied' not in e.stderr.decode('utf8'): raise no_creds_msg = 'missing", "the VM. \"\"\" vm_identities = kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id) def", "DNS management') _assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request): \"\"\" Enable object storage", "role): if isinstance(role, StandardRole): role = role.value msi = _get_msi(request.vm_id)", "= creds['credential']['attributes'] login_cli(creds_data) return True except FileNotFoundError: pass # juju", "Get additional info about the requesting instance via the API", "Meta-error subclass of AzureError representing something already existing. \"\"\" pass", "subscription ID applies to the entire credential, so will almost", "import urlopen import yaml from charmhelpers.core import hookenv from charmhelpers.core.unitdata", "or the hook tool. 
Prefers the config so that it", "management') _assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request): \"\"\" Enable object storage read-only", "to find in {}', www_auth) return None return match.group(1) def", "= floor(hl), ceil(hl) s = s[:headl] + ellipsis + s[-taill:]", "instance inspection access for the given application. \"\"\" log('Enabling instance", "class AzureError(Exception): \"\"\" Exception class representing an error returned from", "creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True except Exception: status.blocked('invalid value", "value for credentials config') return False # no creds provided", "isn't available from the metadata server. \"\"\" res_grp = _azure('group',", "_assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request): \"\"\" Enable security management for the", "if len(s) > max_len: hl = (max_len - len(ellipsis)) /", "# try to use Juju's trust feature try: result =", "execution, it's /root. Set it here to be consistent. os.environ['HOME']", "error returned from the azure-cli tool. \"\"\" @classmethod def get(cls,", "into a full role name and ensure that the custom", "e: if 'permission denied' not in e.stderr.decode('utf8'): raise no_creds_msg =", "kv().get('charm.azure.roles', {}) if role_name in known_roles: return known_roles[role_name] sub_id =", "isinstance(role, StandardRole): role = role.value msi = _get_msi(request.vm_id) try: _azure('role',", "Azure CLI') _azure('login', '--service-principal', '-u', app_id, '-p', app_pass, '-t', tenant_id)", "specific subscription ID, but the subscription ID applies to the", "role_fullname role_data['AssignableScopes'][0] = scope try: log('Ensuring role {}', role_fullname) _azure('role',", "if not msi: log('Enabling Managed Service Identity') result = _azure('vm',", "len(s) > max_len: hl = (max_len - len(ellipsis)) / 2", "to ensure it is under max_len. 
That is, shorten the", "known_roles[role_name] = role_fullname return role_fullname def _assign_role(request, role): if isinstance(role,", "it's /root. Set it here to be consistent. os.environ['HOME'] =", "Set it here to be consistent. os.environ['HOME'] = '/root' def", "here to be consistent. os.environ['HOME'] = '/root' def log(msg, *args):", "API that isn't available from the metadata server. \"\"\" res_grp", "return AlreadyExistsAzureError(message) return AzureError(message) class AlreadyExistsAzureError(AzureError): \"\"\" Meta-error subclass of", "sub_id = kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text()) role_fullname", "enum import Enum from math import ceil, floor from pathlib", "as e: if 'already exists' not in e.args[0]: raise known_roles[role_name]", "role is loaded. The custom roles have to be applied", "pass def _elide(s, max_len, ellipsis='...'): \"\"\" Elide s in the", "hookenv.config() # try to use Juju's trust feature try: result", "representing something already existing. \"\"\" pass def _elide(s, max_len, ellipsis='...'):", "log_err('Error getting tenant ID: unable to find in {}', www_auth)", "subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip() if", "stderr = result.stderr.decode('utf8').strip() if result.returncode != 0: raise AzureError.get(stderr) if", "there's not much danger in hitting the 2k custom role", "'-t', tenant_id) # cache the subscription ID for use in", "\"\"\" Call the azure-cli tool. 
\"\"\" cmd = ['az', cmd]", "the stack trace raise AzureError(stderr) from None def ensure_msi(request): msi", "def _get_tenant_id(subscription_id): \"\"\" Translate the subscription ID into a tenant", "role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname role_data['AssignableScopes'][0] = scope try: log('Ensuring role", "for the given application. \"\"\" log('Enabling network management') _assign_role(request, StandardRole.NETWORK_MANAGER)", "credentials config' config = hookenv.config() # try to use Juju's", "= json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] =", "'assign', '--name', request.vm_name, '--resource-group', request.resource_group) vm_identities = kv().get('charm.azure.vm-identities', {}) msi", "= result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip() if result.returncode != 0: raise", "to be applied to a specific subscription ID, but the", "if isinstance(role, StandardRole): role = role.value msi = _get_msi(request.vm_id) try:", "\"\"\" no_creds_msg = 'missing credentials; set credentials config' config =", "trace raise AzureError(stderr) from None def ensure_msi(request): msi = _get_msi(request.vm_id)", "given application. \"\"\" log('Enabling DNS management') _assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request):", "method to create either an instance of this class or", "def log(msg, *args): hookenv.log(msg.format(*args), hookenv.INFO) def log_err(msg, *args): hookenv.log(msg.format(*args), hookenv.ERROR)", "the exception message stderr = re.sub(app_id, '<app-id>', e.args[0]) stderr =", "read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): \"\"\" Enable object storage management", "show that they've been removed. 
\"\"\" if len(s) > max_len:", "def enable_instance_inspection(request): \"\"\" Enable instance inspection access for the given", "have to be applied to a specific subscription ID, but", "cache the subscription ID for use in roles kv().set('charm.azure.sub-id', sub_id)", "base64 import b64decode from enum import Enum from math import", "entire credential, so will almost certainly be reused, so there's", "try: creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True except Exception: status.blocked('invalid", "hookenv.ERROR) def get_credentials(): \"\"\" Get the credentials from either the", "try to use Juju's trust feature try: result = subprocess.run(['credential-get'],", "_assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup(): \"\"\" Perform cleanup. \"\"\" pass #", "via the API that isn't available from the metadata server.", "role_file = Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id) scope", "log(msg, *args): hookenv.log(msg.format(*args), hookenv.INFO) def log_err(msg, *args): hookenv.log(msg.format(*args), hookenv.ERROR) def", "consistent. os.environ['HOME'] = '/root' def log(msg, *args): hookenv.log(msg.format(*args), hookenv.INFO) def", "trust not available except subprocess.CalledProcessError as e: if 'permission denied'", "something already existing. 
\"\"\" pass def _elide(s, max_len, ellipsis='...'): \"\"\"", "<PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id = creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id) try: log('Forcing logout", "charmhelpers.core import hookenv from charmhelpers.core.unitdata import kv from charms.layer import", "request.resource_group, '--set', *['tags.{}={}'.format(tag, value) for tag, value in request.instance_tags.items()]) def", "StandardRole.DNS_MANAGER) def enable_object_storage_access(request): \"\"\" Enable object storage read-only access for", "= subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip()", "ID applies to the entire credential, so will almost certainly", "raise no_creds_msg = 'missing credentials access; grant with: juju trust'", "log_err(msg, *args): hookenv.log(msg.format(*args), hookenv.ERROR) def get_credentials(): \"\"\" Get the credentials", "\"\"\" Tag the given instance with the given tags. \"\"\"", "'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When debugging hooks, for some reason HOME is", "for the given application. \"\"\" log('Enabling DNS management') _assign_role(request, StandardRole.DNS_MANAGER)", "_azure('login', '--service-principal', '-u', app_id, '-p', app_pass, '-t', tenant_id) # cache", "either the config or the hook tool. Prefers the config", "_assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request): \"\"\" Enable object storage read-only access", "import yaml from charmhelpers.core import hookenv from charmhelpers.core.unitdata import kv", "request.instance_tags.items()]) def enable_instance_inspection(request): \"\"\" Enable instance inspection access for the", "the config so that it can be overridden. 
\"\"\" no_creds_msg", "provided status.blocked(no_creds_msg) return False def login_cli(creds_data): \"\"\" Use the credentials", "be applied to a specific subscription ID, but the subscription", "no creds provided status.blocked(no_creds_msg) return False def login_cli(creds_data): \"\"\" Use", "e: if 'already exists' not in e.args[0]: raise known_roles[role_name] =", "instance of this class or a meta-subclass for certain `message`s.", "most of these because with Juju, they're always the same", "= 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When", "that they've been removed. \"\"\" if len(s) > max_len: hl", "CLI') _azure('login', '--service-principal', '-u', app_id, '-p', app_pass, '-t', tenant_id) #", "AlreadyExistsAzureError(AzureError): \"\"\" Meta-error subclass of AzureError representing something already existing.", "def enable_object_storage_access(request): \"\"\" Enable object storage read-only access for the", "{}', www_auth) return None return match.group(1) def _azure(cmd, *args, return_stderr=False):", "for the given application. \"\"\" log('Enabling object storage read') _assign_role(request,", "\"\"\" Translate short role name into a full role name", "tag, value in request.instance_tags.items()]) def enable_instance_inspection(request): \"\"\" Enable instance inspection", "from the WWW-Authenticate header in the error response. 
\"\"\" url", "_assign_role(request, _get_role('disk-manager')) def enable_dns_management(request): \"\"\" Enable DNS management for the", "enable_object_storage_access(request): \"\"\" Enable object storage read-only access for the given", "tenant ID: unable to find in {}', www_auth) return None", "network management') _assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request): \"\"\" Enable security management", "in {}', www_auth) return None return match.group(1) def _azure(cmd, *args,", "from the azure-cli tool. \"\"\" @classmethod def get(cls, message): \"\"\"", "and extracting the tenant ID from the WWW-Authenticate header in", "the entire credential, so will almost certainly be reused, so", "enable_block_storage_management(request): \"\"\" Enable block storage (disk) management for the given", "Perform cleanup. \"\"\" pass # Internal helpers class AzureError(Exception): \"\"\"", "FileNotFoundError: pass # juju trust not available except subprocess.CalledProcessError as", "message stderr = re.sub(app_id, '<app-id>', e.args[0]) stderr = re.sub(app_pass, '<app-pass>',", "to create either an instance of this class or a", "vm_identities[request.vm_id] = result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities) log('Instance MSI is: {}', msi)", "\"\"\" log('Enabling network management') _assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request): \"\"\" Enable", "log('Enabling security management') _assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): \"\"\" Enable block", "set credentials config' config = hookenv.config() # try to use", "\"\"\" res_grp = _azure('group', 'show', '--name', request.resource_group) # hard-code most", "try: _azure('role', 'assignment', 'create', '--assignee-object-id', msi, '--resource-group', request.resource_group, '--role', role)", "\"\"\" Enable network management for the given application. 
\"\"\" log('Enabling", "'already exists' not in e.args[0]: raise known_roles[role_name] = role_fullname return", "for tag, value in request.instance_tags.items()]) def enable_instance_inspection(request): \"\"\" Enable instance", "Translate short role name into a full role name and", "\"\"\" Exception class representing an error returned from the azure-cli", "\"\"\" Enable object storage read-only access for the given application.", "'missing credentials; set credentials config' config = hookenv.config() # try", "\"\"\" Enable instance inspection access for the given application. \"\"\"", "urllib.error import HTTPError from urllib.request import urlopen import yaml from", "that it can be overridden. \"\"\" no_creds_msg = 'missing credentials;", "role_fullname return role_fullname def _assign_role(request, role): if isinstance(role, StandardRole): role", "try: result = subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8'))", "\"\"\" log('Enabling instance inspection') _assign_role(request, _get_role('vm-reader')) def enable_network_management(request): \"\"\" Enable", "def get(cls, message): \"\"\" Factory method to create either an", "use Juju's trust feature try: result = subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE,", "the API and extracting the tenant ID from the WWW-Authenticate", "['az', cmd] cmd.extend(args) result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout =", "cleanup. 
\"\"\" pass # Internal helpers class AzureError(Exception): \"\"\" Exception", "'permission denied' not in e.stderr.decode('utf8'): raise no_creds_msg = 'missing credentials", "_azure('role', 'assignment', 'create', '--assignee-object-id', msi, '--resource-group', request.resource_group, '--role', role) except", "role_fullname) _azure('role', 'definition', 'create', '--role-definition', json.dumps(role_data)) except AzureError as e:", "return match.group(1) def _azure(cmd, *args, return_stderr=False): \"\"\" Call the azure-cli", "\"\"\" Elide s in the middle to ensure it is", "= ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error getting tenant ID: did", "_azure('vm', 'update', '--name', request.vm_name, '--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag, value) for", "credential, so will almost certainly be reused, so there's not", "def enable_object_storage_management(request): \"\"\" Enable object storage management for the given", "an error returned from the azure-cli tool. \"\"\" @classmethod def", "custom role is loaded. The custom roles have to be", "!= 0: raise AzureError.get(stderr) if return_stderr: return stderr if stdout:", "log('Enabling Managed Service Identity') result = _azure('vm', 'identity', 'assign', '--name',", "= role_fullname role_data['AssignableScopes'][0] = scope try: log('Ensuring role {}', role_fullname)", "Get the credentials from either the config or the hook", "cleanup(): \"\"\" Perform cleanup. 
\"\"\" pass # Internal helpers class", "if result.returncode != 0: raise AzureError.get(stderr) if return_stderr: return stderr", "def _get_role(role_name): \"\"\" Translate short role name into a full", "ID: did not get \"unauthorized\" response') return None except HTTPError", "for some reason HOME is set to /home/ubuntu, whereas #", "urllib.request import urlopen import yaml from charmhelpers.core import hookenv from", "role.value msi = _get_msi(request.vm_id) try: _azure('role', 'assignment', 'create', '--assignee-object-id', msi,", "ceil(hl) s = s[:headl] + ellipsis + s[-taill:] return s", "try: log('Forcing logout of Azure CLI') _azure('logout') except AzureError: pass", "stderr) stderr = re.sub(tenant_id, '<tenant-id>', stderr) # from None suppresses", "return AzureError(message) class AlreadyExistsAzureError(AzureError): \"\"\" Meta-error subclass of AzureError representing", "kv().set('charm.azure.vm-identities', vm_identities) log('Instance MSI is: {}', msi) def send_additional_metadata(request): \"\"\"", "Service Identity') result = _azure('vm', 'identity', 'assign', '--name', request.vm_name, '--resource-group',", "s[-taill:] return s def _get_tenant_id(subscription_id): \"\"\" Translate the subscription ID", "import b64decode from enum import Enum from math import ceil,", "MSI is: {}', msi) def send_additional_metadata(request): \"\"\" Get additional info", "return vm_identities.get(vm_id) def _get_role(role_name): \"\"\" Translate short role name into", "e.args[0]) stderr = re.sub(app_pass, '<app-pass>', stderr) stderr = re.sub(tenant_id, '<tenant-id>',", "the given tags. \"\"\" log('Tagging instance with: {}', request.instance_tags) _azure('vm',", "url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error getting tenant ID:", "# When debugging hooks, for some reason HOME is set", "> max_len: hl = (max_len - len(ellipsis)) / 2 headl,", "it is under max_len. 
That is, shorten the string, inserting", "from base64 import b64decode from enum import Enum from math", "of Azure CLI') _azure('logout') except AzureError: pass try: log('Logging in", "certain `message`s. \"\"\" if 'already exists' in message: return AlreadyExistsAzureError(message)", "AzureError representing something already existing. \"\"\" pass def _elide(s, max_len,", "yaml from charmhelpers.core import hookenv from charmhelpers.core.unitdata import kv from", "about the requesting instance via the API that isn't available", "always the same # and the queries required to look", "charms.layer import status ENTITY_PREFIX = 'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN", "kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id) def _get_role(role_name): \"\"\" Translate short role", "not match: log_err('Error getting tenant ID: unable to find in", "_assign_role(request, _get_role('vm-reader')) def enable_network_management(request): \"\"\" Enable network management for the", "import os import re import subprocess from base64 import b64decode", "{}', request.instance_tags) _azure('vm', 'update', '--name', request.vm_name, '--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag,", "pass try: log('Logging in to Azure CLI') _azure('login', '--service-principal', '-u',", "available from the metadata server. \"\"\" res_grp = _azure('group', 'show',", "Enable object storage read-only access for the given application. \"\"\"", "role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname role_data['AssignableScopes'][0] = scope", "the hook tool. 
Prefers the config so that it can", "message): \"\"\" Factory method to create either an instance of", "'4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1'", "roles have to be applied to a specific subscription ID,", "characters were to show that they've been removed. \"\"\" if", "# juju trust not available except subprocess.CalledProcessError as e: if", "# no creds provided status.blocked(no_creds_msg) return False def login_cli(creds_data): \"\"\"", "log('Forcing logout of Azure CLI') _azure('logout') except AzureError: pass try:", "where the removed characters were to show that they've been", "stderr) # from None suppresses the previous exception from the", "storage management for the given application. \"\"\" log('Enabling object store", ") def tag_instance(request): \"\"\" Tag the given instance with the", "'already exists' in message: return AlreadyExistsAzureError(message) return AzureError(message) class AlreadyExistsAzureError(AzureError):", "result = subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8')) creds_data", "class or a meta-subclass for certain `message`s. \"\"\" if 'already", "cmd] cmd.extend(args) result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip()", "= hookenv.config() # try to use Juju's trust feature try:", "limit. \"\"\" known_roles = kv().get('charm.azure.roles', {}) if role_name in known_roles:", "in e.headers: log_err('Error getting tenant ID: missing WWW-Authenticate header') return", "CLI') _azure('logout') except AzureError: pass try: log('Logging in to Azure", "tool. 
Prefers the config so that it can be overridden.", "not msi: log('Enabling Managed Service Identity') result = _azure('vm', 'identity',", "OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When debugging hooks, for some reason", "max_len, ellipsis='...'): \"\"\" Elide s in the middle to ensure", "message: return AlreadyExistsAzureError(message) return AzureError(message) class AlreadyExistsAzureError(AzureError): \"\"\" Meta-error subclass", "from urllib.error import HTTPError from urllib.request import urlopen import yaml", "same # and the queries required to look them up", "under max_len. That is, shorten the string, inserting an ellipsis", "if stdout: stdout = json.loads(stdout) return stdout def _get_msi(vm_id): \"\"\"", "0: raise AzureError.get(stderr) if return_stderr: return stderr if stdout: stdout", "creds_data['application-id'] app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id = creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id)", "\"\"\" Enable security management for the given application. 
\"\"\" log('Enabling", "= 128 class StandardRole(Enum): NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10'", "True except Exception: status.blocked('invalid value for credentials config') return False", "management') _assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): \"\"\" Enable block storage (disk)", "getting tenant ID: did not get \"unauthorized\" response') return None", "re.sub(app_pass, '<app-pass>', stderr) stderr = re.sub(tenant_id, '<tenant-id>', stderr) # from", "import Enum from math import ceil, floor from pathlib import", "log('Logging in to Azure CLI') _azure('login', '--service-principal', '-u', app_id, '-p',", "\"\"\" pass # Internal helpers class AzureError(Exception): \"\"\" Exception class", "DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' #", "not available except subprocess.CalledProcessError as e: if 'permission denied' not", "for use in roles kv().set('charm.azure.sub-id', sub_id) except AzureError as e:", "an instance of this class or a meta-subclass for certain", "OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When debugging hooks,", "removed. \"\"\" if len(s) > max_len: hl = (max_len -", "set to /home/ubuntu, whereas # during normal hook execution, it's", "*['tags.{}={}'.format(tag, value) for tag, value in request.instance_tags.items()]) def enable_instance_inspection(request): \"\"\"", "read-only access for the given application. 
\"\"\" log('Enabling object storage", "'-p', app_pass, '-t', tenant_id) # cache the subscription ID for", "helpers class AzureError(Exception): \"\"\" Exception class representing an error returned", "not in e.stderr.decode('utf8'): raise no_creds_msg = 'missing credentials access; grant", "taill = floor(hl), ceil(hl) s = s[:headl] + ellipsis +", "def log_err(msg, *args): hookenv.log(msg.format(*args), hookenv.ERROR) def get_credentials(): \"\"\" Get the", "if return_stderr: return stderr if stdout: stdout = json.loads(stdout) return", "to use Juju's trust feature try: result = subprocess.run(['credential-get'], check=True,", "Get the Managed System Identity for the VM. \"\"\" vm_identities", "os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN = 128 class StandardRole(Enum): NETWORK_MANAGER", "the subscription ID applies to the entire credential, so will", "not get \"unauthorized\" response') return None except HTTPError as e:", "with Juju, they're always the same # and the queries", "if 'already exists' not in e.args[0]: raise known_roles[role_name] = role_fullname", "authenticate the Azure CLI. \"\"\" app_id = creds_data['application-id'] app_pass =", "ellipsis + s[-taill:] return s def _get_tenant_id(subscription_id): \"\"\" Translate the", "subprocess.CalledProcessError as e: if 'permission denied' not in e.stderr.decode('utf8'): raise", "look them up are a PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group,", "floor from pathlib import Path from urllib.error import HTTPError from", "= s[:headl] + ellipsis + s[-taill:] return s def _get_tenant_id(subscription_id):", "the WWW-Authenticate header in the error response. \"\"\" url =", "be overridden. 
\"\"\" no_creds_msg = 'missing credentials; set credentials config'", "\"\"\" log('Enabling object store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup(): \"\"\"", "kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id)", "128 class StandardRole(Enum): NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER", "raise known_roles[role_name] = role_fullname return role_fullname def _assign_role(request, role): if", "'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe'", "config so that it can be overridden. \"\"\" no_creds_msg =", "up are a PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg',", "login_cli(creds_data) return True except Exception: status.blocked('invalid value for credentials config')", "representing an error returned from the azure-cli tool. \"\"\" @classmethod", "return_stderr=False): \"\"\" Call the azure-cli tool. \"\"\" cmd = ['az',", "given application. \"\"\" log('Enabling object store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def", "s = s[:headl] + ellipsis + s[-taill:] return s def", "math import ceil, floor from pathlib import Path from urllib.error", "cmd = ['az', cmd] cmd.extend(args) result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "given application. 
\"\"\" log('Enabling instance inspection') _assign_role(request, _get_role('vm-reader')) def enable_network_management(request):", "_azure(cmd, *args, return_stderr=False): \"\"\" Call the azure-cli tool. \"\"\" cmd", "*args): hookenv.log(msg.format(*args), hookenv.ERROR) def get_credentials(): \"\"\" Get the credentials from", "status ENTITY_PREFIX = 'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64", "tool. \"\"\" cmd = ['az', cmd] cmd.extend(args) result = subprocess.run(cmd,", "ID: unable to find in {}', www_auth) return None return", "log('Tagging instance with: {}', request.instance_tags) _azure('vm', 'update', '--name', request.vm_name, '--resource-group',", "credentials to authenticate the Azure CLI. \"\"\" app_id = creds_data['application-id']", "{}) if role_name in known_roles: return known_roles[role_name] sub_id = kv().get('charm.azure.sub-id')", "management') _assign_role(request, _get_role('disk-manager')) def enable_dns_management(request): \"\"\" Enable DNS management for", "@classmethod def get(cls, message): \"\"\" Factory method to create either", "class StandardRole(Enum): NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER =", "= <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id = creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id) try: log('Forcing", "Factory method to create either an instance of this class", "access for the given application. 
\"\"\" log('Enabling instance inspection') _assign_role(request,", "\"unauthorized\" response') return None except HTTPError as e: if 'WWW-Authenticate'", "= 'missing credentials; set credentials config' config = hookenv.config() #", "credentials; set credentials config' config = hookenv.config() # try to", "credentials config') return False # no creds provided status.blocked(no_creds_msg) return", "and the queries required to look them up are a", "if role_name in known_roles: return known_roles[role_name] sub_id = kv().get('charm.azure.sub-id') role_file", "= json.loads(stdout) return stdout def _get_msi(vm_id): \"\"\" Get the Managed", "yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes'] login_cli(creds_data) return True except FileNotFoundError: pass", "= Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id) scope =", "= 'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN =", "return None return match.group(1) def _azure(cmd, *args, return_stderr=False): \"\"\" Call", "because with Juju, they're always the same # and the", "The custom roles have to be applied to a specific", "making an unauthorized request to the API and extracting the", "a meta-subclass for certain `message`s. \"\"\" if 'already exists' in", "msi: log('Enabling Managed Service Identity') result = _azure('vm', 'identity', 'assign',", "DNS management for the given application. 
\"\"\" log('Enabling DNS management')", "value in request.instance_tags.items()]) def enable_instance_inspection(request): \"\"\" Enable instance inspection access", "PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def tag_instance(request):", "if config['credentials']: try: creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True except", "ceil, floor from pathlib import Path from urllib.error import HTTPError", "to /home/ubuntu, whereas # during normal hook execution, it's /root.", "the subscription ID into a tenant ID by making an", "existing. \"\"\" pass def _elide(s, max_len, ellipsis='...'): \"\"\" Elide s", "the given application. \"\"\" log('Enabling object store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER)", "_azure('role', 'definition', 'create', '--role-definition', json.dumps(role_data)) except AzureError as e: if", "whereas # during normal hook execution, it's /root. Set it", "available except subprocess.CalledProcessError as e: if 'permission denied' not in", "the given application. 
\"\"\" log('Enabling DNS management') _assign_role(request, StandardRole.DNS_MANAGER) def", "{}', msi) def send_additional_metadata(request): \"\"\" Get additional info about the", "Juju's trust feature try: result = subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "vm_identities = kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id) def _get_role(role_name): \"\"\" Translate", "getting tenant ID: unable to find in {}', www_auth) return", "except FileNotFoundError: pass # juju trust not available except subprocess.CalledProcessError", "'--name', request.vm_name, '--resource-group', request.resource_group) vm_identities = kv().get('charm.azure.vm-identities', {}) msi =", "hookenv from charmhelpers.core.unitdata import kv from charms.layer import status ENTITY_PREFIX", "grant with: juju trust' # try credentials config if config['credentials']:", "as e: if 'permission denied' not in e.stderr.decode('utf8'): raise no_creds_msg", "log_err('Error getting tenant ID: missing WWW-Authenticate header') return None www_auth", "danger in hitting the 2k custom role limit. \"\"\" known_roles", "VM. 
\"\"\" vm_identities = kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id) def _get_role(role_name):", "False def login_cli(creds_data): \"\"\" Use the credentials to authenticate the", "Internal helpers class AzureError(Exception): \"\"\" Exception class representing an error", "os.environ['HOME'] = '/root' def log(msg, *args): hookenv.log(msg.format(*args), hookenv.INFO) def log_err(msg,", "AzureError(message) class AlreadyExistsAzureError(AzureError): \"\"\" Meta-error subclass of AzureError representing something", "role_name in known_roles: return known_roles[role_name] sub_id = kv().get('charm.azure.sub-id') role_file =", "ENTITY_PREFIX = 'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN", "request.instance_tags) _azure('vm', 'update', '--name', request.vm_name, '--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag, value)", "response. \"\"\" url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error getting", "much danger in hitting the 2k custom role limit. \"\"\"", "get_credentials(): \"\"\" Get the credentials from either the config or", "credentials from either the config or the hook tool. Prefers", "except subprocess.CalledProcessError as e: if 'permission denied' not in e.stderr.decode('utf8'):", "so that it can be overridden. \"\"\" no_creds_msg = 'missing", "_get_role('vm-reader')) def enable_network_management(request): \"\"\" Enable network management for the given", "config or the hook tool. 
Prefers the config so that", "kv().get('charm.azure.vm-identities', {}) msi = vm_identities[request.vm_id] = result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities) log('Instance", "= result.stderr.decode('utf8').strip() if result.returncode != 0: raise AzureError.get(stderr) if return_stderr:", "return known_roles[role_name] sub_id = kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name)) role_data =", "network management for the given application. \"\"\" log('Enabling network management')", "from either the config or the hook tool. Prefers the", "Enable block storage (disk) management for the given application. \"\"\"", "block storage (disk) management for the given application. \"\"\" log('Enabling", "known_roles[role_name] sub_id = kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text())", "'-u', app_id, '-p', app_pass, '-t', tenant_id) # cache the subscription", "# redact the credential info from the exception message stderr", "re import subprocess from base64 import b64decode from enum import", "management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup(): \"\"\" Perform cleanup. 
\"\"\" pass", "kv().set('charm.azure.sub-id', sub_id) except AzureError as e: # redact the credential", "def enable_block_storage_management(request): \"\"\" Enable block storage (disk) management for the", "That is, shorten the string, inserting an ellipsis where the", "the same # and the queries required to look them", "# and the queries required to look them up are", "'show', '--name', request.resource_group) # hard-code most of these because with", "= yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes'] login_cli(creds_data) return True except FileNotFoundError:", "= '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When debugging hooks, for", "the given application. \"\"\" log('Enabling network management') _assign_role(request, StandardRole.NETWORK_MANAGER) def", "'--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag, value) for tag, value in request.instance_tags.items()])", "raise AzureError.get(stderr) if return_stderr: return stderr if stdout: stdout =", "from the exception message stderr = re.sub(app_id, '<app-id>', e.args[0]) stderr", "stderr = re.sub(app_id, '<app-id>', e.args[0]) stderr = re.sub(app_pass, '<app-pass>', stderr)", "# during normal hook execution, it's /root. 
Set it here", "an ellipsis where the removed characters were to show that", "re.search(r'authorization_uri=\"[^\"]*/([^/\"]*)\"', www_auth) if not match: log_err('Error getting tenant ID: unable", "from pathlib import Path from urllib.error import HTTPError from urllib.request", "= kv().get('charm.azure.roles', {}) if role_name in known_roles: return known_roles[role_name] sub_id", "result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip() if result.returncode != 0: raise AzureError.get(stderr)", "role_fullname = role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname role_data['AssignableScopes'][0]", "s in the middle to ensure it is under max_len.", "of AzureError representing something already existing. \"\"\" pass def _elide(s,", "stack trace raise AzureError(stderr) from None def ensure_msi(request): msi =", "storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): \"\"\" Enable object storage", "\"\"\" Get the credentials from either the config or the", "in request.instance_tags.items()]) def enable_instance_inspection(request): \"\"\" Enable instance inspection access for", "\"\"\" vm_identities = kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id) def _get_role(role_name): \"\"\"", "with: juju trust' # try credentials config if config['credentials']: try:", "\"\"\" log('Enabling block storage management') _assign_role(request, _get_role('disk-manager')) def enable_dns_management(request): \"\"\"", "management for the given application. \"\"\" log('Enabling block storage management')", "of this class or a meta-subclass for certain `message`s. \"\"\"", "= b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True except Exception: status.blocked('invalid value for", "the error response. 
\"\"\" url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url)", "return stderr if stdout: stdout = json.loads(stdout) return stdout def", "inserting an ellipsis where the removed characters were to show", "management') _assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request): \"\"\" Enable security management for", "class AlreadyExistsAzureError(AzureError): \"\"\" Meta-error subclass of AzureError representing something already", "for certain `message`s. \"\"\" if 'already exists' in message: return", "e: # redact the credential info from the exception message", "StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): \"\"\" Enable object storage management for the", "+ s[-taill:] return s def _get_tenant_id(subscription_id): \"\"\" Translate the subscription", "it here to be consistent. os.environ['HOME'] = '/root' def log(msg,", "so there's not much danger in hitting the 2k custom", "System Identity for the VM. \"\"\" vm_identities = kv().get('charm.azure.vm-identities', {})", "os import re import subprocess from base64 import b64decode from", "+ ellipsis + s[-taill:] return s def _get_tenant_id(subscription_id): \"\"\" Translate", "the tenant ID from the WWW-Authenticate header in the error", "_get_msi(vm_id): \"\"\" Get the Managed System Identity for the VM.", "= _azure('vm', 'identity', 'assign', '--name', request.vm_name, '--resource-group', request.resource_group) vm_identities =", "Azure CLI') _azure('logout') except AzureError: pass try: log('Logging in to", "creds = yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes'] login_cli(creds_data) return True except", "/root. Set it here to be consistent. os.environ['HOME'] = '/root'", "is loaded. 
The custom roles have to be applied to", "'2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' # When debugging hooks, for some", "creds_data = creds['credential']['attributes'] login_cli(creds_data) return True except FileNotFoundError: pass #", "these because with Juju, they're always the same # and", "create either an instance of this class or a meta-subclass", "instance via the API that isn't available from the metadata", "Enable security management for the given application. \"\"\" log('Enabling security", "('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error getting tenant ID: did not", "'identity', 'assign', '--name', request.vm_name, '--resource-group', request.resource_group) vm_identities = kv().get('charm.azure.vm-identities', {})", "import ceil, floor from pathlib import Path from urllib.error import", "into a tenant ID by making an unauthorized request to", "config if config['credentials']: try: creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True", "import Path from urllib.error import HTTPError from urllib.request import urlopen", "import kv from charms.layer import status ENTITY_PREFIX = 'charm.azure' MODEL_UUID", "in roles kv().set('charm.azure.sub-id', sub_id) except AzureError as e: # redact", "be reused, so there's not much danger in hitting the", "from the stack trace raise AzureError(stderr) from None def ensure_msi(request):", "_assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): \"\"\" Enable object storage management for", "application. \"\"\" log('Enabling network management') _assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request): \"\"\"", "(disk) management for the given application. 
\"\"\" log('Enabling block storage", "the queries required to look them up are a PITA", "tenant_id) # cache the subscription ID for use in roles", "short role name into a full role name and ensure", "not in e.args[0]: raise known_roles[role_name] = role_fullname return role_fullname def", "'--role-definition', json.dumps(role_data)) except AzureError as e: if 'already exists' not", "app_pass, '-t', tenant_id) # cache the subscription ID for use", "= 'missing credentials access; grant with: juju trust' # try", "request.vm_name, '--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag, value) for tag, value in", "Tag the given instance with the given tags. \"\"\" log('Tagging", "'missing credentials access; grant with: juju trust' # try credentials", "storage read-only access for the given application. \"\"\" log('Enabling object", "to authenticate the Azure CLI. \"\"\" app_id = creds_data['application-id'] app_pass", "a tenant ID by making an unauthorized request to the", "= _get_tenant_id(sub_id) try: log('Forcing logout of Azure CLI') _azure('logout') except", "be consistent. 
os.environ['HOME'] = '/root' def log(msg, *args): hookenv.log(msg.format(*args), hookenv.INFO)", "def send_additional_metadata(request): \"\"\" Get additional info about the requesting instance", "almost certainly be reused, so there's not much danger in", "json import os import re import subprocess from base64 import", "they're always the same # and the queries required to", "log('Enabling object store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup(): \"\"\" Perform", "b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True except Exception: status.blocked('invalid value for credentials", "in message: return AlreadyExistsAzureError(message) return AzureError(message) class AlreadyExistsAzureError(AzureError): \"\"\" Meta-error", "vm_identities) log('Instance MSI is: {}', msi) def send_additional_metadata(request): \"\"\" Get", "enable_object_storage_management(request): \"\"\" Enable object storage management for the given application.", "cmd.extend(args) result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip() stderr", "storage management') _assign_role(request, _get_role('disk-manager')) def enable_dns_management(request): \"\"\" Enable DNS management", "subscription ID, but the subscription ID applies to the entire", "in hitting the 2k custom role limit. \"\"\" known_roles =", "Elide s in the middle to ensure it is under", "the metadata server. \"\"\" res_grp = _azure('group', 'show', '--name', request.resource_group)", "= '4d97b98b-1d4f-4787-a291-c67834d212e7' SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10' DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314' OBJECT_STORE_READER =", "meta-subclass for certain `message`s. \"\"\" if 'already exists' in message:", "if not match: log_err('Error getting tenant ID: unable to find", "given application. 
\"\"\" log('Enabling object storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def", "ID for use in roles kv().set('charm.azure.sub-id', sub_id) except AzureError as", "from math import ceil, floor from pathlib import Path from", "charmhelpers.core.unitdata import kv from charms.layer import status ENTITY_PREFIX = 'charm.azure'", "= result['systemAssignedIdentity'] kv().set('charm.azure.vm-identities', vm_identities) log('Instance MSI is: {}', msi) def", "match: log_err('Error getting tenant ID: unable to find in {}',", "no_creds_msg = 'missing credentials; set credentials config' config = hookenv.config()", "string, inserting an ellipsis where the removed characters were to", "max_len: hl = (max_len - len(ellipsis)) / 2 headl, taill", "log('Instance MSI is: {}', msi) def send_additional_metadata(request): \"\"\" Get additional", "if 'already exists' in message: return AlreadyExistsAzureError(message) return AzureError(message) class", "ID into a tenant ID by making an unauthorized request", "log('Enabling instance inspection') _assign_role(request, _get_role('vm-reader')) def enable_network_management(request): \"\"\" Enable network", "CLI. \"\"\" app_id = creds_data['application-id'] app_pass = <PASSWORD>_<PASSWORD>['<PASSWORD>'] sub_id =", "instance with the given tags. 
\"\"\" log('Tagging instance with: {}',", "result.stderr.decode('utf8').strip() if result.returncode != 0: raise AzureError.get(stderr) if return_stderr: return", "stdout = json.loads(stdout) return stdout def _get_msi(vm_id): \"\"\" Get the", "custom roles have to be applied to a specific subscription", "are a PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', )", "- len(ellipsis)) / 2 headl, taill = floor(hl), ceil(hl) s", "stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip() if result.returncode !=", "stderr if stdout: stdout = json.loads(stdout) return stdout def _get_msi(vm_id):", "return None except HTTPError as e: if 'WWW-Authenticate' not in", "re.sub(app_id, '<app-id>', e.args[0]) stderr = re.sub(app_pass, '<app-pass>', stderr) stderr =", "return True except Exception: status.blocked('invalid value for credentials config') return", "error response. \"\"\" url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error", "stderr = re.sub(app_pass, '<app-pass>', stderr) stderr = re.sub(tenant_id, '<tenant-id>', stderr)" ]
[ "if usrpass[username]==password: return 1 else: print(\"Invalid Password\") return -1 def", "print(\"Bind with host at port number : \"+str(port)) s.listen(10) print(\"Socket", "res==1: print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) # def", ": \"+str(port)) s.listen(10) print(\"Socket is listening!!\") except socket.error as msg:", "csv.reader(host_file, delimiter=\",\") for row in csv_hfile: iport.append(row[1]) port=int(iport[4]) def socketbind():", "socketaccept(): conn,add=s.accept() print(\"connection is established with IP : \"+str(add[0])+\" and", "host at port number : \"+str(port)) s.listen(10) print(\"Socket is listening!!\")", "usrpass={} def openfile(): filename=\"login_credentials.csv\" with open(filename,'r')as csvfile: csv_file = csv.reader(csvfile,", "\"+str(add[0])+\" and Port Number : \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) conversation(conn) conn.close() def", "def socketaccept(): conn,add=s.accept() print(\"connection is established with IP : \"+str(add[0])+\"", "openfile() socketbind() socketaccept() # count=0 # while (count<6): # new_thread=threading.Thread(target", "csv.reader(csvfile, delimiter=\",\") for col in csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname()", "csvfile: csv_file = csv.reader(csvfile, delimiter=\",\") for col in csv_file: usrpass[col[0]]=col[1]", "count=0 # while (count<6): # new_thread=threading.Thread(target =socketaccept) # new_thread.start() #", "# return 1 # else: # print(\"Invalid Username\") # return", "traceback import threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def openfile(): filename=\"login_credentials.csv\" with open(filename,'r')as", "s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def openfile(): filename=\"login_credentials.csv\" with open(filename,'r')as csvfile: csv_file =", 
"s.listen(10) print(\"Socket is listening!!\") except socket.error as msg: print(\"Error in", "delimiter=\",\") for row in csv_hfile: iport.append(row[1]) port=int(iport[4]) def socketbind(): try:", "def checkusr(username): # if username in usrpass: # return 1", "host_file: csv_hfile = csv.reader(host_file, delimiter=\",\") for row in csv_hfile: iport.append(row[1])", "established with IP : \"+str(add[0])+\" and Port Number : \"+str(add[1]))", "print(\"Invalid Password\") return -1 def main(): openfile() socketbind() socketaccept() #", "Password\") return -1 def main(): openfile() socketbind() socketaccept() # count=0", "filename=\"login_credentials.csv\" with open(filename,'r')as csvfile: csv_file = csv.reader(csvfile, delimiter=\",\") for col", "csv import traceback import threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def openfile(): filename=\"login_credentials.csv\"", "openfile(): filename=\"login_credentials.csv\" with open(filename,'r')as csvfile: csv_file = csv.reader(csvfile, delimiter=\",\") for", "is listening!!\") except socket.error as msg: print(\"Error in Binding: \"+", "threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def openfile(): filename=\"login_credentials.csv\" with open(filename,'r')as csvfile: csv_file", "# if username in usrpass: # return 1 # else:", "conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) # def checkusr(username): # if username", "with host at port number : \"+str(port)) s.listen(10) print(\"Socket is", "hostfile=\"host.csv\" with open(hostfile,'r')as host_file: csv_hfile = csv.reader(host_file, delimiter=\",\") for row", "usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname() host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\" with", "and Port Number : \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) 
conversation(conn) conn.close() def conversation(conn):", "conversation(conn) conn.close() def conversation(conn): while True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if", "open(hostfile,'r')as host_file: csv_hfile = csv.reader(host_file, delimiter=\",\") for row in csv_hfile:", "-1 def checkpass(username,password): if usrpass[username]==password: return 1 else: print(\"Invalid Password\")", "main(): openfile() socketbind() socketaccept() # count=0 # while (count<6): #", "in csv_hfile: iport.append(row[1]) port=int(iport[4]) def socketbind(): try: s.bind(('',port)) print(\"Bind with", "# def checkusr(username): # if username in usrpass: # return", "in Binding: \"+ str(msg)+\"\\n Retrying....\") socketbind() def socketaccept(): conn,add=s.accept() print(\"connection", "return 1 # else: # print(\"Invalid Username\") # return -1", "\"+ str(msg)+\"\\n Retrying....\") socketbind() def socketaccept(): conn,add=s.accept() print(\"connection is established", "listening!!\") except socket.error as msg: print(\"Error in Binding: \"+ str(msg)+\"\\n", ": \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) conversation(conn) conn.close() def conversation(conn): while True: username=str(conn.recv(1024),\"utf-8\")", "conn.send(bytes(\"1\",\"utf-8\")) conversation(conn) conn.close() def conversation(conn): while True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password)", "is established with IP : \"+str(add[0])+\" and Port Number :", "checkpass(username,password): if usrpass[username]==password: return 1 else: print(\"Invalid Password\") return -1", "import socket import csv import traceback import threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={}", "conn.send(bytes(\"-1\",\"utf-8\")) # def checkusr(username): # if username in usrpass: #", "def socketbind(): try: s.bind(('',port)) print(\"Bind with host at port number", 
"socketbind(): try: s.bind(('',port)) print(\"Bind with host at port number :", "number : \"+str(port)) s.listen(10) print(\"Socket is listening!!\") except socket.error as", "row in csv_hfile: iport.append(row[1]) port=int(iport[4]) def socketbind(): try: s.bind(('',port)) print(\"Bind", "print(\"Socket is listening!!\") except socket.error as msg: print(\"Error in Binding:", "password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if res==1: print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\"))", "host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\" with open(hostfile,'r')as host_file: csv_hfile = csv.reader(host_file, delimiter=\",\")", "print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) # def checkusr(username):", "checkusr(username): # if username in usrpass: # return 1 #", "Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) # def checkusr(username): #", "-1 def main(): openfile() socketbind() socketaccept() # count=0 # while", "with open(hostfile,'r')as host_file: csv_hfile = csv.reader(host_file, delimiter=\",\") for row in", "with IP : \"+str(add[0])+\" and Port Number : \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\"))", "port=int(iport[4]) def socketbind(): try: s.bind(('',port)) print(\"Bind with host at port", "IP : \"+str(add[0])+\" and Port Number : \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) conversation(conn)", "open(filename,'r')as csvfile: csv_file = csv.reader(csvfile, delimiter=\",\") for col in csv_file:", "Number : \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) conversation(conn) conn.close() def conversation(conn): while True:", "host=socket.gethostbyname(ihost) ihost=socket.gethostname() 
host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\" with open(hostfile,'r')as host_file: csv_hfile =", "# return -1 def checkpass(username,password): if usrpass[username]==password: return 1 else:", "#print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname() host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\" with open(hostfile,'r')as host_file:", "port number : \"+str(port)) s.listen(10) print(\"Socket is listening!!\") except socket.error", "for row in csv_hfile: iport.append(row[1]) port=int(iport[4]) def socketbind(): try: s.bind(('',port))", "in csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname() host=socket.gethostbyname(ihost) iport=[]", "csv_file = csv.reader(csvfile, delimiter=\",\") for col in csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\")", "at port number : \"+str(port)) s.listen(10) print(\"Socket is listening!!\") except", "import csv import traceback import threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def openfile():", "csv_hfile: iport.append(row[1]) port=int(iport[4]) def socketbind(): try: s.bind(('',port)) print(\"Bind with host", "ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname() host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\" with open(hostfile,'r')as host_file: csv_hfile", "username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if res==1: print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else:", "if res==1: print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) #", "s.bind(('',port)) print(\"Bind with host at port number : \"+str(port)) s.listen(10)", 
"Retrying....\") socketbind() def socketaccept(): conn,add=s.accept() print(\"connection is established with IP", "res=checkpass(username,password) if res==1: print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\"))", "in usrpass: # return 1 # else: # print(\"Invalid Username\")", "# print(\"Invalid Username\") # return -1 def checkpass(username,password): if usrpass[username]==password:", "def main(): openfile() socketbind() socketaccept() # count=0 # while (count<6):", "1 else: print(\"Invalid Password\") return -1 def main(): openfile() socketbind()", "usrpass: # return 1 # else: # print(\"Invalid Username\") #", "str(msg)+\"\\n Retrying....\") socketbind() def socketaccept(): conn,add=s.accept() print(\"connection is established with", "= csv.reader(csvfile, delimiter=\",\") for col in csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass)", "# count=0 # while (count<6): # new_thread=threading.Thread(target =socketaccept) # new_thread.start()", "conversation(conn): while True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if res==1: print(\"Valid Password!\")", "# else: # print(\"Invalid Username\") # return -1 def checkpass(username,password):", "with open(filename,'r')as csvfile: csv_file = csv.reader(csvfile, delimiter=\",\") for col in", "print(\"Error in Binding: \"+ str(msg)+\"\\n Retrying....\") socketbind() def socketaccept(): conn,add=s.accept()", "return -1 def checkpass(username,password): if usrpass[username]==password: return 1 else: print(\"Invalid", "socketbind() socketaccept() # count=0 # while (count<6): # new_thread=threading.Thread(target =socketaccept)", "socket import csv import traceback import threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def", "try: s.bind(('',port)) print(\"Bind with host at port number : 
\"+str(port))", "iport.append(row[1]) port=int(iport[4]) def socketbind(): try: s.bind(('',port)) print(\"Bind with host at", "conn.close() def conversation(conn): while True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if res==1:", "socketaccept() # count=0 # while (count<6): # new_thread=threading.Thread(target =socketaccept) #", "def openfile(): filename=\"login_credentials.csv\" with open(filename,'r')as csvfile: csv_file = csv.reader(csvfile, delimiter=\",\")", "if username in usrpass: # return 1 # else: #", "import threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def openfile(): filename=\"login_credentials.csv\" with open(filename,'r')as csvfile:", "def conversation(conn): while True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if res==1: print(\"Valid", "\"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) conversation(conn) conn.close() def conversation(conn): while True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\")", "conn,add=s.accept() print(\"connection is established with IP : \"+str(add[0])+\" and Port", "csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname() host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\"", "Binding: \"+ str(msg)+\"\\n Retrying....\") socketbind() def socketaccept(): conn,add=s.accept() print(\"connection is", "except socket.error as msg: print(\"Error in Binding: \"+ str(msg)+\"\\n Retrying....\")", "print(\"connection is established with IP : \"+str(add[0])+\" and Port Number", "= csv.reader(host_file, delimiter=\",\") for row in csv_hfile: iport.append(row[1]) port=int(iport[4]) def", "while True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if res==1: 
print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\"))", "for col in csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname()", "Port Number : \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) conversation(conn) conn.close() def conversation(conn): while", "return 1 else: print(\"Invalid Password\") return -1 def main(): openfile()", "conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\")) else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) # def checkusr(username): # if", "return -1 def main(): openfile() socketbind() socketaccept() # count=0 #", "socket.error as msg: print(\"Error in Binding: \"+ str(msg)+\"\\n Retrying....\") socketbind()", "Username\") # return -1 def checkpass(username,password): if usrpass[username]==password: return 1", "# while (count<6): # new_thread=threading.Thread(target =socketaccept) # new_thread.start() # count=count+1", "import traceback import threading s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) usrpass={} def openfile(): filename=\"login_credentials.csv\" with", "1 # else: # print(\"Invalid Username\") # return -1 def", "\"+str(port)) s.listen(10) print(\"Socket is listening!!\") except socket.error as msg: print(\"Error", "socketbind() def socketaccept(): conn,add=s.accept() print(\"connection is established with IP :", "else: print(\"Invalid Password\") return -1 def main(): openfile() socketbind() socketaccept()", "usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname() host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\" with open(hostfile,'r')as", "print(\"Invalid Username\") # return -1 def checkpass(username,password): if usrpass[username]==password: return", "def checkpass(username,password): if usrpass[username]==password: return 1 else: print(\"Invalid Password\") return", "while 
(count<6): # new_thread=threading.Thread(target =socketaccept) # new_thread.start() # count=count+1 main()", "conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) # def checkusr(username): # if username in usrpass:", "else: conn.send(bytes(\"-1\",\"utf-8\")) conn.send(bytes(\"-1\",\"utf-8\")) # def checkusr(username): # if username in", "csv_hfile = csv.reader(host_file, delimiter=\",\") for row in csv_hfile: iport.append(row[1]) port=int(iport[4])", "else: # print(\"Invalid Username\") # return -1 def checkpass(username,password): if", "msg: print(\"Error in Binding: \"+ str(msg)+\"\\n Retrying....\") socketbind() def socketaccept():", ": \"+str(add[0])+\" and Port Number : \"+str(add[1])) conn.send(bytes(\"1\",\"utf-8\")) conversation(conn) conn.close()", "ihost=socket.gethostname() host=socket.gethostbyname(ihost) iport=[] hostfile=\"host.csv\" with open(hostfile,'r')as host_file: csv_hfile = csv.reader(host_file,", "as msg: print(\"Error in Binding: \"+ str(msg)+\"\\n Retrying....\") socketbind() def", "col in csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost) ihost=socket.gethostname() host=socket.gethostbyname(ihost)", "True: username=str(conn.recv(1024),\"utf-8\") password=str(conn.recv(1024),\"utf-8\") res=checkpass(username,password) if res==1: print(\"Valid Password!\") conn.send(bytes(\"1\",\"utf-8\")) conn.send(bytes(\"1\",\"utf-8\"))", "username in usrpass: # return 1 # else: # print(\"Invalid", "delimiter=\",\") for col in csv_file: usrpass[col[0]]=col[1] usrpass.pop(\"Username\") #print(usrpass) ihost=socket.gethostname() host=socket.gethostbyname(ihost)", "iport=[] hostfile=\"host.csv\" with open(hostfile,'r')as host_file: csv_hfile = csv.reader(host_file, delimiter=\",\") for", "usrpass[username]==password: return 1 else: print(\"Invalid Password\") return -1 def main():" ]
[ "in comb]) pipelines.append((name, est)) # Create intermediate parameter grids sub_grids", "product from sklearn.base import clone from sklearn.preprocessing import FunctionTransformer from", "ob is not None else (nm, FunctionTransformer(), ParameterGrid(sg)) for nm,", "): wrapper_label = wrapper[0] wrapper_obj = wrapper[1] wrapper_grid = wrapper[2]", "from sklearn.base import clone from sklearn.preprocessing import FunctionTransformer from sklearn.model_selection", "from rlearn.utils import check_random_states def check_pipelines(objects_list, random_state, n_runs): \"\"\"Extract estimators", "is not None else (nm, FunctionTransformer(), ParameterGrid(sg)) for nm, ob,", "random_states = { f\"{param_prefix}{param}\": [rs] for param in est.get_params() if", "state if grid not in param_grid: param_grid.append(grid) return pipelines, param_grid", "not None else (nm, FunctionTransformer(), ParameterGrid(sg)) for nm, ob, sg", "Create estimator if name not in [n[0] for n in", "nm, obj, sg in comb ] # Create parameter grids", "== 1 else f\"{name}__\" grid = {\"est_name\": [name]} grid.update( {f\"{param_prefix}{k}\":", "f\"{name}__\" grid = {\"est_name\": [name]} grid.update( {f\"{param_prefix}{k}\": [v] for d", "[rs] for param in est.get_params() if \"random_state\" in param }", "param_grid = [] for comb, rs in product(product(*objects_list), random_states): name", "from sklearn.model_selection import ParameterGrid from imblearn.pipeline import Pipeline from rlearn.utils", "for param in est.get_params() if \"random_state\" in param } grid.update(random_states)", "estimators ] wrapped_param_grids = [ { \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}':", "= \"\" if len(comb) == 1 else f\"{name}__\" grid =", "} for d in param_grids ] if wrapped_only: return wrapped_estimators,", "sklearn.base import clone from sklearn.preprocessing import FunctionTransformer from sklearn.model_selection import", "if 
\"random_state\" in param } grid.update(random_states) # Avoid multiple runs", "\"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for k, v in d.items()", "wrapper[0] wrapper_obj = wrapper[1] wrapper_grid = wrapper[2] estimators, param_grids =", "sklearn.model_selection import ParameterGrid from imblearn.pipeline import Pipeline from rlearn.utils import", "random_states): name = \"|\".join([i[0] for i in comb]) # name,", "n_runs, wrapped_only=False ): wrapper_label = wrapper[0] wrapper_obj = wrapper[1] wrapper_grid", "= wrapper[0] wrapper_obj = wrapper[1] wrapper_grid = wrapper[2] estimators, param_grids", "check_pipelines(objects_list, random_state, n_runs): \"\"\"Extract estimators and parameters grids.\"\"\" # Create", "= [ (nm, ob, ParameterGrid(sg)) if ob is not None", "estimator if name not in [n[0] for n in pipelines]:", "comb ] # Create parameter grids for sub_grid in product(*sub_grids):", "grid.update(random_states) # Avoid multiple runs over pipelines without random state", "import Pipeline from rlearn.utils import check_random_states def check_pipelines(objects_list, random_state, n_runs):", "[] for comb, rs in product(product(*objects_list), random_states): name = \"|\".join([i[0]", "return wrapped_estimators, wrapped_param_grids else: return (estimators + wrapped_estimators, param_grids +", "# Create random states random_states = check_random_states(random_state, n_runs) pipelines =", "param_grid def check_pipelines_wrapper( objects_list, wrapper, random_state, n_runs, wrapped_only=False ): wrapper_label", "d.items()} ) random_states = { f\"{param_prefix}{param}\": [rs] for param in", "= check_random_states(random_state, n_runs) pipelines = [] param_grid = [] for", "d in sub_grid for k, v in d.items()} ) random_states", "for k, v in d.items()} ) random_states = { f\"{param_prefix}{param}\":", "sklearn.preprocessing import FunctionTransformer from sklearn.model_selection import 
ParameterGrid from imblearn.pipeline import", "for i in comb]) # name, object, sub grid comb", "in comb ] # Create parameter grids for sub_grid in", "# Create parameter grids for sub_grid in product(*sub_grids): param_prefix =", "f\"{param_prefix}{param}\": [rs] for param in est.get_params() if \"random_state\" in param", "d in param_grids ] if wrapped_only: return wrapped_estimators, wrapped_param_grids else:", "Avoid multiple runs over pipelines without random state if grid", "def check_pipelines(objects_list, random_state, n_runs): \"\"\"Extract estimators and parameters grids.\"\"\" #", "check_random_states(random_state, n_runs) pipelines = [] param_grid = [] for comb,", "} grid.update(random_states) # Avoid multiple runs over pipelines without random", "# name, object, sub grid comb = [ (nm, ob,", "wrapper_obj = wrapper[1] wrapper_grid = wrapper[2] estimators, param_grids = check_pipelines(objects_list,", "for nm, ob, _ in comb]) pipelines.append((name, est)) # Create", "ParameterGrid(sg)) for nm, ob, sg in comb ] # Create", "[n[0] for n in pipelines]: est = Pipeline([(nm, ob) for", "[v] for d in sub_grid for k, v in d.items()}", "\"\"\"Extract estimators and parameters grids.\"\"\" # Create random states random_states", "import product from sklearn.base import clone from sklearn.preprocessing import FunctionTransformer", "# Create intermediate parameter grids sub_grids = [ [{f\"{nm}__{k}\": v", "in comb]) # name, object, sub grid comb = [", "= check_pipelines(objects_list, random_state, n_runs) wrapped_estimators = [ ( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\":", "check_pipelines(objects_list, random_state, n_runs) wrapped_estimators = [ ( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}),", "return pipelines, param_grid def check_pipelines_wrapper( objects_list, wrapper, random_state, n_runs, wrapped_only=False", "param_grids = check_pipelines(objects_list, random_state, n_runs) 
wrapped_estimators = [ ( f\"{wrapper_label}|{name}\",", "(nm, FunctionTransformer(), ParameterGrid(sg)) for nm, ob, sg in comb ]", "for comb, rs in product(product(*objects_list), random_states): name = \"|\".join([i[0] for", "for d in sub_grid for k, v in d.items()} )", "states random_states = check_random_states(random_state, n_runs) pipelines = [] param_grid =", "import ParameterGrid from imblearn.pipeline import Pipeline from rlearn.utils import check_random_states", "rs in product(product(*objects_list), random_states): name = \"|\".join([i[0] for i in", "sub grid comb = [ (nm, ob, ParameterGrid(sg)) if ob", "[ [{f\"{nm}__{k}\": v for k, v in param_def.items()} for param_def", "v in param_def.items()} for param_def in sg] for nm, obj,", "pipelines, param_grid def check_pipelines_wrapper( objects_list, wrapper, random_state, n_runs, wrapped_only=False ):", "grid = {\"est_name\": [name]} grid.update( {f\"{param_prefix}{k}\": [v] for d in", "v for k, v in d.items() if k != \"est_name\"", "# Avoid multiple runs over pipelines without random state if", "[name]} grid.update( {f\"{param_prefix}{k}\": [v] for d in sub_grid for k,", "runs over pipelines without random state if grid not in", "estimators, param_grids = check_pipelines(objects_list, random_state, n_runs) wrapped_estimators = [ (", "in estimators ] wrapped_param_grids = [ { \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{", "random_state, n_runs) wrapped_estimators = [ ( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), )", "product(product(*objects_list), random_states): name = \"|\".join([i[0] for i in comb]) #", "rlearn.utils import check_random_states def check_pipelines(objects_list, random_state, n_runs): \"\"\"Extract estimators and", "( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), ) for name, pipeline in estimators", "pipelines]: est = Pipeline([(nm, ob) for nm, ob, _ in", "parameter grids sub_grids = [ 
[{f\"{nm}__{k}\": v for k, v", "for sub_grid in product(*sub_grids): param_prefix = \"\" if len(comb) ==", "random states random_states = check_random_states(random_state, n_runs) pipelines = [] param_grid", "len(comb) == 1 else f\"{name}__\" grid = {\"est_name\": [name]} grid.update(", "random_state, n_runs, wrapped_only=False ): wrapper_label = wrapper[0] wrapper_obj = wrapper[1]", "] wrapped_param_grids = [ { \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v", "object, sub grid comb = [ (nm, ob, ParameterGrid(sg)) if", "for d in param_grids ] if wrapped_only: return wrapped_estimators, wrapped_param_grids", "comb, rs in product(product(*objects_list), random_states): name = \"|\".join([i[0] for i", "pipeline}), ) for name, pipeline in estimators ] wrapped_param_grids =", "1 else f\"{name}__\" grid = {\"est_name\": [name]} grid.update( {f\"{param_prefix}{k}\": [v]", "ParameterGrid from imblearn.pipeline import Pipeline from rlearn.utils import check_random_states def", "clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), ) for name, pipeline in estimators ] wrapped_param_grids", "multiple runs over pipelines without random state if grid not", "] if wrapped_only: return wrapped_estimators, wrapped_param_grids else: return (estimators +", "param_grid: param_grid.append(grid) return pipelines, param_grid def check_pipelines_wrapper( objects_list, wrapper, random_state,", "Pipeline from rlearn.utils import check_random_states def check_pipelines(objects_list, random_state, n_runs): \"\"\"Extract", "check_random_states def check_pipelines(objects_list, random_state, n_runs): \"\"\"Extract estimators and parameters grids.\"\"\"", "itertools import product from sklearn.base import clone from sklearn.preprocessing import", "nm, ob, sg in comb ] # Create estimator if", "Pipeline([(nm, ob) for nm, ob, _ in comb]) pipelines.append((name, est))", "from itertools import product from sklearn.base 
import clone from sklearn.preprocessing", ") random_states = { f\"{param_prefix}{param}\": [rs] for param in est.get_params()", "not in [n[0] for n in pipelines]: est = Pipeline([(nm,", "if grid not in param_grid: param_grid.append(grid) return pipelines, param_grid def", "k, v in wrapper_grid.items() }, } for d in param_grids", "v in d.items()} ) random_states = { f\"{param_prefix}{param}\": [rs] for", "= wrapper[2] estimators, param_grids = check_pipelines(objects_list, random_state, n_runs) wrapped_estimators =", "in product(product(*objects_list), random_states): name = \"|\".join([i[0] for i in comb])", "(nm, ob, ParameterGrid(sg)) if ob is not None else (nm,", "param_prefix = \"\" if len(comb) == 1 else f\"{name}__\" grid", "for nm, ob, sg in comb ] # Create estimator", "check_pipelines_wrapper( objects_list, wrapper, random_state, n_runs, wrapped_only=False ): wrapper_label = wrapper[0]", "intermediate parameter grids sub_grids = [ [{f\"{nm}__{k}\": v for k,", "{\"est_name\": [name]} grid.update( {f\"{param_prefix}{k}\": [v] for d in sub_grid for", "in d.items()} ) random_states = { f\"{param_prefix}{param}\": [rs] for param", "in param } grid.update(random_states) # Avoid multiple runs over pipelines", "pipelines.append((name, est)) # Create intermediate parameter grids sub_grids = [", "if k != \"est_name\" }, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for k,", "d.items() if k != \"est_name\" }, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for", "sg in comb ] # Create parameter grids for sub_grid", "= \"|\".join([i[0] for i in comb]) # name, object, sub", "param } grid.update(random_states) # Avoid multiple runs over pipelines without", "[ { \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for k, v", "comb]) pipelines.append((name, est)) # Create intermediate parameter grids sub_grids =", "= {\"est_name\": [name]} grid.update( {f\"{param_prefix}{k}\": [v] for d in 
sub_grid", "Create parameter grids for sub_grid in product(*sub_grids): param_prefix = \"\"", "{f\"{param_prefix}{k}\": [v] for d in sub_grid for k, v in", "import FunctionTransformer from sklearn.model_selection import ParameterGrid from imblearn.pipeline import Pipeline", "grids.\"\"\" # Create random states random_states = check_random_states(random_state, n_runs) pipelines", "ParameterGrid(sg)) if ob is not None else (nm, FunctionTransformer(), ParameterGrid(sg))", "ob, sg in comb ] # Create estimator if name", "Create intermediate parameter grids sub_grids = [ [{f\"{nm}__{k}\": v for", "for name, pipeline in estimators ] wrapped_param_grids = [ {", "for k, v in wrapper_grid.items() }, } for d in", "pipelines = [] param_grid = [] for comb, rs in", "param_def in sg] for nm, obj, sg in comb ]", "{ \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for k, v in", "from imblearn.pipeline import Pipeline from rlearn.utils import check_random_states def check_pipelines(objects_list,", "from sklearn.preprocessing import FunctionTransformer from sklearn.model_selection import ParameterGrid from imblearn.pipeline", "and parameters grids.\"\"\" # Create random states random_states = check_random_states(random_state,", "random state if grid not in param_grid: param_grid.append(grid) return pipelines,", "obj, sg in comb ] # Create parameter grids for", "sub_grid for k, v in d.items()} ) random_states = {", "**{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for k, v in wrapper_grid.items() }, }", "**{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for k, v in d.items() if k", "in [n[0] for n in pipelines]: est = Pipeline([(nm, ob)", "= { f\"{param_prefix}{param}\": [rs] for param in est.get_params() if \"random_state\"", "not in param_grid: param_grid.append(grid) return pipelines, param_grid def check_pipelines_wrapper( objects_list,", "objects_list, wrapper, random_state, n_runs, 
wrapped_only=False ): wrapper_label = wrapper[0] wrapper_obj", "comb = [ (nm, ob, ParameterGrid(sg)) if ob is not", "= wrapper[1] wrapper_grid = wrapper[2] estimators, param_grids = check_pipelines(objects_list, random_state,", "k != \"est_name\" }, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for k, v", "in comb ] # Create estimator if name not in", "sub_grid in product(*sub_grids): param_prefix = \"\" if len(comb) == 1", "] # Create parameter grids for sub_grid in product(*sub_grids): param_prefix", "for param_def in sg] for nm, obj, sg in comb", "random_state, n_runs): \"\"\"Extract estimators and parameters grids.\"\"\" # Create random", "name not in [n[0] for n in pipelines]: est =", "v in d.items() if k != \"est_name\" }, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}':", "wrapped_only=False ): wrapper_label = wrapper[0] wrapper_obj = wrapper[1] wrapper_grid =", "if ob is not None else (nm, FunctionTransformer(), ParameterGrid(sg)) for", "param_def.items()} for param_def in sg] for nm, obj, sg in", "n_runs) wrapped_estimators = [ ( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), ) for", "wrapped_only: return wrapped_estimators, wrapped_param_grids else: return (estimators + wrapped_estimators, param_grids", "{ f\"{param_prefix}{param}\": [rs] for param in est.get_params() if \"random_state\" in", "[] param_grid = [] for comb, rs in product(product(*objects_list), random_states):", "est.get_params() if \"random_state\" in param } grid.update(random_states) # Avoid multiple", "without random state if grid not in param_grid: param_grid.append(grid) return", "param_grids ] if wrapped_only: return wrapped_estimators, wrapped_param_grids else: return (estimators", "f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), ) for name, pipeline in estimators ]", "grids for sub_grid in product(*sub_grids): param_prefix = \"\" if len(comb)", "n_runs) pipelines = [] param_grid = [] for 
comb, rs", "\"est_name\" }, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for k, v in wrapper_grid.items()", "pipelines without random state if grid not in param_grid: param_grid.append(grid)", "param_grid.append(grid) return pipelines, param_grid def check_pipelines_wrapper( objects_list, wrapper, random_state, n_runs,", "in sub_grid for k, v in d.items()} ) random_states =", "nm, ob, _ in comb]) pipelines.append((name, est)) # Create intermediate", "grids sub_grids = [ [{f\"{nm}__{k}\": v for k, v in", "ob, ParameterGrid(sg)) if ob is not None else (nm, FunctionTransformer(),", "parameter grids for sub_grid in product(*sub_grids): param_prefix = \"\" if", "wrapper_grid = wrapper[2] estimators, param_grids = check_pipelines(objects_list, random_state, n_runs) wrapped_estimators", "= Pipeline([(nm, ob) for nm, ob, _ in comb]) pipelines.append((name,", "wrapper[2] estimators, param_grids = check_pipelines(objects_list, random_state, n_runs) wrapped_estimators = [", "k, v in d.items() if k != \"est_name\" }, **{", "v for k, v in wrapper_grid.items() }, } for d", "clone from sklearn.preprocessing import FunctionTransformer from sklearn.model_selection import ParameterGrid from", "imblearn.pipeline import Pipeline from rlearn.utils import check_random_states def check_pipelines(objects_list, random_state,", "] # Create estimator if name not in [n[0] for", "_ in comb]) pipelines.append((name, est)) # Create intermediate parameter grids", "in sg] for nm, obj, sg in comb ] #", "in product(*sub_grids): param_prefix = \"\" if len(comb) == 1 else", "pipeline in estimators ] wrapped_param_grids = [ { \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'],", "for k, v in d.items() if k != \"est_name\" },", "in d.items() if k != \"est_name\" }, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v", "random_states = check_random_states(random_state, n_runs) pipelines = [] param_grid = []", "comb ] # Create estimator if name not in [n[0]", "None else (nm, 
FunctionTransformer(), ParameterGrid(sg)) for nm, ob, sg in", "in pipelines]: est = Pipeline([(nm, ob) for nm, ob, _", "k, v in d.items()} ) random_states = { f\"{param_prefix}{param}\": [rs]", "sub_grids = [ [{f\"{nm}__{k}\": v for k, v in param_def.items()}", "est = Pipeline([(nm, ob) for nm, ob, _ in comb])", "in est.get_params() if \"random_state\" in param } grid.update(random_states) # Avoid", "v for k, v in param_def.items()} for param_def in sg]", "}, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for k, v in wrapper_grid.items() },", "in param_def.items()} for param_def in sg] for nm, obj, sg", "over pipelines without random state if grid not in param_grid:", "n in pipelines]: est = Pipeline([(nm, ob) for nm, ob,", "grid not in param_grid: param_grid.append(grid) return pipelines, param_grid def check_pipelines_wrapper(", "wrapper_grid.items() }, } for d in param_grids ] if wrapped_only:", "\"|\".join([i[0] for i in comb]) # name, object, sub grid", "FunctionTransformer from sklearn.model_selection import ParameterGrid from imblearn.pipeline import Pipeline from", "for n in pipelines]: est = Pipeline([(nm, ob) for nm,", "i in comb]) # name, object, sub grid comb =", "name = \"|\".join([i[0] for i in comb]) # name, object,", "wrapped_estimators, wrapped_param_grids else: return (estimators + wrapped_estimators, param_grids + wrapped_param_grids)", "!= \"est_name\" }, **{ f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for k, v in", "v in wrapper_grid.items() }, } for d in param_grids ]", "wrapper[1] wrapper_grid = wrapper[2] estimators, param_grids = check_pipelines(objects_list, random_state, n_runs)", "\"random_state\" in param } grid.update(random_states) # Avoid multiple runs over", "sg in comb ] # Create estimator if name not", "grid comb = [ (nm, ob, ParameterGrid(sg)) if ob is", "= [ [{f\"{nm}__{k}\": v for k, v in param_def.items()} for", "in wrapper_grid.items() }, } for d in param_grids ] if", "if wrapped_only: return wrapped_estimators, 
wrapped_param_grids else: return (estimators + wrapped_estimators,", "[ ( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), ) for name, pipeline in", "in param_grid: param_grid.append(grid) return pipelines, param_grid def check_pipelines_wrapper( objects_list, wrapper,", "[ (nm, ob, ParameterGrid(sg)) if ob is not None else", "= [] for comb, rs in product(product(*objects_list), random_states): name =", "product(*sub_grids): param_prefix = \"\" if len(comb) == 1 else f\"{name}__\"", "def check_pipelines_wrapper( objects_list, wrapper, random_state, n_runs, wrapped_only=False ): wrapper_label =", "parameters grids.\"\"\" # Create random states random_states = check_random_states(random_state, n_runs)", "= [] param_grid = [] for comb, rs in product(product(*objects_list),", "= [ ( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), ) for name, pipeline", "for k, v in param_def.items()} for param_def in sg] for", "if name not in [n[0] for n in pipelines]: est", "f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for k, v in d.items() if k !=", "name, object, sub grid comb = [ (nm, ob, ParameterGrid(sg))", "ob, _ in comb]) pipelines.append((name, est)) # Create intermediate parameter", "in param_grids ] if wrapped_only: return wrapped_estimators, wrapped_param_grids else: return", "estimators and parameters grids.\"\"\" # Create random states random_states =", "grid.update( {f\"{param_prefix}{k}\": [v] for d in sub_grid for k, v", "comb]) # name, object, sub grid comb = [ (nm,", "wrapped_estimators = [ ( f\"{wrapper_label}|{name}\", clone(wrapper_obj).set_params(**{\"classifier\": pipeline}), ) for name,", "[f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for k, v in d.items() if", "n_runs): \"\"\"Extract estimators and parameters grids.\"\"\" # Create random states", "Create random states random_states = 
check_random_states(random_state, n_runs) pipelines = []", "import check_random_states def check_pipelines(objects_list, random_state, n_runs): \"\"\"Extract estimators and parameters", "for nm, obj, sg in comb ] # Create parameter", "if len(comb) == 1 else f\"{name}__\" grid = {\"est_name\": [name]}", "import clone from sklearn.preprocessing import FunctionTransformer from sklearn.model_selection import ParameterGrid", "param in est.get_params() if \"random_state\" in param } grid.update(random_states) #", "name, pipeline in estimators ] wrapped_param_grids = [ { \"est_name\":", "# Create estimator if name not in [n[0] for n", "wrapper, random_state, n_runs, wrapped_only=False ): wrapper_label = wrapper[0] wrapper_obj =", "sg] for nm, obj, sg in comb ] # Create", "f'{wrapper_label}|{d[\"est_name\"][0]}__{k}': v for k, v in wrapper_grid.items() }, } for", "ob) for nm, ob, _ in comb]) pipelines.append((name, est)) #", "k, v in param_def.items()} for param_def in sg] for nm,", "wrapped_param_grids = [ { \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for", "= [ { \"est_name\": [f'{wrapper_label}|{d[\"est_name\"][0]}'], **{ f'{wrapper_label}|{d[\"est_name\"][0]}__classifier__{k}': v for k,", "else (nm, FunctionTransformer(), ParameterGrid(sg)) for nm, ob, sg in comb", ") for name, pipeline in estimators ] wrapped_param_grids = [", "else f\"{name}__\" grid = {\"est_name\": [name]} grid.update( {f\"{param_prefix}{k}\": [v] for", "}, } for d in param_grids ] if wrapped_only: return", "est)) # Create intermediate parameter grids sub_grids = [ [{f\"{nm}__{k}\":", "\"\" if len(comb) == 1 else f\"{name}__\" grid = {\"est_name\":", "FunctionTransformer(), ParameterGrid(sg)) for nm, ob, sg in comb ] #", "[{f\"{nm}__{k}\": v for k, v in param_def.items()} for param_def in", "wrapper_label = wrapper[0] wrapper_obj = wrapper[1] wrapper_grid = wrapper[2] estimators," ]
[ "list of min values of each data buffer plotted. If", "= [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class RewardPerEpisode(PlotItemBuffer): \"\"\" Class that represents", "limit is dotted; otherwise, it is printed as a solid", "from mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited class", "to be used; \"\"\" title = \"Len of Episode\" plot_params", "None): list of max values of each data buffer plotted.", "represents a plot for the reward at every step. \"\"\"", "element is None, no min line is drawn. dotted_limits (list,", "the length of the episode. \"\"\" def __init__(self, plot_buffer): \"\"\"", "(list, None): list of booleans. If True, the corresponding limit", "RewardPerEpisode(PlotItemBuffer): \"\"\" Class that represents a plot for the accumulated", "class Actions(PlotItemBufferLimited): \"\"\" Class that represents a plot for the", "a plot for the reward at every step. \"\"\" def", "If True, the corresponding limit is dotted; otherwise, it is", "line is drawn. dotted_limits (list, None): list of booleans. If", "Class that represents a plot for the reward at every", "otherwise, it is printed as a solid line. \"\"\" title", "a solid line. \"\"\" title = \"Observations\" super().__init__(title, plot_buffers, maxs=maxs,", "at every step. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args:", "represents a plot for the accumulated reward per episode. \"\"\"", "reward per episode. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args:", "= \"Actions\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins) class Observations(PlotItemBufferLimited): \"\"\" Class", "the accumulated reward per episode. \"\"\" def __init__(self, plot_buffer): \"\"\"", "is printed as a solid line. \"\"\" title = \"Observations\"", "per episode. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. 
Args: plot_buffer", "dotted; otherwise, it is printed as a solid line. \"\"\"", "\"\"\" Class that represents a plot for the reward at", "is None, no max line is drawn; mins(list, None): list", "observations. \"\"\" def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None): \"\"\" Constructor.", "\"\"\" title = \"Len of Episode\" plot_params = [dict(data_buffer=plot_buffer)] super().__init__(title,", "an element is None, no min line is drawn. dotted_limits", "plot_buffer): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to be", "step. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args: plot_buffer (DataBuffer):", "is None, no min line is drawn. \"\"\" title =", "of the episode. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args:", "drawn; mins(list, None): list of min values of each data", "Args: plot_buffer (DataBuffer): data buffer to be used; \"\"\" title", "= \"Observations\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins, dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer): \"\"\"", "every step. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args: plot_buffer", "__init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer):", "for the observations. \"\"\" def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None):", "RewardPerStep(PlotItemBuffer): \"\"\" Class that represents a plot for the reward", "data buffer to be used. \"\"\" title = \"Episode_Reward\" curves_params", "is drawn. \"\"\" title = \"Actions\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins)", "be used. \"\"\" title = \"Episode_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title,", "used. \"\"\" title = \"Episode_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params)", "plot for the actions. 
\"\"\" def __init__(self, plot_buffers, maxs=None, mins=None):", "buffer to be used. \"\"\" title = \"Step_Reward\" curves_params =", "represents a plot for the actions. \"\"\" def __init__(self, plot_buffers,", "no max line is drawn; mins(list, None): list of min", "super().__init__(title, curves_params) class Actions(PlotItemBufferLimited): \"\"\" Class that represents a plot", "plot_buffers, maxs=None, mins=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer", "True, the corresponding limit is dotted; otherwise, it is printed", "plot for the reward at every step. \"\"\" def __init__(self,", "None, no min line is drawn. dotted_limits (list, None): list", "min line is drawn. dotted_limits (list, None): list of booleans.", "to be used. \"\"\" title = \"Step_Reward\" curves_params = [dict(data_buffer=plot_buffer)]", "class RewardPerStep(PlotItemBuffer): \"\"\" Class that represents a plot for the", "Args: plot_buffer (DataBuffer): data buffer to be used; maxs(list, None):", "of each data buffer plotted. If an element is None,", "used; maxs(list, None): list of max values of each data", "plot_buffers, maxs=maxs, mins=mins) class Observations(PlotItemBufferLimited): \"\"\" Class that represents a", "data buffer to be used. \"\"\" title = \"Step_Reward\" curves_params", "curves_params) class RewardPerEpisode(PlotItemBuffer): \"\"\" Class that represents a plot for", "\"\"\" title = \"Step_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class", "maxs=maxs, mins=mins, dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class that represents a", "title = \"Len of Episode\" plot_params = [dict(data_buffer=plot_buffer)] super().__init__(title, plot_params)", "mins(list, None): list of min values of each data buffer", "list of booleans. 
If True, the corresponding limit is dotted;", "DataBuffer from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited class RewardPerStep(PlotItemBuffer): \"\"\" Class that", "class RewardPerEpisode(PlotItemBuffer): \"\"\" Class that represents a plot for the", "max line is drawn; mins(list, None): list of min values", "title = \"Episode_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class Actions(PlotItemBufferLimited):", "__init__(self, plot_buffer): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to", "maxs=None, mins=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to", "be used; \"\"\" title = \"Len of Episode\" plot_params =", "\"Actions\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins) class Observations(PlotItemBufferLimited): \"\"\" Class that", "min line is drawn. \"\"\" title = \"Actions\" super().__init__(title, plot_buffers,", "maxs=maxs, mins=mins) class Observations(PlotItemBufferLimited): \"\"\" Class that represents a plot", "Constructor. Args: plot_buffer (DataBuffer): data buffer to be used; maxs(list,", "Class that represents a plot for the observations. \"\"\" def", "import PlotItemBuffer, DataBuffer from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited class RewardPerStep(PlotItemBuffer): \"\"\"", "\"\"\" title = \"Episode_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class", "represents a plot for the observations. \"\"\" def __init__(self, plot_buffers,", "length of the episode. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor.", "Constructor. Args: plot_buffer (DataBuffer): data buffer to be used. \"\"\"", "line is drawn. 
\"\"\" title = \"Actions\" super().__init__(title, plot_buffers, maxs=maxs,", "data buffer to be used; maxs(list, None): list of max", "corresponding limit is dotted; otherwise, it is printed as a", "represents a plot for the length of the episode. \"\"\"", "\"Episode_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class Actions(PlotItemBufferLimited): \"\"\" Class", "from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited class RewardPerStep(PlotItemBuffer): \"\"\" Class that represents", "dotted_limits (list, None): list of booleans. If True, the corresponding", "that represents a plot for the length of the episode.", "If an element is None, no min line is drawn.", "maxs(list, None): list of max values of each data buffer", "title = \"Actions\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins) class Observations(PlotItemBufferLimited): \"\"\"", "the actions. \"\"\" def __init__(self, plot_buffers, maxs=None, mins=None): \"\"\" Constructor.", "Actions(PlotItemBufferLimited): \"\"\" Class that represents a plot for the actions.", "data buffer to be used; \"\"\" title = \"Len of", "a plot for the accumulated reward per episode. \"\"\" def", "mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited class RewardPerStep(PlotItemBuffer): \"\"\" Class that represents a", "buffer plotted. If an element is None, no max line", "solid line. \"\"\" title = \"Observations\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins,", "(DataBuffer): data buffer to be used. \"\"\" title = \"Episode_Reward\"", "PlotItemBuffer, DataBuffer from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited class RewardPerStep(PlotItemBuffer): \"\"\" Class", "data buffer plotted. If an element is None, no min", "an element is None, no min line is drawn. 
\"\"\"", "\"\"\" title = \"Actions\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins) class Observations(PlotItemBufferLimited):", "element is None, no min line is drawn. \"\"\" title", "plotted. If an element is None, no max line is", "printed as a solid line. \"\"\" title = \"Observations\" super().__init__(title,", "None): list of booleans. If True, the corresponding limit is", "title = \"Step_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class RewardPerEpisode(PlotItemBuffer):", "curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class Actions(PlotItemBufferLimited): \"\"\" Class that", "used. \"\"\" title = \"Step_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params)", "mins=None, dotted_limits=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to", "each data buffer plotted. If an element is None, no", "\"\"\" def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None): \"\"\" Constructor. Args:", "be used; maxs(list, None): list of max values of each", "an element is None, no max line is drawn; mins(list,", "None): list of min values of each data buffer plotted.", "(DataBuffer): data buffer to be used. \"\"\" title = \"Step_Reward\"", "= [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class Actions(PlotItemBufferLimited): \"\"\" Class that represents", "plotted. If an element is None, no min line is", "that represents a plot for the reward at every step.", "list of max values of each data buffer plotted. If", "plot for the observations. \"\"\" def __init__(self, plot_buffers, maxs=None, mins=None,", "mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited class RewardPerStep(PlotItemBuffer):", "plot_buffers, maxs=None, mins=None, dotted_limits=None): \"\"\" Constructor. 
Args: plot_buffer (DataBuffer): data", "the observations. \"\"\" def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None): \"\"\"", "\"Observations\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins, dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class", "of booleans. If True, the corresponding limit is dotted; otherwise,", "to be used; maxs(list, None): list of max values of", "data buffer plotted. If an element is None, no max", "Args: plot_buffer (DataBuffer): data buffer to be used. \"\"\" title", "dotted_limits=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to be", "that represents a plot for the accumulated reward per episode.", "be used. \"\"\" title = \"Step_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title,", "mins=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to be", "it is printed as a solid line. \"\"\" title =", "used; \"\"\" title = \"Len of Episode\" plot_params = [dict(data_buffer=plot_buffer)]", "for the actions. \"\"\" def __init__(self, plot_buffers, maxs=None, mins=None): \"\"\"", "\"\"\" Class that represents a plot for the observations. \"\"\"", "class Observations(PlotItemBufferLimited): \"\"\" Class that represents a plot for the", "max values of each data buffer plotted. If an element", "class LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class that represents a plot for the", "\"\"\" def __init__(self, plot_buffers, maxs=None, mins=None): \"\"\" Constructor. Args: plot_buffer", "\"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data", "\"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to be used.", "to be used. 
\"\"\" title = \"Episode_Reward\" curves_params = [dict(data_buffer=plot_buffer)]", "plot_buffer (DataBuffer): data buffer to be used; \"\"\" title =", "Observations(PlotItemBufferLimited): \"\"\" Class that represents a plot for the observations.", "of max values of each data buffer plotted. If an", "\"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer to be used;", "maxs=None, mins=None, dotted_limits=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer", "def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None): \"\"\" Constructor. Args: plot_buffer", "accumulated reward per episode. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor.", "booleans. If True, the corresponding limit is dotted; otherwise, it", "mins=mins, dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class that represents a plot", "dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class that represents a plot for", "that represents a plot for the observations. \"\"\" def __init__(self,", "buffer to be used. 
\"\"\" title = \"Episode_Reward\" curves_params =", "is dotted; otherwise, it is printed as a solid line.", "= \"Step_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class RewardPerEpisode(PlotItemBuffer): \"\"\"", "\"\"\" Class that represents a plot for the accumulated reward", "= \"Episode_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class Actions(PlotItemBufferLimited): \"\"\"", "plot_buffers, maxs=maxs, mins=mins, dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class that represents", "\"\"\" title = \"Observations\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins, dotted_limits=dotted_limits) class", "[dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class Actions(PlotItemBufferLimited): \"\"\" Class that represents a", "episode. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args: plot_buffer (DataBuffer):", "def __init__(self, plot_buffer): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data buffer", "buffer to be used; maxs(list, None): list of max values", "for the accumulated reward per episode. \"\"\" def __init__(self, plot_buffer):", "None, no max line is drawn; mins(list, None): list of", "the corresponding limit is dotted; otherwise, it is printed as", "that represents a plot for the actions. \"\"\" def __init__(self,", "None, no min line is drawn. \"\"\" title = \"Actions\"", "is drawn. dotted_limits (list, None): list of booleans. If True,", "\"\"\" Class that represents a plot for the length of", "(DataBuffer): data buffer to be used; \"\"\" title = \"Len", "Class that represents a plot for the actions. \"\"\" def", "a plot for the observations. 
\"\"\" def __init__(self, plot_buffers, maxs=None,", "line is drawn; mins(list, None): list of min values of", "LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class that represents a plot for the length", "def __init__(self, plot_buffers, maxs=None, mins=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer):", "super().__init__(title, plot_buffers, maxs=maxs, mins=mins) class Observations(PlotItemBufferLimited): \"\"\" Class that represents", "curves_params) class Actions(PlotItemBufferLimited): \"\"\" Class that represents a plot for", "drawn. \"\"\" title = \"Actions\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins) class", "element is None, no max line is drawn; mins(list, None):", "buffer plotted. If an element is None, no min line", "the episode. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor. Args: plot_buffer", "the reward at every step. \"\"\" def __init__(self, plot_buffer): \"\"\"", "reward at every step. \"\"\" def __init__(self, plot_buffer): \"\"\" Constructor.", "for the length of the episode. \"\"\" def __init__(self, plot_buffer):", "plot for the accumulated reward per episode. \"\"\" def __init__(self,", "plot_buffer (DataBuffer): data buffer to be used; maxs(list, None): list", "as a solid line. \"\"\" title = \"Observations\" super().__init__(title, plot_buffers,", "for the reward at every step. \"\"\" def __init__(self, plot_buffer):", "no min line is drawn. dotted_limits (list, None): list of", "a plot for the actions. \"\"\" def __init__(self, plot_buffers, maxs=None,", "no min line is drawn. \"\"\" title = \"Actions\" super().__init__(title,", "\"\"\" Class that represents a plot for the actions. \"\"\"", "actions. \"\"\" def __init__(self, plot_buffers, maxs=None, mins=None): \"\"\" Constructor. 
Args:", "title = \"Observations\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins, dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer):", "[dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class RewardPerEpisode(PlotItemBuffer): \"\"\" Class that represents a", "line. \"\"\" title = \"Observations\" super().__init__(title, plot_buffers, maxs=maxs, mins=mins, dotted_limits=dotted_limits)", "is None, no min line is drawn. dotted_limits (list, None):", "buffer to be used; \"\"\" title = \"Len of Episode\"", "(DataBuffer): data buffer to be used; maxs(list, None): list of", "mins=mins) class Observations(PlotItemBufferLimited): \"\"\" Class that represents a plot for", "a plot for the length of the episode. \"\"\" def", "plot_buffer (DataBuffer): data buffer to be used. \"\"\" title =", "PlotItemBufferLimited class RewardPerStep(PlotItemBuffer): \"\"\" Class that represents a plot for", "Class that represents a plot for the length of the", "super().__init__(title, curves_params) class RewardPerEpisode(PlotItemBuffer): \"\"\" Class that represents a plot", "min values of each data buffer plotted. If an element", "\"Step_Reward\" curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class RewardPerEpisode(PlotItemBuffer): \"\"\" Class", "If an element is None, no max line is drawn;", "Constructor. Args: plot_buffer (DataBuffer): data buffer to be used; \"\"\"", "values of each data buffer plotted. If an element is", "drawn. dotted_limits (list, None): list of booleans. If True, the", "plot for the length of the episode. \"\"\" def __init__(self,", "__init__(self, plot_buffers, maxs=None, mins=None): \"\"\" Constructor. Args: plot_buffer (DataBuffer): data", "import PlotItemBufferLimited class RewardPerStep(PlotItemBuffer): \"\"\" Class that represents a plot", "of min values of each data buffer plotted. 
If an", "is drawn; mins(list, None): list of min values of each", "super().__init__(title, plot_buffers, maxs=maxs, mins=mins, dotted_limits=dotted_limits) class LenOfEpisodeTraining(PlotItemBuffer): \"\"\" Class that", "Class that represents a plot for the accumulated reward per", "curves_params = [dict(data_buffer=plot_buffer)] super().__init__(title, curves_params) class RewardPerEpisode(PlotItemBuffer): \"\"\" Class that" ]
[ "= None if not testtools.helpers.safe_hasattr(instance, self.attribute_name): result = AttributeNotFoundMismatch(instance, self.attribute_name)", "FileNotFoundError except NameError: # Python 2 uses IOError. FileNotFoundError =", "'version': \"UNKNOWN\", 'maintainer': \"UNKNOWN\", } scenarios = [ ('version 0.0',", "\"\"\" if hasattr(self, 'get_distribution_error'): self.skipTest(\"No access to distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with(", "FakeYearRange(begin=1970, end=1970), }), ('no end year', { 'begin_year': \"1970\", 'end_date':", "up test fixtures. \"\"\" super(get_distribution_version_info_TestCase, self).setUp() self.test_args = {} if", "] def setUp(self): \"\"\" Set up test fixtures. \"\"\" super(get_distribution_version_info_TestCase,", "as urlparse except ImportError: # Python 2 standard library. import", "hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial(", "'test_filename': \"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name':", "# This is free software: you may copy, modify, and/or", "\"1.0\", }), 'expected_version_info': {'version': \"1.0\"}, }), ('file lorem_ipsum.json', { 'test_filename':", "year', { 'begin_year': 1970, 'end_year': None, 'expected_text': \"1970\", }), ]", "False return True def fake_func_get_metadata(testcase, resource_name): \"\"\" Fake the behaviour", "json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': default_version_info, }), ]", "self).setUp() self.test_args = {} if hasattr(self, 'test_filename'): self.test_args['filename'] = self.test_filename", "instance=self.instance, name=self.attribute_name) return text class 
metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for", "= name def match(self, instance): \"\"\" Assert the object `instance`", "Unit test for ‘_metadata’ private module. \"\"\" from __future__ import", "class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘YearRange’ class. \"\"\" scenarios", "Optional range dash and four-digit year. ) regex_flags = re.UNICODE", "'expected_text': \"1970–1979\", }), ('same year', { 'begin_year': 1970, 'end_year': 1970,", "\"1.0\"}, }), ('file lorem_ipsum.json', { 'test_filename': \"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\", 'test_version_info':", "{'version': \"1.0\"}, }), ('not installed', { 'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info,", "FUTURE token', { 'begin_year': \"1970\", 'end_date': \"FUTURE\", 'expected_range': FakeYearRange(begin=1970, end=None),", "metadata module values. \"\"\" expected_str_attributes = set([ 'version_installed', 'author', 'copyright',", "may copy, modify, and/or distribute this work # under the", "test/test_metadata.py # Part of ‘python-daemon’, an implementation of PEP 3143.", "self.begin_year, self.end_year) def test_text_representation_as_expected(self): \"\"\" Text representation should be as", "}), ('no end year', { 'begin_year': \"1970\", 'end_date': None, 'expected_range':", "testtools.helpers import testtools.matchers from . import scaffold from .scaffold import", "the object `instance` has an attribute named `name`. \"\"\" result", "[ (name, {'attribute_name': name}) for name in expected_str_attributes] for (name,", "NotImplemented continue # Expect an attribute of ‘str’ to test", "‘make_year_range’ function. 
\"\"\" scenarios = [ ('simple', { 'begin_year': \"1970\",", "testcase) mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata, testcase) return mock_distribution @mock.patch.object(metadata, 'distribution_name',", "get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘get_distribution_version_info’ function. \"\"\" default_version_info =", "should match expected YearRange. \"\"\" result = metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result,", "metadata.YearRange( self.begin_year, self.end_year) def test_text_representation_as_expected(self): \"\"\" Text representation should be", "'end_year': 1979, 'expected_text': \"1970–1979\", }), ('same year', { 'begin_year': 1970,", "under the terms of the GNU General Public License as", "\"\"\" Metadata should have expected value as a module attribute.", "def test_url_parses_correctly(self): \"\"\" Homepage URL should parse correctly. \"\"\" result", "match the expected items. \"\"\" version_info = metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info)", "{ 'test_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\",", "(name, params) in scenarios: if name == 'version_installed': # No", "\"\"\" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. \"\"\" if ( resource_name", "'version_info_filename'): self.version_info_filename = version_info_filename if not hasattr(self, 'expected_resource_name'): self.expected_resource_name =", "self.skipTest(\"Can't assert this attribute's type\") instance = getattr(metadata, self.attribute_name) self.assertThat(", "The package distribution should be retrieved. \"\"\" expected_distribution_name = metadata.distribution_name", "expressed or implied. See the file ‘LICENSE.GPL-3’ for details. 
#", "Copyright statement should be formatted correctly. \"\"\" regex_pattern = (", "class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The specified instance does not have the", "resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. \"\"\" if not", "test_module_attribute_has_duck_type(self): \"\"\" Metadata value should have expected duck-typing attribute. \"\"\"", "expected duck-typing attribute. \"\"\" if self.ducktype_attribute_name == NotImplemented: self.skipTest(\"Can't assert", "\"1970\", 'end_date': \"1979-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1979), }), ('same year', {", "specified instance does not have the named attribute. \"\"\" def", "('no end year', { 'begin_year': 1970, 'end_year': None, 'expected_text': \"1970\",", "AttributeNotFoundMismatch(instance, self.attribute_name) return result class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The specified instance", "\"\"\" Emit a text description of this mismatch. \"\"\" text", "('version 1.0', { 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_version_info': {'version':", "{ 'test_filename': \"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }),", "has an attribute named `name`. \"\"\" result = None if", "}), ('no version_info', { 'expected_version_info': default_version_info, }), ('wrong filename', {", "}), ('version 1.0', { 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_version_info':", "\"\"\" result = None if not testtools.helpers.safe_hasattr(instance, self.attribute_name): result =", "import scaffold from .scaffold import unicode import daemon._metadata as metadata", "software, and you are welcome to redistribute it under #", "scenarios = [ ('version 0.0', { 'test_version_info': json.dumps({ 'version': \"0.0\",", "# Python 3 standard library. 
import urllib.parse as urlparse except", "be formatted correctly. \"\"\" regex_pattern = ( \"Copyright © \"", "metadata. \"\"\" def test_copyright_formatted_correctly(self): \"\"\" Copyright statement should be formatted", "1970, 'end_year': 1970, 'expected_text': \"1970\", }), ('no end year', {", "value {url!r} did not parse correctly\".format( url=metadata.url)) try: FileNotFoundError except", "attribute of ‘str’ to test this value. params['ducktype_attribute_name'] = 'isdigit'", "in scenarios: if name == 'version_installed': # No duck typing,", "\" has no attribute named {name!r}\").format( instance=self.instance, name=self.attribute_name) return text", "typing, this attribute might be None. params['ducktype_attribute_name'] = NotImplemented continue", "= functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata, testcase) return", "metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def test_requests_specified_filename(self): \"\"\" The specified metadata", "}), 'expected_version_info': {'version': \"1.0\"}, }), ('file lorem_ipsum.json', { 'test_filename': \"lorem_ipsum.json\",", "self.skipTest(\"No access to distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def test_result_matches_expected_items(self): \"\"\"", "this work # under the terms of the GNU General", "version. # No warranty expressed or implied. See the file", "import re try: # Python 3 standard library. 
import urllib.parse", "{ 'expected_version_info': default_version_info, }), ('wrong filename', { 'test_filename': \"lorem_ipsum.json\", 'test_version_info':", "= functools.partial( fake_func_get_distribution, self) def test_requests_installed_distribution(self): \"\"\" The package distribution", "# Python 2 uses IOError. FileNotFoundError = functools.partial(IOError, errno.ENOENT) version_info_filename", "class metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for metadata module values. \"\"\"", "fake_func_has_metadata(testcase, resource_name): error = FileNotFoundError(resource_name) raise error content = testcase.test_version_info", "'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info, }), ('no version_info', { 'expected_version_info': default_version_info,", "conditions; see the end of this file for copyright #", "Email address, in angle brackets. ) regex_flags = re.UNICODE self.assertThat(", "Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. \"\"\" if not fake_func_has_metadata(testcase, resource_name):", "class metadata_content_TestCase(scaffold.TestCase): \"\"\" Test cases for content of metadata. \"\"\"", "of the GNU General Public License as published by the", "or not hasattr(testcase, 'test_version_info')): return False return True def fake_func_get_metadata(testcase,", "regex_pattern = ( \"Copyright © \" \"\\d{4}\" # Four-digit year.", "of that license or any later version. # No warranty", "'test_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info':", "import collections import errno import functools import json import re", "Part of ‘python-daemon’, an implementation of PEP 3143. # #", "coding: utf-8 -*- # # test/test_metadata.py # Part of ‘python-daemon’,", "module attribute. 
\"\"\" self.assertThat( metadata, HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self): \"\"\" Metadata", "are welcome to redistribute it under # certain conditions; see", "Test cases for ‘get_distribution_version_info’ function. \"\"\" default_version_info = { 'release_date':", "self.assertIsInstance( result, urlparse.ParseResult, \"URL value {url!r} did not parse correctly\".format(", "implied. See the file ‘LICENSE.GPL-3’ for details. # Local variables:", "have expected value as a module attribute. \"\"\" self.assertThat( metadata,", "self.expected_range) class metadata_content_TestCase(scaffold.TestCase): \"\"\" Test cases for content of metadata.", "This is free software: you may copy, modify, and/or distribute", "© \" \"\\d{4}\" # Four-digit year. \"(?:–\\d{4})?\" # Optional range", "metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info) # Copyright © 2008–2018 <NAME> <<EMAIL>> #", "}), ('wrong filename', { 'test_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\",", "'end_date': \"FUTURE\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ] def test_result_matches_expected_range(self): \"\"\"", "to distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def test_result_matches_expected_items(self): \"\"\" The result", "hasattr(self, 'test_filename'): self.test_args['filename'] = self.test_filename if not hasattr(self, 'version_info_filename'): self.version_info_filename", "did not parse correctly\".format( url=metadata.url)) try: FileNotFoundError except NameError: #", "'begin_year': \"1970\", 'end_date': \"1979-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1979), }), ('same year',", "test fixtures. 
\"\"\" super(YearRange_TestCase, self).setUp() self.test_instance = metadata.YearRange( self.begin_year, self.end_year)", "testtools.helpers.safe_hasattr(instance, self.attribute_name): result = AttributeNotFoundMismatch(instance, self.attribute_name) return result class AttributeNotFoundMismatch(testtools.matchers.Mismatch):", "name in expected_str_attributes] for (name, params) in scenarios: if name", "testcase) return mock_distribution @mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test", "= [ (name, {'attribute_name': name}) for name in expected_str_attributes] for", "def test_result_matches_expected_range(self): \"\"\" Result should match expected YearRange. \"\"\" result", "set([ 'version_installed', 'author', 'copyright', 'license', 'url', ]) scenarios = [", "'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': {'version': \"1.0\"}, }), ('not installed', { 'get_distribution_error':", "'end_year': 1970, 'expected_text': \"1970\", }), ('no end year', { 'begin_year':", "test for ‘_metadata’ private module. \"\"\" from __future__ import (absolute_import,", "testcase.version_info_filename or not hasattr(testcase, 'test_version_info')): return False return True def", "a module attribute. \"\"\" self.assertThat( metadata, HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self): \"\"\"", "regex_flags)) def test_author_formatted_correctly(self): \"\"\" Author information should be formatted correctly.", "object has a named attribute. 
\"\"\" def __init__(self, name): self.attribute_name", "}), ('end date UNKNOWN token', { 'begin_year': \"1970\", 'end_date': \"UNKNOWN\",", "\"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': default_version_info,", "@mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘get_distribution_version_info’", "testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_author_formatted_correctly(self): \"\"\" Author information should be formatted", "for details. # Local variables: # coding: utf-8 # mode:", "see the end of this file for copyright # information,", "A matcher to assert an object has a named attribute.", "should parse correctly. \"\"\" result = urlparse.urlparse(metadata.url) self.assertIsInstance( result, urlparse.ParseResult,", "for name in expected_str_attributes] for (name, params) in scenarios: if", "# Free Software Foundation; version 3 of that license or", "assert this attribute's type\") instance = getattr(metadata, self.attribute_name) self.assertThat( instance,", "an implementation of PEP 3143. # # This is free", "an attribute named `name`. \"\"\" result = None if not", "\"\"\" Test cases for metadata module values. \"\"\" expected_str_attributes =", "re.UNICODE self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_copyright_contains_author(self): \"\"\" Copyright information", "metadata.copyright, testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self): \"\"\" Homepage URL should parse correctly.", "2 standard library. 
import urlparse import mock import pkg_resources import", "distribution_name != metadata.distribution_name: raise pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error", "lorem_ipsum.json', { 'test_filename': \"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\",", "'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': default_version_info, }),", "is free software, and you are welcome to redistribute it", "an object has a named attribute. \"\"\" def __init__(self, name):", "file for copyright # information, grant of license, and disclaimer", "disclaimer of warranty. \"\"\" Unit test for ‘_metadata’ private module.", "scaffold from .scaffold import unicode import daemon._metadata as metadata class", "'copyright', 'license', 'url', ]) scenarios = [ (name, {'attribute_name': name})", "of license, and disclaimer of warranty. \"\"\" Unit test for", "'version_installed', 'author', 'copyright', 'license', 'url', ]) scenarios = [ (name,", "Python 2 uses IOError. FileNotFoundError = functools.partial(IOError, errno.ENOENT) version_info_filename =", "# This is free software, and you are welcome to", "resource name should be requested. \"\"\" if hasattr(self, 'get_distribution_error'): self.skipTest(\"No", "self.end_date) self.assertEqual(result, self.expected_range) class metadata_content_TestCase(scaffold.TestCase): \"\"\" Test cases for content", "instance, name): self.instance = instance self.attribute_name = name def describe(self):", "\"\"\" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. \"\"\" if not fake_func_has_metadata(testcase,", "def test_author_formatted_correctly(self): \"\"\" Author information should be formatted correctly. \"\"\"", "‘pkg_resources.Distribution.get_metadata’. 
\"\"\" if not fake_func_has_metadata(testcase, resource_name): error = FileNotFoundError(resource_name) raise", "# Python 2 standard library. import urlparse import mock import", "\"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': {'version':", "# Local variables: # coding: utf-8 # mode: python #", "any later version. # No warranty expressed or implied. See", "= 'isdigit' def test_module_has_attribute(self): \"\"\" Metadata should have expected value", "test_copyright_formatted_correctly(self): \"\"\" Copyright statement should be formatted correctly. \"\"\" regex_pattern", "not fake_func_has_metadata(testcase, resource_name): error = FileNotFoundError(resource_name) raise error content =", "module values. \"\"\" expected_str_attributes = set([ 'version_installed', 'author', 'copyright', 'license',", "None, 'expected_text': \"1970\", }), ] def setUp(self): \"\"\" Set up", "of ‘pkg_resources.Distribution.get_metadata’. \"\"\" if not fake_func_has_metadata(testcase, resource_name): error = FileNotFoundError(resource_name)", "specified metadata resource name should be requested. \"\"\" if hasattr(self,", "dash and four-digit year. ) regex_flags = re.UNICODE self.assertThat( metadata.copyright,", "func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect = functools.partial( fake_func_get_distribution, self) def test_requests_installed_distribution(self): \"\"\"", "# Name. \"<[^>]+>\" # Email address, in angle brackets. 
)", "\"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date FUTURE token', {", "not hasattr(self, 'expected_resource_name'): self.expected_resource_name = version_info_filename self.mock_distribution = mock.MagicMock() func_patcher_get_distribution", "'test_filename'): self.test_args['filename'] = self.test_filename if not hasattr(self, 'version_info_filename'): self.version_info_filename =", "`instance` has an attribute named `name`. \"\"\" result = None", "attribute. \"\"\" self.assertThat( metadata, HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self): \"\"\" Metadata value", "information. \"\"\" self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self): \"\"\" Homepage URL", "json.dumps({ 'version': \"0.0\", }), 'expected_version_info': {'version': \"0.0\"}, }), ('version 1.0',", "\"\"\" A matcher to assert an object has a named", "redistribute it under # certain conditions; see the end of", "= re.UNICODE self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_author_formatted_correctly(self): \"\"\" Author", "'get_distribution_error'): self.skipTest(\"No access to distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def test_result_matches_expected_items(self):", "name): self.instance = instance self.attribute_name = name def describe(self): \"\"\"", "== 'version_installed': # No duck typing, this attribute might be", "pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect", "= mock.MagicMock() func_patcher_get_distribution = mock.patch.object( pkg_resources, 'get_distribution') func_patcher_get_distribution.start() 
self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect", "instance does not have the named attribute. \"\"\" def __init__(self,", "(absolute_import, unicode_literals) import collections import errno import functools import json", "{ 'begin_year': 1970, 'end_year': 1979, 'expected_text': \"1970–1979\", }), ('same year',", "behaviour of ‘pkg_resources.Distribution.has_metadata’. \"\"\" if ( resource_name != testcase.version_info_filename or", "def fake_func_has_metadata(testcase, resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. \"\"\"", "new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘get_distribution_version_info’ function. \"\"\"", "metadata resource name should be requested. \"\"\" if hasattr(self, 'get_distribution_error'):", "= urlparse.urlparse(metadata.url) self.assertIsInstance( result, urlparse.ParseResult, \"URL value {url!r} did not", "The specified metadata resource name should be requested. \"\"\" if", "self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_author_formatted_correctly(self): \"\"\" Author information should", "pkg_resources import testtools.helpers import testtools.matchers from . import scaffold from", "= version_info_filename self.mock_distribution = mock.MagicMock() func_patcher_get_distribution = mock.patch.object( pkg_resources, 'get_distribution')", "type\") instance = getattr(metadata, self.attribute_name) self.assertThat( instance, HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios):", "warranty. \"\"\" Unit test for ‘_metadata’ private module. 
\"\"\" from", "result = metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result, self.expected_range) class metadata_content_TestCase(scaffold.TestCase): \"\"\" Test", "return False return True def fake_func_get_metadata(testcase, resource_name): \"\"\" Fake the", "functools import json import re try: # Python 3 standard", "terms of the GNU General Public License as published by", "and you are welcome to redistribute it under # certain", "setUp(self): \"\"\" Set up test fixtures. \"\"\" super(YearRange_TestCase, self).setUp() self.test_instance", "{} if hasattr(self, 'test_filename'): self.test_args['filename'] = self.test_filename if not hasattr(self,", "attribute. \"\"\" if self.ducktype_attribute_name == NotImplemented: self.skipTest(\"Can't assert this attribute's", "'isdigit' def test_module_has_attribute(self): \"\"\" Metadata should have expected value as", "Result should match expected YearRange. \"\"\" result = metadata.make_year_range(self.begin_year, self.end_date)", "{'version': \"1.0\"}, }), ('file lorem_ipsum.json', { 'test_filename': \"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\",", "end=1970), }), ('no end year', { 'begin_year': \"1970\", 'end_date': None,", "import urlparse import mock import pkg_resources import testtools.helpers import testtools.matchers", "object `instance` has an attribute named `name`. \"\"\" result =", "pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info, }), ('no version_info', { 'expected_version_info': default_version_info, }),", "metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_author_formatted_correctly(self): \"\"\" Author information should be", "expected_distribution_name) def test_requests_specified_filename(self): \"\"\" The specified metadata resource name should", "# test/test_metadata.py # Part of ‘python-daemon’, an implementation of PEP", "Fake the behaviour of ‘pkg_resources.get_distribution’. 
\"\"\" if distribution_name != metadata.distribution_name:", "}), 'expected_version_info': {'version': \"0.0\"}, }), ('version 1.0', { 'test_version_info': json.dumps({", "expected YearRange. \"\"\" result = metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result, self.expected_range) class", "setUp(self): \"\"\" Set up test fixtures. \"\"\" super(get_distribution_version_info_TestCase, self).setUp() self.test_args", "}), ('same year', { 'begin_year': 1970, 'end_year': 1970, 'expected_text': \"1970\",", "'expected_range': FakeYearRange(begin=1970, end=1979), }), ('same year', { 'begin_year': \"1970\", 'end_date':", "utf-8 # mode: python # End: # vim: fileencoding=utf-8 filetype=python", "= AttributeNotFoundMismatch(instance, self.attribute_name) return result class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The specified", "scenarios = [ ('simple', { 'begin_year': 1970, 'end_year': 1979, 'expected_text':", "new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘make_year_range’ function. \"\"\"", "date UNKNOWN token', { 'begin_year': \"1970\", 'end_date': \"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970,", "\"\"\" regex_pattern = ( \".+ \" # Name. \"<[^>]+>\" #", "}), ('end date FUTURE token', { 'begin_year': \"1970\", 'end_date': \"FUTURE\",", "self) def test_requests_installed_distribution(self): \"\"\" The package distribution should be retrieved.", "Text representation should be as expected. \"\"\" result = unicode(self.test_instance)", "('simple', { 'begin_year': 1970, 'end_year': 1979, 'expected_text': \"1970–1979\", }), ('same", "'expected_range': FakeYearRange(begin=1970, end=1970), }), ('no end year', { 'begin_year': \"1970\",", "name should be requested. 
\"\"\" if hasattr(self, 'get_distribution_error'): self.skipTest(\"No access", "= functools.partial(IOError, errno.ENOENT) version_info_filename = \"version_info.json\" def fake_func_has_metadata(testcase, resource_name): \"\"\"", "1979, 'expected_text': \"1970–1979\", }), ('same year', { 'begin_year': 1970, 'end_year':", "]) scenarios = [ (name, {'attribute_name': name}) for name in", "('not installed', { 'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info, }), ('no version_info',", "date FUTURE token', { 'begin_year': \"1970\", 'end_date': \"FUTURE\", 'expected_range': FakeYearRange(begin=1970,", "the end of this file for copyright # information, grant", "= version_info_filename if not hasattr(self, 'expected_resource_name'): self.expected_resource_name = version_info_filename self.mock_distribution", "mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect =", "library. import urllib.parse as urlparse except ImportError: # Python 2", "library. import urlparse import mock import pkg_resources import testtools.helpers import", "'release_date': \"UNKNOWN\", 'version': \"UNKNOWN\", 'maintainer': \"UNKNOWN\", } scenarios = [", "URL should parse correctly. \"\"\" result = urlparse.urlparse(metadata.url) self.assertIsInstance( result,", "'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': {'version': \"1.0\"}, }), ('not", "instance self.attribute_name = name def describe(self): \"\"\" Emit a text", "value should have expected duck-typing attribute. 
\"\"\" if self.ducktype_attribute_name ==", "FakeYearRange(begin=1970, end=None), }), ] def test_result_matches_expected_range(self): \"\"\" Result should match", "General Public License as published by the # Free Software", "raise pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error mock_distribution = testcase.mock_distribution", "class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘make_year_range’ function. \"\"\" scenarios", "duck-typing attribute. \"\"\" if self.ducktype_attribute_name == NotImplemented: self.skipTest(\"Can't assert this", "fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata, testcase) return mock_distribution @mock.patch.object(metadata,", "functools.partial( fake_func_get_metadata, testcase) return mock_distribution @mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios):", "resource_name): error = FileNotFoundError(resource_name) raise error content = testcase.test_version_info return", "behaviour of ‘pkg_resources.get_distribution’. \"\"\" if distribution_name != metadata.distribution_name: raise pkg_resources.DistributionNotFound", "daemon._metadata as metadata class HasAttribute(testtools.matchers.Matcher): \"\"\" A matcher to assert", "result should match the expected items. 
\"\"\" version_info = metadata.get_distribution_version_info(**self.test_args)", "copy, modify, and/or distribute this work # under the terms", "installed', { 'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info, }), ('no version_info', {", "= set([ 'version_installed', 'author', 'copyright', 'license', 'url', ]) scenarios =", "func_patcher_get_distribution = mock.patch.object( pkg_resources, 'get_distribution') func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect = functools.partial(", ".scaffold import unicode import daemon._metadata as metadata class HasAttribute(testtools.matchers.Matcher): \"\"\"", "description of this mismatch. \"\"\" text = ( \"{instance!r}\" \"", "text class metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for metadata module values.", "information should contain author information. \"\"\" self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author)) def", "distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def test_result_matches_expected_items(self): \"\"\" The result should", "filename', { 'test_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name':", "copyright # information, grant of license, and disclaimer of warranty.", "def describe(self): \"\"\" Emit a text description of this mismatch.", "{name!r}\").format( instance=self.instance, name=self.attribute_name) return text class metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases", "as metadata class HasAttribute(testtools.matchers.Matcher): \"\"\" A matcher to assert an", "The specified instance does not have the named attribute. 
\"\"\"", "'expected_version_info': {'version': \"0.0\"}, }), ('version 1.0', { 'test_version_info': json.dumps({ 'version':", "for copyright # information, grant of license, and disclaimer of", "not parse correctly\".format( url=metadata.url)) try: FileNotFoundError except NameError: # Python", "have the named attribute. \"\"\" def __init__(self, instance, name): self.instance", "result = urlparse.urlparse(metadata.url) self.assertIsInstance( result, urlparse.ParseResult, \"URL value {url!r} did", "-*- coding: utf-8 -*- # # test/test_metadata.py # Part of", "FileNotFoundError(resource_name) raise error content = testcase.test_version_info return content def fake_func_get_distribution(testcase,", "'test_version_info': json.dumps({ 'version': \"0.0\", }), 'expected_version_info': {'version': \"0.0\"}, }), ('version", "result, urlparse.ParseResult, \"URL value {url!r} did not parse correctly\".format( url=metadata.url))", "class. \"\"\" scenarios = [ ('simple', { 'begin_year': 1970, 'end_year':", "# Part of ‘python-daemon’, an implementation of PEP 3143. #", "'begin_year': \"1970\", 'end_date': None, 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date", "of ‘pkg_resources.get_distribution’. \"\"\" if distribution_name != metadata.distribution_name: raise pkg_resources.DistributionNotFound if", "uses IOError. FileNotFoundError = functools.partial(IOError, errno.ENOENT) version_info_filename = \"version_info.json\" def", "'expected_version_info': default_version_info, }), ('wrong filename', { 'test_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({", "The result should match the expected items. \"\"\" version_info =", "fake_func_get_distribution(testcase, distribution_name): \"\"\" Fake the behaviour of ‘pkg_resources.get_distribution’. 
\"\"\" if", "unicode import daemon._metadata as metadata class HasAttribute(testtools.matchers.Matcher): \"\"\" A matcher", "self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect = functools.partial( fake_func_get_distribution, self) def test_requests_installed_distribution(self): \"\"\" The", "\"\"\" The specified metadata resource name should be requested. \"\"\"", "'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date FUTURE token', { 'begin_year':", "under # certain conditions; see the end of this file", "self.assertEqual(result, self.expected_text) FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end']) @mock.patch.object(metadata, 'YearRange', new=FakeYearRange)", "‘pkg_resources.get_distribution’. \"\"\" if distribution_name != metadata.distribution_name: raise pkg_resources.DistributionNotFound if hasattr(testcase,", "'begin_year': \"1970\", 'end_date': \"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date", "if name == 'version_installed': # No duck typing, this attribute", "__init__(self, instance, name): self.instance = instance self.attribute_name = name def", "result class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The specified instance does not have", "if not testtools.helpers.safe_hasattr(instance, self.attribute_name): result = AttributeNotFoundMismatch(instance, self.attribute_name) return result", "\"\"\" def __init__(self, name): self.attribute_name = name def match(self, instance):", "\"\"\" Set up test fixtures. \"\"\" super(get_distribution_version_info_TestCase, self).setUp() self.test_args =", "Assert the object `instance` has an attribute named `name`. \"\"\"", "}), ] def setUp(self): \"\"\" Set up test fixtures. 
\"\"\"", "Free Software Foundation; version 3 of that license or any", "default_version_info = { 'release_date': \"UNKNOWN\", 'version': \"UNKNOWN\", 'maintainer': \"UNKNOWN\", }", "mock.patch.object( pkg_resources, 'get_distribution') func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect = functools.partial( fake_func_get_distribution, self)", "coding: utf-8 # mode: python # End: # vim: fileencoding=utf-8", "'version_info_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info':", "UNKNOWN token', { 'begin_year': \"1970\", 'end_date': \"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970, end=None),", "duck typing, this attribute might be None. params['ducktype_attribute_name'] = NotImplemented", "metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result, self.expected_range) class metadata_content_TestCase(scaffold.TestCase): \"\"\" Test cases for", "return content def fake_func_get_distribution(testcase, distribution_name): \"\"\" Fake the behaviour of", "{ 'begin_year': \"1970\", 'end_date': \"FUTURE\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ]", "\"1970\", 'end_date': \"FUTURE\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ] def test_result_matches_expected_range(self):", "self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def test_result_matches_expected_items(self): \"\"\" The result should match the", "= ( \"{instance!r}\" \" has no attribute named {name!r}\").format( instance=self.instance,", "\"1970-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1970), }), ('no end year', { 'begin_year':", "{url!r} did not parse correctly\".format( url=metadata.url)) try: FileNotFoundError except NameError:", "\"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 
'expected_version_info': {'version': \"1.0\"}, }), ('not installed',", "version_info_filename self.mock_distribution = mock.MagicMock() func_patcher_get_distribution = mock.patch.object( pkg_resources, 'get_distribution') func_patcher_get_distribution.start()", "Local variables: # coding: utf-8 # mode: python # End:", "collections import errno import functools import json import re try:", "== NotImplemented: self.skipTest(\"Can't assert this attribute's type\") instance = getattr(metadata,", "cases for ‘get_distribution_version_info’ function. \"\"\" default_version_info = { 'release_date': \"UNKNOWN\",", "the GNU General Public License as published by the #", "'expected_version_info': default_version_info, }), ('no version_info', { 'expected_version_info': default_version_info, }), ('wrong", "\"Copyright © \" \"\\d{4}\" # Four-digit year. \"(?:–\\d{4})?\" # Optional", "# Optional range dash and four-digit year. ) regex_flags =", "value. params['ducktype_attribute_name'] = 'isdigit' def test_module_has_attribute(self): \"\"\" Metadata should have", "test_requests_specified_filename(self): \"\"\" The specified metadata resource name should be requested.", "-*- # # test/test_metadata.py # Part of ‘python-daemon’, an implementation", "Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. \"\"\" if ( resource_name !=", "\"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\",", "# under the terms of the GNU General Public License", "'end_date': \"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date FUTURE token',", "\" \"\\d{4}\" # Four-digit year. \"(?:–\\d{4})?\" # Optional range dash", "default_version_info, }), ] def setUp(self): \"\"\" Set up test fixtures.", "'expected_text': \"1970\", }), ] def setUp(self): \"\"\" Set up test", "named `name`. 
\"\"\" result = None if not testtools.helpers.safe_hasattr(instance, self.attribute_name):", "text = ( \"{instance!r}\" \" has no attribute named {name!r}\").format(", "\"1970\", 'end_date': \"1970-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1970), }), ('no end year',", "does not have the named attribute. \"\"\" def __init__(self, instance,", "return True def fake_func_get_metadata(testcase, resource_name): \"\"\" Fake the behaviour of", "by the # Free Software Foundation; version 3 of that", "of metadata. \"\"\" def test_copyright_formatted_correctly(self): \"\"\" Copyright statement should be", "self.test_args = {} if hasattr(self, 'test_filename'): self.test_args['filename'] = self.test_filename if", "mismatch. \"\"\" text = ( \"{instance!r}\" \" has no attribute", "version_info_filename = \"version_info.json\" def fake_func_has_metadata(testcase, resource_name): \"\"\" Fake the behaviour", "fake_func_get_metadata(testcase, resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. \"\"\" if", "}), ('file lorem_ipsum.json', { 'test_filename': \"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({", "result = AttributeNotFoundMismatch(instance, self.attribute_name) return result class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The", "variables: # coding: utf-8 # mode: python # End: #", "{ 'begin_year': \"1970\", 'end_date': None, 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end", "this value. params['ducktype_attribute_name'] = 'isdigit' def test_module_has_attribute(self): \"\"\" Metadata should", "metadata.distribution_name: raise pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error mock_distribution =", "license or any later version. 
# No warranty expressed or", "no attribute named {name!r}\").format( instance=self.instance, name=self.attribute_name) return text class metadata_value_TestCase(scaffold.TestCaseWithScenarios):", "\"\"\" default_version_info = { 'release_date': \"UNKNOWN\", 'version': \"UNKNOWN\", 'maintainer': \"UNKNOWN\",", "\"\"\" Test cases for content of metadata. \"\"\" def test_copyright_formatted_correctly(self):", "\"UNKNOWN\", 'version': \"UNKNOWN\", 'maintainer': \"UNKNOWN\", } scenarios = [ ('version", "Test cases for ‘YearRange’ class. \"\"\" scenarios = [ ('simple',", "FakeYearRange(begin=1970, end=None), }), ('end date FUTURE token', { 'begin_year': \"1970\",", "urlparse.urlparse(metadata.url) self.assertIsInstance( result, urlparse.ParseResult, \"URL value {url!r} did not parse", "version_info_filename if not hasattr(self, 'expected_resource_name'): self.expected_resource_name = version_info_filename self.mock_distribution =", "modify, and/or distribute this work # under the terms of", "or implied. See the file ‘LICENSE.GPL-3’ for details. # Local", "params['ducktype_attribute_name'] = 'isdigit' def test_module_has_attribute(self): \"\"\" Metadata should have expected", "regex_flags = re.UNICODE self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_copyright_contains_author(self): \"\"\"", "import testtools.matchers from . import scaffold from .scaffold import unicode", "cases for ‘YearRange’ class. \"\"\" scenarios = [ ('simple', {", "for ‘YearRange’ class. \"\"\" scenarios = [ ('simple', { 'begin_year':", "}), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': default_version_info, }), ] def setUp(self): \"\"\"", "access to distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def test_result_matches_expected_items(self): \"\"\" The", "for metadata module values. 
\"\"\" expected_str_attributes = set([ 'version_installed', 'author',", "unicode_literals) import collections import errno import functools import json import", "import errno import functools import json import re try: #", "collections.namedtuple('FakeYearRange', ['begin', 'end']) @mock.patch.object(metadata, 'YearRange', new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test", "the named attribute. \"\"\" def __init__(self, instance, name): self.instance =", "except NameError: # Python 2 uses IOError. FileNotFoundError = functools.partial(IOError,", "def test_copyright_contains_author(self): \"\"\" Copyright information should contain author information. \"\"\"", "name}) for name in expected_str_attributes] for (name, params) in scenarios:", "self.mock_distribution = mock.MagicMock() func_patcher_get_distribution = mock.patch.object( pkg_resources, 'get_distribution') func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop)", "import unicode import daemon._metadata as metadata class HasAttribute(testtools.matchers.Matcher): \"\"\" A", "‘str’ to test this value. params['ducktype_attribute_name'] = 'isdigit' def test_module_has_attribute(self):", "you may copy, modify, and/or distribute this work # under", "\"\"\" The specified instance does not have the named attribute.", "‘pkg_resources.Distribution.has_metadata’. \"\"\" if ( resource_name != testcase.version_info_filename or not hasattr(testcase,", "has a named attribute. \"\"\" def __init__(self, name): self.attribute_name =", "\"\"\" Test cases for ‘make_year_range’ function. 
\"\"\" scenarios = [", "'expected_resource_name'): self.expected_resource_name = version_info_filename self.mock_distribution = mock.MagicMock() func_patcher_get_distribution = mock.patch.object(", "\"\"\" version_info = metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info) # Copyright © 2008–2018", "YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘YearRange’ class. \"\"\" scenarios =", "\"\"\" Copyright statement should be formatted correctly. \"\"\" regex_pattern =", "\"1979-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1979), }), ('same year', { 'begin_year': \"1970\",", "import testtools.helpers import testtools.matchers from . import scaffold from .scaffold", "Four-digit year. \"(?:–\\d{4})?\" # Optional range dash and four-digit year.", "# mode: python # End: # vim: fileencoding=utf-8 filetype=python :", "FakeYearRange(begin=1970, end=1979), }), ('same year', { 'begin_year': \"1970\", 'end_date': \"1970-01-01\",", "private module. \"\"\" from __future__ import (absolute_import, unicode_literals) import collections", "\"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': default_version_info, }), ] def setUp(self):", "test_module_has_attribute(self): \"\"\" Metadata should have expected value as a module", "Emit a text description of this mismatch. \"\"\" text =", "\"1970–1979\", }), ('same year', { 'begin_year': 1970, 'end_year': 1970, 'expected_text':", "( resource_name != testcase.version_info_filename or not hasattr(testcase, 'test_version_info')): return False", "\"\"\" Author information should be formatted correctly. 
\"\"\" regex_pattern =", "= \"version_info.json\" def fake_func_has_metadata(testcase, resource_name): \"\"\" Fake the behaviour of", "True def fake_func_get_metadata(testcase, resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’.", "mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata, testcase) return mock_distribution @mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\")", "cases for ‘make_year_range’ function. \"\"\" scenarios = [ ('simple', {", "mock_distribution @mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for", "test fixtures. \"\"\" super(get_distribution_version_info_TestCase, self).setUp() self.test_args = {} if hasattr(self,", "'version': \"1.0\", }), 'expected_version_info': {'version': \"1.0\"}, }), ('file lorem_ipsum.json', {", "name def describe(self): \"\"\" Emit a text description of this", "not testtools.helpers.safe_hasattr(instance, self.attribute_name): result = AttributeNotFoundMismatch(instance, self.attribute_name) return result class", "has no attribute named {name!r}\").format( instance=self.instance, name=self.attribute_name) return text class", "return text class metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for metadata module", "= metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def test_requests_specified_filename(self): \"\"\" The specified", "( \".+ \" # Name. \"<[^>]+>\" # Email address, in", "author information. \"\"\" self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self): \"\"\" Homepage", "Set up test fixtures. 
\"\"\" super(get_distribution_version_info_TestCase, self).setUp() self.test_args = {}", "\"\"\" super(get_distribution_version_info_TestCase, self).setUp() self.test_args = {} if hasattr(self, 'test_filename'): self.test_args['filename']", "instance): \"\"\" Assert the object `instance` has an attribute named", "instance = getattr(metadata, self.attribute_name) self.assertThat( instance, HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\"", "'end_date': \"1979-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1979), }), ('same year', { 'begin_year':", "correctly. \"\"\" result = urlparse.urlparse(metadata.url) self.assertIsInstance( result, urlparse.ParseResult, \"URL value", "Metadata should have expected value as a module attribute. \"\"\"", "the file ‘LICENSE.GPL-3’ for details. # Local variables: # coding:", "\"\"\" scenarios = [ ('simple', { 'begin_year': \"1970\", 'end_date': \"1979-01-01\",", "parse correctly\".format( url=metadata.url)) try: FileNotFoundError except NameError: # Python 2", "software: you may copy, modify, and/or distribute this work #", "def test_module_has_attribute(self): \"\"\" Metadata should have expected value as a", "'license', 'url', ]) scenarios = [ (name, {'attribute_name': name}) for", "end=None), }), ('end date FUTURE token', { 'begin_year': \"1970\", 'end_date':", "self.assertEqual(self.expected_version_info, version_info) # Copyright © 2008–2018 <NAME> <<EMAIL>> # #", "information, grant of license, and disclaimer of warranty. \"\"\" Unit", "‘YearRange’ class. 
\"\"\" scenarios = [ ('simple', { 'begin_year': 1970,", "[ ('version 0.0', { 'test_version_info': json.dumps({ 'version': \"0.0\", }), 'expected_version_info':", "'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': {'version': \"1.0\"},", "if hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect =", "expected_str_attributes = set([ 'version_installed', 'author', 'copyright', 'license', 'url', ]) scenarios", "= [ ('simple', { 'begin_year': 1970, 'end_year': 1979, 'expected_text': \"1970–1979\",", "fixtures. \"\"\" super(YearRange_TestCase, self).setUp() self.test_instance = metadata.YearRange( self.begin_year, self.end_year) def", "re.UNICODE self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_author_formatted_correctly(self): \"\"\" Author information", "retrieved. \"\"\" expected_distribution_name = metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def test_requests_specified_filename(self):", "of this mismatch. \"\"\" text = ( \"{instance!r}\" \" has", "year. ) regex_flags = re.UNICODE self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def", "the behaviour of ‘pkg_resources.get_distribution’. \"\"\" if distribution_name != metadata.distribution_name: raise", "= self.test_filename if not hasattr(self, 'version_info_filename'): self.version_info_filename = version_info_filename if", "AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The specified instance does not have the named", "\"\"\" scenarios = [ ('simple', { 'begin_year': 1970, 'end_year': 1979,", "items. 
\"\"\" version_info = metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info) # Copyright ©", "from .scaffold import unicode import daemon._metadata as metadata class HasAttribute(testtools.matchers.Matcher):", "\"\"\" result = metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result, self.expected_range) class metadata_content_TestCase(scaffold.TestCase): \"\"\"", "None if not testtools.helpers.safe_hasattr(instance, self.attribute_name): result = AttributeNotFoundMismatch(instance, self.attribute_name) return", "import json import re try: # Python 3 standard library.", "describe(self): \"\"\" Emit a text description of this mismatch. \"\"\"", "content of metadata. \"\"\" def test_copyright_formatted_correctly(self): \"\"\" Copyright statement should", "test_url_parses_correctly(self): \"\"\" Homepage URL should parse correctly. \"\"\" result =", "3 standard library. import urllib.parse as urlparse except ImportError: #", "angle brackets. ) regex_flags = re.UNICODE self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags))", "\"\"\" The result should match the expected items. \"\"\" version_info", "NotImplemented: self.skipTest(\"Can't assert this attribute's type\") instance = getattr(metadata, self.attribute_name)", "hasattr(testcase, 'test_version_info')): return False return True def fake_func_get_metadata(testcase, resource_name): \"\"\"", "have expected duck-typing attribute. 
\"\"\" if self.ducktype_attribute_name == NotImplemented: self.skipTest(\"Can't", "('wrong filename', { 'test_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version': \"1.0\", }),", "\"1970\", 'end_date': None, 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date UNKNOWN", "name=self.attribute_name) return text class metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for metadata", "def test_text_representation_as_expected(self): \"\"\" Text representation should be as expected. \"\"\"", "= [ ('version 0.0', { 'test_version_info': json.dumps({ 'version': \"0.0\", }),", "not have the named attribute. \"\"\" def __init__(self, instance, name):", "named {name!r}\").format( instance=self.instance, name=self.attribute_name) return text class metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test", "\"\"\" result = urlparse.urlparse(metadata.url) self.assertIsInstance( result, urlparse.ParseResult, \"URL value {url!r}", "make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘make_year_range’ function. \"\"\" scenarios =", "ImportError: # Python 2 standard library. import urlparse import mock", "self.assertThat( instance, HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘YearRange’", "function. 
\"\"\" default_version_info = { 'release_date': \"UNKNOWN\", 'version': \"UNKNOWN\", 'maintainer':", "0.0', { 'test_version_info': json.dumps({ 'version': \"0.0\", }), 'expected_version_info': {'version': \"0.0\"},", "= FileNotFoundError(resource_name) raise error content = testcase.test_version_info return content def", "errno.ENOENT) version_info_filename = \"version_info.json\" def fake_func_has_metadata(testcase, resource_name): \"\"\" Fake the", "functools.partial(IOError, errno.ENOENT) version_info_filename = \"version_info.json\" def fake_func_has_metadata(testcase, resource_name): \"\"\" Fake", "correctly. \"\"\" regex_pattern = ( \"Copyright © \" \"\\d{4}\" #", "\"\"\" Set up test fixtures. \"\"\" super(YearRange_TestCase, self).setUp() self.test_instance =", "import daemon._metadata as metadata class HasAttribute(testtools.matchers.Matcher): \"\"\" A matcher to", "error = FileNotFoundError(resource_name) raise error content = testcase.test_version_info return content", "standard library. import urllib.parse as urlparse except ImportError: # Python", "statement should be formatted correctly. \"\"\" regex_pattern = ( \"Copyright", "{'attribute_name': name}) for name in expected_str_attributes] for (name, params) in", "self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_copyright_contains_author(self): \"\"\" Copyright information should", "file ‘LICENSE.GPL-3’ for details. # Local variables: # coding: utf-8", "you are welcome to redistribute it under # certain conditions;", "FileNotFoundError = functools.partial(IOError, errno.ENOENT) version_info_filename = \"version_info.json\" def fake_func_has_metadata(testcase, resource_name):", "year', { 'begin_year': 1970, 'end_year': 1970, 'expected_text': \"1970\", }), ('no", "re try: # Python 3 standard library. 
import urllib.parse as", "\"\"\" self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self): \"\"\" Homepage URL should", "= {} if hasattr(self, 'test_filename'): self.test_args['filename'] = self.test_filename if not", "version_info = metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info) # Copyright © 2008–2018 <NAME>", "{'version': \"0.0\"}, }), ('version 1.0', { 'test_version_info': json.dumps({ 'version': \"1.0\",", "HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘YearRange’ class. \"\"\"", "certain conditions; see the end of this file for copyright", "test_result_matches_expected_items(self): \"\"\" The result should match the expected items. \"\"\"", "'expected_text': \"1970\", }), ('no end year', { 'begin_year': 1970, 'end_year':", "parse correctly. \"\"\" result = urlparse.urlparse(metadata.url) self.assertIsInstance( result, urlparse.ParseResult, \"URL", "(name, {'attribute_name': name}) for name in expected_str_attributes] for (name, params)", "}), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': {'version': \"1.0\"}, }), ('not installed', {", "hasattr(self, 'expected_resource_name'): self.expected_resource_name = version_info_filename self.mock_distribution = mock.MagicMock() func_patcher_get_distribution =", "© 2008–2018 <NAME> <<EMAIL>> # # This is free software:", "except ImportError: # Python 2 standard library. import urlparse import", "IOError. FileNotFoundError = functools.partial(IOError, errno.ENOENT) version_info_filename = \"version_info.json\" def fake_func_has_metadata(testcase,", "import urllib.parse as urlparse except ImportError: # Python 2 standard", "free software, and you are welcome to redistribute it under", "YearRange. 
\"\"\" result = metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result, self.expected_range) class metadata_content_TestCase(scaffold.TestCase):", "token', { 'begin_year': \"1970\", 'end_date': \"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970, end=None), }),", "functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata, testcase) return mock_distribution", "module. \"\"\" from __future__ import (absolute_import, unicode_literals) import collections import", "\"\"\" Metadata value should have expected duck-typing attribute. \"\"\" if", "'end_date': None, 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date UNKNOWN token',", "resource_name != testcase.version_info_filename or not hasattr(testcase, 'test_version_info')): return False return", "{ 'release_date': \"UNKNOWN\", 'version': \"UNKNOWN\", 'maintainer': \"UNKNOWN\", } scenarios =", "instance, HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘YearRange’ class.", "# certain conditions; see the end of this file for", "super(YearRange_TestCase, self).setUp() self.test_instance = metadata.YearRange( self.begin_year, self.end_year) def test_text_representation_as_expected(self): \"\"\"", "requested. \"\"\" if hasattr(self, 'get_distribution_error'): self.skipTest(\"No access to distribution\") metadata.get_distribution_version_info(**self.test_args)", "params['ducktype_attribute_name'] = NotImplemented continue # Expect an attribute of ‘str’", "an attribute of ‘str’ to test this value. params['ducktype_attribute_name'] =", "of PEP 3143. 
# # This is free software, and", "unicode(self.test_instance) self.assertEqual(result, self.expected_text) FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end']) @mock.patch.object(metadata, 'YearRange',", "'begin_year': 1970, 'end_year': 1979, 'expected_text': \"1970–1979\", }), ('same year', {", "{ 'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info, }), ('no version_info', { 'expected_version_info':", "( \"{instance!r}\" \" has no attribute named {name!r}\").format( instance=self.instance, name=self.attribute_name)", "{ 'begin_year': \"1970\", 'end_date': \"1970-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1970), }), ('no", "representation should be as expected. \"\"\" result = unicode(self.test_instance) self.assertEqual(result,", "'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': default_version_info, }), ] def", "this attribute might be None. params['ducktype_attribute_name'] = NotImplemented continue #", "\"\"\" Test cases for ‘YearRange’ class. \"\"\" scenarios = [", "for ‘_metadata’ private module. \"\"\" from __future__ import (absolute_import, unicode_literals)", "be formatted correctly. \"\"\" regex_pattern = ( \".+ \" #", "# # This is free software, and you are welcome", "fake_func_get_metadata, testcase) return mock_distribution @mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\"", "a text description of this mismatch. \"\"\" text = (", "= metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result, self.expected_range) class metadata_content_TestCase(scaffold.TestCase): \"\"\" Test cases", "try: # Python 3 standard library. 
import urllib.parse as urlparse", "utf-8 -*- # # test/test_metadata.py # Part of ‘python-daemon’, an", "\"\"\" result = unicode(self.test_instance) self.assertEqual(result, self.expected_text) FakeYearRange = collections.namedtuple('FakeYearRange', ['begin',", "attribute. \"\"\" def __init__(self, instance, name): self.instance = instance self.attribute_name", "1970, 'expected_text': \"1970\", }), ('no end year', { 'begin_year': 1970,", "\"\"\" super(YearRange_TestCase, self).setUp() self.test_instance = metadata.YearRange( self.begin_year, self.end_year) def test_text_representation_as_expected(self):", "should have expected duck-typing attribute. \"\"\" if self.ducktype_attribute_name == NotImplemented:", "= instance self.attribute_name = name def describe(self): \"\"\" Emit a", "['begin', 'end']) @mock.patch.object(metadata, 'YearRange', new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases", "this mismatch. \"\"\" text = ( \"{instance!r}\" \" has no", "four-digit year. ) regex_flags = re.UNICODE self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags))", "def fake_func_get_distribution(testcase, distribution_name): \"\"\" Fake the behaviour of ‘pkg_resources.get_distribution’. \"\"\"", "self.test_instance = metadata.YearRange( self.begin_year, self.end_year) def test_text_representation_as_expected(self): \"\"\" Text representation", "{ 'begin_year': \"1970\", 'end_date': \"1979-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1979), }), ('same", "('same year', { 'begin_year': 1970, 'end_year': 1970, 'expected_text': \"1970\", }),", "\"\"\" Fake the behaviour of ‘pkg_resources.get_distribution’. 
\"\"\" if distribution_name !=", "self.expected_text) FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end']) @mock.patch.object(metadata, 'YearRange', new=FakeYearRange) class", "self.ducktype_attribute_name == NotImplemented: self.skipTest(\"Can't assert this attribute's type\") instance =", "errno import functools import json import re try: # Python", "scenarios = [ (name, {'attribute_name': name}) for name in expected_str_attributes]", "‘_metadata’ private module. \"\"\" from __future__ import (absolute_import, unicode_literals) import", "year. \"(?:–\\d{4})?\" # Optional range dash and four-digit year. )", "matcher to assert an object has a named attribute. \"\"\"", "\"0.0\"}, }), ('version 1.0', { 'test_version_info': json.dumps({ 'version': \"1.0\", }),", "a named attribute. \"\"\" def __init__(self, name): self.attribute_name = name", "self.assertEqual(result, self.expected_range) class metadata_content_TestCase(scaffold.TestCase): \"\"\" Test cases for content of", "import mock import pkg_resources import testtools.helpers import testtools.matchers from .", "\"\"\" if not fake_func_has_metadata(testcase, resource_name): error = FileNotFoundError(resource_name) raise error", "def match(self, instance): \"\"\" Assert the object `instance` has an", "\"URL value {url!r} did not parse correctly\".format( url=metadata.url)) try: FileNotFoundError", "\"\"\" Assert the object `instance` has an attribute named `name`.", "formatted correctly. \"\"\" regex_pattern = ( \".+ \" # Name.", "testcase.get_distribution_error mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect", "= ( \"Copyright © \" \"\\d{4}\" # Four-digit year. \"(?:–\\d{4})?\"", "'maintainer': \"UNKNOWN\", } scenarios = [ ('version 0.0', { 'test_version_info':", "# Email address, in angle brackets. 
) regex_flags = re.UNICODE", "'end_year': None, 'expected_text': \"1970\", }), ] def setUp(self): \"\"\" Set", "3143. # # This is free software, and you are", "implementation of PEP 3143. # # This is free software,", "= NotImplemented continue # Expect an attribute of ‘str’ to", "self.attribute_name): result = AttributeNotFoundMismatch(instance, self.attribute_name) return result class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\"", "This is free software, and you are welcome to redistribute", "import (absolute_import, unicode_literals) import collections import errno import functools import", "attribute. \"\"\" def __init__(self, name): self.attribute_name = name def match(self,", "'expected_version_info': {'version': \"1.0\"}, }), ('file lorem_ipsum.json', { 'test_filename': \"lorem_ipsum.json\", 'version_info_filename':", "if hasattr(self, 'test_filename'): self.test_args['filename'] = self.test_filename if not hasattr(self, 'version_info_filename'):", "('end date UNKNOWN token', { 'begin_year': \"1970\", 'end_date': \"UNKNOWN\", 'expected_range':", "self.end_year) def test_text_representation_as_expected(self): \"\"\" Text representation should be as expected.", "‘get_distribution_version_info’ function. \"\"\" default_version_info = { 'release_date': \"UNKNOWN\", 'version': \"UNKNOWN\",", "None. params['ducktype_attribute_name'] = NotImplemented continue # Expect an attribute of", "formatted correctly. \"\"\" regex_pattern = ( \"Copyright © \" \"\\d{4}\"", "Python 2 standard library. import urlparse import mock import pkg_resources", "or any later version. # No warranty expressed or implied.", "Set up test fixtures. 
\"\"\" super(YearRange_TestCase, self).setUp() self.test_instance = metadata.YearRange(", "end=None), }), ] def test_result_matches_expected_range(self): \"\"\" Result should match expected", "year', { 'begin_year': \"1970\", 'end_date': \"1970-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1970), }),", "Software Foundation; version 3 of that license or any later", "= metadata.YearRange( self.begin_year, self.end_year) def test_text_representation_as_expected(self): \"\"\" Text representation should", "metadata class HasAttribute(testtools.matchers.Matcher): \"\"\" A matcher to assert an object", "and disclaimer of warranty. \"\"\" Unit test for ‘_metadata’ private", "('simple', { 'begin_year': \"1970\", 'end_date': \"1979-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1979), }),", "\"\"\" if distribution_name != metadata.distribution_name: raise pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'):", "value as a module attribute. \"\"\" self.assertThat( metadata, HasAttribute(self.attribute_name)) def", "correctly. \"\"\" regex_pattern = ( \".+ \" # Name. \"<[^>]+>\"", "\"1970\", }), ] def setUp(self): \"\"\" Set up test fixtures.", "\"\"\" def test_copyright_formatted_correctly(self): \"\"\" Copyright statement should be formatted correctly.", "named attribute. \"\"\" def __init__(self, instance, name): self.instance = instance", "of ‘python-daemon’, an implementation of PEP 3143. # # This", "from . import scaffold from .scaffold import unicode import daemon._metadata", "No duck typing, this attribute might be None. params['ducktype_attribute_name'] =", "name == 'version_installed': # No duck typing, this attribute might", "brackets. ) regex_flags = re.UNICODE self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def", "!= testcase.version_info_filename or not hasattr(testcase, 'test_version_info')): return False return True", "text description of this mismatch. 
\"\"\" text = ( \"{instance!r}\"", "Copyright information should contain author information. \"\"\" self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author))", "Expect an attribute of ‘str’ to test this value. params['ducktype_attribute_name']", "self.attribute_name) return result class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The specified instance does", "\"\"\" Unit test for ‘_metadata’ private module. \"\"\" from __future__", "default_version_info, }), ('wrong filename', { 'test_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version':", "distribution_name): \"\"\" Fake the behaviour of ‘pkg_resources.get_distribution’. \"\"\" if distribution_name", "package distribution should be retrieved. \"\"\" expected_distribution_name = metadata.distribution_name metadata.get_distribution_version_info(**self.test_args)", "test_copyright_contains_author(self): \"\"\" Copyright information should contain author information. \"\"\" self.assertThat(", "test_text_representation_as_expected(self): \"\"\" Text representation should be as expected. \"\"\" result", "import pkg_resources import testtools.helpers import testtools.matchers from . import scaffold", "urlparse.ParseResult, \"URL value {url!r} did not parse correctly\".format( url=metadata.url)) try:", "as expected. \"\"\" result = unicode(self.test_instance) self.assertEqual(result, self.expected_text) FakeYearRange =", "'test_version_info')): return False return True def fake_func_get_metadata(testcase, resource_name): \"\"\" Fake", "if self.ducktype_attribute_name == NotImplemented: self.skipTest(\"Can't assert this attribute's type\") instance", "‘python-daemon’, an implementation of PEP 3143. # # This is", "fixtures. 
\"\"\" super(get_distribution_version_info_TestCase, self).setUp() self.test_args = {} if hasattr(self, 'test_filename'):", "urllib.parse as urlparse except ImportError: # Python 2 standard library.", "match(self, instance): \"\"\" Assert the object `instance` has an attribute", "function. \"\"\" scenarios = [ ('simple', { 'begin_year': \"1970\", 'end_date':", "def test_requests_installed_distribution(self): \"\"\" The package distribution should be retrieved. \"\"\"", "if not hasattr(self, 'version_info_filename'): self.version_info_filename = version_info_filename if not hasattr(self,", "should match the expected items. \"\"\" version_info = metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info,", "distribution should be retrieved. \"\"\" expected_distribution_name = metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with(", "\"\"\" text = ( \"{instance!r}\" \" has no attribute named", "# Expect an attribute of ‘str’ to test this value.", "] def test_result_matches_expected_range(self): \"\"\" Result should match expected YearRange. \"\"\"", "('same year', { 'begin_year': \"1970\", 'end_date': \"1970-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1970),", "named attribute. \"\"\" def __init__(self, name): self.attribute_name = name def", "\"\"\" regex_pattern = ( \"Copyright © \" \"\\d{4}\" # Four-digit", "NameError: # Python 2 uses IOError. FileNotFoundError = functools.partial(IOError, errno.ENOENT)", "'begin_year': 1970, 'end_year': None, 'expected_text': \"1970\", }), ] def setUp(self):", "testcase.test_version_info return content def fake_func_get_distribution(testcase, distribution_name): \"\"\" Fake the behaviour", "'expected_version_info': default_version_info, }), ] def setUp(self): \"\"\" Set up test", "version_info) # Copyright © 2008–2018 <NAME> <<EMAIL>> # # This", "should have expected value as a module attribute. 
\"\"\" self.assertThat(", "test_author_formatted_correctly(self): \"\"\" Author information should be formatted correctly. \"\"\" regex_pattern", "for ‘make_year_range’ function. \"\"\" scenarios = [ ('simple', { 'begin_year':", "{ 'begin_year': 1970, 'end_year': 1970, 'expected_text': \"1970\", }), ('no end", "# coding: utf-8 # mode: python # End: # vim:", "Author information should be formatted correctly. \"\"\" regex_pattern = (", "# # test/test_metadata.py # Part of ‘python-daemon’, an implementation of", "name): self.attribute_name = name def match(self, instance): \"\"\" Assert the", "= ( \".+ \" # Name. \"<[^>]+>\" # Email address,", "<reponame>helion-security/helion # -*- coding: utf-8 -*- # # test/test_metadata.py #", "'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_version_info': {'version': \"1.0\"}, }), ('file", "this file for copyright # information, grant of license, and", "= [ ('simple', { 'begin_year': \"1970\", 'end_date': \"1979-01-01\", 'expected_range': FakeYearRange(begin=1970,", "def test_requests_specified_filename(self): \"\"\" The specified metadata resource name should be", "later version. # No warranty expressed or implied. See the", "'begin_year': \"1970\", 'end_date': \"FUTURE\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ] def", "as a module attribute. \"\"\" self.assertThat( metadata, HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self):", "params) in scenarios: if name == 'version_installed': # No duck", "mock.MagicMock() func_patcher_get_distribution = mock.patch.object( pkg_resources, 'get_distribution') func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect =", "to assert an object has a named attribute. \"\"\" def", "mock import pkg_resources import testtools.helpers import testtools.matchers from . 
import", "self.test_args['filename'] = self.test_filename if not hasattr(self, 'version_info_filename'): self.version_info_filename = version_info_filename", "FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end']) @mock.patch.object(metadata, 'YearRange', new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios):", "Foundation; version 3 of that license or any later version.", "\"(?:–\\d{4})?\" # Optional range dash and four-digit year. ) regex_flags", "if ( resource_name != testcase.version_info_filename or not hasattr(testcase, 'test_version_info')): return", "\"\"\" expected_distribution_name = metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def test_requests_specified_filename(self): \"\"\"", "( \"Copyright © \" \"\\d{4}\" # Four-digit year. \"(?:–\\d{4})?\" #", "url=metadata.url)) try: FileNotFoundError except NameError: # Python 2 uses IOError.", "\"lorem_ipsum.json\", 'expected_version_info': {'version': \"1.0\"}, }), ('not installed', { 'get_distribution_error': pkg_resources.DistributionNotFound(),", "assert an object has a named attribute. \"\"\" def __init__(self,", "might be None. params['ducktype_attribute_name'] = NotImplemented continue # Expect an", "License as published by the # Free Software Foundation; version", "\"\"\" Result should match expected YearRange. \"\"\" result = metadata.make_year_range(self.begin_year,", "3 of that license or any later version. 
# No", "HasAttribute(testtools.matchers.Matcher): \"\"\" A matcher to assert an object has a", "scenarios = [ ('simple', { 'begin_year': \"1970\", 'end_date': \"1979-01-01\", 'expected_range':", "1970, 'end_year': None, 'expected_text': \"1970\", }), ] def setUp(self): \"\"\"", "!= metadata.distribution_name: raise pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error mock_distribution", "should be formatted correctly. \"\"\" regex_pattern = ( \"Copyright ©", "cases for metadata module values. \"\"\" expected_str_attributes = set([ 'version_installed',", "raise testcase.get_distribution_error mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata, testcase)", "in expected_str_attributes] for (name, params) in scenarios: if name ==", "not hasattr(self, 'version_info_filename'): self.version_info_filename = version_info_filename if not hasattr(self, 'expected_resource_name'):", "pkg_resources, 'get_distribution') func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect = functools.partial( fake_func_get_distribution, self) def", "metadata, HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self): \"\"\" Metadata value should have expected", "'YearRange', new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘make_year_range’ function.", "return mock_distribution @mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases", "testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata,", "published by the # Free Software 
Foundation; version 3 of", "match expected YearRange. \"\"\" result = metadata.make_year_range(self.begin_year, self.end_date) self.assertEqual(result, self.expected_range)", "distribute this work # under the terms of the GNU", "('end date FUTURE token', { 'begin_year': \"1970\", 'end_date': \"FUTURE\", 'expected_range':", "functools.partial( fake_func_get_distribution, self) def test_requests_installed_distribution(self): \"\"\" The package distribution should", "= mock.patch.object( pkg_resources, 'get_distribution') func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect = functools.partial( fake_func_get_distribution,", "'url', ]) scenarios = [ (name, {'attribute_name': name}) for name", "def test_module_attribute_has_duck_type(self): \"\"\" Metadata value should have expected duck-typing attribute.", "'get_distribution_error'): raise testcase.get_distribution_error mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata,", "continue # Expect an attribute of ‘str’ to test this", "\"1970\", 'end_date': \"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date FUTURE", "\" # Name. \"<[^>]+>\" # Email address, in angle brackets.", "json.dumps({ 'version': \"1.0\", }), 'expected_version_info': {'version': \"1.0\"}, }), ('file lorem_ipsum.json',", "\"\"\" The package distribution should be retrieved. \"\"\" expected_distribution_name =", "# Copyright © 2008–2018 <NAME> <<EMAIL>> # # This is", "`name`. \"\"\" result = None if not testtools.helpers.safe_hasattr(instance, self.attribute_name): result", "Python 3 standard library. import urllib.parse as urlparse except ImportError:", "cases for content of metadata. 
\"\"\" def test_copyright_formatted_correctly(self): \"\"\" Copyright", "metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_copyright_contains_author(self): \"\"\" Copyright information should contain", "if not fake_func_has_metadata(testcase, resource_name): error = FileNotFoundError(resource_name) raise error content", "\"<[^>]+>\" # Email address, in angle brackets. ) regex_flags =", "to redistribute it under # certain conditions; see the end", "of warranty. \"\"\" Unit test for ‘_metadata’ private module. \"\"\"", "class HasAttribute(testtools.matchers.Matcher): \"\"\" A matcher to assert an object has", "be as expected. \"\"\" result = unicode(self.test_instance) self.assertEqual(result, self.expected_text) FakeYearRange", "test_requests_installed_distribution(self): \"\"\" The package distribution should be retrieved. \"\"\" expected_distribution_name", "'end']) @mock.patch.object(metadata, 'YearRange', new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for", "and/or distribute this work # under the terms of the", "from __future__ import (absolute_import, unicode_literals) import collections import errno import", "as published by the # Free Software Foundation; version 3", "end=None), }), ('end date UNKNOWN token', { 'begin_year': \"1970\", 'end_date':", "= { 'release_date': \"UNKNOWN\", 'version': \"UNKNOWN\", 'maintainer': \"UNKNOWN\", } scenarios", "HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self): \"\"\" Metadata value should have expected duck-typing", "attribute named {name!r}\").format( instance=self.instance, name=self.attribute_name) return text class metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\"", "Test cases for ‘make_year_range’ function. 
\"\"\" scenarios = [ ('simple',", "pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def test_requests_specified_filename(self): \"\"\" The specified metadata resource name", "range dash and four-digit year. ) regex_flags = re.UNICODE self.assertThat(", "behaviour of ‘pkg_resources.Distribution.get_metadata’. \"\"\" if not fake_func_has_metadata(testcase, resource_name): error =", "information should be formatted correctly. \"\"\" regex_pattern = ( \".+", "self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self): \"\"\" Homepage URL should parse", "attribute named `name`. \"\"\" result = None if not testtools.helpers.safe_hasattr(instance,", "\"\"\" Text representation should be as expected. \"\"\" result =", "self.attribute_name) self.assertThat( instance, HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for", "self).setUp() self.test_instance = metadata.YearRange( self.begin_year, self.end_year) def test_text_representation_as_expected(self): \"\"\" Text", "not hasattr(testcase, 'test_version_info')): return False return True def fake_func_get_metadata(testcase, resource_name):", "\"0.0\", }), 'expected_version_info': {'version': \"0.0\"}, }), ('version 1.0', { 'test_version_info':", "in angle brackets. ) regex_flags = re.UNICODE self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern,", "should be requested. 
\"\"\" if hasattr(self, 'get_distribution_error'): self.skipTest(\"No access to", "'end_date': \"1970-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1970), }), ('no end year', {", "super(get_distribution_version_info_TestCase, self).setUp() self.test_args = {} if hasattr(self, 'test_filename'): self.test_args['filename'] =", "hasattr(self, 'version_info_filename'): self.version_info_filename = version_info_filename if not hasattr(self, 'expected_resource_name'): self.expected_resource_name", "PEP 3143. # # This is free software, and you", "'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': default_version_info, }), ] def setUp(self): \"\"\" Set", "def __init__(self, instance, name): self.instance = instance self.attribute_name = name", "None, 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date UNKNOWN token', {", "end year', { 'begin_year': 1970, 'end_year': None, 'expected_text': \"1970\", }),", "self.attribute_name = name def match(self, instance): \"\"\" Assert the object", "the terms of the GNU General Public License as published", "self.test_filename if not hasattr(self, 'version_info_filename'): self.version_info_filename = version_info_filename if not", "@mock.patch.object(metadata, 'YearRange', new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘make_year_range’", "= functools.partial( fake_func_get_metadata, testcase) return mock_distribution @mock.patch.object(metadata, 'distribution_name', new=\"mock-dist\") class", "result = unicode(self.test_instance) self.assertEqual(result, self.expected_text) FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end'])", "metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def test_result_matches_expected_items(self): \"\"\" The result should match", "of ‘pkg_resources.Distribution.has_metadata’. 
\"\"\" if ( resource_name != testcase.version_info_filename or not", "the expected items. \"\"\" version_info = metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info) #", "Public License as published by the # Free Software Foundation;", "standard library. import urlparse import mock import pkg_resources import testtools.helpers", "{ 'test_version_info': json.dumps({ 'version': \"0.0\", }), 'expected_version_info': {'version': \"0.0\"}, }),", "expected items. \"\"\" version_info = metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info) # Copyright", "= metadata.get_distribution_version_info(**self.test_args) self.assertEqual(self.expected_version_info, version_info) # Copyright © 2008–2018 <NAME> <<EMAIL>>", "default_version_info, }), ('no version_info', { 'expected_version_info': default_version_info, }), ('wrong filename',", "should contain author information. \"\"\" self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self):", "Test cases for content of metadata. \"\"\" def test_copyright_formatted_correctly(self): \"\"\"", "hasattr(self, 'get_distribution_error'): self.skipTest(\"No access to distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name) def", "test this value. params['ducktype_attribute_name'] = 'isdigit' def test_module_has_attribute(self): \"\"\" Metadata", "\"\"\" Homepage URL should parse correctly. \"\"\" result = urlparse.urlparse(metadata.url)", "self.version_info_filename = version_info_filename if not hasattr(self, 'expected_resource_name'): self.expected_resource_name = version_info_filename", "correctly\".format( url=metadata.url)) try: FileNotFoundError except NameError: # Python 2 uses", "warranty expressed or implied. 
See the file ‘LICENSE.GPL-3’ for details.", "{ 'begin_year': 1970, 'end_year': None, 'expected_text': \"1970\", }), ] def", "\"FUTURE\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ] def test_result_matches_expected_range(self): \"\"\" Result", "\"lorem_ipsum.json\", 'expected_version_info': default_version_info, }), ] def setUp(self): \"\"\" Set up", "self.attribute_name = name def describe(self): \"\"\" Emit a text description", "for ‘get_distribution_version_info’ function. \"\"\" default_version_info = { 'release_date': \"UNKNOWN\", 'version':", "‘LICENSE.GPL-3’ for details. # Local variables: # coding: utf-8 #", "Metadata value should have expected duck-typing attribute. \"\"\" if self.ducktype_attribute_name", "metadata_value_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for metadata module values. \"\"\" expected_str_attributes", "def test_result_matches_expected_items(self): \"\"\" The result should match the expected items.", "that license or any later version. # No warranty expressed", "if distribution_name != metadata.distribution_name: raise pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'): raise", "contain author information. \"\"\" self.assertThat( metadata.copyright, testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self): \"\"\"", "urlparse except ImportError: # Python 2 standard library. import urlparse", "def fake_func_get_metadata(testcase, resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. \"\"\"", "this attribute's type\") instance = getattr(metadata, self.attribute_name) self.assertThat( instance, HasAttribute(self.ducktype_attribute_name))", "test_result_matches_expected_range(self): \"\"\" Result should match expected YearRange. 
\"\"\" result =", "urlparse import mock import pkg_resources import testtools.helpers import testtools.matchers from", "\"UNKNOWN\", 'maintainer': \"UNKNOWN\", } scenarios = [ ('version 0.0', {", "Test cases for metadata module values. \"\"\" expected_str_attributes = set([", "{ 'begin_year': \"1970\", 'end_date': \"UNKNOWN\", 'expected_range': FakeYearRange(begin=1970, end=None), }), ('end", "metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def test_requests_specified_filename(self): \"\"\" The specified metadata resource", "self.instance = instance self.attribute_name = name def describe(self): \"\"\" Emit", "work # under the terms of the GNU General Public", "2008–2018 <NAME> <<EMAIL>> # # This is free software: you", "{ 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_version_info': {'version': \"1.0\"}, }),", "should be formatted correctly. \"\"\" regex_pattern = ( \".+ \"", "def setUp(self): \"\"\" Set up test fixtures. \"\"\" super(YearRange_TestCase, self).setUp()", "class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘get_distribution_version_info’ function. \"\"\" default_version_info", "regex_flags)) def test_copyright_contains_author(self): \"\"\" Copyright information should contain author information.", "for (name, params) in scenarios: if name == 'version_installed': #", "year', { 'begin_year': \"1970\", 'end_date': None, 'expected_range': FakeYearRange(begin=1970, end=None), }),", "= collections.namedtuple('FakeYearRange', ['begin', 'end']) @mock.patch.object(metadata, 'YearRange', new=FakeYearRange) class make_year_range_TestCase(scaffold.TestCaseWithScenarios): \"\"\"", "be requested. 
\"\"\" if hasattr(self, 'get_distribution_error'): self.skipTest(\"No access to distribution\")", "end of this file for copyright # information, grant of", "expected_str_attributes] for (name, params) in scenarios: if name == 'version_installed':", "the # Free Software Foundation; version 3 of that license", "welcome to redistribute it under # certain conditions; see the", "('no version_info', { 'expected_version_info': default_version_info, }), ('wrong filename', { 'test_filename':", "__init__(self, name): self.attribute_name = name def match(self, instance): \"\"\" Assert", "= re.UNICODE self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_copyright_contains_author(self): \"\"\" Copyright", "testtools.matchers.Contains(metadata.author)) def test_url_parses_correctly(self): \"\"\" Homepage URL should parse correctly. \"\"\"", "is free software: you may copy, modify, and/or distribute this", "try: FileNotFoundError except NameError: # Python 2 uses IOError. FileNotFoundError", ". import scaffold from .scaffold import unicode import daemon._metadata as", "\"\"\" expected_str_attributes = set([ 'version_installed', 'author', 'copyright', 'license', 'url', ])", "}), ('same year', { 'begin_year': \"1970\", 'end_date': \"1970-01-01\", 'expected_range': FakeYearRange(begin=1970,", "= testcase.test_version_info return content def fake_func_get_distribution(testcase, distribution_name): \"\"\" Fake the", "of this file for copyright # information, grant of license,", "'begin_year': \"1970\", 'end_date': \"1970-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1970), }), ('no end", "up test fixtures. 
\"\"\" super(YearRange_TestCase, self).setUp() self.test_instance = metadata.YearRange( self.begin_year,", "\"1970\", }), ('no end year', { 'begin_year': 1970, 'end_year': None,", "'version_installed': # No duck typing, this attribute might be None.", "attribute's type\") instance = getattr(metadata, self.attribute_name) self.assertThat( instance, HasAttribute(self.ducktype_attribute_name)) class", "token', { 'begin_year': \"1970\", 'end_date': \"FUTURE\", 'expected_range': FakeYearRange(begin=1970, end=None), }),", "}), ] def test_result_matches_expected_range(self): \"\"\" Result should match expected YearRange.", "error content = testcase.test_version_info return content def fake_func_get_distribution(testcase, distribution_name): \"\"\"", "if not hasattr(self, 'expected_resource_name'): self.expected_resource_name = version_info_filename self.mock_distribution = mock.MagicMock()", "should be retrieved. \"\"\" expected_distribution_name = metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name)", "} scenarios = [ ('version 0.0', { 'test_version_info': json.dumps({ 'version':", "getattr(metadata, self.attribute_name) self.assertThat( instance, HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases", "content = testcase.test_version_info return content def fake_func_get_distribution(testcase, distribution_name): \"\"\" Fake", "# # This is free software: you may copy, modify,", "\".+ \" # Name. \"<[^>]+>\" # Email address, in angle", "name def match(self, instance): \"\"\" Assert the object `instance` has", "fake_func_get_distribution, self) def test_requests_installed_distribution(self): \"\"\" The package distribution should be", "\"\"\" Test cases for ‘get_distribution_version_info’ function. \"\"\" default_version_info = {", "] def setUp(self): \"\"\" Set up test fixtures. 
\"\"\" super(YearRange_TestCase,", "\"1.0\"}, }), ('not installed', { 'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info, }),", "'begin_year': 1970, 'end_year': 1970, 'expected_text': \"1970\", }), ('no end year',", "it under # certain conditions; see the end of this", "for content of metadata. \"\"\" def test_copyright_formatted_correctly(self): \"\"\" Copyright statement", "license, and disclaimer of warranty. \"\"\" Unit test for ‘_metadata’", "('no end year', { 'begin_year': \"1970\", 'end_date': None, 'expected_range': FakeYearRange(begin=1970,", "'expected_range': FakeYearRange(begin=1970, end=None), }), ('end date UNKNOWN token', { 'begin_year':", "('version 0.0', { 'test_version_info': json.dumps({ 'version': \"0.0\", }), 'expected_version_info': {'version':", "'get_distribution') func_patcher_get_distribution.start() self.addCleanup(func_patcher_get_distribution.stop) pkg_resources.get_distribution.side_effect = functools.partial( fake_func_get_distribution, self) def test_requests_installed_distribution(self):", "result = None if not testtools.helpers.safe_hasattr(instance, self.attribute_name): result = AttributeNotFoundMismatch(instance,", "<<EMAIL>> # # This is free software: you may copy,", "free software: you may copy, modify, and/or distribute this work", "No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for", "regex_flags = re.UNICODE self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_author_formatted_correctly(self): \"\"\"", "}), ('no end year', { 'begin_year': 1970, 'end_year': None, 'expected_text':", "expected. 
\"\"\" result = unicode(self.test_instance) self.assertEqual(result, self.expected_text) FakeYearRange = collections.namedtuple('FakeYearRange',", ") regex_flags = re.UNICODE self.assertThat( metadata.author, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_copyright_contains_author(self):", "of ‘str’ to test this value. params['ducktype_attribute_name'] = 'isdigit' def", "Copyright © 2008–2018 <NAME> <<EMAIL>> # # This is free", "return result class AttributeNotFoundMismatch(testtools.matchers.Mismatch): \"\"\" The specified instance does not", "version 3 of that license or any later version. #", "regex_pattern = ( \".+ \" # Name. \"<[^>]+>\" # Email", "version_info', { 'expected_version_info': default_version_info, }), ('wrong filename', { 'test_filename': \"lorem_ipsum.json\",", "See the file ‘LICENSE.GPL-3’ for details. # Local variables: #", "}), ('not installed', { 'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info': default_version_info, }), ('no", "raise error content = testcase.test_version_info return content def fake_func_get_distribution(testcase, distribution_name):", "# No duck typing, this attribute might be None. params['ducktype_attribute_name']", "be retrieved. \"\"\" expected_distribution_name = metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def", "= testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect = functools.partial(", "scenarios: if name == 'version_installed': # No duck typing, this", "FakeYearRange(begin=1970, end=None), }), ('end date UNKNOWN token', { 'begin_year': \"1970\",", "to test this value. 
params['ducktype_attribute_name'] = 'isdigit' def test_module_has_attribute(self): \"\"\"", "\"\"\" if self.ducktype_attribute_name == NotImplemented: self.skipTest(\"Can't assert this attribute's type\")", "the behaviour of ‘pkg_resources.Distribution.get_metadata’. \"\"\" if not fake_func_has_metadata(testcase, resource_name): error", "self.assertThat( metadata, HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self): \"\"\" Metadata value should have", "fake_func_has_metadata(testcase, resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. \"\"\" if", "def test_copyright_formatted_correctly(self): \"\"\" Copyright statement should be formatted correctly. \"\"\"", "mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata, testcase)", "<NAME> <<EMAIL>> # # This is free software: you may", "'version': \"0.0\", }), 'expected_version_info': {'version': \"0.0\"}, }), ('version 1.0', {", "1.0', { 'test_version_info': json.dumps({ 'version': \"1.0\", }), 'expected_version_info': {'version': \"1.0\"},", "('file lorem_ipsum.json', { 'test_filename': \"lorem_ipsum.json\", 'version_info_filename': \"lorem_ipsum.json\", 'test_version_info': json.dumps({ 'version':", "1970, 'end_year': 1979, 'expected_text': \"1970–1979\", }), ('same year', { 'begin_year':", "be None. params['ducktype_attribute_name'] = NotImplemented continue # Expect an attribute", "# -*- coding: utf-8 -*- # # test/test_metadata.py # Part", "GNU General Public License as published by the # Free", "details. 
# Local variables: # coding: utf-8 # mode: python", "= getattr(metadata, self.attribute_name) self.assertThat( instance, HasAttribute(self.ducktype_attribute_name)) class YearRange_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test", "end=1979), }), ('same year', { 'begin_year': \"1970\", 'end_date': \"1970-01-01\", 'expected_range':", "content def fake_func_get_distribution(testcase, distribution_name): \"\"\" Fake the behaviour of ‘pkg_resources.get_distribution’.", "self.expected_resource_name = version_info_filename self.mock_distribution = mock.MagicMock() func_patcher_get_distribution = mock.patch.object( pkg_resources,", "'distribution_name', new=\"mock-dist\") class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios): \"\"\" Test cases for ‘get_distribution_version_info’ function.", "def __init__(self, name): self.attribute_name = name def match(self, instance): \"\"\"", "import functools import json import re try: # Python 3", "values. \"\"\" expected_str_attributes = set([ 'version_installed', 'author', 'copyright', 'license', 'url',", "# Four-digit year. \"(?:–\\d{4})?\" # Optional range dash and four-digit", "\"UNKNOWN\", } scenarios = [ ('version 0.0', { 'test_version_info': json.dumps({", "resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. \"\"\" if (", "\"{instance!r}\" \" has no attribute named {name!r}\").format( instance=self.instance, name=self.attribute_name) return", "expected_distribution_name = metadata.distribution_name metadata.get_distribution_version_info(**self.test_args) pkg_resources.get_distribution.assert_called_with( expected_distribution_name) def test_requests_specified_filename(self): \"\"\" The", "self.expected_resource_name) def test_result_matches_expected_items(self): \"\"\" The result should match the expected", "grant of license, and disclaimer of warranty. 
\"\"\" Unit test", "\"version_info.json\" def fake_func_has_metadata(testcase, resource_name): \"\"\" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’.", "expected value as a module attribute. \"\"\" self.assertThat( metadata, HasAttribute(self.attribute_name))", "attribute might be None. params['ducktype_attribute_name'] = NotImplemented continue # Expect", "def setUp(self): \"\"\" Set up test fixtures. \"\"\" super(get_distribution_version_info_TestCase, self).setUp()", "pkg_resources.get_distribution.side_effect = functools.partial( fake_func_get_distribution, self) def test_requests_installed_distribution(self): \"\"\" The package", "# information, grant of license, and disclaimer of warranty. \"\"\"", "should be as expected. \"\"\" result = unicode(self.test_instance) self.assertEqual(result, self.expected_text)", "end year', { 'begin_year': \"1970\", 'end_date': None, 'expected_range': FakeYearRange(begin=1970, end=None),", "Homepage URL should parse correctly. \"\"\" result = urlparse.urlparse(metadata.url) self.assertIsInstance(", "metadata_content_TestCase(scaffold.TestCase): \"\"\" Test cases for content of metadata. \"\"\" def", "\"\"\" Copyright information should contain author information. \"\"\" self.assertThat( metadata.copyright,", "testtools.matchers from . import scaffold from .scaffold import unicode import", "# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’", ") regex_flags = re.UNICODE self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_author_formatted_correctly(self):", "Name. \"<[^>]+>\" # Email address, in angle brackets. ) regex_flags", "the behaviour of ‘pkg_resources.Distribution.has_metadata’. \"\"\" if ( resource_name != testcase.version_info_filename", "[ ('simple', { 'begin_year': 1970, 'end_year': 1979, 'expected_text': \"1970–1979\", }),", "\"\\d{4}\" # Four-digit year. 
\"(?:–\\d{4})?\" # Optional range dash and", "testtools.matchers.MatchesRegex(regex_pattern, regex_flags)) def test_copyright_contains_author(self): \"\"\" Copyright information should contain author", "address, in angle brackets. ) regex_flags = re.UNICODE self.assertThat( metadata.author,", "\"\"\" self.assertThat( metadata, HasAttribute(self.attribute_name)) def test_module_attribute_has_duck_type(self): \"\"\" Metadata value should", "= name def describe(self): \"\"\" Emit a text description of", "__future__ import (absolute_import, unicode_literals) import collections import errno import functools", "\"\"\" def __init__(self, instance, name): self.instance = instance self.attribute_name =", "'author', 'copyright', 'license', 'url', ]) scenarios = [ (name, {'attribute_name':", "\"\"\" from __future__ import (absolute_import, unicode_literals) import collections import errno", "= unicode(self.test_instance) self.assertEqual(result, self.expected_text) FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end']) @mock.patch.object(metadata,", "\"\"\" if ( resource_name != testcase.version_info_filename or not hasattr(testcase, 'test_version_info')):", "json.dumps({ 'version': \"1.0\", }), 'expected_resource_name': \"lorem_ipsum.json\", 'expected_version_info': {'version': \"1.0\"}, }),", "'expected_version_info': {'version': \"1.0\"}, }), ('not installed', { 'get_distribution_error': pkg_resources.DistributionNotFound(), 'expected_version_info':", "json import re try: # Python 3 standard library. import", "and four-digit year. ) regex_flags = re.UNICODE self.assertThat( metadata.copyright, testtools.matchers.MatchesRegex(regex_pattern,", "[ ('simple', { 'begin_year': \"1970\", 'end_date': \"1979-01-01\", 'expected_range': FakeYearRange(begin=1970, end=1979),", "2 uses IOError. 
FileNotFoundError = functools.partial(IOError, errno.ENOENT) version_info_filename = \"version_info.json\"", "'expected_range': FakeYearRange(begin=1970, end=None), }), ] def test_result_matches_expected_range(self): \"\"\" Result should", "if hasattr(self, 'get_distribution_error'): self.skipTest(\"No access to distribution\") metadata.get_distribution_version_info(**self.test_args) self.mock_distribution.has_metadata.assert_called_with( self.expected_resource_name)" ]
[ "from .trait_reference import TraitReference from cdm.utilities import JObject class PurposeReference(JObject):", "JObject class PurposeReference(JObject): def __init__(self): super().__init__() self.purposeReference = None #", "information. from typing import Union, List from .purpose import *", "typing import Union, List from .purpose import * from .trait_reference", "# type: Union[str, Purpose] self.appliedTraits = [] # type: List[Union[str,", "class PurposeReference(JObject): def __init__(self): super().__init__() self.purposeReference = None # type:", "Union, List from .purpose import * from .trait_reference import TraitReference", "license information. from typing import Union, List from .purpose import", "the MIT License. See License.txt in the project root for", "import TraitReference from cdm.utilities import JObject class PurposeReference(JObject): def __init__(self):", "License. See License.txt in the project root for license information.", "def __init__(self): super().__init__() self.purposeReference = None # type: Union[str, Purpose]", "= None # type: Union[str, Purpose] self.appliedTraits = [] #", "type: Union[str, Purpose] self.appliedTraits = [] # type: List[Union[str, TraitReference]]", "Microsoft Corporation. All rights reserved. # Licensed under the MIT", "Licensed under the MIT License. See License.txt in the project", "<reponame>wheatdog/CDM # Copyright (c) Microsoft Corporation. All rights reserved. #", "for license information. from typing import Union, List from .purpose", "rights reserved. # Licensed under the MIT License. 
See License.txt", "import * from .trait_reference import TraitReference from cdm.utilities import JObject", "cdm.utilities import JObject class PurposeReference(JObject): def __init__(self): super().__init__() self.purposeReference =", "from cdm.utilities import JObject class PurposeReference(JObject): def __init__(self): super().__init__() self.purposeReference", "PurposeReference(JObject): def __init__(self): super().__init__() self.purposeReference = None # type: Union[str,", "MIT License. See License.txt in the project root for license", "in the project root for license information. from typing import", "import Union, List from .purpose import * from .trait_reference import", ".purpose import * from .trait_reference import TraitReference from cdm.utilities import", "None # type: Union[str, Purpose] self.appliedTraits = [] # type:", "TraitReference from cdm.utilities import JObject class PurposeReference(JObject): def __init__(self): super().__init__()", "project root for license information. from typing import Union, List", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "See License.txt in the project root for license information. from", "reserved. # Licensed under the MIT License. See License.txt in", "the project root for license information. from typing import Union,", "List from .purpose import * from .trait_reference import TraitReference from", "self.purposeReference = None # type: Union[str, Purpose] self.appliedTraits = []", "under the MIT License. See License.txt in the project root", "root for license information. from typing import Union, List from", "from .purpose import * from .trait_reference import TraitReference from cdm.utilities", ".trait_reference import TraitReference from cdm.utilities import JObject class PurposeReference(JObject): def", "import JObject class PurposeReference(JObject): def __init__(self): super().__init__() self.purposeReference = None", "Corporation. All rights reserved. 
# Licensed under the MIT License.", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "All rights reserved. # Licensed under the MIT License. See", "# Licensed under the MIT License. See License.txt in the", "from typing import Union, List from .purpose import * from", "* from .trait_reference import TraitReference from cdm.utilities import JObject class", "super().__init__() self.purposeReference = None # type: Union[str, Purpose] self.appliedTraits =", "__init__(self): super().__init__() self.purposeReference = None # type: Union[str, Purpose] self.appliedTraits", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "License.txt in the project root for license information. from typing" ]
[ "x def remove_rating(x): return re.sub('\\W\\d/\\d+\\S*', '', x) x = x.lower()", "str]) -> List[str]: \"\"\" Accepts text (possibly tokenized) and makes", "= self._lemmatize(x) return x def _remove_stop_words(self, x: Union[list, str]) ->", "<filename>text_preprocessing/normalizer.py import re from typing import Union, List import nltk", "x: Union[list, str]) -> List[str]: \"\"\" Accepts text (possibly tokenized)", "self._denoise(x) x = self._lemmatize(x) return x def _remove_stop_words(self, x: Union[list,", "[w for w in x if not w in stop_words]", "__init__(self): self.lemmatizer = nltk.stem.WordNetLemmatizer() def normalize(self, x: Union[list, str]) ->", "x.lower() x = re.sub(',|\\.|!|\\?', '', x) x = strip_html(x) x", "x.split(' ') stop_words = set(nltk.corpus.stopwords.words('english')) return [w for w in", "= x.lower() x = re.sub(',|\\.|!|\\?', '', x) x = strip_html(x)", "\"html.parser\") x = soup.get_text() return x def remove_between_square_brackets(x): x =", "if not w in stop_words] def _lemmatize(self, x: Union[list, str])", "remove_between_square_brackets(x): x = re.sub('\\[[^]]*\\]', '', x) x = re.sub(r'http\\S+', '',", "x) x = x.lower() x = re.sub(',|\\.|!|\\?', '', x) x", "nltk.stem.WordNetLemmatizer() def normalize(self, x: Union[list, str]) -> List[str]: \"\"\" Accepts", "List[str]: \"\"\" Removes endings, \"\"\" if isinstance(x, list): x =", "def _remove_stop_words(self, x: Union[list, str]) -> List[str]: \"\"\" Removes stop", "isinstance(x, list): x = ' '.join(x) def strip_html(x): soup =", "bs4 import BeautifulSoup class Normalizer: def __init__(self): self.lemmatizer = nltk.stem.WordNetLemmatizer()", "return [w for w in x if not w in", "x def remove_between_square_brackets(x): x = re.sub('\\[[^]]*\\]', '', x) x =", "str]) -> List[str]: \"\"\" Removes stop words from text in", "'.join(x) x = self.lemmatizer.lemmatize(x) return x def _denoise(self, x: Union[list,", "= ' '.join(x) def strip_html(x): soup = BeautifulSoup(x, \"html.parser\") x", 
"= ' '.join(x) x = self.lemmatizer.lemmatize(x) return x def _denoise(self,", "isinstance(x, str): x = x.split(' ') stop_words = set(nltk.corpus.stopwords.words('english')) return", "\"\"\" Accepts text (possibly tokenized) and makes it suitable for", "x def _denoise(self, x: Union[list, str]) -> str: if isinstance(x,", "x = ' '.join(x) def strip_html(x): soup = BeautifulSoup(x, \"html.parser\")", "re.sub('\\W\\d/\\d+\\S*', '', x) x = x.lower() x = re.sub(',|\\.|!|\\?', '',", "Removes endings, \"\"\" if isinstance(x, list): x = ' '.join(x)", "not w in stop_words] def _lemmatize(self, x: Union[list, str]) ->", "stop_words = set(nltk.corpus.stopwords.words('english')) return [w for w in x if", "x = x.lower() x = re.sub(',|\\.|!|\\?', '', x) x =", "from bs4 import BeautifulSoup class Normalizer: def __init__(self): self.lemmatizer =", "def remove_rating(x): return re.sub('\\W\\d/\\d+\\S*', '', x) x = x.lower() x", "import nltk from bs4 import BeautifulSoup class Normalizer: def __init__(self):", "re.sub(',|\\.|!|\\?', '', x) x = strip_html(x) x = remove_between_square_brackets(x) x", "re.sub('\\[[^]]*\\]', '', x) x = re.sub(r'http\\S+', '', x) return x", "\"\"\" if isinstance(x, list): x = ' '.join(x) x =", "x def _remove_stop_words(self, x: Union[list, str]) -> List[str]: \"\"\" Removes", "= set(nltk.corpus.stopwords.words('english')) return [w for w in x if not", "List import nltk from bs4 import BeautifulSoup class Normalizer: def", "x: Union[list, str]) -> str: if isinstance(x, list): x =", "x = x.split(' ') stop_words = set(nltk.corpus.stopwords.words('english')) return [w for", "_remove_stop_words(self, x: Union[list, str]) -> List[str]: \"\"\" Removes stop words", "= re.sub(r'http\\S+', '', x) return x def remove_rating(x): return re.sub('\\W\\d/\\d+\\S*',", "'', x) x = strip_html(x) x = remove_between_square_brackets(x) x =", "self.lemmatizer = nltk.stem.WordNetLemmatizer() def normalize(self, x: Union[list, str]) -> List[str]:", "x) x = strip_html(x) x = 
remove_between_square_brackets(x) x = remove_rating(x)", "normalize(self, x: Union[list, str]) -> List[str]: \"\"\" Accepts text (possibly", "if isinstance(x, list): x = ' '.join(x) x = self.lemmatizer.lemmatize(x)", "w in x if not w in stop_words] def _lemmatize(self,", "return re.sub('\\W\\d/\\d+\\S*', '', x) x = x.lower() x = re.sub(',|\\.|!|\\?',", "in stop_words] def _lemmatize(self, x: Union[list, str]) -> List[str]: \"\"\"", "in x if not w in stop_words] def _lemmatize(self, x:", "'.join(x) def strip_html(x): soup = BeautifulSoup(x, \"html.parser\") x = soup.get_text()", "suitable for machine processing \"\"\" x = self._remove_stop_words(x) x =", "processing \"\"\" x = self._remove_stop_words(x) x = self._denoise(x) x =", "x = self._lemmatize(x) return x def _remove_stop_words(self, x: Union[list, str])", "list): x = ' '.join(x) x = self.lemmatizer.lemmatize(x) return x", "Union[list, str]) -> str: if isinstance(x, list): x = '", "tokenized) and makes it suitable for machine processing \"\"\" x", "x = self.lemmatizer.lemmatize(x) return x def _denoise(self, x: Union[list, str])", "for w in x if not w in stop_words] def", "x) return x def remove_rating(x): return re.sub('\\W\\d/\\d+\\S*', '', x) x", "-> List[str]: \"\"\" Removes stop words from text in english", "class Normalizer: def __init__(self): self.lemmatizer = nltk.stem.WordNetLemmatizer() def normalize(self, x:", "\"\"\" Removes stop words from text in english \"\"\" if", "x = self._denoise(x) x = self._lemmatize(x) return x def _remove_stop_words(self,", "(possibly tokenized) and makes it suitable for machine processing \"\"\"", "english \"\"\" if isinstance(x, str): x = x.split(' ') stop_words", "List[str]: \"\"\" Accepts text (possibly tokenized) and makes it suitable", "= self._denoise(x) x = self._lemmatize(x) return x def _remove_stop_words(self, x:", "x = re.sub(r'http\\S+', '', x) return x def remove_rating(x): return", "isinstance(x, list): x = ' '.join(x) x = self.lemmatizer.lemmatize(x) 
return", "= BeautifulSoup(x, \"html.parser\") x = soup.get_text() return x def remove_between_square_brackets(x):", "import BeautifulSoup class Normalizer: def __init__(self): self.lemmatizer = nltk.stem.WordNetLemmatizer() def", "makes it suitable for machine processing \"\"\" x = self._remove_stop_words(x)", "self.lemmatizer.lemmatize(x) return x def _denoise(self, x: Union[list, str]) -> str:", "Union[list, str]) -> List[str]: \"\"\" Accepts text (possibly tokenized) and", "= self._remove_stop_words(x) x = self._denoise(x) x = self._lemmatize(x) return x", "set(nltk.corpus.stopwords.words('english')) return [w for w in x if not w", "\"\"\" Removes endings, \"\"\" if isinstance(x, list): x = '", "= x.split(' ') stop_words = set(nltk.corpus.stopwords.words('english')) return [w for w", "List[str]: \"\"\" Removes stop words from text in english \"\"\"", "str: if isinstance(x, list): x = ' '.join(x) def strip_html(x):", "BeautifulSoup(x, \"html.parser\") x = soup.get_text() return x def remove_between_square_brackets(x): x", "typing import Union, List import nltk from bs4 import BeautifulSoup", "x = self._remove_stop_words(x) x = self._denoise(x) x = self._lemmatize(x) return", "x = re.sub(',|\\.|!|\\?', '', x) x = strip_html(x) x =", "Accepts text (possibly tokenized) and makes it suitable for machine", "self._remove_stop_words(x) x = self._denoise(x) x = self._lemmatize(x) return x def", "Removes stop words from text in english \"\"\" if isinstance(x,", "-> str: if isinstance(x, list): x = ' '.join(x) def", "x = soup.get_text() return x def remove_between_square_brackets(x): x = re.sub('\\[[^]]*\\]',", "return x def _denoise(self, x: Union[list, str]) -> str: if", "x) x = re.sub(r'http\\S+', '', x) return x def remove_rating(x):", "_lemmatize(self, x: Union[list, str]) -> List[str]: \"\"\" Removes endings, \"\"\"", "= nltk.stem.WordNetLemmatizer() def normalize(self, x: Union[list, str]) -> List[str]: \"\"\"", "machine processing \"\"\" x = 
self._remove_stop_words(x) x = self._denoise(x) x", "from typing import Union, List import nltk from bs4 import", "self._lemmatize(x) return x def _remove_stop_words(self, x: Union[list, str]) -> List[str]:", "= strip_html(x) x = remove_between_square_brackets(x) x = remove_rating(x) return x", "import Union, List import nltk from bs4 import BeautifulSoup class", "text in english \"\"\" if isinstance(x, str): x = x.split('", "= self.lemmatizer.lemmatize(x) return x def _denoise(self, x: Union[list, str]) ->", "def _denoise(self, x: Union[list, str]) -> str: if isinstance(x, list):", "w in stop_words] def _lemmatize(self, x: Union[list, str]) -> List[str]:", "str]) -> str: if isinstance(x, list): x = ' '.join(x)", "'', x) return x def remove_rating(x): return re.sub('\\W\\d/\\d+\\S*', '', x)", "def __init__(self): self.lemmatizer = nltk.stem.WordNetLemmatizer() def normalize(self, x: Union[list, str])", "def remove_between_square_brackets(x): x = re.sub('\\[[^]]*\\]', '', x) x = re.sub(r'http\\S+',", "' '.join(x) def strip_html(x): soup = BeautifulSoup(x, \"html.parser\") x =", "Normalizer: def __init__(self): self.lemmatizer = nltk.stem.WordNetLemmatizer() def normalize(self, x: Union[list,", "Union[list, str]) -> List[str]: \"\"\" Removes endings, \"\"\" if isinstance(x,", "Union, List import nltk from bs4 import BeautifulSoup class Normalizer:", "it suitable for machine processing \"\"\" x = self._remove_stop_words(x) x", "\"\"\" if isinstance(x, str): x = x.split(' ') stop_words =", "return x def remove_between_square_brackets(x): x = re.sub('\\[[^]]*\\]', '', x) x", "return x def _remove_stop_words(self, x: Union[list, str]) -> List[str]: \"\"\"", "'', x) x = x.lower() x = re.sub(',|\\.|!|\\?', '', x)", "\"\"\" x = self._remove_stop_words(x) x = self._denoise(x) x = self._lemmatize(x)", "= soup.get_text() return x def remove_between_square_brackets(x): x = re.sub('\\[[^]]*\\]', '',", "def strip_html(x): soup = BeautifulSoup(x, \"html.parser\") x = 
soup.get_text() return", "-> List[str]: \"\"\" Accepts text (possibly tokenized) and makes it", "Union[list, str]) -> List[str]: \"\"\" Removes stop words from text", "_denoise(self, x: Union[list, str]) -> str: if isinstance(x, list): x", "if isinstance(x, str): x = x.split(' ') stop_words = set(nltk.corpus.stopwords.words('english'))", "x = re.sub('\\[[^]]*\\]', '', x) x = re.sub(r'http\\S+', '', x)", "= re.sub(',|\\.|!|\\?', '', x) x = strip_html(x) x = remove_between_square_brackets(x)", "x = ' '.join(x) x = self.lemmatizer.lemmatize(x) return x def", "x: Union[list, str]) -> List[str]: \"\"\" Removes stop words from", "nltk from bs4 import BeautifulSoup class Normalizer: def __init__(self): self.lemmatizer", "for machine processing \"\"\" x = self._remove_stop_words(x) x = self._denoise(x)", "= re.sub('\\[[^]]*\\]', '', x) x = re.sub(r'http\\S+', '', x) return", "from text in english \"\"\" if isinstance(x, str): x =", "'', x) x = re.sub(r'http\\S+', '', x) return x def", "return x def remove_rating(x): return re.sub('\\W\\d/\\d+\\S*', '', x) x =", "BeautifulSoup class Normalizer: def __init__(self): self.lemmatizer = nltk.stem.WordNetLemmatizer() def normalize(self,", "words from text in english \"\"\" if isinstance(x, str): x", "soup.get_text() return x def remove_between_square_brackets(x): x = re.sub('\\[[^]]*\\]', '', x)", "re from typing import Union, List import nltk from bs4", "soup = BeautifulSoup(x, \"html.parser\") x = soup.get_text() return x def", "import re from typing import Union, List import nltk from", "and makes it suitable for machine processing \"\"\" x =", "str]) -> List[str]: \"\"\" Removes endings, \"\"\" if isinstance(x, list):", "') stop_words = set(nltk.corpus.stopwords.words('english')) return [w for w in x", "list): x = ' '.join(x) def strip_html(x): soup = BeautifulSoup(x,", "re.sub(r'http\\S+', '', x) return x def remove_rating(x): return re.sub('\\W\\d/\\d+\\S*', '',", "str): x = x.split(' ') stop_words = 
set(nltk.corpus.stopwords.words('english')) return [w", "endings, \"\"\" if isinstance(x, list): x = ' '.join(x) x", "stop_words] def _lemmatize(self, x: Union[list, str]) -> List[str]: \"\"\" Removes", "def _lemmatize(self, x: Union[list, str]) -> List[str]: \"\"\" Removes endings,", "x: Union[list, str]) -> List[str]: \"\"\" Removes endings, \"\"\" if", "' '.join(x) x = self.lemmatizer.lemmatize(x) return x def _denoise(self, x:", "x = strip_html(x) x = remove_between_square_brackets(x) x = remove_rating(x) return", "x if not w in stop_words] def _lemmatize(self, x: Union[list,", "def normalize(self, x: Union[list, str]) -> List[str]: \"\"\" Accepts text", "-> List[str]: \"\"\" Removes endings, \"\"\" if isinstance(x, list): x", "stop words from text in english \"\"\" if isinstance(x, str):", "remove_rating(x): return re.sub('\\W\\d/\\d+\\S*', '', x) x = x.lower() x =", "in english \"\"\" if isinstance(x, str): x = x.split(' ')", "strip_html(x): soup = BeautifulSoup(x, \"html.parser\") x = soup.get_text() return x", "text (possibly tokenized) and makes it suitable for machine processing", "if isinstance(x, list): x = ' '.join(x) def strip_html(x): soup" ]
[ "rely on this file.) \"\"\" from __future__ import unicode_literals from", "are going to have key bindings that rely on this", "this file.) \"\"\" from __future__ import unicode_literals from .app import", "IsSearching = lambda: is_searching HasSearch = lambda: is_searching ControlIsSearchable =", "__future__ import unicode_literals from .app import * __all__ = [", "= lambda: vi_waiting_for_text_object_mode ViSelectionMode = lambda: vi_selection_mode ViReplaceMode = lambda:", "= lambda: vi_digraph_mode ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode ViSelectionMode = lambda:", "backwards-compatibility. keep this file. (Many people are going to have", ".app import * __all__ = [ # Old names. 'HasArg',", "'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode',", "= lambda: is_searching ControlIsSearchable = lambda: control_is_searchable EmacsSelectionMode = lambda:", "EmacsInsertMode = lambda: emacs_insert_mode ViMode = lambda: vi_mode IsSearching =", "'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable', ] # Keep the", "'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable',", "'HasSearch', 'ControlIsSearchable', ] # Keep the original classnames for backwards", "emacs_insert_mode ViMode = lambda: vi_mode IsSearching = lambda: is_searching HasSearch", "ViReplaceMode = lambda: vi_replace_mode ViInsertMultipleMode = lambda: vi_insert_multiple_mode ViInsertMode =", "= lambda: vi_replace_mode ViInsertMultipleMode = lambda: vi_insert_multiple_mode ViInsertMode = lambda:", "vi_navigation_mode InPasteMode = lambda: in_paste_mode EmacsMode = lambda: emacs_mode EmacsInsertMode", "'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 
'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable', ] #", "has_selection HasCompletions = lambda: has_completions IsReadOnly = lambda: is_read_only IsMultiline", "lambda: vi_waiting_for_text_object_mode ViSelectionMode = lambda: vi_selection_mode ViReplaceMode = lambda: vi_replace_mode", "'ControlIsSearchable', ] # Keep the original classnames for backwards compatibility.", "lambda: has_validation_error HasArg = lambda: has_arg IsDone = lambda: is_done", "'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode',", "HasValidationError = lambda: has_validation_error HasArg = lambda: has_arg IsDone =", "ViNavigationMode = lambda: vi_navigation_mode InPasteMode = lambda: in_paste_mode EmacsMode =", "EmacsSelectionMode = lambda: emacs_selection_mode ViDigraphMode = lambda: vi_digraph_mode ViWaitingForTextObjectMode =", "= lambda: has_selection HasCompletions = lambda: has_completions IsReadOnly = lambda:", "lambda here! (Has_focus is callable that returns a callable.) InEditingMode", "has_arg IsDone = lambda: is_done RendererHeightIsKnown = lambda: renderer_height_is_known ViNavigationMode", "ViDigraphMode = lambda: vi_digraph_mode ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode ViSelectionMode =", "vi_insert_mode HasSelection = lambda: has_selection HasCompletions = lambda: has_completions IsReadOnly", "lambda: has_selection HasCompletions = lambda: has_completions IsReadOnly = lambda: is_read_only", "__all__ = [ # Old names. 
'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection',", "= lambda: in_paste_mode EmacsMode = lambda: emacs_mode EmacsInsertMode = lambda:", "HasSelection = lambda: has_selection HasCompletions = lambda: has_completions IsReadOnly =", "= lambda: emacs_mode EmacsInsertMode = lambda: emacs_insert_mode ViMode = lambda:", "ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode ViSelectionMode = lambda: vi_selection_mode ViReplaceMode =", "'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable', ] # Keep the original", "= [ # Old names. 'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError',", "'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable', ] # Keep the original classnames", "compatibility. HasValidationError = lambda: has_validation_error HasArg = lambda: has_arg IsDone", "classnames for backwards compatibility. HasValidationError = lambda: has_validation_error HasArg =", "import unicode_literals from .app import * __all__ = [ #", "HasFocus = has_focus # No lambda here! (Has_focus is callable", "lambda: emacs_insert_mode ViMode = lambda: vi_mode IsSearching = lambda: is_searching", "lambda: vi_insert_mode HasSelection = lambda: has_selection HasCompletions = lambda: has_completions", "going to have key bindings that rely on this file.)", "vi_waiting_for_text_object_mode ViSelectionMode = lambda: vi_selection_mode ViReplaceMode = lambda: vi_replace_mode ViInsertMultipleMode", "vi_mode IsSearching = lambda: is_searching HasSearch = lambda: is_searching ControlIsSearchable", "* __all__ = [ # Old names. 'HasArg', 'HasCompletions', 'HasFocus',", "'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode',", "# Keep the original classnames for backwards compatibility. 
HasValidationError =", "lambda: vi_navigation_mode InPasteMode = lambda: in_paste_mode EmacsMode = lambda: emacs_mode", "IsMultiline = lambda: is_multiline HasFocus = has_focus # No lambda", "unicode_literals from .app import * __all__ = [ # Old", "= lambda: emacs_insert_mode ViMode = lambda: vi_mode IsSearching = lambda:", "lambda: control_is_searchable EmacsSelectionMode = lambda: emacs_selection_mode ViDigraphMode = lambda: vi_digraph_mode", "lambda: vi_replace_mode ViInsertMultipleMode = lambda: vi_insert_multiple_mode ViInsertMode = lambda: vi_insert_mode", "\"\"\" For backwards-compatibility. keep this file. (Many people are going", "'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode',", "lambda: emacs_selection_mode ViDigraphMode = lambda: vi_digraph_mode ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode", "ViInsertMultipleMode = lambda: vi_insert_multiple_mode ViInsertMode = lambda: vi_insert_mode HasSelection =", "this file. (Many people are going to have key bindings", "= lambda: vi_insert_mode HasSelection = lambda: has_selection HasCompletions = lambda:", "'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch',", "= lambda: is_multiline HasFocus = has_focus # No lambda here!", "original classnames for backwards compatibility. 
HasValidationError = lambda: has_validation_error HasArg", "is_done RendererHeightIsKnown = lambda: renderer_height_is_known ViNavigationMode = lambda: vi_navigation_mode InPasteMode", "vi_digraph_mode ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode ViSelectionMode = lambda: vi_selection_mode ViReplaceMode", "lambda: vi_insert_multiple_mode ViInsertMode = lambda: vi_insert_mode HasSelection = lambda: has_selection", "lambda: vi_digraph_mode ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode ViSelectionMode = lambda: vi_selection_mode", "= lambda: vi_selection_mode ViReplaceMode = lambda: vi_replace_mode ViInsertMultipleMode = lambda:", "= has_focus # No lambda here! (Has_focus is callable that", "from .app import * __all__ = [ # Old names.", "lambda: renderer_height_is_known ViNavigationMode = lambda: vi_navigation_mode InPasteMode = lambda: in_paste_mode", "lambda: has_arg IsDone = lambda: is_done RendererHeightIsKnown = lambda: renderer_height_is_known", "ViMode = lambda: vi_mode IsSearching = lambda: is_searching HasSearch =", "have key bindings that rely on this file.) \"\"\" from", "'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode',", "backwards compatibility. HasValidationError = lambda: has_validation_error HasArg = lambda: has_arg", "lambda: vi_selection_mode ViReplaceMode = lambda: vi_replace_mode ViInsertMultipleMode = lambda: vi_insert_multiple_mode", "import * __all__ = [ # Old names. 
'HasArg', 'HasCompletions',", "(Many people are going to have key bindings that rely", "'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode',", "is_read_only IsMultiline = lambda: is_multiline HasFocus = has_focus # No", "= lambda: renderer_height_is_known ViNavigationMode = lambda: vi_navigation_mode InPasteMode = lambda:", "from __future__ import unicode_literals from .app import * __all__ =", "Keep the original classnames for backwards compatibility. HasValidationError = lambda:", "InPasteMode = lambda: in_paste_mode EmacsMode = lambda: emacs_mode EmacsInsertMode =", "= lambda: is_done RendererHeightIsKnown = lambda: renderer_height_is_known ViNavigationMode = lambda:", "file. (Many people are going to have key bindings that", "names. 'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown',", "= lambda: emacs_selection_mode ViDigraphMode = lambda: vi_digraph_mode ViWaitingForTextObjectMode = lambda:", "'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode',", "file.) \"\"\" from __future__ import unicode_literals from .app import *", "lambda: is_multiline HasFocus = has_focus # No lambda here! 
(Has_focus", "= lambda: has_completions IsReadOnly = lambda: is_read_only IsMultiline = lambda:", "IsDone = lambda: is_done RendererHeightIsKnown = lambda: renderer_height_is_known ViNavigationMode =", "'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode',", "vi_replace_mode ViInsertMultipleMode = lambda: vi_insert_multiple_mode ViInsertMode = lambda: vi_insert_mode HasSelection", "people are going to have key bindings that rely on", "'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode',", "is_searching ControlIsSearchable = lambda: control_is_searchable EmacsSelectionMode = lambda: emacs_selection_mode ViDigraphMode", "has_completions IsReadOnly = lambda: is_read_only IsMultiline = lambda: is_multiline HasFocus", "ViSelectionMode = lambda: vi_selection_mode ViReplaceMode = lambda: vi_replace_mode ViInsertMultipleMode =", "'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode',", "lambda: is_searching ControlIsSearchable = lambda: control_is_searchable EmacsSelectionMode = lambda: emacs_selection_mode", "lambda: vi_mode IsSearching = lambda: is_searching HasSearch = lambda: is_searching", "vi_insert_multiple_mode ViInsertMode = lambda: vi_insert_mode HasSelection = lambda: has_selection HasCompletions", "= lambda: is_read_only IsMultiline = lambda: is_multiline HasFocus = has_focus", "'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode',", "control_is_searchable EmacsSelectionMode = lambda: emacs_selection_mode ViDigraphMode = lambda: vi_digraph_mode ViWaitingForTextObjectMode", "No lambda here! 
(Has_focus is callable that returns a callable.)", "is_searching HasSearch = lambda: is_searching ControlIsSearchable = lambda: control_is_searchable EmacsSelectionMode", "(Has_focus is callable that returns a callable.) InEditingMode = in_editing_mode", "'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable', ] # Keep", "bindings that rely on this file.) \"\"\" from __future__ import", "= lambda: vi_mode IsSearching = lambda: is_searching HasSearch = lambda:", "'IsSearching', 'HasSearch', 'ControlIsSearchable', ] # Keep the original classnames for", "ControlIsSearchable = lambda: control_is_searchable EmacsSelectionMode = lambda: emacs_selection_mode ViDigraphMode =", "= lambda: vi_navigation_mode InPasteMode = lambda: in_paste_mode EmacsMode = lambda:", "= lambda: is_searching HasSearch = lambda: is_searching ControlIsSearchable = lambda:", "] # Keep the original classnames for backwards compatibility. HasValidationError", "'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable', ]", "for backwards compatibility. HasValidationError = lambda: has_validation_error HasArg = lambda:", "on this file.) \"\"\" from __future__ import unicode_literals from .app", "'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode',", "emacs_mode EmacsInsertMode = lambda: emacs_insert_mode ViMode = lambda: vi_mode IsSearching", "emacs_selection_mode ViDigraphMode = lambda: vi_digraph_mode ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode ViSelectionMode", "to have key bindings that rely on this file.) \"\"\"", "= lambda: control_is_searchable EmacsSelectionMode = lambda: emacs_selection_mode ViDigraphMode = lambda:", "keep this file. 
(Many people are going to have key", "\"\"\" from __future__ import unicode_literals from .app import * __all__", "that rely on this file.) \"\"\" from __future__ import unicode_literals", "'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode',", "'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching',", "renderer_height_is_known ViNavigationMode = lambda: vi_navigation_mode InPasteMode = lambda: in_paste_mode EmacsMode", "has_validation_error HasArg = lambda: has_arg IsDone = lambda: is_done RendererHeightIsKnown", "Old names. 'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline',", "lambda: emacs_mode EmacsInsertMode = lambda: emacs_insert_mode ViMode = lambda: vi_mode", "in_paste_mode EmacsMode = lambda: emacs_mode EmacsInsertMode = lambda: emacs_insert_mode ViMode", "= lambda: vi_insert_multiple_mode ViInsertMode = lambda: vi_insert_mode HasSelection = lambda:", "RendererHeightIsKnown = lambda: renderer_height_is_known ViNavigationMode = lambda: vi_navigation_mode InPasteMode =", "has_focus # No lambda here! (Has_focus is callable that returns", "# Old names. 'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly',", "lambda: is_searching HasSearch = lambda: is_searching ControlIsSearchable = lambda: control_is_searchable", "here! (Has_focus is callable that returns a callable.) 
InEditingMode =", "EmacsMode = lambda: emacs_mode EmacsInsertMode = lambda: emacs_insert_mode ViMode =", "HasArg = lambda: has_arg IsDone = lambda: is_done RendererHeightIsKnown =", "= lambda: has_arg IsDone = lambda: is_done RendererHeightIsKnown = lambda:", "HasCompletions = lambda: has_completions IsReadOnly = lambda: is_read_only IsMultiline =", "= lambda: has_validation_error HasArg = lambda: has_arg IsDone = lambda:", "lambda: has_completions IsReadOnly = lambda: is_read_only IsMultiline = lambda: is_multiline", "ViInsertMode = lambda: vi_insert_mode HasSelection = lambda: has_selection HasCompletions =", "IsReadOnly = lambda: is_read_only IsMultiline = lambda: is_multiline HasFocus =", "the original classnames for backwards compatibility. HasValidationError = lambda: has_validation_error", "HasSearch = lambda: is_searching ControlIsSearchable = lambda: control_is_searchable EmacsSelectionMode =", "For backwards-compatibility. keep this file. (Many people are going to", "lambda: in_paste_mode EmacsMode = lambda: emacs_mode EmacsInsertMode = lambda: emacs_insert_mode", "key bindings that rely on this file.) \"\"\" from __future__", "is_multiline HasFocus = has_focus # No lambda here! (Has_focus is", "vi_selection_mode ViReplaceMode = lambda: vi_replace_mode ViInsertMultipleMode = lambda: vi_insert_multiple_mode ViInsertMode", "lambda: is_read_only IsMultiline = lambda: is_multiline HasFocus = has_focus #", "[ # Old names. 'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone',", "# No lambda here! (Has_focus is callable that returns a", "lambda: is_done RendererHeightIsKnown = lambda: renderer_height_is_known ViNavigationMode = lambda: vi_navigation_mode" ]
[ "is number of labs # nlecs is number of lecture", "sql) sql='''CREATE TABLE IF NOT EXISTS spaces ( spid INTEGER", "Semaster Planning Version 0.03 2018 <NAME> Sh<EMAIL> ''' import xdb", "# nlabs is number of labs # nlecs is number", "(drop): sql=\"DROP TABLE IF EXISTS spaces;\" success, count=xdb.runSQL(cursor, sql) sql='''CREATE", "spaces;\" success, count=xdb.runSQL(cursor, sql) sql='''CREATE TABLE IF NOT EXISTS spaces", "<NAME> Sh<EMAIL> ''' import xdb def crt_spaces_table(cursor,drop=False): if (drop): sql=\"DROP", "(name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' success, count=xdb.runSQL_stmts(cursor, sqls,delay)", "LIMIT 1\"; success, count=xdb.runSQL(cursor, sql) if (count > 0): print(\"spaces", "PRIMARY KEY AUTOINCREMENT, name varchar(30), sptype INTEGER, fitness INTEGER, gid", "str(i+1) sptype=1 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{},", "spid INTEGER PRIMARY KEY AUTOINCREMENT, name varchar(30), sptype INTEGER, fitness", "INTEGER, gid INTEGER DEFAULT 0, semid INTEGER DEFAULT 0) '''", "+ str(i+1) sptype=2 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+", "+'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');'", "'\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' success, count=xdb.runSQL_stmts(cursor, sqls,delay) return success, count", "== \"__main__\": delay=0.05 conn=xdb.opendb('genetic56.db') cursor =conn.cursor() # create a cursor", "#spaces.py ''' AlgoHack Genetic Algorithm for University Semaster Planning Version", "KEY AUTOINCREMENT, name varchar(30), sptype INTEGER, fitness INTEGER, gid INTEGER", "of lecture halls # if gid =0 common for all", "IF EXISTS spaces;\" success, count=xdb.runSQL(cursor, sql) sql='''CREATE TABLE IF NOT", "INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, 
sptype,fitness,gid,semid) +');' for", "IF NOT EXISTS spaces ( spid INTEGER PRIMARY KEY AUTOINCREMENT,", "Hall \" + str(i+1) sptype=1 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid)", "def insert_spaces(cursor,nlect,nlabs,gid,semid, delay): # nlabs is number of labs #", "delay): # nlabs is number of labs # nlecs is", "number of labs # nlecs is number of lecture halls", "# create a cursor object success=crt_spaces_table(cursor, True) # create spaces", "gid =0 common for all groups else dedicated # if", "VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' for i in range", "spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' success, count=xdb.runSQL_stmts(cursor,", "def crt_spaces_table(cursor,drop=False): if (drop): sql=\"DROP TABLE IF EXISTS spaces;\" success,", "crt_spaces_table(cursor,drop=False): if (drop): sql=\"DROP TABLE IF EXISTS spaces;\" success, count=xdb.runSQL(cursor,", "0) ''' success, count=xdb.runSQL(cursor, sql) return success def insert_spaces(cursor,nlect,nlabs,gid,semid, delay):", "AUTOINCREMENT, name varchar(30), sptype INTEGER, fitness INTEGER, gid INTEGER DEFAULT", "TABLE IF NOT EXISTS spaces ( spid INTEGER PRIMARY KEY", "else dedicated # if semid=0 common for all semasters else", "spaces ( spid INTEGER PRIMARY KEY AUTOINCREMENT, name varchar(30), sptype", "EXISTS spaces ( spid INTEGER PRIMARY KEY AUTOINCREMENT, name varchar(30),", "Version 0.03 2018 <NAME> Sh<EMAIL> ''' import xdb def crt_spaces_table(cursor,drop=False):", "spaces table #dedicated lecture hall, lab for group and semaster", "''' AlgoHack Genetic Algorithm for University Semaster Planning Version 0.03", "dedicated # if semid=0 common for all semasters else dedicated", "0, semid INTEGER DEFAULT 0) ''' success, count=xdb.runSQL(cursor, sql) return", "'\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' for i in range (nlabs): name=\"Lab", 
"count=xdb.runSQL_stmts(cursor, sqls,delay) return success, count if __name__ == \"__main__\": delay=0.05", "xdb def crt_spaces_table(cursor,drop=False): if (drop): sql=\"DROP TABLE IF EXISTS spaces;\"", "group and semaster success, count =insert_spaces(cursor,1,1,1,1,delay) # generate records xdb.commit(conn)", "delay=0.05 conn=xdb.opendb('genetic56.db') cursor =conn.cursor() # create a cursor object success=crt_spaces_table(cursor,", "\" + str(i+1) sptype=2 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES", "is number of lecture halls # if gid =0 common", "success, count=xdb.runSQL(cursor, sql) return success def insert_spaces(cursor,nlect,nlabs,gid,semid, delay): # nlabs", "( spid INTEGER PRIMARY KEY AUTOINCREMENT, name varchar(30), sptype INTEGER,", "=0 common for all groups else dedicated # if semid=0", "print(\"spaces table: Records exist\") return False, 0 sqls=\"\" fitness=1 for", "sptype,fitness,gid,semid) +');' for i in range (nlabs): name=\"Lab \" +", "\"__main__\": delay=0.05 conn=xdb.opendb('genetic56.db') cursor =conn.cursor() # create a cursor object", "all groups else dedicated # if semid=0 common for all", "dedicated sql=\"SELECT * FROM spaces LIMIT 1\"; success, count=xdb.runSQL(cursor, sql)", "if gid =0 common for all groups else dedicated #", "> 0): print(\"spaces table: Records exist\") return False, 0 sqls=\"\"", "Sh<EMAIL> ''' import xdb def crt_spaces_table(cursor,drop=False): if (drop): sql=\"DROP TABLE", "+');' success, count=xdb.runSQL_stmts(cursor, sqls,delay) return success, count if __name__ ==", "(count > 0): print(\"spaces table: Records exist\") return False, 0", "sptype=1 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name,", "# create spaces table #dedicated lecture hall, lab for group", "range (nlect): name=\"Lect Hall \" + str(i+1) sptype=1 sqls=sqls +'INSERT", "count if __name__ == \"__main__\": delay=0.05 conn=xdb.opendb('genetic56.db') cursor 
=conn.cursor() #", "sptype INTEGER, fitness INTEGER, gid INTEGER DEFAULT 0, semid INTEGER", "object success=crt_spaces_table(cursor, True) # create spaces table #dedicated lecture hall,", "University Semaster Planning Version 0.03 2018 <NAME> Sh<EMAIL> ''' import", "EXISTS spaces;\" success, count=xdb.runSQL(cursor, sql) sql='''CREATE TABLE IF NOT EXISTS", "if __name__ == \"__main__\": delay=0.05 conn=xdb.opendb('genetic56.db') cursor =conn.cursor() # create", "i in range (nlabs): name=\"Lab \" + str(i+1) sptype=2 sqls=sqls", "cursor =conn.cursor() # create a cursor object success=crt_spaces_table(cursor, True) #", "0): print(\"spaces table: Records exist\") return False, 0 sqls=\"\" fitness=1", "nlabs is number of labs # nlecs is number of", "DEFAULT 0, semid INTEGER DEFAULT 0) ''' success, count=xdb.runSQL(cursor, sql)", "spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' for i", "name varchar(30), sptype INTEGER, fitness INTEGER, gid INTEGER DEFAULT 0,", "lecture halls # if gid =0 common for all groups", "halls # if gid =0 common for all groups else", "table: Records exist\") return False, 0 sqls=\"\" fitness=1 for i", "=conn.cursor() # create a cursor object success=crt_spaces_table(cursor, True) # create", "sql='''CREATE TABLE IF NOT EXISTS spaces ( spid INTEGER PRIMARY", "insert_spaces(cursor,nlect,nlabs,gid,semid, delay): # nlabs is number of labs # nlecs", "INTEGER DEFAULT 0, semid INTEGER DEFAULT 0) ''' success, count=xdb.runSQL(cursor,", "labs # nlecs is number of lecture halls # if", "of labs # nlecs is number of lecture halls #", "''' success, count=xdb.runSQL(cursor, sql) return success def insert_spaces(cursor,nlect,nlabs,gid,semid, delay): #", "<filename>genetic/spaces.py<gh_stars>1-10 #spaces.py ''' AlgoHack Genetic Algorithm for University Semaster Planning", "# if semid=0 common for all semasters else dedicated sql=\"SELECT", "exist\") return False, 0 sqls=\"\" fitness=1 for i in 
range", "import xdb def crt_spaces_table(cursor,drop=False): if (drop): sql=\"DROP TABLE IF EXISTS", "semid INTEGER DEFAULT 0) ''' success, count=xdb.runSQL(cursor, sql) return success", "a cursor object success=crt_spaces_table(cursor, True) # create spaces table #dedicated", "success, count=xdb.runSQL(cursor, sql) if (count > 0): print(\"spaces table: Records", "Records exist\") return False, 0 sqls=\"\" fitness=1 for i in", "return success def insert_spaces(cursor,nlect,nlabs,gid,semid, delay): # nlabs is number of", "+');' for i in range (nlabs): name=\"Lab \" + str(i+1)", "DEFAULT 0) ''' success, count=xdb.runSQL(cursor, sql) return success def insert_spaces(cursor,nlect,nlabs,gid,semid,", "conn=xdb.opendb('genetic56.db') cursor =conn.cursor() # create a cursor object success=crt_spaces_table(cursor, True)", "fitness INTEGER, gid INTEGER DEFAULT 0, semid INTEGER DEFAULT 0)", "__name__ == \"__main__\": delay=0.05 conn=xdb.opendb('genetic56.db') cursor =conn.cursor() # create a", "and semaster success, count =insert_spaces(cursor,1,1,1,1,delay) # generate records xdb.commit(conn) xdb.closedb(conn)", "1\"; success, count=xdb.runSQL(cursor, sql) if (count > 0): print(\"spaces table:", "(name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' for i in", "Algorithm for University Semaster Planning Version 0.03 2018 <NAME> Sh<EMAIL>", "sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid)", "(nlect): name=\"Lect Hall \" + str(i+1) sptype=1 sqls=sqls +'INSERT INTO", "2018 <NAME> Sh<EMAIL> ''' import xdb def crt_spaces_table(cursor,drop=False): if (drop):", "name=\"Lect Hall \" + str(i+1) sptype=1 sqls=sqls +'INSERT INTO spaces", "{},{},{}'.format(name, sptype,fitness,gid,semid) +');' success, count=xdb.runSQL_stmts(cursor, sqls,delay) return success, count if", "sptype,fitness,gid,semid) +');' success, count=xdb.runSQL_stmts(cursor, 
sqls,delay) return success, count if __name__", "for all groups else dedicated # if semid=0 common for", "for i in range (nlabs): name=\"Lab \" + str(i+1) sptype=2", "sql) return success def insert_spaces(cursor,nlect,nlabs,gid,semid, delay): # nlabs is number", "FROM spaces LIMIT 1\"; success, count=xdb.runSQL(cursor, sql) if (count >", "success, count=xdb.runSQL_stmts(cursor, sqls,delay) return success, count if __name__ == \"__main__\":", "cursor object success=crt_spaces_table(cursor, True) # create spaces table #dedicated lecture", "gid INTEGER DEFAULT 0, semid INTEGER DEFAULT 0) ''' success,", "0 sqls=\"\" fitness=1 for i in range (nlect): name=\"Lect Hall", "lab for group and semaster success, count =insert_spaces(cursor,1,1,1,1,delay) # generate", "# if gid =0 common for all groups else dedicated", "True) # create spaces table #dedicated lecture hall, lab for", "Genetic Algorithm for University Semaster Planning Version 0.03 2018 <NAME>", "success=crt_spaces_table(cursor, True) # create spaces table #dedicated lecture hall, lab", "create a cursor object success=crt_spaces_table(cursor, True) # create spaces table", "hall, lab for group and semaster success, count =insert_spaces(cursor,1,1,1,1,delay) #", "NOT EXISTS spaces ( spid INTEGER PRIMARY KEY AUTOINCREMENT, name", "str(i+1) sptype=2 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{},", "number of lecture halls # if gid =0 common for", "sqls=\"\" fitness=1 for i in range (nlect): name=\"Lect Hall \"", "if (drop): sql=\"DROP TABLE IF EXISTS spaces;\" success, count=xdb.runSQL(cursor, sql)", "sql) if (count > 0): print(\"spaces table: Records exist\") return", "fitness=1 for i in range (nlect): name=\"Lect Hall \" +", "INTEGER PRIMARY KEY AUTOINCREMENT, name varchar(30), sptype INTEGER, fitness INTEGER,", "common for all groups else dedicated # if semid=0 common", "return False, 0 sqls=\"\" fitness=1 for i in range (nlect):", "''' import xdb def 
crt_spaces_table(cursor,drop=False): if (drop): sql=\"DROP TABLE IF", "in range (nlabs): name=\"Lab \" + str(i+1) sptype=2 sqls=sqls +'INSERT", "#dedicated lecture hall, lab for group and semaster success, count", "lecture hall, lab for group and semaster success, count =insert_spaces(cursor,1,1,1,1,delay)", "in range (nlect): name=\"Lect Hall \" + str(i+1) sptype=1 sqls=sqls", "INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' success,", "success, count=xdb.runSQL(cursor, sql) sql='''CREATE TABLE IF NOT EXISTS spaces (", "groups else dedicated # if semid=0 common for all semasters", "False, 0 sqls=\"\" fitness=1 for i in range (nlect): name=\"Lect", "0.03 2018 <NAME> Sh<EMAIL> ''' import xdb def crt_spaces_table(cursor,drop=False): if", "spaces LIMIT 1\"; success, count=xdb.runSQL(cursor, sql) if (count > 0):", "if semid=0 common for all semasters else dedicated sql=\"SELECT *", "for University Semaster Planning Version 0.03 2018 <NAME> Sh<EMAIL> '''", "create spaces table #dedicated lecture hall, lab for group and", "+ str(i+1) sptype=1 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+", "count=xdb.runSQL(cursor, sql) sql='''CREATE TABLE IF NOT EXISTS spaces ( spid", "range (nlabs): name=\"Lab \" + str(i+1) sptype=2 sqls=sqls +'INSERT INTO", "(nlabs): name=\"Lab \" + str(i+1) sptype=2 sqls=sqls +'INSERT INTO spaces", "if (count > 0): print(\"spaces table: Records exist\") return False,", "success, count if __name__ == \"__main__\": delay=0.05 conn=xdb.opendb('genetic56.db') cursor =conn.cursor()", "i in range (nlect): name=\"Lect Hall \" + str(i+1) sptype=1", "Planning Version 0.03 2018 <NAME> Sh<EMAIL> ''' import xdb def", "INTEGER, fitness INTEGER, gid INTEGER DEFAULT 0, semid INTEGER DEFAULT", "{},{},{}'.format(name, sptype,fitness,gid,semid) +');' for i in range (nlabs): name=\"Lab \"", "sqls,delay) return success, count if __name__ == \"__main__\": delay=0.05 
conn=xdb.opendb('genetic56.db')", "semid=0 common for all semasters else dedicated sql=\"SELECT * FROM", "sptype=2 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '\"{}\",{}, {},{},{}'.format(name,", "VALUES ('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' success, count=xdb.runSQL_stmts(cursor, sqls,delay) return", "varchar(30), sptype INTEGER, fitness INTEGER, gid INTEGER DEFAULT 0, semid", "* FROM spaces LIMIT 1\"; success, count=xdb.runSQL(cursor, sql) if (count", "sql=\"DROP TABLE IF EXISTS spaces;\" success, count=xdb.runSQL(cursor, sql) sql='''CREATE TABLE", "name=\"Lab \" + str(i+1) sptype=2 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid)", "table #dedicated lecture hall, lab for group and semaster success,", "AlgoHack Genetic Algorithm for University Semaster Planning Version 0.03 2018", "count=xdb.runSQL(cursor, sql) return success def insert_spaces(cursor,nlect,nlabs,gid,semid, delay): # nlabs is", "\" + str(i+1) sptype=1 sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES", "common for all semasters else dedicated sql=\"SELECT * FROM spaces", "semasters else dedicated sql=\"SELECT * FROM spaces LIMIT 1\"; success,", "# nlecs is number of lecture halls # if gid", "return success, count if __name__ == \"__main__\": delay=0.05 conn=xdb.opendb('genetic56.db') cursor", "for all semasters else dedicated sql=\"SELECT * FROM spaces LIMIT", "all semasters else dedicated sql=\"SELECT * FROM spaces LIMIT 1\";", "else dedicated sql=\"SELECT * FROM spaces LIMIT 1\"; success, count=xdb.runSQL(cursor,", "('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' success, count=xdb.runSQL_stmts(cursor, sqls,delay) return success,", "for group and semaster success, count =insert_spaces(cursor,1,1,1,1,delay) # generate records", "INTEGER DEFAULT 0) ''' success, count=xdb.runSQL(cursor, sql) return success def", "nlecs is number of lecture halls # if gid =0", "sql=\"SELECT * FROM 
spaces LIMIT 1\"; success, count=xdb.runSQL(cursor, sql) if", "success def insert_spaces(cursor,nlect,nlabs,gid,semid, delay): # nlabs is number of labs", "for i in range (nlect): name=\"Lect Hall \" + str(i+1)", "count=xdb.runSQL(cursor, sql) if (count > 0): print(\"spaces table: Records exist\")", "TABLE IF EXISTS spaces;\" success, count=xdb.runSQL(cursor, sql) sql='''CREATE TABLE IF", "('+ '\"{}\",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');' for i in range (nlabs):" ]
[ "self.frames() def __enter__(self): return self def __exit__(self, *args): self.release() def", "end+2], dtype=np.uint8), cv2.IMREAD_COLOR) if self.flip is not None: jpg =", "self.get_frame() self.ev.set() self.stream.close() def read(self): ''' while self.frame is None:", "None: time.sleep(.1) f = self.frame self.frame = None return f", "JPEG end if not end == -1: start = self.total_bytes.find(b'\\xff\\xd8')", "if hflip and vflip: self.flip = -1 elif hflip: self.flip", "cv2 import numpy as np import time import threading class", "is None: time.sleep(.1) f = self.frame self.frame = None return", "''' while self.frame is None: time.sleep(.1) f = self.frame self.frame", "self.total_bytes += self.stream.read(1024) end = self.total_bytes.find(b'\\xff\\xd9') # JPEG end if", "threading.Thread(target=self.run, daemon=True) self.running = True self.frame = None self.th.start() def", "import threading class ThreadedRemotePiCamera: def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False,", "def __enter__(self): return self def __exit__(self, *args): self.release() def __del__(self):", "pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False): if hflip and vflip: self.flip", "elif hflip: self.flip = 0 elif vflip: self.flip = 1", "dtype=np.uint8), cv2.IMREAD_COLOR) if self.flip is not None: jpg = cv2.flip(jpg,", "self.flip is not None: jpg = cv2.flip(jpg, self.flip) self.total_bytes =", "self.th.join() def frames(self): while True: yield self.read() def __iter__(self): return", "True self.frame = None self.th.start() def run(self): while self.running: self.frame", "def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False): if hflip and", "vflip=False): if hflip and vflip: self.flip = -1 elif hflip:", "self.flip = None self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,))) self.total_bytes =", "hflip and vflip: self.flip = -1 elif 
hflip: self.flip =", "__enter__(self): return self def __exit__(self, *args): self.release() def __del__(self): self.release()", "self.ev.clear() return self.frame def get_frame(self): while True: self.total_bytes += self.stream.read(1024)", "def get_frame(self): while True: self.total_bytes += self.stream.read(1024) end = self.total_bytes.find(b'\\xff\\xd9')", "= b'' self.ev = threading.Event() self.th = threading.Thread(target=self.run, daemon=True) self.running", "resolution=(320,240), framerate=10, hflip=False, vflip=False): if hflip and vflip: self.flip =", "self.total_bytes.find(b'\\xff\\xd8') # JPEG start jpg = cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR)", "= urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,))) self.total_bytes = b'' self.ev = threading.Event()", "b'' self.ev = threading.Event() self.th = threading.Thread(target=self.run, daemon=True) self.running =", "threading class ThreadedRemotePiCamera: def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False):", "return self.frame def get_frame(self): while True: self.total_bytes += self.stream.read(1024) end", "start jpg = cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR) if self.flip is", "self.frame is None: time.sleep(.1) f = self.frame self.frame = None", "0 elif vflip: self.flip = 1 else: self.flip = None", "is not None: jpg = cv2.flip(jpg, self.flip) self.total_bytes = self.total_bytes[end+2:]", "start = self.total_bytes.find(b'\\xff\\xd8') # JPEG start jpg = cv2.imdecode(np.fromstring(self.total_bytes[start: end+2],", "return jpg def release(self): self.running = False self.th.join() def frames(self):", "self.frame = self.get_frame() self.ev.set() self.stream.close() def read(self): ''' while self.frame", "while True: self.total_bytes += self.stream.read(1024) end = self.total_bytes.find(b'\\xff\\xd9') # JPEG", 
"((pi_address,)+resolution+(framerate,))) self.total_bytes = b'' self.ev = threading.Event() self.th = threading.Thread(target=self.run,", "self.frame = None self.th.start() def run(self): while self.running: self.frame =", "self.total_bytes = b'' self.ev = threading.Event() self.th = threading.Thread(target=self.run, daemon=True)", "self.flip = -1 elif hflip: self.flip = 0 elif vflip:", "# JPEG end if not end == -1: start =", "not None: jpg = cv2.flip(jpg, self.flip) self.total_bytes = self.total_bytes[end+2:] return", "False self.th.join() def frames(self): while True: yield self.read() def __iter__(self):", "None self.th.start() def run(self): while self.running: self.frame = self.get_frame() self.ev.set()", "self.ev = threading.Event() self.th = threading.Thread(target=self.run, daemon=True) self.running = True", "time import threading class ThreadedRemotePiCamera: def __init__(self, pi_address, resolution=(320,240), framerate=10,", "np import time import threading class ThreadedRemotePiCamera: def __init__(self, pi_address,", "elif vflip: self.flip = 1 else: self.flip = None self.stream", "self.total_bytes[end+2:] return jpg def release(self): self.running = False self.th.join() def", "__init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False): if hflip and vflip:", "f = self.frame self.frame = None return f ''' self.ev.wait()", "release(self): self.running = False self.th.join() def frames(self): while True: yield", "return self.frames() def __enter__(self): return self def __exit__(self, *args): self.release()", "f ''' self.ev.wait() self.ev.clear() return self.frame def get_frame(self): while True:", "else: self.flip = None self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,))) self.total_bytes", "= self.frame self.frame = None return f ''' self.ev.wait() self.ev.clear()", "threading.Event() self.th = threading.Thread(target=self.run, daemon=True) 
self.running = True self.frame =", "None: jpg = cv2.flip(jpg, self.flip) self.total_bytes = self.total_bytes[end+2:] return jpg", "jpg = cv2.flip(jpg, self.flip) self.total_bytes = self.total_bytes[end+2:] return jpg def", "jpg def release(self): self.running = False self.th.join() def frames(self): while", "None return f ''' self.ev.wait() self.ev.clear() return self.frame def get_frame(self):", "self.stream.read(1024) end = self.total_bytes.find(b'\\xff\\xd9') # JPEG end if not end", "= True self.frame = None self.th.start() def run(self): while self.running:", "daemon=True) self.running = True self.frame = None self.th.start() def run(self):", "self.ev.wait() self.ev.clear() return self.frame def get_frame(self): while True: self.total_bytes +=", "def release(self): self.running = False self.th.join() def frames(self): while True:", "yield self.read() def __iter__(self): return self.frames() def __enter__(self): return self", "self.flip = 1 else: self.flip = None self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d'", "self.stream.close() def read(self): ''' while self.frame is None: time.sleep(.1) f", "end == -1: start = self.total_bytes.find(b'\\xff\\xd8') # JPEG start jpg", "vflip: self.flip = 1 else: self.flip = None self.stream =", "= 1 else: self.flip = None self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' %", "return f ''' self.ev.wait() self.ev.clear() return self.frame def get_frame(self): while", "cv2.IMREAD_COLOR) if self.flip is not None: jpg = cv2.flip(jpg, self.flip)", "self.running = True self.frame = None self.th.start() def run(self): while", "self.th.start() def run(self): while self.running: self.frame = self.get_frame() self.ev.set() self.stream.close()", "+= self.stream.read(1024) end = self.total_bytes.find(b'\\xff\\xd9') # JPEG end if not", "== -1: start = self.total_bytes.find(b'\\xff\\xd8') # JPEG start jpg =", "self.flip) self.total_bytes = self.total_bytes[end+2:] return jpg 
def release(self): self.running =", "vflip: self.flip = -1 elif hflip: self.flip = 0 elif", "= -1 elif hflip: self.flip = 0 elif vflip: self.flip", "1 else: self.flip = None self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,)))", "get_frame(self): while True: self.total_bytes += self.stream.read(1024) end = self.total_bytes.find(b'\\xff\\xd9') #", "% ((pi_address,)+resolution+(framerate,))) self.total_bytes = b'' self.ev = threading.Event() self.th =", "urllib.request import cv2 import numpy as np import time import", "<reponame>hyansuper/flask-video-streaming import urllib.request import cv2 import numpy as np import", "import time import threading class ThreadedRemotePiCamera: def __init__(self, pi_address, resolution=(320,240),", "= None return f ''' self.ev.wait() self.ev.clear() return self.frame def", "= 0 elif vflip: self.flip = 1 else: self.flip =", "self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,))) self.total_bytes = b'' self.ev =", "self.ev.set() self.stream.close() def read(self): ''' while self.frame is None: time.sleep(.1)", "def run(self): while self.running: self.frame = self.get_frame() self.ev.set() self.stream.close() def", "= self.total_bytes[end+2:] return jpg def release(self): self.running = False self.th.join()", "def read(self): ''' while self.frame is None: time.sleep(.1) f =", "import numpy as np import time import threading class ThreadedRemotePiCamera:", "= None self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,))) self.total_bytes = b''", "run(self): while self.running: self.frame = self.get_frame() self.ev.set() self.stream.close() def read(self):", "self.frame def get_frame(self): while True: self.total_bytes += self.stream.read(1024) end =", "= None self.th.start() def run(self): while self.running: self.frame = 
self.get_frame()", "True: self.total_bytes += self.stream.read(1024) end = self.total_bytes.find(b'\\xff\\xd9') # JPEG end", "frames(self): while True: yield self.read() def __iter__(self): return self.frames() def", "class ThreadedRemotePiCamera: def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False): if", "ThreadedRemotePiCamera: def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False): if hflip", "''' self.ev.wait() self.ev.clear() return self.frame def get_frame(self): while True: self.total_bytes", "time.sleep(.1) f = self.frame self.frame = None return f '''", "self.read() def __iter__(self): return self.frames() def __enter__(self): return self def", "__iter__(self): return self.frames() def __enter__(self): return self def __exit__(self, *args):", "while True: yield self.read() def __iter__(self): return self.frames() def __enter__(self):", "= self.total_bytes.find(b'\\xff\\xd8') # JPEG start jpg = cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8),", "import urllib.request import cv2 import numpy as np import time", "self.th = threading.Thread(target=self.run, daemon=True) self.running = True self.frame = None", "self.total_bytes.find(b'\\xff\\xd9') # JPEG end if not end == -1: start", "self.running = False self.th.join() def frames(self): while True: yield self.read()", "hflip: self.flip = 0 elif vflip: self.flip = 1 else:", "True: yield self.read() def __iter__(self): return self.frames() def __enter__(self): return", "import cv2 import numpy as np import time import threading", "JPEG start jpg = cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR) if self.flip", "urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,))) self.total_bytes = b'' self.ev = threading.Event() self.th", "if not end == -1: start = self.total_bytes.find(b'\\xff\\xd8') # JPEG", "-1: start = 
self.total_bytes.find(b'\\xff\\xd8') # JPEG start jpg = cv2.imdecode(np.fromstring(self.total_bytes[start:", "self.frame self.frame = None return f ''' self.ev.wait() self.ev.clear() return", "not end == -1: start = self.total_bytes.find(b'\\xff\\xd8') # JPEG start", "self.flip = 0 elif vflip: self.flip = 1 else: self.flip", "self.running: self.frame = self.get_frame() self.ev.set() self.stream.close() def read(self): ''' while", "# JPEG start jpg = cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR) if", "cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR) if self.flip is not None: jpg", "-1 elif hflip: self.flip = 0 elif vflip: self.flip =", "= self.get_frame() self.ev.set() self.stream.close() def read(self): ''' while self.frame is", "self.total_bytes = self.total_bytes[end+2:] return jpg def release(self): self.running = False", "= cv2.flip(jpg, self.flip) self.total_bytes = self.total_bytes[end+2:] return jpg def release(self):", "= False self.th.join() def frames(self): while True: yield self.read() def", "= self.total_bytes.find(b'\\xff\\xd9') # JPEG end if not end == -1:", "if self.flip is not None: jpg = cv2.flip(jpg, self.flip) self.total_bytes", "as np import time import threading class ThreadedRemotePiCamera: def __init__(self,", "end if not end == -1: start = self.total_bytes.find(b'\\xff\\xd8') #", "def __iter__(self): return self.frames() def __enter__(self): return self def __exit__(self,", "= cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR) if self.flip is not None:", "while self.running: self.frame = self.get_frame() self.ev.set() self.stream.close() def read(self): '''", "def frames(self): while True: yield self.read() def __iter__(self): return self.frames()", "hflip=False, vflip=False): if hflip and vflip: self.flip = -1 elif", "read(self): ''' while self.frame is None: time.sleep(.1) f = self.frame", "while 
self.frame is None: time.sleep(.1) f = self.frame self.frame =", "and vflip: self.flip = -1 elif hflip: self.flip = 0", "None self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,))) self.total_bytes = b'' self.ev", "self.frame = None return f ''' self.ev.wait() self.ev.clear() return self.frame", "cv2.flip(jpg, self.flip) self.total_bytes = self.total_bytes[end+2:] return jpg def release(self): self.running", "= threading.Event() self.th = threading.Thread(target=self.run, daemon=True) self.running = True self.frame", "numpy as np import time import threading class ThreadedRemotePiCamera: def", "framerate=10, hflip=False, vflip=False): if hflip and vflip: self.flip = -1", "= threading.Thread(target=self.run, daemon=True) self.running = True self.frame = None self.th.start()", "end = self.total_bytes.find(b'\\xff\\xd9') # JPEG end if not end ==", "jpg = cv2.imdecode(np.fromstring(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR) if self.flip is not" ]
[ "deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0) #podsList =", "int(nodeStrategySubPartList[1]) totalWeight += weight print(\"weight={}\".format(weight)) else: nodeLabel = nodeStrategyPart print(\"label", "numOfReplicas)) print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) totalNumOfLables = len (CustomPodScheduleStrategy) labelNum =", "deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) if deploymentCustomSchedulingData != {}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace,", "= 'Ec2Spot' #get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): namespace = 'default' for", "#print(nodesListPerNodeLabel) #for nodeLabel, availableNodesData in nodesListPerNodeLabel.items(): #print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel, availableNodesData)) #exit(0)", "field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\" + node_name) stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"])", "NumOfPodsToBeScheduled = NumOfPodsToBeRunning - NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled, NodesList) except Exception", "print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData = [] CustomKubeSchedulingDeploymentData =", "node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList))) for nodeLabel,", "print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, 
deploymentCustomSchedulingData)) if deploymentCustomSchedulingData != {}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def", "== totalNumOfLables - 1: weightReplicas = numOfReplicas replicas = replicas", "node_name) stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] = lifecycle", "#lifecycle = 'OnDemand' #NodesList = get_node_available_nodes_list(lifecycle) #pprint(NodesList) NumOfPodsRunningAlready = 0", "sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"cpu_req_per\"] = (stats[\"cpu_req\"] / stats[\"cpu_alloc\"] *", "#selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector)) #name = 'ip-192-168-73-104.ec2.internal'", "print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas))", "'Running': stats[\"node_name\"] = pod['spec']['node_name'] runningPodsList.append(stats) elif stats[\"status\"] == 'Failed': failedPodsList.append(stats)", "{} annotations = deployment.metadata.annotations if 'UseCustomKubeScheduler' in annotations.keys(): if annotations['UseCustomKubeScheduler']", "= (selector) #resp = core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData = {}", "pprint import pprint from kubernetes.client.rest import ApiException from pint import", "#return pendingPodsList,runningPodsList,failedPodsList #return podsList def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData = {} #namespaceList", "if __name__ == '__main__': #ready_nodes = nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal'", "NumOfPodsToBeRunning: NumOfPodsToBeScheduled = NumOfPodsToBeRunning - 
NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled, NodesList) except", "= weight CustomPodScheduleStrategy [nodeLabel] = base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues", "deploymentCustomSchedulingData)) if deploymentCustomSchedulingData != {}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData):", "namespace) #pprint(ret) #main() #test() #testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle", "Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] = lifecycle pods = core_api.list_pod_for_all_namespaces(limit=max_pods,", "NodesList.items(): print(\"schedulePods Checking for free resources on node={} with cpu_free={}", "CustomPodScheduleStrategy.items(): weight = nodeLabelToWights[key] print(\"key: {} replicas={} weight={}, totalWeight={}\".format(key, replicas,", "e: pprint(e) elif NumOfAlreadyRunningPods > NumOfPodsToBeRunning: NumOfPodsToDeleted = NumOfAlreadyRunningPods -", "numOfReplicas: numOfReplicas -= base else: base = numOfReplicas numOfReplicas =", "in enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running pod i={} nodeLabel={} node_name={} name={}\".format(i,nodeLabel, p['node_name'],", "CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global runningPodsList global pendingPodsList global", "key, replicas in CustomPodScheduleStrategy.items(): weight = nodeLabelToWights[key] print(\"key: {} replicas={}", "runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList ) #return pendingPodsList,runningPodsList,failedPodsList #return podsList def", "#print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\" stats = {} 
cpureqs,cpulmts,memreqs,memlmts = [], [],", "sum(cpulmts) stats[\"cpu_req_per\"] = (stats[\"cpu_req\"] / stats[\"cpu_alloc\"] * 100) stats[\"cpu_lmt_per\"] =", "Strategy = annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas'] = deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas)", "= [] #pprint(podsList) #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle)", "numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy = {} nodeLabelToReplicas = {} nodeLabelToWights =", "#pprint(ret) #main() #test() #testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle =", "= NumOfAlreadyRunningPods - NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) except Exception as", "elif stats[\"status\"] == 'Running': stats[\"node_name\"] = pod['spec']['node_name'] runningPodsList.append(stats) elif stats[\"status\"]", "in NodesList.items(): print(\"schedulePods Checking for free resources on node={} with", "deploymentData = {} CustomPodScheduleStrategy = {} annotations = deployment.metadata.annotations if", "(failedPodsList): pprint(\"i={} failed pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={}", "strategy\") exit(1) else: numOfBaseValues += 1 base = int(nodeStrategySubPartList[1]) if", "= 0 for key, replicas in CustomPodScheduleStrategy.items(): weight = nodeLabelToWights[key]", "= (\"metadata.annotations.OnDemandBase=\" + name) # get deployment by namespace #resp", "pod_replicas, NumOfOnDemandPods, NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy", 
"grace_period_seconds=grace_period_seconds, body=body) pprint(response) def schedulePods(NumOfPodsToBeScheduled, NodesList): global pendingPodsList global failedPodsList", "nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList) #lifecycle = 'OnDemand' #NodesList = get_node_available_nodes_list(lifecycle) #pprint(NodesList)", "#main() #test() #testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle = 'OnDemand'", "if NumOfAlreadyRunningPods == NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning = {}. So", "runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for i,", "Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods < NumOfPodsToBeRunning: NumOfPodsToBeScheduled = NumOfPodsToBeRunning - NumOfAlreadyRunningPods", "for more than node strategy\") exit(1) else: numOfBaseValues += 1", "numOfReplicas -= base else: base = numOfReplicas numOfReplicas = 0", "#nodeLabelToReplicas [nodeLabel] = base nodeLabelToWights [nodeLabel] = weight CustomPodScheduleStrategy [nodeLabel]", "#data = {} for nodeLabel in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] = {}", "#deploymentData[deploymentName] = deployment.metadata.name Strategy = annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas'] = deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy']", "= stats[\"mem_alloc\"] - stats[\"mem_req\"] #stats[\"name\"] = node['metadata']['name'] #data.append(stats) availableNodesData[node_name] =", "StrategyList = Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues = 0 for nodeStrategy in", "- 1: weightReplicas = numOfReplicas replicas = replicas + weightReplicas", "key={} 
value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel] = base nodeLabelToWights [nodeLabel] =", "node={}\".format(pod['name'], node)) res = scheduler(pod['name'], node, namespace) pprint(res) stats['cpu_free'] =", "pod in pods['items']: #pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\" stats = {}", "core_api.list_node().to_dict()['items']: #pprint(node) node_labels = node['metadata']['labels'] if nodeLabelKey in node_labels.keys(): if", "running pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p in enumerate (pendingPodsList):", "'Ec2Spot'] for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0)", "p['name'])) for i, p in enumerate (failedPodsList): pprint(\"i={} failed pod_name={}", "replicas print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight,", "#RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle)", "= failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList ) #return", "def get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel #data = [] #data = {}", "== 'base': if numOfBaseValues != 0: print(\"base value cannot be", "= {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) return", "failedPodsList.append(stats) #podsList['pendingPodsList'] = pendingPodsList #podsList['runningPodsList'] = runningPodsList #podsList['failedPodsList'] = failedPodsList", "doing this computation within a 
k8s cluster #k8s.config.load_incluster_config() core_api =", "stats[\"name\"] = pod['metadata']['name'] stats[\"status\"] = pod['status']['phase'] if stats[\"status\"] == 'Pending':", "[], [], [] if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] ==", "nodeLabelParts[0] nodeLabelValue = nodeLabelParts[1] #selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1]", "#NodesList = get_node_available_nodes_list(lifecycle) #pprint(NodesList) NumOfPodsRunningAlready = 0 podsAlreadyRunningOnNodeLabelList = []", "def schedulePods(NumOfPodsToBeScheduled, NodesList): global pendingPodsList global failedPodsList namespace = 'default'", "if stats[\"status\"] == 'Pending': pendingPodsList.append(stats) elif stats[\"status\"] == 'Running': stats[\"node_name\"]", "(failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList))", "nodeStrategyPartsList: nodeStrategySubPartList = nodeStrategyPart.split('=') if nodeStrategySubPartList[0] == 'base': if numOfBaseValues", "pod in pods: #pprint(pod) for container in pod['spec']['containers']: res =", "{} cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] if deploymentName in", "= {} cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] if deploymentName", "= 1 #podsAlreadyRunningOnNodeLabelList = [] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList)", "print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy = {} nodeLabelToReplicas = {} nodeLabelToWights", "* 100) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"mem_req_per\"] =", "for deployment in 
resp.items: #pprint(deployment.metadata.annotations) #pprint(deployment) deploymentData = {} CustomPodScheduleStrategy", "sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"mem_req_per\"] = (stats[\"mem_req\"] / stats[\"mem_alloc\"] *", "= int(int(allocatable[\"pods\"]) * 1.5) field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\" +", "mem_free={}\".format(node, stats['cpu_free'], stats['mem_free'])) #pprint(node) if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req']", "totalWeight += weight print(\"weight={}\".format(weight)) else: nodeLabel = nodeStrategyPart print(\"label key={}", "stats = {} cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] if", "CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] = {} nodeLabelParts = nodeLabel.split('=') nodeLabelKey = nodeLabelParts[0]", "sysdig_metric, \"aggregations\": { \"time\": \"timeAvg\", \"group\": \"avg\" } }] #scheduler_name", "print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) totalNumOfLables = len (CustomPodScheduleStrategy) labelNum = 0", "import json import os from pprint import pprint from kubernetes.client.rest", "nodeStrategyPartsList = nodeStrategy.split(',') base = 0 weight = 0 nodeLabel", "nodeLabelToReplicas = {} nodeLabelToWights = {} totalWeight = 0 StrategyList", "base else: base = numOfReplicas numOfReplicas = 0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif", "\"aggregations\": { \"time\": \"timeAvg\", \"group\": \"avg\" } }] #scheduler_name =", "in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName: for container in pod['spec']['containers']:", "#selector = \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector = (selector) #resp = core_api.list_node(field_selector=field_selector).to_dict()['items']", "resp = apis_api.list_namespaced_deployment(namespace=namespace) for deployment in resp.items: #pprint(deployment.metadata.annotations) 
#pprint(deployment) deploymentData", "len (runningPodsList) NumOfPodsPending = len (pendingPodsList) NumOfPodsFailed = len (failedPodsList)", "than node strategy\") exit(1) else: numOfBaseValues += 1 base =", "nodeLabelParts = nodeLabel.split('=') nodeLabelKey = nodeLabelParts[0] nodeLabelValue = nodeLabelParts[1] #selector", "= int(nodeStrategySubPartList[1]) totalWeight += weight print(\"weight={}\".format(weight)) else: nodeLabel = nodeStrategyPart", "= Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] = lifecycle pods =", "30 body = client.V1DeleteOptions() #body = {} pprint(\"deletePods i={} pod={}", "{}\".format(nodeStrategy)) nodeStrategyPartsList = nodeStrategy.split(',') base = 0 weight = 0", "podsList def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData = {} #namespaceList =[] namespacedataList =", "= apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp = apis_api.list_namespaced_deployment(namespace=namespace) for deployment in resp.items:", "labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas, labelNum, numOfReplicas)) CustomPodScheduleStrategy[key] = replicas print(\"CustomPodScheduleStrategy =", "get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel #data = [] #data = {} for", "{}. 
So no need to Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods < NumOfPodsToBeRunning:", "_preload_content=False) #tl = Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler(): #global pendingPodsList #global", "int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100)", "runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for", "failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for i, p in enumerate (runningPodsList): pprint(\"i={} running", "in CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={} on nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList) #lifecycle =", "weightReplicas print(\"weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas, labelNum, numOfReplicas)) CustomPodScheduleStrategy[key]", "max_pods = int(int(allocatable[\"pods\"]) * 1.5) field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\"", "annotations['UseCustomKubeScheduler'] == 'true': deploymentName = deployment.metadata.name numOfReplicas = deployment.spec.replicas #deploymentData[deploymentName]", "if annotations['UseCustomKubeScheduler'] == 'true': deploymentName = deployment.metadata.name numOfReplicas = deployment.spec.replicas", "sum(memlmts) stats[\"name\"] = pod['metadata']['name'] stats[\"status\"] = pod['status']['phase'] if stats[\"status\"] ==", "+= weight print(\"weight={}\".format(weight)) else: nodeLabel = nodeStrategyPart print(\"label key={} value={}\".format(nodeStrategySubPartList[0],", "#pprint(podsList) #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' 
#get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted,", "runningPodsList: if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList)", "body=client.V1Binding(metadata=meta, target=target) return core_api.create_namespaced_binding(namespace, body, _preload_content=False) #tl = Timeloop() <EMAIL>(interval=timedelta(seconds=10))", "i, p in enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running pod i={} nodeLabel={} node_name={}", "for node in core_api.list_node().to_dict()['items']: #pprint(node) node_labels = node['metadata']['labels'] if nodeLabelKey", "as e: pprint(e) elif NumOfAlreadyRunningPods > NumOfPodsToBeRunning: NumOfPodsToDeleted = NumOfAlreadyRunningPods", "schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'],", "i, p in enumerate (runningPodsList): pprint(\"i={} running pod_name={} node_name={}\".format(i,p['node_name'], p['name']))", "=[] #failedPodsList =[] #podsList = {} #namespace='default' #name='Ec2SpotK8sScheduler' #field_selector =", "\"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector = (selector) #resp = core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0)", "def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData = [] CustomKubeSchedulingDeploymentData = {} #namespace='default' #name", "node['metadata']['name'] allocatable = node['status']['allocatable'] max_pods = int(int(allocatable[\"pods\"]) * 1.5) field_selector", "pods={}. 
So skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return for i in range(NumOfPodsToBeScheduled):", "pod['spec']['scheduler_name'] == CustomSchedulerName: for container in pod['spec']['containers']: res = container['resources']", "stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"cpu_req_per\"] = (stats[\"cpu_req\"] /", "on nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList) #lifecycle = 'OnDemand' #NodesList = get_node_available_nodes_list(lifecycle)", "= node['metadata']['labels'] if nodeLabelKey in node_labels.keys(): if node_labels[nodeLabelKey] == nodeLabelValue:", "failedPodsList = [] runningPodsList =[] nodesListPerNodeLabel = {} Q_ =", "pprint(res) stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req'] stats['mem_free'] = stats['mem_free'] -", "# get deployment by namespace #resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp", "__all__ = [\"get_node_available_nodes_list\"] def get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel #data = []", "CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global runningPodsList global pendingPodsList global failedPodsList global nodesListPerNodeLabel", "ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList = [] failedPodsList = []", "nodeStrategySubPartList = nodeStrategyPart.split('=') if nodeStrategySubPartList[0] == 'base': if numOfBaseValues !=", "pendingPodsList = [] failedPodsList = [] runningPodsList =[] nodesListPerNodeLabel =", "podsAlreadyRunningOnNodeLabelList) except Exception as e: pprint(e) pendingPodsList = [] NumOfPodsFailed", "pprint(\"i={} failed pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle,", "= 
get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] = int", "#namespace='default' #name = 'nginx' name = '1' #field_selector = (\"metadata.name=\"", "resp.items: #pprint(deployment.metadata.annotations) #pprint(deployment) deploymentData = {} CustomPodScheduleStrategy = {} annotations", "ureg.Quantity def scheduler(name, node, namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node) meta=client.V1ObjectMeta() meta.name=name", "CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={} on nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList) #lifecycle = 'OnDemand'", "- deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100) #deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] -", "'__main__': #ready_nodes = nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node,", "\"net.http.request.time\" metrics = [{ \"id\": sysdig_metric, \"aggregations\": { \"time\": \"timeAvg\",", "\"time\": \"timeAvg\", \"group\": \"avg\" } }] #scheduler_name = \"Ec2SpotK8sScheduler\" CustomSchedulerName", "#lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList =", "enumerate (runningPodsList): pprint(\"i={} running pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p", "= sum(cpulmts) stats[\"cpu_req_per\"] = (stats[\"cpu_req\"] / stats[\"cpu_alloc\"] * 100) stats[\"cpu_lmt_per\"]", "deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) if 
deploymentCustomSchedulingData != {}:", "totalWeight, numOfReplicas)) return CustomPodScheduleStrategy __all__ = [\"get_node_available_nodes_list\"] def get_node_available_nodes_list(CustomSchedulingData): global", "= (\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\" + node_name) stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"]", "#deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName) #testlist() #tl.start(block=True) while", "0 for key, replicas in CustomPodScheduleStrategy.items(): weight = nodeLabelToWights[key] print(\"key:", "= core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] # compute the allocated resources cpureqs,cpulmts,memreqs,memlmts =", "#deploymentData['pod_replicas'] = deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy,", "node['status']['allocatable'] max_pods = int(int(allocatable[\"pods\"]) * 1.5) field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\" +", "pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p in enumerate (failedPodsList): pprint(\"i={}", "Sysdig API token>) sysdig_metric = \"net.http.request.time\" metrics = [{ \"id\":", "core_api = client.CoreV1Api() apis_api = client.AppsV1Api() #sdclient = SdcClient(<Your Sysdig", "== CustomSchedulerName: for container in pod['spec']['containers']: res = container['resources'] reqs", "sum(cpulmts) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"name\"] = pod['metadata']['name']", "global failedPodsList global nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) 
#exit(0) #namespace =", "#failedPodsList =[] #podsList = {} #namespace='default' #name='Ec2SpotK8sScheduler' #field_selector = (\"spec.scheduler_name=\"", "* 100) stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"] * 100) stats[\"mem_req\"]", "print(\"schedulePods Checking for free resources on node={} with cpu_free={} mem_free={}\".format(node,", "def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy = {} nodeLabelToReplicas", "runningPodsList #podsList['failedPodsList'] = failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList", "get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase'])", "len(pendingPodsList))) return for i in range(NumOfPodsToBeScheduled): pod = pendingPodsList[0] print(\"schedulePods", "apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp = apis_api.list_namespaced_deployment(namespace=namespace) for deployment in resp.items: #pprint(deployment.metadata.annotations)", "= [], [], [], [] for pod in pods: #pprint(pod)", "for nodeStrategyPart in nodeStrategyPartsList: nodeStrategySubPartList = nodeStrategyPart.split('=') if nodeStrategySubPartList[0] ==", "#podsList['runningPodsList'] = runningPodsList #podsList['failedPodsList'] = failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList,", "#runningPodsList = podsList['runningPodsList'] #pendingPodsList = podsList['pendingPodsList'] #failedPodsList = podsList['failedPodsList'] for", "OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} 
NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas,", "= \"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler' ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList =", "p in enumerate (runningPodsList): pprint(\"i={} running pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for", "is greater than number of pending pods={}. So skipping schedulePods\".format(NumOfPodsToBeScheduled,", "= NumOfPodsToBeRunning - NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled, NodesList) except Exception as", "'weight': weight = int(nodeStrategySubPartList[1]) totalWeight += weight print(\"weight={}\".format(weight)) else: nodeLabel", "nodesListPerNodeLabel[nodeLabel] = {} nodeLabelParts = nodeLabel.split('=') nodeLabelKey = nodeLabelParts[0] nodeLabelValue", "enumerate (pendingPodsList): pprint(\"i={} pending pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p", "range(0, NumOfPodsToDeleted): pod = podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds = 30 body =", "i in range(NumOfPodsToBeScheduled): pod = pendingPodsList[0] print(\"schedulePods Trying to schedule", "pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods = core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for pod", "int(int(allocatable[\"pods\"]) * 1.5) field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\" + node_name)", "= node['metadata']['name'] allocatable = node['status']['allocatable'] max_pods = int(int(allocatable[\"pods\"]) * 1.5)", "= \"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector)) #name = 'ip-192-168-73-104.ec2.internal' #selector = \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector))", "pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}\".format(OnDemandBase, 
OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods)) def", "get_node_available_nodes_list(lifecycle) #pprint(NodesList) NumOfPodsRunningAlready = 0 podsAlreadyRunningOnNodeLabelList = [] for podRunning", "for i in range(0, NumOfPodsToDeleted): pod = podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds =", "= 'nginx' name = '1' #field_selector = (\"metadata.name=\" + name)", "= (stats[\"cpu_req\"] / stats[\"cpu_alloc\"] * 100) stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"] /", "for container in pod['spec']['containers']: res = container['resources'] reqs = defaultdict(lambda:", "deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0) #podsList = getPodsListForDeployment(namespace, deploymentName) runningPodsList", "if node_labels[nodeLabelKey] == nodeLabelValue: stats = {} node_name = node['metadata']['name']", "computation within a k8s cluster #k8s.config.load_incluster_config() core_api = client.CoreV1Api() apis_api", "pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']: print(\"schedulePods scheduling pod={}", "get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData = {} #namespaceList =[] namespacedataList = core_api.list_namespace().to_dict()['items'] for", "#pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData = {} for node in core_api.list_node().to_dict()['items']: #pprint(node)", "= (\"spec.schedulerName=\" + CustomSchedulerName) pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods =", "numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy = {} nodeLabelToReplicas = {}", "def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global runningPodsList global pendingPodsList global failedPodsList global", "getPodsListForDeployment(namespace, 
deploymentName) NumOfPodsRunning = len (runningPodsList) NumOfPodsPending = len (pendingPodsList)", "= \"net.http.request.time\" metrics = [{ \"id\": sysdig_metric, \"aggregations\": { \"time\":", "= [] runningPodsList =[] nodesListPerNodeLabel = {} Q_ = ureg.Quantity", "deploymentCustomSchedulingData != {}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global runningPodsList", "numOfReplicas -= weightReplicas print(\"weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas, labelNum,", "CustomSchedulerName) pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods = core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for", "memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"mem_req\"] = sum(memreqs)", "#pprint(node) node_labels = node['metadata']['labels'] if nodeLabelKey in node_labels.keys(): if node_labels[nodeLabelKey]", "in namespacedataList: namespace = namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace) #namespaceList.append(name) print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData))", "#runningPodsList =[] #failedPodsList =[] #podsList = {} #namespace='default' #name='Ec2SpotK8sScheduler' #field_selector", "global nodesListPerNodeLabel #data = [] #data = {} for nodeLabel", "#exit(0) #namespace = 'default' #lifecycleList = ['OnDemand', 'Ec2Spot'] for deploymentName,", "= 0 StrategyList = Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues = 0 for", "NumOfPodsPending = len (pendingPodsList) NumOfPodsFailed = len (failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning,", 
"(runningPodsList): pprint(\"i={} running pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p in", "need to Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods < NumOfPodsToBeRunning: NumOfPodsToBeScheduled = NumOfPodsToBeRunning", "for i in range(NumOfPodsToBeScheduled): pod = pendingPodsList[0] print(\"schedulePods Trying to", "[] failedPodsList =[] getPodsListForDeployment(namespace, deploymentName) NumOfPodsRunning = len (runningPodsList) NumOfPodsPending", "e: pprint(e) pendingPodsList = [] NumOfPodsFailed = [] #pprint(podsList) #lifecycle", "runningPodsList, failedPodsList ) #return pendingPodsList,runningPodsList,failedPodsList #return podsList def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData", "pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName: for container in pod['spec']['containers']: res", "deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) except Exception as e: pprint(e) pendingPodsList = []", "NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req'])) for node, stats in NodesList.items(): print(\"schedulePods", "= core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for pod in pods['items']: #pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name'])) #return", "defaultdict(lambda: 0, res['requests'] or {}) lmts = defaultdict(lambda: 0, res['limits']", "#namespace = 'default' #lifecycleList = ['OnDemand', 'Ec2Spot'] for deploymentName, CustomSchedulingData", "SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy,", "nodeLabelParts[1] #selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector)) #name =", 
"#name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node, namespace) #pprint(ret) #main() #test() #testpod()", "+ CustomSchedulerName) field_selector = (\"spec.schedulerName=\" + CustomSchedulerName) pods = core_api.list_namespaced_pod(namespace=namespace,", "= stats[\"cpu_alloc\"] - stats[\"cpu_req\"] stats[\"mem_free\"] = stats[\"mem_alloc\"] - stats[\"mem_req\"] #stats[\"name\"]", "numOfReplicas numOfReplicas = 0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0] == 'weight': weight", "NumOfPodsRunningAlready = 0 podsAlreadyRunningOnNodeLabelList = [] for podRunning in runningPodsList:", "= deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas)", "return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage,", "#! 
/usr/bin/python3 import time import random import json import os", "namespace=namespace, grace_period_seconds=grace_period_seconds, body=body) pprint(response) def schedulePods(NumOfPodsToBeScheduled, NodesList): global pendingPodsList global", "in range(0, NumOfPodsToDeleted): pod = podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds = 30 body", "(deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100) #deploymentData['NumOfSpotPodsToBeRunning']", "= defaultdict(lambda: 0, res['limits'] or {}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"]))", "nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList) for i, p in", "pendingPodsList = [] NumOfPodsFailed = [] #pprint(podsList) #lifecycle = 'OnDemand'", "= pendingPodsList #podsList['runningPodsList'] = runningPodsList #podsList['failedPodsList'] = failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={}", "for nodeLabel, numOfReplicas in CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={} on nodeLabel={}\".format(numOfReplicas, nodeLabel))", "+ name) field_selector = (\"metadata.annotations.OnDemandBase=\" + name) # get deployment", "NumOfPodsFailed = len (failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList))", "weight={}, totalWeight={}\".format(key, replicas, weight, totalWeight)) if labelNum == totalNumOfLables -", "stats[\"mem_req_per\"] = (stats[\"mem_req\"] / stats[\"mem_alloc\"] * 100) stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"]", "availableNodesData = {} for node in core_api.list_node().to_dict()['items']: #pprint(node) node_labels =", "import os from 
pprint import pprint from kubernetes.client.rest import ApiException", "base = numOfReplicas numOfReplicas = 0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0] ==", "0, res['requests'] or {}) lmts = defaultdict(lambda: 0, res['limits'] or", "pprint(\"schedulePods NumOfPodsToBeScheduled={} is greater than number of pending pods={}. So", "= '' for nodeStrategyPart in nodeStrategyPartsList: nodeStrategySubPartList = nodeStrategyPart.split('=') if", "[], [], [], [] if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name']", "Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler(): #global pendingPodsList #global failedPodsList CustomKubeSchedulingClusterDeploymentData =", "k8s cluster #k8s.config.load_incluster_config() core_api = client.CoreV1Api() apis_api = client.AppsV1Api() #sdclient", "deployment.spec.replicas #deploymentData[deploymentName] = deployment.metadata.name Strategy = annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas'] = deployment.spec.replicas", "res = scheduler(pod['name'], node, namespace) pprint(res) stats['cpu_free'] = stats['cpu_free'] -", "= (stats[\"mem_req\"] / stats[\"mem_alloc\"] * 100) stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"] /", "{}) lmts = defaultdict(lambda: 0, res['limits'] or {}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"]))", "= [] failedPodsList = [] runningPodsList =[] nodesListPerNodeLabel = {}", "numOfBaseValues = 0 for nodeStrategy in StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList", "podRunning in runningPodsList: if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods =", "stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']: print(\"schedulePods scheduling pod={} onto the", "deployment.metadata.name numOfReplicas = deployment.spec.replicas 
#deploymentData[deploymentName] = deployment.metadata.name Strategy = annotations['CustomPodScheduleStrategy']", "if nodeStrategySubPartList[0] == 'base': if numOfBaseValues != 0: print(\"base value", "= [] CustomKubeSchedulingDeploymentData = {} #namespace='default' #name = 'nginx' name", "= client.AppsV1Api() #sdclient = SdcClient(<Your Sysdig API token>) sysdig_metric =", "data if __name__ == '__main__': #ready_nodes = nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt'", "#ret=scheduler(name, node, namespace) #pprint(ret) #main() #test() #testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment('", "runningPodsList = [] pendingPodsList = [] failedPodsList =[] getPodsListForDeployment(namespace, deploymentName)", "node_labels.keys(): if node_labels[nodeLabelKey] == nodeLabelValue: stats = {} node_name =", "container in pod['spec']['containers']: res = container['resources'] reqs = defaultdict(lambda: 0,", "'default' if NumOfPodsToBeScheduled > len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={} is greater than", "no need to Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods < NumOfPodsToBeRunning: NumOfPodsToBeScheduled =", "p in enumerate (failedPodsList): pprint(\"i={} failed pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={}", "= {} pprint(\"deletePods i={} pod={} NumOfPodsToDeleted={}\".format(i, pod['name'], NumOfPodsToDeleted )) response", "cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts)", "#check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot'", "NumOfAlreadyRunningPods == NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning = {}. 
So no", "'1' #field_selector = (\"metadata.name=\" + name) field_selector = (\"metadata.annotations.OnDemandBase=\" +", "#name = 'ip-192-168-73-104.ec2.internal' #selector = \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector = (selector)", "node_name = node['metadata']['name'] allocatable = node['status']['allocatable'] max_pods = int(int(allocatable[\"pods\"]) *", "CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData = [] CustomKubeSchedulingDeploymentData = {} #namespace='default'", "stats[\"cpu_alloc\"] * 100) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"mem_req_per\"]", "#pprint(data) return data if __name__ == '__main__': #ready_nodes = nodes_available()", "= sum(memlmts) stats[\"name\"] = pod['metadata']['name'] stats[\"status\"] = pod['status']['phase'] if stats[\"status\"]", "namespace = namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace) #namespaceList.append(name) print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData", "') #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted", "in annotations.keys(): if annotations['UseCustomKubeScheduler'] == 'true': deploymentName = deployment.metadata.name numOfReplicas", "in node_labels.keys(): if node_labels[nodeLabelKey] == nodeLabelValue: stats = {} node_name", "cannot be non-zero for more than node strategy\") exit(1) else:", "if nodeLabelKey in node_labels.keys(): if node_labels[nodeLabelKey] == nodeLabelValue: stats =", "this computation within a k8s cluster #k8s.config.load_incluster_config() core_api = client.CoreV1Api()", "stats[\"mem_alloc\"] * 100) stats[\"cpu_free\"] = stats[\"cpu_alloc\"] - 
stats[\"cpu_req\"] stats[\"mem_free\"] =", "+ node_name) stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] =", "mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req'])) for node, stats in NodesList.items():", "nodesListPerNodeLabel = {} Q_ = ureg.Quantity def scheduler(name, node, namespace):", "to Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods < NumOfPodsToBeRunning: NumOfPodsToBeScheduled = NumOfPodsToBeRunning -", "stats[\"status\"] == 'Running': stats[\"node_name\"] = pod['spec']['node_name'] runningPodsList.append(stats) elif stats[\"status\"] ==", "{} pprint(\"deletePods i={} pod={} NumOfPodsToDeleted={}\".format(i, pod['name'], NumOfPodsToDeleted )) response =", "[] failedPodsList = [] runningPodsList =[] nodesListPerNodeLabel = {} Q_", "= numOfReplicas numOfReplicas = 0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0] == 'weight':", "> NumOfPodsToBeRunning: NumOfPodsToDeleted = NumOfAlreadyRunningPods - NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList)", "= annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas'] = deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName]", "#NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList = [] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted,", "#get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): namespace = 'default' for i in", "client.CoreV1Api() apis_api = client.AppsV1Api() #sdclient = SdcClient(<Your Sysdig API token>)", "except Exception as e: pprint(e) pendingPodsList = [] NumOfPodsFailed =", "namespace = 'default' if 
NumOfPodsToBeScheduled > len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={} is", "[] #data = {} for nodeLabel in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] =", "#deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] =", "defaultdict from kubernetes import client, config, watch from timeloop import", "= \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector)) #name = 'ip-192-168-73-104.ec2.internal' #selector", "replicas, weight, totalWeight)) if labelNum == totalNumOfLables - 1: weightReplicas", "\"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector)) #name = 'ip-192-168-73-104.ec2.internal' #selector =", "failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for i, p in enumerate (runningPodsList): pprint(\"i={}", "annotations = deployment.metadata.annotations if 'UseCustomKubeScheduler' in annotations.keys(): if annotations['UseCustomKubeScheduler'] ==", "#global pendingPodsList #global failedPodsList CustomKubeSchedulingClusterDeploymentData = get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace,", "= 'default' #lifecycleList = ['OnDemand', 'Ec2Spot'] for deploymentName, CustomSchedulingData in", "labelNum, numOfReplicas)) CustomPodScheduleStrategy[key] = replicas print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues =", "def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): 
namespace = 'default' for i in range(0,", "\"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector)) #name = 'ip-192-168-73-104.ec2.internal' #selector = \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector", "{} replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas, labelNum, numOfReplicas)) CustomPodScheduleStrategy[key] = replicas", "pod['mem_req'] <= stats['mem_free']: print(\"schedulePods scheduling pod={} onto the node={}\".format(pod['name'], node))", "stats['cpu_free'], stats['mem_free'])) #pprint(node) if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <=", "* deploymentData['OnDemandAbovePercentage'] / 100) #deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData)", "{} Q_ = ureg.Quantity def scheduler(name, node, namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\",", "= nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node, namespace) #pprint(ret)", "+ \"spec.nodeName=\" + node_name) stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"])", "} }] #scheduler_name = \"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler' ureg = UnitRegistry()", "= stats nodesListPerNodeLabel[nodeLabel] = availableNodesData #print(nodesListPerNodeLabel) #for nodeLabel, availableNodesData in", "response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body) pprint(response) def schedulePods(NumOfPodsToBeScheduled, NodesList):", "'UseCustomKubeScheduler' in annotations.keys(): if annotations['UseCustomKubeScheduler'] == 'true': deploymentName = deployment.metadata.name", "= 0 podsAlreadyRunningOnNodeLabelList = [] 
for podRunning in runningPodsList: if", "stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"] / stats[\"mem_alloc\"] * 100) stats[\"cpu_free\"] = stats[\"cpu_alloc\"]", "client.AppsV1Api() #sdclient = SdcClient(<Your Sysdig API token>) sysdig_metric = \"net.http.request.time\"", "#print(\"selector={}\".format(selector)) #field_selector = (selector) #resp = core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData", "-= base else: base = numOfReplicas numOfReplicas = 0 print(\"base={}\".format(nodeStrategySubPartList[1]))", "= sum(cpulmts) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"name\"] =", "#global failedPodsList CustomKubeSchedulingClusterDeploymentData = get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace, deploymentCustomSchedulingData in", "i, p in enumerate (pendingPodsList): pprint(\"i={} pending pod_name={} node_name={}\".format(i,p['node_name'], p['name']))", "#print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for i, p in enumerate (runningPodsList):", "deployment in resp.items: #pprint(deployment.metadata.annotations) #pprint(deployment) deploymentData = {} CustomPodScheduleStrategy =", "= node['metadata']['name'] #data.append(stats) availableNodesData[node_name] = stats nodesListPerNodeLabel[nodeLabel] = availableNodesData #print(nodesListPerNodeLabel)", "timeloop import Timeloop from datetime import timedelta config.load_kube_config() #config.load_incluster_config() #", "name={}\".format(i,nodeLabel, p['node_name'], p['name'])) if NumOfAlreadyRunningPods == NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning", "numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy = 
{}\".format(CustomPodScheduleStrategy)) totalNumOfLables = len (CustomPodScheduleStrategy)", "resources cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] for pod in", "= [] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName =", "nodeStrategyPart in nodeStrategyPartsList: nodeStrategySubPartList = nodeStrategyPart.split('=') if nodeStrategySubPartList[0] == 'base':", "failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList ) #return pendingPodsList,runningPodsList,failedPodsList", "pod['mem_req'] pendingPodsList.remove(pod) break def getPodsListForDeployment(namespace, deploymentName): #global pendingPodsList #runningPodsList =[]", "nodesListPerNodeLabel[nodeLabel] = availableNodesData #print(nodesListPerNodeLabel) #for nodeLabel, availableNodesData in nodesListPerNodeLabel.items(): #print(\"nodeLabel={}", "NodesList): global pendingPodsList global failedPodsList namespace = 'default' if NumOfPodsToBeScheduled", "apis_api.list_namespaced_deployment(namespace=namespace) for deployment in resp.items: #pprint(deployment.metadata.annotations) #pprint(deployment) deploymentData = {}", "={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName) #testlist()", "elif nodeStrategySubPartList[0] == 'weight': weight = int(nodeStrategySubPartList[1]) totalWeight += weight", "* (weight/totalWeight)) replicas = replicas + weightReplicas labelNum += 1", "!= {}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global runningPodsList global", "p['name'])) for i, p in 
enumerate (pendingPodsList): pprint(\"i={} pending pod_name={}", "nodeLabel = '' for nodeStrategyPart in nodeStrategyPartsList: nodeStrategySubPartList = nodeStrategyPart.split('=')", "#CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}\".format(OnDemandBase,", "weight = int(nodeStrategySubPartList[1]) totalWeight += weight print(\"weight={}\".format(weight)) else: nodeLabel =", "numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) return CustomPodScheduleStrategy __all__ = [\"get_node_available_nodes_list\"] def get_node_available_nodes_list(CustomSchedulingData):", "collections import defaultdict from kubernetes import client, config, watch from", "(podsAlreadyRunningOnNodeLabelList): pprint(\"running pod i={} nodeLabel={} node_name={} name={}\".format(i,nodeLabel, p['node_name'], p['name'])) if", "'nginx' name = '1' #field_selector = (\"metadata.name=\" + name) field_selector", "meta.name=name body=client.V1Binding(metadata=meta, target=target) return core_api.create_namespaced_binding(namespace, body, _preload_content=False) #tl = Timeloop()", "weightReplicas labelNum += 1 numOfReplicas -= weightReplicas print(\"weightReplicas: {} replicas={}", "stats['mem_free']: print(\"schedulePods scheduling pod={} onto the node={}\".format(pod['name'], node)) res =", "(\"metadata.annotations.OnDemandBase=\" + name) # get deployment by namespace #resp =", "from pprint import pprint from kubernetes.client.rest import ApiException from pint", "def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData = {} #namespaceList =[] namespacedataList = core_api.list_namespace().to_dict()['items']", "in nodeStrategyPartsList: nodeStrategySubPartList = nodeStrategyPart.split('=') if nodeStrategySubPartList[0] == 'base': if", "= sum(memreqs) stats[\"mem_lmt\"] = 
sum(memlmts) stats[\"mem_req_per\"] = (stats[\"mem_req\"] / stats[\"mem_alloc\"]", "nodeLabelKey = nodeLabelParts[0] nodeLabelValue = nodeLabelParts[1] #selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector", "#data.append(stats) availableNodesData[node_name] = stats nodesListPerNodeLabel[nodeLabel] = availableNodesData #print(nodesListPerNodeLabel) #for nodeLabel,", "field_selector = (\"metadata.annotations.OnDemandBase=\" + name) # get deployment by namespace", "node_labels = node['metadata']['labels'] if nodeLabelKey in node_labels.keys(): if node_labels[nodeLabelKey] ==", "weightReplicas = int (numOfReplicas * (weight/totalWeight)) replicas = replicas +", "NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled, NodesList) except Exception as e: pprint(e) elif", "= nodeStrategyPart.split('=') if nodeStrategySubPartList[0] == 'base': if numOfBaseValues != 0:", "nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy", "print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) #exit(0) #namespace = 'default' #lifecycleList = ['OnDemand',", "UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList = [] failedPodsList = [] runningPodsList =[]", "get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData))", "= apis_api.list_namespaced_deployment(namespace=namespace) for deployment in resp.items: #pprint(deployment.metadata.annotations) #pprint(deployment) deploymentData =", "#lifecycle = 'Ec2Spot' 
#get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): namespace = 'default'", "StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList = nodeStrategy.split(',') base = 0 weight", "= availableNodesData #print(nodesListPerNodeLabel) #for nodeLabel, availableNodesData in nodesListPerNodeLabel.items(): #print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel,", "podsAlreadyRunningOnNodeLabelList): namespace = 'default' for i in range(0, NumOfPodsToDeleted): pod", "SdcClient(<Your Sysdig API token>) sysdig_metric = \"net.http.request.time\" metrics = [{", "NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList))) for nodeLabel, in NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0)", "numOfReplicas replicas = replicas + weightReplicas else: weightReplicas = int", "=[] nodesListPerNodeLabel = {} Q_ = ureg.Quantity def scheduler(name, node,", "= \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector = (selector) #resp = core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp))", "the allocated resources cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] for", "random import json import os from pprint import pprint from", "= stats['cpu_free'] - pod['cpu_req'] stats['mem_free'] = stats['mem_free'] - pod['mem_req'] pendingPodsList.remove(pod)", "scheduler(pod['name'], node, namespace) pprint(res) stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req'] stats['mem_free']", "100) stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"] / stats[\"mem_alloc\"] * 100) stats[\"cpu_free\"] =", "#pprint(NodesList) NumOfPodsRunningAlready = 0 podsAlreadyRunningOnNodeLabelList = [] for podRunning in", "len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={} is greater than number of pending pods={}.", "name = '1' #field_selector = (\"metadata.name=\" + name) 
field_selector =", "deployment.metadata.annotations if 'UseCustomKubeScheduler' in annotations.keys(): if annotations['UseCustomKubeScheduler'] == 'true': deploymentName", "stats[\"cpu_alloc\"] - stats[\"cpu_req\"] stats[\"mem_free\"] = stats[\"mem_alloc\"] - stats[\"mem_req\"] #stats[\"name\"] =", "#namespace='ecommerce' #ret=scheduler(name, node, namespace) #pprint(ret) #main() #test() #testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler()", "in range(NumOfPodsToBeScheduled): pod = pendingPodsList[0] print(\"schedulePods Trying to schedule i={}", "= {} nodeLabelToWights = {} totalWeight = 0 StrategyList =", "in pods['items']: #pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\" stats = {} cpureqs,cpulmts,memreqs,memlmts", "print(\"schedulePods scheduling pod={} onto the node={}\".format(pod['name'], node)) res = scheduler(pod['name'],", "\"group\": \"avg\" } }] #scheduler_name = \"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler' ureg", "totalNumOfLables = len (CustomPodScheduleStrategy) labelNum = 0 for key, replicas", "nodeLabelToWights [nodeLabel] = weight CustomPodScheduleStrategy [nodeLabel] = base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas,", "try: deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) except Exception as e: pprint(e) pendingPodsList =", "deployment by namespace #resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp = apis_api.list_namespaced_deployment(namespace=namespace)", "numOfReplicas in CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={} on nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList) #lifecycle", "= core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData = {} for node in", "nodeLabel, availableNodesData in nodesListPerNodeLabel.items(): #print(\"nodeLabel={} 
availableNodesData={}\".format(nodeLabel, availableNodesData)) #exit(0) #pprint(data) return", "replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas, labelNum, numOfReplicas)) CustomPodScheduleStrategy[key] = replicas print(\"CustomPodScheduleStrategy", "stats[\"mem_req\"] #stats[\"name\"] = node['metadata']['name'] #data.append(stats) availableNodesData[node_name] = stats nodesListPerNodeLabel[nodeLabel] =", "= ureg.Quantity def scheduler(name, node, namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node) meta=client.V1ObjectMeta()", "NumOfPodsToBeRunning = {}. So no need to Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods", "totalWeight={}\".format(key, replicas, weight, totalWeight)) if labelNum == totalNumOfLables - 1:", "1 #podsAlreadyRunningOnNodeLabelList = [] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx'", "i in range(0, NumOfPodsToDeleted): pod = podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds = 30", "failedPodsList =[] getPodsListForDeployment(namespace, deploymentName) NumOfPodsRunning = len (runningPodsList) NumOfPodsPending =", "{} nodeLabelParts = nodeLabel.split('=') nodeLabelKey = nodeLabelParts[0] nodeLabelValue = nodeLabelParts[1]", "pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList))) for", "deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): namespace = 'default' for i in range(0, NumOfPodsToDeleted):", "replicas + weightReplicas else: weightReplicas = int (numOfReplicas * (weight/totalWeight))", "ApiException from pint import UnitRegistry from collections import defaultdict from", "= 'Ec2Spot' #get_node_available_nodes_list(lifecycle) 
#RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList = []", "cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"cpu_req_per\"] =", "Exception as e: pprint(e) elif NumOfAlreadyRunningPods > NumOfPodsToBeRunning: NumOfPodsToDeleted =", "OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods))", "than number of pending pods={}. So skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return", "pendingPodsList global failedPodsList global nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) #exit(0) #namespace", "pod['name'], NumOfPodsToDeleted )) response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body) pprint(response)", "target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node) meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta, target=target) return core_api.create_namespaced_binding(namespace, body,", "CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName,", "deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={}", "len (CustomPodScheduleStrategy) labelNum = 0 for key, replicas in CustomPodScheduleStrategy.items():", "base = int(nodeStrategySubPartList[1]) if base <= numOfReplicas: 
numOfReplicas -= base", "= len (runningPodsList) NumOfPodsPending = len (pendingPodsList) NumOfPodsFailed = len", "= 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1", "= len (CustomPodScheduleStrategy) labelNum = 0 for key, replicas in", "import UnitRegistry from collections import defaultdict from kubernetes import client,", "#pprint(pod) for container in pod['spec']['containers']: res = container['resources'] reqs =", "len (pendingPodsList) NumOfPodsFailed = len (failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={}", "nodeStrategyPart.split('=') if nodeStrategySubPartList[0] == 'base': if numOfBaseValues != 0: print(\"base", "= podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds = 30 body = client.V1DeleteOptions() #body =", "{} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) totalNumOfLables =", "=[] #podsList = {} #namespace='default' #name='Ec2SpotK8sScheduler' #field_selector = (\"spec.scheduler_name=\" +", "#name = 'nginx' name = '1' #field_selector = (\"metadata.name=\" +", "/ stats[\"mem_alloc\"] * 100) stats[\"cpu_free\"] = stats[\"cpu_alloc\"] - stats[\"cpu_req\"] stats[\"mem_free\"]", "pendingPodsList #podsList['runningPodsList'] = runningPodsList #podsList['failedPodsList'] = failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={}", "pod['spec']['containers']: res = container['resources'] reqs = defaultdict(lambda: 0, res['requests'] or", "else: numOfBaseValues += 1 base = int(nodeStrategySubPartList[1]) if base <=", "NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy = {}", "#test() #testpod() 
#check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle = 'OnDemand' #lifecycle", "== '__main__': #ready_nodes = nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name,", "schedulePods(NumOfPodsToBeScheduled, NodesList) except Exception as e: pprint(e) elif NumOfAlreadyRunningPods >", "stats['cpu_free'] - pod['cpu_req'] stats['mem_free'] = stats['mem_free'] - pod['mem_req'] pendingPodsList.remove(pod) break", "100) #deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={},", "#k8s.config.load_incluster_config() core_api = client.CoreV1Api() apis_api = client.AppsV1Api() #sdclient = SdcClient(<Your", "totalWeight)) if labelNum == totalNumOfLables - 1: weightReplicas = numOfReplicas", "= defaultdict(lambda: 0, res['requests'] or {}) lmts = defaultdict(lambda: 0,", "return data if __name__ == '__main__': #ready_nodes = nodes_available() #pprint(ready_nodes)", "availableNodesData in nodesListPerNodeLabel.items(): #print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel, availableNodesData)) #exit(0) #pprint(data) return data", "replicas, labelNum, numOfReplicas)) CustomPodScheduleStrategy[key] = replicas print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues", "nodeLabel, numOfReplicas in CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={} on nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList)", "stats['mem_free'] = stats['mem_free'] - pod['mem_req'] pendingPodsList.remove(pod) break def getPodsListForDeployment(namespace, deploymentName):", "= deployment.metadata.name Strategy = annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas'] = 
deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] =", "def scheduler(name, node, namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node) meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta,", "#deploymentName='nginx' #deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName) #testlist() #tl.start(block=True) while True: RunEc2SpotCustomScheduler()", "\"timeAvg\", \"group\": \"avg\" } }] #scheduler_name = \"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler'", "weightReplicas = numOfReplicas replicas = replicas + weightReplicas else: weightReplicas", "== nodeLabelValue: stats = {} node_name = node['metadata']['name'] allocatable =", "#testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle = 'OnDemand' #lifecycle =", "'Pending': pendingPodsList.append(stats) elif stats[\"status\"] == 'Running': stats[\"node_name\"] = pod['spec']['node_name'] runningPodsList.append(stats)", "memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"mem_req\"]", "deploymentName) NumOfPodsRunning = len (runningPodsList) NumOfPodsPending = len (pendingPodsList) NumOfPodsFailed", "= Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler(): #global pendingPodsList #global failedPodsList CustomKubeSchedulingClusterDeploymentData", "p in enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running pod i={} nodeLabel={} node_name={} name={}\".format(i,nodeLabel,", "in runningPodsList: if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods = len", "* 100) stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"] / stats[\"mem_alloc\"] * 100) stats[\"cpu_free\"]", "=[] getPodsListForDeployment(namespace, 
deploymentName) NumOfPodsRunning = len (runningPodsList) NumOfPodsPending = len", "#selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector)) #name = 'ip-192-168-73-104.ec2.internal' #selector = \"metadata.name\"+\"=\"+name", "== 'Pending': pendingPodsList.append(stats) elif stats[\"status\"] == 'Running': stats[\"node_name\"] = pod['spec']['node_name']", "p['name'])) if NumOfAlreadyRunningPods == NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning = {}.", "stats[\"status\"] = pod['status']['phase'] if stats[\"status\"] == 'Pending': pendingPodsList.append(stats) elif stats[\"status\"]", "podsAlreadyRunningOnNodeLabelList = [] for podRunning in runningPodsList: if podRunning['node_name'] in", "(\"metadata.name=\" + name) field_selector = (\"metadata.annotations.OnDemandBase=\" + name) # get", "RunEc2SpotCustomScheduler(): #global pendingPodsList #global failedPodsList CustomKubeSchedulingClusterDeploymentData = get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for", "stats[\"cpu_alloc\"] * 100) stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"] * 100)", "in NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList = podsList['runningPodsList'] #pendingPodsList = podsList['pendingPodsList']", "/ 100) #deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData", "+ CustomSchedulerName) pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods = core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods))", "container['resources'] reqs = defaultdict(lambda: 0, res['requests'] or {}) lmts =", "pendingPodsList.append(stats) elif 
stats[\"status\"] == 'Running': stats[\"node_name\"] = pod['spec']['node_name'] runningPodsList.append(stats) elif", "= core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods = core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for pod in", "= {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) totalNumOfLables", "= runningPodsList #podsList['failedPodsList'] = failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList,", "labelNum == totalNumOfLables - 1: weightReplicas = numOfReplicas replicas =", "= len (failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={}", "failed pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList)))", "[] NumOfPodsFailed = [] #pprint(podsList) #lifecycle = 'OnDemand' #lifecycle =", "'Ec2Spot' #get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): namespace = 'default' for i", "for nodeLabel in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] = {} nodeLabelParts = nodeLabel.split('=')", "numOfReplicas)) CustomPodScheduleStrategy = {} nodeLabelToReplicas = {} nodeLabelToWights = {}", "for podRunning in runningPodsList: if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods", "podsList['runningPodsList'] #pendingPodsList = podsList['pendingPodsList'] 
#failedPodsList = podsList['failedPodsList'] for nodeLabel, numOfReplicas", "'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList", "(stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"] * 100) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] =", "availableNodesData={}\".format(nodeLabel, availableNodesData)) #exit(0) #pprint(data) return data if __name__ == '__main__':", "#scheduler_name = \"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler' ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList", "\"id\": sysdig_metric, \"aggregations\": { \"time\": \"timeAvg\", \"group\": \"avg\" } }]", "in core_api.list_node().to_dict()['items']: #pprint(node) node_labels = node['metadata']['labels'] if nodeLabelKey in node_labels.keys():", "#deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage']", "#for nodeLabel, availableNodesData in nodesListPerNodeLabel.items(): #print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel, availableNodesData)) #exit(0) #pprint(data)", "stats[\"mem_alloc\"] * 100) stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"] / stats[\"mem_alloc\"] * 100)", "runningPodsList =[] nodesListPerNodeLabel = {} Q_ = ureg.Quantity def scheduler(name,", "= numOfReplicas replicas = replicas + weightReplicas else: weightReplicas =", "{ \"time\": \"timeAvg\", \"group\": \"avg\" } }] #scheduler_name = \"Ec2SpotK8sScheduler\"", "failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList ) #return pendingPodsList,runningPodsList,failedPodsList #return podsList def get_custom_deployments():", "- NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled, NodesList) except Exception as e: pprint(e)", "stats[\"node_name\"] = pod['spec']['node_name'] 
runningPodsList.append(stats) elif stats[\"status\"] == 'Failed': failedPodsList.append(stats) #podsList['pendingPodsList']", "node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node, namespace) #pprint(ret) #main() #test() #testpod() #check_node_resources(node)", "= UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList = [] failedPodsList = [] runningPodsList", "= replicas + weightReplicas else: weightReplicas = int (numOfReplicas *", "= [], [], [], [] if deploymentName in pod['metadata']['name'] and", "#namespaceList =[] namespacedataList = core_api.list_namespace().to_dict()['items'] for namespaceData in namespacedataList: namespace", "= deployment.metadata.annotations if 'UseCustomKubeScheduler' in annotations.keys(): if annotations['UseCustomKubeScheduler'] == 'true':", "= {} nodeLabelParts = nodeLabel.split('=') nodeLabelKey = nodeLabelParts[0] nodeLabelValue =", "exit(1) else: numOfBaseValues += 1 base = int(nodeStrategySubPartList[1]) if base", "totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) totalNumOfLables = len", "0 weight = 0 nodeLabel = '' for nodeStrategyPart in", "Trying to schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled,", "getPodsListForDeployment(namespace, deploymentName): #global pendingPodsList #runningPodsList =[] #failedPodsList =[] #podsList =", "stats[\"mem_lmt\"] = sum(memlmts) stats[\"mem_req_per\"] = (stats[\"mem_req\"] / stats[\"mem_alloc\"] * 100)", "schedulePods(NumOfPodsToBeScheduled, NodesList): global pendingPodsList global failedPodsList namespace = 'default' if", "[], [] for pod in pods: #pprint(pod) for container in", "stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"name\"] = pod['metadata']['name'] stats[\"status\"]", "pprint(\"running pod i={} nodeLabel={} node_name={} 
name={}\".format(i,nodeLabel, p['node_name'], p['name'])) if NumOfAlreadyRunningPods", "node_name={}\".format(i,p['node_name'], p['name'])) for i, p in enumerate (failedPodsList): pprint(\"i={} failed", "[], [] if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName:", "= deployment.metadata.name numOfReplicas = deployment.spec.replicas #deploymentData[deploymentName] = deployment.metadata.name Strategy =", "pendingPodsList = [] failedPodsList =[] getPodsListForDeployment(namespace, deploymentName) NumOfPodsRunning = len", "= Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues = 0 for nodeStrategy in StrategyList:", "watch from timeloop import Timeloop from datetime import timedelta config.load_kube_config()", "NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList) for i, p in enumerate (podsAlreadyRunningOnNodeLabelList):", "from datetime import timedelta config.load_kube_config() #config.load_incluster_config() # doing this computation", "pending pods={}. 
So skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return for i in", "#failedPodsList = podsList['failedPodsList'] for nodeLabel, numOfReplicas in CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={}", "nodesListPerNodeLabel.items(): #print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel, availableNodesData)) #exit(0) #pprint(data) return data if __name__", "= [{ \"id\": sysdig_metric, \"aggregations\": { \"time\": \"timeAvg\", \"group\": \"avg\"", "in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList) for i, p", "NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) except Exception as e: pprint(e) pendingPodsList", "if 'UseCustomKubeScheduler' in annotations.keys(): if annotations['UseCustomKubeScheduler'] == 'true': deploymentName =", "totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) return CustomPodScheduleStrategy __all__ = [\"get_node_available_nodes_list\"] def", "failedPodsList global nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) #exit(0) #namespace = 'default'", "= 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): namespace", "(runningPodsList) NumOfPodsPending = len (pendingPodsList) NumOfPodsFailed = len (failedPodsList) #print(\"NumOfPodsRunning={}", "a k8s cluster #k8s.config.load_incluster_config() core_api = client.CoreV1Api() apis_api = client.AppsV1Api()", "NumOfPodsToBeScheduled > len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={} is greater than number of", "= deployment.spec.replicas #deploymentData[deploymentName] = deployment.metadata.name Strategy = annotations['CustomPodScheduleStrategy'] 
#deploymentData['pod_replicas'] =", "__name__ == '__main__': #ready_nodes = nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce'", "client.V1DeleteOptions() #body = {} pprint(\"deletePods i={} pod={} NumOfPodsToDeleted={}\".format(i, pod['name'], NumOfPodsToDeleted", "#field_selector = (selector) #resp = core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData =", "<= numOfReplicas: numOfReplicas -= base else: base = numOfReplicas numOfReplicas", "NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList))) for nodeLabel, in NodesList.keys(): pprint(\"node_name={}\".format(n))", "pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p in enumerate (pendingPodsList): pprint(\"i={}", "0, res['limits'] or {}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] =", "pod['metadata']['name'] stats[\"status\"] = pod['status']['phase'] if stats[\"status\"] == 'Pending': pendingPodsList.append(stats) elif", "sum(memlmts) stats[\"mem_req_per\"] = (stats[\"mem_req\"] / stats[\"mem_alloc\"] * 100) stats[\"mem_lmt_per\"] =", "NumOfPodsToBeRunning: NumOfPodsToDeleted = NumOfAlreadyRunningPods - NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) except", "labelNum = 0 for key, replicas in CustomPodScheduleStrategy.items(): weight =", "of pending pods={}. 
So skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return for i", "target=target) return core_api.create_namespaced_binding(namespace, body, _preload_content=False) #tl = Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def", "= deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={}", "= 'default' for i in range(0, NumOfPodsToDeleted): pod = podsAlreadyRunningOnNodeLabelList[i]", "for key, replicas in CustomPodScheduleStrategy.items(): weight = nodeLabelToWights[key] print(\"key: {}", "NumOfPodsFailed = [] #pprint(podsList) #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot'", "= [] #data = {} for nodeLabel in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel]", "print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) return CustomPodScheduleStrategy __all__", "podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName) #testlist() #tl.start(block=True) while True:", "number of pending pods={}. 
So skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return for", "nodeLabelToWights[key] print(\"key: {} replicas={} weight={}, totalWeight={}\".format(key, replicas, weight, totalWeight)) if", "int (numOfReplicas * (weight/totalWeight)) replicas = replicas + weightReplicas labelNum", "totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) totalNumOfLables = len (CustomPodScheduleStrategy) labelNum", "#get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList = [] #d ={'name':'nginx-66cb875766-vx6bp'}", "= stats['mem_free'] - pod['mem_req'] pendingPodsList.remove(pod) break def getPodsListForDeployment(namespace, deploymentName): #global", "pendingPodsList[0] print(\"schedulePods Trying to schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={}", "#resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp = apis_api.list_namespaced_deployment(namespace=namespace) for deployment in", "base nodeLabelToWights [nodeLabel] = weight CustomPodScheduleStrategy [nodeLabel] = base print(\"nodeLabelToReplicas={}", "and pod['spec']['scheduler_name'] == CustomSchedulerName: for container in pod['spec']['containers']: res =", "if deploymentCustomSchedulingData != {}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global", "for pod in pods['items']: #pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\" stats =", "else: weightReplicas = int (numOfReplicas * (weight/totalWeight)) replicas = replicas", "annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas'] = deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) 
CustomKubeSchedulingDeploymentData[deploymentName] =", "= 30 body = client.V1DeleteOptions() #body = {} pprint(\"deletePods i={}", "#return podsList def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData = {} #namespaceList =[] namespacedataList", "p['node_name'], p['name'])) if NumOfAlreadyRunningPods == NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning =", "return CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData = [] CustomKubeSchedulingDeploymentData = {}", "def getPodsListForDeployment(namespace, deploymentName): #global pendingPodsList #runningPodsList =[] #failedPodsList =[] #podsList", "CustomKubeSchedulingDeploymentData = {} #namespace='default' #name = 'nginx' name = '1'", "name=node) meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta, target=target) return core_api.create_namespaced_binding(namespace, body, _preload_content=False) #tl", "\"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler' ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList = []", "= {} #namespace='default' #name = 'nginx' name = '1' #field_selector", "failedPodsList ) #return pendingPodsList,runningPodsList,failedPodsList #return podsList def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData =", "#pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\" stats = {} cpureqs,cpulmts,memreqs,memlmts = [],", "lmts = defaultdict(lambda: 0, res['limits'] or {}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"]))", "import pprint from kubernetes.client.rest import ApiException from pint import UnitRegistry", "#podsAlreadyRunningOnNodeLabelList = [] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) 
#deploymentName='nginx' #deploymentName", "field_selector=field_selector).to_dict() #pods = core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for pod in pods['items']: #pprint(pod)", "print(\"weight={}\".format(weight)) else: nodeLabel = nodeStrategyPart print(\"label key={} value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas", "nodeLabelToWights)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy =", "node in core_api.list_node().to_dict()['items']: #pprint(node) node_labels = node['metadata']['labels'] if nodeLabelKey in", "= pod['spec']['node_name'] runningPodsList.append(stats) elif stats[\"status\"] == 'Failed': failedPodsList.append(stats) #podsList['pendingPodsList'] =", "json import os from pprint import pprint from kubernetes.client.rest import", "nodeStrategy in StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList = nodeStrategy.split(',') base =", "len(NodesList))) for nodeLabel, in NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList = podsList['runningPodsList']", "/ stats[\"cpu_alloc\"] * 100) stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"] *", "skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return for i in range(NumOfPodsToBeScheduled): pod =", "for free resources on node={} with cpu_free={} mem_free={}\".format(node, stats['cpu_free'], stats['mem_free']))", "(\"spec.schedulerName=\" + CustomSchedulerName) pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods = core_api.list_namespaced_pod(namespace=namespace).to_dict()", "in pod['spec']['containers']: res = container['resources'] reqs = defaultdict(lambda: 0, res['requests']", "'OnDemand' #NodesList = get_node_available_nodes_list(lifecycle) #pprint(NodesList) 
NumOfPodsRunningAlready = 0 podsAlreadyRunningOnNodeLabelList =", "global nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) #exit(0) #namespace = 'default' #lifecycleList", "memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"cpu_req_per\"]", "field_selector=field_selector).to_dict()['items'] # compute the allocated resources cpureqs,cpulmts,memreqs,memlmts = [], [],", "#config.load_incluster_config() # doing this computation within a k8s cluster #k8s.config.load_incluster_config()", "core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData = {} for node in core_api.list_node().to_dict()['items']:", "totalNumOfLables - 1: weightReplicas = numOfReplicas replicas = replicas +", "= {} node_name = node['metadata']['name'] allocatable = node['status']['allocatable'] max_pods =", "print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues = 0 for nodeStrategy in StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy))", "= [] for podRunning in runningPodsList: if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys():", "#print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for i, p", "core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods = core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for pod in pods['items']:", "res = container['resources'] reqs = defaultdict(lambda: 0, res['requests'] or {})", "pod={} onto the node={}\".format(pod['name'], node)) res = scheduler(pod['name'], node, namespace)", 
"\"spec.nodeName=\" + node_name) stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"]", "#getPodsListForDeployment(' ') #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler()", "core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for pod in pods['items']: #pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\"", "name) field_selector = (\"metadata.annotations.OnDemandBase=\" + name) # get deployment by", "#exit(0) #podsList = getPodsListForDeployment(namespace, deploymentName) runningPodsList = [] pendingPodsList =", "100) stats[\"cpu_free\"] = stats[\"cpu_alloc\"] - stats[\"cpu_req\"] stats[\"mem_free\"] = stats[\"mem_alloc\"] -", "1 base = int(nodeStrategySubPartList[1]) if base <= numOfReplicas: numOfReplicas -=", "= replicas print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues,", "print(\"base value cannot be non-zero for more than node strategy\")", "0: print(\"base value cannot be non-zero for more than node", "NumOfPodsToBeScheduled={} is greater than number of pending pods={}. 
So skipping", "CustomSchedulingData)) #exit(0) #podsList = getPodsListForDeployment(namespace, deploymentName) runningPodsList = [] pendingPodsList", "get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase']", "allocatable = node['status']['allocatable'] max_pods = int(int(allocatable[\"pods\"]) * 1.5) field_selector =", "+ name) # get deployment by namespace #resp = apis_api.list_namespaced_deployment(namespace=namespace,", "kind=\"Node\", name=node) meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta, target=target) return core_api.create_namespaced_binding(namespace, body, _preload_content=False)", "replicas in CustomPodScheduleStrategy.items(): weight = nodeLabelToWights[key] print(\"key: {} replicas={} weight={},", "#lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted =", "from timeloop import Timeloop from datetime import timedelta config.load_kube_config() #config.load_incluster_config()", "print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0) #podsList = getPodsListForDeployment(namespace, deploymentName) runningPodsList =", "= base nodeLabelToWights [nodeLabel] = weight CustomPodScheduleStrategy [nodeLabel] = base", "+= 1 numOfReplicas -= weightReplicas print(\"weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas,", "metrics = [{ \"id\": sysdig_metric, \"aggregations\": { \"time\": \"timeAvg\", \"group\":", "cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] for pod in pods:", "os from pprint import pprint from kubernetes.client.rest import ApiException from", "global runningPodsList global pendingPodsList global failedPodsList global 
nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace,", "within a k8s cluster #k8s.config.load_incluster_config() core_api = client.CoreV1Api() apis_api =", "in deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0) #podsList = getPodsListForDeployment(namespace, deploymentName)", "for i, p in enumerate (runningPodsList): pprint(\"i={} running pod_name={} node_name={}\".format(i,p['node_name'],", "stats[\"cpu_req\"] stats[\"mem_free\"] = stats[\"mem_alloc\"] - stats[\"mem_req\"] #stats[\"name\"] = node['metadata']['name'] #data.append(stats)", "NumOfPodsToDeleted )) response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body) pprint(response) def", "#podsList = {} #namespace='default' #name='Ec2SpotK8sScheduler' #field_selector = (\"spec.scheduler_name=\" + CustomSchedulerName)", "0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0] == 'weight': weight = int(nodeStrategySubPartList[1]) totalWeight", "'Failed': failedPodsList.append(stats) #podsList['pendingPodsList'] = pendingPodsList #podsList['runningPodsList'] = runningPodsList #podsList['failedPodsList'] =", "import random import json import os from pprint import pprint", "pod = pendingPodsList[0] print(\"schedulePods Trying to schedule i={} NumOfPodsToBeScheduled={} pod={}", "get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy = {} nodeLabelToReplicas =", "{} node_name = node['metadata']['name'] allocatable = node['status']['allocatable'] max_pods = int(int(allocatable[\"pods\"])", "#pprint(deployment.metadata.annotations) #pprint(deployment) deploymentData = {} CustomPodScheduleStrategy = {} annotations =", "pod={} NumOfPodsToDeleted={}\".format(i, pod['name'], 
NumOfPodsToDeleted )) response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds,", "stats nodesListPerNodeLabel[nodeLabel] = availableNodesData #print(nodesListPerNodeLabel) #for nodeLabel, availableNodesData in nodesListPerNodeLabel.items():", "#d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName)", "nodeLabel)) #pprint(podsList) #lifecycle = 'OnDemand' #NodesList = get_node_available_nodes_list(lifecycle) #pprint(NodesList) NumOfPodsRunningAlready", "availableNodesData)) #exit(0) #pprint(data) return data if __name__ == '__main__': #ready_nodes", "namespacedataList: namespace = namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace) #namespaceList.append(name) print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return", "= nodeStrategyPart print(\"label key={} value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel] = base", "config, watch from timeloop import Timeloop from datetime import timedelta", "[] runningPodsList =[] nodesListPerNodeLabel = {} Q_ = ureg.Quantity def", "#tl = Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler(): #global pendingPodsList #global failedPodsList", "= container['resources'] reqs = defaultdict(lambda: 0, res['requests'] or {}) lmts", "if base <= numOfReplicas: numOfReplicas -= base else: base =", "[nodeLabel] = base nodeLabelToWights [nodeLabel] = weight CustomPodScheduleStrategy [nodeLabel] =", "> len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={} is greater than number of pending", "if numOfBaseValues != 0: print(\"base value cannot be 
non-zero for", "from collections import defaultdict from kubernetes import client, config, watch", "pods['items']: #pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\" stats = {} cpureqs,cpulmts,memreqs,memlmts =", "numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) *", "else: base = numOfReplicas numOfReplicas = 0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0]", "NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req'])) for", "stats['mem_free'] - pod['mem_req'] pendingPodsList.remove(pod) break def getPodsListForDeployment(namespace, deploymentName): #global pendingPodsList", "weightReplicas else: weightReplicas = int (numOfReplicas * (weight/totalWeight)) replicas =", "nodeLabel={} node_name={} name={}\".format(i,nodeLabel, p['node_name'], p['name'])) if NumOfAlreadyRunningPods == NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods", "'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList): namespace =", "100) stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"] * 100) stats[\"mem_req\"] =", "numOfReplicas={}\".format(weightReplicas, replicas, labelNum, numOfReplicas)) CustomPodScheduleStrategy[key] = replicas print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy))", "stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"name\"]", "deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) #exit(0) #namespace = 'default' #lifecycleList = ['OnDemand', 'Ec2Spot']", "numOfReplicas)) return CustomPodScheduleStrategy __all__ = [\"get_node_available_nodes_list\"] def 
get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel", "CustomSchedulingData in deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0) #podsList = getPodsListForDeployment(namespace,", ") #return pendingPodsList,runningPodsList,failedPodsList #return podsList def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData = {}", "pint import UnitRegistry from collections import defaultdict from kubernetes import", "try: schedulePods(NumOfPodsToBeScheduled, NodesList) except Exception as e: pprint(e) elif NumOfAlreadyRunningPods", "== 'Running': stats[\"node_name\"] = pod['spec']['node_name'] runningPodsList.append(stats) elif stats[\"status\"] == 'Failed':", "p in enumerate (pendingPodsList): pprint(\"i={} pending pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for", "1 numOfReplicas -= weightReplicas print(\"weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas,", "failedPodsList CustomKubeSchedulingClusterDeploymentData = get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items():", "if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList) for", "{} nodeLabelToWights = {} totalWeight = 0 StrategyList = Strategy.split(':')", "nodeLabelValue = nodeLabelParts[1] #selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1] #print(\"selector={}\".format(selector))", "(\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\" + node_name) stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"]) 
stats[\"mem_alloc\"] =", "= nodeLabel.split('=') nodeLabelKey = nodeLabelParts[0] nodeLabelValue = nodeLabelParts[1] #selector =", "= (stats[\"mem_lmt\"] / stats[\"mem_alloc\"] * 100) stats[\"cpu_free\"] = stats[\"cpu_alloc\"] -", "print(\"weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas, labelNum, numOfReplicas)) CustomPodScheduleStrategy[key] =", "= [] NumOfPodsFailed = [] #pprint(podsList) #lifecycle = 'OnDemand' #lifecycle", "- pod['mem_req'] pendingPodsList.remove(pod) break def getPodsListForDeployment(namespace, deploymentName): #global pendingPodsList #runningPodsList", "pod['cpu_req'], pod['mem_req'])) for node, stats in NodesList.items(): print(\"schedulePods Checking for", "client, config, watch from timeloop import Timeloop from datetime import", "#pprint(podsList) #lifecycle = 'OnDemand' #NodesList = get_node_available_nodes_list(lifecycle) #pprint(NodesList) NumOfPodsRunningAlready =", "#pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node, namespace) #pprint(ret) #main() #test()", "+ weightReplicas labelNum += 1 numOfReplicas -= weightReplicas print(\"weightReplicas: {}", "in CustomPodScheduleStrategy.items(): weight = nodeLabelToWights[key] print(\"key: {} replicas={} weight={}, totalWeight={}\".format(key,", "deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global runningPodsList global pendingPodsList global failedPodsList", "(stats[\"cpu_req\"] / stats[\"cpu_alloc\"] * 100) stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"]", "CustomPodScheduleStrategy [nodeLabel] = base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues = {}", "to schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'],", "= sum(cpureqs) 
stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"cpu_req_per\"] = (stats[\"cpu_req\"] / stats[\"cpu_alloc\"]", "weight = 0 nodeLabel = '' for nodeStrategyPart in nodeStrategyPartsList:", "token>) sysdig_metric = \"net.http.request.time\" metrics = [{ \"id\": sysdig_metric, \"aggregations\":", "[] for podRunning in runningPodsList: if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning)", "node, namespace) pprint(res) stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req'] stats['mem_free'] =", "NumOfNodes={}\".format(lifecycle, len(NodesList))) for nodeLabel, in NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList =", "meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta, target=target) return core_api.create_namespaced_binding(namespace, body, _preload_content=False) #tl =", "grace_period_seconds = 30 body = client.V1DeleteOptions() #body = {} pprint(\"deletePods", "#deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName) #testlist() #tl.start(block=True) while True: RunEc2SpotCustomScheduler() time.sleep(10)", "#print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData)", "Checking for free resources on node={} with cpu_free={} mem_free={}\".format(node, stats['cpu_free'],", "field_selector=field_selector) resp = apis_api.list_namespaced_deployment(namespace=namespace) for deployment in resp.items: #pprint(deployment.metadata.annotations) #pprint(deployment)", "= sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] =", "for nodeLabel, in NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList = 
podsList['runningPodsList'] #pendingPodsList", "Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] = lifecycle pods = core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] # compute", "i={} pod={} NumOfPodsToDeleted={}\".format(i, pod['name'], NumOfPodsToDeleted )) response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace,", "CustomSchedulerName) field_selector = (\"spec.schedulerName=\" + CustomSchedulerName) pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict()", "= sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"name\"] = pod['metadata']['name'] stats[\"status\"] =", "CustomPodScheduleStrategy = {} annotations = deployment.metadata.annotations if 'UseCustomKubeScheduler' in annotations.keys():", "/ stats[\"mem_alloc\"] * 100) stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"] / stats[\"mem_alloc\"] *", "datetime import timedelta config.load_kube_config() #config.load_incluster_config() # doing this computation within", "== NumOfPodsToBeRunning = {}. 
So no need to Schedule\".format(NumOfAlreadyRunningPods)) elif", "node_name={} name={}\".format(i,nodeLabel, p['node_name'], p['name'])) if NumOfAlreadyRunningPods == NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods ==", "print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList = nodeStrategy.split(',') base = 0 weight =", "#exit(0) #pprint(data) return data if __name__ == '__main__': #ready_nodes =", "= {} #namespace='default' #name='Ec2SpotK8sScheduler' #field_selector = (\"spec.scheduler_name=\" + CustomSchedulerName) field_selector", "0 for nodeStrategy in StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList = nodeStrategy.split(',')", "= pod['metadata']['name'] stats[\"status\"] = pod['status']['phase'] if stats[\"status\"] == 'Pending': pendingPodsList.append(stats)", "print(\"label key={} value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel] = base nodeLabelToWights [nodeLabel]", "stats[\"cpu_req_per\"] = (stats[\"cpu_req\"] / stats[\"cpu_alloc\"] * 100) stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"]", "NumOfPodsToBeRunning - NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled, NodesList) except Exception as e:", "CustomPodScheduleStrategy = {} nodeLabelToReplicas = {} nodeLabelToWights = {} totalWeight", "node strategy\") exit(1) else: numOfBaseValues += 1 base = int(nodeStrategySubPartList[1])", "Exception as e: pprint(e) pendingPodsList = [] NumOfPodsFailed = []", "kubernetes import client, config, watch from timeloop import Timeloop from", "= client.CoreV1Api() apis_api = client.AppsV1Api() #sdclient = SdcClient(<Your Sysdig API", "weight = nodeLabelToWights[key] print(\"key: {} replicas={} weight={}, totalWeight={}\".format(key, replicas, weight,", "labelNum += 1 numOfReplicas -= weightReplicas print(\"weightReplicas: {} replicas={} labelNum={},", "#podsList = getPodsListForDeployment(namespace, deploymentName) runningPodsList = [] 
pendingPodsList = []", "= 0 weight = 0 nodeLabel = '' for nodeStrategyPart", "nodeLabel.split('=') nodeLabelKey = nodeLabelParts[0] nodeLabelValue = nodeLabelParts[1] #selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1]", "replicas = replicas + weightReplicas labelNum += 1 numOfReplicas -=", "for namespaceData in namespacedataList: namespace = namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace)", "free resources on node={} with cpu_free={} mem_free={}\".format(node, stats['cpu_free'], stats['mem_free'])) #pprint(node)", "CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) if deploymentCustomSchedulingData != {}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData)", "with cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req'])) for node, stats", "else: nodeLabel = nodeStrategyPart print(\"label key={} value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel]", "[\"get_node_available_nodes_list\"] def get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel #data = [] #data =", "CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas']", "== 'true': deploymentName = deployment.metadata.name numOfReplicas = deployment.spec.replicas #deploymentData[deploymentName] =", "the node={}\".format(pod['name'], node)) res = scheduler(pod['name'], node, namespace) pprint(res) stats['cpu_free']", "= int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] /", "= 
core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body) pprint(response) def schedulePods(NumOfPodsToBeScheduled, NodesList): global", "field_selector = (\"spec.schedulerName=\" + CustomSchedulerName) pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict() #pods", "So no need to Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods < NumOfPodsToBeRunning: NumOfPodsToBeScheduled", "(numOfReplicas * (weight/totalWeight)) replicas = replicas + weightReplicas labelNum +=", "get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData = [] CustomKubeSchedulingDeploymentData = {} #namespace='default' #name =", "= lifecycle pods = core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] # compute the allocated", "<= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']: print(\"schedulePods scheduling pod={} onto", "= base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues,", "#name='Ec2SpotK8sScheduler' #field_selector = (\"spec.scheduler_name=\" + CustomSchedulerName) field_selector = (\"spec.schedulerName=\" +", "pod['status']['phase'] if stats[\"status\"] == 'Pending': pendingPodsList.append(stats) elif stats[\"status\"] == 'Running':", "nodeStrategySubPartList[0] == 'weight': weight = int(nodeStrategySubPartList[1]) totalWeight += weight print(\"weight={}\".format(weight))", "#data = [] #data = {} for nodeLabel in CustomSchedulingData.keys():", "= (stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"] * 100) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"]", "stats[\"cpu_free\"] = stats[\"cpu_alloc\"] - stats[\"cpu_req\"] stats[\"mem_free\"] = stats[\"mem_alloc\"] - stats[\"mem_req\"]", "= {}. 
So no need to Schedule\".format(NumOfAlreadyRunningPods)) elif NumOfAlreadyRunningPods <", "#lifecycleList = ['OnDemand', 'Ec2Spot'] for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items(): print(\"deploymentName={}", "CustomPodScheduleStrategy[key] = replicas print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues = {} totalWeight={}", "config.load_kube_config() #config.load_incluster_config() # doing this computation within a k8s cluster", "NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas):", "pod['mem_req'])) for node, stats in NodesList.items(): print(\"schedulePods Checking for free", "'default' for i in range(0, NumOfPodsToDeleted): pod = podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds", "#resp = core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData = {} for node", "for i, p in enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running pod i={} nodeLabel={}", "{} replicas={} weight={}, totalWeight={}\".format(key, replicas, weight, totalWeight)) if labelNum ==", "<EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler(): #global pendingPodsList #global failedPodsList CustomKubeSchedulingClusterDeploymentData = get_custom_deployments()", "p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList))) for nodeLabel, in", "= scheduler(pod['name'], node, namespace) pprint(res) stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req']", "compute the allocated resources cpureqs,cpulmts,memreqs,memlmts = [], [], [], []", "in enumerate (pendingPodsList): pprint(\"i={} pending pod_name={} 
node_name={}\".format(i,p['node_name'], p['name'])) for i,", "global failedPodsList namespace = 'default' if NumOfPodsToBeScheduled > len(pendingPodsList): pprint(\"schedulePods", "[], [], [], [] for pod in pods: #pprint(pod) for", "pendingPodsList #runningPodsList =[] #failedPodsList =[] #podsList = {} #namespace='default' #name='Ec2SpotK8sScheduler'", "# compute the allocated resources cpureqs,cpulmts,memreqs,memlmts = [], [], [],", "-= weightReplicas print(\"weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}\".format(weightReplicas, replicas, labelNum, numOfReplicas))", "= (\"metadata.name=\" + name) field_selector = (\"metadata.annotations.OnDemandBase=\" + name) #", "replicas + weightReplicas labelNum += 1 numOfReplicas -= weightReplicas print(\"weightReplicas:", "for node, stats in NodesList.items(): print(\"schedulePods Checking for free resources", "availableNodesData #print(nodesListPerNodeLabel) #for nodeLabel, availableNodesData in nodesListPerNodeLabel.items(): #print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel, availableNodesData))", "enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running pod i={} nodeLabel={} node_name={} name={}\".format(i,nodeLabel, p['node_name'], p['name']))", "kubernetes.client.rest import ApiException from pint import UnitRegistry from collections import", "{}: CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData) def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData): global runningPodsList global pendingPodsList", "getPodsListForDeployment(namespace, deploymentName) runningPodsList = [] pendingPodsList = [] failedPodsList =[]", "#pprint(deployment) deploymentData = {} CustomPodScheduleStrategy = {} annotations = deployment.metadata.annotations", "on node={} with cpu_free={} mem_free={}\".format(node, stats['cpu_free'], stats['mem_free'])) #pprint(node) if pod['cpu_req']", "#namespaceList.append(name) 
print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData = [] CustomKubeSchedulingDeploymentData", "for i, p in enumerate (failedPodsList): pprint(\"i={} failed pod_name={} node_name={}\".format(i,p['node_name'],", "= pod['status']['phase'] if stats[\"status\"] == 'Pending': pendingPodsList.append(stats) elif stats[\"status\"] ==", "= len (pendingPodsList) NumOfPodsFailed = len (failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList))", "#stats[\"lifecycle\"] = lifecycle pods = core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] # compute the", "replicas = replicas + weightReplicas else: weightReplicas = int (numOfReplicas", "\"avg\" } }] #scheduler_name = \"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler' ureg =", "[] CustomKubeSchedulingDeploymentData = {} #namespace='default' #name = 'nginx' name =", "Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues = 0 for nodeStrategy in StrategyList: print(\"nodeStrategy:", "cluster #k8s.config.load_incluster_config() core_api = client.CoreV1Api() apis_api = client.AppsV1Api() #sdclient =", "#podsList['failedPodsList'] = failedPodsList #pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList )", "import time import random import json import os from pprint", "API token>) sysdig_metric = \"net.http.request.time\" metrics = [{ \"id\": sysdig_metric,", "by namespace #resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp = apis_api.list_namespaced_deployment(namespace=namespace) for", "- NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted, 
podsAlreadyRunningOnNodeLabelList) except Exception as e: pprint(e)", "!= 0: print(\"base value cannot be non-zero for more than", "and pod['mem_req'] <= stats['mem_free']: print(\"schedulePods scheduling pod={} onto the node={}\".format(pod['name'],", "timedelta config.load_kube_config() #config.load_incluster_config() # doing this computation within a k8s", "pendingPodsList #global failedPodsList CustomKubeSchedulingClusterDeploymentData = get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace, deploymentCustomSchedulingData", "#field_selector = (\"metadata.name=\" + name) field_selector = (\"metadata.annotations.OnDemandBase=\" + name)", "= {} #namespaceList =[] namespacedataList = core_api.list_namespace().to_dict()['items'] for namespaceData in", "for pod in pods: #pprint(pod) for container in pod['spec']['containers']: res", "stats[\"mem_alloc\"] - stats[\"mem_req\"] #stats[\"name\"] = node['metadata']['name'] #data.append(stats) availableNodesData[node_name] = stats", "* 100) stats[\"cpu_free\"] = stats[\"cpu_alloc\"] - stats[\"cpu_req\"] stats[\"mem_free\"] = stats[\"mem_alloc\"]", "time import random import json import os from pprint import", "= [] pendingPodsList = [] failedPodsList =[] getPodsListForDeployment(namespace, deploymentName) NumOfPodsRunning", "in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) if deploymentCustomSchedulingData != {}: CustomSchedulePerNamespace(namespace,", "(pendingPodsList): pprint(\"i={} pending pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p in", "#print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods)) print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList))) for nodeLabel, in NodesList.keys():", "= get_custom_deployments_per_namespace(namespace) 
#namespaceList.append(name) print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData =", "body, _preload_content=False) #tl = Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler(): #global pendingPodsList", "nodeLabel in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] = {} nodeLabelParts = nodeLabel.split('=') nodeLabelKey", "namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node) meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta, target=target) return core_api.create_namespaced_binding(namespace,", "stats in NodesList.items(): print(\"schedulePods Checking for free resources on node={}", "- stats[\"mem_req\"] #stats[\"name\"] = node['metadata']['name'] #data.append(stats) availableNodesData[node_name] = stats nodesListPerNodeLabel[nodeLabel]", "core_api.list_namespace().to_dict()['items'] for namespaceData in namespacedataList: namespace = namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] =", "break def getPodsListForDeployment(namespace, deploymentName): #global pendingPodsList #runningPodsList =[] #failedPodsList =[]", "[] for pod in pods: #pprint(pod) for container in pod['spec']['containers']:", "{}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] =", "= 0 nodeLabel = '' for nodeStrategyPart in nodeStrategyPartsList: nodeStrategySubPartList", "deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName: for container in", "get_custom_deployments_per_namespace(namespace) #namespaceList.append(name) 
print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData = []", "CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0) #podsList = getPodsListForDeployment(namespace, deploymentName) runningPodsList = []", "UnitRegistry from collections import defaultdict from kubernetes import client, config,", "i, p in enumerate (failedPodsList): pprint(\"i={} failed pod_name={} node_name={}\".format(i,p['node_name'], p['name']))", "podsList['failedPodsList'] for nodeLabel, numOfReplicas in CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={} on nodeLabel={}\".format(numOfReplicas,", "stats[\"status\"] == 'Failed': failedPodsList.append(stats) #podsList['pendingPodsList'] = pendingPodsList #podsList['runningPodsList'] = runningPodsList", "= podsList['pendingPodsList'] #failedPodsList = podsList['failedPodsList'] for nodeLabel, numOfReplicas in CustomSchedulingData.items():", "= client.V1DeleteOptions() #body = {} pprint(\"deletePods i={} pod={} NumOfPodsToDeleted={}\".format(i, pod['name'],", "nodeLabel, in NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList = podsList['runningPodsList'] #pendingPodsList =", "nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node, namespace) #pprint(ret) #main()", "from kubernetes.client.rest import ApiException from pint import UnitRegistry from collections", "= 'default' if NumOfPodsToBeScheduled > len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={} is greater", "be non-zero for more than node strategy\") exit(1) else: numOfBaseValues", "< NumOfPodsToBeRunning: NumOfPodsToBeScheduled = NumOfPodsToBeRunning - NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled, 
NodesList)", "value cannot be non-zero for more than node strategy\") exit(1)", "pprint(e) pendingPodsList = [] NumOfPodsFailed = [] #pprint(podsList) #lifecycle =", "in enumerate (runningPodsList): pprint(\"i={} running pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i,", "#field_selector = (\"spec.scheduler_name=\" + CustomSchedulerName) field_selector = (\"spec.schedulerName=\" + CustomSchedulerName)", "Q_ = ureg.Quantity def scheduler(name, node, namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node)", "scheduling pod={} onto the node={}\".format(pod['name'], node)) res = scheduler(pod['name'], node,", "annotations.keys(): if annotations['UseCustomKubeScheduler'] == 'true': deploymentName = deployment.metadata.name numOfReplicas =", "NumOfOnDemandPods, NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas)) CustomPodScheduleStrategy =", "schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return for i in range(NumOfPodsToBeScheduled): pod = pendingPodsList[0]", "stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"mem_req_per\"] = (stats[\"mem_req\"] /", "node['metadata']['name'] #data.append(stats) availableNodesData[node_name] = stats nodesListPerNodeLabel[nodeLabel] = availableNodesData #print(nodesListPerNodeLabel) #for", "== NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning = {}. 
So no need", "{} CustomPodScheduleStrategy = {} annotations = deployment.metadata.annotations if 'UseCustomKubeScheduler' in", "nodeStrategySubPartList[0] == 'base': if numOfBaseValues != 0: print(\"base value cannot", "numOfBaseValues != 0: print(\"base value cannot be non-zero for more", "numOfReplicas={} on nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList) #lifecycle = 'OnDemand' #NodesList =", "NumOfPodsToBeRunning: print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning = {}. So no need to", "scheduler(name, node, namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node) meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta, target=target)", "NumOfPodsToDeleted={}\".format(i, pod['name'], NumOfPodsToDeleted )) response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body)", "name) # get deployment by namespace #resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector)", "= int (numOfReplicas * (weight/totalWeight)) replicas = replicas + weightReplicas", "i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req']))", "greater than number of pending pods={}. 
So skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList)))", "= podsList['runningPodsList'] #pendingPodsList = podsList['pendingPodsList'] #failedPodsList = podsList['failedPodsList'] for nodeLabel,", "cpu_free={} mem_free={}\".format(node, stats['cpu_free'], stats['mem_free'])) #pprint(node) if pod['cpu_req'] <= stats['cpu_free'] and", "memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"cpu_req_per\"] = (stats[\"cpu_req\"]", "0 StrategyList = Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues = 0 for nodeStrategy", "#pprint(podsList) #pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList ) #return pendingPodsList,runningPodsList,failedPodsList #return", "NumOfAlreadyRunningPods > NumOfPodsToBeRunning: NumOfPodsToDeleted = NumOfAlreadyRunningPods - NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted,", "OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={} numOfReplicas={}\".format(Strategy, numOfReplicas))", "core_api.create_namespaced_binding(namespace, body, _preload_content=False) #tl = Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler(): #global", "res['requests'] or {}) lmts = defaultdict(lambda: 0, res['limits'] or {})", "if NumOfPodsToBeScheduled > len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={} is greater than number", "namespace) pprint(res) stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req'] stats['mem_free'] = stats['mem_free']", "in resp.items: #pprint(deployment.metadata.annotations) #pprint(deployment) deploymentData = {} CustomPodScheduleStrategy = {}", "print(\"NumOfAlreadyRunningPods == NumOfPodsToBeRunning = {}. 
So no need to Schedule\".format(NumOfAlreadyRunningPods))", "#pprint(node) if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']: print(\"schedulePods", "#RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList = [] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d)", "stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"]", "nodeLabel = nodeStrategyPart print(\"label key={} value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel] =", "or {}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"]", "+ weightReplicas else: weightReplicas = int (numOfReplicas * (weight/totalWeight)) replicas", "pprint(response) def schedulePods(NumOfPodsToBeScheduled, NodesList): global pendingPodsList global failedPodsList namespace =", "= (\"spec.scheduler_name=\" + CustomSchedulerName) field_selector = (\"spec.schedulerName=\" + CustomSchedulerName) pods", "== 'weight': weight = int(nodeStrategySubPartList[1]) totalWeight += weight print(\"weight={}\".format(weight)) else:", "namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) if deploymentCustomSchedulingData !=", "core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body) pprint(response) def schedulePods(NumOfPodsToBeScheduled, NodesList): global pendingPodsList", "namespaceData in namespacedataList: namespace = namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace) #namespaceList.append(name)", 
"So skipping schedulePods\".format(NumOfPodsToBeScheduled, len(pendingPodsList))) return for i in range(NumOfPodsToBeScheduled): pod", "for namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) if deploymentCustomSchedulingData", "[] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName = 'kube-ops-view'", "NumOfPodsToDeleted = NumOfAlreadyRunningPods - NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) except Exception", "deployment.metadata.name Strategy = annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas'] = deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy,", "print(\"schedulePods Trying to schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}\".format(i,", "CustomSchedulerName: for container in pod['spec']['containers']: res = container['resources'] reqs =", "CustomKubeSchedulingClusterDeploymentData = {} #namespaceList =[] namespacedataList = core_api.list_namespace().to_dict()['items'] for namespaceData", "cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req'])) for node, stats in", "in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] = {} nodeLabelParts = nodeLabel.split('=') nodeLabelKey =", "'base': if numOfBaseValues != 0: print(\"base value cannot be non-zero", "numOfReplicas)) CustomPodScheduleStrategy[key] = replicas print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues = {}", "nodeStrategy.split(',') base = 0 weight = 0 nodeLabel = ''", "#print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel, 
availableNodesData)) #exit(0) #pprint(data) return data if __name__ ==", "1.5) field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\" + node_name) stats[\"cpu_alloc\"] =", "= {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) return CustomPodScheduleStrategy __all__ =", "lifecycle pods = core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] # compute the allocated resources", "i={} nodeLabel={} node_name={} name={}\".format(i,nodeLabel, p['node_name'], p['name'])) if NumOfAlreadyRunningPods == NumOfPodsToBeRunning:", "#deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={}", "allocated resources cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] for pod", "runningPodsList global pendingPodsList global failedPodsList global nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData))", "CustomKubeSchedulingClusterDeploymentData = get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={}", "podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds = 30 body = client.V1DeleteOptions() #body = {}", "cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"mem_req\"] =", "= nodeLabelToWights[key] print(\"key: {} replicas={} weight={}, totalWeight={}\".format(key, replicas, weight, totalWeight))", "pprint(\"i={} pending pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for 
i, p in enumerate", "stats[\"mem_free\"] = stats[\"mem_alloc\"] - stats[\"mem_req\"] #stats[\"name\"] = node['metadata']['name'] #data.append(stats) availableNodesData[node_name]", "pod['spec']['node_name'] runningPodsList.append(stats) elif stats[\"status\"] == 'Failed': failedPodsList.append(stats) #podsList['pendingPodsList'] = pendingPodsList", "apis_api = client.AppsV1Api() #sdclient = SdcClient(<Your Sysdig API token>) sysdig_metric", "return for i in range(NumOfPodsToBeScheduled): pod = pendingPodsList[0] print(\"schedulePods Trying", "= {} Q_ = ureg.Quantity def scheduler(name, node, namespace): target=client.V1ObjectReference(api_version='v1',", "print(\"Scheduling numOfReplicas={} on nodeLabel={}\".format(numOfReplicas, nodeLabel)) #pprint(podsList) #lifecycle = 'OnDemand' #NodesList", "node)) res = scheduler(pod['name'], node, namespace) pprint(res) stats['cpu_free'] = stats['cpu_free']", "#stats[\"name\"] = node['metadata']['name'] #data.append(stats) availableNodesData[node_name] = stats nodesListPerNodeLabel[nodeLabel] = availableNodesData", "= getPodsListForDeployment(namespace, deploymentName) runningPodsList = [] pendingPodsList = [] failedPodsList", "pod['cpu_req'] stats['mem_free'] = stats['mem_free'] - pod['mem_req'] pendingPodsList.remove(pod) break def getPodsListForDeployment(namespace,", "(selector) #resp = core_api.list_node(field_selector=field_selector).to_dict()['items'] #pprint(\"resp={}\".format(resp)) #exit(0) availableNodesData = {} for", "nodeLabelValue: stats = {} node_name = node['metadata']['name'] allocatable = node['status']['allocatable']", "sum(cpureqs) stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts)", "#exit(0) #runningPodsList = podsList['runningPodsList'] #pendingPodsList = podsList['pendingPodsList'] #failedPodsList = podsList['failedPodsList']", "pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace, 
deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) if", "{} totalWeight = 0 StrategyList = Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues =", "in nodesListPerNodeLabel.items(): #print(\"nodeLabel={} availableNodesData={}\".format(nodeLabel, availableNodesData)) #exit(0) #pprint(data) return data if", "= {} annotations = deployment.metadata.annotations if 'UseCustomKubeScheduler' in annotations.keys(): if", "pprint(\"deletePods i={} pod={} NumOfPodsToDeleted={}\".format(i, pod['name'], NumOfPodsToDeleted )) response = core_api.delete_namespaced_pod(name=pod['name'],", "import defaultdict from kubernetes import client, config, watch from timeloop", "node, namespace): target=client.V1ObjectReference(api_version='v1', kind=\"Node\", name=node) meta=client.V1ObjectMeta() meta.name=name body=client.V1Binding(metadata=meta, target=target) return", "= 0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0] == 'weight': weight = int(nodeStrategySubPartList[1])", "weight, totalWeight)) if labelNum == totalNumOfLables - 1: weightReplicas =", "weight print(\"weight={}\".format(weight)) else: nodeLabel = nodeStrategyPart print(\"label key={} value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1]))", ")) response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body) pprint(response) def schedulePods(NumOfPodsToBeScheduled,", "body=body) pprint(response) def schedulePods(NumOfPodsToBeScheduled, NodesList): global pendingPodsList global failedPodsList namespace", "[], [], [] for pod in pods: #pprint(pod) for container", "= nodeLabelParts[1] #selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector = \"metadata.labels.nodesize=\"+nodeLabelParts[1] 
#print(\"selector={}\".format(selector)) #name", "import client, config, watch from timeloop import Timeloop from datetime", "totalWeight = 0 StrategyList = Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues = 0", "pendingPodsList,runningPodsList,failedPodsList #return podsList def get_custom_deployments(): CustomKubeSchedulingClusterDeploymentData = {} #namespaceList =[]", "#lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList):", "0 nodeLabel = '' for nodeStrategyPart in nodeStrategyPartsList: nodeStrategySubPartList =", "- pod['cpu_req'] stats['mem_free'] = stats['mem_free'] - pod['mem_req'] pendingPodsList.remove(pod) break def", "body = client.V1DeleteOptions() #body = {} pprint(\"deletePods i={} pod={} NumOfPodsToDeleted={}\".format(i,", "core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] # compute the allocated resources cpureqs,cpulmts,memreqs,memlmts = [],", "'ip-192-168-73-104.ec2.internal' #selector = \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector = (selector) #resp =", "from kubernetes import client, config, watch from timeloop import Timeloop", "stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req'] stats['mem_free'] = stats['mem_free'] - pod['mem_req']", "= replicas + weightReplicas labelNum += 1 numOfReplicas -= weightReplicas", "print(\"key: {} replicas={} weight={}, totalWeight={}\".format(key, replicas, weight, totalWeight)) if labelNum", "nodeStrategyPart print(\"label key={} value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel] = base nodeLabelToWights", "more than node strategy\") exit(1) else: numOfBaseValues += 1 base", "{} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) return CustomPodScheduleStrategy __all__ = 
[\"get_node_available_nodes_list\"]", "podsList['pendingPodsList'] #failedPodsList = podsList['failedPodsList'] for nodeLabel, numOfReplicas in CustomSchedulingData.items(): print(\"Scheduling", "print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0] == 'weight': weight = int(nodeStrategySubPartList[1]) totalWeight +=", "NumOfPodsRunning = len (runningPodsList) NumOfPodsPending = len (pendingPodsList) NumOfPodsFailed =", "NumOfAlreadyRunningPods - NumOfPodsToBeRunning try: deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) except Exception as e:", "['OnDemand', 'Ec2Spot'] for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData))", "node, namespace) #pprint(ret) #main() #test() #testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ')", "int(nodeStrategySubPartList[1]) if base <= numOfReplicas: numOfReplicas -= base else: base", "pending pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p in enumerate (failedPodsList):", "elif stats[\"status\"] == 'Failed': failedPodsList.append(stats) #podsList['pendingPodsList'] = pendingPodsList #podsList['runningPodsList'] =", "= 0 for nodeStrategy in StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList =", "global pendingPodsList global failedPodsList namespace = 'default' if NumOfPodsToBeScheduled >", "resources on node={} with cpu_free={} mem_free={}\".format(node, stats['cpu_free'], stats['mem_free'])) #pprint(node) if", "(weight/totalWeight)) replicas = replicas + weightReplicas labelNum += 1 numOfReplicas", "#global pendingPodsList #runningPodsList =[] #failedPodsList =[] #podsList = {} #namespace='default'", "print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues = {} totalWeight={} 
numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas))", "= 'OnDemand' #NodesList = get_node_available_nodes_list(lifecycle) #pprint(NodesList) NumOfPodsRunningAlready = 0 podsAlreadyRunningOnNodeLabelList", "+ (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100) #deploymentData['NumOfSpotPodsToBeRunning'] =", "pprint from kubernetes.client.rest import ApiException from pint import UnitRegistry from", "(stats[\"mem_req\"] / stats[\"mem_alloc\"] * 100) stats[\"mem_lmt_per\"] = (stats[\"mem_lmt\"] / stats[\"mem_alloc\"]", "+= 1 base = int(nodeStrategySubPartList[1]) if base <= numOfReplicas: numOfReplicas", "in enumerate (failedPodsList): pprint(\"i={} failed pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel,", "(podsAlreadyRunningOnNodeLabelList) for i, p in enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running pod i={}", "[] #pprint(podsList) #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) def", "if labelNum == totalNumOfLables - 1: weightReplicas = numOfReplicas replicas", "#body = {} pprint(\"deletePods i={} pod={} NumOfPodsToDeleted={}\".format(i, pod['name'], NumOfPodsToDeleted ))", "'true': deploymentName = deployment.metadata.name numOfReplicas = deployment.spec.replicas #deploymentData[deploymentName] = deployment.metadata.name", "return core_api.create_namespaced_binding(namespace, body, _preload_content=False) #tl = Timeloop() <EMAIL>(interval=timedelta(seconds=10)) def RunEc2SpotCustomScheduler():", "stats[\"cpu_lmt_per\"] = (stats[\"cpu_lmt\"] / stats[\"cpu_alloc\"] * 100) stats[\"mem_req\"] = sum(memreqs)", "cpureqs,cpulmts,memreqs,memlmts = [], [], [], [] if deploymentName in pod['metadata']['name']", "numOfBaseValues += 1 base = int(nodeStrategySubPartList[1]) if base <= numOfReplicas:", "= nodeLabelParts[0] nodeLabelValue = 
nodeLabelParts[1] #selector = \"metadata.labels.\"+nodeLabelParts[0]+\"=\"+nodeLabelParts[1] #selector =", "= node['status']['allocatable'] max_pods = int(int(allocatable[\"pods\"]) * 1.5) field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\"", "res['limits'] or {}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"] = sum(cpureqs)", "numOfReplicas = deployment.spec.replicas #deploymentData[deploymentName] = deployment.metadata.name Strategy = annotations['CustomPodScheduleStrategy'] #deploymentData['pod_replicas']", "non-zero for more than node strategy\") exit(1) else: numOfBaseValues +=", "[] pendingPodsList = [] failedPodsList =[] getPodsListForDeployment(namespace, deploymentName) NumOfPodsRunning =", "namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace) #namespaceList.append(name) print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace):", "= int(nodeStrategySubPartList[1]) if base <= numOfReplicas: numOfReplicas -= base else:", "SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods,", "nodesListPerNodeLabel #data = [] #data = {} for nodeLabel in", "nodeLabelToWights = {} totalWeight = 0 StrategyList = Strategy.split(':') print(\"StrategyList={}\".format(StrategyList))", "deploymentName) runningPodsList = [] pendingPodsList = [] failedPodsList =[] getPodsListForDeployment(namespace,", "for nodeStrategy in StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList = nodeStrategy.split(',') base", "NumOfOnDemandPods={} 
NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy,", "nodeLabelKey in node_labels.keys(): if node_labels[nodeLabelKey] == nodeLabelValue: stats = {}", "= podsList['failedPodsList'] for nodeLabel, numOfReplicas in CustomSchedulingData.items(): print(\"Scheduling numOfReplicas={} on", "pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for i, p in enumerate", "as e: pprint(e) pendingPodsList = [] NumOfPodsFailed = [] #pprint(podsList)", "- deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={}", "if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']: print(\"schedulePods scheduling", "{} for node in core_api.list_node().to_dict()['items']: #pprint(node) node_labels = node['metadata']['labels'] if", "= core_api.list_namespace().to_dict()['items'] for namespaceData in namespacedataList: namespace = namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace]", "* 1.5) field_selector = (\"status.phase!=Succeeded,status.phase!=Failed,\" + \"spec.nodeName=\" + node_name) stats[\"cpu_alloc\"]", "(stats[\"mem_lmt\"] / stats[\"mem_alloc\"] * 100) stats[\"cpu_free\"] = stats[\"cpu_alloc\"] - stats[\"cpu_req\"]", "sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"name\"] = pod['metadata']['name'] stats[\"status\"] = pod['status']['phase']", "pod = podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds = 30 body = client.V1DeleteOptions() #body", "= [] failedPodsList =[] getPodsListForDeployment(namespace, deploymentName) NumOfPodsRunning = len (runningPodsList)", "pods = 
core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] # compute the allocated resources cpureqs,cpulmts,memreqs,memlmts", "stats[\"status\"] == 'Pending': pendingPodsList.append(stats) elif stats[\"status\"] == 'Running': stats[\"node_name\"] =", "ureg.load_definitions('kubernetes_units.txt') pendingPodsList = [] failedPodsList = [] runningPodsList =[] nodesListPerNodeLabel", "#pprint(\"pendingPodsList={} runningPodsList={} failedPodsList={}\".format(runningPodsList, runningPodsList, failedPodsList ) #return pendingPodsList,runningPodsList,failedPodsList #return podsList", "/usr/bin/python3 import time import random import json import os from", "deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100) #deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning']", "reqs = defaultdict(lambda: 0, res['requests'] or {}) lmts = defaultdict(lambda:", "replicas={} weight={}, totalWeight={}\".format(key, replicas, weight, totalWeight)) if labelNum == totalNumOfLables", "= {} nodeLabelToReplicas = {} nodeLabelToWights = {} totalWeight =", "for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName, CustomSchedulingData)) #exit(0) #podsList", "{} nodeLabelToReplicas = {} nodeLabelToWights = {} totalWeight = 0", "with cpu_free={} mem_free={}\".format(node, stats['cpu_free'], stats['mem_free'])) #pprint(node) if pod['cpu_req'] <= stats['cpu_free']", "failedPodsList namespace = 'default' if NumOfPodsToBeScheduled > len(pendingPodsList): pprint(\"schedulePods NumOfPodsToBeScheduled={}", "'default' #lifecycleList = ['OnDemand', 'Ec2Spot'] for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items():", "#namespace='default' #name='Ec2SpotK8sScheduler' #field_selector = (\"spec.scheduler_name=\" + CustomSchedulerName) 
field_selector = (\"spec.schedulerName=\"", "stats[\"mem_lmt\"] = sum(memlmts) stats[\"name\"] = pod['metadata']['name'] stats[\"status\"] = pod['status']['phase'] if", "in pods: #pprint(pod) for container in pod['spec']['containers']: res = container['resources']", "{} #namespaceList =[] namespacedataList = core_api.list_namespace().to_dict()['items'] for namespaceData in namespacedataList:", "#podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName) #testlist() #tl.start(block=True)", "namespace #resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp = apis_api.list_namespaced_deployment(namespace=namespace) for deployment", "pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList = podsList['runningPodsList'] #pendingPodsList = podsList['pendingPodsList'] #failedPodsList =", "deploymentName): #global pendingPodsList #runningPodsList =[] #failedPodsList =[] #podsList = {}", "return CustomPodScheduleStrategy __all__ = [\"get_node_available_nodes_list\"] def get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel #data", "NumOfPodsToDeleted): pod = podsAlreadyRunningOnNodeLabelList[i] grace_period_seconds = 30 body = client.V1DeleteOptions()", "node['metadata']['labels'] if nodeLabelKey in node_labels.keys(): if node_labels[nodeLabelKey] == nodeLabelValue: stats", "numOfReplicas = 0 print(\"base={}\".format(nodeStrategySubPartList[1])) elif nodeStrategySubPartList[0] == 'weight': weight =", "#sdclient = SdcClient(<Your Sysdig API token>) sysdig_metric = \"net.http.request.time\" metrics", "pendingPodsList.remove(pod) break def getPodsListForDeployment(namespace, deploymentName): #global pendingPodsList #runningPodsList =[] #failedPodsList", "CustomKubeSchedulingClusterDeploymentData[namespace] = 
get_custom_deployments_per_namespace(namespace) #namespaceList.append(name) print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData def get_custom_deployments_per_namespace(namespace): #CustomKubeSchedulingDeploymentData", "= len (podsAlreadyRunningOnNodeLabelList) for i, p in enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running", "1: weightReplicas = numOfReplicas replicas = replicas + weightReplicas else:", "global pendingPodsList global failedPodsList global nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, deploymentCustomSchedulingData)) #exit(0)", "import Timeloop from datetime import timedelta config.load_kube_config() #config.load_incluster_config() # doing", "= get_node_available_nodes_list(lifecycle) #pprint(NodesList) NumOfPodsRunningAlready = 0 podsAlreadyRunningOnNodeLabelList = [] for", "in StrategyList: print(\"nodeStrategy: {}\".format(nodeStrategy)) nodeStrategyPartsList = nodeStrategy.split(',') base = 0", "= {} for nodeLabel in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] = {} nodeLabelParts", "runningPodsList.append(stats) elif stats[\"status\"] == 'Failed': failedPodsList.append(stats) #podsList['pendingPodsList'] = pendingPodsList #podsList['runningPodsList']", "print(\"lifecycle={} NumOfNodes={}\".format(lifecycle, len(NodesList))) for nodeLabel, in NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList", "{}\".format(CustomPodScheduleStrategy)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) return CustomPodScheduleStrategy", "weight CustomPodScheduleStrategy [nodeLabel] = base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues =", "#print(\"pods={}\".format(pods)) for pod in pods['items']: #pprint(pod) 
#print(\"node_name={}\".format(pod['spec']['node_name'])) #return \"\" stats", "import ApiException from pint import UnitRegistry from collections import defaultdict", "onto the node={}\".format(pod['name'], node)) res = scheduler(pod['name'], node, namespace) pprint(res)", "stats = {} node_name = node['metadata']['name'] allocatable = node['status']['allocatable'] max_pods", "{} for nodeLabel in CustomSchedulingData.keys(): nodesListPerNodeLabel[nodeLabel] = {} nodeLabelParts =", "<= stats['mem_free']: print(\"schedulePods scheduling pod={} onto the node={}\".format(pod['name'], node)) res", "elif NumOfAlreadyRunningPods > NumOfPodsToBeRunning: NumOfPodsToDeleted = NumOfAlreadyRunningPods - NumOfPodsToBeRunning try:", "def RunEc2SpotCustomScheduler(): #global pendingPodsList #global failedPodsList CustomKubeSchedulingClusterDeploymentData = get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData))", "podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys(): podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList) for i,", "= pendingPodsList[0] print(\"schedulePods Trying to schedule i={} NumOfPodsToBeScheduled={} pod={} with", "}] #scheduler_name = \"Ec2SpotK8sScheduler\" CustomSchedulerName ='K8SCustomScheduler' ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt')", "[nodeLabel] = base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues = {} totalWeight={}", "node={} with cpu_free={} mem_free={}\".format(node, stats['cpu_free'], stats['mem_free'])) #pprint(node) if pod['cpu_req'] <=", "= namespaceData['metadata']['name'] CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace) #namespaceList.append(name) 
print(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) return CustomKubeSchedulingClusterDeploymentData def", "\"\" stats = {} cpureqs,cpulmts,memreqs,memlmts = [], [], [], []", "len (failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed,", "pod={} with cpu_req={} mem_req={}\".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req'])) for node,", "#print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}\".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName,", "Timeloop from datetime import timedelta config.load_kube_config() #config.load_incluster_config() # doing this", "len (podsAlreadyRunningOnNodeLabelList) for i, p in enumerate (podsAlreadyRunningOnNodeLabelList): pprint(\"running pod", "node, stats in NodesList.items(): print(\"schedulePods Checking for free resources on", "pendingPodsList={}\".format(NumOfPodsPending, pendingPodsList)) #print(\"NumOfPodsFailed={} failedPodsList={}\".format(NumOfPodsFailed, failedPodsList)) get_node_available_nodes_list(CustomSchedulingData) for i, p in", "node_labels[nodeLabelKey] == nodeLabelValue: stats = {} node_name = node['metadata']['name'] allocatable", "deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return CustomKubeSchedulingDeploymentData #print(\"OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={}", "node_name={}\".format(i,p['node_name'], p['name'])) for i, p in enumerate (pendingPodsList): pprint(\"i={} pending", "#podsList['pendingPodsList'] = pendingPodsList #podsList['runningPodsList'] = 
runningPodsList #podsList['failedPodsList'] = failedPodsList #pprint(podsList)", "deployment.spec.replicas #deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning']", "= [\"get_node_available_nodes_list\"] def get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel #data = [] #data", "import timedelta config.load_kube_config() #config.load_incluster_config() # doing this computation within a", "='K8SCustomScheduler' ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList = [] failedPodsList =", "stats[\"cpu_alloc\"] = Q_(allocatable[\"cpu\"]) stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] = lifecycle pods", "pprint(\"i={} running pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) for i, p in enumerate", "#exit(0) availableNodesData = {} for node in core_api.list_node().to_dict()['items']: #pprint(node) node_labels", "= sum(memlmts) stats[\"mem_req_per\"] = (stats[\"mem_req\"] / stats[\"mem_alloc\"] * 100) stats[\"mem_lmt_per\"]", "#ready_nodes = nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node, namespace)", "#CustomKubeSchedulingDeploymentData = [] CustomKubeSchedulingDeploymentData = {} #namespace='default' #name = 'nginx'", "enumerate (failedPodsList): pprint(\"i={} failed pod_name={} node_name={}\".format(i,p['node_name'], p['name'])) #print(\"nodeLabel={} NumOfAlreadyRunningPods={}\".format(nodeLabel, NumOfAlreadyRunningPods))", "{}\".format(CustomPodScheduleStrategy)) totalNumOfLables = len (CustomPodScheduleStrategy) labelNum = 0 for key,", "= SdcClient(<Your Sysdig API token>) sysdig_metric = \"net.http.request.time\" metrics =", 
"stats[\"cpu_lmt\"] = sum(cpulmts) stats[\"cpu_req_per\"] = (stats[\"cpu_req\"] / stats[\"cpu_alloc\"] * 100)", "= ['OnDemand', 'Ec2Spot'] for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items(): print(\"deploymentName={} CustomSchedulingData={}\".format(deploymentName,", "if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName: for container", "100) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts) stats[\"mem_req_per\"] = (stats[\"mem_req\"]", "=[] namespacedataList = core_api.list_namespace().to_dict()['items'] for namespaceData in namespacedataList: namespace =", "[{ \"id\": sysdig_metric, \"aggregations\": { \"time\": \"timeAvg\", \"group\": \"avg\" }", "pprint(e) elif NumOfAlreadyRunningPods > NumOfPodsToBeRunning: NumOfPodsToDeleted = NumOfAlreadyRunningPods - NumOfPodsToBeRunning", "for i, p in enumerate (pendingPodsList): pprint(\"i={} pending pod_name={} node_name={}\".format(i,p['node_name'],", "= get_custom_deployments() pprint(\"CustomKubeSchedulingClusterDeploymentData={}\".format(CustomKubeSchedulingClusterDeploymentData)) for namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items(): print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace,", "#pods = core_api.list_namespaced_pod(namespace=namespace).to_dict() #print(\"pods={}\".format(pods)) for pod in pods['items']: #pprint(pod) #print(\"node_name={}\".format(pod['spec']['node_name']))", "= {} totalWeight = 0 StrategyList = Strategy.split(':') print(\"StrategyList={}\".format(StrategyList)) numOfBaseValues", "pods: #pprint(pod) for container in pod['spec']['containers']: res = container['resources'] reqs", "= {} for node in core_api.list_node().to_dict()['items']: #pprint(node) node_labels = node['metadata']['labels']", "availableNodesData[node_name] = stats nodesListPerNodeLabel[nodeLabel] = availableNodesData #print(nodesListPerNodeLabel) #for 
nodeLabel, availableNodesData", "= get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] -", "pod i={} nodeLabel={} node_name={} name={}\".format(i,nodeLabel, p['node_name'], p['name'])) if NumOfAlreadyRunningPods ==", "#return \"\" stats = {} cpureqs,cpulmts,memreqs,memlmts = [], [], [],", "podsAlreadyRunningOnNodeLabelList.append(podRunning) NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList) for i, p in enumerate", "nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel] = base nodeLabelToWights [nodeLabel] = weight CustomPodScheduleStrategy", "namespacedataList = core_api.list_namespace().to_dict()['items'] for namespaceData in namespacedataList: namespace = namespaceData['metadata']['name']", "'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList = [] #d", "deploymentName = deployment.metadata.name numOfReplicas = deployment.spec.replicas #deploymentData[deploymentName] = deployment.metadata.name Strategy", "deployment.spec.replicas) CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas) #deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] +", "#print(\"selector={}\".format(selector)) #name = 'ip-192-168-73-104.ec2.internal' #selector = \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector =", "OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods)) def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas): print(\"Strategy={}", "(CustomPodScheduleStrategy) labelNum = 0 for key, replicas in CustomPodScheduleStrategy.items(): weight", "nodesListPerNodeLabel print(\"namespace={} deploymentCustomSchedulingData={}\".format(namespace, 
deploymentCustomSchedulingData)) #exit(0) #namespace = 'default' #lifecycleList =", "/ stats[\"cpu_alloc\"] * 100) stats[\"mem_req\"] = sum(memreqs) stats[\"mem_lmt\"] = sum(memlmts)", "namespace = 'default' for i in range(0, NumOfPodsToDeleted): pod =", "#pendingPodsList = podsList['pendingPodsList'] #failedPodsList = podsList['failedPodsList'] for nodeLabel, numOfReplicas in", "== 'Failed': failedPodsList.append(stats) #podsList['pendingPodsList'] = pendingPodsList #podsList['runningPodsList'] = runningPodsList #podsList['failedPodsList']", "(\"spec.scheduler_name=\" + CustomSchedulerName) field_selector = (\"spec.schedulerName=\" + CustomSchedulerName) pods =", "deploymentCustomSchedulingData): global runningPodsList global pendingPodsList global failedPodsList global nodesListPerNodeLabel print(\"namespace={}", "= {}\".format(CustomPodScheduleStrategy)) totalNumOfLables = len (CustomPodScheduleStrategy) labelNum = 0 for", "stats[\"mem_alloc\"] = Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] = lifecycle pods = core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items']", "= nodeStrategy.split(',') base = 0 weight = 0 nodeLabel =", "NodesList.keys(): pprint(\"node_name={}\".format(n)) #exit(0) #runningPodsList = podsList['runningPodsList'] #pendingPodsList = podsList['pendingPodsList'] #failedPodsList", "sysdig_metric = \"net.http.request.time\" metrics = [{ \"id\": sysdig_metric, \"aggregations\": {", "value={}\".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1])) #nodeLabelToReplicas [nodeLabel] = base nodeLabelToWights [nodeLabel] = weight", "0 podsAlreadyRunningOnNodeLabelList = [] for podRunning in runningPodsList: if podRunning['node_name']", "(deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100) #deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas']", "get_node_available_nodes_list(CustomSchedulingData) for i, p in 
enumerate (runningPodsList): pprint(\"i={} running pod_name={}", "CustomSchedulerName ='K8SCustomScheduler' ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList = [] failedPodsList", "(pendingPodsList) NumOfPodsFailed = len (failedPodsList) #print(\"NumOfPodsRunning={} runningPodsList={}\".format(NumOfPodsRunning, runningPodsList)) #print(\"NumOfPodsPending={} pendingPodsList={}\".format(NumOfPodsPending,", "= {} CustomPodScheduleStrategy = {} annotations = deployment.metadata.annotations if 'UseCustomKubeScheduler'", "from pint import UnitRegistry from collections import defaultdict from kubernetes", "[] if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName: for", "= Q_(allocatable[\"memory\"]) #stats[\"lifecycle\"] = lifecycle pods = core_api.list_pod_for_all_namespaces(limit=max_pods, field_selector=field_selector).to_dict()['items'] #", "base <= numOfReplicas: numOfReplicas -= base else: base = numOfReplicas", "stats['mem_free'])) #pprint(node) if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']:", "[nodeLabel] = weight CustomPodScheduleStrategy [nodeLabel] = base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights))", "base = 0 weight = 0 nodeLabel = '' for", "pod['name'], pod['cpu_req'], pod['mem_req'])) for node, stats in NodesList.items(): print(\"schedulePods Checking", "NumOfAlreadyRunningPods < NumOfPodsToBeRunning: NumOfPodsToBeScheduled = NumOfPodsToBeRunning - NumOfAlreadyRunningPods try: schedulePods(NumOfPodsToBeScheduled,", "print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight, numOfReplicas)) print(\"CustomPodScheduleStrategy = {}\".format(CustomPodScheduleStrategy))", "get deployment by namespace #resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector) resp =", "{} #namespace='default' #name = 'nginx' name = '1' 
#field_selector =", "deploymentData['OnDemandAbovePercentage'] / 100) #deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning'] #CustomKubeSchedulingDeploymentData.append(deploymentData) return", "base print(\"nodeLabelToReplicas={} nodeLabelToWights={}\".format(nodeLabelToReplicas, nodeLabelToWights)) print(\"numOfBaseValues = {} totalWeight={} numOfReplicas={}\".format(numOfBaseValues, totalWeight,", "elif NumOfAlreadyRunningPods < NumOfPodsToBeRunning: NumOfPodsToBeScheduled = NumOfPodsToBeRunning - NumOfAlreadyRunningPods try:", "NodesList) except Exception as e: pprint(e) elif NumOfAlreadyRunningPods > NumOfPodsToBeRunning:", "or {}) lmts = defaultdict(lambda: 0, res['limits'] or {}) cpureqs.append(Q_(reqs[\"cpu\"]))", "{} #namespace='default' #name='Ec2SpotK8sScheduler' #field_selector = (\"spec.scheduler_name=\" + CustomSchedulerName) field_selector =", "CustomPodScheduleStrategy __all__ = [\"get_node_available_nodes_list\"] def get_node_available_nodes_list(CustomSchedulingData): global nodesListPerNodeLabel #data =", "except Exception as e: pprint(e) elif NumOfAlreadyRunningPods > NumOfPodsToBeRunning: NumOfPodsToDeleted", "defaultdict(lambda: 0, res['limits'] or {}) cpureqs.append(Q_(reqs[\"cpu\"])) memreqs.append(Q_(reqs[\"memory\"])) cpulmts.append(Q_(lmts[\"cpu\"])) memlmts.append(Q_(lmts[\"memory\"])) stats[\"cpu_req\"]", "= '1' #field_selector = (\"metadata.name=\" + name) field_selector = (\"metadata.annotations.OnDemandBase=\"", "= 'ip-192-168-73-104.ec2.internal' #selector = \"metadata.name\"+\"=\"+name #print(\"selector={}\".format(selector)) #field_selector = (selector) #resp", "# doing this computation within a k8s cluster #k8s.config.load_incluster_config() core_api", "- stats[\"cpu_req\"] stats[\"mem_free\"] = stats[\"mem_alloc\"] - stats[\"mem_req\"] #stats[\"name\"] = node['metadata']['name']", "range(NumOfPodsToBeScheduled): pod = pendingPodsList[0] print(\"schedulePods 
Trying to schedule i={} NumOfPodsToBeScheduled={}", "deploymentCustomSchedulingData)) #exit(0) #namespace = 'default' #lifecycleList = ['OnDemand', 'Ec2Spot'] for", "pendingPodsList global failedPodsList namespace = 'default' if NumOfPodsToBeScheduled > len(pendingPodsList):", "'' for nodeStrategyPart in nodeStrategyPartsList: nodeStrategySubPartList = nodeStrategyPart.split('=') if nodeStrategySubPartList[0]" ]
[ "python3 # -*- coding: utf-8 -*- from clean_transcript import clean_transcript", "= \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label): clean = clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript =", "from clean_transcript import clean_transcript ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label): clean", "\"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label): clean = clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript = clean.clean(label)", "def validate_label(label): clean = clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript = clean.clean(label) if", "= clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript = clean.clean(label) if cleaned: return transcript.lower()", "validate_label(label): clean = clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript = clean.clean(label) if cleaned:", "import clean_transcript ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label): clean = clean_transcript(ALPHABET_FILE_PATH)", "clean_transcript import clean_transcript ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label): clean =", "cleaned, transcript = clean.clean(label) if cleaned: return transcript.lower() return None", "clean_transcript ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label): clean = clean_transcript(ALPHABET_FILE_PATH) cleaned,", "coding: utf-8 -*- from clean_transcript import clean_transcript ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\"", "clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript = clean.clean(label) if cleaned: return transcript.lower() return", "ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label): clean = clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript", "# -*- coding: utf-8 -*- from clean_transcript import clean_transcript ALPHABET_FILE_PATH", "utf-8 
-*- from clean_transcript import clean_transcript ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def", "-*- from clean_transcript import clean_transcript ALPHABET_FILE_PATH = \"/DeepSpeech/bin/bangor_welsh/alphabet.txt\" def validate_label(label):", "-*- coding: utf-8 -*- from clean_transcript import clean_transcript ALPHABET_FILE_PATH =", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- from clean_transcript import", "clean = clean_transcript(ALPHABET_FILE_PATH) cleaned, transcript = clean.clean(label) if cleaned: return" ]
[ "std = init_scale * (d ** -0.5) nn.init.normal_(weight, mean=0, std=std)", "= self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i", "if bias is not None: logit = logit + bias", "NVIDIA CORPORATION. All rights reserved. # # Licensed under the", "dropout self.drop = nn.Dropout(dropout) if div_val == 1: if d_proj", "keep_order=False, key_padding_mask=None, *args, **kwargs): # [21-09-15 AG]: TODO may need", "self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = list(cutoffs) +", "= nn.Dropout(dropout) if div_val == 1: if d_proj != d_embed:", "self.keep_order or keep_order: nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset +=", "embed = self.emb_layers[0](inp) embed = self.drop(embed) if self.d_proj != self.d_embed:", "embed = self.drop(embed) if self.d_proj != self.d_embed: embed = F.linear(embed,", "2.0 (the \"License\"); # you may not use this file", "- l_idx emb_i = self.emb_layers[i](inp_i) emb_i = self.drop(emb_i) emb_i =", "hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs): # [21-09-15 AG]: TODO", "target.reshape(-1) if hidden.size(0) != target.size(0): print(hidden.shape, target.shape) raise RuntimeError('Input and", "cutoff_values[i], cutoff_values[i + 1] mask_i = (target >= l_idx) &", "d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx)) ) if not out_layers_weights:", "' (GPU {})'.format(p.get_device()) parastr = 'Parameter containing: [{} of size", "from typing import List, Optional import functools import torch import", "= self.emb_layers[0](inp) embed = self.drop(embed) if self.d_proj != self.d_embed: embed", "self.out_layers_biases[i] if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight],", "n_token self.d_embed = d_embed self.cutoffs = list(cutoffs) + [n_token] self.div_val", "** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList()", 
"l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze(-1) # shape", "operation Initialization has been fixed for the case when d_proj", "[] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx,", "= list(tie_projs) tie_projs = [False] + tie_projs self.tie_projs = tie_projs", "biases = [], [] for i in range(len(self.cutoffs)): if self.div_val", "embed def _init_weight(weight, d : int, init_scale : Optional[float], default=None):", "init_scale or default if init_scale is None: std = default", "_init_embed = functools.partial(_init_weight, default=0.02) _init_proj = functools.partial(_init_weight, default=0.01) ### Just", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "AG]: bake the first False into the definition, just as", "# Changes embeddings.append(emb_i) indices.index_put_( (indices_i,), torch.arange(_tokens, device=inp.device) + _total_tokens )", "device_str) child_lines.append(' (' + str(k) + '): ' + parastr)", "dimension because inputs are always given as (B, L, D)", "hidden_i = hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)", "nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return nll.mean()", "self.div_val == 1: embed = self.emb_layers[0](inp) embed = self.drop(embed) if", "weights[i], biases[i], self.get_out_proj(i) hidden_i = hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i,", "# TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0,", "tie_projs = [False] + tie_projs self.tie_projs = tie_projs if self.n_clusters", "!= target.size(0): print(hidden.shape, target.shape) raise RuntimeError('Input and target should have", "): super().__init__() self.n_token = n_token self.d_embed = d_embed 
self.d_proj =", "self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights: self.out_layers_weights = nn.ParameterList()", "tie_projs self.tie_projs = tie_projs if self.n_clusters > 0: self.cluster_weight =", "self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0],", "self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1:", "language governing permissions and # limitations under the License. from", "the cutoffs if tie_projs is None: tie_projs = [] elif", "emb_i = self.drop(emb_i) emb_i = F.linear(emb_i, self.emb_projs[i]) # Changes embeddings.append(emb_i)", "last dimension because inputs are always given as (B, L,", "keep_order def _compute_logit(self, hidden, weight, bias, proj): if proj is", "default=0.01) ### Just for this codebase, we need to squeeze", "= [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList()", "Optional import functools import torch import torch.nn as nn import", "n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):", "[] for k, p in self._parameters.items(): if p is not", "use this file except in compliance with the License. #", "d_proj != d_embed: for i in range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None)", "self.drop(logit) logit = logit @ weight.t() else: logit = torch.einsum('bd,de,ev->bv',", "nn.Dropout(dropout) if div_val == 1: if d_proj != d_embed: for", "return nll.mean() # TODO maybe cases for length or padding_mask", "target = target.reshape(-1) if hidden.size(0) != target.size(0): print(hidden.shape, target.shape) raise", "reserved. 
# # Licensed under the Apache License, Version 2.0", "+ self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx", "= len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters #", "handle key_padding_mask ''' hidden :: [len*bsz x d_proj] target ::", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if i == 0: weight_i", "self.dropout > 0.0: logit = hidden @ proj logit =", "License. # You may obtain a copy of the License", "indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i,", "inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) # embed.div_(self.emb_scale) return", "out_layers_weights=None, out_projs=None, keep_order=False, bias_scale=0.0, dropout=0.0, ): super().__init__() self.n_token = n_token", "= hidden @ proj logit = self.drop(logit) logit = logit", "= nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights: self.out_layers_weights = nn.ParameterList() else: self.out_layers_weights", "k, p in self._parameters.items(): if p is not None: size_str", "else: return self.out_projs[i] def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args,", "bias return logit def get_out_proj(self, i): if self.tie_projs[i]: if len(self.shared_out_projs)", "str(k) + '): ' + parastr) tmpstr = '\\n'.join(child_lines) return", "[len*bsz] ''' hidden = hidden.reshape(-1, hidden.size(-1)) target = target.reshape(-1) if", "transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation", "for length or padding_mask class AdaptiveEmbedding(nn.Module): \"\"\" Copy of transformers.AdaptiveEmbedding", "d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0): super().__init__()", "0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, 
self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if not", "target.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) if i", "under the License is distributed on an \"AS IS\" BASIS,", "if self.dropout > 0.0: logit = hidden @ proj logit", "head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target,", "_init_embed(self.emb_layers[-1].weight, d_embed, init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed **", "License for the specific language governing permissions and # limitations", "-bound, bound) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias,", "@ weight.t() else: logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) if", "head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i, bias_i, proj_i = weights[i], biases[i],", "embed_shape = inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) #", "= self.drop(logit) logit = logit @ weight.t() else: logit =", "self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) )", "is not None: logit = logit + bias return logit", "works with fp16 by replacing the index_put_ operation Initialization has", "not None: logit = logit + bias return logit def", "= logit @ weight.t() else: logit = torch.einsum('bd,de,ev->bv', (hidden, proj,", "\\ .gather(1, target.unsqueeze(1)).squeeze(1) else: # construct weights and biases weights,", "if init_scale is None: std = default else: std =", "* d_proj ** -.5 nn.init.uniform_(bias, -bound, bound) self.keep_order = keep_order", "** -.5 nn.init.uniform_(bias, -bound, bound) self.keep_order = keep_order def _compute_logit(self,", "[{} of size {}{}]'.format( torch.typename(p), size_str, device_str) child_lines.append(' (' +", "mask_i = 
(target >= l_idx) & (target < r_idx) indices_i", "out_projs=None, keep_order=False, bias_scale=0.0, dropout=0.0, ): super().__init__() self.n_token = n_token self.d_embed", "d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0): super().__init__() self.n_token", "0 cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values)", "p is not None: size_str = 'x'.join(str(size) for size in", "= torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) if bias is not None:", "= torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1]", "self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size =", "in p.size()) device_str = '' if not p.is_cuda else '", "[None] * len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) ) if not out_layers_weights:", "d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight,", "emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj) for i in range(len(self.cutoffs)): l_idx, r_idx", "not None: size_str = 'x'.join(str(size) for size in p.size()) device_str", "weight_i = self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if i == 0:", "### Just for this codebase, we need to squeeze the", "mean=0, std=init_scale * d_embed ** -.5) if d_proj != d_embed:", "tie_projs is None: tie_projs = [] elif isinstance(tie_projs, bool): tie_projs", "logit = hidden @ proj logit = self.drop(logit) logit =", "empty should work as long as cutoffs[-1] > max token", "torch import torch.nn as nn import torch.nn.functional as F class", "= 'Parameter containing: [{} of size {}{}]'.format( torch.typename(p), size_str, device_str)", "are always given as (B, L, D) instead of (B,", "= d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))", "in compliance with the License. 
# You may obtain a", "* 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) def forward(self, inp, *args, **kwargs):", "software # distributed under the License is distributed on an", "- l_idx, d_emb_i)) ) for bias in self.out_layers_biases: bound =", "std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) def forward(self, inp, *args,", "i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i =", "self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) ) else: for i in range(len(self.cutoffs)): l_idx,", "ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, tie_projs=None, out_layers_weights=None,", "out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) ) else: for i in range(len(self.cutoffs)):", "dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)", "default=None): assert init_scale or default if init_scale is None: std", "import torch import torch.nn as nn import torch.nn.functional as F", "emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) embeddings = [] indices", "0.0: logit = hidden @ proj logit = self.drop(logit) logit", "nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights: self.out_layers_weights = nn.ParameterList() else: self.out_layers_weights =", "device=hidden.device) offset = 0 cutoff_values = [0] + self.cutoffs for", "d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers =", "torch.arange(_tokens, device=inp.device) + _total_tokens ) _total_tokens += _tokens # emb_flat.index_copy_(0,", "torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj =", "div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = 
len(self.cutoffs) - 1 self.head_size", "# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. #", ":: [len*bsz x d_proj] target :: [len*bsz] ''' hidden =", "[0] + self.cutoffs for i in range(len(cutoff_values) - 1): l_idx,", "-.5) _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale", "self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) &", "embeddings.append(emb_i) indices.index_put_( (indices_i,), torch.arange(_tokens, device=inp.device) + _total_tokens ) _total_tokens +=", "(d ** -0.5) nn.init.normal_(weight, mean=0, std=std) _init_embed = functools.partial(_init_weight, default=0.02)", "(div_val ** i) self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0,", "bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias,", "self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i", "is built into the cutoffs if tie_projs is None: tie_projs", "tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)", "or padding_mask class AdaptiveEmbedding(nn.Module): \"\"\" Copy of transformers.AdaptiveEmbedding that works", "d_embed, init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)", "self.drop(emb_i) emb_i = F.linear(emb_i, self.emb_projs[i]) # Changes embeddings.append(emb_i) indices.index_put_( (indices_i,),", "= '\\n'.join(child_lines) return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed,", "embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) # embed.div_(self.emb_scale) return embed def 
_init_weight(weight,", "logit = F.linear(hidden, weight, bias=bias) else: if self.dropout > 0.0:", "= weights[i], biases[i], self.get_out_proj(i) hidden_i = hidden.index_select(0, indices_i) tail_logit_i =", "F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) #", "range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i", "emb_i = F.linear(emb_i, self.emb_projs[i]) # Changes embeddings.append(emb_i) indices.index_put_( (indices_i,), torch.arange(_tokens,", "range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) ) else:", "+ parastr) tmpstr = '\\n'.join(child_lines) return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def", "= F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:, -i] \\ + tail_logprob_i.gather(1,", "d_proj self.cutoffs = list(cutoffs) + [n_token] self.cutoff_ends = [0] +", "d_embed ** -.5) if d_proj != d_embed: # TODO #", "torch.nn as nn import torch.nn.functional as F class OptionalParameterList(nn.ParameterList): def", "(target >= l_idx) & (target < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze()", "self.out_layers_biases[0], self.get_out_proj(0)) nll = -F.log_softmax(logit, dim=-1) \\ .gather(1, target.unsqueeze(1)).squeeze(1) else:", "d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1],", "(div_val ** i) if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i))", "print(hidden.shape, target.shape) raise RuntimeError('Input and target should have the same", "*args, **kwargs): # [21-09-15 AG]: TODO may need to handle", "> 0.0 else nn.Identity() self.emb_scale = d_proj ** 0.5 self.cutoff_ends", "= torch.cat( [bias_i, self.cluster_bias], dim=0) 
weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj", "head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target, dtype=hidden.dtype,", "= d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers", "= torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) embeddings = [] indices =", "== 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) _init_embed(self.emb_layers[-1].weight, d_embed, init_scale)", "mask_i.nonzero().squeeze(-1) # shape (_tokens,) _tokens = indices_i.numel() if _tokens ==", "0)) _init_embed(self.emb_layers[-1].weight, d_embed, init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed", "self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs =", "hidden, weight, bias, proj): if proj is None: logit =", "2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under", "[bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0],", "self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx]", "d_proj, init_scale) def forward(self, inp, *args, **kwargs): if self.div_val ==", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= self.emb_layers[i](inp_i) emb_i = self.drop(emb_i) emb_i = F.linear(emb_i, self.emb_projs[i]) #", "i) self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale *", "= torch.cat(embeddings, dim=0) emb_flat = embeddings[indices] embed_shape = inp.size() +", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "out_layers_weights: self.out_layers_weights = nn.ParameterList() else: self.out_layers_weights = out_layers_weights self.out_layers_biases =", "if not out_layers_weights: self.out_layers_weights = nn.ParameterList() else: self.out_layers_weights = out_layers_weights", "typing import List, Optional import functools import torch import torch.nn", "forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs): # [21-09-15 AG]:", "the definition, just as [0] is built into the cutoffs", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False, bias_scale=0.0, dropout=0.0, ): super().__init__() self.n_token =", "list(cutoffs) + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val =", "cutoff_values[i + 1] mask_i = (target >= l_idx) & (target", "head_logprob.index_select(0, indices_i) if i == 0: logprob_i = head_logprob_i.gather(1, target_i[:,", "target_i[:, None]).squeeze(1) else: weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)", "to in writing, software # distributed under the License is", "Initialization has been fixed for the case when d_proj =", "RuntimeError('Input and target should have the same size ' 'in", "nll.mean() # TODO maybe cases for length or padding_mask class", ") self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx)) ) if not out_layers_weights: self.out_layers_weights.append(", "std=init_scale * d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))", "list(cutoffs) + [n_token] self.div_val = div_val self.d_proj = d_proj self.drop", "= [] indices = 
torch.zeros_like(inp_flat) # empty should work as", "cutoffs[-1] > max token _total_tokens = 0 # emb_flat =", "# See the License for the specific language governing permissions", "the case when d_proj = d_embed \"\"\" def __init__(self, n_token,", "self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if i", "device=inp.device) + _total_tokens ) _total_tokens += _tokens # emb_flat.index_copy_(0, indices_i,", "== 1: return self.shared_out_projs[0] else: return self.shared_out_projs[i] else: return self.out_projs[i]", "logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) if bias is not", "= dropout self.drop = nn.Dropout(dropout) if div_val == 1: if", "head_logprob = F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset", "[False] + tie_projs self.tie_projs = tie_projs if self.n_clusters > 0:", "mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) else: for i", "or agreed to in writing, software # distributed under the", "in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i", "Changes embeddings.append(emb_i) indices.index_put_( (indices_i,), torch.arange(_tokens, device=inp.device) + _total_tokens ) _total_tokens", "required by applicable law or agreed to in writing, software", "torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) if bias is not None: logit", "bias_i = self.out_layers_biases[i] if i == 0: weight_i = torch.cat(", "self.n_token = n_token self.d_embed = d_embed self.cutoffs = list(cutoffs) +", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i", "that works with fp16 by replacing the index_put_ operation Initialization", "* d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale) 
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) #", "== 0: return None elif len(self.shared_out_projs) == 1: return self.shared_out_projs[0]", "with the License. # You may obtain a copy of", "squeeze the last dimension because inputs are always given as", "nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values =", "i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i", "# [21-09-15 AG]: TODO may need to handle key_padding_mask '''", "= div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1", "== 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i =", "else: std = init_scale * (d ** -0.5) nn.init.normal_(weight, mean=0,", "weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0) head_logit", "replacing the index_put_ operation Initialization has been fixed for the", "to squeeze the last dimension because inputs are always given", "self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size =", "+ str(k) + '): ' + parastr) tmpstr = '\\n'.join(child_lines)", "if hidden.size(0) != target.size(0): print(hidden.shape, target.shape) raise RuntimeError('Input and target", "\"\"\" def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1,", "elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs) else: tie_projs", "0: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i, bias_i, proj_i", "[0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if", "if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0))", "- 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] mask_i", "(' + str(k) + '): ' + parastr) tmpstr =", "(inp_flat < r_idx) indices_i = 
mask_i.nonzero().squeeze(-1) # shape (_tokens,) _tokens", "compliance with the License. # You may obtain a copy", "is None: tie_projs = [] elif isinstance(tie_projs, bool): tie_projs =", "agreed to in writing, software # distributed under the License", "d_emb_i = d_embed // (div_val ** i) if tie_projs[i]: self.out_projs.append(None)", "= out_layers_weights self.out_layers_biases = nn.ParameterList() self.shared_out_projs = out_projs self.out_projs =", "= (target >= l_idx) & (target < r_idx) indices_i =", "indices_i.numel() == 0: continue target_i = target.index_select(0, indices_i) - l_idx", "List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0): super().__init__() self.n_token = n_token self.d_embed", "super().__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj", "d_embed, sparse=sample_softmax > 0)) _init_embed(self.emb_layers[-1].weight, d_embed, init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0,", "== 1: embed = self.emb_layers[0](inp) embed = self.drop(embed) if self.d_proj", "self.emb_projs[i]) # Changes embeddings.append(emb_i) indices.index_put_( (indices_i,), torch.arange(_tokens, device=inp.device) + _total_tokens", "distributed under the License is distributed on an \"AS IS\"", "= head_logprob_i[:, -i] \\ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order", "= nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token,", "n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = list(cutoffs)", "bound = bias_scale * d_proj ** -.5 nn.init.uniform_(bias, -bound, bound)", "self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs", "nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i)) ) for bias in self.out_layers_biases: bound", "has been fixed for the case when d_proj = d_embed", "= inp.new_zeros(inp_flat.size(0), self.d_proj) for i in range(len(self.cutoffs)): l_idx, r_idx =", 
"self.emb_layers[i](inp_i) emb_i = self.drop(emb_i) emb_i = F.linear(emb_i, self.emb_projs[i]) # Changes", "child_lines = [] for k, p in self._parameters.items(): if p", "else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return nll.mean() # TODO maybe", "self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0)) nll = -F.log_softmax(logit, dim=-1) \\ .gather(1,", "= 0 # emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj) for i in", "self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size +", "== 0: continue target_i = target.index_select(0, indices_i) - l_idx head_logprob_i", "if indices_i.numel() == 0: continue target_i = target.index_select(0, indices_i) -", "in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i", "[21-09-15 AG]: bake the first False into the definition, just", "self.head_size = self.shortlist_size + self.n_clusters # [21-09-15 AG]: bake the", "if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0)", "else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i", "nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights: self.out_layers_weights =", "+ [n_token] self.div_val = div_val self.d_proj = d_proj self.drop =", "i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx =", "if not p.is_cuda else ' (GPU {})'.format(p.get_device()) parastr = 'Parameter", "weight, bias=bias) else: if self.dropout > 0.0: logit = hidden", "express or implied. 
# See the License for the specific", "biases[0], self.get_out_proj(0) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob =", "self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx =", "index_put_ operation Initialization has been fixed for the case when", "except in compliance with the License. # You may obtain", "= d_embed self.cutoffs = list(cutoffs) + [n_token] self.div_val = div_val", "# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) def", "= next(self.parameters()) inp_flat = inp.view(-1) # Changes # emb_flat =", "+= _tokens # emb_flat.index_copy_(0, indices_i, emb_i) embeddings = torch.cat(embeddings, dim=0)", "+ (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) # embed.div_(self.emb_scale) return embed", "1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "bias in self.out_layers_biases: bound = bias_scale * d_proj ** -.5", "= -F.log_softmax(logit, dim=-1) \\ .gather(1, target.unsqueeze(1)).squeeze(1) else: # construct weights", "cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) -", "not use this file except in compliance with the License.", "return embed def _init_weight(weight, d : int, init_scale : Optional[float],", "0: continue target_i = target.index_select(0, indices_i) - l_idx head_logprob_i =", "def _init_weight(weight, d : int, init_scale : Optional[float], default=None): assert", "default if init_scale is None: std = default else: std", "** -.5) if d_proj != d_embed: # TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj,", "writing, software # distributed under the License is distributed on", "self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) if", "self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0)) nll = 
-F.log_softmax(logit, dim=-1) \\ .gather(1, target.unsqueeze(1)).squeeze(1)", "head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0) head_logit = self._compute_logit(hidden,", "r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue target_i", "+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order or keep_order: nll.index_copy_(0, indices_i,", "you may not use this file except in compliance with", "len(self.shared_out_projs) == 1: return self.shared_out_projs[0] else: return self.shared_out_projs[i] else: return", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "x d_proj] target :: [len*bsz] ''' hidden = hidden.reshape(-1, hidden.size(-1))", "self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]", "torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0] +", "# embed.div_(self.emb_scale) return embed def _init_weight(weight, d : int, init_scale", "self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)", "= (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i =", "in range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) )", "hidden.size(-1)) target = target.reshape(-1) if hidden.size(0) != target.size(0): print(hidden.shape, target.shape)", "F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset = 0", "construct weights and biases weights, biases = [], [] for", "range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]", "= functools.partial(_init_weight, default=0.01) ### Just for this codebase, we need", "as [0] is built into the cutoffs if 
tie_projs is", "self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i)) ) for bias in self.out_layers_biases:", "dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight,", "None]).squeeze(1) if self.keep_order or keep_order: nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)", "= inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i =", "d_embed: for i in range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append(", "bound) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj):", "'Parameter containing: [{} of size {}{}]'.format( torch.typename(p), size_str, device_str) child_lines.append('", "CONDITIONS OF ANY KIND, either express or implied. # See", "__init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False,", "self.shortlist_size + self.n_clusters # [21-09-15 AG]: bake the first False", "= [False] + tie_projs self.tie_projs = tie_projs if self.n_clusters >", "dim=1) logprob_i = head_logprob_i[:, -i] \\ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)", "tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) ) else: # self.out_projs", "padding_mask class AdaptiveEmbedding(nn.Module): \"\"\" Copy of transformers.AdaptiveEmbedding that works with", "_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale *", "= nn.ParameterList() self.shared_out_projs = out_projs self.out_projs = OptionalParameterList() self.dropout =", "dropout=0.0): super().__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs =", "is None: logit = 
F.linear(hidden, weight, bias=bias) else: if self.dropout", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self.out_layers_biases: bound = bias_scale * d_proj ** -.5 nn.init.uniform_(bias, -bound,", "AdaptiveEmbedding(nn.Module): \"\"\" Copy of transformers.AdaptiveEmbedding that works with fp16 by", "have the same size ' 'in the batch dimension.') if", "fixed for the case when d_proj = d_embed \"\"\" def", "(self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) # embed.div_(self.emb_scale) return embed def", "== 0: logit = self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0)) nll =", "= weights[0], biases[0], self.get_out_proj(0) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)", "weight.t())) if bias is not None: logit = logit +", "= indices_i.numel() if _tokens == 0: continue inp_i = inp_flat.index_select(0,", "+ 1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx", "proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:, -i] \\", "== 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i", "def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs): # [21-09-15", "# Changes # emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) embeddings", "self.d_proj = d_proj self.drop = nn.Dropout(dropout) if dropout > 0.0", "self.get_out_proj(i) hidden_i = hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i,", "r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i", "D) instead of (B, L) import src.models.nn.utils as U #", "if tie_projs is None: tie_projs = [] elif isinstance(tie_projs, bool):", "self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) 
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))", "or keep_order: nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0)", "= cutoff_values[i], cutoff_values[i + 1] mask_i = (target >= l_idx)", "-.5) if d_proj != d_embed: # TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))", "indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) if i ==", "import torch.nn as nn import torch.nn.functional as F class OptionalParameterList(nn.ParameterList):", "Optional[float], default=None): assert init_scale or default if init_scale is None:", "# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) else:", "to handle key_padding_mask ''' hidden :: [len*bsz x d_proj] target", "= hidden.reshape(-1, hidden.size(-1)) target = target.reshape(-1) if hidden.size(0) != target.size(0):", "logprob_i = head_logprob_i[:, -i] \\ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if", "embeddings = [] indices = torch.zeros_like(inp_flat) # empty should work", "class AdaptiveEmbedding(nn.Module): \"\"\" Copy of transformers.AdaptiveEmbedding that works with fp16", "else: # construct weights and biases weights, biases = [],", "self.out_projs = [None] * len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) ) if", "self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity() self.emb_scale", "in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed", "' + parastr) tmpstr = '\\n'.join(child_lines) return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module):", "parastr) tmpstr = '\\n'.join(child_lines) return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self,", "= keep_order def _compute_logit(self, hidden, weight, bias, proj): if proj", 
"device=param.device) embeddings = [] indices = torch.zeros_like(inp_flat) # empty should", "None elif len(self.shared_out_projs) == 1: return self.shared_out_projs[0] else: return self.shared_out_projs[i]", "= d_embed self.d_proj = d_proj self.cutoffs = list(cutoffs) + [n_token]", "i in range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed))", "maybe cases for length or padding_mask class AdaptiveEmbedding(nn.Module): \"\"\" Copy", "def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None,", "cutoffs, div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False, bias_scale=0.0, dropout=0.0, ): super().__init__()", "div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) _init_embed(self.emb_layers[-1].weight, d_embed,", "- l_idx head_logprob_i = head_logprob.index_select(0, indices_i) if i == 0:", "OR CONDITIONS OF ANY KIND, either express or implied. #", "[n_token] self.div_val = div_val self.d_proj = d_proj self.drop = nn.Dropout(dropout)", "range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i =", "1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) def forward(self, inp, *args, **kwargs): if", "limitations under the License. 
from typing import List, Optional import", "= self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i)", "self.cutoffs = list(cutoffs) + [n_token] self.cutoff_ends = [0] + self.cutoffs", "the License is distributed on an \"AS IS\" BASIS, #", "d_embed self.d_proj = d_proj self.cutoffs = list(cutoffs) + [n_token] self.cutoff_ends", "= F.linear(emb_i, self.emb_projs[i]) # Changes embeddings.append(emb_i) indices.index_put_( (indices_i,), torch.arange(_tokens, device=inp.device)", "tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:, -i] \\ +", "nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed,", "'in the batch dimension.') if self.n_clusters == 0: logit =", "!= d_embed: # TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) #", "F.linear(hidden, weight, bias=bias) else: if self.dropout > 0.0: logit =", "logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i, bias_i, proj_i =", "containing: [{} of size {}{}]'.format( torch.typename(p), size_str, device_str) child_lines.append(' ('", "been fixed for the case when d_proj = d_embed \"\"\"", "r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >=", "int, init_scale : Optional[float], default=None): assert init_scale or default if", "div_val self.d_proj = d_proj self.drop = nn.Dropout(dropout) if dropout >", "self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) _init_embed(self.emb_layers[-1].weight, d_embed, init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight,", ">= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze(-1) #", "hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i =", "shape (_tokens,) _tokens = indices_i.numel() if _tokens == 
0: continue", "for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i +", "torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) def forward(self,", "< r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue", "= torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias],", "= target.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) if", "l_idx) & (target < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel()", "= self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if", "sparse=sample_softmax > 0)) _init_embed(self.emb_layers[-1].weight, d_embed, init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale", ": Optional[float], default=None): assert init_scale or default if init_scale is", "functools.partial(_init_weight, default=0.02) _init_proj = functools.partial(_init_weight, default=0.01) ### Just for this", "std=init_scale * d_embed ** -.5) if d_proj != d_embed: #", ") if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) ) else: for", "**kwargs): if self.div_val == 1: embed = self.emb_layers[0](inp) embed =", "= '' if not p.is_cuda else ' (GPU {})'.format(p.get_device()) parastr", "class OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines = [] for k, p", "if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) _init_embed(self.emb_layers[-1].weight,", "law or agreed to in writing, software # distributed under", "proj logit = self.drop(logit) logit = logit @ weight.t() else:", "init_scale) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i],", "for this codebase, we 
need to squeeze the last dimension", "head_logprob_i[:, -i] \\ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order or", "> 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if", "embeddings = torch.cat(embeddings, dim=0) emb_flat = embeddings[indices] embed_shape = inp.size()", "self.shared_out_projs[i] else: return self.out_projs[i] def forward(self, hidden, target, keep_order=False, key_padding_mask=None,", "default=0.02) _init_proj = functools.partial(_init_weight, default=0.01) ### Just for this codebase,", ") else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i],", "l_idx, d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)", "else nn.Identity() self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0]", "else: param = next(self.parameters()) inp_flat = inp.view(-1) # Changes #", "raise RuntimeError('Input and target should have the same size '", "the first False into the definition, just as [0] is", "if self.div_val == 1: embed = self.emb_layers[0](inp) embed = self.drop(embed)", "self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx)) ) if", "in range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i +", "l_idx emb_i = self.emb_layers[i](inp_i) emb_i = self.drop(emb_i) emb_i = F.linear(emb_i,", "else ' (GPU {})'.format(p.get_device()) parastr = 'Parameter containing: [{} of", "first False into the definition, just as [0] is built", "len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token,", "we need to squeeze the last dimension because inputs are", "may need to handle key_padding_mask ''' hidden :: [len*bsz x", "= 
d_proj self.drop = nn.Dropout(dropout) if dropout > 0.0 else", "if i == 0: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else:", "n_token, d_embed, d_proj, cutoffs, div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False, bias_scale=0.0,", "indices_i, emb_i) embeddings = torch.cat(embeddings, dim=0) emb_flat = embeddings[indices] embed_shape", "self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx", "L, D) instead of (B, L) import src.models.nn.utils as U", "# emb_flat.index_copy_(0, indices_i, emb_i) embeddings = torch.cat(embeddings, dim=0) emb_flat =", "mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue target_i = target.index_select(0, indices_i)", "self.get_out_proj(0)) nll = -F.log_softmax(logit, dim=-1) \\ .gather(1, target.unsqueeze(1)).squeeze(1) else: #", "mean=0, std=init_scale * d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj,", "torch.nn.functional as F class OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines = []", "def get_out_proj(self, i): if self.tie_projs[i]: if len(self.shared_out_projs) == 0: return", "d_embed: # TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) # torch.nn.init.normal_(self.emb_projs[-1],", "default else: std = init_scale * (d ** -0.5) nn.init.normal_(weight,", "when d_proj = d_embed \"\"\" def __init__(self, n_token, d_embed, d_proj,", "_tokens = indices_i.numel() if _tokens == 0: continue inp_i =", "may obtain a copy of the License at # #", "else: logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) if bias is", "* 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) else: for i in 
range(len(self.cutoffs)):", "head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1)", "should work as long as cutoffs[-1] > max token _total_tokens", "d_proj != d_embed: # TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))", "False into the definition, just as [0] is built into", "hidden :: [len*bsz x d_proj] target :: [len*bsz] ''' hidden", "nn.ParameterList() else: self.out_layers_weights = out_layers_weights self.out_layers_biases = nn.ParameterList() self.shared_out_projs =", "mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) def forward(self, inp,", "= self.drop(embed) if self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0])", "d : int, init_scale : Optional[float], default=None): assert init_scale or", "p in self._parameters.items(): if p is not None: size_str =", "else: tie_projs = list(tie_projs) tie_projs = [False] + tie_projs self.tie_projs", "self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) ) else: # self.out_projs =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= div_val self.d_proj = d_proj self.drop = nn.Dropout(dropout) if dropout", "inputs are always given as (B, L, D) instead of", "self.shared_out_projs[0] else: return self.shared_out_projs[i] else: return self.out_projs[i] def forward(self, hidden,", "d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) #", "_total_tokens = 0 # emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj) for i", "sample_softmax=False, dropout=0.0): super().__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs", "may not use this file except in compliance with the", "_init_proj(self.emb_projs[-1], d_proj, init_scale) def forward(self, inp, *args, **kwargs): if 
self.div_val", "(GPU {})'.format(p.get_device()) parastr = 'Parameter containing: [{} of size {}{}]'.format(", "list(tie_projs) tie_projs = [False] + tie_projs self.tie_projs = tie_projs if", "emb_flat.index_copy_(0, indices_i, emb_i) embeddings = torch.cat(embeddings, dim=0) emb_flat = embeddings[indices]", "# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight, d_emb_i,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "super().__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = list(cutoffs)", "torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5) _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)", "_total_tokens += _tokens # emb_flat.index_copy_(0, indices_i, emb_i) embeddings = torch.cat(embeddings,", "''' hidden :: [len*bsz x d_proj] target :: [len*bsz] '''", "License. from typing import List, Optional import functools import torch", "this file except in compliance with the License. 
# You", "else: return self.shared_out_projs[i] else: return self.out_projs[i] def forward(self, hidden, target,", "target should have the same size ' 'in the batch", ") else: # self.out_projs = [None] * len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append(", "nn.Parameter(torch.zeros(n_token, d_embed)) ) else: for i in range(len(self.cutoffs)): l_idx, r_idx", "weights and biases weights, biases = [], [] for i", "i in range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i", "else: weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i) hidden_i =", "offset += logprob_i.size(0) return nll.mean() # TODO maybe cases for", "= 'x'.join(str(size) for size in p.size()) device_str = '' if", "nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return nll.mean() # TODO maybe cases", "Just for this codebase, we need to squeeze the last", "self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters", "= nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights: self.out_layers_weights", "embeddings[indices] embed_shape = inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale)", "1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) else: for i in range(len(self.cutoffs)): l_idx,", "= self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx)", "indices.index_put_( (indices_i,), torch.arange(_tokens, device=inp.device) + _total_tokens ) _total_tokens += _tokens", "self.out_layers_biases = nn.ParameterList() self.shared_out_projs = out_projs self.out_projs = OptionalParameterList() self.dropout", "1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i =", "1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) 
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale) #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())", "weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i) hidden_i = hidden.index_select(0,", "1] mask_i = (target >= l_idx) & (target < r_idx)", "indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return nll.mean() #", "self.cutoff_ends[i + 1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else:", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "d_proj = d_embed \"\"\" def __init__(self, n_token, d_embed, d_proj, cutoffs", "1: embed = self.emb_layers[0](inp) embed = self.drop(embed) if self.d_proj !=", "-0.5) nn.init.normal_(weight, mean=0, std=std) _init_embed = functools.partial(_init_weight, default=0.02) _init_proj =", "AG]: TODO may need to handle key_padding_mask ''' hidden ::", "init_scale) def forward(self, inp, *args, **kwargs): if self.div_val == 1:", "self.out_layers_weights = nn.ParameterList() else: self.out_layers_weights = out_layers_weights self.out_layers_biases = nn.ParameterList()", "div_val == 1: if d_proj != d_embed: for i in", "r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val **", "tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx -", "self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0)) nll", "logit = logit + bias return logit def get_out_proj(self, i):", "(B, L, D) instead of (B, L) import src.models.nn.utils as", 
"nn.init.normal_(weight, mean=0, std=std) _init_embed = functools.partial(_init_weight, default=0.02) _init_proj = functools.partial(_init_weight,", "need to handle key_padding_mask ''' hidden :: [len*bsz x d_proj]", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i +", "= nn.Dropout(dropout) if dropout > 0.0 else nn.Identity() self.emb_scale =", "= functools.partial(_init_weight, default=0.02) _init_proj = functools.partial(_init_weight, default=0.01) ### Just for", "bias_scale * d_proj ** -.5 nn.init.uniform_(bias, -bound, bound) self.keep_order =", "== 1: if d_proj != d_embed: for i in range(len(self.cutoffs)):", "d_embed // (div_val ** i) if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append(", "tie_projs if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias", "# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) embeddings = []", "if p is not None: size_str = 'x'.join(str(size) for size", "target_i[:, None]).squeeze(1) if self.keep_order or keep_order: nll.index_copy_(0, indices_i, -logprob_i) else:", "*args, **kwargs): if self.div_val == 1: embed = self.emb_layers[0](inp) embed", "need to squeeze the last dimension because inputs are always", "= self.out_layers_biases[i] if i == 0: weight_i = torch.cat( [weight_i,", "indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,) _tokens = indices_i.numel() if", "nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx)) ) if not", "Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # #", "rights reserved. 
# # Licensed under the Apache License, Version", "as cutoffs[-1] > max token _total_tokens = 0 # emb_flat", "size {}{}]'.format( torch.typename(p), size_str, device_str) child_lines.append(' (' + str(k) +", "and biases weights, biases = [], [] for i in", "self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat", "for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx", "i == 0: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i,", "def _compute_logit(self, hidden, weight, bias, proj): if proj is None:", ".gather(1, target.unsqueeze(1)).squeeze(1) else: # construct weights and biases weights, biases", "_compute_logit(self, hidden, weight, bias, proj): if proj is None: logit", "std=std) _init_embed = functools.partial(_init_weight, default=0.02) _init_proj = functools.partial(_init_weight, default=0.01) ###", "= default else: std = init_scale * (d ** -0.5)", "= d_embed // (div_val ** i) if tie_projs[i]: self.out_projs.append(None) else:", ": List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0): super().__init__() self.n_token = n_token", "F.linear(emb_i, self.emb_projs[i]) # Changes embeddings.append(emb_i) indices.index_put_( (indices_i,), torch.arange(_tokens, device=inp.device) +", "key_padding_mask ''' hidden :: [len*bsz x d_proj] target :: [len*bsz]", "governing permissions and # limitations under the License. 
from typing", "** i) if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) )", "as nn import torch.nn.functional as F class OptionalParameterList(nn.ParameterList): def extra_repr(self):", "out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i)) ) for bias in", "= inp.view(-1) # Changes # emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype,", "d_embed self.cutoffs = list(cutoffs) + [n_token] self.div_val = div_val self.d_proj", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj,", "** -0.5) nn.init.normal_(weight, mean=0, std=std) _init_embed = functools.partial(_init_weight, default=0.02) _init_proj", "r_idx) indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,) _tokens = indices_i.numel()", "[n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size", "- 1 self.head_size = self.shortlist_size + self.n_clusters # [21-09-15 AG]:", "import List, Optional import functools import torch import torch.nn as", "'' if not p.is_cuda else ' (GPU {})'.format(p.get_device()) parastr =", "range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed //", "if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i)) ) for", "if d_proj != d_embed: # TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj,", "nn import torch.nn.functional as F class OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines", "+ self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] 
self.n_clusters =", "or implied. # See the License for the specific language", "d_embed \"\"\" def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int],", "instead of (B, L) import src.models.nn.utils as U # AdaptiveEmbedding", "> max token _total_tokens = 0 # emb_flat = inp.new_zeros(inp_flat.size(0),", "if self.keep_order or keep_order: nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset", "(indices_i,), torch.arange(_tokens, device=inp.device) + _total_tokens ) _total_tokens += _tokens #", "1 self.head_size = self.shortlist_size + self.n_clusters # [21-09-15 AG]: bake", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "-logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return nll.mean() # TODO", "+ '): ' + parastr) tmpstr = '\\n'.join(child_lines) return tmpstr", "inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = self.drop(emb_i)", "= [] for k, p in self._parameters.items(): if p is", "embed = F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat =", "+ 1] mask_i = (inp_flat >= l_idx) & (inp_flat <", "as F class OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines = [] for", "self.n_clusters # [21-09-15 AG]: bake the first False into the", "self.drop = nn.Dropout(dropout) if div_val == 1: if d_proj !=", "dim=1) nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values", "init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5) if", "for i in range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj,", "= OptionalParameterList() self.dropout = dropout self.drop = nn.Dropout(dropout) if div_val", "License at # # 
http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= mask_i.nonzero().squeeze(-1) # shape (_tokens,) _tokens = indices_i.numel() if _tokens", "into the cutoffs if tie_projs is None: tie_projs = []", "self.out_projs[i] def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs): #", "cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0): super().__init__() self.n_token =", "not p.is_cuda else ' (GPU {})'.format(p.get_device()) parastr = 'Parameter containing:", "dimension.') if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0],", "else: if self.dropout > 0.0: logit = hidden @ proj", "= self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed // (div_val", "self.out_projs = OptionalParameterList() self.dropout = dropout self.drop = nn.Dropout(dropout) if", "(_tokens,) _tokens = indices_i.numel() if _tokens == 0: continue inp_i", "weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i]", "init_scale=1.0, sample_softmax=False, dropout=0.0): super().__init__() self.n_token = n_token self.d_embed = d_embed", "1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx -", "functools import torch import torch.nn as nn import torch.nn.functional as", "self.d_embed = d_embed self.cutoffs = list(cutoffs) + [n_token] self.div_val =", "as (B, L, D) instead of (B, L) import src.models.nn.utils", "the batch dimension.') if self.n_clusters == 0: logit = self._compute_logit(hidden,", "and target should have the same size ' 'in the", "bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i) hidden_i = hidden.index_select(0, indices_i)", "@ proj logit = self.drop(logit) logit = logit @ weight.t()", "(the \"License\"); # you may not use this file except", "self.out_projs.append(None) self.out_layers_biases.append( 
nn.Parameter(torch.zeros(n_token)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed))", "emb_flat = embeddings[indices] embed_shape = inp.size() + (self.d_proj,) embed =", "OptionalParameterList() self.dropout = dropout self.drop = nn.Dropout(dropout) if div_val ==", "is not None: size_str = 'x'.join(str(size) for size in p.size())", "# you may not use this file except in compliance", "l_idx)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))", "i) if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append(", "if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias =", "& (target < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() ==", "dim=0) emb_flat = embeddings[indices] embed_shape = inp.size() + (self.d_proj,) embed", "key_padding_mask=None, *args, **kwargs): # [21-09-15 AG]: TODO may need to", "0.0 else nn.Identity() self.emb_scale = d_proj ** 0.5 self.cutoff_ends =", "extra_repr(self): child_lines = [] for k, p in self._parameters.items(): if", "d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1],", "the same size ' 'in the batch dimension.') if self.n_clusters", "or default if init_scale is None: std = default else:", "torch.typename(p), size_str, device_str) child_lines.append(' (' + str(k) + '): '", "hidden.reshape(-1, hidden.size(-1)) target = target.reshape(-1) if hidden.size(0) != target.size(0): print(hidden.shape,", "len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters # [21-09-15", "0: logit = self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], 
self.get_out_proj(0)) nll = -F.log_softmax(logit,", "logit def get_out_proj(self, i): if self.tie_projs[i]: if len(self.shared_out_projs) == 0:", "if d_proj != d_embed: for i in range(len(self.cutoffs)): if tie_projs[i]:", "self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs", "= self.shortlist_size + self.n_clusters # [21-09-15 AG]: bake the first", "if div_val == 1: if d_proj != d_embed: for i", "self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs)", "div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0): super().__init__() self.n_token = n_token self.d_embed =", "0: return None elif len(self.shared_out_projs) == 1: return self.shared_out_projs[0] else:", "-.5 nn.init.uniform_(bias, -bound, bound) self.keep_order = keep_order def _compute_logit(self, hidden,", "for the case when d_proj = d_embed \"\"\" def __init__(self,", "std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) else: for i in", "next(self.parameters()) inp_flat = inp.view(-1) # Changes # emb_flat = torch.zeros([inp_flat.size(0),", "nn.Dropout(dropout) if dropout > 0.0 else nn.Identity() self.emb_scale = d_proj", "permissions and # limitations under the License. from typing import", "__init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False,", "mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i", "< r_idx) indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,) _tokens =", "keep_order: nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return", "(B, L) import src.models.nn.utils as U # AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)", "# # Unless required by applicable law or agreed to", "under the License. 
from typing import List, Optional import functools", "self.out_layers_weights = out_layers_weights self.out_layers_biases = nn.ParameterList() self.shared_out_projs = out_projs self.out_projs", "logit = logit @ weight.t() else: logit = torch.einsum('bd,de,ev->bv', (hidden,", "List, Optional import functools import torch import torch.nn as nn", "** -.5) _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0,", "torch.cat(embeddings, dim=0) emb_flat = embeddings[indices] embed_shape = inp.size() + (self.d_proj,)", "return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs,", "l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed", "= embeddings[indices] embed_shape = inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape)", "self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) -", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i)) ) for bias", "+ self.n_clusters # [21-09-15 AG]: bake the first False into", "None: logit = F.linear(hidden, weight, bias=bias) else: if self.dropout >", "= [0] + self.cutoffs for i in range(len(cutoff_values) - 1):", "self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax", "Version 2.0 (the \"License\"); # you may not use this", "F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:, -i] \\ + tail_logprob_i.gather(1, target_i[:,", "because inputs are always given as (B, L, D) instead", "'\\n'.join(child_lines) return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj,", "= head_logprob.index_select(0, indices_i) if i == 
0: logprob_i = head_logprob_i.gather(1,", "l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers_weights[0][l_idx:r_idx]", "self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) nll =", "by replacing the index_put_ operation Initialization has been fixed for", "indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue target_i =", "[len*bsz x d_proj] target :: [len*bsz] ''' hidden = hidden.reshape(-1,", "= self.drop(emb_i) emb_i = F.linear(emb_i, self.emb_projs[i]) # Changes embeddings.append(emb_i) indices.index_put_(", "d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx - l_idx,", "built into the cutoffs if tie_projs is None: tie_projs =", "in self._parameters.items(): if p is not None: size_str = 'x'.join(str(size)", "self.tie_projs = tie_projs if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters,", "logprob_i.size(0) return nll.mean() # TODO maybe cases for length or", "indices = torch.zeros_like(inp_flat) # empty should work as long as", "[weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i)", "None: std = default else: std = init_scale * (d", ") for bias in self.out_layers_biases: bound = bias_scale * d_proj", "target :: [len*bsz] ''' hidden = hidden.reshape(-1, hidden.size(-1)) target =", "= init_scale * (d ** -0.5) nn.init.normal_(weight, mean=0, std=std) _init_embed", "self.get_out_proj(0) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit,", "= self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i =", "self.drop(embed) if self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else:", "is None: std = default else: std = init_scale *", "implied. 
# See the License for the specific language governing", "bias=bias) else: if self.dropout > 0.0: logit = hidden @", "given as (B, L, D) instead of (B, L) import", "under the Apache License, Version 2.0 (the \"License\"); # you", "(inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze(-1)", "nn.Parameter(torch.zeros(n_token)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) ) else:", "_total_tokens ) _total_tokens += _tokens # emb_flat.index_copy_(0, indices_i, emb_i) embeddings", "= [tie_projs] * len(cutoffs) else: tie_projs = list(tie_projs) tie_projs =", "div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False, bias_scale=0.0, dropout=0.0, ): super().__init__() self.n_token", "head_proj = weights[0], biases[0], self.get_out_proj(0) head_logit = self._compute_logit(hidden, head_weight, head_bias,", "the License. from typing import List, Optional import functools import", "None: logit = logit + bias return logit def get_out_proj(self,", "= list(cutoffs) + [n_token] self.div_val = div_val self.d_proj = d_proj", "d_proj, cutoffs, div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False, bias_scale=0.0, dropout=0.0, ):", "assert init_scale or default if init_scale is None: std =", "not out_layers_weights: self.out_layers_weights = nn.ParameterList() else: self.out_layers_weights = out_layers_weights self.out_layers_biases", "len(self.shared_out_projs) == 0: return None elif len(self.shared_out_projs) == 1: return", "else: self.out_layers_weights = out_layers_weights self.out_layers_biases = nn.ParameterList() self.shared_out_projs = out_projs", "bias is not None: logit = logit + bias return", "= self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0)) nll = -F.log_softmax(logit, dim=-1) \\", "d_embed, d_proj, cutoffs, div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None, 
keep_order=False, bias_scale=0.0, dropout=0.0,", "All rights reserved. # # Licensed under the Apache License,", ":: [len*bsz] ''' hidden = hidden.reshape(-1, hidden.size(-1)) target = target.reshape(-1)", "l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat", "by applicable law or agreed to in writing, software #", "dropout=0.0, ): super().__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj", "nn.Parameter(torch.zeros(r_idx - l_idx)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx -", "weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i,", "# TODO maybe cases for length or padding_mask class AdaptiveEmbedding(nn.Module):", "self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed // (div_val **", "self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if i == 0: weight_i =", "bool): tie_projs = [tie_projs] * len(cutoffs) else: tie_projs = list(tie_projs)", "if dropout > 0.0 else nn.Identity() self.emb_scale = d_proj **", "1: if d_proj != d_embed: for i in range(len(self.cutoffs)): if", "target.unsqueeze(1)).squeeze(1) else: # construct weights and biases weights, biases =", "OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines = [] for k, p in", "self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj): if", "d_proj ** -.5 nn.init.uniform_(bias, -bound, bound) self.keep_order = keep_order def", "= mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue target_i = target.index_select(0,", "torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5) if d_proj !=", "\"\"\" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing", "torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale) else: for", "-F.log_softmax(logit, dim=-1) \\ 
.gather(1, target.unsqueeze(1)).squeeze(1) else: # construct weights and", "else: # self.out_projs = [None] * len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token))", "dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0] + self.cutoffs", "self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else: param =", "'): ' + parastr) tmpstr = '\\n'.join(child_lines) return tmpstr class", "# shape (_tokens,) _tokens = indices_i.numel() if _tokens == 0:", "_tokens # emb_flat.index_copy_(0, indices_i, emb_i) embeddings = torch.cat(embeddings, dim=0) emb_flat", "l_idx head_logprob_i = head_logprob.index_select(0, indices_i) if i == 0: logprob_i", "biases[i], self.get_out_proj(i) hidden_i = hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i,", "[], [] for i in range(len(self.cutoffs)): if self.div_val == 1:", "self.d_proj], dtype=param.dtype, device=param.device) embeddings = [] indices = torch.zeros_like(inp_flat) #", "= out_projs self.out_projs = OptionalParameterList() self.dropout = dropout self.drop =", "if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) ) else: for i", "if _tokens == 0: continue inp_i = inp_flat.index_select(0, indices_i) -", "tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,", "0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs", ") _total_tokens += _tokens # emb_flat.index_copy_(0, indices_i, emb_i) embeddings =", "nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))", "+ 1] mask_i = (target >= l_idx) & (target <", "None]).squeeze(1) else: weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i) hidden_i", "long as cutoffs[-1] > max token _total_tokens = 0 #", "bias_scale=0.0, dropout=0.0, 
): super().__init__() self.n_token = n_token self.d_embed = d_embed", "len(cutoffs) else: tie_projs = list(tie_projs) tie_projs = [False] + tie_projs", "+= logprob_i.size(0) return nll.mean() # TODO maybe cases for length", "1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i =", "# limitations under the License. from typing import List, Optional", "in self.out_layers_biases: bound = bias_scale * d_proj ** -.5 nn.init.uniform_(bias,", "if proj is None: logit = F.linear(hidden, weight, bias=bias) else:", "TODO may need to handle key_padding_mask ''' hidden :: [len*bsz", "the index_put_ operation Initialization has been fixed for the case", "dropout > 0.0 else nn.Identity() self.emb_scale = d_proj ** 0.5", "torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) embeddings = [] indices = torch.zeros_like(inp_flat)", "mean=0, std=std) _init_embed = functools.partial(_init_weight, default=0.02) _init_proj = functools.partial(_init_weight, default=0.01)", "dim=-1) \\ .gather(1, target.unsqueeze(1)).squeeze(1) else: # construct weights and biases", "for bias in self.out_layers_biases: bound = bias_scale * d_proj **", "[21-09-15 AG]: TODO may need to handle key_padding_mask ''' hidden", "return self.shared_out_projs[0] else: return self.shared_out_projs[i] else: return self.out_projs[i] def forward(self,", "should have the same size ' 'in the batch dimension.')", "# [21-09-15 AG]: bake the first False into the definition,", "+ bias return logit def get_out_proj(self, i): if self.tie_projs[i]: if", "if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) ) else: #", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "# construct weights and biases weights, biases = [], []", "case when d_proj = d_embed \"\"\" def __init__(self, n_token, d_embed,", "Unless required by applicable law or agreed to in writing,", "out_layers_weights self.out_layers_biases = 
nn.ParameterList() self.shared_out_projs = out_projs self.out_projs = OptionalParameterList()", "self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val ==", "fp16 by replacing the index_put_ operation Initialization has been fixed", "= F.linear(hidden, weight, bias=bias) else: if self.dropout > 0.0: logit", "hidden @ proj logit = self.drop(logit) logit = logit @", "0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat(", "self.shared_out_projs = out_projs self.out_projs = OptionalParameterList() self.dropout = dropout self.drop", "child_lines.append(' (' + str(k) + '): ' + parastr) tmpstr", "the specific language governing permissions and # limitations under the", "d_embed))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale)", "bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:, -i]", "biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0) head_logit =", "indices_i.numel() if _tokens == 0: continue inp_i = inp_flat.index_select(0, indices_i)", "embed.div_(self.emb_scale) return embed def _init_weight(weight, d : int, init_scale :", "= list(cutoffs) + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val", "applicable law or agreed to in writing, software # distributed", "* len(cutoffs) else: tie_projs = list(tie_projs) tie_projs = [False] +", "keep_order=False, bias_scale=0.0, dropout=0.0, ): super().__init__() self.n_token = n_token self.d_embed =", "return None elif len(self.shared_out_projs) == 1: return self.shared_out_projs[0] else: return", "self._parameters.items(): if p is not None: size_str = 'x'.join(str(size) for", "with fp16 by replacing the index_put_ operation Initialization has been", "r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = d_embed //", "weight, bias, proj): if proj is 
None: logit = F.linear(hidden,", "= logit + bias return logit def get_out_proj(self, i): if", "_init_weight(weight, d : int, init_scale : Optional[float], default=None): assert init_scale", "= torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0]", "\\ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order or keep_order: nll.index_copy_(0,", "self.cutoffs = list(cutoffs) + [n_token] self.div_val = div_val self.d_proj =", "''' hidden = hidden.reshape(-1, hidden.size(-1)) target = target.reshape(-1) if hidden.size(0)", "target_i = target.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i)", "def forward(self, inp, *args, **kwargs): if self.div_val == 1: embed", "into the definition, just as [0] is built into the", "& (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)", "i): if self.tie_projs[i]: if len(self.shared_out_projs) == 0: return None elif", "self.div_val = div_val self.d_proj = d_proj self.drop = nn.Dropout(dropout) if", "* d_embed ** -.5) if d_proj != d_embed: # TODO", "= self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i] bias_i", "in writing, software # distributed under the License is distributed", "self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights:", "// (div_val ** i) if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj,", "self.emb_layers[0](inp) embed = self.drop(embed) if self.d_proj != self.d_embed: embed =", "dtype=param.dtype, device=param.device) embeddings = [] indices = torch.zeros_like(inp_flat) # empty", "0 # emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj) for i in range(len(self.cutoffs)):", "weights[0], biases[0], self.get_out_proj(0) head_logit = 
self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob", "target.shape) raise RuntimeError('Input and target should have the same size", "d_proj self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()", "embed.mul_(self.emb_scale) # embed.div_(self.emb_scale) return embed def _init_weight(weight, d : int,", "1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] mask_i =", "import functools import torch import torch.nn as nn import torch.nn.functional", "= [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0]", "tie_projs = [] elif isinstance(tie_projs, bool): tie_projs = [tie_projs] *", "inp.new_zeros(inp_flat.size(0), self.d_proj) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i],", "inp, *args, **kwargs): if self.div_val == 1: embed = self.emb_layers[0](inp)", "= [None] * len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) ) if not", "else: weight_i = self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if i ==", "Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the", "definition, just as [0] is built into the cutoffs if", "just as [0] is built into the cutoffs if tie_projs", "self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i)", "token _total_tokens = 0 # emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj) for", "continue target_i = target.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0,", "TODO # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale", "else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]", "+ [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = 
div_val", "def extra_repr(self): child_lines = [] for k, p in self._parameters.items():", "+ 1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i", "= F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1)", "import torch.nn.functional as F class OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines =", "bias, proj): if proj is None: logit = F.linear(hidden, weight,", "= [], [] for i in range(len(self.cutoffs)): if self.div_val ==", "get_out_proj(self, i): if self.tie_projs[i]: if len(self.shared_out_projs) == 0: return None", ">= l_idx) & (target < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if", ") if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i)) )", "[tie_projs] * len(cutoffs) else: tie_projs = list(tie_projs) tie_projs = [False]", "(target < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0:", "1: return self.shared_out_projs[0] else: return self.shared_out_projs[i] else: return self.out_projs[i] def", "head_logprob_i = head_logprob.index_select(0, indices_i) if i == 0: logprob_i =", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "// (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight,", "self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat", "of (B, L) import src.models.nn.utils as U # AdaptiveEmbedding =", "in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i],", "License, Version 2.0 (the \"License\"); # you may not use", "self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights: self.out_layers_weights = nn.ParameterList() else:", "torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, 
self.cluster_bias], dim=0)", "= tie_projs if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))", "- l_idx, d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i **", "# You may obtain a copy of the License at", "= emb_flat.view(embed_shape) embed.mul_(self.emb_scale) # embed.div_(self.emb_scale) return embed def _init_weight(weight, d", "inp.view(-1) # Changes # emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)", "self.d_proj) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i", "else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx)) )", "l_idx, d_emb_i)) ) for bias in self.out_layers_biases: bound = bias_scale", "head_proj) head_logprob = F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)", "size in p.size()) device_str = '' if not p.is_cuda else", "if self.tie_projs[i]: if len(self.shared_out_projs) == 0: return None elif len(self.shared_out_projs)", "and # limitations under the License. 
from typing import List,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "of size {}{}]'.format( torch.typename(p), size_str, device_str) child_lines.append(' (' + str(k)", "[] indices = torch.zeros_like(inp_flat) # empty should work as long", "if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx", "= F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset =", "bake the first False into the definition, just as [0]", "self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj,", "Changes # emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) embeddings =", "self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) if tie_projs[i]:", "[] elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs) else:", "out_projs self.out_projs = OptionalParameterList() self.dropout = dropout self.drop = nn.Dropout(dropout)", "parastr = 'Parameter containing: [{} of size {}{}]'.format( torch.typename(p), size_str,", "CORPORATION. All rights reserved. 
# # Licensed under the Apache", "target.size(0): print(hidden.shape, target.shape) raise RuntimeError('Input and target should have the", "def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0,", "return logit def get_out_proj(self, i): if self.tie_projs[i]: if len(self.shared_out_projs) ==", "_init_proj(self.emb_projs[-1], d_proj, init_scale) else: for i in range(len(self.cutoffs)): l_idx, r_idx", "param = next(self.parameters()) inp_flat = inp.view(-1) # Changes # emb_flat", "class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, tie_projs=None,", "# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj) for i in range(len(self.cutoffs)): l_idx,", "nn.Parameter(torch.zeros(d_proj, d_embed)) ) else: # self.out_projs = [None] * len(self.cutoffs)", "as long as cutoffs[-1] > max token _total_tokens = 0", "offset = 0 cutoff_values = [0] + self.cutoffs for i", "nn.init.uniform_(bias, -bound, bound) self.keep_order = keep_order def _compute_logit(self, hidden, weight,", "hidden.size(0) != target.size(0): print(hidden.shape, target.shape) raise RuntimeError('Input and target should", "proj is None: logit = F.linear(hidden, weight, bias=bias) else: if", "forward(self, inp, *args, **kwargs): if self.div_val == 1: embed =", "F class OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines = [] for k,", "tmpstr = '\\n'.join(child_lines) return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token,", "# empty should work as long as cutoffs[-1] > max", "the License for the specific language governing permissions and #", "= nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax >", "r_idx = cutoff_values[i], cutoff_values[i + 1] mask_i = (target >=", "(c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
# # Licensed", "# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale *", "TODO maybe cases for length or padding_mask class AdaptiveEmbedding(nn.Module): \"\"\"", "Apache License, Version 2.0 (the \"License\"); # you may not", "range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i =", "weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:,", "else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) ) else: # self.out_projs = [None]", "same size ' 'in the batch dimension.') if self.n_clusters ==", "either express or implied. # See the License for the", "0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i =", "* (d ** -0.5) nn.init.normal_(weight, mean=0, std=std) _init_embed = functools.partial(_init_weight,", "max token _total_tokens = 0 # emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)", "- l_idx)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx,", "[0] is built into the cutoffs if tie_projs is None:", "weight.t() else: logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) if bias", "size_str, device_str) child_lines.append(' (' + str(k) + '): ' +", "self.cutoff_ends[i + 1] d_emb_i = d_embed // (div_val ** i)", "self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx))", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i", "continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i)", "(hidden, proj, weight.t())) if bias is not None: 
logit =", "self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) # Changes", "_init_proj = functools.partial(_init_weight, default=0.01) ### Just for this codebase, we", "for k, p in self._parameters.items(): if p is not None:", "proj, weight.t())) if bias is not None: logit = logit", "the last dimension because inputs are always given as (B,", "d_embed)) ) else: for i in range(len(self.cutoffs)): l_idx, r_idx =", "= d_embed \"\"\" def __init__(self, n_token, d_embed, d_proj, cutoffs :", "[0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters", "p.is_cuda else ' (GPU {})'.format(p.get_device()) parastr = 'Parameter containing: [{}", "d_proj, init_scale) else: for i in range(len(self.cutoffs)): l_idx, r_idx =", "**kwargs): # [21-09-15 AG]: TODO may need to handle key_padding_mask", "self.d_proj = d_proj self.cutoffs = list(cutoffs) + [n_token] self.cutoff_ends =", "= 0 cutoff_values = [0] + self.cutoffs for i in", "size_str = 'x'.join(str(size) for size in p.size()) device_str = ''", "** i) self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale", "tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order or keep_order: nll.index_copy_(0, indices_i, -logprob_i)", "= self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size", "i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]", "d_embed)) ) else: # self.out_projs = [None] * len(self.cutoffs) self.out_projs.append(None)", "!= self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters())", "indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = self.drop(emb_i) emb_i", "tie_projs = [tie_projs] * len(cutoffs) else: tie_projs = list(tie_projs) tie_projs", "isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs) else: tie_projs =", 
"self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i] bias_i =", "= d_proj self.cutoffs = list(cutoffs) + [n_token] self.cutoff_ends = [0]", "size ' 'in the batch dimension.') if self.n_clusters == 0:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "emb_i = self.emb_layers[i](inp_i) emb_i = self.drop(emb_i) emb_i = F.linear(emb_i, self.emb_projs[i])", "> 0.0: logit = hidden @ proj logit = self.drop(logit)", "logit = self.drop(logit) logit = logit @ weight.t() else: logit", "head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0) head_logit = self._compute_logit(hidden, head_weight,", "elif len(self.shared_out_projs) == 1: return self.shared_out_projs[0] else: return self.shared_out_projs[i] else:", "d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1], d_proj, init_scale)", "work as long as cutoffs[-1] > max token _total_tokens =", "+ _total_tokens ) _total_tokens += _tokens # emb_flat.index_copy_(0, indices_i, emb_i)", "emb_i) embeddings = torch.cat(embeddings, dim=0) emb_flat = embeddings[indices] embed_shape =", "= nn.ParameterList() else: self.out_layers_weights = out_layers_weights self.out_layers_biases = nn.ParameterList() self.shared_out_projs", "{}{}]'.format( torch.typename(p), size_str, device_str) child_lines.append(' (' + str(k) + '):", "emb_flat.view(embed_shape) embed.mul_(self.emb_scale) # embed.div_(self.emb_scale) return embed def _init_weight(weight, d :", "'x'.join(str(size) for size in p.size()) device_str = '' if not", "batch dimension.') if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers_weights[0],", "init_scale * (d ** -0.5) nn.init.normal_(weight, mean=0, std=std) _init_embed =", "d_emb_i, init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1], 
mean=0, std=init_scale * 1./self.emb_scale)", "weights, biases = [], [] for i in range(len(self.cutoffs)): if", "self.dropout = dropout self.drop = nn.Dropout(dropout) if div_val == 1:", "+ self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val", "hidden = hidden.reshape(-1, hidden.size(-1)) target = target.reshape(-1) if hidden.size(0) !=", "cases for length or padding_mask class AdaptiveEmbedding(nn.Module): \"\"\" Copy of", "length or padding_mask class AdaptiveEmbedding(nn.Module): \"\"\" Copy of transformers.AdaptiveEmbedding that", "None: tie_projs = [] elif isinstance(tie_projs, bool): tie_projs = [tie_projs]", "{})'.format(p.get_device()) parastr = 'Parameter containing: [{} of size {}{}]'.format( torch.typename(p),", "init_scale) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale) _init_proj(self.emb_projs[-1],", "= [] elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)", "l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val", "not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) ) else: for i in", "\"License\"); # you may not use this file except in", "logit = self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0)) nll = -F.log_softmax(logit, dim=-1)", "-i] \\ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order or keep_order:", "None: size_str = 'x'.join(str(size) for size in p.size()) device_str =", "d_emb_i)) ) for bias in self.out_layers_biases: bound = bias_scale *", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "= n_token self.d_embed = d_embed self.cutoffs = list(cutoffs) + [n_token]", "> 0)) _init_embed(self.emb_layers[-1].weight, d_embed, init_scale) # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, 
std=init_scale *", "self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) ) else: # self.out_projs = [None] *", "= bias_scale * d_proj ** -.5 nn.init.uniform_(bias, -bound, bound) self.keep_order", "always given as (B, L, D) instead of (B, L)", "if len(self.shared_out_projs) == 0: return None elif len(self.shared_out_projs) == 1:", "# self.out_projs = [None] * len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) )", "indices_i) if i == 0: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)", "for i in range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i],", "= inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) # embed.div_(self.emb_scale)", "# distributed under the License is distributed on an \"AS", "logit @ weight.t() else: logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))", "' 'in the batch dimension.') if self.n_clusters == 0: logit", "# Unless required by applicable law or agreed to in", "return self.out_projs[i] def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):", "return self.shared_out_projs[i] else: return self.out_projs[i] def forward(self, hidden, target, keep_order=False,", "for size in p.size()) device_str = '' if not p.is_cuda", "nll = -F.log_softmax(logit, dim=-1) \\ .gather(1, target.unsqueeze(1)).squeeze(1) else: # construct", "= hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i", "codebase, we need to squeeze the last dimension because inputs", "= n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs =", "# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5) if d_proj", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "target, keep_order=False, key_padding_mask=None, *args, **kwargs): # [21-09-15 AG]: TODO may", "cutoffs if 
tie_projs is None: tie_projs = [] elif isinstance(tie_projs,", "d_proj] target :: [len*bsz] ''' hidden = hidden.reshape(-1, hidden.size(-1)) target", "of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_", "init_scale is None: std = default else: std = init_scale", "inp_flat = inp.view(-1) # Changes # emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],", "self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i =", "_tokens == 0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx", "You may obtain a copy of the License at #", "nn.ParameterList() self.shared_out_projs = out_projs self.out_projs = OptionalParameterList() self.dropout = dropout", "= target.reshape(-1) if hidden.size(0) != target.size(0): print(hidden.shape, target.shape) raise RuntimeError('Input", ": int, init_scale : Optional[float], default=None): assert init_scale or default", "torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] >", "this codebase, we need to squeeze the last dimension because", "proj): if proj is None: logit = F.linear(hidden, weight, bias=bias)", "proj_i = weights[i], biases[i], self.get_out_proj(i) hidden_i = hidden.index_select(0, indices_i) tail_logit_i", "tie_projs = list(tie_projs) tie_projs = [False] + tie_projs self.tie_projs =", "nn.Identity() self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] +", "+ tie_projs self.tie_projs = tie_projs if self.n_clusters > 0: self.cluster_weight", "* len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) ) if not out_layers_weights: self.out_layers_weights.append(", "biases weights, biases = [], [] for i in range(len(self.cutoffs)):", "bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i] bias_i = self.out_layers_biases[i]", "== 0: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i, 
bias_i,", "the Apache License, Version 2.0 (the \"License\"); # you may", "p.size()) device_str = '' if not p.is_cuda else ' (GPU", "if self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else: param", "std = default else: std = init_scale * (d **", "logit + bias return logit def get_out_proj(self, i): if self.tie_projs[i]:", "self.tie_projs[i]: if len(self.shared_out_projs) == 0: return None elif len(self.shared_out_projs) ==", "== 0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i", "!= d_embed: for i in range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None) else:", "= self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) nll", "init_scale : Optional[float], default=None): assert init_scale or default if init_scale", "= head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i, bias_i, proj_i = weights[i],", "l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] mask_i = (target", "functools.partial(_init_weight, default=0.01) ### Just for this codebase, we need to" ]
[ "list): field = json.dumps(field) row_out.append(field) writer.writerow(row_out) @main.command() @click.argument('new_table_name') @click.argument('old_table_name') @click.option('--connection-string')", "logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name)) conn =", "Carto'.format(table_name)) carto.load(logger, db_schema, table_name, load_postgis, table_schema, connection_string, rows, indexes_fields, truncate)", "fopen(input_file) as file: rows = csv.reader(file) if skip_headers: next(rows) if", "rows = csv.reader(file) if skip_headers: next(rows) if re.match(carto.carto_connection_string_regex, connection_string) !=", "if indexes_fields != None: indexes_fields = indexes_fields.split(',') if re.match(carto.carto_connection_string_regex, connection_string)", "rows, indexes_fields, truncate) else: connection_string = get_connection_string(connection_string) engine, storage =", "truncate? carto does. Makes this idempotent logger.info('{} - Writing to", "in storage.iter(table_name): row_out = [] for field in row: if", "get_table_schema(table_schema_path) ## TODO: csv settings? use Frictionless Data csv standard?", "TABLE {} RENAME TO {}'.format(new_table_name, old_table_name) sql3 = 'DROP TABLE", "engine = create_engine(connection_string) storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid,", "smart_open(file, mode=mode) def get_table_schema(table_schema_path): with fopen(table_schema_path) as file: contents =", "csv standard? ## TODO: support line delimted json? 
with fopen(input_file)", "engine = create_engine(connection_string) if engine.dialect.driver == 'psycopg2': logger.info('Swapping tables using", "output_file, db_schema, geometry_support): connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string,", "codecs import logging from logging.config import dictConfig import click import", "exception: {}\".format(str(value)), exc_info=(type, value, tb)) sys.excepthook = exception_handler return logger", "as cur: sql = 'ALTER TABLE \"{}\" RENAME TO \"{}_old\";'.format(old_table_name,", "storage.describe(table_name) fields = map(lambda x: x['name'], descriptor['fields']) writer.writerow(fields) if geometry_support", "storage.create(table_name, table_schema, indexes_fields=indexes_fields) @main.command() @click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support')", "allow table modification within a transaction, so make individual transactions:", "exist?\".format(new_table_name)) rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)", "import sys import os import re import codecs import logging", "csv.writer(file) descriptor = storage.describe(table_name) fields = map(lambda x: x['name'], descriptor['fields'])", "SELECT ON {} TO {}'.format(old_table_name, user.strip())) # Oracle does not", "raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required') return connection_string", "sys.excepthook = exception_handler return logger @click.group() def main(): pass def", "@click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields') @click.option('--upsert', is_flag=True)", "TABLE 
\"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit() except: conn.rollback() raise conn.close() elif engine.dialect.driver", "logger.info('{} - Creating table using SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema, indexes_fields=indexes_fields) @main.command()", "logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path) ## TODO: csv settings?", "does. Makes this idempotent logger.info('{} - Writing to table using", "mode == 'r': return sys.stdin elif mode == 'w': return", "Creating table using Carto'.format(table_name)) return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists,", "using SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema, indexes_fields=indexes_fields) @main.command() @click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file')", "Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required') return connection_string def", "return connection_string def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None): engine =", "yaml.load(file) dictConfig(config) except: FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s' logging.basicConfig(format=FORMAT,", "indexes_fields, truncate) else: connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string,", "'cx_oracle': logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name)) conn", "fopen(output_file, mode='w') as file: writer = csv.writer(file) descriptor = storage.describe(table_name)", "dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True) return engine, storage def fopen(file,", "logging from logging.config import dictConfig import click import yaml from", 
"re.match(carto.carto_connection_string_regex, connection_string) != None: if select_users != None: select_users =", "== 'r': return sys.stdin elif mode == 'w': return sys.stdout", "field in row: if isinstance(field, dict) or isinstance(field, list): field", "conn.execute(rb_sql) raise try: conn.execute(sql3) except: logger.error(\"Could not drop {}_old table.", "select_users = [] logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name,", "@click.option('--db-schema') @click.option('--select-users', help='Users to grant SELECT on updated table') @click.option('--logging-config',", "engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid) ## TODO: truncate?", "= 'DROP TABLE {}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2 = 'ALTER TABLE {}_old", "get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) descriptor = storage.describe(table_name)", "csv standard? ## TODO: support line delimited json? with fopen(output_file,", ". import postgres from . import carto csv.field_size_limit(sys.maxsize) def get_logger(logging_config):", "!= None: load_postgis = geometry_support == 'postgis' if indexes_fields !=", "if upsert: postgres.upsert(engine, db_schema, table_name, table_schema, rows) elif geometry_support ==", "indexes_fields != None: indexes_fields = indexes_fields.split(',') logger.info('{} - Writing to", "drop {}_old table. Do you have permission?\".format(old_table_name)) rb_sql1 = 'DROP", "engine.raw_connection() try: with conn.cursor() as cur: sql = 'ALTER TABLE", "else: storage.write(table_name, rows) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid')", "## TODO: truncate? carto does. 
Makes this idempotent logger.info('{} -", "create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid) ## TODO: csv settings? use", "to {}.\".format(old_table_name)) raise else: raise Exception('`{}` not supported by swap_table'.format(engine.dialect.driver))", "= get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) logger.info('{} -", "logger.info('{} - Writing to table using SQLAlchemy'.format(table_name)) if table_schema_path !=", "writer.writerow(row_out) @main.command() @click.argument('new_table_name') @click.argument('old_table_name') @click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users', help='Users to grant", "if not isinstance(contents, str): contents = contents.decode('utf-8') return json.loads(contents) @main.command()", "tables using psycopg2: {} - {}'.format(new_table_name, old_table_name)) conn = engine.raw_connection()", "@click.option('--from-srid') @click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields') @click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True, default=False) @click.option('--logging-config',", "@click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def create_table(table_name,", "'w': return sys.stdout else: return smart_open(file, mode=mode) def get_table_schema(table_schema_path): with", "@click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') def describe_table(table_name, connection_string, output_file, db_schema,", "return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, 
connection_string) connection_string =", "def describe_table(table_name, connection_string, output_file, db_schema, geometry_support): connection_string = get_connection_string(connection_string) engine,", "with open(logging_config) as file: config = yaml.load(file) dictConfig(config) except: FORMAT", "upsert, truncate, logging_config): logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path) ##", "Data csv standard? ## TODO: support line delimited json? with", "support line delimited json? with fopen(output_file, mode='w') as file: writer", "table using Carto'.format(table_name)) carto.load(logger, db_schema, table_name, load_postgis, table_schema, connection_string, rows,", "old_table_name) sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name)", "import Storage from smart_open import smart_open from . import postgres", "%(levelname)s [%(name)s] %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr) logger = logging.getLogger('the_el') def", "sql = 'ALTER TABLE \"{}\" RENAME TO \"{}_old\";'.format(old_table_name, old_table_name) +\\", "@click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def write(table_name, table_schema_path,", "sys.stdin elif mode == 'w': return sys.stdout else: return smart_open(file,", "table_schema, rows) else: storage.write(table_name, rows) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema')", "x['name'], descriptor['fields']) writer.writerow(fields) if geometry_support == None and engine.dialect.driver ==", "@click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True, default=False) 
@click.option('--logging-config', default='logging_config.conf')", "TO \"{}\";'.format(new_table_name, old_table_name) +\\ 'DROP TABLE \"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit() except:", "load_postgis = geometry_support == 'postgis' if indexes_fields != None: indexes_fields", "old_table_name) conn.execute(rb_sql2) raise try: for sql in grants_sql: conn.execute(sql) except:", "if skip_headers: next(rows) if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis =", "@main.command() @click.argument('new_table_name') @click.argument('old_table_name') @click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users', help='Users to grant SELECT", "not rename {} table. Does it exist?\".format(old_table_name)) raise try: conn.execute(sql2)", "to_srid=to_srid) ## TODO: csv settings? use Frictionless Data csv standard?", "have permission?\".format(old_table_name)) rb_sql1 = 'DROP TABLE {}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2 =", "RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql2) raise try: for sql in", "create_storage_adaptor(connection_string, db_schema, geometry_support) logger.info('{} - Creating table using SQLAlchemy'.format(table_name)) storage.create(table_name,", "yaml from sqlalchemy import create_engine from jsontableschema_sql import Storage from", "to_srid=to_srid, views=True) return engine, storage def fopen(file, mode='r'): if file", "== 'cx_oracle': logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name))", "TO {}'.format(new_table_name, old_table_name) sql3 = 'DROP TABLE {}_old'.format(old_table_name) try: conn.execute(sql1)", "storage.write(table_name, rows) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') 
@click.option('--to-srid')", "@click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config', default='logging_config.conf') def read(table_name,", "within a transaction, so make individual transactions: sql1 = 'ALTER", "TABLE {}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2 = 'ALTER TABLE {}_old RENAME TO", "db_schema, table_name, table_schema, rows) elif geometry_support == None and engine.dialect.driver", "{}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name,", "db_schema, new_table_name, old_table_name, select_users, connection_string) connection_string = get_connection_string(connection_string) engine =", "sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name) sql3", "Carto: {} - {}'.format(new_table_name, old_table_name)) return carto.swap_table(logger, db_schema, new_table_name, old_table_name,", "connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) logger.info('{}", "old_table_name) sql3 = 'DROP TABLE {}_old'.format(old_table_name) try: conn.execute(sql1) except: logger.error(\"Could", "connection_string, output_file, db_schema, geometry_support): connection_string = get_connection_string(connection_string) engine, storage =", "{} TO {}'.format(old_table_name, user.strip())) # Oracle does not allow table", "import click import yaml from sqlalchemy import create_engine from jsontableschema_sql", "old_table_name)) conn = engine.raw_connection() try: with conn.cursor() as cur: sql", "%(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr) logger = logging.getLogger('the_el') def exception_handler(type, value,", "engine.dialect.driver == 'psycopg2': postgres.copy_from(engine, 
table_name, table_schema, rows) else: storage.write(table_name, rows)", "Oracle does not allow table modification within a transaction, so", "== 'psycopg2': logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name))", "dictConfig(config) except: FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO,", "connection_string) != None: if select_users != None: select_users = select_users.split(',')", "= contents.decode('utf-8') return json.loads(contents) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support')", "[] grants_sql = [] for user in select_users: grants_sql.append('GRANT SELECT", "get_logger(logging_config) table_schema = get_table_schema(table_schema_path) if indexes_fields != None: indexes_fields =", "a transaction, so make individual transactions: sql1 = 'ALTER TABLE", "use Frictionless Data csv standard? 
## TODO: support line delimted", "create_engine(connection_string) storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True) return", "user.strip())) # Oracle does not allow table modification within a", "None: load_postgis = geometry_support == 'postgis' if indexes_fields != None:", "= geometry_support == 'postgis' logger.info('{} - Creating table using Carto'.format(table_name))", "file) @main.command() @click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True,", "logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name)) conn =", "import create_engine from jsontableschema_sql import Storage from smart_open import smart_open", "raise try: conn.execute(sql2) except: logger.error(\"Could not rename {} table. 
Does", "environment variable or `--connection-string` option required') return connection_string def create_storage_adaptor(connection_string,", "= create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid) ## TODO: csv settings?", "conn = engine.raw_connection() try: with conn.cursor() as cur: sql =", "\"{}\" RENAME TO \"{}_old\";'.format(old_table_name, old_table_name) +\\ 'ALTER TABLE \"{}\" RENAME", "does not allow table modification within a transaction, so make", "RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql) raise try: conn.execute(sql3) except: logger.error(\"Could", "from_srid=from_srid, to_srid=to_srid, views=True) return engine, storage def fopen(file, mode='r'): if", "@click.option('--if-not-exists', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def create_table(table_name, table_schema_path, connection_string, db_schema,", "sqlalchemy import create_engine from jsontableschema_sql import Storage from smart_open import", "line delimited json? 
with fopen(output_file, mode='w') as file: writer =", "indexes_fields, upsert, truncate, logging_config): logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path)", "select_users.split(',') else: select_users = [] grants_sql = [] for user", "'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql2) raise try:", "TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name) sql2 = 'ALTER TABLE", "or `--connection-string` option required') return connection_string def create_storage_adaptor(connection_string, db_schema, geometry_support,", "connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid,", "= [] for user in select_users: grants_sql.append('GRANT SELECT ON {}", "None: select_users = select_users.split(',') else: select_users = [] grants_sql =", "engine.dialect.driver == 'psycopg2': logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name,", "Makes this idempotent logger.info('{} - Writing to table using SQLAlchemy'.format(table_name))", "and engine.dialect.driver == 'psycopg2': postgres.copy_to(engine, table_name, file) else: for row", "engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid) ## TODO:", "truncate, logging_config): logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path) ## TODO:", "rows) elif geometry_support == None and engine.dialect.driver == 'psycopg2': postgres.copy_from(engine,", "db_schema, indexes_fields, geometry_support, if_not_exists, logging_config): logger = get_logger(logging_config) table_schema =", "get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid) ## TODO:", "@click.option('--to-srid') @click.option('--logging-config', 
default='logging_config.conf') def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid,", "default=False) @click.option('--logging-config', default='logging_config.conf') def write(table_name, table_schema_path, connection_string, input_file, db_schema, geometry_support,", "!= None: load_postgis = geometry_support == 'postgis' logger.info('{} - Creating", "geometry_support, from_srid, skip_headers, indexes_fields, upsert, truncate, logging_config): logger = get_logger(logging_config)", "db_schema, geometry_support, from_srid=from_srid) ## TODO: truncate? carto does. Makes this", "= 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name) sql3 =", "== 'psycopg2': postgres.copy_to(engine, table_name, file) else: for row in storage.iter(table_name):", "table_name, table_schema, rows) else: storage.write(table_name, rows) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file')", "sys.stdout else: return smart_open(file, mode=mode) def get_table_schema(table_schema_path): with fopen(table_schema_path) as", "main(): pass def get_connection_string(connection_string): connection_string = os.getenv('CONNECTION_STRING', connection_string) if connection_string", "@click.option('--db-schema') @click.option('--geometry-support') def describe_table(table_name, connection_string, output_file, db_schema, geometry_support): connection_string =", "if table_schema_path != None: table_schema = get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema) else:", "swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config): logger = get_logger(logging_config) if", "- {}'.format(new_table_name, old_table_name)) return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string)", "@click.option('--geometry-support') def describe_table(table_name, connection_string, 
output_file, db_schema, geometry_support): connection_string = get_connection_string(connection_string)", "exception_handler return logger @click.group() def main(): pass def get_connection_string(connection_string): connection_string", "row_out.append(field) writer.writerow(row_out) @main.command() @click.argument('new_table_name') @click.argument('old_table_name') @click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users', help='Users to", "== 'psycopg2': postgres.copy_from(engine, table_name, table_schema, rows) else: storage.write(table_name, rows) @main.command()", "= engine.raw_connection() try: with conn.cursor() as cur: sql = 'ALTER", "truncate) else: connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema,", "rb_sql1 = 'DROP TABLE {}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2 = 'ALTER TABLE", "engine.dialect.driver == 'psycopg2': postgres.copy_to(engine, table_name, file) else: for row in", "None: load_postgis = geometry_support == 'postgis' logger.info('{} - Creating table", "SQLAlchemy'.format(table_name)) if table_schema_path != None: table_schema = get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema)", "if re.match(carto.carto_connection_string_regex, connection_string) != None: if select_users != None: select_users", "exist?\".format(old_table_name)) raise try: conn.execute(sql2) except: logger.error(\"Could not rename {} table.", "raise try: conn.execute(sql3) except: logger.error(\"Could not drop {}_old table. Do", "csv.reader(file) if skip_headers: next(rows) if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis", "storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid) ## TODO: truncate? 
carto", "sql in grants_sql: conn.execute(sql) except: logger.error(\"Could not grant all permissions", "= exception_handler return logger @click.group() def main(): pass def get_connection_string(connection_string):", "TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql) raise try: conn.execute(sql3)", "select_users = [] grants_sql = [] for user in select_users:", "with fopen(input_file) as file: rows = csv.reader(file) if skip_headers: next(rows)", "table using SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema, indexes_fields=indexes_fields) @main.command() @click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string')", "table_schema = get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema) else: storage.describe(table_name) if upsert: postgres.upsert(engine,", "logger = get_logger(logging_config) if re.match(carto.carto_connection_string_regex, connection_string) != None: if select_users", "sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name) sql2", "on updated table') @click.option('--logging-config', default='logging_config.conf') def swap_table(new_table_name, old_table_name, connection_string, db_schema,", "if connection_string == None: raise Exception('`CONNECTION_STRING` environment variable or `--connection-string`", "table_schema, if_not_exists, indexes_fields, connection_string) connection_string = get_connection_string(connection_string) engine, storage =", "TO {}'.format(old_table_name, user.strip())) # Oracle does not allow table modification", "geometry_support, from_srid=from_srid, to_srid=to_srid) ## TODO: csv settings? 
use Frictionless Data", "TABLE \"{}\" RENAME TO \"{}_old\";'.format(old_table_name, old_table_name) +\\ 'ALTER TABLE \"{}\"", "file: config = yaml.load(file) dictConfig(config) except: FORMAT = '[%(asctime)-15s] %(levelname)s", "mode='w') as file: writer = csv.writer(file) descriptor = storage.describe(table_name) fields", "click import yaml from sqlalchemy import create_engine from jsontableschema_sql import", "standard? ## TODO: support line delimted json? with fopen(input_file) as", "grants_sql: conn.execute(sql) except: logger.error(\"Could not grant all permissions to {}.\".format(old_table_name))", "if file == None: if mode == 'r': return sys.stdin", "@click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields') @click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True,", "transactions: sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name)", "tb): logger.exception(\"Uncaught exception: {}\".format(str(value)), exc_info=(type, value, tb)) sys.excepthook = exception_handler", "config = yaml.load(file) dictConfig(config) except: FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s]", "== None and engine.dialect.driver == 'psycopg2': postgres.copy_to(engine, table_name, file) else:", "select_users != None: select_users = select_users.split(',') else: select_users = []", "x: x['name'], descriptor['fields']) writer.writerow(fields) if geometry_support == None and engine.dialect.driver", "= get_logger(logging_config) if re.match(carto.carto_connection_string_regex, connection_string) != None: if select_users !=", "is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def write(table_name, table_schema_path, connection_string,", "= '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s' 
logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr) logger =", "def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config): logger", "logger = logging.getLogger('the_el') def exception_handler(type, value, tb): logger.exception(\"Uncaught exception: {}\".format(str(value)),", "connection_string, db_schema, select_users, logging_config): logger = get_logger(logging_config) if re.match(carto.carto_connection_string_regex, connection_string)", "standard? ## TODO: support line delimited json? with fopen(output_file, mode='w')", "ON {} TO {}'.format(old_table_name, user.strip())) # Oracle does not allow", "geometry_support, from_srid, to_srid, logging_config): logger = get_logger(logging_config) connection_string = get_connection_string(connection_string)", "{} - {}'.format(new_table_name, old_table_name)) conn = engine.raw_connection() try: with conn.cursor()", "table_schema_path, connection_string, input_file, db_schema, geometry_support, from_srid, skip_headers, indexes_fields, upsert, truncate,", "[%(name)s] %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr) logger = logging.getLogger('the_el') def exception_handler(type,", "file == None: if mode == 'r': return sys.stdin elif", "{}_old'.format(old_table_name, old_table_name) sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name,", "for sql in grants_sql: conn.execute(sql) except: logger.error(\"Could not grant all", "@click.option('--geometry-support') @click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config', default='logging_config.conf') def read(table_name, connection_string, output_file, db_schema,", "logging_config): logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path) if indexes_fields !=", "file: writer = csv.writer(file) descriptor = storage.describe(table_name) fields = map(lambda", "connection_string = 
get_connection_string(connection_string) engine = create_engine(connection_string) if engine.dialect.driver == 'psycopg2':", "old_table_name)) return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string) connection_string =", "@main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') def describe_table(table_name, connection_string, output_file,", "upsert: postgres.upsert(engine, db_schema, table_name, table_schema, rows) elif geometry_support == None", "field = json.dumps(field) row_out.append(field) writer.writerow(row_out) @main.command() @click.argument('new_table_name') @click.argument('old_table_name') @click.option('--connection-string') @click.option('--db-schema')", "describe_table(table_name, connection_string, output_file, db_schema, geometry_support): connection_string = get_connection_string(connection_string) engine, storage", "not rename {} table. 
Does it exist?\".format(new_table_name)) rb_sql = 'ALTER", "if engine.dialect.driver == 'psycopg2': logger.info('Swapping tables using psycopg2: {} -", "stream=sys.stderr) logger = logging.getLogger('the_el') def exception_handler(type, value, tb): logger.exception(\"Uncaught exception:", "elif geometry_support == None and engine.dialect.driver == 'psycopg2': postgres.copy_from(engine, table_name,", "get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid) ##", "db_schema, select_users, logging_config): logger = get_logger(logging_config) if re.match(carto.carto_connection_string_regex, connection_string) !=", "= [] logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name))", "open(logging_config) as file: config = yaml.load(file) dictConfig(config) except: FORMAT =", "return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string) connection_string = get_connection_string(connection_string)", "to_srid, logging_config): logger = get_logger(logging_config) connection_string = get_connection_string(connection_string) engine, storage", "@click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def create_table(table_name, table_schema_path, connection_string,", "from jsontableschema_sql import Storage from smart_open import smart_open from .", "return logger @click.group() def main(): pass def get_connection_string(connection_string): connection_string =", "Creating table using SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema, indexes_fields=indexes_fields) @main.command() @click.argument('table_name') @click.option('--table-schema-path')", "old_table_name) +\\ 'DROP TABLE \"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit() except: 
conn.rollback() raise", "geometry_support == None and engine.dialect.driver == 'psycopg2': postgres.copy_to(engine, table_name, file)", "input_file, db_schema, geometry_support, from_srid, skip_headers, indexes_fields, upsert, truncate, logging_config): logger", "if indexes_fields != None: indexes_fields = indexes_fields.split(',') logger.info('{} - Writing", "{}\".format(str(value)), exc_info=(type, value, tb)) sys.excepthook = exception_handler return logger @click.group()", "return sys.stdin elif mode == 'w': return sys.stdout else: return", "Writing to table using Carto'.format(table_name)) carto.load(logger, db_schema, table_name, load_postgis, table_schema,", "get_table_schema(table_schema_path) if indexes_fields != None: indexes_fields = indexes_fields.split(',') if re.match(carto.carto_connection_string_regex,", "## TODO: support line delimted json? with fopen(input_file) as file:", "row in storage.iter(table_name): row_out = [] for field in row:", "as file: contents = file.read() if not isinstance(contents, str): contents", "= select_users.split(',') else: select_users = [] logger.info('Swapping tables using Carto:", "= yaml.load(file) dictConfig(config) except: FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s'", "logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path) if indexes_fields != None:", "select_users = select_users.split(',') else: select_users = [] logger.info('Swapping tables using", "connection_string, input_file, db_schema, geometry_support, from_srid, skip_headers, indexes_fields, upsert, truncate, logging_config):", "@click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config', default='logging_config.conf') def read(table_name, connection_string, output_file, db_schema, geometry_support,", "cx_Oracle: {} - {}'.format(new_table_name, old_table_name)) conn = engine.connect() if select_users", "for field in row: if isinstance(field, dict) or isinstance(field, list):", 
"user in select_users: grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip()))", "= get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid)", "import smart_open from . import postgres from . import carto", "from_srid=None, to_srid=None): engine = create_engine(connection_string) storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support,", "= create_engine(connection_string) storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True)", "None: if select_users != None: select_users = select_users.split(',') else: select_users", "TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql2) raise try: for", "{} RENAME TO {}_old'.format(old_table_name, old_table_name) sql2 = 'ALTER TABLE {}", "in row: if isinstance(field, dict) or isinstance(field, list): field =", "def exception_handler(type, value, tb): logger.exception(\"Uncaught exception: {}\".format(str(value)), exc_info=(type, value, tb))", "old_table_name, select_users, connection_string) connection_string = get_connection_string(connection_string) engine = create_engine(connection_string) if", "line delimted json? 
with fopen(input_file) as file: rows = csv.reader(file)", "'DROP TABLE {}_old'.format(old_table_name) try: conn.execute(sql1) except: logger.error(\"Could not rename {}", "None: raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required') return", "@click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields') @click.option('--upsert',", "= map(lambda x: x['name'], descriptor['fields']) writer.writerow(fields) if geometry_support == None", "logger.error(\"Could not rename {} table. Does it exist?\".format(new_table_name)) rb_sql =", "TODO: support line delimted json? with fopen(input_file) as file: rows", "descriptor=table_schema) else: storage.describe(table_name) if upsert: postgres.upsert(engine, db_schema, table_name, table_schema, rows)", "descriptor = storage.describe(table_name) with fopen(output_file, mode='w') as file: json.dump(descriptor, file)", "grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip())) # Oracle does", "@click.option('--logging-config', default='logging_config.conf') def create_table(table_name, table_schema_path, connection_string, db_schema, indexes_fields, geometry_support, if_not_exists,", "json.dumps(field) row_out.append(field) writer.writerow(row_out) @main.command() @click.argument('new_table_name') @click.argument('old_table_name') @click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users', help='Users", "table_schema, connection_string, rows, indexes_fields, truncate) else: connection_string = get_connection_string(connection_string) engine,", "= json.dumps(field) row_out.append(field) writer.writerow(row_out) @main.command() @click.argument('new_table_name') @click.argument('old_table_name') 
@click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users',", "writer = csv.writer(file) descriptor = storage.describe(table_name) fields = map(lambda x:", "TO {}_old'.format(old_table_name, old_table_name) sql2 = 'ALTER TABLE {} RENAME TO", "get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) logger.info('{} - Creating", "= 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name) sql2 =", "get_logger(logging_config): try: with open(logging_config) as file: config = yaml.load(file) dictConfig(config)", "connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid)", "os import re import codecs import logging from logging.config import", "None and engine.dialect.driver == 'psycopg2': postgres.copy_to(engine, table_name, file) else: for", "carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string) connection_string = get_connection_string(connection_string) engine", "support line delimted json? 
with fopen(input_file) as file: rows =", "logger.info('{} - Writing to table using Carto'.format(table_name)) carto.load(logger, db_schema, table_name,", "@click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config', default='logging_config.conf') def read(table_name, connection_string,", "= get_table_schema(table_schema_path) if indexes_fields != None: indexes_fields = indexes_fields.split(',') if", "import re import codecs import logging from logging.config import dictConfig", "dictConfig import click import yaml from sqlalchemy import create_engine from", "\"{}\" RENAME TO \"{}\";'.format(new_table_name, old_table_name) +\\ 'DROP TABLE \"{}_old\";'.format(old_table_name) cur.execute(sql)", "logger @click.group() def main(): pass def get_connection_string(connection_string): connection_string = os.getenv('CONNECTION_STRING',", "TABLE \"{}\" RENAME TO \"{}\";'.format(new_table_name, old_table_name) +\\ 'DROP TABLE \"{}_old\";'.format(old_table_name)", "if geometry_support == None and engine.dialect.driver == 'psycopg2': postgres.copy_to(engine, table_name,", "geometry_support) logger.info('{} - Creating table using SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema, indexes_fields=indexes_fields)", "except: logger.error(\"Could not rename {} table. Does it exist?\".format(old_table_name)) raise", "as file: config = yaml.load(file) dictConfig(config) except: FORMAT = '[%(asctime)-15s]", "next(rows) if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis = geometry_support ==", "db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid) ## TODO: csv settings? 
use Frictionless", "logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name)) return carto.swap_table(logger,", "= engine.connect() if select_users != None: select_users = select_users.split(',') else:", "rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql)", "settings? use Frictionless Data csv standard? ## TODO: support line", "Frictionless Data csv standard? ## TODO: support line delimted json?", "csv.field_size_limit(sys.maxsize) def get_logger(logging_config): try: with open(logging_config) as file: config =", "{}'.format(new_table_name, old_table_name) sql3 = 'DROP TABLE {}_old'.format(old_table_name) try: conn.execute(sql1) except:", "try: conn.execute(sql2) except: logger.error(\"Could not rename {} table. Does it", "updated table') @click.option('--logging-config', default='logging_config.conf') def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users,", "rename {} table. Does it exist?\".format(new_table_name)) rb_sql = 'ALTER TABLE", "= file.read() if not isinstance(contents, str): contents = contents.decode('utf-8') return", "raise conn.close() elif engine.dialect.driver == 'cx_oracle': logger.info('Swapping tables using cx_Oracle:", "def main(): pass def get_connection_string(connection_string): connection_string = os.getenv('CONNECTION_STRING', connection_string) if", "conn.execute(sql3) except: logger.error(\"Could not drop {}_old table. 
Do you have", "to_srid=None): engine = create_engine(connection_string) storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid,", "required') return connection_string def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None): engine", "grant SELECT on updated table') @click.option('--logging-config', default='logging_config.conf') def swap_table(new_table_name, old_table_name,", "create_storage_adaptor(connection_string, db_schema, geometry_support) descriptor = storage.describe(table_name) with fopen(output_file, mode='w') as", "if_not_exists, logging_config): logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path) if indexes_fields", "[] for field in row: if isinstance(field, dict) or isinstance(field,", "@click.argument('new_table_name') @click.argument('old_table_name') @click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users', help='Users to grant SELECT on", "@click.option('--logging-config', default='logging_config.conf') def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config): logger", "storage = create_storage_adaptor(connection_string, db_schema, geometry_support) logger.info('{} - Creating table using", "connection_string) connection_string = get_connection_string(connection_string) engine = create_engine(connection_string) if engine.dialect.driver ==", "indexes_fields.split(',') logger.info('{} - Writing to table using Carto'.format(table_name)) carto.load(logger, db_schema,", "grants_sql = [] for user in select_users: grants_sql.append('GRANT SELECT ON", "select_users, connection_string) connection_string = get_connection_string(connection_string) engine = create_engine(connection_string) if engine.dialect.driver", "carto does. 
Makes this idempotent logger.info('{} - Writing to table", "get_logger(logging_config) table_schema = get_table_schema(table_schema_path) ## TODO: csv settings? use Frictionless", "using SQLAlchemy'.format(table_name)) if table_schema_path != None: table_schema = get_table_schema(table_schema_path) storage.describe(table_name,", "using cx_Oracle: {} - {}'.format(new_table_name, old_table_name)) conn = engine.connect() if", "= get_table_schema(table_schema_path) ## TODO: csv settings? use Frictionless Data csv", "indexes_fields, connection_string) connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema,", "!= None: if select_users != None: select_users = select_users.split(',') else:", "geometry_support == None and engine.dialect.driver == 'psycopg2': postgres.copy_from(engine, table_name, table_schema,", "old_table_name) +\\ 'ALTER TABLE \"{}\" RENAME TO \"{}\";'.format(new_table_name, old_table_name) +\\", "not drop {}_old table. Do you have permission?\".format(old_table_name)) rb_sql1 =", "carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string) connection_string = get_connection_string(connection_string)", "import logging from logging.config import dictConfig import click import yaml", "logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr) logger = logging.getLogger('the_el') def exception_handler(type, value, tb):", "conn = engine.connect() if select_users != None: select_users = select_users.split(',')", "try: conn.execute(sql1) except: logger.error(\"Could not rename {} table. 
Does it", "default='logging_config.conf') def write(table_name, table_schema_path, connection_string, input_file, db_schema, geometry_support, from_srid, skip_headers,", "def write(table_name, table_schema_path, connection_string, input_file, db_schema, geometry_support, from_srid, skip_headers, indexes_fields,", "import json import csv import sys import os import re", "[] for user in select_users: grants_sql.append('GRANT SELECT ON {} TO", "table. Does it exist?\".format(old_table_name)) raise try: conn.execute(sql2) except: logger.error(\"Could not", "@click.option('--logging-config', default='logging_config.conf') def write(table_name, table_schema_path, connection_string, input_file, db_schema, geometry_support, from_srid,", "import csv import sys import os import re import codecs", "[] logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name)) return", "if_not_exists, indexes_fields, connection_string) connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string,", "= logging.getLogger('the_el') def exception_handler(type, value, tb): logger.exception(\"Uncaught exception: {}\".format(str(value)), exc_info=(type,", "logging.getLogger('the_el') def exception_handler(type, value, tb): logger.exception(\"Uncaught exception: {}\".format(str(value)), exc_info=(type, value,", "= indexes_fields.split(',') if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis = geometry_support", "- Writing to table using SQLAlchemy'.format(table_name)) if table_schema_path != None:", "== 'w': return sys.stdout else: return smart_open(file, mode=mode) def get_table_schema(table_schema_path):", "storage.describe(table_name) if upsert: postgres.upsert(engine, db_schema, table_name, table_schema, rows) elif geometry_support", "with fopen(output_file, mode='w') as file: json.dump(descriptor, file) @main.command() @click.argument('table_name') 
@click.argument('table_schema_path')", "return smart_open(file, mode=mode) def get_table_schema(table_schema_path): with fopen(table_schema_path) as file: contents", "using psycopg2: {} - {}'.format(new_table_name, old_table_name)) conn = engine.raw_connection() try:", "connection_string == None: raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option", "= get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) descriptor =", "import codecs import logging from logging.config import dictConfig import click", "Does it exist?\".format(old_table_name)) raise try: conn.execute(sql2) except: logger.error(\"Could not rename", "= csv.writer(file) descriptor = storage.describe(table_name) fields = map(lambda x: x['name'],", "TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql) raise try: conn.execute(sql3) except: logger.error(\"Could not", "indexes_fields.split(',') if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis = geometry_support ==", "- {}'.format(new_table_name, old_table_name)) conn = engine.raw_connection() try: with conn.cursor() as", "'[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr) logger = logging.getLogger('the_el')", "else: for row in storage.iter(table_name): row_out = [] for field", "all permissions to {}.\".format(old_table_name)) raise else: raise Exception('`{}` not supported", "return sys.stdout else: return smart_open(file, mode=mode) def get_table_schema(table_schema_path): with fopen(table_schema_path)", "read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config): logger =", "= get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid) ##", "table_schema_path != None: 
table_schema = get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema) else: storage.describe(table_name)", "smart_open from . import postgres from . import carto csv.field_size_limit(sys.maxsize)", "storage def fopen(file, mode='r'): if file == None: if mode", "indexes_fields != None: indexes_fields = indexes_fields.split(',') if re.match(carto.carto_connection_string_regex, connection_string) !=", "or isinstance(field, list): field = json.dumps(field) row_out.append(field) writer.writerow(row_out) @main.command() @click.argument('new_table_name')", "tables using Carto: {} - {}'.format(new_table_name, old_table_name)) return carto.swap_table(logger, db_schema,", "{}'.format(new_table_name, old_table_name)) return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string) connection_string", "None: indexes_fields = indexes_fields.split(',') if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis", "with conn.cursor() as cur: sql = 'ALTER TABLE \"{}\" RENAME", "contents.decode('utf-8') return json.loads(contents) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') def", "postgres.copy_to(engine, table_name, file) else: for row in storage.iter(table_name): row_out =", "permissions to {}.\".format(old_table_name)) raise else: raise Exception('`{}` not supported by", "@click.group() def main(): pass def get_connection_string(connection_string): connection_string = os.getenv('CONNECTION_STRING', connection_string)", "def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config): logger = get_logger(logging_config)", "modification within a transaction, so make individual transactions: sql1 =", "import dictConfig import click import yaml from sqlalchemy import create_engine", 
"writer.writerow(fields) if geometry_support == None and engine.dialect.driver == 'psycopg2': postgres.copy_to(engine,", "'ALTER TABLE \"{}\" RENAME TO \"{}\";'.format(new_table_name, old_table_name) +\\ 'DROP TABLE", "default='logging_config.conf') def create_table(table_name, table_schema_path, connection_string, db_schema, indexes_fields, geometry_support, if_not_exists, logging_config):", "= select_users.split(',') else: select_users = [] grants_sql = [] for", "table_schema = get_table_schema(table_schema_path) if indexes_fields != None: indexes_fields = indexes_fields.split(',')", "contents = contents.decode('utf-8') return json.loads(contents) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema')", "== None: raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required')", "get_connection_string(connection_string): connection_string = os.getenv('CONNECTION_STRING', connection_string) if connection_string == None: raise", "mode='r'): if file == None: if mode == 'r': return", "geometry_support == 'postgis' logger.info('{} - Creating table using Carto'.format(table_name)) return", "logger.error(\"Could not rename {} table. 
Does it exist?\".format(old_table_name)) raise try:", "- Creating table using SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema, indexes_fields=indexes_fields) @main.command() @click.argument('table_name')", "logger.error(\"Could not grant all permissions to {}.\".format(old_table_name)) raise else: raise", "if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis = geometry_support == 'postgis'", "help='Users to grant SELECT on updated table') @click.option('--logging-config', default='logging_config.conf') def", "\"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit() except: conn.rollback() raise conn.close() elif engine.dialect.driver ==", "variable or `--connection-string` option required') return connection_string def create_storage_adaptor(connection_string, db_schema,", "use Frictionless Data csv standard? ## TODO: support line delimited", "@main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config', default='logging_config.conf')", "connection_string, db_schema, indexes_fields, geometry_support, if_not_exists, logging_config): logger = get_logger(logging_config) table_schema", "@click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config', default='logging_config.conf') def read(table_name, connection_string, output_file,", "db_schema, geometry_support) logger.info('{} - Creating table using SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema,", "for user in select_users: grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name,", "str): contents = contents.decode('utf-8') return json.loads(contents) @main.command() @click.argument('table_name') 
@click.option('--connection-string') @click.option('-o','--output-file')", "geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True) return engine, storage def fopen(file, mode='r'):", "get_connection_string(connection_string) engine = create_engine(connection_string) if engine.dialect.driver == 'psycopg2': logger.info('Swapping tables", "db_schema, geometry_support): connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema,", "postgres.copy_from(engine, table_name, table_schema, rows) else: storage.write(table_name, rows) @main.command() @click.argument('table_name') @click.option('--connection-string')", "transaction, so make individual transactions: sql1 = 'ALTER TABLE {}", "except: logger.error(\"Could not rename {} table. Does it exist?\".format(new_table_name)) rb_sql", "= get_logger(logging_config) table_schema = get_table_schema(table_schema_path) if indexes_fields != None: indexes_fields", "!= None: table_schema = get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema) else: storage.describe(table_name) if", "cur: sql = 'ALTER TABLE \"{}\" RENAME TO \"{}_old\";'.format(old_table_name, old_table_name)", "None and engine.dialect.driver == 'psycopg2': postgres.copy_from(engine, table_name, table_schema, rows) else:", "logging_config): logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path) ## TODO: csv", "!= None: select_users = select_users.split(',') else: select_users = [] grants_sql", "'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name) sql2 = 'ALTER", "raise try: for sql in grants_sql: conn.execute(sql) except: logger.error(\"Could not", "from_srid=from_srid) ## TODO: truncate? carto does. 
Makes this idempotent logger.info('{}", "db_schema, table_name, load_postgis, table_schema, connection_string, rows, indexes_fields, truncate) else: connection_string", "= get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema) else: storage.describe(table_name) if upsert: postgres.upsert(engine, db_schema,", "geometry_support, from_srid=None, to_srid=None): engine = create_engine(connection_string) storage = Storage(engine, dbschema=db_schema,", "exc_info=(type, value, tb)) sys.excepthook = exception_handler return logger @click.group() def", "from_srid, skip_headers, indexes_fields, upsert, truncate, logging_config): logger = get_logger(logging_config) table_schema", "value, tb)) sys.excepthook = exception_handler return logger @click.group() def main():", "tb)) sys.excepthook = exception_handler return logger @click.group() def main(): pass", "file: rows = csv.reader(file) if skip_headers: next(rows) if re.match(carto.carto_connection_string_regex, connection_string)", "= 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql2) raise", "get_table_schema(table_schema_path): with fopen(table_schema_path) as file: contents = file.read() if not", "as file: writer = csv.writer(file) descriptor = storage.describe(table_name) fields =", "try: with conn.cursor() as cur: sql = 'ALTER TABLE \"{}\"", "def get_table_schema(table_schema_path): with fopen(table_schema_path) as file: contents = file.read() if", "'psycopg2': logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name)) conn", "connection_string, rows, indexes_fields, truncate) else: connection_string = get_connection_string(connection_string) engine, storage", "import carto csv.field_size_limit(sys.maxsize) def get_logger(logging_config): try: with open(logging_config) as file:", "contents = file.read() if not isinstance(contents, str): contents = contents.decode('utf-8')", "from_srid, to_srid, 
logging_config): logger = get_logger(logging_config) connection_string = get_connection_string(connection_string) engine,", "- Creating table using Carto'.format(table_name)) return carto.create_table(logger, table_name, load_postgis, table_schema,", "logging_config): logger = get_logger(logging_config) connection_string = get_connection_string(connection_string) engine, storage =", "mode='w') as file: json.dump(descriptor, file) @main.command() @click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema')", "= os.getenv('CONNECTION_STRING', connection_string) if connection_string == None: raise Exception('`CONNECTION_STRING` environment", "table using Carto'.format(table_name)) return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields,", "from sqlalchemy import create_engine from jsontableschema_sql import Storage from smart_open", "in select_users: grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip())) #", "@click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields') @click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf')", "is_flag=True) @click.option('--indexes-fields') @click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def", "create_table(table_name, table_schema_path, connection_string, db_schema, indexes_fields, geometry_support, if_not_exists, logging_config): logger =", "csv import sys import os import re import codecs import", "jsontableschema_sql import Storage from smart_open import smart_open from . import", "= create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid) ## TODO: truncate? 
carto does.", "TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql2) raise try: for sql in grants_sql:", "conn.close() elif engine.dialect.driver == 'cx_oracle': logger.info('Swapping tables using cx_Oracle: {}", "!= None: indexes_fields = indexes_fields.split(',') logger.info('{} - Writing to table", "logging.config import dictConfig import click import yaml from sqlalchemy import", "smart_open import smart_open from . import postgres from . import", "None: select_users = select_users.split(',') else: select_users = [] logger.info('Swapping tables", "load_postgis, table_schema, if_not_exists, indexes_fields, connection_string) connection_string = get_connection_string(connection_string) engine, storage", "is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def create_table(table_name, table_schema_path, connection_string, db_schema, indexes_fields,", "{} table. Does it exist?\".format(new_table_name)) rb_sql = 'ALTER TABLE {}_old", "dict) or isinstance(field, list): field = json.dumps(field) row_out.append(field) writer.writerow(row_out) @main.command()", "not grant all permissions to {}.\".format(old_table_name)) raise else: raise Exception('`{}`", "# Oracle does not allow table modification within a transaction,", "@click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config', default='logging_config.conf') def", "def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None): engine = create_engine(connection_string) storage", "re import codecs import logging from logging.config import dictConfig import", "connection_string) != None: load_postgis = geometry_support == 'postgis' if indexes_fields", "db_schema, geometry_support) descriptor = storage.describe(table_name) with 
fopen(output_file, mode='w') as file:", "@click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields')", "re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis = geometry_support == 'postgis' if", "csv settings? use Frictionless Data csv standard? ## TODO: support", "Does it exist?\".format(new_table_name)) rb_sql = 'ALTER TABLE {}_old RENAME TO", "## TODO: support line delimited json? with fopen(output_file, mode='w') as", "fopen(file, mode='r'): if file == None: if mode == 'r':", "logger.info('{} - Creating table using Carto'.format(table_name)) return carto.create_table(logger, table_name, load_postgis,", "Frictionless Data csv standard? ## TODO: support line delimited json?", "mode=mode) def get_table_schema(table_schema_path): with fopen(table_schema_path) as file: contents = file.read()", "pass def get_connection_string(connection_string): connection_string = os.getenv('CONNECTION_STRING', connection_string) if connection_string ==", "None: table_schema = get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema) else: storage.describe(table_name) if upsert:", "'psycopg2': postgres.copy_to(engine, table_name, file) else: for row in storage.iter(table_name): row_out", "{}'.format(new_table_name, old_table_name)) conn = engine.raw_connection() try: with conn.cursor() as cur:", "table_schema = get_table_schema(table_schema_path) ## TODO: csv settings? 
use Frictionless Data", "= storage.describe(table_name) with fopen(output_file, mode='w') as file: json.dump(descriptor, file) @main.command()", "level=logging.INFO, stream=sys.stderr) logger = logging.getLogger('the_el') def exception_handler(type, value, tb): logger.exception(\"Uncaught", "fopen(output_file, mode='w') as file: json.dump(descriptor, file) @main.command() @click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string')", "'DROP TABLE {}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2 = 'ALTER TABLE {}_old RENAME", "def get_connection_string(connection_string): connection_string = os.getenv('CONNECTION_STRING', connection_string) if connection_string == None:", "table') @click.option('--logging-config', default='logging_config.conf') def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config):", "descriptor = storage.describe(table_name) fields = map(lambda x: x['name'], descriptor['fields']) writer.writerow(fields)", "Storage from smart_open import smart_open from . import postgres from", "@click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields') @click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate',", "table_schema, indexes_fields=indexes_fields) @main.command() @click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid')", "TABLE {}_old'.format(old_table_name) try: conn.execute(sql1) except: logger.error(\"Could not rename {} table.", "map(lambda x: x['name'], descriptor['fields']) writer.writerow(fields) if geometry_support == None and", "delimted json? 
with fopen(input_file) as file: rows = csv.reader(file) if", "TODO: csv settings? use Frictionless Data csv standard? ## TODO:", "conn.commit() except: conn.rollback() raise conn.close() elif engine.dialect.driver == 'cx_oracle': logger.info('Swapping", "logger.exception(\"Uncaught exception: {}\".format(str(value)), exc_info=(type, value, tb)) sys.excepthook = exception_handler return", "option required') return connection_string def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None):", "select_users, logging_config): logger = get_logger(logging_config) if re.match(carto.carto_connection_string_regex, connection_string) != None:", "{}'.format(old_table_name, old_table_name) conn.execute(rb_sql) raise try: conn.execute(sql3) except: logger.error(\"Could not drop", "@main.command() @click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True, default=False)", "else: connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support,", "to table using SQLAlchemy'.format(table_name)) if table_schema_path != None: table_schema =", "engine, storage def fopen(file, mode='r'): if file == None: if", "json.dump(descriptor, file) @main.command() @click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists',", "indexes_fields = indexes_fields.split(',') if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis =", "= indexes_fields.split(',') logger.info('{} - Writing to table using Carto'.format(table_name)) carto.load(logger,", "delimited json? 
with fopen(output_file, mode='w') as file: writer = csv.writer(file)", "connection_string) != None: load_postgis = geometry_support == 'postgis' logger.info('{} -", "exception_handler(type, value, tb): logger.exception(\"Uncaught exception: {}\".format(str(value)), exc_info=(type, value, tb)) sys.excepthook", "@main.command() @click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers', is_flag=True)", "= Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True) return engine, storage", "+\\ 'DROP TABLE \"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit() except: conn.rollback() raise conn.close()", "you have permission?\".format(old_table_name)) rb_sql1 = 'DROP TABLE {}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2", ". 
import carto csv.field_size_limit(sys.maxsize) def get_logger(logging_config): try: with open(logging_config) as", "json.loads(contents) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') def describe_table(table_name, connection_string,", "as file: rows = csv.reader(file) if skip_headers: next(rows) if re.match(carto.carto_connection_string_regex,", "'r': return sys.stdin elif mode == 'w': return sys.stdout else:", "= [] grants_sql = [] for user in select_users: grants_sql.append('GRANT", "file) else: for row in storage.iter(table_name): row_out = [] for", "default=False) @click.option('--logging-config', default='logging_config.conf') def create_table(table_name, table_schema_path, connection_string, db_schema, indexes_fields, geometry_support,", "select_users: grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip())) # Oracle", "make individual transactions: sql1 = 'ALTER TABLE {} RENAME TO", "db_schema, geometry_support, from_srid=None, to_srid=None): engine = create_engine(connection_string) storage = Storage(engine,", "Data csv standard? ## TODO: support line delimted json? with", "storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True) return engine,", "conn.execute(sql1) except: logger.error(\"Could not rename {} table. Does it exist?\".format(old_table_name))", "from_srid=from_srid, to_srid=to_srid) ## TODO: csv settings? 
use Frictionless Data csv", "FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr) logger", "with fopen(output_file, mode='w') as file: writer = csv.writer(file) descriptor =", "elif mode == 'w': return sys.stdout else: return smart_open(file, mode=mode)", "table_name, load_postgis, table_schema, connection_string, rows, indexes_fields, truncate) else: connection_string =", "from . import postgres from . import carto csv.field_size_limit(sys.maxsize) def", "select_users = select_users.split(',') else: select_users = [] grants_sql = []", "engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) descriptor = storage.describe(table_name) with", "connection_string = os.getenv('CONNECTION_STRING', connection_string) if connection_string == None: raise Exception('`CONNECTION_STRING`", "{}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql) raise try: conn.execute(sql3) except:", "= get_logger(logging_config) connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema,", "TODO: support line delimited json? with fopen(output_file, mode='w') as file:", "carto csv.field_size_limit(sys.maxsize) def get_logger(logging_config): try: with open(logging_config) as file: config", "{} table. Does it exist?\".format(old_table_name)) raise try: conn.execute(sql2) except: logger.error(\"Could", "rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql2)", "idempotent logger.info('{} - Writing to table using SQLAlchemy'.format(table_name)) if table_schema_path", "{} - {}'.format(new_table_name, old_table_name)) conn = engine.connect() if select_users !=", "table. 
Does it exist?\".format(new_table_name)) rb_sql = 'ALTER TABLE {}_old RENAME", "os.getenv('CONNECTION_STRING', connection_string) if connection_string == None: raise Exception('`CONNECTION_STRING` environment variable", "so make individual transactions: sql1 = 'ALTER TABLE {} RENAME", "None: if mode == 'r': return sys.stdin elif mode ==", "import postgres from . import carto csv.field_size_limit(sys.maxsize) def get_logger(logging_config): try:", "not isinstance(contents, str): contents = contents.decode('utf-8') return json.loads(contents) @main.command() @click.argument('table_name')", "@click.argument('old_table_name') @click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users', help='Users to grant SELECT on updated", "select_users.split(',') else: select_users = [] logger.info('Swapping tables using Carto: {}", "default='logging_config.conf') def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config):", "else: storage.describe(table_name) if upsert: postgres.upsert(engine, db_schema, table_name, table_schema, rows) elif", "json import csv import sys import os import re import", "!= None: select_users = select_users.split(',') else: select_users = [] logger.info('Swapping", "sql3 = 'DROP TABLE {}_old'.format(old_table_name) try: conn.execute(sql1) except: logger.error(\"Could not", "= 'DROP TABLE {}_old'.format(old_table_name) try: conn.execute(sql1) except: logger.error(\"Could not rename", "!= None: indexes_fields = indexes_fields.split(',') if re.match(carto.carto_connection_string_regex, connection_string) != None:", "engine.connect() if select_users != None: select_users = select_users.split(',') else: select_users", "isinstance(contents, str): contents = contents.decode('utf-8') return json.loads(contents) @main.command() @click.argument('table_name') @click.option('--connection-string')", "file: contents = file.read() if not isinstance(contents, str): contents 
=", "import os import re import codecs import logging from logging.config", "else: return smart_open(file, mode=mode) def get_table_schema(table_schema_path): with fopen(table_schema_path) as file:", "file.read() if not isinstance(contents, str): contents = contents.decode('utf-8') return json.loads(contents)", "Writing to table using SQLAlchemy'.format(table_name)) if table_schema_path != None: table_schema", "@click.option('--connection-string') @click.option('--db-schema') @click.option('--select-users', help='Users to grant SELECT on updated table')", "if mode == 'r': return sys.stdin elif mode == 'w':", "create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid) ## TODO: truncate? carto does. Makes", "RENAME TO \"{}_old\";'.format(old_table_name, old_table_name) +\\ 'ALTER TABLE \"{}\" RENAME TO", "fields = map(lambda x: x['name'], descriptor['fields']) writer.writerow(fields) if geometry_support ==", "= 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql) raise", "storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid) ## TODO: csv", "geometry_support) descriptor = storage.describe(table_name) with fopen(output_file, mode='w') as file: json.dump(descriptor,", "if isinstance(field, dict) or isinstance(field, list): field = json.dumps(field) row_out.append(field)", "views=True) return engine, storage def fopen(file, mode='r'): if file ==", "table_name, file) else: for row in storage.iter(table_name): row_out = []", "elif engine.dialect.driver == 'cx_oracle': logger.info('Swapping tables using cx_Oracle: {} -", "individual transactions: sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name,", "import yaml from sqlalchemy import create_engine from jsontableschema_sql import Storage", "table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string) connection_string = 
get_connection_string(connection_string) engine,", "file: json.dump(descriptor, file) @main.command() @click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support')", "= create_storage_adaptor(connection_string, db_schema, geometry_support) logger.info('{} - Creating table using SQLAlchemy'.format(table_name))", "+\\ 'ALTER TABLE \"{}\" RENAME TO \"{}\";'.format(new_table_name, old_table_name) +\\ 'DROP", "to table using Carto'.format(table_name)) carto.load(logger, db_schema, table_name, load_postgis, table_schema, connection_string,", "= [] for field in row: if isinstance(field, dict) or", "return engine, storage def fopen(file, mode='r'): if file == None:", "else: select_users = [] grants_sql = [] for user in", "isinstance(field, dict) or isinstance(field, list): field = json.dumps(field) row_out.append(field) writer.writerow(row_out)", "connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) descriptor", "@click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def", "geometry_support, from_srid=from_srid) ## TODO: truncate? carto does. Makes this idempotent", "{}_old table. 
Do you have permission?\".format(old_table_name)) rb_sql1 = 'DROP TABLE", "except: FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr)", "with fopen(table_schema_path) as file: contents = file.read() if not isinstance(contents,", "new_table_name, old_table_name, select_users, connection_string) connection_string = get_connection_string(connection_string) engine = create_engine(connection_string)", "conn.cursor() as cur: sql = 'ALTER TABLE \"{}\" RENAME TO", "indexes_fields, geometry_support, if_not_exists, logging_config): logger = get_logger(logging_config) table_schema = get_table_schema(table_schema_path)", "'ALTER TABLE \"{}\" RENAME TO \"{}_old\";'.format(old_table_name, old_table_name) +\\ 'ALTER TABLE", "try: conn.execute(sql3) except: logger.error(\"Could not drop {}_old table. Do you", "RENAME TO {}_old'.format(old_table_name, old_table_name) sql2 = 'ALTER TABLE {} RENAME", "this idempotent logger.info('{} - Writing to table using SQLAlchemy'.format(table_name)) if", "default='logging_config.conf') def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config): logger =", "else: select_users = [] logger.info('Swapping tables using Carto: {} -", "engine.dialect.driver == 'cx_oracle': logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name,", "conn.execute(sql) except: logger.error(\"Could not grant all permissions to {}.\".format(old_table_name)) raise", "table_name, table_schema, rows) elif geometry_support == None and engine.dialect.driver ==", "table_schema, rows) elif geometry_support == None and engine.dialect.driver == 'psycopg2':", "connection_string) if connection_string == None: raise Exception('`CONNECTION_STRING` environment variable or", "def fopen(file, mode='r'): if file == None: if mode ==", "carto.load(logger, db_schema, table_name, load_postgis, table_schema, connection_string, rows, 
indexes_fields, truncate) else:", "if select_users != None: select_users = select_users.split(',') else: select_users =", "old_table_name)) conn = engine.connect() if select_users != None: select_users =", "{}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql2) raise try: for sql", "{}'.format(old_table_name, old_table_name) conn.execute(rb_sql2) raise try: for sql in grants_sql: conn.execute(sql)", "== 'postgis' if indexes_fields != None: indexes_fields = indexes_fields.split(',') logger.info('{}", "from . import carto csv.field_size_limit(sys.maxsize) def get_logger(logging_config): try: with open(logging_config)", "@click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') def describe_table(table_name, connection_string, output_file, db_schema, geometry_support): connection_string", "psycopg2: {} - {}'.format(new_table_name, old_table_name)) conn = engine.raw_connection() try: with", "@click.option('--truncate/--no-truncate', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def write(table_name, table_schema_path, connection_string, input_file,", "= 'ALTER TABLE \"{}\" RENAME TO \"{}_old\";'.format(old_table_name, old_table_name) +\\ 'ALTER", "= get_connection_string(connection_string) engine = create_engine(connection_string) if engine.dialect.driver == 'psycopg2': logger.info('Swapping", "RENAME TO {}'.format(new_table_name, old_table_name) sql3 = 'DROP TABLE {}_old'.format(old_table_name) try:", "logger.error(\"Could not drop {}_old table. Do you have permission?\".format(old_table_name)) rb_sql1", "'DROP TABLE \"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit() except: conn.rollback() raise conn.close() elif", "## TODO: csv settings? use Frictionless Data csv standard? 
##", "not allow table modification within a transaction, so make individual", "try: for sql in grants_sql: conn.execute(sql) except: logger.error(\"Could not grant", "fopen(table_schema_path) as file: contents = file.read() if not isinstance(contents, str):", "'postgis' logger.info('{} - Creating table using Carto'.format(table_name)) return carto.create_table(logger, table_name,", "geometry_support): connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)", "old_table_name, connection_string, db_schema, select_users, logging_config): logger = get_logger(logging_config) if re.match(carto.carto_connection_string_regex,", "geometry_support == 'postgis' if indexes_fields != None: indexes_fields = indexes_fields.split(',')", "get_logger(logging_config) if re.match(carto.carto_connection_string_regex, connection_string) != None: if select_users != None:", "except: conn.rollback() raise conn.close() elif engine.dialect.driver == 'cx_oracle': logger.info('Swapping tables", "\"{}_old\";'.format(old_table_name, old_table_name) +\\ 'ALTER TABLE \"{}\" RENAME TO \"{}\";'.format(new_table_name, old_table_name)", "storage.describe(table_name, descriptor=table_schema) else: storage.describe(table_name) if upsert: postgres.upsert(engine, db_schema, table_name, table_schema,", "get_logger(logging_config) connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support,", "indexes_fields=indexes_fields) @main.command() @click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers',", "it exist?\".format(old_table_name)) raise try: conn.execute(sql2) except: logger.error(\"Could not rename {}", 
"{}'.format(old_table_name, user.strip())) # Oracle does not allow table modification within", "== 'postgis' logger.info('{} - Creating table using Carto'.format(table_name)) return carto.create_table(logger,", "connection_string def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None): engine = create_engine(connection_string)", "@click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True, default=False) @click.option('--logging-config',", "it exist?\".format(new_table_name)) rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name,", "rows) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') @click.option('--from-srid') @click.option('--to-srid') @click.option('--logging-config',", "load_postgis, table_schema, connection_string, rows, indexes_fields, truncate) else: connection_string = get_connection_string(connection_string)", "in grants_sql: conn.execute(sql) except: logger.error(\"Could not grant all permissions to", "SELECT on updated table') @click.option('--logging-config', default='logging_config.conf') def swap_table(new_table_name, old_table_name, connection_string,", "tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name)) conn = engine.connect()", "storage = create_storage_adaptor(connection_string, db_schema, geometry_support) descriptor = storage.describe(table_name) with fopen(output_file,", "- Writing to table using Carto'.format(table_name)) carto.load(logger, db_schema, table_name, load_postgis,", "connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config): logger = get_logger(logging_config)", "old_table_name) 
conn.execute(rb_sql) raise try: conn.execute(sql3) except: logger.error(\"Could not drop {}_old", "{} - {}'.format(new_table_name, old_table_name)) return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users,", "@click.option('--select-users', help='Users to grant SELECT on updated table') @click.option('--logging-config', default='logging_config.conf')", "'psycopg2': postgres.copy_from(engine, table_name, table_schema, rows) else: storage.write(table_name, rows) @main.command() @click.argument('table_name')", "@click.option('--indexes-fields') @click.option('--geometry-support') @click.option('--if-not-exists', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def create_table(table_name, table_schema_path,", "None: indexes_fields = indexes_fields.split(',') logger.info('{} - Writing to table using", "cur.execute(sql) conn.commit() except: conn.rollback() raise conn.close() elif engine.dialect.driver == 'cx_oracle':", "re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis = geometry_support == 'postgis' logger.info('{}", "'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name) sql3 = 'DROP", "storage.iter(table_name): row_out = [] for field in row: if isinstance(field,", "db_schema, geometry_support, from_srid, skip_headers, indexes_fields, upsert, truncate, logging_config): logger =", "postgres.upsert(engine, db_schema, table_name, table_schema, rows) elif geometry_support == None and", "isinstance(field, list): field = json.dumps(field) row_out.append(field) writer.writerow(row_out) @main.command() @click.argument('new_table_name') @click.argument('old_table_name')", "== None: if mode == 'r': return sys.stdin elif mode", "table_schema_path, connection_string, db_schema, indexes_fields, geometry_support, if_not_exists, logging_config): logger = get_logger(logging_config)", "geometry_support, if_not_exists, logging_config): logger = 
get_logger(logging_config) table_schema = get_table_schema(table_schema_path) if", "from logging.config import dictConfig import click import yaml from sqlalchemy", "value, tb): logger.exception(\"Uncaught exception: {}\".format(str(value)), exc_info=(type, value, tb)) sys.excepthook =", "create_engine(connection_string) if engine.dialect.driver == 'psycopg2': logger.info('Swapping tables using psycopg2: {}", "load_postgis = geometry_support == 'postgis' logger.info('{} - Creating table using", "output_file, db_schema, geometry_support, from_srid, to_srid, logging_config): logger = get_logger(logging_config) connection_string", "db_schema, geometry_support, from_srid, to_srid, logging_config): logger = get_logger(logging_config) connection_string =", "Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True) return engine, storage def", "skip_headers: next(rows) if re.match(carto.carto_connection_string_regex, connection_string) != None: load_postgis = geometry_support", "using Carto: {} - {}'.format(new_table_name, old_table_name)) return carto.swap_table(logger, db_schema, new_table_name,", "return json.loads(contents) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') def describe_table(table_name,", "write(table_name, table_schema_path, connection_string, input_file, db_schema, geometry_support, from_srid, skip_headers, indexes_fields, upsert,", "except: logger.error(\"Could not drop {}_old table. 
Do you have permission?\".format(old_table_name))", "row_out = [] for field in row: if isinstance(field, dict)", "mode == 'w': return sys.stdout else: return smart_open(file, mode=mode) def", "= create_storage_adaptor(connection_string, db_schema, geometry_support) descriptor = storage.describe(table_name) with fopen(output_file, mode='w')", "conn.execute(rb_sql2) raise try: for sql in grants_sql: conn.execute(sql) except: logger.error(\"Could", "TODO: truncate? carto does. Makes this idempotent logger.info('{} - Writing", "{} RENAME TO {}'.format(new_table_name, old_table_name) sql3 = 'DROP TABLE {}_old'.format(old_table_name)", "get_table_schema(table_schema_path) storage.describe(table_name, descriptor=table_schema) else: storage.describe(table_name) if upsert: postgres.upsert(engine, db_schema, table_name,", "grant all permissions to {}.\".format(old_table_name)) raise else: raise Exception('`{}` not", "json? with fopen(input_file) as file: rows = csv.reader(file) if skip_headers:", "rows) else: storage.write(table_name, rows) @main.command() @click.argument('table_name') @click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support')", "sys import os import re import codecs import logging from", "try: with open(logging_config) as file: config = yaml.load(file) dictConfig(config) except:", "connection_string) connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)", "SQLAlchemy'.format(table_name)) storage.create(table_name, table_schema, indexes_fields=indexes_fields) @main.command() @click.argument('table_name') @click.option('--table-schema-path') @click.option('--connection-string') @click.option('-f','--input-file') @click.option('--db-schema')", "skip_headers, indexes_fields, upsert, truncate, logging_config): logger = get_logger(logging_config) table_schema =", "create_engine from 
jsontableschema_sql import Storage from smart_open import smart_open from", "{}_old'.format(old_table_name) try: conn.execute(sql1) except: logger.error(\"Could not rename {} table. Does", "table using SQLAlchemy'.format(table_name)) if table_schema_path != None: table_schema = get_table_schema(table_schema_path)", "@click.option('--indexes-fields') @click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def write(table_name,", "using Carto'.format(table_name)) carto.load(logger, db_schema, table_name, load_postgis, table_schema, connection_string, rows, indexes_fields,", "table modification within a transaction, so make individual transactions: sql1", "@click.option('--geometry-support') @click.option('--from-srid') @click.option('--skip-headers', is_flag=True) @click.option('--indexes-fields') @click.option('--upsert', is_flag=True) @click.option('--truncate/--no-truncate', is_flag=True, default=False)", "json? with fopen(output_file, mode='w') as file: writer = csv.writer(file) descriptor", "engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support) logger.info('{} - Creating table", "= create_engine(connection_string) if engine.dialect.driver == 'psycopg2': logger.info('Swapping tables using psycopg2:", "permission?\".format(old_table_name)) rb_sql1 = 'DROP TABLE {}'.format(old_table_name) conn.execute(rb_sql1) rb_sql2 = 'ALTER", "= get_logger(logging_config) table_schema = get_table_schema(table_schema_path) ## TODO: csv settings? 
use", "as file: json.dump(descriptor, file) @main.command() @click.argument('table_name') @click.argument('table_schema_path') @click.option('--connection-string') @click.option('--db-schema') @click.option('--indexes-fields')", "def get_logger(logging_config): try: with open(logging_config) as file: config = yaml.load(file)", "\"{}\";'.format(new_table_name, old_table_name) +\\ 'DROP TABLE \"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit() except: conn.rollback()", "storage.describe(table_name) with fopen(output_file, mode='w') as file: json.dump(descriptor, file) @main.command() @click.argument('table_name')", "postgres from . import carto csv.field_size_limit(sys.maxsize) def get_logger(logging_config): try: with", "logger = get_logger(logging_config) connection_string = get_connection_string(connection_string) engine, storage = create_storage_adaptor(connection_string,", "is_flag=True, default=False) @click.option('--logging-config', default='logging_config.conf') def write(table_name, table_schema_path, connection_string, input_file, db_schema,", "@click.option('--connection-string') @click.option('-o','--output-file') @click.option('--db-schema') @click.option('--geometry-support') def describe_table(table_name, connection_string, output_file, db_schema, geometry_support):", "table. Do you have permission?\".format(old_table_name)) rb_sql1 = 'DROP TABLE {}'.format(old_table_name)", "== None and engine.dialect.driver == 'psycopg2': postgres.copy_from(engine, table_name, table_schema, rows)", "and engine.dialect.driver == 'psycopg2': postgres.copy_from(engine, table_name, table_schema, rows) else: storage.write(table_name,", "for row in storage.iter(table_name): row_out = [] for field in", "from smart_open import smart_open from . 
import postgres from .", "RENAME TO \"{}\";'.format(new_table_name, old_table_name) +\\ 'DROP TABLE \"{}_old\";'.format(old_table_name) cur.execute(sql) conn.commit()", "- {}'.format(new_table_name, old_table_name)) conn = engine.connect() if select_users != None:", "conn.rollback() raise conn.close() elif engine.dialect.driver == 'cx_oracle': logger.info('Swapping tables using", "rename {} table. Does it exist?\".format(old_table_name)) raise try: conn.execute(sql2) except:", "Do you have permission?\".format(old_table_name)) rb_sql1 = 'DROP TABLE {}'.format(old_table_name) conn.execute(rb_sql1)", "`--connection-string` option required') return connection_string def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None,", "conn.execute(rb_sql1) rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)", "except: logger.error(\"Could not grant all permissions to {}.\".format(old_table_name)) raise else:", "def create_table(table_name, table_schema_path, connection_string, db_schema, indexes_fields, geometry_support, if_not_exists, logging_config): logger", "conn.execute(sql2) except: logger.error(\"Could not rename {} table. 
Does it exist?\".format(new_table_name))", "create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None): engine = create_engine(connection_string) storage =", "= storage.describe(table_name) fields = map(lambda x: x['name'], descriptor['fields']) writer.writerow(fields) if", "to grant SELECT on updated table') @click.option('--logging-config', default='logging_config.conf') def swap_table(new_table_name,", "indexes_fields = indexes_fields.split(',') logger.info('{} - Writing to table using Carto'.format(table_name))", "descriptor['fields']) writer.writerow(fields) if geometry_support == None and engine.dialect.driver == 'psycopg2':", "using Carto'.format(table_name)) return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string)", "'postgis' if indexes_fields != None: indexes_fields = indexes_fields.split(',') logger.info('{} -", "Carto'.format(table_name)) return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string) connection_string", "row: if isinstance(field, dict) or isinstance(field, list): field = json.dumps(field)", "TO \"{}_old\";'.format(old_table_name, old_table_name) +\\ 'ALTER TABLE \"{}\" RENAME TO \"{}\";'.format(new_table_name,", "'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name) conn.execute(rb_sql) raise try:", "= csv.reader(file) if skip_headers: next(rows) if re.match(carto.carto_connection_string_regex, connection_string) != None:", "= geometry_support == 'postgis' if indexes_fields != None: indexes_fields =", "{}'.format(new_table_name, old_table_name)) conn = engine.connect() if select_users != None: select_users", "logging_config): logger = get_logger(logging_config) if re.match(carto.carto_connection_string_regex, connection_string) != None: if", "@click.option('--logging-config', default='logging_config.conf') def read(table_name, connection_string, 
output_file, db_schema, geometry_support, from_srid, to_srid," ]
[ "NVIDIA CORPORATION. All rights reserved. # # Licensed under the", "out_dir, glm=\"\"): sclite_path = os.path.join(sctk_dir, \"bin\", \"sclite\") if not os.path.exists(sclite_path):", "os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'): logging.info(f\"Using local ASR model from {args.asr_model}\") asr_model", "import WER from nemo.collections.asr.models import EncDecCTCModel from nemo.utils import logging", "encoded_len, greedy_predictions = asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] ) for r in", "2.0 (the \"License\"); # you may not use this file", "from https://github.com/usnistgov/SCTK is required. Hypotheses and references are first saved", "local ASR model from {args.asr_model}\") asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using", "subprocess.check_output(f\"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all\", shell=True)", "hypfile = os.path.join(args.out_dir, \"hyp.trn\") reffile = os.path.join(args.out_dir, \"ref.trn\") with open(hypfile,", "import torch from nemo.collections.asr.metrics.wer import WER from nemo.collections.asr.models import EncDecCTCModel", "log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind in range(greedy_predictions.shape[0]): reference", "WER from nemo.collections.asr.models import EncDecCTCModel from nemo.utils import logging try:", "are scored after applying a glm file (if provided). \"\"\"", "after applying a glm file (if provided). 
\"\"\" import errno", "from argparse import ArgumentParser import torch from nemo.collections.asr.metrics.wer import WER", "if use_sctk: score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm) if __name__ ==", "{refglm} -i wsj -o all\", shell=True) can_gpu = torch.cuda.is_available() def", "\"\"\" import errno import json import os import subprocess from", "stdout=hypf) refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + \".glm\" with open(refglm, \"w\")", "hypotheses[i] + \" (\" + utt_id + \")\" + \"\\n\")", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "are first saved in trn format and are scored after", "references are first saved in trn format and are scored", "= os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'): logging.info(f\"Using local ASR model from {args.asr_model}\")", "asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))]) wer = WER(vocabulary=asr_model.decoder.vocabulary) hypotheses =", "os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk = os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'): logging.info(f\"Using local ASR", "if can_gpu: test_batch = [x.cuda() for x in test_batch] with", "16000, 'manifest_filepath': args.dataset, 'labels': asr_model.decoder.vocabulary, 'batch_size': args.batch_size, 'normalize_transcripts': not args.dont_normalize_text,", "import errno import json import os import subprocess from argparse", "os.path.join(out_dir, os.path.basename(hyp_fname)) + \".glm\" rfilt_cmd = [rfilter_path] + [glm] with", "all_log_probs.append(r) hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind in range(greedy_predictions.shape[0]): reference =", "args.dont_normalize_text, } ) if can_gpu: asr_model = asr_model.cuda() asr_model.eval() labels_map", "help=\"Pass: 'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\", type=str, required=True, help=\"path to evaluation 
data\")", "= parser.parse_args() torch.set_grad_enabled(False) if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk = os.path.exists(args.sctk_dir)", "[glm] with open(hypglm, \"w\") as hypf, open(hyp_fname, \"r\") as hyp_in:", "input_signal_length=test_batch[1] ) for r in log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions)", "EncDecCTCModel from nemo.utils import logging try: from torch.cuda.amp import autocast", "os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path) hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) +", "os.path.basename(ref_fname)) + \".glm\" with open(refglm, \"w\") as reff, open(ref_fname, \"r\")", "torch.cuda.amp import autocast except ImportError: from contextlib import contextmanager @contextmanager", "use this file except in compliance with the License. #", "hypotheses = [] references = [] all_log_probs = [] for", "non-English.\", ) parser.add_argument(\"--out_dir\", type=str, required=True, help=\"Destination dir for output files\")", "reserved. # # Licensed under the Apache License, Version 2.0", "local installation from https://github.com/usnistgov/SCTK is required. Hypotheses and references are", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "for batch_ind in range(greedy_predictions.shape[0]): reference = ''.join([labels_map[c] for c in", "beginning hyp_f.write(\" \" + hypotheses[i] + \" (\" + utt_id", "+ \" (\" + utt_id + \")\" + \"\\n\") if", "info_list def main(): parser = ArgumentParser() parser.add_argument( \"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\",", "License. 
# You may obtain a copy of the License", "for i in range(len(hypotheses)): utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter in", "'batch_size': args.batch_size, 'normalize_transcripts': not args.dont_normalize_text, } ) if can_gpu: asr_model", "2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under", "\")\" + \"\\n\") ref_f.write(\" \" + references[i] + \" (\"", "reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del test_batch", "under the License is distributed on an \"AS IS\" BASIS,", "c in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del test_batch info_list = get_utt_info(args.dataset) hypfile", "License for the specific language governing permissions and # limitations", "torch from nemo.collections.asr.metrics.wer import WER from nemo.collections.asr.models import EncDecCTCModel from", "nemo.utils import logging try: from torch.cuda.amp import autocast except ImportError:", "evaluation data\") parser.add_argument(\"--batch_size\", type=int, default=4) parser.add_argument( \"--dont_normalize_text\", default=False, action='store_true', help=\"Turn", "help=\"Turn off trasnscript normalization. 
Recommended for non-English.\", ) parser.add_argument(\"--out_dir\", type=str,", "'manifest_filepath': args.dataset, 'labels': asr_model.decoder.vocabulary, 'batch_size': args.batch_size, 'normalize_transcripts': not args.dont_normalize_text, }", "glm=args.glm) if __name__ == '__main__': main() # noqa pylint: disable=no-value-for-parameter", "hyp_in: subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf) refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + \".glm\"", "in asr_model.test_dataloader(): if can_gpu: test_batch = [x.cuda() for x in", "type=str, required=True, help=\"Destination dir for output files\") parser.add_argument(\"--sctk_dir\", type=str, required=False,", "(\" + utt_id + \")\" + \"\\n\") if use_sctk: score_with_sctk(args.sctk_dir,", "glm if os.path.exists(glm): rfilter_path = os.path.join(sctk_dir, \"bin\", \"rfilter1\") if not", "required=True, help=\"path to evaluation data\") parser.add_argument(\"--batch_size\", type=int, default=4) parser.add_argument( \"--dont_normalize_text\",", "from {args.asr_model}\") asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using NGC cloud ASR", "in compliance with the License. # You may obtain a", "import subprocess from argparse import ArgumentParser import torch from nemo.collections.asr.metrics.wer", "software # distributed under the License is distributed on an", "+ \"\\n\") if use_sctk: score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm) if", "+= wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind in range(greedy_predictions.shape[0]): reference = ''.join([labels_map[c] for", "hypotheses with sclite. 
A local installation from https://github.com/usnistgov/SCTK is required.", "default=\"\", help=\"Path to glm file\") args = parser.parse_args() torch.set_grad_enabled(False) if", "info_list.append(utt) return info_list def main(): parser = ArgumentParser() parser.add_argument( \"--asr_model\",", "type=str, default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass: 'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\", type=str, required=True, help=\"path", "default=False, action='store_true', help=\"Turn off trasnscript normalization. Recommended for non-English.\", )", "{args.asr_model}\") asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using NGC cloud ASR model", "if not os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path) # apply glm", "parser.add_argument(\"--out_dir\", type=str, required=True, help=\"Destination dir for output files\") parser.add_argument(\"--sctk_dir\", type=str,", "in log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind in range(greedy_predictions.shape[0]):", "parser = ArgumentParser() parser.add_argument( \"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass: 'QuartzNet15x5Base-En'\",", "asr_model.decoder.vocabulary, 'batch_size': args.batch_size, 'normalize_transcripts': not args.dont_normalize_text, } ) if can_gpu:", "sctk root dir\") parser.add_argument(\"--glm\", type=str, required=False, default=\"\", help=\"Path to glm", "utt_f: for line in utt_f: utt = json.loads(line) info_list.append(utt) return", "test_batch] with autocast(): log_probs, encoded_len, greedy_predictions = asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1]", "\"\"\" This script is based on speech_to_text_infer.py and allows you", "open(hypglm, \"w\") as hypf, open(hyp_fname, \"r\") as hyp_in: subprocess.run(rfilt_cmd, 
stdin=hyp_in,", "'sample_rate': 16000, 'manifest_filepath': args.dataset, 'labels': asr_model.decoder.vocabulary, 'batch_size': args.batch_size, 'normalize_transcripts': not", "os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path) # apply glm if os.path.exists(glm):", "get_utt_info(manifest_path): info_list = [] with open(manifest_path, \"r\") as utt_f: for", "for c in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del test_batch info_list = get_utt_info(args.dataset)", "as hyp_in: subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf) refglm = os.path.join(out_dir, os.path.basename(ref_fname)) +", "\"r\") as hyp_in: subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf) refglm = os.path.join(out_dir, os.path.basename(ref_fname))", "x in test_batch] with autocast(): log_probs, encoded_len, greedy_predictions = asr_model(", "else: refglm = ref_fname hypglm = hyp_fname _ = subprocess.check_output(f\"{sclite_path}", "to glm file\") args = parser.parse_args() torch.set_grad_enabled(False) if not os.path.exists(args.out_dir):", ") parser.add_argument(\"--out_dir\", type=str, required=True, help=\"Destination dir for output files\") parser.add_argument(\"--sctk_dir\",", "ref_f: for i in range(len(hypotheses)): utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter", "= ref_fname hypglm = hyp_fname _ = subprocess.check_output(f\"{sclite_path} -h {hypglm}", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "scored after applying a glm file (if provided). \"\"\" import", "+ \".glm\" rfilt_cmd = [rfilter_path] + [glm] with open(hypglm, \"w\")", "if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk = os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'): logging.info(f\"Using", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "range(greedy_predictions.shape[0]): reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del", "EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000, 'manifest_filepath': args.dataset, 'labels': asr_model.decoder.vocabulary, 'batch_size':", "= hyp_fname _ = subprocess.check_output(f\"{sclite_path} -h {hypglm} -r {refglm} -i", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "\"w\") as hyp_f, open(reffile, \"w\") as ref_f: for i in", "= EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using NGC cloud ASR model {args.asr_model}\") asr_model", "to in writing, software # distributed under the License is", "import contextmanager @contextmanager def autocast(enabled=None): yield def score_with_sctk(sctk_dir, ref_fname, hyp_fname,", "# See the License for the specific language governing permissions", "\"\\n\") ref_f.write(\" \" + references[i] + \" (\" + utt_id", "have a space at the beginning hyp_f.write(\" \" + hypotheses[i]", ") if can_gpu: asr_model = asr_model.cuda() asr_model.eval() labels_map = dict([(i,", "or agreed to in writing, software # distributed under the", "hypglm = hyp_fname _ = subprocess.check_output(f\"{sclite_path} -h {hypglm} -r {refglm}", "-h {hypglm} -r {refglm} -i wsj -o all\", shell=True) can_gpu", "required by applicable law or agreed to in writing, software", "= [] all_log_probs = [] for test_batch in asr_model.test_dataloader(): if", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. 
# You may obtain a copy of", "log_probs, encoded_len, greedy_predictions = asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] ) for r", "root dir\") parser.add_argument(\"--glm\", type=str, required=False, default=\"\", help=\"Path to glm file\")", "test_batch in asr_model.test_dataloader(): if can_gpu: test_batch = [x.cuda() for x", "except ImportError: from contextlib import contextmanager @contextmanager def autocast(enabled=None): yield", "parser.add_argument(\"--dataset\", type=str, required=True, help=\"path to evaluation data\") parser.add_argument(\"--batch_size\", type=int, default=4)", "https://github.com/usnistgov/SCTK is required. Hypotheses and references are first saved in", "can_gpu: asr_model = asr_model.cuda() asr_model.eval() labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for", "rfilter_path = os.path.join(sctk_dir, \"bin\", \"rfilter1\") if not os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT,", "reffile = os.path.join(args.out_dir, \"ref.trn\") with open(hypfile, \"w\") as hyp_f, open(reffile,", "reff, open(ref_fname, \"r\") as ref_in: subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff) else: refglm", "\" (\" + utt_id + \")\" + \"\\n\") if use_sctk:", "with open(manifest_path, \"r\") as utt_f: for line in utt_f: utt", "compliance with the License. 
# You may obtain a copy", "hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind in range(greedy_predictions.shape[0]): reference = ''.join([labels_map[c]", "agreed to in writing, software # distributed under the License", "-r {refglm} -i wsj -o all\", shell=True) can_gpu = torch.cuda.is_available()", "import autocast except ImportError: from contextlib import contextmanager @contextmanager def", "hyp_f, open(reffile, \"w\") as ref_f: for i in range(len(hypotheses)): utt_id", "distributed under the License is distributed on an \"AS IS\"", "FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path) hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + \".glm\" rfilt_cmd", "under the License. \"\"\" This script is based on speech_to_text_infer.py", "wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind in range(greedy_predictions.shape[0]): reference = ''.join([labels_map[c] for c", "type=str, required=False, default=\"\", help=\"Path to sctk root dir\") parser.add_argument(\"--glm\", type=str,", "stdout=reff) else: refglm = ref_fname hypglm = hyp_fname _ =", "\" (\" + utt_id + \")\" + \"\\n\") ref_f.write(\" \"", "express or implied. # See the License for the specific", "model {args.asr_model}\") asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000, 'manifest_filepath':", "except in compliance with the License. 
# You may obtain", "with open(hypglm, \"w\") as hypf, open(hyp_fname, \"r\") as hyp_in: subprocess.run(rfilt_cmd,", "help=\"Path to sctk root dir\") parser.add_argument(\"--glm\", type=str, required=False, default=\"\", help=\"Path", "required=False, default=\"\", help=\"Path to glm file\") args = parser.parse_args() torch.set_grad_enabled(False)", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "This script is based on speech_to_text_infer.py and allows you to", "for r in log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind", "use_sctk: score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm) if __name__ == '__main__':", "= ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del test_batch info_list", "writing, software # distributed under the License is distributed on", "-o all\", shell=True) can_gpu = torch.cuda.is_available() def get_utt_info(manifest_path): info_list =", "= [x.cuda() for x in test_batch] with autocast(): log_probs, encoded_len,", "required=True, help=\"Destination dir for output files\") parser.add_argument(\"--sctk_dir\", type=str, required=False, default=\"\",", "# limitations under the License. 
\"\"\" This script is based", "you may not use this file except in compliance with", "wer = WER(vocabulary=asr_model.decoder.vocabulary) hypotheses = [] references = [] all_log_probs", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "utt_id + \")\" + \"\\n\") if use_sctk: score_with_sctk(args.sctk_dir, reffile, hypfile,", "from contextlib import contextmanager @contextmanager def autocast(enabled=None): yield def score_with_sctk(sctk_dir,", "torch.cuda.is_available() def get_utt_info(manifest_path): info_list = [] with open(manifest_path, \"r\") as", "i in range(len(hypotheses)): utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter in sctk", "autocast except ImportError: from contextlib import contextmanager @contextmanager def autocast(enabled=None):", "\".glm\" with open(refglm, \"w\") as reff, open(ref_fname, \"r\") as ref_in:", "in sctk likes each transcript to have a space at", "+ \" (\" + utt_id + \")\" + \"\\n\") ref_f.write(\"", "{hypglm} -r {refglm} -i wsj -o all\", shell=True) can_gpu =", "CONDITIONS OF ANY KIND, either express or implied. # See", "as utt_f: for line in utt_f: utt = json.loads(line) info_list.append(utt)", "\"w\") as ref_f: for i in range(len(hypotheses)): utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0]", "default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass: 'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\", type=str, required=True, help=\"path to", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "[] with open(manifest_path, \"r\") as utt_f: for line in utt_f:", "default=4) parser.add_argument( \"--dont_normalize_text\", default=False, action='store_true', help=\"Turn off trasnscript normalization. 
Recommended", "refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + \".glm\" with open(refglm, \"w\") as", "asr_model.cuda() asr_model.eval() labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])", "autocast(): log_probs, encoded_len, greedy_predictions = asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] ) for", "os.path.join(sctk_dir, \"bin\", \"rfilter1\") if not os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path)", "= [] with open(manifest_path, \"r\") as utt_f: for line in", "os.path.basename(hyp_fname)) + \".glm\" rfilt_cmd = [rfilter_path] + [glm] with open(hypglm,", "to sctk root dir\") parser.add_argument(\"--glm\", type=str, required=False, default=\"\", help=\"Path to", "at the beginning hyp_f.write(\" \" + hypotheses[i] + \" (\"", "os.path.join(sctk_dir, \"bin\", \"sclite\") if not os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path)", "trasnscript normalization. Recommended for non-English.\", ) parser.add_argument(\"--out_dir\", type=str, required=True, help=\"Destination", "sclite. A local installation from https://github.com/usnistgov/SCTK is required. Hypotheses and", "+ \"\\n\") ref_f.write(\" \" + references[i] + \" (\" +", "_ = subprocess.check_output(f\"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o", "greedy_predictions = asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] ) for r in log_probs.cpu().numpy():", "OR CONDITIONS OF ANY KIND, either express or implied. #", "A local installation from https://github.com/usnistgov/SCTK is required. 
Hypotheses and references", "input_signal=test_batch[0], input_signal_length=test_batch[1] ) for r in log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses +=", "nemo.collections.asr.metrics.wer import WER from nemo.collections.asr.models import EncDecCTCModel from nemo.utils import", "the License is distributed on an \"AS IS\" BASIS, #", "if args.asr_model.endswith('.nemo'): logging.info(f\"Using local ASR model from {args.asr_model}\") asr_model =", "+ [glm] with open(hypglm, \"w\") as hypf, open(hyp_fname, \"r\") as", "utt = json.loads(line) info_list.append(utt) return info_list def main(): parser =", "= os.path.join(args.out_dir, \"ref.trn\") with open(hypfile, \"w\") as hyp_f, open(reffile, \"w\")", "language governing permissions and # limitations under the License. \"\"\"", "FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path) # apply glm if os.path.exists(glm): rfilter_path =", "file\") args = parser.parse_args() torch.set_grad_enabled(False) if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk", "in range(len(asr_model.decoder.vocabulary))]) wer = WER(vocabulary=asr_model.decoder.vocabulary) hypotheses = [] references =", "parser.add_argument( \"--dont_normalize_text\", default=False, action='store_true', help=\"Turn off trasnscript normalization. Recommended for", "[] for test_batch in asr_model.test_dataloader(): if can_gpu: test_batch = [x.cuda()", "not args.dont_normalize_text, } ) if can_gpu: asr_model = asr_model.cuda() asr_model.eval()", "parser.add_argument(\"--sctk_dir\", type=str, required=False, default=\"\", help=\"Path to sctk root dir\") parser.add_argument(\"--glm\",", "raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path) # apply glm if os.path.exists(glm): rfilter_path", "# rfilter in sctk likes each transcript to have a", "glm file (if provided). 
\"\"\" import errno import json import", "= ArgumentParser() parser.add_argument( \"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass: 'QuartzNet15x5Base-En'\", )", "law or agreed to in writing, software # distributed under", "required. Hypotheses and references are first saved in trn format", "[] all_log_probs = [] for test_batch in asr_model.test_dataloader(): if can_gpu:", "rfilt_cmd = [rfilter_path] + [glm] with open(hypglm, \"w\") as hypf,", "dir for output files\") parser.add_argument(\"--sctk_dir\", type=str, required=False, default=\"\", help=\"Path to", "Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # #", "contextmanager @contextmanager def autocast(enabled=None): yield def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir,", "= [] references = [] all_log_probs = [] for test_batch", ") for r in log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) for", "for output files\") parser.add_argument(\"--sctk_dir\", type=str, required=False, default=\"\", help=\"Path to sctk", "required=False, default=\"\", help=\"Path to sctk root dir\") parser.add_argument(\"--glm\", type=str, required=False,", "likes each transcript to have a space at the beginning", "Hypotheses and references are first saved in trn format and", "ArgumentParser import torch from nemo.collections.asr.metrics.wer import WER from nemo.collections.asr.models import", "may obtain a copy of the License at # #", "asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000, 'manifest_filepath': args.dataset, 'labels': asr_model.decoder.vocabulary, 'batch_size': args.batch_size,", "= torch.cuda.is_available() def get_utt_info(manifest_path): info_list = [] with open(manifest_path, \"r\")", "= dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))]) wer = WER(vocabulary=asr_model.decoder.vocabulary)", "json import os import subprocess from 
argparse import ArgumentParser import", "ref_fname hypglm = hyp_fname _ = subprocess.check_output(f\"{sclite_path} -h {hypglm} -r", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "os import subprocess from argparse import ArgumentParser import torch from", "parser.add_argument(\"--batch_size\", type=int, default=4) parser.add_argument( \"--dont_normalize_text\", default=False, action='store_true', help=\"Turn off trasnscript", "= os.path.join(out_dir, os.path.basename(ref_fname)) + \".glm\" with open(refglm, \"w\") as reff,", "EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using NGC cloud ASR model {args.asr_model}\") asr_model =", "for test_batch in asr_model.test_dataloader(): if can_gpu: test_batch = [x.cuda() for", "sclite_path = os.path.join(sctk_dir, \"bin\", \"sclite\") if not os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT,", "= get_utt_info(args.dataset) hypfile = os.path.join(args.out_dir, \"hyp.trn\") reffile = os.path.join(args.out_dir, \"ref.trn\")", "may not use this file except in compliance with the", "open(hypfile, \"w\") as hyp_f, open(reffile, \"w\") as ref_f: for i", "= asr_model.cuda() asr_model.eval() labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "provided). \"\"\" import errno import json import os import subprocess", "references.append(reference) del test_batch info_list = get_utt_info(args.dataset) hypfile = os.path.join(args.out_dir, \"hyp.trn\")", "hyp_f.write(\" \" + hypotheses[i] + \" (\" + utt_id +", "[rfilter_path] + [glm] with open(hypglm, \"w\") as hypf, open(hyp_fname, \"r\")", "this file except in compliance with the License. 
# You", "+ \".glm\" with open(refglm, \"w\") as reff, open(ref_fname, \"r\") as", "ArgumentParser() parser.add_argument( \"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass: 'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\",", "= os.path.join(args.out_dir, \"hyp.trn\") reffile = os.path.join(args.out_dir, \"ref.trn\") with open(hypfile, \"w\")", "subprocess from argparse import ArgumentParser import torch from nemo.collections.asr.metrics.wer import", "type=int, default=4) parser.add_argument( \"--dont_normalize_text\", default=False, action='store_true', help=\"Turn off trasnscript normalization.", "\")\" + \"\\n\") if use_sctk: score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm)", "asr_model.eval() labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))]) wer", "and references are first saved in trn format and are", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "get_utt_info(args.dataset) hypfile = os.path.join(args.out_dir, \"hyp.trn\") reffile = os.path.join(args.out_dir, \"ref.trn\") with", "# # Licensed under the Apache License, Version 2.0 (the", "and are scored after applying a glm file (if provided).", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "parser.parse_args() torch.set_grad_enabled(False) if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk = os.path.exists(args.sctk_dir) if", "\"bin\", \"rfilter1\") if not os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path) hypglm", "json.loads(line) info_list.append(utt) return info_list def main(): parser = ArgumentParser() parser.add_argument(", "= os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter in sctk likes each transcript to", "errno import json import os import subprocess from argparse import", "parser.add_argument( \"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass: 'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\", type=str,", "stdin=hyp_in, stdout=hypf) refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + \".glm\" with open(refglm,", "contextlib import contextmanager @contextmanager def autocast(enabled=None): yield def score_with_sctk(sctk_dir, ref_fname,", "\"rfilter1\") if not os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path) hypglm =", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "+ utt_id + \")\" + \"\\n\") ref_f.write(\" \" + references[i]", "not os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path) # apply glm if", "ASR model from {args.asr_model}\") asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using NGC", "import os import subprocess from argparse import ArgumentParser import torch", "rights reserved. 
# # Licensed under the Apache License, Version", "stdin=ref_in, stdout=reff) else: refglm = ref_fname hypglm = hyp_fname _", "\"r\") as utt_f: for line in utt_f: utt = json.loads(line)", "each transcript to have a space at the beginning hyp_f.write(\"", "files\") parser.add_argument(\"--sctk_dir\", type=str, required=False, default=\"\", help=\"Path to sctk root dir\")", "def autocast(enabled=None): yield def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=\"\"): sclite_path", "use_sctk = os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'): logging.info(f\"Using local ASR model from", "batch_ind in range(greedy_predictions.shape[0]): reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()])", "args = parser.parse_args() torch.set_grad_enabled(False) if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk =", "with open(hypfile, \"w\") as hyp_f, open(reffile, \"w\") as ref_f: for", "with autocast(): log_probs, encoded_len, greedy_predictions = asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] )", "not os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk = os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'): logging.info(f\"Using local", "range(len(asr_model.decoder.vocabulary))]) wer = WER(vocabulary=asr_model.decoder.vocabulary) hypotheses = [] references = []", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "format and are scored after applying a glm file (if", "= [rfilter_path] + [glm] with open(hypglm, \"w\") as hypf, open(hyp_fname,", "os.path.join(args.out_dir, \"ref.trn\") with open(hypfile, \"w\") as hyp_f, open(reffile, \"w\") as", "= asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] ) for r in log_probs.cpu().numpy(): all_log_probs.append(r)", "or implied. # See the License for the specific language", "is required. 
Hypotheses and references are first saved in trn", "nemo.collections.asr.models import EncDecCTCModel from nemo.utils import logging try: from torch.cuda.amp", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "NGC cloud ASR model {args.asr_model}\") asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "@contextmanager def autocast(enabled=None): yield def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=\"\"):", "[] references = [] all_log_probs = [] for test_batch in", "test_data_config={ 'sample_rate': 16000, 'manifest_filepath': args.dataset, 'labels': asr_model.decoder.vocabulary, 'batch_size': args.batch_size, 'normalize_transcripts':", "not os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path) hypglm = os.path.join(out_dir, os.path.basename(hyp_fname))", "ImportError: from contextlib import contextmanager @contextmanager def autocast(enabled=None): yield def", "test_batch = [x.cuda() for x in test_batch] with autocast(): log_probs,", "+ references[i] + \" (\" + utt_id + \")\" +", "all_log_probs = [] for test_batch in asr_model.test_dataloader(): if can_gpu: test_batch", "logging.info(f\"Using NGC cloud ASR model {args.asr_model}\") asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data(", "and allows you to score the hypotheses with sclite. 
A", "for non-English.\", ) parser.add_argument(\"--out_dir\", type=str, required=True, help=\"Destination dir for output", "can_gpu: test_batch = [x.cuda() for x in test_batch] with autocast():", "= WER(vocabulary=asr_model.decoder.vocabulary) hypotheses = [] references = [] all_log_probs =", "if not os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path) hypglm = os.path.join(out_dir,", "(the \"License\"); # you may not use this file except", "main(): parser = ArgumentParser() parser.add_argument( \"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass:", "# you may not use this file except in compliance", "subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff) else: refglm = ref_fname hypglm = hyp_fname", "the beginning hyp_f.write(\" \" + hypotheses[i] + \" (\" +", "limitations under the License. \"\"\" This script is based on", "governing permissions and # limitations under the License. \"\"\" This", "in utt_f: utt = json.loads(line) info_list.append(utt) return info_list def main():", "in trn format and are scored after applying a glm", "shell=True) can_gpu = torch.cuda.is_available() def get_utt_info(manifest_path): info_list = [] with", "'normalize_transcripts': not args.dont_normalize_text, } ) if can_gpu: asr_model = asr_model.cuda()", "\"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\", required=False, help=\"Pass: 'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\", type=str, required=True,", "from torch.cuda.amp import autocast except ImportError: from contextlib import contextmanager", "= os.path.join(out_dir, os.path.basename(hyp_fname)) + \".glm\" rfilt_cmd = [rfilter_path] + [glm]", "output files\") parser.add_argument(\"--sctk_dir\", type=str, required=False, default=\"\", help=\"Path to sctk root", "info_list = [] with open(manifest_path, \"r\") as utt_f: for line", "dir\") parser.add_argument(\"--glm\", type=str, required=False, default=\"\", 
help=\"Path to glm file\") args", "os.path.join(args.out_dir, \"hyp.trn\") reffile = os.path.join(args.out_dir, \"ref.trn\") with open(hypfile, \"w\") as", "# # Unless required by applicable law or agreed to", "glm file\") args = parser.parse_args() torch.set_grad_enabled(False) if not os.path.exists(args.out_dir): os.makedirs(args.out_dir)", "os.path.join(out_dir, os.path.basename(ref_fname)) + \".glm\" with open(refglm, \"w\") as reff, open(ref_fname,", "saved in trn format and are scored after applying a", "utt_id + \")\" + \"\\n\") ref_f.write(\" \" + references[i] +", "you to score the hypotheses with sclite. A local installation", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "required=False, help=\"Pass: 'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\", type=str, required=True, help=\"path to evaluation", "Version 2.0 (the \"License\"); # you may not use this", "WER(vocabulary=asr_model.decoder.vocabulary) hypotheses = [] references = [] all_log_probs = []", "utt_f: utt = json.loads(line) info_list.append(utt) return info_list def main(): parser", "and # limitations under the License. \"\"\" This script is", "asr_model.test_dataloader(): if can_gpu: test_batch = [x.cuda() for x in test_batch]", "(c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed", ") parser.add_argument(\"--dataset\", type=str, required=True, help=\"path to evaluation data\") parser.add_argument(\"--batch_size\", type=int,", "model from {args.asr_model}\") asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using NGC cloud", "dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))]) wer = WER(vocabulary=asr_model.decoder.vocabulary) hypotheses", "implied. 
# See the License for the specific language governing", "reffile, hypfile, args.out_dir, glm=args.glm) if __name__ == '__main__': main() #", "asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000, 'manifest_filepath': args.dataset, 'labels':", "default=\"\", help=\"Path to sctk root dir\") parser.add_argument(\"--glm\", type=str, required=False, default=\"\",", "under the Apache License, Version 2.0 (the \"License\"); # you", "range(len(hypotheses)): utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter in sctk likes each", "refglm = ref_fname hypglm = hyp_fname _ = subprocess.check_output(f\"{sclite_path} -h", "subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf) refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + \".glm\" with", "os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter in sctk likes each transcript to have", "asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f\"Using NGC cloud ASR model {args.asr_model}\")", "\"hyp.trn\") reffile = os.path.join(args.out_dir, \"ref.trn\") with open(hypfile, \"w\") as hyp_f,", "+ \")\" + \"\\n\") ref_f.write(\" \" + references[i] + \"", "if os.path.exists(glm): rfilter_path = os.path.join(sctk_dir, \"bin\", \"rfilter1\") if not os.path.exists(rfilter_path):", "All rights reserved. # # Licensed under the Apache License,", "by applicable law or agreed to in writing, software #", "= json.loads(line) info_list.append(utt) return info_list def main(): parser = ArgumentParser()", "\" + references[i] + \" (\" + utt_id + \")\"", "based on speech_to_text_infer.py and allows you to score the hypotheses", "\"--dont_normalize_text\", default=False, action='store_true', help=\"Turn off trasnscript normalization. 
Recommended for non-English.\",", "cloud ASR model {args.asr_model}\") asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate':", "ASR model {args.asr_model}\") asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000,", "os.strerror(errno.ENOENT), sclite_path) # apply glm if os.path.exists(glm): rfilter_path = os.path.join(sctk_dir,", "hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + \".glm\" rfilt_cmd = [rfilter_path] +", "from nemo.collections.asr.models import EncDecCTCModel from nemo.utils import logging try: from", "type=str, required=True, help=\"path to evaluation data\") parser.add_argument(\"--batch_size\", type=int, default=4) parser.add_argument(", "yield def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=\"\"): sclite_path = os.path.join(sctk_dir,", "parser.add_argument(\"--glm\", type=str, required=False, default=\"\", help=\"Path to glm file\") args =", "{args.asr_model}\") asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000, 'manifest_filepath': args.dataset,", "allows you to score the hypotheses with sclite. 
A local", "glm=\"\"): sclite_path = os.path.join(sctk_dir, \"bin\", \"sclite\") if not os.path.exists(sclite_path): raise", "as ref_f: for i in range(len(hypotheses)): utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] #", "help=\"Path to glm file\") args = parser.parse_args() torch.set_grad_enabled(False) if not", "os.path.exists(glm): rfilter_path = os.path.join(sctk_dir, \"bin\", \"rfilter1\") if not os.path.exists(rfilter_path): raise", "space at the beginning hyp_f.write(\" \" + hypotheses[i] + \"", "type=str, required=False, default=\"\", help=\"Path to glm file\") args = parser.parse_args()", "def main(): parser = ArgumentParser() parser.add_argument( \"--asr_model\", type=str, default=\"QuartzNet15x5Base-En\", required=False,", "hypf, open(hyp_fname, \"r\") as hyp_in: subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf) refglm =", "to have a space at the beginning hyp_f.write(\" \" +", "applying a glm file (if provided). \"\"\" import errno import", "sctk likes each transcript to have a space at the", "in range(greedy_predictions.shape[0]): reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference)", "off trasnscript normalization. 
Recommended for non-English.\", ) parser.add_argument(\"--out_dir\", type=str, required=True,", "\"bin\", \"sclite\") if not os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path) #", "test_batch info_list = get_utt_info(args.dataset) hypfile = os.path.join(args.out_dir, \"hyp.trn\") reffile =", "\"ref.trn\") with open(hypfile, \"w\") as hyp_f, open(reffile, \"w\") as ref_f:", "in test_batch] with autocast(): log_probs, encoded_len, greedy_predictions = asr_model( input_signal=test_batch[0],", "hypfile, args.out_dir, glm=args.glm) if __name__ == '__main__': main() # noqa", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "transcript to have a space at the beginning hyp_f.write(\" \"", "Unless required by applicable law or agreed to in writing,", "as ref_in: subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff) else: refglm = ref_fname hypglm", "os.makedirs(args.out_dir) use_sctk = os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'): logging.info(f\"Using local ASR model", "data\") parser.add_argument(\"--batch_size\", type=int, default=4) parser.add_argument( \"--dont_normalize_text\", default=False, action='store_true', help=\"Turn off", "''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del test_batch info_list =", "import logging try: from torch.cuda.amp import autocast except ImportError: from", "references[i] + \" (\" + utt_id + \")\" + \"\\n\")", "the specific language governing permissions and # limitations under the", "\" + hypotheses[i] + \" (\" + utt_id + \")\"", "= [] for test_batch in asr_model.test_dataloader(): if can_gpu: test_batch =", "as hyp_f, open(reffile, \"w\") as ref_f: for i in range(len(hypotheses)):", "asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] ) for r in log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses", "args.out_dir, glm=args.glm) if __name__ == '__main__': 
main() # noqa pylint:", "applicable law or agreed to in writing, software # distributed", "open(refglm, \"w\") as reff, open(ref_fname, \"r\") as ref_in: subprocess.run(rfilt_cmd, stdin=ref_in,", "# apply glm if os.path.exists(glm): rfilter_path = os.path.join(sctk_dir, \"bin\", \"rfilter1\")", "action='store_true', help=\"Turn off trasnscript normalization. Recommended for non-English.\", ) parser.add_argument(\"--out_dir\",", "speech_to_text_infer.py and allows you to score the hypotheses with sclite.", "\"sclite\") if not os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path) # apply", "argparse import ArgumentParser import torch from nemo.collections.asr.metrics.wer import WER from", "wsj -o all\", shell=True) can_gpu = torch.cuda.is_available() def get_utt_info(manifest_path): info_list", "open(manifest_path, \"r\") as utt_f: for line in utt_f: utt =", "logging.info(f\"Using local ASR model from {args.asr_model}\") asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else:", "def get_utt_info(manifest_path): info_list = [] with open(manifest_path, \"r\") as utt_f:", "info_list = get_utt_info(args.dataset) hypfile = os.path.join(args.out_dir, \"hyp.trn\") reffile = os.path.join(args.out_dir,", "all\", shell=True) can_gpu = torch.cuda.is_available() def get_utt_info(manifest_path): info_list = []", "to score the hypotheses with sclite. 
A local installation from", "args.dataset, 'labels': asr_model.decoder.vocabulary, 'batch_size': args.batch_size, 'normalize_transcripts': not args.dont_normalize_text, } )", "apply glm if os.path.exists(glm): rfilter_path = os.path.join(sctk_dir, \"bin\", \"rfilter1\") if", "in writing, software # distributed under the License is distributed", "can_gpu = torch.cuda.is_available() def get_utt_info(manifest_path): info_list = [] with open(manifest_path,", "rfilter in sctk likes each transcript to have a space", "autocast(enabled=None): yield def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=\"\"): sclite_path =", "for i in range(len(asr_model.decoder.vocabulary))]) wer = WER(vocabulary=asr_model.decoder.vocabulary) hypotheses = []", "rfilter_path) hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + \".glm\" rfilt_cmd = [rfilter_path]", "Recommended for non-English.\", ) parser.add_argument(\"--out_dir\", type=str, required=True, help=\"Destination dir for", "-i wsj -o all\", shell=True) can_gpu = torch.cuda.is_available() def get_utt_info(manifest_path):", "import json import os import subprocess from argparse import ArgumentParser", "= os.path.join(sctk_dir, \"bin\", \"sclite\") if not os.path.exists(sclite_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),", "permissions and # limitations under the License. 
\"\"\" This script", "torch.set_grad_enabled(False) if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) use_sctk = os.path.exists(args.sctk_dir) if args.asr_model.endswith('.nemo'):", "def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=\"\"): sclite_path = os.path.join(sctk_dir, \"bin\",", "hyp_fname, out_dir, glm=\"\"): sclite_path = os.path.join(sctk_dir, \"bin\", \"sclite\") if not", "as hypf, open(hyp_fname, \"r\") as hyp_in: subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf) refglm", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "+ hypotheses[i] + \" (\" + utt_id + \")\" +", "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. #", "# You may obtain a copy of the License at", "hyp_fname _ = subprocess.check_output(f\"{sclite_path} -h {hypglm} -r {refglm} -i wsj", "try: from torch.cuda.amp import autocast except ImportError: from contextlib import", "i in range(len(asr_model.decoder.vocabulary))]) wer = WER(vocabulary=asr_model.decoder.vocabulary) hypotheses = [] references", "[x.cuda() for x in test_batch] with autocast(): log_probs, encoded_len, greedy_predictions", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "logging try: from torch.cuda.amp import autocast except ImportError: from contextlib", "the License. \"\"\" This script is based on speech_to_text_infer.py and", "file (if provided). \"\"\" import errno import json import os", "from nemo.collections.asr.metrics.wer import WER from nemo.collections.asr.models import EncDecCTCModel from nemo.utils", "raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path) hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + \".glm\"", "with open(refglm, \"w\") as reff, open(ref_fname, \"r\") as ref_in: subprocess.run(rfilt_cmd,", "for x in test_batch] with autocast(): log_probs, encoded_len, greedy_predictions =", "score the hypotheses with sclite. 
A local installation from https://github.com/usnistgov/SCTK", "utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter in sctk likes each transcript", "CORPORATION. All rights reserved. # # Licensed under the Apache", "r in log_probs.cpu().numpy(): all_log_probs.append(r) hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) for batch_ind in", "the hypotheses with sclite. A local installation from https://github.com/usnistgov/SCTK is", "if can_gpu: asr_model = asr_model.cuda() asr_model.eval() labels_map = dict([(i, asr_model.decoder.vocabulary[i])", "references = [] all_log_probs = [] for test_batch in asr_model.test_dataloader():", "open(hyp_fname, \"r\") as hyp_in: subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf) refglm = os.path.join(out_dir,", "the License for the specific language governing permissions and #", "trn format and are scored after applying a glm file", "Apache License, Version 2.0 (the \"License\"); # you may not", "open(reffile, \"w\") as ref_f: for i in range(len(hypotheses)): utt_id =", "import EncDecCTCModel from nemo.utils import logging try: from torch.cuda.amp import", "either express or implied. 
# See the License for the", "args.batch_size, 'normalize_transcripts': not args.dont_normalize_text, } ) if can_gpu: asr_model =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "is based on speech_to_text_infer.py and allows you to score the", "script is based on speech_to_text_infer.py and allows you to score", "score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=\"\"): sclite_path = os.path.join(sctk_dir, \"bin\", \"sclite\")", "labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))]) wer =", "+ \")\" + \"\\n\") if use_sctk: score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir,", "for line in utt_f: utt = json.loads(line) info_list.append(utt) return info_list", "in test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del test_batch info_list = get_utt_info(args.dataset) hypfile =", "from nemo.utils import logging try: from torch.cuda.amp import autocast except", "'QuartzNet15x5Base-En'\", ) parser.add_argument(\"--dataset\", type=str, required=True, help=\"path to evaluation data\") parser.add_argument(\"--batch_size\",", "help=\"path to evaluation data\") parser.add_argument(\"--batch_size\", type=int, default=4) parser.add_argument( \"--dont_normalize_text\", default=False,", "} ) if can_gpu: asr_model = asr_model.cuda() asr_model.eval() labels_map =", "return info_list def main(): parser = ArgumentParser() parser.add_argument( \"--asr_model\", type=str,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "as reff, open(ref_fname, \"r\") as ref_in: subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff) else:", "'labels': asr_model.decoder.vocabulary, 'batch_size': args.batch_size, 'normalize_transcripts': not args.dont_normalize_text, } ) if", "installation from https://github.com/usnistgov/SCTK is required. 
Hypotheses and references are first", "import ArgumentParser import torch from nemo.collections.asr.metrics.wer import WER from nemo.collections.asr.models", "\"\\n\") if use_sctk: score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm) if __name__", "sclite_path) # apply glm if os.path.exists(glm): rfilter_path = os.path.join(sctk_dir, \"bin\",", "a space at the beginning hyp_f.write(\" \" + hypotheses[i] +", "help=\"Destination dir for output files\") parser.add_argument(\"--sctk_dir\", type=str, required=False, default=\"\", help=\"Path", "= subprocess.check_output(f\"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all\",", "del test_batch info_list = get_utt_info(args.dataset) hypfile = os.path.join(args.out_dir, \"hyp.trn\") reffile", "first saved in trn format and are scored after applying", "normalization. Recommended for non-English.\", ) parser.add_argument(\"--out_dir\", type=str, required=True, help=\"Destination dir", "asr_model = asr_model.cuda() asr_model.eval() labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i", "\"License\"); # you may not use this file except in", "score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm) if __name__ == '__main__': main()", "ref_fname, hyp_fname, out_dir, glm=\"\"): sclite_path = os.path.join(sctk_dir, \"bin\", \"sclite\") if", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "a glm file (if provided). 
\"\"\" import errno import json", "os.strerror(errno.ENOENT), rfilter_path) hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + \".glm\" rfilt_cmd =", "to evaluation data\") parser.add_argument(\"--batch_size\", type=int, default=4) parser.add_argument( \"--dont_normalize_text\", default=False, action='store_true',", "in range(len(hypotheses)): utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0] # rfilter in sctk likes", "open(ref_fname, \"r\") as ref_in: subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff) else: refglm =", "(\" + utt_id + \")\" + \"\\n\") ref_f.write(\" \" +", "ref_f.write(\" \" + references[i] + \" (\" + utt_id +", "# distributed under the License is distributed on an \"AS", "line in utt_f: utt = json.loads(line) info_list.append(utt) return info_list def", "# Unless required by applicable law or agreed to in", "test_batch[2][batch_ind].cpu().detach().numpy()]) references.append(reference) del test_batch info_list = get_utt_info(args.dataset) hypfile = os.path.join(args.out_dir,", "\"r\") as ref_in: subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff) else: refglm = ref_fname", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "else: logging.info(f\"Using NGC cloud ASR model {args.asr_model}\") asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model)", "on speech_to_text_infer.py and allows you to score the hypotheses with", "(if provided). 
\"\"\" import errno import json import os import", "You may obtain a copy of the License at #", "ref_in: subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff) else: refglm = ref_fname hypglm =", "+ utt_id + \")\" + \"\\n\") if use_sctk: score_with_sctk(args.sctk_dir, reffile,", "\".glm\" rfilt_cmd = [rfilter_path] + [glm] with open(hypglm, \"w\") as", "= EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000, 'manifest_filepath': args.dataset, 'labels': asr_model.decoder.vocabulary,", "the Apache License, Version 2.0 (the \"License\"); # you may", "= os.path.join(sctk_dir, \"bin\", \"rfilter1\") if not os.path.exists(rfilter_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),", "\"w\") as hypf, open(hyp_fname, \"r\") as hyp_in: subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf)", "args.asr_model.endswith('.nemo'): logging.info(f\"Using local ASR model from {args.asr_model}\") asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model)", "\"w\") as reff, open(ref_fname, \"r\") as ref_in: subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff)", "with sclite. A local installation from https://github.com/usnistgov/SCTK is required. Hypotheses", "License. \"\"\" This script is based on speech_to_text_infer.py and allows" ]