input
stringlengths
2.65k
237k
output
stringclasses
1 value
# -*- coding=UTF-8 -*- # pyright: strict from __future__ import annotations import traceback from concurrent import futures from typing import Callable, Iterator, Optional, Tuple import cast_unknown as cast import cv2 import numpy as np from PIL.Image import Image from PIL.Image import fromarray as image_from_array from ... import action, app, imagetools, mathtools, ocr, template, templates from ...single_mode import Context, Training, training from ...single_mode.training import Partner from ..scene import Scene, SceneHolder from .command import CommandScene _TRAINING_CONFIRM = template.Specification( templates.SINGLE_MODE_TRAINING_CONFIRM, threshold=0.8 ) def _gradient(colors: Tuple[Tuple[Tuple[int, int, int], int], ...]) -> np.ndarray: ret = np.linspace((0, 0, 0), colors[0][0], colors[0][1]) for index, i in enumerate(colors[1:], 1): color, stop = i prev_color, prev_stop = colors[index - 1] g = np.linspace(prev_color, color, stop - prev_stop + 1) ret = np.concatenate((ret, g[1:])) return ret def _recognize_base_effect(img: Image) -> int: cv_img = imagetools.cv_image(imagetools.resize(img, height=32)) sharpened_img = imagetools.sharpen(cv_img) sharpened_img = imagetools.mix(sharpened_img, cv_img, 0.4) white_outline_img = imagetools.constant_color_key( sharpened_img, (255, 255, 255), ) white_outline_img_dilated = cv2.morphologyEx( white_outline_img, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), ) white_outline_img_dilated = cv2.morphologyEx( white_outline_img_dilated, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 7)), ) bg_mask_img = ( imagetools.bg_mask_by_outline(white_outline_img_dilated) + white_outline_img ) masked_img = cv2.copyTo(cv_img, 255 - bg_mask_img) brown_img = imagetools.constant_color_key( cv_img, (29, 62, 194), (24, 113, 218), (30, 109, 216), (69, 104, 197), (119, 139, 224), (103, 147, 223), (59, 142, 226), threshold=0.85, ) _, non_brown_img = cv2.threshold(brown_img, 120, 255, cv2.THRESH_BINARY_INV) 
border_brown_img = imagetools.border_flood_fill(non_brown_img) brown_outline_img = cv2.copyTo(brown_img, 255 - border_brown_img) bg_mask_img = imagetools.bg_mask_by_outline(brown_outline_img) masked_img = cv2.copyTo(masked_img, 255 - bg_mask_img) fill_gradient = _gradient( ( ((140, 236, 255), 0), ((140, 236, 255), round(cv_img.shape[0] * 0.25)), ((114, 229, 255), round(cv_img.shape[0] * 0.35)), ((113, 198, 255), round(cv_img.shape[0] * 0.55)), ((95, 179, 255), round(cv_img.shape[0] * 0.63)), ((74, 157, 255), round(cv_img.shape[0] * 0.70)), ((74, 117, 255), round(cv_img.shape[0] * 0.83)), ((74, 117, 255), cv_img.shape[0]), ) ).astype(np.uint8) fill_img = np.repeat(np.expand_dims(fill_gradient, 1), cv_img.shape[1], axis=1) assert fill_img.shape == cv_img.shape text_img = imagetools.color_key(masked_img, fill_img) text_img_extra = imagetools.constant_color_key( masked_img, (175, 214, 255), threshold=0.95, ) text_img = np.array(np.maximum(text_img, text_img_extra)) imagetools.fill_area(text_img, (0,), size_lt=48) app.log.image( "base effect", img, level=app.DEBUG, layers={ "sharpened": sharpened_img, "white_outline": white_outline_img, "white_outline_dilated": white_outline_img_dilated, "brown": brown_img, "non_brown": non_brown_img, "border_brown": border_brown_img, "brown_outline": brown_outline_img, "bg_mask": bg_mask_img, "masked": masked_img, "text_extra": text_img_extra, "text": text_img, }, ) if cv2.countNonZero(text_img) < 100: # ignore skin match result return 0 # +100 has different color hash100 = "000000000000006600ee00ff00ff00ff004e0000000000000000000000000000" if ( imagetools.compare_hash( imagetools.image_hash(imagetools.pil_image(text_img)), hash100, ) > 0.9 ): return 100 text = ocr.text(image_from_array(text_img)) if not text: return 0 return int(text.lstrip("+")) def _recognize_red_effect(img: Image) -> int: cv_img = imagetools.cv_image( imagetools.resize( imagetools.resize(img, height=24), height=48, ) ) sharpened_img = cv2.filter2D( cv_img, 8, 
np.array( ( (0, -1, 0), (-1, 5, -1), (0, -1, 0), ) ), ) sharpened_img = imagetools.mix(sharpened_img, cv_img, 0.5) white_outline_img = imagetools.constant_color_key( sharpened_img, (255, 255, 255), (222, 220, 237), (252, 254, 202), (236, 249, 105), (243, 220, 160), ) masked_img = imagetools.inside_outline(cv_img, white_outline_img) red_outline_img = imagetools.constant_color_key( cv_img, (15, 18, 216), (34, 42, 234), (56, 72, 218), (20, 18, 181), (27, 35, 202), ) red_outline_img = cv2.morphologyEx( red_outline_img, cv2.MORPH_CLOSE, np.ones((3, 3)), ) masked_img = imagetools.inside_outline(masked_img, red_outline_img) height = cv_img.shape[0] fill_gradient = _gradient( ( ((129, 211, 255), 0), ((126, 188, 255), round(height * 0.5)), ((82, 134, 255), round(height * 0.75)), ((36, 62, 211), height), ) ).astype(np.uint8) fill_img = np.repeat(np.expand_dims(fill_gradient, 1), cv_img.shape[1], axis=1) assert fill_img.shape == cv_img.shape text_img_base = imagetools.color_key(masked_img, fill_img) imagetools.fill_area(text_img_base, (0,), size_lt=8) text_img_extra = imagetools.constant_color_key( masked_img, (128, 196, 253), (136, 200, 255), (144, 214, 255), (58, 116, 255), (64, 111, 238), (114, 174, 251), (89, 140, 240), (92, 145, 244), (91, 143, 238), (140, 228, 254), threshold=0.95, ) text_img = np.array(np.maximum(text_img_base, text_img_extra)) h = cv_img.shape[0] imagetools.fill_area(text_img, (0,), size_lt=round(h * 0.2**2)) app.log.image( "red effect", cv_img, level=app.DEBUG, layers={ "sharppend": sharpened_img, "white_outline": white_outline_img, "red_outline": red_outline_img, "masked": masked_img, "fill": fill_img, "text_base": text_img_base, "text_extra": text_img_extra, "text": text_img, }, ) text = ocr.text(image_from_array(text_img)) if not text: return 0 return int(text.lstrip("+")) def _recognize_level(rgb_color: Tuple[int, ...]) -> int: if imagetools.compare_color((49, 178, 22), rgb_color) > 0.9: return 1 if imagetools.compare_color((46, 139, 244), 
rgb_color) > 0.9: return 2 if imagetools.compare_color((255, 134, 0), rgb_color) > 0.9: return 3 if imagetools.compare_color((244, 69, 132), rgb_color) > 0.9: return 4 if imagetools.compare_color((165, 78, 255), rgb_color) > 0.9: return 5 raise ValueError("_recognize_level: unknown level color: %s" % (rgb_color,)) def _recognize_failure_rate( rp: mathtools.ResizeProxy, trn: Training, img: Image ) -> float: x, y = trn.confirm_position bbox = ( x + rp.vector(15, 540), y + rp.vector(-155, 540), x + rp.vector(75, 540), y + rp.vector(-120, 540), ) rate_img = imagetools.cv_image(imagetools.resize(img.crop(bbox), height=48)) outline_img = imagetools.constant_color_key( rate_img, (252, 150, 14), (255, 183, 89), (0, 150, 255), (0, 69, 255), ) fg_img = imagetools.inside_outline(rate_img, outline_img) text_img = imagetools.constant_color_key( fg_img, (255, 255, 255), (18, 218, 255), ) app.log.image( "failure rate", rate_img, level=app.DEBUG, layers={ "outline": outline_img, "fg": fg_img, "text": text_img, }, ) text = ocr.text(imagetools.pil_image(text_img)) return int(text.strip("%")) / 100 def _estimate_vitality(ctx: Context, trn: Training) -> float: # https://gamewith.jp/uma-musume/article/show/257432 vit_data = { trn.TYPE_SPEED: (-21, -22, -23, -25, -27), trn.TYPE_STAMINA: (-19, -20, -21, -23, -25), trn.TYPE_POWER: (-20, -21, -22, -24, -26), trn.TYPE_GUTS: (-22, -23, -24, -26, -28), trn.TYPE_WISDOM: (5, 5, 5, 5, 5), } if trn.type not in vit_data: return 0 return vit_data[trn.type][trn.level - 1] / ctx.max_vitality def _iter_training_images(static: bool): rp = action.resize_proxy() radius = rp.vector(30, 540) _, first_confirm_pos = action.wait_image(_TRAINING_CONFIRM) yield template.screenshot() if static: return seen_confirm_pos = { first_confirm_pos, } for pos in ( rp.vector2((78, 850), 540), rp.vector2((171, 850), 540), rp.vector2((268, 850), 540), rp.vector2((367, 850), 540), rp.vector2((461, 850), 540), ): if mathtools.distance(first_confirm_pos, pos) < radius: 
continue action.tap(pos) _, pos = action.wait_image(_TRAINING_CONFIRM) if pos not in seen_confirm_pos: yield template.screenshot() seen_confirm_pos.add(pos) def _recognize_type_color(rp: mathtools.ResizeProxy, icon_img: Image) -> int: type_pos = rp.vector2((7, 18), 540) type_colors = ( ((36, 170, 255), Partner.TYPE_SPEED), ((255, 106, 86), Partner.TYPE_STAMINA), ((255, 151, 27), Partner.TYPE_POWER), ((255, 96, 156), Partner.TYPE_GUTS), ((3, 191, 126), Partner.TYPE_WISDOM), ((255, 179, 22), Partner.TYPE_FRIEND), ) for color, v in type_colors: if ( imagetools.compare_color_near( imagetools.cv_image(icon_img), type_pos, color[::-1] ) > 0.9 ): return v return Partner.TYPE_OTHER def _recognize_has_hint(rp: mathtools.ResizeProxy, icon_img: Image) -> bool: bbox = rp.vector4((50, 0, 58, 8), 540) hint_mark_color = (127, 67, 255) hint_mark_img = icon_img.crop(bbox) hint_mask = imagetools.constant_color_key( imagetools.cv_image(hint_mark_img), hint_mark_color ) return np.average(hint_mask) > 200 def _recognize_has_training( ctx: Context, rp: mathtools.ResizeProxy, icon_img: Image ) -> bool: if ctx.scenario != ctx.SCENARIO_AOHARU: return False bbox = rp.vector4((52, 0, 65, 8), 540) mark_img = icon_img.crop(bbox) mask = imagetools.constant_color_key( imagetools.cv_image(mark_img), (67, 131, 255), (82, 171, 255), threshold=0.9, ) mask_avg = np.average(mask) ret = mask_avg > 80 app.log.image( "has training: %s mask_avg=%0.2f" % (ret, mask_avg), icon_img, layers={ "mark": mask, }, ) return ret def _recognize_has_soul_burst( ctx: Context, rp: mathtools.ResizeProxy, icon_img: Image ) -> bool: if ctx.scenario != ctx.SCENARIO_AOHARU: return False bbox = rp.vector4((52, 0, 65, 8), 540) mark_img = imagetools.cv_image(icon_img.crop(bbox)) mask = imagetools.constant_color_key( mark_img, (198, 255, 255), threshold=0.9, ) mask_avg = np.average(mask) ret = mask_avg > 80 app.log.image( "has soul burst: %s mask_avg=%s" % (ret, mask_avg), icon_img, level=app.DEBUG, layers={ "mark": mark_img, 
"mark_mask": mask, }, ) return ret def _recognize_partner_level(rp: mathtools.ResizeProxy, icon_img: Image) -> int: pos = ( rp.vector2((10, 65), 540), # level 1 rp.vector2((20, 65), 540), # level 2 rp.vector2((33, 65), 540), # level 3 rp.vector2((43, 65), 540), # level 4 rp.vector2((55, 65), 540), # level 5 ) colors = ( (109, 108, 119), # empty (42, 192, 255), # level 1 (42, 192, 255), # level 2 (162, 230, 30), # level 3 (255, 173, 30), # level 4 (255, 235, 120), # level 5 ) spec: Tuple[Tuple[Tuple[Tuple[int, int], Tuple[int, int, int]], ...], ...] = ( # level 0 ( (pos[0], colors[0]), (pos[1], colors[0]), (pos[2], colors[0]), (pos[3], colors[0]), (pos[4], colors[0]), ), # level 1 ( (pos[0], colors[1]), (pos[1], colors[0]), (pos[2], colors[0]), (pos[3], colors[0]), (pos[4], colors[0]), ), # level 2 ( (pos[0], colors[2]), (pos[1], colors[2]), (pos[3], colors[0]), (pos[4], colors[0]), ), # level 3 ( (pos[0], colors[3]), (pos[1], colors[3]), (pos[2], colors[3]), (pos[4], colors[0]), ), # level 4 ( (pos[0], colors[4]), (pos[1], colors[4]), (pos[2], colors[4]), (pos[3], colors[4]), ), # level 5 ( (pos[0], colors[5]), (pos[4], colors[5]), ), ) for level, s in enumerate(spec): if all( imagetools.compare_color_near( imagetools.cv_image(icon_img), pos, color[::-1], ) > 0.95 for pos, color in s ): return level return -1 def _recognize_soul( rp: mathtools.ResizeProxy, screenshot: Image, icon_bbox: Tuple[int, int, int, int] ) -> float: right_bottom_icon_bbox = ( icon_bbox[0] + rp.vector(49, 540), icon_bbox[1] + rp.vector(32, 540), icon_bbox[0] +
^ d(29) ^ d(30) ^ d(31), q(0) ^ q(1) ^ q(6) ^ q(7) ^ q(9) ^ q(11) ^ q(12) ^ q(13) ^ q(16) ^ q(17) ^ q(24) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(6) ^ d(7) ^ d(9) ^ d(11) ^ d(12) ^ d(13) ^ d(16) ^ d(17) ^ d(24) ^ d(27) ^ d(28), q(0) ^ q(1) ^ q(2) ^ q(6) ^ q(7) ^ q(8) ^ q(9) ^ q(13) ^ q(14) ^ q(16) ^ q(17) ^ q(18) ^ q(24) ^ q(26) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(2) ^ d(6) ^ d(7) ^ d(8) ^ d(9) ^ d(13) ^ d(14) ^ d(16) ^ d(17) ^ d(18) ^ d(24) ^ d(26) ^ d(30) ^ d(31), q(1) ^ q(2) ^ q(3) ^ q(7) ^ q(8) ^ q(9) ^ q(10) ^ q(14) ^ q(15) ^ q(17) ^ q(18) ^ q(19) ^ q(25) ^ q(27) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(7) ^ d(8) ^ d(9) ^ d(10) ^ d(14) ^ d(15) ^ d(17) ^ d(18) ^ d(19) ^ d(25) ^ d(27) ^ d(31), q(0) ^ q(2) ^ q(3) ^ q(4) ^ q(6) ^ q(8) ^ q(11) ^ q(12) ^ q(15) ^ q(18) ^ q(19) ^ q(20) ^ q(24) ^ q(25) ^ q(29) ^ q(30) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(8) ^ d(11) ^ d(12) ^ d(15) ^ d(18) ^ d(19) ^ d(20) ^ d(24) ^ d(25) ^ d(29) ^ d(30) ^ d(31), q(0) ^ q(1) ^ q(3) ^ q(4) ^ q(5) ^ q(6) ^ q(7) ^ q(10) ^ q(13) ^ q(19) ^ q(20) ^ q(21) ^ q(24) ^ q(28) ^ q(29) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(19) ^ d(20) ^ d(21) ^ d(24) ^ d(28) ^ d(29), q(1) ^ q(2) ^ q(4) ^ q(5) ^ q(6) ^ q(7) ^ q(8) ^ q(11) ^ q(14) ^ q(20) ^ q(21) ^ q(22) ^ q(25) ^ q(29) ^ q(30) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(20) ^ d(21) ^ d(22) ^ d(25) ^ d(29) ^ d(30), q(0) ^ q(2) ^ q(3) ^ q(5) ^ q(7) ^ q(8) ^ q(10) ^ q(15) ^ q(16) ^ q(21) ^ q(22) ^ q(23) ^ q(24) ^ q(25) ^ q(28) ^ q(29) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(7) ^ d(8) ^ d(10) ^ d(15) ^ d(16) ^ d(21) ^ d(22) ^ d(23) ^ d(24) ^ d(25) ^ d(28) ^ d(29), q(0) ^ q(1) ^ q(3) ^ q(4) ^ q(8) ^ q(10) ^ q(11) ^ q(12) ^ q(17) ^ q(22) ^ q(23) ^ q(28) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(8) ^ d(10) ^ d(11) ^ d(12) ^ d(17) ^ d(22) ^ d(23) ^ d(28) ^ d(31), q(1) ^ q(2) ^ q(4) ^ q(5) ^ q(9) ^ q(11) ^ q(12) ^ q(13) ^ q(18) ^ q(23) ^ q(24) ^ q(29) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(9) ^ d(11) ^ d(12) ^ d(13) ^ d(18) ^ 
d(23) ^ d(24) ^ d(29), q(0) ^ q(2) ^ q(3) ^ q(5) ^ q(9) ^ q(13) ^ q(14) ^ q(16) ^ q(19) ^ q(26) ^ q(28) ^ q(29) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(9) ^ d(13) ^ d(14) ^ d(16) ^ d(19) ^ d(26) ^ d(28) ^ d(29) ^ d(31), q(0) ^ q(1) ^ q(3) ^ q(4) ^ q(9) ^ q(12) ^ q(14) ^ q(15) ^ q(16) ^ q(17) ^ q(20) ^ q(24) ^ q(25) ^ q(26) ^ q(27) ^ q(28) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(9) ^ d(12) ^ d(14) ^ d(15) ^ d(16) ^ d(17) ^ d(20) ^ d(24) ^ d(25) ^ d(26) ^ d(27) ^ d(28) ^ d(31), q(0) ^ q(1) ^ q(2) ^ q(4) ^ q(5) ^ q(6) ^ q(9) ^ q(12) ^ q(13) ^ q(15) ^ q(17) ^ q(18) ^ q(21) ^ q(24) ^ q(27) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(9) ^ d(12) ^ d(13) ^ d(15) ^ d(17) ^ d(18) ^ d(21) ^ d(24) ^ d(27) ^ d(30) ^ d(31), q(1) ^ q(2) ^ q(3) ^ q(5) ^ q(6) ^ q(7) ^ q(10) ^ q(13) ^ q(14) ^ q(16) ^ q(18) ^ q(19) ^ q(22) ^ q(25) ^ q(28) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(14) ^ d(16) ^ d(18) ^ d(19) ^ d(22) ^ d(25) ^ d(28) ^ d(31), q(2) ^ q(3) ^ q(4) ^ q(6) ^ q(7) ^ q(8) ^ q(11) ^ q(14) ^ q(15) ^ q(17) ^ q(19) ^ q(20) ^ q(23) ^ q(26) ^ q(29) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(15) ^ d(17) ^ d(19) ^ d(20) ^ d(23) ^ d(26) ^ d(29), q(3) ^ q(4) ^ q(5) ^ q(7) ^ q(8) ^ q(9) ^ q(12) ^ q(15) ^ q(16) ^ q(18) ^ q(20) ^ q(21) ^ q(24) ^ q(27) ^ q(30) ^ d(3) ^ d(4) ^ d(5) ^ d(7) ^ d(8) ^ d(9) ^ d(12) ^ d(15) ^ d(16) ^ d(18) ^ d(20) ^ d(21) ^ d(24) ^ d(27) ^ d(30), q(0) ^ q(4) ^ q(5) ^ q(8) ^ q(12) ^ q(13) ^ q(17) ^ q(19) ^ q(21) ^ q(22) ^ q(24) ^ q(26) ^ q(29) ^ q(30) ^ d(0) ^ d(4) ^ d(5) ^ d(8) ^ d(12) ^ d(13) ^ d(17) ^ d(19) ^ d(21) ^ d(22) ^ d(24) ^ d(26) ^ d(29) ^ d(30), q(1) ^ q(5) ^ q(6) ^ q(9) ^ q(13) ^ q(14) ^ q(18) ^ q(20) ^ q(22) ^ q(23) ^ q(25) ^ q(27) ^ q(30) ^ q(31) ^ d(1) ^ d(5) ^ d(6) ^ d(9) ^ d(13) ^ d(14) ^ d(18) ^ d(20) ^ d(22) ^ d(23) ^ d(25) ^ d(27) ^ d(30) ^ d(31), q(2) ^ q(6) ^ q(7) ^ q(10) ^ q(14) ^ q(15) ^ q(19) ^ q(21) ^ q(23) ^ q(24) ^ q(26) ^ q(28) ^ q(31) ^ d(2) ^ d(6) ^ d(7) ^ d(10) ^ 
d(14) ^ d(15) ^ d(19) ^ d(21) ^ d(23) ^ d(24) ^ d(26) ^ d(28) ^ d(31), q(3) ^ q(7) ^ q(8) ^ q(11) ^ q(15) ^ q(16) ^ q(20) ^ q(22) ^ q(24) ^ q(25) ^ q(27) ^ q(29) ^ d(3) ^ d(7) ^ d(8) ^ d(11) ^ d(15) ^ d(16) ^ d(20) ^ d(22) ^ d(24) ^ d(25) ^ d(27) ^ d(29), q(4) ^ q(8) ^ q(9) ^ q(12) ^ q(16) ^ q(17) ^ q(21) ^ q(23) ^ q(25) ^ q(26) ^ q(28) ^ q(30) ^ d(4) ^ d(8) ^ d(9) ^ d(12) ^ d(16) ^ d(17) ^ d(21) ^ d(23) ^ d(25) ^ d(26) ^ d(28) ^ d(30), q(5) ^ q(9) ^ q(10) ^ q(13) ^ q(17) ^ q(18) ^ q(22) ^ q(24) ^ q(26) ^ q(27) ^ q(29) ^ q(31) ^ d(5) ^ d(9) ^ d(10) ^ d(13) ^ d(17) ^ d(18) ^ d(22) ^ d(24) ^ d(26) ^ d(27) ^ d(29) ^ d(31), q(0) ^ q(9) ^ q(11) ^ q(12) ^ q(14) ^ q(16) ^ q(18) ^ q(19) ^ q(23) ^ q(24) ^ q(26) ^ q(27) ^ q(29) ^ q(31) ^ d(0) ^ d(9) ^ d(11) ^ d(12) ^ d(14) ^ d(16) ^ d(18) ^ d(19) ^ d(23) ^ d(24) ^ d(26) ^ d(27) ^ d(29) ^ d(31), q(0) ^ q(1) ^ q(6) ^ q(9) ^ q(13) ^ q(15) ^ q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(26) ^ q(27) ^ q(29) ^ q(31) ^ d(0) ^ d(1) ^
# -*- coding: utf-8 -*- """ This enables to parameterize a desired scenario to mock a multi-partner ML project. """ from datasets import dataset_mnist, dataset_cifar10, dataset_titanic from sklearn.model_selection import train_test_split import datetime import os import numpy as np import matplotlib.pyplot as plt import uuid import pandas as pd from loguru import logger import operator import random import utils from dataset import Dataset import constants from partner import Partner class Scenario: def __init__(self, params, experiment_path, scenario_id=1, n_repeat=1, is_dry_run=False): # --------------------------------------------------------------------- # Initialization of the dataset defined in the config of the experiment # --------------------------------------------------------------------- # Raise Exception if unknown parameters in the .yml file params_known = ["dataset_name", "dataset_proportion"] # Dataset related params_known += ["methods", "multi_partner_learning_approach", "aggregation_weighting"] # federated learning related params_known += ["partners_count", "amounts_per_partner", "corrupted_datasets", "samples_split_option"] # Partners related params_known += ["gradient_updates_per_pass_count", "epoch_count", "minibatch_count", "is_early_stopping"] # Computation related params_known += ["evaluation_partner_numbers","sequential_weighting_ponderation"] params_known += ["is_quick_demo"] if not all([x in params_known for x in params]): for x in params: if not x in params_known: logger.debug(f"Unrecognised parameter: {x}") raise Exception(f"Unrecognised parameters, check your .yml file") # Get and verify which dataset is configured supported_datasets_names = ["mnist", "cifar10", "titanic"] if "dataset_name" in params: dataset_name = params["dataset_name"] if dataset_name not in supported_datasets_names: raise Exception(f"Dataset named '{dataset_name}' is not supported (yet). 
You could add it!") else: dataset_name = "mnist" # default logger.debug(f"Dataset selected: {dataset_name}") # Reference the module corresponding to the dataset selected and initialize the Dataset object if dataset_name == "mnist": dataset_module = dataset_mnist elif dataset_name == "cifar10": dataset_module = dataset_cifar10 elif dataset_name == "titanic": dataset_module = dataset_titanic else: raise Exception(f"Dataset named '{dataset_name}' is not supported (yet). You could add it!") # The proportion of the dataset the computation will used if "dataset_proportion" in params: self.dataset_proportion = params["dataset_proportion"] assert self.dataset_proportion > 0, "Error in the config file, dataset_proportion should be > 0" assert self.dataset_proportion <= 1, "Error in the config file, dataset_proportion should be <= 1" else: self.dataset_proportion = 1 # default self.dataset = Dataset( dataset_name, dataset_module.x_train, dataset_module.x_test, dataset_module.y_train, dataset_module.y_test, dataset_module.input_shape, dataset_module.num_classes, dataset_module.preprocess_dataset_labels, dataset_module.generate_new_model_for_dataset, ) if self.dataset_proportion < 1: self.shorten_dataset_proportion() else: logger.debug(f"Computation use the full dataset for scenario #{scenario_id}") self.nb_samples_used = len(self.dataset.x_train) self.final_relative_nb_samples = [] # The train set is split into a train set and a validation set (used in particular for early stopping) self.dataset.train_val_split() # -------------------------------------- # Definition of collaborative scenarios # -------------------------------------- # List of all partners defined in the scenario self.partners_list = [] # partners mock different partners in a collaborative data science project # For defining the number of partners self.partners_count = params["partners_count"] # For configuring the respective sizes of the partners' datasets # Should the partners receive an equivalent amount of 
samples each or receive different amounts? # Define the percentages of samples per partner # Sum has to equal 1 and number of items has to equal partners_count self.amounts_per_partner = params["amounts_per_partner"] # For configuring if data samples are split between partners randomly or in a stratified way... # ... so that they cover distinct areas of the samples space if "samples_split_option" in params: (self.samples_split_type, self.samples_split_description) = params["samples_split_option"] else: (self.samples_split_type, self.samples_split_description) = ("basic", "random") # default # For configuring if the data of the partners are corrupted or not (useful for testing contributivity measures) if "corrupted_datasets" in params: self.corrupted_datasets = params["corrupted_datasets"] else: self.corrupted_datasets = ["not_corrupted"] * self.partners_count # default # --------------------------------------------------- # Configuration of the distributed learning approach # --------------------------------------------------- self.mpl = None self.evaluation_partner_numbers = None self.sequential_weighting_ponderation = None # Multi-partner learning approach multi_partner_learning_approaches_list = [ "fedavg", "seq-pure", "seq-with-final-agg", "seqavg", "qavg" ] if "multi_partner_learning_approach" in params: approach = params["multi_partner_learning_approach"] if approach in multi_partner_learning_approaches_list: self.multi_partner_learning_approach = approach if self.multi_partner_learning_approach == "qavg": # for specifiying evaluation on subpart of ther dataset if "evaluation_partner_numbers" in params: self.evaluation_partner_numbers = params['evaluation_partner_numbers'] else: self.evaluation_partner_numbers = len(self.partners_list) else: raise Exception(f"Multi-partner learning approach '{approach}' is not a valid approach.") else: self.multi_partner_learning_approach = 'fedavg' # default # Define how federated learning aggregation steps are weighted. 
Toggle between 'uniform' and 'data_volume' # Default is 'uniform' if "aggregation_weighting" in params: self.aggregation_weighting = params["aggregation_weighting"] if self.aggregation_weighting == 'sequential': if 'sequential_weighting_ponderation' in params: self.sequential_weighting_ponderation = params['sequential_weighting_ponderation'] else: self.sequential_weighting_ponderation = 0.5 else: self.aggregation_weighting = "uniform" # default # Number of epochs, mini-batches and fit_batches in ML training if "epoch_count" in params: self.epoch_count = params["epoch_count"] assert self.epoch_count > 0, "Error: in the provided config file, epoch_count should be > 0" else: self.epoch_count = 40 # default if "minibatch_count" in params: self.minibatch_count = params["minibatch_count"] assert self.minibatch_count > 0, "Error: in the provided config file, minibatch_count should be > 0" else: self.minibatch_count = 20 # default if "gradient_updates_per_pass_count" in params: self.gradient_updates_per_pass_count = params["gradient_updates_per_pass_count"] assert self.gradient_updates_per_pass_count > 0, "Error: in the provided config file, gradient_updates_per_pass_count should be > 0" else: self.gradient_updates_per_pass_count = constants.DEFAULT_GRADIENT_UPDATES_PER_PASS_COUNT # Early stopping stops ML training when performance increase is not significant anymore # It is used to optimize the number of epochs and the execution time if "is_early_stopping" in params: self.is_early_stopping = params["is_early_stopping"] else: self.is_early_stopping = True # default # ----------------------------------------------------------------- # Configuration of contributivity measurement methods to be tested # ----------------------------------------------------------------- # List of contributivity measures selected and computed in the scenario self.contributivity_list = [] # Contributivity methods contributivity_methods_list = [ "Shapley values", "Independent scores", "TMCS", 
"ITMCS", "IS_lin_S", "IS_reg_S", "AIS_Kriging_S", "SMCS", "WR_SMC", ] self.methods = [] if "methods" in params and params["methods"]: for method in params["methods"]: if method in contributivity_methods_list: self.methods.append(method) else: raise Exception(f"Contributivity method '{method}' is not in methods list.") # ------------- # Miscellaneous # ------------- # Scenario id and number of repetition self.scenario_id = scenario_id self.n_repeat = n_repeat if "is_quick_demo" in params: self.is_quick_demo = params["is_quick_demo"] if self.is_quick_demo and self.dataset_proportion < 1: raise Exception("Don't start a quick_demo without the full dataset") else: self.is_quick_demo = False # default # The quick demo parameters overwrites previously defined parameters to make the scenario faster to compute if "is_quick_demo" in params and params["is_quick_demo"]: # Use less data and/or less epochs to speed up the computations logger.info("Quick demo: limit number of data and number of epochs.") if (len(self.dataset.x_train) > 1000): index_train = np.random.choice(self.dataset.x_train.shape[0], 1000, replace=False) index_val = np.random.choice(self.dataset.x_val.shape[0], 500, replace=False) index_test = np.random.choice(self.dataset.x_test.shape[0], 500, replace=False) self.dataset.x_train = self.dataset.x_train[index_train] self.dataset.y_train = self.dataset.y_train[index_train] self.dataset.x_val = self.dataset.x_val[index_val] self.dataset.y_val = self.dataset.y_val[index_val] self.dataset.x_test = self.dataset.x_test[index_test] self.dataset.y_test = self.dataset.y_test[index_test] self.epoch_count = 3 self.minibatch_count = 2 # ------- # Outputs # ------- now = datetime.datetime.now() now_str = now.strftime("%Y-%m-%d_%Hh%M") self.scenario_name = ( "scenario_" + str(self.scenario_id) + "_" + "repeat" + "_" + str(self.n_repeat) + "_" + now_str + "_" + uuid.uuid4().hex[ :3 ] # This is to be sure 2 distinct scenarios do no have the same name ) self.short_scenario_name 
= ( str(self.partners_count) + " " + str(self.amounts_per_partner) ) self.save_folder = experiment_path / self.scenario_name if not is_dry_run: self.save_folder.mkdir(parents=True, exist_ok=True) # ------------------------------------------------ # Print the description of the scenario configured # ------------------------------------------------ if not is_dry_run: # Describe scenario logger.info("### Description of data scenario configured:") logger.info(f" Number of partners defined: {self.partners_count}") logger.info(f" Data distribution scenario chosen: {self.samples_split_description}") logger.info(f" Multi-partner learning approach: {self.multi_partner_learning_approach}") logger.info(f" Weighting option: {self.aggregation_weighting}") logger.info(f" Iterations parameters: " f"{self.epoch_count} epochs > " f"{self.minibatch_count} mini-batches > " f"{self.gradient_updates_per_pass_count} gradient updates per pass") # Describe data logger.info(f"### Data loaded: {self.dataset.name}") logger.info(f" {len(self.dataset.x_train)} train data with {len(self.dataset.y_train)} labels") logger.info(f" {len(self.dataset.x_val)} val data with {len(self.dataset.y_val)} labels") logger.info(f" {len(self.dataset.x_test)} test data with {len(self.dataset.y_test)} labels") def append_contributivity(self, contributivity): self.contributivity_list.append(contributivity) def instantiate_scenario_partners(self): """Create the partners_list - self.partners_list should be []""" if self.partners_list != []: raise Exception("self.partners_list should be []") self.partners_list = [Partner(i) for i in range(self.partners_count)] def split_data_fully_specified(self,is_logging_enabled=True): """Fully specified split: Populates the partners with trained and test data The following partition system is needed for each cluster: - nb_train : number of data used for training - nb_test : number of data used for testing - repartition : list of proportion for each class example : [ [ 1000, 500, 
[ 0.1 ,0.6 ,0.3 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ] ] ] # Added following fields : partners: - train_data_size => initialised while parsing config.yml - test_data_size => initialised while parsing
in enumerate(tqdm(self.G, desc=tqdm_desc_alg, leave=tqdm_leave, disable=(tqdm_disable or self.m == 1))): for g, alg in enumerate(self.G): # Initialize alg.reset() # Loop on time #for t in tqdm(self.T, desc=tqdm_desc_it, leave=tqdm_leave, disable=(tqdm_disable or self.n > 1 or self.m > 1) ): for t in self.T: # The algorithm chooses the arm to play i = alg.choose() # The arm played gives reward if prev_draw: x = X_i_t_j[i, t, j] else: x = self.A[i].draw() # The reward is returned to the algorithm alg.observe(x) # Save both H[j, g, t] = i X[j, g, t] = x #parallelism else: #initialize multiprocessing if num_threads is None: num_threads = num_cpus if ray.is_initialized() == False: ray.init(num_cpus=num_threads) # Initialize Rewards and History of selected Actions (3d matrices [t x g x i]) remote_X = _create_nparray.remote((self.n, self.m, self.h), dtype='float', ini_value=0.0) #successes remote_H = _create_nparray.remote((self.n, self.m, self.h), dtype='int', ini_value=-1) #history of actions # Draw for every arm all repetitions #if prev_draw: X_i_t_j = np.array([arm.draw((self.h, self.n)) for arm in self.A]) remote_X_i_t_j = ray.put(X_i_t_j) for j in tqdm(range(self.n)): [_run_episode.remote(j, self.A, alg, g, self.h, remote_X, remote_H, remote_X_i_t_j) for g, alg in enumerate(self.G)] X = ray.get(remote_X) H = ray.get(remote_H) #Translate Rewards following Domain R = X * self.d.r_amp + self.d.r_min #actions history, with initial action index being 1, not 0 H1 = H+1 #actions map (bool 4d matrix) H_a = np.array([[[[True if (H[j,g,t]==i) else False for t in self.T] for i in self.K] for g in range(self.m)] for j in range(self.n)], dtype='bool') #progressive actions count (int 4d matrix [t x j x i x a]) N_a = np.cumsum(H_a, axis=3) #averaged progressive actions count (float 3d matrix [t x j x a]) #averaged over repetitions self.average_pulls_count_ajt = self.MN_a = np.mean(N_a, axis=0) #progressive actions frequency (float 4d matrix [t x j x i x a]) F_a = N_a / self.T1 
#averaged progressive actions frequency (float 3d matrix [t x j x a]) #averaged over repetitions self.average_pulls_freq_ajt = self.MF_a = np.mean(F_a, axis=0) if (self.w is not None): #window count (int 4d matrix [t x j x i x a]) NW_a = np.concatenate((N_a[:,:,:,:self.w], N_a[:,:,:,self.w:] - N_a[:,:,:,:-self.w]), axis=3) #averaged window count (float 3d matrix [t x j x a]) #averaged over repetitions self.window_average_pulls_count_ajt = self.MNW_a = np.mean(NW_a, axis=0) #window frequency (float 4d matrix [t x j x i x a]) FW_a = np.concatenate((N_a[:,:,:,:self.w] / np.arange(1,self.w+1, dtype='float'), (N_a[:,:,:,self.w:] - N_a[:,:,:,:-self.w]) / float(self.w)), axis=3) #averaged window frequency (float 3d matrix [t x j x a]) #averaged over repetitions self.window_average_pulls_freq_ajt = self.MFW_a = np.mean(FW_a, axis=0) #final arm pull count (int 3d matrix [j x i x a]) #n_a = N_a[:,:,:,self.h-1] n_a = N_a[:,:,:,-1] #averaged final arm pull count (float 2d matrix [j x a]) #averaged over repetitions self.mn_a = np.mean(n_a, axis=0) #final arm pull frequency (float 3d matrix [j x i x a]) f_a = F_a[:,:,:,-1] #averaged final arm pull frequency (float 2d matrix [j x a]) #averaged over repetitions self.mf_a = np.mean(f_a, axis=0) #progressive cumulative rewards (float 3d matrix [t x j x i]) SR = np.cumsum(R, axis=2, dtype='float') #averaged progressive cumulative rewards (float 2d matrix [t x j]) #averaged over repetitions self.average_cumulative_rewards_jt = self.MSR = np.mean(SR, axis=0) #final rewards (float 2d matrix [j x i]) sr = SR[:,:,-1] #averaged final rewards (float 1d matrix [j]) #averaged over repetitions self.msr = np.mean(sr, axis=0) #and standard deviation self.dsr = np.std(sr, axis=0) #progressive average rewards (float 3d matrix [t x j x i]) #averaged over time MR = SR / self.T1 #averaged progressive average rewards (float 2d matrix [t x j]) #averaged over time and repetitions self.averaged_mean_reward_jt = self.MMR = np.mean(MR, axis=0) #regret 
(float 3d matrix [t x j x i]) L = self.mu_star - R #averaged regret (float 2d matrix [t x j]) #self.ML = np.mean(L, axis=0) #progressive average regret (float 3d matrix [t x j x i]) #averaged over time ML = self.mu_star - MR #averaged average regret (float 2d matrix [t x j]) #averaged over time and repetitions self.average_mean_regret_jt = self.MML = np.mean(ML, axis=0) #self.average_mean_regret_jt = self.MML = self.mu_star - self.MMR #cumulated regret (float 3d matrix [t x j x i]) SL = np.cumsum(L, axis=2, dtype='float') #averaged cumulated regret (float 2d matrix [t x j]) #averaged over repetitions self.average_cumulative_regret_jt = self.MSL = np.mean(SL, axis=0) #final cumulated regret (float 2d matrix [j x i]) sl = SL[:,:,-1] #averaged final cumulated regret (float 1d matrix [j]) #averaged over repetitions self.msl = np.mean(sl, axis=0) #and standard deviation self.dsl = np.std(sl, axis=0) #rewards map (float 4d matrix [t x j x i x a]) R_a = np.array([[[[R[j,g,t] if (H[j,g,t]==i) else 0.0 for t in self.T] for i in self.K] for g in range(self.m)] for j in range(self.n)], dtype='float') #averaged rewards map (float 3d matrix [t x j x a]) #averaged over repetitions self.MR_a = np.mean(R_a, axis=0) #progressive rewards map (int 4d matrix [t x j x i x a]) SR_a = np.cumsum(R_a, axis=3) #averaged progressive rewards map (float 3d matrix [t x j x a]) #averaged over repetitions self.MSR_a = np.mean(SR_a, axis=0) #final rewards per action (float 3d matrix [j x i x a]) sr_a = SR_a[:,:,:,-1] #averaged final rewards per action (float 2d matrix [j x a]) #averaged over repetitions self.msr_a = np.mean(sr_a, axis=0) #reward proportion per action (float 3d matrix [j x i x a]) fr_a = sr_a / SR[:,:,-1,np.newaxis] #averaged proportion per action (float 2d matrix [j x a]) #averaged over repetitions self.mfr_a = np.mean(fr_a, axis=0) #progressive budget (float 3d matrix [t x j x i]) # i.e. 
the progressive cumulative rewards plus initial budget B = SR + self.b_0 ##progressive on negative counter of episodes (float 3d matrix [t x j]) ## i.e. the number of episodes where, at each time t, alg j is running on negative budget #N = np.sum(B >= 0, axis=0) #averaged progressive budget (float 2d matrix [t x j]) #averaged over repetitions #self.MB = np.mean(B, axis=0) self.MB = self.MSR + self.b_0 #final budget (float 2d matrix [j x i]) b = B[:,:,-1] #averaged final budget (float 1d matrix [j]) #averaged over repetitions self.mb = np.mean(b, axis=0) #time map on non-positive budget (int 3d matrix [t x j x i]) #TNB = np.array([[[1 if(v<=0) else 0 for v in B_ij] for B_ij in B_i] for B_i in B]) TNB = (B <= 0).astype(int) #time dead map (int 3d matrix [t x j x i]) TD = np.maximum.accumulate(TNB, axis=2) #time alive map (int 3d matrix [t x j x i]) TS = 1 - TD #progressive death counter of episodes (float 3d matrix [t x j]) DC = np.sum(TD, axis=0) #final death counter dc = DC[:,-1] #progressive survival rate of episodes (float 3d matrix [t x j]) #MS = 1 - np.mean(TD, axis=0) self.MS = np.mean(TS, axis=0) #final survival counter self.ms = MS[:,-1] #progressive budget considering ruin (float 3d matrix [t x j x i]) # i.e. the progressive cumulative rewards plus initial budget #_RB = ma.masked_less_equal(_B, 0.0).filled(0.0) #_RB = np.maximum(B, 0.0) RB = np.multiply(B, TS) self.MRB = np.mean(self.RB, axis=0) #progressive penalized mean budget (float 3d matrix [t x j x i]) # i.e. the progressive mean budget multiplied by survival rate self.MPB = np.multiply(self.MB, self.MS) ##progressive budget excluding ruin episodes (float 3d matrix [t x j x i]) ## i.e. 
the progressive cumulative rewards plus initial budget #SB = ma.masked_less_equal(B, 0.0) # ##averaged progressive budget on survival episodes only (float 2d matrix [t x j]) #averaged over repetitions #self.MSB = np.mean(SB, axis=0) # ##final budget (float 2d matrix [j x i]) #sb = SB[:,:,-1] # ##averaged final budget (float 1d matrix [j]) #averaged over repetitions #self.msb = np.mean(sb, axis=0) ##time map of the averaged budget on negative (int 2d matrix [t x j]) #self.TNMB = np.array([[1 if(v<0) else 0 for v in MB_j] for MB_j in self.MB]) ##survival time (before ruin or end) (int 2d matrix [j x i])
import metrics
import numpy as np
import torch
import revdiff as rd
import unittest
import utils


def get_grad(out, x):
    """Return the gradient node of *out* with respect to *x*."""
    return rd.build_node_grad(out, x)


def val(x):
    """Wrap a raw scalar/array into a revdiff constant node."""
    return rd.build_val(x)


def get_arr_len(x):
    """Number of scalar elements in *x* (1 for non-array scalars)."""
    if isinstance(x, (np.ndarray, np.generic)):
        return x.size
    return 1


def mse(y_pred, y_true):
    """Mean squared error between two 1-D revdiff nodes.

    Matches ``torch.nn.MSELoss`` with the default ``reduction='mean'``,
    which the tests below compare against.
    """
    diff = (y_true - y_pred)
    s = rd.op_sum(diff * diff, axis=0)
    # BUG FIX: the original divided by len(y_pred.shape) (the *rank*, i.e. 1
    # for vectors) instead of the element count, which made this a plain sum
    # and could never match the mean-reduced torch.nn.MSELoss used in
    # test_mse / test_sgd_mse / test_layer_lin1.
    return (1 / y_pred.shape[0]) * s


class RDTestCase(unittest.TestCase):
    """Base test case: approximate-equality assertion with per-element eps."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ck_feps = 1e-6  # default tolerance; tests may loosen per call

    def ck_fequals(self, a, b, feps=None):
        """Assert *a* ~= *b*: total distance averaged over elements < feps."""
        if feps is None:
            feps = self.ck_feps
        dist = metrics.tdist(a, b) / get_arr_len(a)
        self.assertLess(dist, feps)


class BasicOps(RDTestCase):
    """Forward/backward checks for elementary revdiff operations."""

    def test_fdw_val(self):
        x = rd.build_val(2)
        y = rd.build_val(3)
        self.ck_fequals(x.eval(), 2)
        self.ck_fequals(y.eval(), 3)

    def test_bwd_val(self):
        x = rd.build_val(2)
        y = rd.build_val(3)
        self.ck_fequals(get_grad(x, x).eval(), 1)
        self.ck_fequals(get_grad(x, y).eval(), 0)
        self.ck_fequals(get_grad(y, y).eval(), 1)
        self.ck_fequals(get_grad(y, x).eval(), 0)

    def test_fwd_vadd(self):
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) + val(y)).eval(), x + y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) + val(y)).eval(), x + y)

    def test_bwd_vadd(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx + ty
        tlone = val(10)  # node not in tz's graph: gradient must be 0
        self.ck_fequals(get_grad(tz, tx).eval(), 1)
        self.ck_fequals(get_grad(tz, ty).eval(), 1)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_vsub(self):
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) - val(y)).eval(), x - y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) - val(y)).eval(), x - y)

    def test_bwd_vsub(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx - ty
        tlone = val(10)
        self.ck_fequals(get_grad(tz, tx).eval(), 1)
        self.ck_fequals(get_grad(tz, ty).eval(), -1)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_vmul(self):
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) * val(y)).eval(), x * y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) * val(y)).eval(), x * y)

    def test_bwd_vmul(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx * ty
        tlone = val(10)
        self.ck_fequals(get_grad(tz, tx).eval(), y)
        self.ck_fequals(get_grad(tz, ty).eval(), x)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_vdiv(self):
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) / val(y)).eval(), x / y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) / val(y)).eval(), x / y, feps=1e-4)

    def test_bwd_vdiv(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx / ty
        tlone = val(10)
        self.ck_fequals(get_grad(tz, tx).eval(), 1 / y)
        self.ck_fequals(get_grad(tz, ty).eval(), - x/(y**2))
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_dotvv(self):
        x = np.random.randn(7)
        y = np.random.randn(7)
        tx = val(x)
        ty = val(y)
        tz = rd.build_dot_vv(tx, ty)
        self.ck_fequals(tz.eval(), x @ y)

    def test_bwd_dotvv(self):
        x = np.random.randn(7)
        y = np.random.randn(7)
        tx = val(x)
        ty = val(y)
        tlone = val(10)
        tz = rd.build_dot_vv(tx, ty)
        self.ck_fequals(get_grad(tz, tx).eval(), y)
        self.ck_fequals(get_grad(tz, ty).eval(), x)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_bwd_vsadd(self):
        # scalar + vector broadcast, gradients checked against torch autograd
        x = np.random.randn()
        y = np.random.randn(12).astype(np.float32)
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsadd(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx + dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())

    def test_bwd_vsmul(self):
        x = np.random.randn()
        y = np.random.randn(12).astype(np.float32)
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsmul(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx * dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())

    def test_bwd_vsdiv(self):
        x = np.random.randn()
        # keep denominators bounded away from 0
        y = np.random.rand(12).astype(np.float32) + 0.1
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsdiv(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx / dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())

    def test_bwd_vlog(self):
        # log requires strictly positive inputs
        x = np.random.rand(12).astype(np.float32) + 0.1
        tx = val(x)
        tz = rd.build_vlog(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.log(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())


class LinReg(RDTestCase):
    """Linear / logistic regression losses vs torch reference."""

    def test_mse(self):
        y_pred = np.random.randn(46)
        y_true = np.random.randn(46)
        dy_pred = val(y_pred)
        dy_true = val(y_true)
        dloss = mse(dy_pred, dy_true)
        ty_pred = torch.tensor(y_pred, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=True)
        criterion = torch.nn.MSELoss()
        tloss = criterion(ty_pred, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(dloss, dy_pred).eval(), ty_pred.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())

    def test_sgd_mse(self):
        X = np.random.randn(46, 7)
        w = np.random.randn(7)
        y_true = np.random.randn(46)
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_pred = rd.build_dot_mv(dX, dw)
        dloss = mse(dy_pred, dy_true)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=True)
        ty_pred = torch.matmul(tX, tw)
        utils.save_grad(ty_pred)  # keep grad of non-leaf tensor for comparison
        criterion = torch.nn.MSELoss()
        tloss = criterion(ty_pred, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(get_grad(dloss, dy_pred).eval(), utils.get_grad(ty_pred).data.numpy())
        self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())

    def test_sgd_logreg_2(self):
        # binary logistic regression with the built-in BCE loss
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7).astype(np.float32)
        y_true = np.random.randint(0, 2, (46)).astype(np.float32)
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_out = rd.build_dot_mv(dX, dw)
        dy_pred = rd.build_vsigmoid(dy_out)
        dloss = rd.build_bce_loss(dy_out, dy_true)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=False)
        ty_out = torch.matmul(tX, tw)
        utils.save_grad(ty_out)
        ty_pred = torch.sigmoid(ty_out)
        criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(), utils.get_grad(ty_out).data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())

    def test_sgd_logreg_2_prim(self):
        # same as test_sgd_logreg_2, but sigmoid + BCE written from primitives
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7).astype(np.float32)
        y_true = np.random.randint(0, 2, (46)).astype(np.float32)
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_out = rd.build_dot_mv(dX, dw)
        dy_pred = rd.build_vsdiv(1, rd.build_vsadd(1, rd.build_vexp((-dy_out))))
        dloss = - rd.op_sum(dy_true * rd.build_vlog(dy_pred)
                            + (rd.build_vsadd(1, -dy_true)) * rd.build_vlog(rd.build_vsadd(1, -dy_pred)),
                            axis=0)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=False)
        ty_out = torch.matmul(tX, tw)
        utils.save_grad(ty_out)
        ty_pred = torch.sigmoid(ty_out)
        criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-2)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(), utils.get_grad(ty_out).data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy(), feps=1e-3)
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy(), feps=1e-4)

    def test_sgd_logreg_k(self):
        # multinomial logistic regression (softmax + cross-entropy)
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7, 4).astype(np.float32)
        y_true = np.zeros((46, 4)).astype(np.float32)
        for i in range(y_true.shape[0]):
            y_true[i][np.random.randint(0, y_true.shape[1])] = 1
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_out = rd.build_dot_mm(dX, dw)
        dy_pred = rd.build_softmax(dy_out)
        dloss = rd.build_cross_entropy_loss(dy_out, dy_true)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=False)
        ty_true = torch.argmax(ty_true, dim=1)  # torch wants class indices
        ty_out = torch.matmul(tX, tw)
        ty_pred = torch.nn.functional.softmax(ty_out, dim=1)
        utils.save_grad(ty_out)
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(), utils.get_grad(ty_out).data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())

    def test_sgd_logreg_k_l1_l2(self):
        # multinomial logistic regression with L1 + L2 regularization
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7, 4).astype(np.float32)
        y_true = np.zeros((46, 4)).astype(np.float32)
        for i in range(y_true.shape[0]):
            y_true[i][np.random.randint(0, y_true.shape[1])] = 1
        alpha_l1 = 0.53
        alpha_l2 = 0.82
        dX = val(X)
        dw = val(w)
        dw_flat = rd.build_reshape(dw, (dw.shape[0] * dw.shape[1],))
        dy_true = val(y_true)
        dy_out = rd.build_dot_mm(dX, dw)
        dy_pred = rd.build_softmax(dy_out)
        dloss = rd.build_cross_entropy_loss(dy_out, dy_true)
        dloss = dloss + alpha_l1 * rd.build_norm1(dw_flat)
        dloss = dloss + alpha_l2 * rd.build_dot_vv(dw_flat, dw_flat)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        tw_flat = tw.view(-1)
        ty_true = torch.tensor(y_true, requires_grad=False)
        ty_true = torch.argmax(ty_true, dim=1)
        ty_out = torch.matmul(tX, tw)
        ty_pred = torch.nn.functional.softmax(ty_out, dim=1)
        utils.save_grad(ty_out)
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss = tloss + alpha_l1 * torch.norm(tw_flat, p=1) + alpha_l2 * torch.dot(tw_flat, tw_flat)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(), utils.get_grad(ty_out).data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())


class MLP(RDTestCase):
    """Dense layer and activation functions vs torch reference."""

    def test_layer_lin1(self):
        X = np.random.randn(46, 7)
        y_true = np.random.randn(46, 3)
        W = np.random.randn(7, 3)
        b = np.random.randn(3)
        dX = val(X)
        dy_true = val(y_true)
        dW = val(W)
        db = val(b)
        dy_pred = rd.build_add_bias(rd.build_dot_mm(dX, dW), db)
        dloss = mse(rd.build_reshape(dy_pred, (y_true.size,)),
                    rd.build_reshape(dy_true, (y_true.size,)))
        tX = torch.tensor(X, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=True)
        tW = torch.tensor(W, requires_grad=True)
        tb = torch.tensor(b, requires_grad=True)
        ty_pred = torch.matmul(tX, tW) + tb
        criterion = torch.nn.MSELoss()
        tloss = criterion(ty_pred, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dW).eval(), tW.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(dloss, db).eval(), tb.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())

    def test_act_relu(self):
        x = np.random.randn(43)
        tx = val(x)
        tz = rd.build_vrelu(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.relu(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_softmax(self):
        x = np.random.randn(11, 7)
        tx = val(x)
        tz = rd.build_softmax(tx)
        tz = rd.build_reshape(tz, (11 * 7,))
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        # BUG FIX: the original compared the softmax node against torch.relu
        # (copy-paste from test_act_relu), which is the wrong reference.
        dz = torch.nn.functional.softmax(dx, dim=1).view(-1)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy(), feps=1e-1)
        # NOTE(review): the gradient check was already disabled upstream;
        # left disabled pending verification of rd.build_softmax's backward.
        #self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_softplus(self):
        x = np.random.randn(23)
        tx = val(x)
        tz = rd.build_vsoftplus(tx, 0.7)
        te = rd.build_dot_vv(tz, tz)
        # NOTE(review): SOURCE is truncated here -- the torch comparison that
        # presumably followed (cf. test_act_relu) was lost in extraction.
# <gh_stars>0  -- dataset-extraction artifact from the original file, kept as a comment
#! /usr/bin/env python
# coding: utf-8

import re
import json
import logging
from time import time

from JYTools import logger
from JYTools.util import is_num
from JYTools.StringTool import is_string, join_decode
from JYTools.util.file import FileWriter
from JYTools.JYWorker.util import ValueVerify, ReportScene
from ._Task import TaskStatus, TaskType
from ._redis import RedisWorker

__author__ = 'meisanggou'


class DAGTools(object):
    """Static validation helpers for pipeline (DAG) task definitions.

    Every verifier returns ``(ok, info)`` where ``info`` is
    ``dict(code=..., data=..., message=...)`` and ``code == 0`` means success.
    """

    # The only task types a pipeline item may declare.
    RIGHT_TASK_TYPE = ("app", "pipeline", "repeat-app", "repeat-pipeline")

    @classmethod
    def _verify_pipeline_attribute(cls, p_params):
        """Validate top-level pipeline attributes; flatten task_output into output_* keys."""
        if "task_list" not in p_params:
            error_msg = join_decode("pipeline应该包含task_list属性")
            logger.error(error_msg)
            return False, dict(code=2, data="task_list", message=error_msg)
        tl = p_params["task_list"]
        if isinstance(tl, list) is False:
            error_msg = join_decode(["task_list属性值的类型不正确。应该是list,传入的不是list类型,现在是", type(tl)])
            logger.error(error_msg)
            return False, dict(code=3, data="task_list", message=error_msg)
        if len(tl) <= 0:
            error_msg = join_decode("pipeline应该至少包含一个任务")
            logger.error(error_msg)
            return False, dict(code=4, data=len(tl), message=error_msg)
        if "task_output" in p_params:
            task_output = p_params["task_output"]
            if isinstance(task_output, dict) is False:
                error_msg = join_decode(["task_output属性值的类型不正确。应该是dict,传入的不是dict类型,现在是", type(task_output)])
                return False, dict(code=5, data="task_output", message=error_msg)
            # Flatten: task_output[k] becomes the "output_k" attribute.
            for key in task_output.keys():
                p_params["output_%s" % key] = task_output[key]
        return True, dict(code=0, data=None, message="success")

    @classmethod
    def _verify_pipeline_input_output(cls, p_params):
        """Check the pipeline's own output_*/input_* references against the task list."""
        tl = p_params["task_list"]
        # list(...) so len() works on Python 3 as well (filter() is lazy there).
        output_keys = list(filter(lambda x: x.startswith("output_"), p_params.keys()))
        if len(output_keys) <= 0:
            # warn 7: the pipeline declares no output at all
            warn_msg = "pipeline未设置一个输出,一般不会这样设置"
            logger.warning(warn_msg)
        for key in output_keys:
            if is_string(p_params[key]) is False:
                continue
            if p_params[key].startswith("&") is False:
                continue
            ref_d = DAGWorker.split_ref(p_params[key])
            ref_task = ref_d["ref_task"]
            if ref_task <= 0 or ref_task > len(tl):
                error_msg = join_decode(["pipeline的输出[", key, "]在视图引用子任务[", ref_task, "]的输出,但是这个子任务不存在"])
                logger.error(error_msg)
                return False, dict(code=7, data="%s|%s" % (key, ref_task), message=error_msg)
            ref_index = ref_task - 1
            ref_key = ref_d["key"]
            # Only finished tasks and non-app tasks are expected to expose outputs now.
            if TaskStatus.is_success(tl[ref_index]["task_status"]) or tl[ref_index]["task_type"] != "app":
                output_key = "output_" + ref_d["key"]
                if output_key not in tl[ref_index]:
                    error_msg = join_decode(["pipeline的输出[", key, "]在试图引用子任务[", ref_task, "]的输出[", ref_key,
                                             "],但是在这个子任务中并没有发现这个输出"])
                    logger.error(error_msg)
                    return False, dict(code=8, data="%s|%s|%s" % (key, ref_task, ref_key), message=error_msg)
        input_keys = list(filter(lambda x: x.startswith("input_"), p_params.keys()))
        for key in input_keys:
            if is_string(p_params[key]) is False:
                continue
            if p_params[key].startswith("&") is False:
                continue
            # warn 6: a pipeline input that is a string starting with "&"
            warn_msg = join_decode(["父任务的输入[", key, "]是个以&开头的字符串[", p_params[key], "],如果子任务引用到将会出错"])
            logger.warning(warn_msg)
        return True, dict(code=0, data=None, message="success")

    @classmethod
    def _verify_pipeline_item(cls, index, item):
        """Validate (and normalize in place) a single sub-task of the pipeline."""
        task_no = index + 1  # error messages use 1-based task numbers
        if "task_type" not in item:
            item["task_type"] = "app"
        if "task_status" not in item:
            item["task_status"] = TaskStatus.NONE
        if item["task_type"] not in cls.RIGHT_TASK_TYPE:
            error_msg = "invalid task_type, now index is %s, task_type is %s" % (index, item["task_type"])
            logger.error(error_msg)
            return False, dict(code=11, data=item["task_type"], message=error_msg)
        if "task_output" in item:
            task_output = item["task_output"]
            if isinstance(task_output, dict) is False:
                error_msg = "task_output need dict type, now index is %s, task_output type is %s" \
                            % (index, type(task_output))
                logger.error(error_msg)
                return False, dict(code=12, data=type(task_output), message=error_msg)
            for key in task_output.keys():
                if "output_%s" % key not in item:
                    item["output_%s" % key] = task_output[key]
        # Check work_tag ----------------------------------------------------------------------------------------------
        if item["task_type"] in ["app", "repeat-app"]:
            if "work_tag" not in item:
                error_msg = join_decode(["app类型的子任务需要包含work_tag属性,任务[", task_no, "]不符合要求"])
                logger.error(error_msg)
                return False, dict(code=13, data="work_tag", message=error_msg)
            work_tag = item["work_tag"]
            if is_string(work_tag) is False:
                error_msg = join_decode(["work_tag属性对应值,必须是字符串类型的,子任务[", task_no, "]的work_tag是[",
                                         type(work_tag), "]类型的"])
                logger.error(error_msg)
                return False, dict(code=14, data=type(work_tag), message=error_msg)
            if ValueVerify.v_work_tag(work_tag) is False:
                error_msg = join_decode(["work_tag属性对应值,仅允许包含数字字母下划线短横线,子任务[", task_no,
                                         "]的work_tag是[", work_tag, "]"])
                logger.error(error_msg)
                # NOTE(review): reuses code=14 and data=type(work_tag); data=work_tag
                # looks more likely intended -- kept as upstream wrote it, confirm.
                return False, dict(code=14, data=type(work_tag), message=error_msg)
        item_keys = item.keys()
        # Check outputs -----------------------------------------------------------------------------------------------
        output_keys = list(filter(lambda x: x.startswith("output_"), item_keys))
        if TaskStatus.is_success(item["task_status"]):
            if len(output_keys) <= 0:
                # warn 2: a successful sub-task without any output
                warn_msg = "子任务[%s]已经是完成状态,但是没有发现输出参数,一般应该有输出参数的" % task_no
                logger.warning(warn_msg)
            else:
                for key in output_keys:
                    # warn 3: a successful sub-task whose output value starts with "&"
                    if is_string(item[key]) and item[key].startswith("&"):
                        warn_msg = join_decode(["子任务[", task_no, "]的一个输出[", key, "]输出值为[", item[key],
                                                "],不应该以&开头,如果其他任务引用了该输出将会报错"])
                        logger.warning(warn_msg)
        elif item["task_type"] == "app":
            # warn 1: app sub-task not yet successful but task_output already set
            if len(output_keys) > 0:
                warn_msg = "子任务[%s]的状态还未成功,但发现了输出设置,我们一般不这么干" % task_no
                logger.warning(warn_msg)
        else:
            # warn 4: non-app sub-task without any declared output
            if len(output_keys) <= 0:
                warn_msg = join_decode(["子任务[", task_no, "],任务类型为[", item["task_type"],
                                        "],没有发现一个输出参数,一般应该有输出参数"])
                logger.warning(warn_msg)
        # Check repeat_freq
        if item["task_type"].startswith("repeat-") is True:
            if "repeat_freq" in item:
                repeat_freq = item["repeat_freq"]
                if is_num(repeat_freq) is False or repeat_freq <= 0:
                    error_msg = join_decode(["子任务[", task_no, "]设置了repeat_freq,设置为[", repeat_freq,
                                             "],不是数字或者不大于0"])
                    return False, dict(code=15, data=repeat_freq, message=error_msg)
        # Check for surplus keys --------------------------------------------------------------------------------------
        avail_keys = ["task_type", "task_output", "task_status", "task_name", "runtime"]
        if item["task_type"].endswith("pipeline") is True:
            avail_keys.append("task_list")
        else:
            avail_keys.append("work_tag")
        if item["task_status"] == TaskStatus.SUCCESS:
            avail_keys.extend(["start_time", "begin_time", "finished_time", "end_time", "task_message"])
        if item["task_type"].startswith("repeat-") is True:
            avail_keys.append("repeat_freq")
        surplus_keys = filter(lambda x: x not in avail_keys, item_keys)
        surplus_keys = filter(lambda x: x.startswith("input_") is False, surplus_keys)
        surplus_keys = list(filter(lambda x: x.startswith("output_") is False, surplus_keys))
        for key in surplus_keys:
            # warn 5: attribute not recognized for this task type/status
            warn_msg = join_decode(["子任务[", task_no, "]包含一个无用的属性[", key, "]"])
            logger.warning(warn_msg)
        return True, dict(code=0, data=None, message="success")

    @staticmethod
    def _verify_ref(p_params):
        """Validate every "&" reference between sub-tasks and detect cycles.

        Builds a dependency table (rs_l), then repeatedly "completes" tasks
        whose dependencies are all satisfied; if a pass completes nothing,
        the remaining references form a cycle (code=6).
        """
        tl = p_params["task_list"]
        task_len = len(tl)
        rs_l = [dict(quotes=list(), next=list(), index=i) for i in range(task_len)]
        completed_queue = [0]  # pseudo-task 0 == the parent pipeline itself
        for index in range(task_len):
            task_item = tl[index]
            task_no = index + 1
            if TaskStatus.is_success(task_item["task_status"]) is True:
                completed_queue.append(task_no)
                continue
            for k, v in task_item.items():
                if k.startswith("input_") is False:
                    continue
                if is_string(v) is False:
                    continue
                if v.startswith("&") is False:
                    continue
                ref_d = DAGWorker.split_ref(v)
                if ref_d is None:
                    error_msg = join_decode(["子任务[", task_no, "]的输入[", k, "]为字符串且以&开头,但不是一个合法引用格式"])
                    logger.error(error_msg)
                    return False, dict(code=21, data=k, message=error_msg)
                ref_key = ref_d["key"]
                ref_task = ref_d["ref_task"]
                if ref_d["index"] < 0 or ref_d["index"] > task_len:
                    error_msg = join_decode(["子任务[", task_no, "]的输入[", k, "]在试图引用子任务[", ref_d["index"],
                                             "]的输出,但是这个子任务不存在"])
                    logger.error(error_msg)
                    return False, dict(code=16, data="", message=error_msg)
                if ref_d["index"] not in rs_l[index]["quotes"]:
                    rs_l[index]["quotes"].append(ref_d["index"])
                    if ref_d["index"] > 0:
                        rs_l[ref_d["index"] - 1]["next"].append(index)
                if ref_d["required"] is False:
                    continue  # optional references are not existence-checked
                if ref_d["ref_task"] == 0:
                    # Reference to a parent-pipeline input.
                    input_key = "input_" + ref_d["key"]
                    if input_key not in p_params:
                        error_msg = join_decode(["子任务[", task_no, "]的输入[", k, "]在试图引用父任务的输入[", ref_d["key"],
                                                 "],但是在父任务中并没有发现这个输入"])
                        logger.error(error_msg)
                        return False, dict(code=17, data="%s|%s|%s" % (task_no, k, ref_d["key"]), message=error_msg)
                    input_v = p_params[input_key]
                    if is_string(input_v) and input_v.startswith("&"):
                        error_msg = join_decode(["子任务[", task_no, "]的输入[", k, "]在试图引用父任务的输入[", ref_key,
                                                 "],但是在父任务的输入为字符串类型,而且以&开头,这是不被允许的"])
                        logger.error(error_msg)
                        return False, dict(code=18, data="%s|%s|%s" % (task_no, k, ref_key), message=error_msg)
                    continue
                if ref_d["ref_task"] == task_no:
                    # A task referencing its own output.
                    error_msg = join_decode(["子任务[", task_no, "]的输入[", k, "]在试图引用自己的输出[", ref_d["key"],
                                             "],这是很搞笑的"])
                    logger.error(error_msg)
                    return False, dict(code=19, data="%s|%s|%s" % (task_no, k, ref_d["key"]), message=error_msg)
                ref_index = ref_d["ref_task"] - 1
                if TaskStatus.is_success(tl[ref_index]["task_status"]) or tl[ref_index]["task_type"] != "app":
                    output_key = "output_" + ref_d["key"]
                    if output_key not in tl[ref_index]:
                        error_msg = join_decode(["子任务[", task_no, "]的输入[", k, "]在试图引用子任务[", ref_task,
                                                 "]的输出[", ref_key, "],但是在这个子任务中并没有发现这个输出"])
                        logger.error(error_msg)
                        return False, dict(code=20, data="%s|%s|%s|%s" % (task_no, k, ref_task, ref_key),
                                           message=error_msg)
        # Topological elimination: detect cycles among the pending tasks.
        while True:
            completed_num = 0
            for index in range(task_len):
                if index + 1 in completed_queue:
                    continue
                rs_item = rs_l[index]
                q_len = len(rs_item["quotes"])
                for i in range(q_len - 1, -1, -1):
                    if rs_item["quotes"][i] in completed_queue:
                        rs_item["quotes"].remove(rs_item["quotes"][i])
                if len(rs_item["quotes"]) <= 0:
                    completed_queue.append(index + 1)
                    completed_num += 1
                    continue
            if len(completed_queue) == task_len + 1:
                return True, dict()
            if completed_num == 0:
                error_msg = "各个子任务之间引用存在回路"
                logger.error(error_msg)
                return False, dict(code=6, data=None, message=error_msg)
        # NOTE(review): unreachable (the while loop always returns) -- kept from upstream.
        return True, dict(code=0, data=None, message="success")

    @classmethod
    def ip_verify_pipeline(cls, p_params):
        """Validate a whole pipeline definition (trial-stage API).

        Checks performed:
          1. references do not go out of bounds
          2. the pipeline graph contains no cycle
          3. references to pipeline inputs actually exist
          4. references to a pipeline-type sub-task's outputs exist
          5. references to a successful app-type sub-task's outputs exist
          6. pipeline outputs reference existing sub-tasks

        Warnings (non-fatal):
          1. app sub-task not successful but task_output set
          2. successful sub-task with no output
          3. successful sub-task whose output value starts with "&"
          4. non-app sub-task with no output
          5. sub-task carries an unrecognized attribute
          6. pipeline input is a string starting with "&"
          7. pipeline declares no output
          8. a declared pipeline output cannot be found in its sub-task

        :param p_params: the pipeline definition (dict)
        :return: (ok, dict(code=..., data=..., message=...)) with error codes:
          1  pipeline must be a dict
          2  task_list attribute missing
          3  task_list is not a list
          4  task_list is empty
          5  task_output is not a dict
          6  the pipeline graph contains a cycle
          7  a pipeline output references a non-existent sub-task
          8  a pipeline output cannot be found in the referenced sub-task
          11 invalid sub-task task_type
          12 sub-task task_output is not a dict
          13 app/repeat-app sub-task missing work_tag
          14 work_tag not a string / contains illegal characters
          15 repeat_freq not a number or not > 0
          16 a sub-task input references a non-existent task's output
          17 a sub-task input references a non-existent pipeline input
          18 a referenced pipeline input is a string starting with "&"
          19 a sub-task references its own output
          20 a referenced sub-task output does not exist
          21 an "&"-prefixed input is not a valid reference format
        """
        logger.warning(u"你正在调用一个处于试用阶段的方法,测试结果仅供参考,请勿用于生产环境")
        if isinstance(p_params, dict) is False:
            error_msg = join_decode(["pipeline结构应该是个字典类型,现在是", type(p_params)])
            return False, dict(code=1, data=None, message=error_msg)
        # Normalize/validate top-level pipeline attributes.
        r, data = cls._verify_pipeline_attribute(p_params)
        if r is False:
            return r, data
        # Normalize/validate every sub-task.
        tl = p_params["task_list"]
        for index in range(len(tl)):
            item = tl[index]
            r, data = cls._verify_pipeline_item(index, item)
            if r is False:
                return r, data
        r, data = cls._verify_pipeline_input_output(p_params)
        if r is False:
            return r, data
        r, data = cls._verify_ref(p_params)
        if r is False:
            return r, data
        return True, dict(code=0, data=p_params, message="success")


class DAGWorker(RedisWorker):
    """Redis-backed worker that schedules the sub-tasks of a pipeline DAG."""

    expect_params_type = dict
    # Reference format: index + key, key either letter-led or "&"-digit-led,
    # optionally suffixed with "*" (the "required" marker).
    ref_compile = re.compile(r"^(\d{1,10})((&\d+|&*[a-z])\w{0,60})(\**)$", re.I)

    def __init__(self, conf_path=None, heartbeat_value=None, is_brother=False, work_tag=None, log_dir=None,
                 redis_host=None, redis_password=None, redis_port=None, redis_db=None, section_name="Redis",
                 **kwargs):
        # BUG FIX: upstream had the redaction artifact ``redis_password=<PASSWORD>``
        # (a syntax error); restored to None like the sibling keyword defaults.
        self.agent_tag = kwargs.pop("agent_tag", None)
        RedisWorker.__init__(self, conf_path, heartbeat_value, is_brother, work_tag, log_dir, redis_host,
                             redis_password, redis_port, redis_db, section_name, **kwargs)
        self.after_handle_funcs.append(self.after_handle)

    def push_task(self, key, params, work_tag=None, sub_key=None, report_tag=None, is_report=False,
                  report_scene=None, task_name=None):
        """Push a task, routing through the agent queue when agent_tag is set."""
        if self.agent_tag is not None:
            # Wrap the payload so the agent can re-dispatch to the real work_tag.
            if work_tag is not None and work_tag not in (self.work_tag, self.upload_log_tag) and is_report is False:
                params = dict(work_tag=work_tag, params=params)
                work_tag = self.agent_tag
        if report_scene is None:
            report_scene = ReportScene.Begin | ReportScene.End
        self._push_task(key, params, work_tag, sub_key, report_tag, is_report=is_report,
                        report_scene=report_scene, task_name=task_name)

    def push_control(self, key, expected_status, params=None, work_tag=None, sub_key=None, report_tag=None,
                     report_scene=ReportScene.END):
        """Push a control message, routing through the agent queue when agent_tag is set."""
        if self.agent_tag is not None:
            if params is None:
                params = dict()
            params.update(work_tag=work_tag)
            work_tag = self.agent_tag
        RedisWorker.push_control(self, key, expected_status, params, work_tag, sub_key, report_tag, report_scene)

    @staticmethod
    def split_ref(ref_str):
        """Parse an "&" reference string.

        Accepted forms (leading "&" optional):
          index + letter-led key
          index + "&" + digit-led key
          index + "&" + letter-led key
        An optional trailing "*" marks the reference as required.

        :return: a dict describing the reference, or None when *ref_str*
                 does not match the reference grammar.
        """
        if ref_str[0] == "&":
            ref_str = ref_str[1:]
        match_r = DAGWorker.ref_compile.match(ref_str)
        if match_r is None:
            return None
        ref_index = int(match_r.groups()[0])
        ref_key = match_r.groups()[1]
        required = match_r.groups()[3]
        if ref_key[0] == "&":
            # NOTE(review): SOURCE is truncated at this point -- the remainder
            # of split_ref (building the returned dict from ref_index/ref_key/
            # required) was lost in extraction.
            pass
# NOTE(review): this chunk is Python 2 Google App Engine (ndb-style) code —
# "async" is used as a parameter name, which became a keyword in Python 3.7.
        # (tail of an async-capable method whose head is above this chunk:
        # wait for the single future, then unwrap its result)
        storagetypes.wait_futures( [ret_fut] )
        return ret_fut.get_result()

    @classmethod
    def __compactify_get_candidates_delete( cls, volume_id, parent_id, dir_index_cutoff, async=False ):
        """
        Find the set of allocated index nodes beyond a given offset, suitable
        for swapping into a newly-freed slot.
        """
        # Query *allocated* directory-index nodes at or past the cutoff;
        # these are the candidates that can be moved down into freed slots.
        to_compactify = MSEntryDirEntIndex.ListAll( {"MSEntryDirEntIndex.parent_id ==": parent_id,
                                                     "MSEntryDirEntIndex.volume_id ==": volume_id,
                                                     "MSEntryDirEntIndex.alloced ==" : True,
                                                     "MSEntryDirEntIndex.dir_index >=": dir_index_cutoff},
                                                    async=async, limit=1024 )
        return to_compactify

    @classmethod
    def FindFreeGaps( cls, volume_id, parent_id, dir_index_cutoff, async=False, limit=1024 ):
        """
        Find the set of unallocated index slots less than a given offset, suitable
        for holding a newly-allocated directory index.
        """
        # List the *allocated* slots below the cutoff, then take the set
        # complement of [0, cutoff) to obtain the free gaps.
        # NOTE(review): with async=True, ListAll presumably returns a future,
        # and the comprehension below would iterate it directly — verify
        # whether callers ever pass async=True here.
        to_compactify = MSEntryDirEntIndex.ListAll( {"MSEntryDirEntIndex.parent_id ==": parent_id,
                                                     "MSEntryDirEntIndex.volume_id ==": volume_id,
                                                     "MSEntryDirEntIndex.alloced ==" : True,
                                                     "MSEntryDirEntIndex.dir_index <": dir_index_cutoff},
                                                    async=async, limit=limit )
        gaps = list( set(range(0, dir_index_cutoff)) - set([idx.dir_index for idx in to_compactify]) )
        return gaps

    @classmethod
    def __compactify_swap( cls, volume_id, parent_id, alloced_file_id, alloced_dir_index, free_file_id, free_dir_index, async=False ):
        """
        Atomically swap an allocated directory index node with an unallocated (or non-existant)
        directory index node, thereby placing the allocated directory index node into the "gap"
        left by the unallocated directory index node.

        This will delete the unallocated directory index node and its companion entry index node,
        and move the allocated directory index node's companion entry index node into place.

        alloced_file_id corresponds to the existing MSEntry (i.e. the one associated with the allocated index node)
        free_file_id corresponds to the deleted MSEntry, if applicable (i.e. the one associated with the free index node).
It can be None if there is no index node to delete for the file id. Return the dir index of the overwritten gap node on success, or None if free_file_id was None Return -ENOENT if the allocated dir index node no longer exists. Return -EAGAIN if we raced another process to allocate this slot and lost Return -ESTALE if the index allocation data is invalid (i.e. the free index got allocated, or the allocated index got freed) """ alloced_idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, alloced_dir_index ) alloced_entry_idx_key_name = MSEntryEntDirIndex.make_key_name( volume_id, alloced_file_id ) free_idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, free_dir_index ) alloced_entry_idx_key = storagetypes.make_key( MSEntryEntDirIndex, alloced_entry_idx_key_name ) free_idx_key = storagetypes.make_key( MSEntryDirEntIndex, free_idx_key_name ) # if the free file ID is not known, get it if free_file_id is None: free_idx_data = cls.__read_dirent_node( volume_id, parent_id, None, free_dir_index, check_file_id=False ) if free_idx_data is not None: free_idx_rc, free_idx = free_idx_data # it's okay if this index node does not exist if free_idx_rc != 0 and free_idx_rc != -errno.ENOENT: # some other error logging.error("/%s/%s: __read_dirent_node( /%s/%s, %s ) rc = %s" % (volume_id, parent_id, volume_id, free_file_id, free_dir_index, free_idx_rc )) if async: return storagetypes.FutureWrapper( free_idx_rc ) else: return free_idx_rc elif free_idx_rc == 0 and free_idx is not None: if free_idx.alloced: logging.error("/%s/%s: free index (/%s/%s, %s) is allocated" % (volume_id, parent_id, volume_id, free_idx.file_id, free_dir_index) ) storagetypes.memcache.delete_multi( [alloced_idx_key_name, alloced_entry_idx_key_name, free_idx_key_name] ) if async: return storagetypes.FutureWrapper( -errno.ESTALE ) else: return -errno.ESTALE else: logging.info("/%s/%s: file id of /%s/%s at %s is %s\n" % (volume_id, parent_id, volume_id, parent_id, 
free_dir_index, free_idx.file_id) ) @storagetypes.concurrent def do_swap( free_file_id ): # confirm that the allocated directory index node and free directory index node still exist free_idx_data = None free_idx_rc = None free_idx = None free_idx_file_id = None check_free_file_id = True if free_file_id is None: check_free_file_id = False alloced_idx_data, free_idx_data = yield cls.__read_dirent_node( volume_id, parent_id, alloced_file_id, alloced_dir_index, async=True ), cls.__read_dirent_node( volume_id, parent_id, free_file_id, free_dir_index, async=True, check_file_id=check_free_file_id ) alloced_idx_rc, alloced_idx = alloced_idx_data if free_idx_data is not None: free_idx_rc, free_idx = free_idx_data # possible that we raced another compactify operation and lost (in which case the allocated node might be different than what we expect) if alloced_idx_rc != 0: logging.error("/%s/%s: alloced index (/%s/%s, %s) rc = %s" % (volume_id, parent_id, volume_id, alloced_file_id, alloced_dir_index, alloced_idx_rc) ) storagetypes.concurrent_return( (-errno.EAGAIN, None, None) ) elif not alloced_idx.alloced: logging.error("/%s/%s: alloced index (/%s/%s, %s) is free" % (volume_id, parent_id, volume_id, alloced_file_id, alloced_dir_index) ) storagetypes.concurrent_return( (-errno.ESTALE, None, None) ) if free_idx_data is not None: if free_idx_rc != 0: if free_idx_rc == -errno.ENOENT: # the entry doesn't exist, which is fine by us since we're about to overwrite it anyway free_idx_rc = None free_idx = None free_idx_data = None else: logging.error("/%s/%s: __read_dirent_node(/%s/%s, %s) rc = %s" % (volume_id, parent_id, volume_id, free_file_id, free_dir_index, free_idx_rc) ) storagetypes.concurrent_return( (free_idx_rc, None, None) ) elif free_idx.alloced: logging.error("/%s/%s: free index (/%s/%s, %s) is allocated" % (volume_id, parent_id, volume_id, free_idx.file_id, free_dir_index) ) storagetypes.concurrent_return( (-errno.ESTALE, None, None) ) elif free_idx.dir_index != 
free_dir_index: raise Exception("/%s/%s: free index slot mismatch: %s != %s" % (volume_id, free_file_id, free_idx.dir_index, free_dir_index)) else: # save this for later... free_idx_file_id = free_idx.file_id # sanity check if alloced_idx.dir_index != alloced_dir_index: raise Exception("/%s/%s: allocated index slot mismatch: %s != %s" % (volume_id, alloced_file_id, alloced_idx.dir_index, alloced_dir_index)) # do the swap: # * overwrite the free dir index node with the allocated dir index node's data (moving it into place over the freed one) # * update the alloced ent node with the free dir index node's dir index (compactifying the index) new_dir_idx = MSEntryDirEntIndex( key=free_idx_key, **alloced_idx.to_dict() ) new_entry_dir_idx = MSEntryEntDirIndex( key=alloced_entry_idx_key, **alloced_idx.to_dict() ) # overwrites existing entry index node new_dir_idx.dir_index = free_dir_index new_entry_dir_idx.dir_index = free_dir_index logging.debug( "swap index slot of /%s/%s: slot %s --> slot %s (overwrites %s)" % (volume_id, alloced_file_id, alloced_dir_index, free_dir_index, free_file_id) ) yield new_dir_idx.put_async(), new_entry_dir_idx.put_async(), alloced_idx.key.delete_async() storagetypes.memcache.delete_multi( [alloced_idx_key_name, alloced_entry_idx_key_name, free_idx_key_name] ) storagetypes.concurrent_return( (0, alloced_idx, free_idx_file_id) ) @storagetypes.concurrent def swap( free_file_id ): rc, alloced_idx, free_idx_file_id = yield storagetypes.transaction_async( lambda: do_swap( free_file_id ), xg=True ) if rc < 0: storagetypes.concurrent_return( rc ) old_dir_index = None if free_file_id is None: free_file_id = free_idx_file_id if free_file_id is not None: # blow away the newly-freed index node old_entry_idx_key_name = MSEntryEntDirIndex.make_key_name( volume_id, free_file_id ) old_entry_idx_key = storagetypes.make_key( MSEntryEntDirIndex, old_entry_idx_key_name ) yield old_entry_idx_key.delete_async() storagetypes.memcache.delete( old_entry_idx_key_name 
) old_dir_index = alloced_idx.dir_index storagetypes.concurrent_return( old_dir_index ) rc_fut = swap( free_file_id ) if async: return rc_fut else: storagetypes.wait_futures( [rc_fut] ) return rc_fut.get_result() @classmethod @storagetypes.concurrent def __compactify_remove_index_async( cls, volume_id, parent_id, dead_file_id, dead_dir_index ): """ Remove a freed index slot's node data. """ idx_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, dead_dir_index ) ent_key_name = MSEntryEntDirIndex.make_key_name( volume_id, dead_file_id ) idx_key = storagetypes.make_key( MSEntryDirEntIndex, idx_key_name ) ent_key = storagetypes.make_key( MSEntryEntDirIndex, ent_key_name ) @storagetypes.concurrent def delete_index_if_unallocated(): idx_node = yield idx_key.get_async( use_cache=False, use_memcache=False ) if idx_node is None: # already gone storagetypes.concurrent_return( 0 ) if not idx_node.alloced: yield idx_key.delete_async() storagetypes.concurrent_return( 0 ) yield ent_key.delete_async(), storagetypes.transaction_async( delete_index_if_unallocated ) storagetypes.memcache.delete_multi( [idx_key_name, ent_key_name] ) @classmethod def __compactify_child_delete( cls, volume_id, parent_id, free_file_id, free_dir_index, dir_index_cutoff ): """ Repeatedly find a child's index node that (1) is allocated, and (2) is beyond a given cutoff (i.e. the number of index nodes at the time of the call), and then atomically swap the identified freed node with child's node in the index order. The effect is that allocated index nodes at the end of the index get moved to replace the gaps in the index, thereby compactifying it. Return (old_dir_index, free dir index node) on success Return -EPERM if no compactification can happen (i.e. 
all children have directory index values smaller than the maximum) Return -EAGAIN if the caller should refresh the parent directory index maximum value """ # find all entries in parent with a dir index greater than the current one to_compactify = None while True: to_compactify = cls.__compactify_get_candidates_delete( volume_id, parent_id, dir_index_cutoff ) if len(to_compactify) > 0: # it's possible there are more than one. Pick one that's allocated (but
# Copyright (c) 2021 OpenKS Authors, Visual Computing Group, Beihang University. # All rights reserved. import os import sys import pickle import argparse import datetime import time import json from tqdm import tqdm import random from pathlib import Path import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, DistributedSampler from datetime import datetime from copy import deepcopy from plyfile import PlyData, PlyElement sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder from ..model import VisualConstructionModel from .mmd_modules.ThreeDVG.data.scannet.model_util_scannet import ScannetDatasetConfig from .mmd_modules.ThreeDVG.lib.dataset import ScannetReferenceDataset from .mmd_modules.ThreeDVG.lib.solver import Solver from .mmd_modules.ThreeDVG.lib.config import CONF from .mmd_modules.ThreeDVG.models.refnet import RefNet from .mmd_modules.ThreeDVG.scripts.utils.AdamW import AdamW from .mmd_modules.ThreeDVG.scripts.utils.script_utils import set_params_lr_dict from .mmd_modules.ThreeDVG.lib.ap_helper import APCalculator, parse_predictions, parse_groundtruths from .mmd_modules.ThreeDVG.lib.loss_helper import get_loss from .mmd_modules.ThreeDVG.lib.eval_helper import get_eval from .mmd_modules.ThreeDVG.utils.pc_utils import write_ply_rgb, write_oriented_bbox from .mmd_modules.ThreeDVG.utils.box_util import get_3d_box, box3d_iou SCANREFER_TRAIN = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_train.json"))) SCANREFER_VAL = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_val.json"))) SCANNET_ROOT = "/data5/caidaigang/scanrefer/data/scannet/scans/" #30 SCANNET_MESH = os.path.join(SCANNET_ROOT, "{}/{}_vh_clean_2.ply") # scene_id, scene_id SCANNET_META = os.path.join(SCANNET_ROOT, "{}/{}.txt") # scene_id, scene_id # constants MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8]) DC = ScannetDatasetConfig() @VisualConstructionModel.register("3DVG", 
"PyTorch") class VisualGrounding3DVGTorch(VisualConstructionModel): # TODO distributed learning is not complete. def __init__(self, name: str = 'pytorch-3dvg', use_distributed: bool = False, args = {"3DVG": True}): self.name = name self.args = self.parse_args(args) print("args",self.args) #self.device = torch.device(self.args.device) # setting os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu os.environ["CUDA_LAUNCH_BLOCKING"] = "1" # reproducibility torch.manual_seed(self.args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(self.args.seed) def parse_args(self, args): parser = argparse.ArgumentParser(description="3DVG Visual Grounding Model") parser.add_argument("--tag", type=str, help="tag for the training, e.g. cuda_wl", default="") parser.add_argument("--gpu", type=str, help="gpu", default="2") parser.add_argument("--batch_size", type=int, help="batch size", default=10) parser.add_argument("--epoch", type=int, help="number of epochs", default=200) parser.add_argument("--verbose", type=int, help="iterations of showing verbose", default=50) parser.add_argument("--val_step", type=int, help="iterations of validating", default=1000) parser.add_argument("--lr", type=float, help="learning rate", default=2e-3) parser.add_argument("--wd", type=float, help="weight decay", default=1e-5) parser.add_argument("--lang_num_max", type=int, help="lang num max", default=8) parser.add_argument("--num_points", type=int, default=40000, help="Point Number [default: 40000]") parser.add_argument("--num_proposals", type=int, default=256, help="Proposal number [default: 256]") parser.add_argument("--num_scenes", type=int, default=-1, help="Number of scenes [default: -1]") parser.add_argument("--seed", type=int, default=42, help="random seed") parser.add_argument("--coslr", action='store_true', help="cosine learning rate") parser.add_argument("--amsgrad", action='store_true', help="optimizer with amsgrad") 
parser.add_argument("--no_height", action="store_true", help="Do NOT use height signal in input.") parser.add_argument("--no_augment", action="store_true", help="Do NOT use augment on trainingset (not used)") parser.add_argument("--no_lang_cls", action="store_true", help="Do NOT use language classifier.") parser.add_argument("--no_detection", action="store_true", help="Do NOT train the detection module.") parser.add_argument("--no_reference", action="store_true", help="Do NOT train the localization module.") parser.add_argument("--use_color", action="store_true", help="Use RGB color in input.") parser.add_argument("--use_normal", action="store_true", help="Use RGB color in input.") parser.add_argument("--use_multiview", action="store_true", help="Use multiview images.") parser.add_argument("--use_bidir", action="store_true", help="Use bi-directional GRU.") parser.add_argument("--use_pretrained", type=str, help="Specify the folder name containing the pretrained detection module.") parser.add_argument("--use_checkpoint", type=str, help="Specify the checkpoint root", default="") ###############################val############################## parser.add_argument("--folder", type=str, help="Folder containing the model") parser.add_argument("--force", action="store_true", help="enforce the generation of results") parser.add_argument("--no_nms", action="store_true", help="do NOT use non-maximum suppression for post-processing.") parser.add_argument("--repeat", type=int, default=1, help="Number of times for evaluation") parser.add_argument("--use_train", action="store_true", help="Use train split in evaluation.") parser.add_argument("--use_oracle", action="store_true", help="Use ground truth bounding boxes.") parser.add_argument("--use_cat_rand", action="store_true", help="Use randomly selected bounding boxes from correct categories as outputs.") parser.add_argument("--use_best", action="store_true", help="Use best bounding boxes as outputs.") 
parser.add_argument("--reference", action="store_true", help="evaluate the reference localization results") parser.add_argument("--detection", action="store_true", help="evaluate the object detection results") ###############################visualize############################## parser.add_argument("--scene_id", type=str, help="scene id", default="") args = parser.parse_args(args) return args def get_dataloader(self, args, scanrefer, scanrefer_new, all_scene_list, split, config, augment, shuffle=True): dataset = ScannetReferenceDataset( scanrefer=scanrefer[split], scanrefer_new=scanrefer_new[split], scanrefer_all_scene=all_scene_list, split=split, num_points=args.num_points, use_height=(not args.no_height), use_color=args.use_color, use_normal=args.use_normal, use_multiview=args.use_multiview, lang_num_max=args.lang_num_max, augment=augment, shuffle=shuffle ) # dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True) dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=shuffle, num_workers=4) return dataset, dataloader def get_eval_dataloader(self, args, scanrefer, scanrefer_new, all_scene_list, split, config): dataset = ScannetReferenceDataset( scanrefer=scanrefer, scanrefer_new=scanrefer_new, scanrefer_all_scene=all_scene_list, split=split, num_points=args.num_points, use_color=args.use_color, use_height=(not args.no_height), use_normal=args.use_normal, use_multiview=args.use_multiview, lang_num_max=1,#args.lang_num_max ) print("evaluate on {} samples".format(len(dataset))) dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True) return dataset, dataloader def get_model(self, args): # initiate model input_channels = int(args.use_multiview) * 128 + int(args.use_normal) * 3 + int(args.use_color) * 3 + int( not args.no_height) model = RefNet( num_class=DC.num_class, num_heading_bin=DC.num_heading_bin, num_size_cluster=DC.num_size_cluster, mean_size_arr=DC.mean_size_arr, input_feature_dim=input_channels, 
num_proposal=args.num_proposals, use_lang_classifier=(not args.no_lang_cls), use_bidir=args.use_bidir, no_reference=args.no_reference, dataset_config=DC ) # trainable model if args.use_pretrained: # load model print("loading pretrained VoteNet...") pretrained_model = RefNet( num_class=DC.num_class, num_heading_bin=DC.num_heading_bin, num_size_cluster=DC.num_size_cluster, mean_size_arr=DC.mean_size_arr, num_proposal=args.num_proposals, input_feature_dim=input_channels, use_bidir=args.use_bidir, no_reference=True, dataset_config=DC ) pretrained_path = os.path.join(CONF.PATH.OUTPUT, args.use_pretrained, "model_last.pth") pretrained_model.load_state_dict(torch.load(pretrained_path), strict=False) # mount model.backbone_net = pretrained_model.backbone_net model.vgen = pretrained_model.vgen model.proposal = pretrained_model.proposal if args.no_detection: # freeze pointnet++ backbone for param in model.backbone_net.parameters(): param.requires_grad = False # freeze voting for param in model.vgen.parameters(): param.requires_grad = False # freeze detector for param in model.proposal.parameters(): param.requires_grad = False # to CUDA model = model.cuda() return model def get_eval_model(self, args, config): # load model input_channels = int(args.use_multiview) * 128 + int(args.use_normal) * 3 + int(args.use_color) * 3 + int( not args.no_height) model = RefNet( num_class=config.num_class, num_heading_bin=config.num_heading_bin, num_size_cluster=config.num_size_cluster, mean_size_arr=config.mean_size_arr, num_proposal=args.num_proposals, input_feature_dim=input_channels, use_lang_classifier=(not args.no_lang_cls), use_bidir=args.use_bidir, dataset_config=config, ).cuda() model_name = "model_last.pth" if args.detection else "model.pth" path = os.path.join(CONF.PATH.OUTPUT, args.folder, model_name) model.load_state_dict(torch.load(path), strict=False) model.eval() return model def get_num_params(self, model): model_parameters = filter(lambda p: p.requires_grad, 
model.parameters()) num_params = int(sum([np.prod(p.size()) for p in model_parameters])) return num_params def get_solver(self, args, dataloader): model = self.get_model(args) # TODO weight_dict = { 'detr': {'lr': 0.0001}, 'lang': {'lr': 0.0005}, 'match': {'lr': 0.0005}, } params = set_params_lr_dict(model, base_lr=args.lr, weight_decay=args.wd, weight_dict=weight_dict) # params = model.parameters() optimizer = AdamW(params, lr=args.lr, weight_decay=args.wd, amsgrad=args.amsgrad) if args.use_checkpoint: print("loading checkpoint {}...".format(args.use_checkpoint)) stamp = args.use_checkpoint root = os.path.join(CONF.PATH.OUTPUT, stamp) checkpoint = torch.load(os.path.join(CONF.PATH.OUTPUT, args.use_checkpoint, "checkpoint.tar")) model.load_state_dict(checkpoint["model_state_dict"]) optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) else: stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") if args.tag: stamp += "_" + args.tag.upper() root = os.path.join(CONF.PATH.OUTPUT, stamp) os.makedirs(root, exist_ok=True) # scheduler parameters for training solely the detection pipeline LR_DECAY_STEP = [80, 120, 160] if args.no_reference else None if args.coslr: LR_DECAY_STEP = { 'type': 'cosine', 'T_max': args.epoch, 'eta_min': 1e-5, } LR_DECAY_RATE = 0.1 if args.no_reference else None BN_DECAY_STEP = 20 if args.no_reference else None BN_DECAY_RATE = 0.5 if args.no_reference else None print('LR&BN_DECAY', LR_DECAY_STEP, LR_DECAY_RATE, BN_DECAY_STEP, BN_DECAY_RATE, flush=True) solver = Solver( model=model, config=DC, dataloader=dataloader, optimizer=optimizer, stamp=stamp, val_step=args.val_step, detection=not args.no_detection, reference=not args.no_reference, use_lang_classifier=not args.no_lang_cls, lr_decay_step=LR_DECAY_STEP, lr_decay_rate=LR_DECAY_RATE, bn_decay_step=BN_DECAY_STEP, bn_decay_rate=BN_DECAY_RATE ) num_params = self.get_num_params(model) return solver, num_params, root def save_info(self, args, root, num_params, train_dataset, val_dataset): info = 
{} for key, value in vars(args).items(): info[key] = value info["num_train"] = len(train_dataset) info["num_val"] = len(val_dataset) info["num_train_scenes"] = len(train_dataset.scene_list) info["num_val_scenes"] = len(val_dataset.scene_list) info["num_params"] = num_params with open(os.path.join(root, "info.json"), "w") as f: json.dump(info, f, indent=4) def get_scannet_scene_list(self, split): scene_list = sorted( [line.rstrip() for line in open(os.path.join(CONF.PATH.SCANNET_META, "scannetv2_{}.txt".format(split)))]) return scene_list def get_scanrefer(self, scanrefer_train, scanrefer_val, num_scenes, lang_num_max): if self.args.no_reference: train_scene_list = self.get_scannet_scene_list("train") new_scanrefer_train = [] for scene_id in train_scene_list: data = deepcopy(SCANREFER_TRAIN[0]) data["scene_id"] = scene_id new_scanrefer_train.append(data) val_scene_list = self.get_scannet_scene_list("val") new_scanrefer_val = [] for scene_id in val_scene_list: data = deepcopy(SCANREFER_VAL[0]) data["scene_id"] = scene_id new_scanrefer_val.append(data) else: # get initial scene list train_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_train]))) val_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_val]))) if num_scenes == -1: num_scenes = len(train_scene_list) else: assert len(train_scene_list) >= num_scenes # slice train_scene_list train_scene_list = train_scene_list[:num_scenes] # filter data in chosen scenes new_scanrefer_train = [] scanrefer_train_new = [] scanrefer_train_new_scene = [] scene_id = "" for data in scanrefer_train: if data["scene_id"] in train_scene_list: new_scanrefer_train.append(data) if scene_id != data["scene_id"]: scene_id = data["scene_id"] if len(scanrefer_train_new_scene) > 0: scanrefer_train_new.append(scanrefer_train_new_scene) scanrefer_train_new_scene = [] if len(scanrefer_train_new_scene) >= lang_num_max: scanrefer_train_new.append(scanrefer_train_new_scene) scanrefer_train_new_scene = [] 
scanrefer_train_new_scene.append(data) """ if data["scene_id"] not in scanrefer_train_new: scanrefer_train_new[data["scene_id"]] = [] scanrefer_train_new[data["scene_id"]].append(data) """ scanrefer_train_new.append(scanrefer_train_new_scene) new_scanrefer_val = scanrefer_val scanrefer_val_new = [] scanrefer_val_new_scene = [] scene_id = "" for data in scanrefer_val: # if data["scene_id"] not in scanrefer_val_new: # scanrefer_val_new[data["scene_id"]] = [] # scanrefer_val_new[data["scene_id"]].append(data) if scene_id != data["scene_id"]: scene_id = data["scene_id"] if len(scanrefer_val_new_scene) > 0: scanrefer_val_new.append(scanrefer_val_new_scene) scanrefer_val_new_scene = [] if len(scanrefer_val_new_scene) >= lang_num_max: scanrefer_val_new.append(scanrefer_val_new_scene) scanrefer_val_new_scene = [] scanrefer_val_new_scene.append(data) scanrefer_val_new.append(scanrefer_val_new_scene) print("scanrefer_train_new", len(scanrefer_train_new), len(scanrefer_val_new), len(scanrefer_train_new[0])) sum = 0 for i in range(len(scanrefer_train_new)): sum += len(scanrefer_train_new[i]) # print(len(scanrefer_train_new[i])) # for i in range(len(scanrefer_val_new)): # print(len(scanrefer_val_new[i])) print("sum", sum) # 1418 363 # all scanrefer scene all_scene_list = train_scene_list + val_scene_list print("train on {} samples and val on {} samples".format(len(new_scanrefer_train), len(new_scanrefer_val))) return new_scanrefer_train, new_scanrefer_val, all_scene_list, scanrefer_train_new, scanrefer_val_new def get_eval_scanrefer(self, args): if args.detection: scene_list = self.get_scannet_scene_list("val") scanrefer = [] for scene_id in scene_list: data = deepcopy(SCANREFER_TRAIN[0]) data["scene_id"] = scene_id scanrefer.append(data) else: scanrefer = SCANREFER_TRAIN if args.use_train else SCANREFER_VAL scene_list = sorted(list(set([data["scene_id"] for data in scanrefer]))) if args.num_scenes != -1: scene_list = scene_list[:args.num_scenes] scanrefer = [data for data in 
scanrefer if data["scene_id"] in scene_list] new_scanrefer_val = scanrefer scanrefer_val_new = [] scanrefer_val_new_scene = [] scene_id = "" for data in scanrefer: # if data["scene_id"] not in scanrefer_val_new: # scanrefer_val_new[data["scene_id"]] = [] # scanrefer_val_new[data["scene_id"]].append(data) if scene_id != data["scene_id"]: scene_id = data["scene_id"] if len(scanrefer_val_new_scene)
<filename>experiments/figures/work/make_figures.py # standard library imports import os import sys import argparse import time # package imports import torch from torch.distributions.normal import Normal import numpy as np import matplotlib.pyplot as plt from matplotlib.lines import Line2D import seaborn as sns import pandas as pd import matplotlib matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 import jax.numpy as jnp import neural_tangents as nt from neural_tangents import stax # local imports sys.path.append('../../..') import src.layers as layers import src.util as util import src.networks as networks import src.callbacks as callbacks from src.data import load_dataset def get_parser(): parser = argparse.ArgumentParser() parser.add_argument('--test_mode', action='store_true') # for quickly testing figures # general model argument parser.add_argument('--dim_in', type=int, default=1) parser.add_argument('--noise_sig2', type=float, default=.01, help='observational noise') parser.add_argument('--n_layers', type=int, default=1) parser.add_argument('--activation', type=str, default='erf') parser.add_argument('--seed_data', type=int, default=0, help='seed for dataset') # prior #parser.add_argument('--scale_by_width', action='store_true') # applies to prior and initialization parser.add_argument('--prior_sig2', type=float, default=1) # initialization parser.add_argument('--init_method', type=str, default='deterministic') parser.add_argument('--temp_gamma_alpha', type=float, default=100.) 
# for initializing variational variances parser.add_argument('--test_param1', type=float, default=1) parser.add_argument('--test_param2', type=float, default=1) parser.add_argument('--manual_plot_lims', action='store_true') parser.add_argument('--dtype', type=str, default='float32') # gradient descent arguments parser.add_argument('--scale_kl', action='store_true') return parser def plot_upcrossings(x, f, upcross_level=0, bins=16, ax=None): ''' numpy inputs f: (n_samp, n) x: (n,1) or (n,) <-- assumes this is sorted plots histogram of upcrossing locations uses midpoint of x's to assign locations (so x needs to be sorted) ''' # check if x is sorted x = x.reshape(-1) x_diff = x[1:] - x[:-1] assert np.all(x_diff >= 0) or np.all(x_diff <= 0) x_mid = (x[0:-1] + x[1:])/2 # midpoint between adjacent x pairs u = upcross_level*np.ones(x.shape[0]) up = np.logical_and(f[:,:-1]<u[:-1], f[:,1:]>u[1:]) # indicator of upcrossing (n_samp, n-1) # average upcrossings mean_upcrossings = np.mean(np.sum(up, 1), 0) # sum over n, average over n_samp # location of upcrossings idx_up = np.concatenate([np.where(row)[0] for row in up]) # indices of all of the upcrossings x_up = x_mid[idx_up] # position of all the upcrossings # histogram of location of upcrossings if ax is None: fig, ax = plt.subplots() ax.hist(x_up, bins=bins, weights=1/f.shape[0]*np.ones(len(x_up))) # weight by 1/n_samp #ax.set_title('upcrossings --- avg = %.3f' % mean_upcrossings) return ax def plot_predictive(x_grid, f_grid, uncertainty_type='std', n_std=1, n_samp_show=0, f_true=None, color='blue', label=None, ax=None): ''' Numpy inputs x_grid: (n_grid, 1) f_true: (n_grid, 1) f_grid: (n_samp, n_grid) x_train: (n_train, 1) y_train: (n_train, 1) ''' if ax is None: fig, ax = plt.subplots() else: fig = None f_grid_pred = np.mean(f_grid, 0) ax.plot(x_grid, f_grid_pred, color=color, label=label) # predictions on grid #n_samp_plot = min(f_grid.shape[0], 5) if n_samp_show > 0: ax.plot(x_grid, f_grid[:n_samp_show, :].T, 
color=color, alpha=.25, linewidth=1) # plot a few samples
    # (tail of plot_predictive — its signature and the sample-plotting call
    # above begin before this chunk)
    if f_true is not None:
        # overlay the ground-truth function, if supplied
        ax.plot(x_grid, f_true, color='orange')
    if uncertainty_type == 'std':
        # shade +/- 1..n_std empirical standard deviations around the predictive mean
        f_grid_std = np.std(f_grid, 0)
        for std in np.arange(1, n_std+1):
            f_grid_pred_lb = f_grid_pred.reshape(-1) - std * f_grid_std.reshape(-1)
            f_grid_pred_ub = f_grid_pred.reshape(-1) + std * f_grid_std.reshape(-1)
            ax.fill_between(x_grid.reshape(-1), f_grid_pred_lb.reshape(-1), f_grid_pred_ub.reshape(-1),
                            color=color, alpha=0.2, edgecolor='none')
    elif uncertainty_type == 'quantile':
        # shade nested central credible intervals from sample percentiles
        for q in [2.5, 5, 10]:
            ci = np.percentile(f_grid, [q, 100-q], axis=0)
            ax.fill_between(x_grid.reshape(-1), ci[0,:], ci[1,:], color=color, alpha=0.2)
    #ax.legend()
    #ax.set_xlabel('x')
    return fig, ax


def load_pytorch(args, dim_hidden, dir_save, act_name=None, dim_in=None):
    """Load a saved BNN model (and, if present, its dataset) from dir_save.

    act_name / dim_in default to args.activation / args.dim_in when omitted.
    Returns (bnn, data): bnn is the restored networks.BNN in eval mode; data
    is the saved dataset dict converted to numpy, or a minimal
    {'noise_sig2': args.noise_sig2} dict when no data file could be loaded.
    """
    if act_name is None:
        act_name = args.activation
    if dim_in is None:
        dim_in = args.dim_in
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # data
    try:
        #data = np.load(os.path.join(dir_save, 'data.npy'), allow_pickle=True).item()
        if str(device) == 'cpu':
            # kind of a hack: on cpu look for the data file one directory up
            dir_save_alt = os.path.join(dir_save, '../')
            data = np.load(os.path.join(dir_save_alt, 'data.npy'), allow_pickle=True).item()
        else:
            data = np.load(os.path.join(dir_save, 'data.npy'), allow_pickle=True).item() # will this work on cuda?

        # convert to numpy (saved entries are torch tensors, possibly on gpu)
        data['x_train'] = data['x_train'].cpu().numpy()
        data['y_train'] = data['y_train'].cpu().numpy()
        data['f_train'] = data['f_train'].cpu().numpy()
        try:
            data['x_test'] = data['x_test'].cpu().numpy()
            data['y_test'] = data['y_test'].cpu().numpy()
            data['f_test'] = data['f_test'].cpu().numpy()
        except:  # NOTE(review): bare except — silently tolerates a missing test split
            pass
        #data['x_grid'] = np.linspace(data['x_train'].min()-.5, data['x_train'].max()+.5, 50).reshape(-1,1).astype(args.dtype)
        data['x_grid'] = np.linspace(-1, 1, 20).reshape(-1,1).astype(args.dtype)
    except:  # NOTE(review): bare except — any load failure falls back to the noise level alone
        #print('unable to load data (possibly not needed)...')
        data = {'noise_sig2': args.noise_sig2}

    # model: rebuild the BNN with the run's hyperparameters, then restore weights
    bnn = networks.BNN(
        dim_in=dim_in,
        dim_hidden=dim_hidden,
        noise_scale=np.sqrt(data['noise_sig2']),
        n_layers=args.n_layers,
        act_name=act_name,
        layer_name='BBB',
        w_scale_prior=[np.sqrt(args.prior_sig2), np.sqrt(args.prior_sig2)],
        b_scale_prior=[np.sqrt(args.prior_sig2), np.sqrt(args.prior_sig2)],
        ntk_scaling=True,
        temp_gamma_alpha=args.temp_gamma_alpha,
        init_method=args.init_method,
        test_param=[args.test_param1, args.test_param2],
        scale_kl = args.scale_kl)
    bnn.load_state_dict(torch.load(os.path.join(dir_save, 'model.tar'), map_location=torch.device(device)))
    bnn.to(device)
    bnn.eval()
    return bnn, data


def plot_pytorch(bnn, x_grid, prior=False, n_samp=100, n_std=1, n_samp_show=0,
                 ax=None, label=None, color=None, ax_upcrossings=None):
    """Sample the BNN on x_grid and plot its predictive distribution.

    prior=True samples from the prior instead of the posterior.  When
    ax_upcrossings is given, an upcrossing plot of the samples is drawn too.
    """
    x_grid_torch = torch.from_numpy(x_grid).to(bnn.layers[0].device)
    bnn_grid = scalable_foward(bnn, x_grid_torch, n_samp=n_samp, prior=prior).cpu().numpy()
    plot_predictive(x_grid, bnn_grid, uncertainty_type='std', n_std=n_std, n_samp_show=n_samp_show,
                    f_true=None, color=color, label=label, ax=ax)
    if ax_upcrossings is not None:
        plot_upcrossings(x_grid, bnn_grid, ax=ax_upcrossings)


def scalable_foward(bnn, x, n_samp=1000, prior=False, dim_hidden_thresh=256000):
    '''
    Helps with memory errors for large models

    Batches the forward pass over samples (and optionally over inputs) when
    bnn.dim_hidden reaches dim_hidden_thresh (default 256000).
    Returns samples of shape (n_samp, n_obs).
    '''
    n_batch = 100 if bnn.dim_hidden < dim_hidden_thresh else n_samp # for batching over samples
    #n_batch = 10000 if bnn.dim_hidden < dim_hidden_thresh else n_samp # for batching over samples
    loop_over_data = bnn.dim_hidden >= dim_hidden_thresh # for batching inputs
    # NOTE(review): hard-coded override — the per-input loop above is disabled
    # unconditionally; confirm this is intentional.
    loop_over_data=False
    with torch.no_grad():
        if loop_over_data:
            # one forward pass per input row, concatenated over the data axis
            return torch.cat([bnn.forward_batched(x[i,0].reshape(1,-1), n_samp=n_samp, n_batch=n_batch, prior=prior).detach() for i in range(x.shape[0])], 0).squeeze().T # (n_samp, n_obs)
        else:
            return bnn.forward_batched(x, n_samp=n_samp, n_batch=n_batch, prior=prior).detach().squeeze().T # (n_samp, n_obs)


def forward_no_bias(bnn, x, n_samp=1000):
    '''
    Sets output bias before taking samples
    Numpy inputs/outputs, n_samp x n_obs

    Temporarily zeroes the variational output-bias distribution q(b),
    samples the posterior predictive, then restores the original q(b).
    '''
    # copy original q(b)
    b_loc_orig = bnn.layers[-1].b_loc.data.clone()
    b_scale_untrans_orig = bnn.layers[-1].b_scale_untrans.data.clone()
    # change zero bias (mean 0, near-zero scale)
    bnn.layers[-1].b_loc.data = torch.tensor([0.], device=bnn.layers[1].device)
    bnn.layers[-1].b_scale_untrans.data = bnn.layers[-1].untransform(torch.tensor([1e-7], device=bnn.layers[1].device))
    # compute predictions
    x_torch = torch.from_numpy(x).to(bnn.layers[0].device)
    with torch.no_grad():
        #f_pred = bnn.forward_batched(x_torch, n_samp=n_samp, n_batch=100, prior=prior).detach().cpu().squeeze().numpy().T
        f_pred = scalable_foward(bnn, x_torch, n_samp=n_samp, prior=False).cpu().numpy()
    # reset to original q(b)
    bnn.layers[-1].b_loc.data = b_loc_orig
    bnn.layers[-1].b_scale_untrans.data = b_scale_untrans_orig
    return f_pred


def forward_no_bias_prior(bnn, x, n_samp=1000):
    '''
    Sets output bias before taking samples
    Numpy inputs/outputs, n_samp x n_obs
    Samples from the prior

    Temporarily replaces the output-bias prior p(b) with a near-point mass at
    zero, samples from the prior, then restores the original p(b).
    '''
    # copy original prior
    p_b_orig = bnn.layers[-1].p_b
    # change to (effectively) zero bias
    bnn.layers[-1].p_b = Normal(torch.tensor([0.], device=bnn.layers[1].device),
                                torch.tensor([1e-32], device=bnn.layers[1].device)).expand(bnn.layers[-1].b_loc.shape)
    # compute likelihood
    x_torch = torch.from_numpy(x).to(bnn.layers[0].device)
    with torch.no_grad():
        #f_pred = bnn.forward_batched(x_torch, n_samp=n_samp, n_batch=100, prior=prior).detach().cpu().squeeze().numpy().T
        f_pred = scalable_foward(bnn, x_torch, n_samp=n_samp, prior=True).cpu().numpy()
    # reset to original prior
    bnn.layers[-1].p_b = p_b_orig
    return f_pred


def compute_bound(m, bnn, data):
    """Compute a KL-based bound as a function of width m (single-layer only).

    Swaps in the closed-form optimal output-bias prior, evaluates the
    negative log-likelihood plus KL under the prior, restores the original
    prior, and returns c0 * m**(-0.5) where c0 folds in the KL bound.
    Assumes L = D = norm_x = 1 and alpha = 0 (see comment below).
    """
    assert bnn.n_layers == 1 # only works in L=1 case
    # compute optimal bias
    b_loc_opt = np.sum(data['y_train']) / (data['y_train'].shape[0] + data['noise_sig2'])
    b_scale_opt = np.sqrt(data['noise_sig2'] / (data['y_train'].shape[0] + data['noise_sig2']))
    # copy original prior
    p_b_orig = bnn.layers[-1].p_b
    # change to optimal bias
    bnn.layers[-1].p_b = Normal(torch.tensor([b_loc_opt], device=bnn.layers[1].device),
                                torch.tensor([b_scale_opt], device=bnn.layers[1].device)).expand(bnn.layers[-1].b_loc.shape)
    # compute likelihood
    x_train_torch = torch.from_numpy(data['x_train']).to(bnn.layers[0].device)
    y_train_torch = torch.from_numpy(data['y_train']).to(bnn.layers[0].device)
    with torch.no_grad():
        f_pred = bnn.forward_batched(x_train_torch, n_samp=1000, n_batch=100, prior=True)
        log_prob = bnn.log_prob(y_train_torch, f_pred).sum()
    kl = torch.distributions.kl_divergence(bnn.layers[-1].p_b, p_b_orig).sum()
    kl_bound = (-log_prob + kl).item()
    # reset to original prior
    bnn.layers[-1].p_b = p_b_orig
    # simpler if L=D=norm_x=1, alpha=0
    c0 = 2/3 * np.sqrt(2) * kl_bound
    return c0 * m**(-0.5)


def fit_nngp(args, x_train, y_train, noise_sig2=None):
    """Build an NNGP kernel for a 1-hidden-layer Erf network and fit its
    exact GP posterior via neural-tangents.

    Returns (kernel_fn, predict_fn); noise_sig2 defaults to args.noise_sig2.
    """
    if noise_sig2 is None:
        noise_sig2 = args.noise_sig2
    dim_hidden = 100 # why is this needed? I think it only matters if you choose to do the ntk kernel
    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Dense(dim_hidden, W_std=np.sqrt(args.prior_sig2), b_std=np.sqrt(args.prior_sig2)),
        stax.Erf(),
        stax.Dense(1, W_std=np.sqrt(args.prior_sig2), b_std=np.sqrt(args.prior_sig2))
    )
    predict_fn = nt.predict.gradient_descent_mse_ensemble(kernel_fn, x_train, y_train,
                                                          diag_reg=np.sqrt(noise_sig2),
                                                          diag_reg_absolute_scale=False) # beau: use noise level for diag_reg?
    return kernel_fn, predict_fn


def forward_nngp(mean, cov, n_samp):
    """Draw n_samp function samples from the Gaussian N(mean, cov); returns (n_samp, n_obs)."""
    return np.random.multivariate_normal(mean.reshape(-1), cov, n_samp)


def plot_nngp(kernel_fn, predict_fn, x_grid, prior=False, n_samp=100, n_std=1, n_samp_show=0,
              ax=None, label=None, color=None, ax_upcrossings=None):
    """Sample the NNGP prior (prior=True) or posterior on x_grid and plot it."""
    if prior:
        mean = np.zeros(x_grid.shape)
        cov = kernel_fn(x_grid, x_grid, 'nngp')
    else:
        mean, cov = predict_fn(x_test=x_grid, get='nngp', compute_cov=True)
    nngp_grid = forward_nngp(mean, cov, n_samp=n_samp)
    plot_predictive(x_grid, nngp_grid, uncertainty_type='std', n_std=n_std, n_samp_show=n_samp_show,
                    f_true=None, color=color, label=label, ax=ax)
    if ax_upcrossings is not None:
        plot_upcrossings(x_grid, nngp_grid, ax=ax_upcrossings)


def fit_nngp_alt(args, x_train, y_train, x_test, noise_sig2, prior=False):
    """Compute the NNGP prior or posterior (mean, cov) at x_test directly
    with the standard GP-regression formulas, bypassing nt.predict."""
    dim_hidden = 100 # why is this needed? I don't believe it's actually used (it shouldn't be)
    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Dense(dim_hidden, W_std=np.sqrt(args.prior_sig2), b_std=np.sqrt(args.prior_sig2)),
        stax.Erf(),
        stax.Dense(1, W_std=np.sqrt(args.prior_sig2), b_std=np.sqrt(args.prior_sig2))
    )
    k_xx = kernel_fn(x_train, x_train, 'nngp')
    if prior:
        mean = np.zeros(x_test.shape)
        cov = kernel_fn(x_test, x_test, 'nngp')
    else:
        # standard GP posterior: K_*x (K_xx + sig2 I)^-1 y  /  K_** - K_*x (K_xx + sig2 I)^-1 K_x*
        k_xtest = kernel_fn(x_train, x_test, 'nngp')
        k_testtest = kernel_fn(x_test, x_test, 'nngp')
        k_xx_inv = np.linalg.inv(k_xx + noise_sig2 * np.eye(x_train.shape[0]))
        mean = k_xtest.T @ k_xx_inv @ y_train
        cov = k_testtest - k_xtest.T @ k_xx_inv @ k_xtest
    return mean, cov


def plot_nngp_alt(args, x_train, y_train, noise_sig2, x_test, prior=False, n_samp=100, n_std=1,
                  n_samp_show=0, ax=None, label=None, color=None, ax_upcrossings=None):
    """Like plot_nngp, but uses the direct GP formulas from fit_nngp_alt."""
    mean, cov = fit_nngp_alt(args, x_train, y_train, x_test, noise_sig2, prior=prior)
    nngp_grid = forward_nngp(mean, cov, n_samp=n_samp)
    plot_predictive(x_test, nngp_grid, uncertainty_type='std', n_std=n_std, n_samp_show=n_samp_show,
                    f_true=None, color=color, label=label, ax=ax)
    if ax_upcrossings is not None:
        plot_upcrossings(x_test, nngp_grid, ax=ax_upcrossings)


def distance_to_prior_nngp(args, x_train, y_train, x_test, noise_sig2):
    """Max absolute deviation of the NNGP posterior mean from the (zero) prior mean."""
    max_error = lambda z: np.max(np.abs(z))
    post_mean, post_cov = fit_nngp_alt(args, x_train, y_train, x_test, noise_sig2, prior=False)
    prior_mean = 0
    error_mean = max_error(post_mean - prior_mean)
    return error_mean


def distance_to_prior(bnn, x, n_samp=1000):
    """Distance of the BNN posterior (bias removed) from the zero-mean prior.

    NOTE(review): this function continues beyond this chunk of the file.
    """
    #error = lambda z: np.sqrt(np.sum(z**2))
    max_error = lambda z: np.max(np.abs(z))
    #bnn_grid_prior = forward_no_bias_prior(bnn, x, n_samp=n_samp)
    bnn_grid_post = forward_no_bias(bnn, x, n_samp=n_samp)
    prior_mean = 0
    #prior_var = np.var(bnn_grid_prior, 0)
    post_mean = np.mean(bnn_grid_post, 0)
    post_var = np.var(bnn_grid_post, 0)
    error_mean = max_error(post_mean - prior_mean) # Max distance
u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')}, '861393168':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393169':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861393164':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393165':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393166':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393167':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393160':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393161':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393162':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861393163':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '86145035':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86138226':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')}, '861379335':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')}, '86145039':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86145038':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86138227':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')}, '861379334':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')}, '86145280':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')}, '86138224':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')}, '861452588':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')}, '861452589':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')}, '861452580':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')}, '861452581':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')}, 
'861452582':{'en': 'Xiamen, Fujian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')}, '861452583':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')}, '861452584':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')}, '861452585':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')}, '861452586':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')}, '861452587':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')}, '86139409':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '86139408':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861379336':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')}, '86139401':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86139400':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86139403':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86139402':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86139405':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86139404':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86139407':{'en': 'Yingkou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8425\u53e3\u5e02')}, '86139406':{'en': 'Jinzhou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')}, '86138222':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')}, '861379331':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')}, '861391210':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861391211':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861391212':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')}, '861391213':{'en': 'Yangzhou, Jiangsu', 'zh': 
u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')}, '861391214':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')}, '861391215':{'en': 'Lianyungang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861391216':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861391217':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861391218':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861391219':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861380373':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')}, '861380372':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')}, '861380375':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')}, '861380374':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')}, '861380377':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')}, '861380376':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')}, '861454195':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')}, '861398503':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')}, '861454197':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')}, '861454196':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '86139669':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')}, '861454190':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '861454193':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861454192':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')}, '86139667':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')}, '861454199':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861454198':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '86138220':{'en': 
'Jieyang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')}, '861379333':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')}, '861399449':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861399448':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861399445':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861399444':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861399447':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '86138221':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')}, '861399441':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861399440':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861399443':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861399442':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861458539':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')}, '861458538':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')}, '861458533':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')}, '861458532':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')}, '861458531':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')}, '861458530':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')}, '861458537':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')}, '861458536':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')}, '861458535':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')}, '861458534':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')}, '861380739':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')}, '861450193':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861457098':{'en': 
'<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')}, '861384878':{'en': 'Bayannur, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5df4\u5f66\u6dd6\u5c14\u5e02')}, '861384879':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')}, '861384872':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')}, '861384873':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')}, '861384870':{'en': 'Hulun, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')}, '861384871':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')}, '861384876':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')}, '861384877':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')}, '861384874':{'en': 'Bayannur, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5df4\u5f66\u6dd6\u5c14\u5e02')}, '861384875':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')}, '861379340':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')}, '861379341':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')}, '861379342':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')}, '861379343':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')}, '861379344':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')}, '861379345':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')}, '86138138':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '86138139':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '861379348':{'en': 'De<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')}, '86138137':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')}, '86138135':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '86138130':{'en': '<NAME>', 'zh': 
u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '86138131':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')}, '861386702':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')}, '861386703':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')}, '861386700':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')}, '861386701':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')}, '861386706':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861386707':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861386704':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861386705':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861386708':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861386709':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861392170':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861392171':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861392172':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861392173':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861392174':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861392175':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861392176':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861392177':{'en': 'Xu<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861392178':{'en': 'Xu<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861392179':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861452870':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')}, '861452871':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')}, 
'861452872':{'en': 'Shizuishan, Ningxia', 'zh': u('\u5b81\u590f\u77f3\u5634\u5c71\u5e02')}, '861452873':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')}, '861452874':{'en': 'Guyuan, Ningxia', 'zh': u('\u5b81\u590f\u56fa\u539f\u5e02')}, '861452875':{'en': 'Zhongwei, Ningxia', 'zh': u('\u5b81\u590f\u4e2d\u536b\u5e02')}, '861452876':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')}, '861452877':{'en': 'Shizuishan, Ningxia', 'zh': u('\u5b81\u590f\u77f3\u5634\u5c71\u5e02')}, '861452878':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')}, '861452879':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')}, '861457087':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '86137915':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')}, '86137917':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')}, '86137916':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')}, '86137910':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '86137913':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')}, '86137912':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861454937':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')}, '86137919':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861457088':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '861457089':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')}, '861458574':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')}, '861398679':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861398678':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861380142':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861380143':{'en': '<NAME>', 'zh': 
u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861380140':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')}, '861380141':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')}, '861380410':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u94c1\u5cad\u5e02')}, '861380411':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861380412':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')}, '861380413':{'en': 'Fushun, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')}, '861380148':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')}, '861380149':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '861380418':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')}, '861380419':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')}, '861453518':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861453519':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861453514':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861453515':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861453516':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861453517':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861453510':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861453511':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861453512':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861453513':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861378998':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861378999':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861378992':{'en': 'Xianning, Hubei', 'zh': 
u('\u6e56\u5317\u7701\u54b8\u5b81\u5e02')}, '861378993':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861378990':{'en': 'Enshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')}, '861378991':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')}, '861378996':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861378997':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861378994':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861378995':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '86139871':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')}, '86139872':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')}, '86139873':{'en': 'Honghe, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')}, '86139874':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')}, '86139875':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')}, '86139876':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')}, '86139877':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')}, '86139878':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')}, '86139879':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')}, '861390536':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')}, '861390537':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')}, '861390534':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')}, '861390535':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861390532':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861390533':{'en': 'Zibo, Shandong', 'zh': 
u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')}, '861390530':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')}, '861390531':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861390538':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')}, '861390539':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')}, '861383495':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861383494':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861383497':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861383496':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861379119':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861379118':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861383493':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861383492':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861379115':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861379114':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861379117':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861379116':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861383499':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861379110':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861379113':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861379112':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861398671':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')}, '861452648':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')}, '861452647':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861452646':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861452645':{'en': '<NAME>', 
'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '861452644':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '861452643':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '861452642':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')}, '861452641':{'en': 'Lianyungang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861452640':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861450227':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861450226':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861450225':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861450224':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861398670':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')}, '861450222':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861450221':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861450220':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861450229':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861450228':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861458256':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')}, '861458257':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')}, '861458254':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')}, '861458255':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')}, '861458252':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')}, '861458253':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')}, '861458250':{'en': 'Hefei, Anhui', 'zh': 
u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')}, '861458251':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')}, '861388432':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861458258':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')}, '861458259':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')}, '861399580':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')}, '861399581':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')}, '861399582':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')}, '861399583':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')}, '861399584':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')}, '861399585':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861399586':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861399587':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861399588':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861399589':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861382458':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')}, '861382459':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')}, '861382452':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861382453':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861382450':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861382451':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861382456':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')}, '861382457':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')}, '861382454':{'en': 'Foshan, Guangdong',
# NOTE(review): chunk boundary — the text below, up to `return learning_related_components`,
# is the tail of a method whose `def` line lies outside this view; reproduced unchanged.
lm.dependent_learning_mechanisms:
                projections = self._add_error_projection_to_dependent_learning_mechs(lm)
                self.add_projections(projections)

        # Suppress "no efferent connections" warning for:
        #    - error_signal OutputPort of last LearningMechanism in sequence
        #    - comparator
        learning_mechanisms[-1].output_ports[ERROR_SIGNAL].parameters.require_projection_in_composition.set(False,
                                                                                                            override=True)
        if comparator:
            for s in comparator.output_ports:
                s.parameters.require_projection_in_composition.set(False, override=True)

        learning_related_components = {LEARNING_MECHANISM: learning_mechanisms,
                                       COMPARATOR_MECHANISM: comparator,
                                       TARGET_MECHANISM: target,
                                       LEARNED_PROJECTION: learned_projections}

        # Update graph in case method is called again
        self._analyze_graph()

        return learning_related_components

    def _create_terminal_backprop_learning_components(self,
                                                      input_source,
                                                      output_source,
                                                      error_function,
                                                      loss_function,
                                                      learned_projection,
                                                      learning_rate,
                                                      learning_update):
        """Create ComparatorMechanism, LearningMechanism and LearningProjection for Component in learning sequence

        Returns the (target_mechanism, comparator_mechanism, learning_mechanism) triple
        for the terminal (output) end of a backpropagation learning pathway.
        """

        # target = self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
        # comparator = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]
        # learning_mechanism = self._terminal_backprop_sequences[output_source][LEARNING_MECHANISM]

        # If target and comparator already exist (due to overlapping pathway), use those
        try:
            target_mechanism = self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
            comparator_mechanism = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]

        # Otherwise, create new ones
        except KeyError:
            target_mechanism = ProcessingMechanism(name='Target',
                                                   default_variable=output_source.output_ports[0].value)
            # Comparator computes error_function(target, sample); sample is weighted -1
            # so the comparison is effectively target - sample.
            comparator_mechanism = ComparatorMechanism(name='Comparator',
                                                       target={NAME: TARGET,
                                                               VARIABLE: target_mechanism.output_ports[0].value},
                                                       sample={NAME: SAMPLE,
                                                               VARIABLE: output_source.output_ports[0].value,
                                                               WEIGHT: -1},
                                                       function=error_function,
                                                       output_ports=[OUTCOME, MSE])

        learning_function = BackPropagation(default_variable=[input_source.output_ports[0].value,
                                                              output_source.output_ports[0].value,
                                                              comparator_mechanism.output_ports[0].value],
                                            activation_derivative_fct=output_source.function.derivative,
                                            learning_rate=learning_rate,
                                            loss_function=loss_function)

        learning_mechanism = LearningMechanism(function=learning_function,
                                               default_variable=[input_source.output_ports[0].value,
                                                                 output_source.output_ports[0].value,
                                                                 comparator_mechanism.output_ports[0].value],
                                               error_sources=comparator_mechanism,
                                               learning_enabled=learning_update,
                                               in_composition=True,
                                               name="Learning Mechanism for " + learned_projection.name)

        self.add_nodes(nodes=[(target_mechanism, NodeRole.TARGET),
                              comparator_mechanism,
                              learning_mechanism],
                       required_roles=NodeRole.LEARNING)

        learning_related_projections = self._create_learning_related_projections(input_source,
                                                                                 output_source,
                                                                                 target_mechanism,
                                                                                 comparator_mechanism,
                                                                                 learning_mechanism)
        self.add_projections(learning_related_projections)

        learning_projection = self._create_learning_projection(learning_mechanism, learned_projection)
        self.add_projection(learning_projection, feedback=True)

        self.enable_learning = True

        return target_mechanism, comparator_mechanism, learning_mechanism

    def _create_non_terminal_backprop_learning_components(self,
                                                          input_source,
                                                          output_source,
                                                          learned_projection,
                                                          learning_rate,
                                                          learning_update):
        """Create (or reuse) the LearningMechanism and its Projections for a
        learned Projection in the middle of a backpropagation sequence."""

        # Get existing LearningMechanism if one exists (i.e., if this is a crossing point with another pathway)
        learning_mechanism = \
            next((lp.receiver.owner for lp in learned_projection.parameter_ports[MATRIX].mod_afferents
                  if isinstance(lp, LearningProjection)),
                 None)

        # If learning_mechanism exists:
        #    error_sources will be empty (as they have been dealt with in self._get_back_prop_error_sources
        #    error_projections will contain list of any created to be added to the Composition below
        if learning_mechanism:
            error_sources, error_projections = self._get_back_prop_error_sources(output_source,
                                                                                 learning_mechanism)
        # If learning_mechanism does not yet exist:
        #    error_sources will contain ones needed to create learning_mechanism
        #    error_projections will be empty since they can't be created until the learning_mechanism is created below;
        #    they will be created (using error_sources) when, and determined after learning_mechanism is created below
        else:
            error_sources, error_projections = self._get_back_prop_error_sources(output_source)
            error_signal_template = [error_source.output_ports[ERROR_SIGNAL].value
                                     for error_source in error_sources]
            default_variable = [input_source.output_ports[0].value,
                                output_source.output_ports[0].value] + error_signal_template

            # NOTE(review): the BackPropagation function is templated on only the first
            # error signal, while the LearningMechanism default_variable below carries
            # all of them — presumably intentional; confirm against upstream.
            learning_function = BackPropagation(default_variable=[input_source.output_ports[0].value,
                                                                  output_source.output_ports[0].value,
                                                                  error_signal_template[0]],
                                                activation_derivative_fct=output_source.function.derivative,
                                                learning_rate=learning_rate)

            learning_mechanism = LearningMechanism(function=learning_function,
                                                   # default_variable=[input_source.output_ports[0].value,
                                                   #                   output_source.output_ports[0].value,
                                                   #                   error_signal_template],
                                                   default_variable=default_variable,
                                                   error_sources=error_sources,
                                                   learning_enabled=learning_update,
                                                   in_composition=True,
                                                   name="Learning Mechanism for " + learned_projection.name)

            # Create MappingProjections from ERROR_SIGNAL OutputPort of each error_source
            #    to corresponding error_input_ports
            for i, error_source in enumerate(error_sources):
                error_projection = MappingProjection(sender=error_source,
                                                     receiver=learning_mechanism.error_signal_input_ports[i])
                error_projections.append(error_projection)

        self.add_node(learning_mechanism, required_roles=NodeRole.LEARNING)

        # Wire activation input/output of the learned Projection into the LearningMechanism
        act_in_projection = MappingProjection(sender=input_source.output_ports[0],
                                              receiver=learning_mechanism.input_ports[0])
        act_out_projection = MappingProjection(sender=output_source.output_ports[0],
                                               receiver=learning_mechanism.input_ports[1])
        self.add_projections([act_in_projection, act_out_projection] + error_projections)
        # Tail of _create_non_terminal_backprop_learning_components: hook up the
        # LearningProjection that modulates the learned Projection's MATRIX.
        learning_projection = self._create_learning_projection(learning_mechanism, learned_projection)
        self.add_projection(learning_projection, feedback=True)

        return learning_mechanism

    def _get_back_prop_error_sources(self, receiver_activity_mech, learning_mech=None):
        # FIX CROSSED_PATHWAYS [JDC]:  GENERALIZE THIS TO HANDLE COMPARATOR/TARGET ASSIGNMENTS IN BACKPROP
        #                              AND THEN TO HANDLE ALL FORMS OF LEARNING (AS BELOW)
        #  REFACTOR TO DEAL WITH CROSSING PATHWAYS (?CREATE METHOD ON LearningMechanism TO DO THIS?):
        #  1) Determine whether this is a terminal sequence:
        #     - use arg passed in or determine from context
        #       (see current implementation in add_backpropagation_learning_pathway)
        #     - for terminal sequence, handle target and sample projections as below
        #  2) For non-terminal sequences, determine # of error_signals coming from LearningMechanisms associated with
        #     all efferentprojections of ProcessingMechanism that projects to ACTIVATION_OUTPUT of LearningMechanism
        #     - check validity of existing error_signal projections with respect to those and, if possible,
        #       their correspondence with error_matrices
        #     - check if any ERROR_SIGNAL input_ports are empty (vacated by terminal sequence elements deleted in
        #       add_projection)
        #     - call add_ports method on LearningMechanism to add new ERROR_SIGNAL input_port to its input_ports
        #       and error_matrix to its self.error_matrices attribute
        #     - add new error_signal projection
        """Add any LearningMechanisms associated with efferent projection from receiver_activity_mech

        Returns (error_sources, error_projections); when ``learning_mech`` is passed in,
        error_projections are created here and returned for the caller to add to the Composition.
        """
        error_sources = []
        error_projections = []

        # First get all efferents of receiver_activity_mech with a LearningProjection that are in current Composition
        for efferent in [p for p in receiver_activity_mech.efferents
                         if (hasattr(p, 'has_learning_projection')
                             and p.has_learning_projection
                             and p in self.projections)]:
            # Then get any LearningProjections to that efferent that are in current Composition
            for learning_projection in [mod_aff for mod_aff in efferent.parameter_ports[MATRIX].mod_afferents
                                        if (isinstance(mod_aff, LearningProjection) and mod_aff in self.projections)]:
                error_source = learning_projection.sender.owner
                if (error_source not in self.nodes  # error_source is not in the Composition
                        or (learning_mech  # learning_mech passed in
                            # the error_source is already associated with learning_mech
                            and (error_source in learning_mech.error_sources)
                            # and the error_source already sends a Projection to learning_mech
                            and (learning_mech in [p.receiver.owner for p in error_source.efferents]))):
                    continue  # ignore the error_source
                error_sources.append(error_source)

                # If learning_mech was passed in, add error_source to its list of error_signal_input_ports
                if learning_mech:
                    # FIX: REPLACE WITH learning_mech._add_error_signal_input_port ONCE IMPLEMENTED
                    error_signal_input_port = next((e for e in learning_mech.error_signal_input_ports
                                                    if not e.path_afferents), None)
                    if error_signal_input_port is None:
                        error_signal_input_port = learning_mech.add_ports(
                            InputPort(projections=error_source.output_ports[ERROR_SIGNAL],
                                      name=ERROR_SIGNAL,
                                      context=Context(source=ContextFlags.METHOD)),
                            context=Context(source=ContextFlags.METHOD))
                    # Create Projection here so that don't have to worry about determining correct
                    #    error_signal_input_port of learning_mech in _create_non_terminal_backprop_learning_components
                    error_projections.append(MappingProjection(sender=error_source.output_ports[ERROR_SIGNAL],
                                                               receiver=error_signal_input_port))

        # Return error_sources so they can be used to create a new LearningMechanism if needed
        # Return error_projections created to existing learning_mech
        #    so they can be added to the Composition by _create_non_terminal_backprop_learning_components
        return error_sources, error_projections

    def _get_backprop_error_projections(self, learning_mech, receiver_activity_mech):
        # NOTE(review): largely parallels _get_back_prop_error_sources but returns
        # only the created error_projections; retained as-is.
        error_sources = []
        error_projections = []
        # for error_source in learning_mech.error_sources:
        #     if error_source in self.nodes:
        #         error_sources.append(error_source)
        # Add any LearningMechanisms associated with efferent projection from receiver_activity_mech
        # First get all efferents of receiver_activity_mech with a LearningProjection that are in current Composition
        for efferent in [p for p in receiver_activity_mech.efferents
                         if (hasattr(p, 'has_learning_projection')
                             and p.has_learning_projection
                             and p in self.projections)]:
            # Then any LearningProjections to that efferent that are in current Composition
            for learning_projection in [mod_aff for mod_aff in efferent.parameter_ports[MATRIX].mod_afferents
                                        if (isinstance(mod_aff, LearningProjection) and mod_aff in self.projections)]:
                error_source = learning_projection.sender.owner
                if (error_source in learning_mech.error_sources
                        and error_source in self.nodes
                        and learning_mech in [p.receiver.owner for p in error_source.efferents]):
                    continue
                error_sources.append(error_source)
                # FIX: REPLACE WITH learning_mech._add_error_signal_input_port ONCE IMPLEMENTED
                error_signal_input_port = next((e for e in learning_mech.error_signal_input_ports
                                                if not e.path_afferents), None)
                if error_signal_input_port is None:
                    error_signal_input_port = learning_mech.add_ports(
                        InputPort(projections=error_source.output_ports[ERROR_SIGNAL],
                                  name=ERROR_SIGNAL,
                                  context=Context(source=ContextFlags.METHOD)),
                        context=Context(source=ContextFlags.METHOD))
                # DOES THE ABOVE GENERATE A PROJECTION?  IF SO, JUST GET AND RETURN THAT;  ELSE DO THE FOLLOWING:
                error_projections.append(MappingProjection(sender=error_source.output_ports[ERROR_SIGNAL],
                                                           receiver=error_signal_input_port))
        return error_projections
        #  2) For non-terminal sequences, determine # of error_signals coming from LearningMechanisms associated with
        #     all efferentprojections of ProcessingMechanism that projects to ACTIVATION_OUTPUT of LearningMechanism
        #     - check validity of existing error_signal projections with respect to those and, if possible,
        #       their correspondence with error_matrices
        #     - check if any ERROR_SIGNAL input_ports are empty (vacated by terminal sequence elements deleted in
        #       add_projection)
        #     - call add_ports method on LearningMechanism to add new ERROR_SIGNAL input_port to its input_ports
        #       and error_matrix to its self.error_matrices attribute
        #     - add new error_signal projection

    def _add_error_projection_to_dependent_learning_mechs(self, error_source):
        """Wire ``error_source``'s ERROR_SIGNAL into every LearningMechanism that
        depends on it but does not yet list it as an error source; return the
        resulting Projections."""
        projections = []
        # Get all afferents to receiver_activity_mech in Composition that have LearningProjections
        for afferent in [p for p in error_source.input_source.path_afferents
                         if (p in self.projections
                             and hasattr(p, 'has_learning_projection')
                             and p.has_learning_projection)]:
            # For each LearningProjection to that afferent, if its LearningMechanism doesn't already receiver
            for learning_projection in [lp for lp in afferent.parameter_ports[MATRIX].mod_afferents
                                        if (isinstance(lp, LearningProjection)
                                            and error_source not in lp.sender.owner.error_sources)]:
                dependent_learning_mech = learning_projection.sender.owner
                # NOTE(review): add_ports appears to return a list-like object here
                # (see error_signal_input_port[0] below) — confirm its return type.
                error_signal_input_port = dependent_learning_mech.add_ports(
                    InputPort(projections=error_source.output_ports[ERROR_SIGNAL],
                              name=ERROR_SIGNAL,
                              context=Context(source=ContextFlags.METHOD)),
                    context=Context(source=ContextFlags.METHOD))
                projections.append(error_signal_input_port[0].path_afferents[0])
                # projections.append(MappingProjection(sender=error_source.output_ports[ERROR_SIGNAL],
                #                                      receiver=error_signal_input_port[0]))
        return projections

    # ******************************************************************************************************************
    #                                                CONTROL
    # ******************************************************************************************************************

    # NOTE(review): chunk boundary — add_controller is truncated at the end of this
    # view (it is cut off mid f-string below); reproduced unchanged.
    def add_controller(self, controller:ControlMechanism):
        """
        Add an `OptimizationControlMechanism` as the `controller
        <Composition.controller>` of the Composition, which gives the OCM access to the `Composition`'s
        `evaluate <Composition.evaluate>` method. This allows the OCM to use simulations to determine an optimal Control
        policy.
        """

        if not isinstance(controller, ControlMechanism):
            raise CompositionError(f"Specification of {repr(CONTROLLER)} arg for {self.name} "
                                   f"must be a {repr(ControlMechanism.__name__)} ")

        # VALIDATE AND ADD CONTROLLER

        # Warn for request to assign the ControlMechanism already assigned and ignore
        if controller is self.controller:
            warnings.warn(f"{controller.name} has already been assigned as the {CONTROLLER} "
                          f"for {self.name}; assignment ignored.")
            return

        # Warn for request to assign ControlMechanism that is already the controller of another Composition
        if hasattr(controller, COMPOSITION) and controller.composition is not self:
            warnings.warn(f"{controller} has already been assigned as the {CONTROLLER} "
                          f"for another {COMPOSITION} ({controller.composition.name}); assignment ignored.")
            return

        # Warn if current one is being replaced
        if self.controller and self.prefs.verbosePref:
            warnings.warn(f"The existing {CONTROLLER}
# NOTE(review): extraction artifact from the hosting metadata, neutralized as a
# comment so it cannot break parsing:
# <reponame>jzuhone/kadi<gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
import math
import difflib
import os
from pathlib import Path
import calendar
import re
import gzip
import pickle
import itertools
import functools
import weakref
import logging

import numpy as np
from astropy.table import Table
import astropy.units as u
import requests
from cxotime import CxoTime
from testr.test_helper import has_internet

from Chandra.Maneuver import NSM_attitude
from kadi.commands import get_cmds_from_backstop, conf
from kadi.commands.core import (load_idx_cmds, load_pars_dict, LazyVal,
                                get_par_idx_update_pars_dict, _find,
                                vstack_exact, ska_load_dir, CommandTable,
                                load_name_to_cxotime)
from kadi.commands.command_sets import get_cmds_from_event
from kadi import occweb, paths

# TODO configuration options, but use DEFAULT_* in the mean time
# - commands_version (v1, v2)
MATCHING_BLOCK_SIZE = 500

# TODO: cache translation from cmd_events to CommandTable's [Probably not]

APPROVED_LOADS_OCCWEB_DIR = Path('FOT/mission_planning/PRODUCTS/APPR_LOADS')

# URL to download google sheets `doc_id`
CMD_EVENTS_SHEET_URL = 'https://docs.google.com/spreadsheets/d/{doc_id}/export?format=csv'

# Cached values of the full mission commands archive (cmds_v2.h5, cmds_v2.pkl).
# These are loaded on demand.
IDX_CMDS = LazyVal(functools.partial(load_idx_cmds, version=2))
PARS_DICT = LazyVal(functools.partial(load_pars_dict, version=2))
REV_PARS_DICT = LazyVal(lambda: {v: k for k, v in PARS_DICT.items()})

# Cache of recent commands keyed by scenario
CMDS_RECENT = {}
MATCHING_BLOCKS = {}

# APR1420B was the first load set to have RLTT (backstop 6.9)
RLTT_ERA_START = CxoTime('2020-04-14')

HAS_INTERNET = has_internet()

logger = logging.getLogger(__name__)


def clear_caches():
    """Clear all commands caches.

    This is useful for testing and in case upstream products like the
    Command Events sheet have changed during a session.
    """
    CMDS_RECENT.clear()
    MATCHING_BLOCKS.clear()
    for var in [IDX_CMDS, PARS_DICT, REV_PARS_DICT]:
        try:
            # BUG FIX: this previously did ``del IDX_CMDS._val`` so the loop
            # variable was unused and PARS_DICT / REV_PARS_DICT were never
            # actually cleared. Delete the cached value of each LazyVal.
            del var._val
        except AttributeError:
            # LazyVal not yet evaluated: no cached _val to clear.
            pass

    # Imported here (not at module level) to avoid a circular import.
    from kadi.commands.observations import OBSERVATIONS
    OBSERVATIONS.clear()


def interrupt_load_commands(load, cmds):
    """Cut commands beyond observing or vehicle stop times.

    Orbit point commands are NOT cut so that in the case of a load stop the
    orbit points are still available. This takes advantage of additional code
    that de-duplicates orbit points.

    :param load: dict-like row with ``observing_stop``, ``vehicle_stop`` and
        ``name`` entries ('' means no stop)
    :param cmds: CommandTable-like with ``date``, ``scs`` and ``type`` columns
    :returns: commands with the stopped commands removed
    """
    bad = np.zeros(len(cmds), dtype=bool)
    # SCS > 130 is the observing slot; cut those after the observing stop.
    if load['observing_stop'] != '':
        bad |= ((cmds['date'] > load['observing_stop'])
                & (cmds['scs'] > 130))
    # SCS < 131 is the vehicle slot; cut those after the vehicle stop, but
    # keep ORBPOINT commands (see docstring).
    if load['vehicle_stop'] != '':
        bad |= ((cmds['date'] > load['vehicle_stop'])
                & (cmds['scs'] < 131)
                & (cmds['type'] != 'ORBPOINT'))
    if np.any(bad):
        logger.info(f'Cutting {bad.sum()} commands from {load["name"]}')
        cmds = cmds[~bad]
    return cmds


def _merge_cmds_archive_recent(start, scenario):
    """Merge cmds archive from ``start`` onward with recent cmds for ``scenario``

    This assumes:
    - CMDS_RECENT cache has been set with that scenario.
    - Recent commands overlap the cmds archive

    :parameter start: CxoTime-like,
        Start time for returned commands
    :parameter scenario: str
        Scenario name
    :returns: CommandTable
        Commands from cmds archive and all recent commands
    """
    cmds_recent = CMDS_RECENT[scenario]

    logger.info(f'Merging cmds_recent with archive commands from {start}')

    if scenario not in MATCHING_BLOCKS:
        # Get index for start of cmds_recent within the cmds archive
        i0_arch_recent = IDX_CMDS.find_date(cmds_recent['date'][0])

        # Find the end of the first large (MATCHING_BLOCK_SIZE) block of
        # cmds_recent that overlap with archive cmds. Look for the matching
        # block in a subset of archive cmds that starts at the start of
        # cmds_recent. `arch_recent_offset` is the offset from `i0_arch_recent`
        # to the end of the matching block. `i0_recent` is the end of the
        # matching block in recent commands.
        arch_recent_offset, recent_block_end = get_matching_block_idx(
            IDX_CMDS[i0_arch_recent:], cmds_recent)
        arch_block_end = i0_arch_recent + arch_recent_offset
        # Cache so later calls for the same scenario skip the block search.
        MATCHING_BLOCKS[scenario] = arch_block_end, recent_block_end, i0_arch_recent
    else:
        arch_block_end, recent_block_end, i0_arch_recent = MATCHING_BLOCKS[scenario]

    # Get archive commands from the requested start time (or start of the overlap
    # with recent commands) to the end of the matching block in recent commands.
    i0_arch_start = min(IDX_CMDS.find_date(start), i0_arch_recent)

    cmds_arch = IDX_CMDS[i0_arch_start:arch_block_end]

    # Stored archive commands HDF5 has no `params` column, instead storing an
    # index to the param values which are in PARS_DICT. Add `params` object
    # column with None values and then stack with cmds_recent (which has
    # `params` already as dicts).
    cmds = vstack_exact([cmds_arch, cmds_recent[recent_block_end:]])

    # Need to give CommandTable a ref to REV_PARS_DICT so it can translate from
    # params index to the actual dict of values. Stored as a weakref so that
    # pickling and other serialization doesn't break.
    cmds.rev_pars_dict = weakref.ref(REV_PARS_DICT)

    return cmds


def get_matching_block_idx_simple(cmds_recent, cmds_arch, min_match):
    """Return the index in ``cmds_arch`` where the block matching the first
    ``min_match`` commands of ``cmds_recent`` begins (all key columns equal)."""
    # Find the first command in cmd_arch that starts at the same date as the
    # block of recent commands. There might be multiple commands at the same
    # date, so we walk through this getting a block match of `min_match` size.
    # Matching block is defined by all `key_names` columns matching.
    date0 = cmds_recent['date'][0]
    i0_arch = cmds_arch.find_date(date0)

    key_names = ('date', 'type', 'tlmsid', 'scs', 'step', 'vcdu')

    # Find block of commands in cmd_arch that match first min_match of
    # cmds_recent. Special case is min_match=0, which means we just want to
    # append the cmds_recent to the end of cmds_arch. This is the case for
    # the transition from pre-RLTT (APR1420B) to post, for the one-time
    # migration from version 1 to version 2.
    while min_match > 0:
        if all(np.all(cmds_arch[name][i0_arch:i0_arch + min_match]
                      == cmds_recent[name][:min_match])
               for name in key_names):
            break
        # No joy, step forward and make sure date still matches
        i0_arch += 1
        # NOTE(review): if i0_arch walks past the end of cmds_arch this raises
        # IndexError rather than the intended ValueError — presumably a date
        # match always exists within the archive; confirm upstream.
        if cmds_arch['date'][i0_arch] != date0:
            raise ValueError(f'No matching commands block in archive found for recent_commands')

    logger.info(f'Found matching commands block in archive at {i0_arch}')
    return i0_arch


def get_cmds(start=None, stop=None, inclusive_stop=False, scenario=None, **kwargs):
    """Get commands using loads table, relying entirely on RLTT.

    :param start: CxoTime-like
        Start time for cmds
    :param stop: CxoTime-like
        Stop time for cmds
    :param scenario: str, None
        Scenario name
    :param inclusive_stop: bool
        Include commands at exactly ``stop`` if True.
    :param loads_stop: CxoTime-like, None
        Stop time for loads table (default is all available loads, but useful
        for development/testing work)
    :param **kwargs: dict
        key=val keyword argument pairs for filtering

    :returns: CommandTable
    """
    # Environment variable wins over the scenario argument.
    scenario = os.environ.get('KADI_SCENARIO', scenario)
    start = CxoTime('1999:001' if start is None else start)
    stop = (CxoTime.now() + 1 * u.year) if stop is None else CxoTime(stop)

    # Default stop is either now (typically) or set by env var
    default_stop = CxoTime(os.environ.get('KADI_COMMANDS_DEFAULT_STOP'))

    # For flight scenario or no internet or if the query stop time is guaranteed
    # to not require recent commands then just use the archive.
    before_recent_cmds = stop < default_stop - conf.default_lookback * u.day
    if scenario == 'flight' or not HAS_INTERNET or before_recent_cmds:
        cmds = IDX_CMDS
        logger.info('Getting commands from archive only')
    else:
        if scenario not in CMDS_RECENT:
            cmds_recent = update_archive_and_get_cmds_recent(
                scenario, cache=True, pars_dict=PARS_DICT, rev_pars_dict=REV_PARS_DICT)
        else:
            cmds_recent = CMDS_RECENT[scenario]

        # Get `cmds` as correct mix of recent and archive commands that contains
        # the requested date range.
        if stop.date < cmds_recent['date'][0]:
            # Query does not overlap with recent commands, just use archive.
            logger.info('Getting commands from archive only')
            cmds = IDX_CMDS
        elif start < CxoTime(cmds_recent['date'][0]) + 3 * u.day:
            # Query starts near beginning of recent commands and *might* need some
            # archive commands. The margin is set at 3 days to ensure that OBS
            # command continuity is maintained (there is at least one maneuver).
            cmds = _merge_cmds_archive_recent(start, scenario)
            logger.info(f'Getting commands from archive + recent {scenario=}')
        else:
            # Query is strictly within recent commands.
            cmds = cmds_recent
            logger.info(f'Getting commands from recent only {scenario=}')

    # Select the requested time range and make a copy. (Slicing is a view so
    # in theory bad things could happen without a copy).
    idx0 = cmds.find_date(start)
    idx1 = cmds.find_date(stop, side=('right' if inclusive_stop else 'left'))
    cmds = cmds[idx0:idx1].copy()

    if kwargs:
        # Specified extra filters on cmds search
        pars_dict = PARS_DICT.copy()
        # For any recent commands that have params as a dict, those will have
        # idx = -1. This doesn't work with _find, which is optimized to search
        # pars_dict for the matching search keys.
        # TODO: this step is only really required for kwargs that are not a column,
        # i.e. keys that are found only in params.
        for ii in np.flatnonzero(cmds['idx'] == -1):
            cmds[ii]['idx'] = get_par_idx_update_pars_dict(pars_dict, cmds[ii])
        cmds = _find(idx_cmds=cmds, pars_dict=pars_dict, **kwargs)

    cmds.rev_pars_dict = weakref.ref(REV_PARS_DICT)
    cmds['time'].info.format = '.3f'

    return cmds


# NOTE(review): chunk boundary — the function below is truncated at the end of
# this view (its docstring is cut off); reproduced unchanged.
def update_archive_and_get_cmds_recent(scenario=None, *, lookback=None, stop=None,
                                       cache=True, pars_dict=None, rev_pars_dict=None):
    """Update local loads table and downloaded loads and return all recent cmds.

    This also caches the recent commands in the global CMDS_RECENT dict.

    This relies entirely on RLTT and load_events to assemble the commands.

    :param scenario: str, None
        Scenario name
    :param lookback: int, Quantity, None
        Lookback time from ``stop`` for recent loads. If None, use
        conf.default_lookback.
    :param stop: CxoTime-like, None
        Stop time for loads table (default is now + 21 days)
    :param cache: bool
        Cache
# NOTE(review): chunk boundary — the lines below, up to the first `def`, are the
# tail of a cross-validation helper whose `def` line lies outside this view;
# reproduced unchanged. This file is Python 2 (note the `print` statements).
value
            clf = svm.SVC(kernel="precomputed", C=1, verbose=False)
            clf.fit(trn_kernel_mat, trn_lab)
            label_hat = clf.predict(tst_kernel_mat)
            score_hat = clf.score(tst_kernel_mat, tst_lab)
            score_list.append(score_hat)
        print "mAP (%.2f) at C=%.2f" % (np.mean(score_list)*100,C)
        map_scores.append(np.mean(score_list)*100)
    return map_scores.index(max(map_scores))


def compute_distance_signatures(config, options):
    """ Computes SGD (summary-graph-distance) features.

    1) For all individuals (trn/tst), compile a list file for SG distance
       computation (i.e., a list of graphs)
    2) Run the kernel on all distance files
    3) Extract relevant kernel entries and compute the induced distance, i.e.,
       d(x,y) = || phi(x) - phi(y) ||, where phi() is the feature-mapping.

    :param config: Configuration options for the executables.
    :param options: Configuration options for controlling the experiment flow.
    """
    logger = logging.getLogger()

    cv_dir = os.path.join(options["dest"], "cv-%.4d" % options["id"])
    if not check_dir(cv_dir):
        raise Exception("Cross-validation directory %s missing!" % cv_dir)

    std_groupings = set()
    all_groupings = set()
    for group in options["groupLabel"]:
        std_groupings.add(group)
        all_groupings.add(group)
    all_groupings.add("Common")

    logger.info("Creating SGD list!")

    label_list = []
    graph_list = []
    # Add the summary graphs to the list
    for grp_atlas in std_groupings:
        for atlas in all_groupings:
            # Only with the 'Common' atlas, we stay in the same space
            if not (atlas == "Common"):
                continue
            summary_graph_file = os.path.join(cv_dir, grp_atlas,
                                              "Normal-mean-%s.grp.mat" % atlas)
            if not check_file(summary_graph_file):
                raise Exception("Summary graph file %s missing!"
                                % summary_graph_file)
            graph_list.append(summary_graph_file)

    # Create summary-graph list JSON file; -1 labels mark "no class" entries.
    json_dict = {"nGraphs" : len(graph_list),
                 "graphList" : graph_list,
                 "labels" : [-1]*len(graph_list)}
    sgd_file = os.path.join(cv_dir, "sgd-list.txt")
    with open(sgd_file, "w") as outfile:
        json.dump(json_dict, outfile)

    # Compute the k(y,y) entries ... (used in kernel-induced distance)
    k2_file = os.path.join(cv_dir, "k2-sgd.kern")
    cmd = [config["Exec"]["TubeGraphKernel"],
           sgd_file,
           sgd_file,
           k2_file,
           "--defaultLabelType %d" % options["defaultLabelType"],
           "--graphKernelType %d" % options["graphKernelType"],
           "--subtreeHeight %d" % options["subtreeHeight"]]
    subprocess.call(cmd)

    logger.info("Creating subject-specific graph lists for SGD")

    label_map = create_label_map(options)
    for cnt,subject in enumerate(options["subjects"]):
        label_list = []
        graph_list = []
        for atlas_used_for_graph in all_groupings:
            if not (atlas_used_for_graph == "Common"):
                continue
            # Add the subject's graph file (for one atlas type)
            subject_graph_file = os.path.join(cv_dir,os.path.basename(subject),
                                              "VascularNetwork-t1-phantom-%s.grp.mat" % atlas_used_for_graph)
            if not check_file(subject_graph_file):
                raise Exception("File %s missing!" % subject_graph_file)
            graph_list.append(subject_graph_file)
            label_list.append(label_map[options["groupLabel"][cnt]])

        # Dictionary entries ...
        json_dict = { "nGraphs" : len(graph_list),
                      "graphList" : graph_list,
                      "labels" : label_list }

        # Write subject graph list to subject's directory
        subject_graph_list_file = os.path.join(cv_dir,
                                               os.path.basename(subject),
                                               "sgd-compare-graph-list.txt")
        with open(subject_graph_list_file, "w") as outfile:
            json.dump(json_dict, outfile)

        # We can now compute the kernel matrices
        k0_file = os.path.join(cv_dir, os.path.basename(subject), "k0-sgd.kern")
        k1_file = os.path.join(cv_dir, os.path.basename(subject), "k1-sgd.kern")

        # First, the k(x,x) entries ... (used in kernel-induced distance)
        cmd = [config["Exec"]["TubeGraphKernel"],
               subject_graph_list_file,
               subject_graph_list_file,
               k0_file,
               "--defaultLabelType %d" % options["defaultLabelType"],
               "--graphKernelType %d" % options["graphKernelType"],
               "--subtreeHeight %d" % options["subtreeHeight"]]
        subprocess.call(cmd)

        # Second, the k(x,y) entries ... (used in kernel-induced distance)
        cmd = [config["Exec"]["TubeGraphKernel"],
               subject_graph_list_file,
               sgd_file,
               k1_file,
               "--defaultLabelType %d" % options["defaultLabelType"],
               "--graphKernelType %d" % options["graphKernelType"],
               "--subtreeHeight %d" % options["subtreeHeight"]]
        subprocess.call(cmd)


def compute_glo_label_map(config, options):
    """ Computes a global label map from CVT cell ID to a discrete label.

    The created mapping files can later be used as input to the graph kernel(s)
    to provide better (more discriminative) labelings of nodes.
    """
    logger = logging.getLogger()

    cv_dir = os.path.join(options["dest"], "cv-%.4d" % options["id"])
    if not check_dir(cv_dir):
        raise Exception("Cross-validation directory %s missing!" % cv_dir)

    atlas_cvt_file = os.path.join(cv_dir, "Common", "bNormalDD-mean.cvt.mha")
    if not check_file(atlas_cvt_file):
        raise Exception("CVT file %s for group Common not found!"
                        % atlas_cvt_file)

    # Newly created CVT files, one (w)ith background cell counting, the other has (n)o background cell counting
    relabeled_wBack_atlas_cvt_img_file = os.path.join(cv_dir, "Common", "relabeled-wBack-bNormalDD-mean.cvt.mha")
    relabeled_nBack_atlas_cvt_img_file = os.path.join(cv_dir, "Common", "relabeled-nBack-bNormalDD-mean.cvt.mha")

    # CVT cell ID to new label mapping, one (w)ith background cell counting, the other has (n)o background cell counting
    relabeled_wBack_atlas_cvt_map_file = os.path.join(cv_dir, "Common", "relabeled-wBack-bNormalDD-mean.cvt.map")
    relabeled_nBack_atlas_cvt_map_file = os.path.join(cv_dir, "Common", "relabeled-nBack-bNormalDD-mean.cvt.map")

    if options["segmentationImage"] is None:
        raise Exception("No segmentation file given!")

    # Run with background cell labeling
    cmd = [config["Exec"]["TransferLabelsToRegions"],
           atlas_cvt_file,
           options["segmentationImage"],
           relabeled_wBack_atlas_cvt_img_file,
           relabeled_wBack_atlas_cvt_map_file,
           "--omitRegions %d" % 0]
    subprocess.call(cmd)

    # Rum without background cell labeling
    cmd = [config["Exec"]["TransferLabelsToRegions"],
           atlas_cvt_file,
           options["segmentationImage"],
           relabeled_nBack_atlas_cvt_img_file,
           relabeled_nBack_atlas_cvt_map_file]
    subprocess.call(cmd)


def evaluate_classifier_from_full_gk(config, options):
    """ Train and evaluate a SVM classifier, assuming that we have access to
    all data, i.e., training and testing.

    Train a SVM using a full (training + testing) kernel. During training, only
    those kernel values that correspond to training sample pairs are considered,
    obviously.

    :param config: Configuration options for the executables.
    :param options: Configuration options for controlling the experiment flow.
    """
    logger = logging.getLogger()

    cv_dir = os.path.join(options["dest"], "cv-%.4d" % options["id"])
    if not check_dir(cv_dir):
        raise Exception("Cross-validation directory %s missing!"
                        % cv_dir)

    N = len(options["trn"]) # M training samples
    M = len(options["tst"]) # N testing samples

    # Create mapping of class to label
    label_map = create_label_map(options)

    # This is the file created by TubeGraphKernel
    kernel_data_file = os.path.join(cv_dir, "full-Common.kern.bin")
    if not check_file(kernel_data_file):
        raise Exception("Training kernel data file %s missing!" % kernel_data_file)

    # Reads kernel from binary file and normalizes it
    kernel_mat = np.fromfile(kernel_data_file, dtype="double").reshape(N+M,N+M);

    # Create training/testing/all labels
    tst_labels = []
    trn_labels = []
    all_labels = []
    for i in options["trn"]:
        label = label_map[options["groupLabel"][i]]
        trn_labels.append(label)
    for i in options["tst"]:
        label = label_map[options["groupLabel"][i]]
        tst_labels.append(label)
    all_labels = trn_labels + tst_labels

    # The set of cost factors, we use for CV
    cost_factors = [0.1, 1, 5, 10, 20, 50];

    # Normalize the kernel data
    normalized_kernel_mat = normalize_kernel(kernel_mat)

    # Extract the training portion of the kernel: drop test rows and test
    # columns from the full (N+M)x(N+M) matrix, leaving NxN.
    list_of_tst_samples = range(N, N+M)
    assert len(list_of_tst_samples) == M, 'Mismatch in list size of testing samples!'
    trn_kernel_mat = np.matrix(normalized_kernel_mat)
    pruned_trn_kernel_mat = np.delete(trn_kernel_mat, list_of_tst_samples, axis=0)
    pruned_trn_kernel_mat = np.delete(pruned_trn_kernel_mat, list_of_tst_samples, axis=1)

    # Extract the testing portion of the kernel
    list_of_trn_samples = range(0,N)
    assert len(list_of_trn_samples) == N, 'Mismatch in list size of training samples!'
    # Test portion: drop training rows, then test columns, leaving MxN
    # (test samples vs. training samples) as required by a precomputed kernel.
    tst_kernel_mat = np.matrix(normalized_kernel_mat)
    pruned_tst_kernel_mat = np.delete(tst_kernel_mat, list_of_trn_samples, axis=0)
    pruned_tst_kernel_mat = np.delete(pruned_tst_kernel_mat, list_of_tst_samples, axis=1)

    # Create a SVM classifier and cross-validate the cost factor
    clf = svm.SVC(kernel="precomputed", C=1, verbose=True)
    optimal_cost_idx = crossvalidate_cost_param(
        clf, pruned_trn_kernel_mat, trn_labels, 20, cost_factors, 0.5)
    logger.info("Optimal cost = %.2f", cost_factors[optimal_cost_idx])

    # Re-fit with the selected cost factor and report training/testing scores.
    clf = svm.SVC(kernel="precomputed", C=cost_factors[optimal_cost_idx], verbose=True)
    score = clf.fit(pruned_trn_kernel_mat, trn_labels).score(pruned_trn_kernel_mat, trn_labels)
    logger.info("Training finished (Score = %.2f)!" % score)

    y = clf.predict(pruned_tst_kernel_mat)
    score = clf.score(pruned_tst_kernel_mat, tst_labels)
    logger.info("Final score %.2f", 100 * score)


def trn_classifier(config, options):
    """ Train a support vector machine classifer.

    :param config: Configuration options for the executables.
    :param options: Configuration options for controlling the experiment flow.
    """
    logger = logging.getLogger()

    cv_dir = os.path.join(options["dest"], "cv-%.4d" % options["id"])
    if not check_dir(cv_dir):
        raise Exception("Cross-validation directory %s missing!" % cv_dir)

    N = len(options["trn"])
    label_map = create_label_map(options)
    logger.debug("%d subjects in training data, %d label types!" % (N, len(label_map)))

    kernel_data_file = os.path.join(cv_dir, "trn-Common.kern.bin")
    if not check_file(kernel_data_file):
        raise Exception("Training kernel data file %s missing!"
                        % kernel_data_file)

    # Reads kernel from binary file and normalizes it
    kernel_data = np.fromfile(kernel_data_file, dtype="double").reshape(N,N);

    labels = []
    for i in options["trn"]:
        label = label_map[options["groupLabel"][i]]
        labels.append(label)

    logger.debug("Training C-SVM with precomputed kernel ...")
    clf = svm.SVC(kernel="precomputed", C=200, verbose=True)
    score = clf.fit(kernel_data, labels).score(kernel_data, labels)
    logger.debug("Training finished (Score = %.2f)!" % score)

    # Fraction of training samples used as support vectors (Python 2 print).
    print float(sum(clf.n_support_))/N

    clf_out_file = os.path.join(cv_dir, "svm-Common.clf")
    with open(clf_out_file, 'wb') as fid:
        cPickle.dump(clf, fid)
    logger.debug("Wrote classifer to %s!" % clf_out_file)

    # Write SVM information file
    svm_info_file = os.path.join(cv_dir, "svm-Info.json")
    svm_info = dict()
    svm_info["num_sv"] = float(sum(clf.n_support_))/N
    svm_info["trn_score"] = score
    with open(svm_info_file, 'wb') as fp:
        json.dump(svm_info, fp)
    logger.debug("Wrote classifier info to %s!" % svm_info_file)


def tst_classifier(config, options):
    """ Test the support vector machine classifier.

    :param config: Configuration options for the executables.
    :param options: Configuration options for controlling the experiment flow.
    """
    logger = logging.getLogger()

    cv_dir = os.path.join(options["dest"], "cv-%.4d" % options["id"])
    if not check_dir(cv_dir):
        raise Exception("Cross-validation directory %s missing!" % cv_dir)

    N = len(options["trn"])
    M = len(options["tst"])
    label_map = create_label_map(options)
    logger.debug("Testing on %d samples (%d used in training)" % (M,N))

    labels = []
    for i in options["tst"]:
        label = label_map[options["groupLabel"][i]]
        labels.append(label)

    kernel_data_file = os.path.join(cv_dir, "tst-Common.kern.bin")
    if not check_file(kernel_data_file):
        raise Exception("Testing kernel data file %s missing!"
                        % kernel_data_file)

    kernel_data = np.fromfile(kernel_data_file, dtype="double").reshape(M,N);

    # Load classifier from disk
    # NOTE(review): cPickle.load on an on-disk classifier file executes
    # arbitrary code if the file is untrusted — acceptable only for local,
    # self-generated files; confirm.
    clf_file = os.path.join(cv_dir, "svm-Common.clf")
    if not check_file(clf_file):
        raise Exception("Classifier %s missing!" % clf_file)
    with open(clf_file, "rb") as fid:
        clf = cPickle.load(fid)

    y = clf.predict(kernel_data)
    score = clf.score(kernel_data, labels)
    logger.debug("Testing score = %.2f (%d/%d)" % (score, int(score*M), M))

    # Try to update the SVM information file
    svm_info = dict()
    svm_info_file = os.path.join(cv_dir, "svm-Info.json")
    if check_file(svm_info_file):
        fp = open(svm_info_file)
        svm_info = json.load(fp)
        svm_info["tst_score"] = score
        svm_info["truth"] = list(labels)
        svm_info["guess"] = list(y)
        fp.close()
    # NOTE(review): chunk boundary — the statement below is truncated at the end
    # of this view; reproduced unchanged.
    with open(svm_info_file, 'wb')
scores = (50, 46). Player 0 rolls 8 dice and gets outcomes [2, 6, 4, 6, 1, 4, 4, 5]. End scores = (51, 46) >>> print(turns[5]) Start scores = (51, 46). Player 1 rolls 0 dice and gets outcomes []. End scores = (51, 49) >>> print(turns[6]) Start scores = (51, 49). Player 0 rolls 6 dice and gets outcomes [5, 3, 1, 5, 1, 2]. End scores = (52, 49) >>> print(turns[7]) Start scores = (52, 49). Player 1 rolls 10 dice and gets outcomes [1, 2, 3, 6, 1, 4, 5, 4, 4, 4]. End scores = (52, 50) >>> print(turns[8]) Start scores = (52, 50). Player 0 rolls 10 dice and gets outcomes [6, 4, 1, 1, 1, 5, 5, 3, 1, 2]. End scores = (53, 50) >>> print(turns[9]) Start scores = (53, 50). Player 1 rolls 4 dice and gets outcomes [4, 2, 1, 5]. End scores = (53, 51) >>> print(turns[10]) Start scores = (53, 51). Player 0 rolls 8 dice and gets outcomes [3, 4, 5, 2, 6, 4, 5, 2]. End scores = (84, 51) >>> print(turns[11]) Game Over """, 'hidden': False, 'locked': False }, { 'code': r""" >>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=85986, score0=35, score1=12, goal=74, feral_hogs=False) >>> print(turns[0]) Start scores = (35, 12). Player 0 rolls 10 dice and gets outcomes [6, 2, 3, 4, 3, 3, 6, 5, 1, 2]. End scores = (36, 12) >>> print(turns[1]) Start scores = (36, 12). Player 1 rolls 4 dice and gets outcomes [5, 3, 6, 1]. End scores = (36, 13) >>> print(turns[2]) Start scores = (36, 13). Player 0 rolls 5 dice and gets outcomes [2, 3, 3, 6, 4]. End scores = (54, 13) >>> print(turns[3]) Start scores = (54, 13). Player 1 rolls 10 dice and gets outcomes [2, 5, 1, 6, 3, 6, 4, 3, 3, 5]. End scores = (54, 14) >>> print(turns[4]) Start scores = (54, 14). Player 0 rolls 3 dice and gets outcomes [2, 2, 3]. End scores = (61, 14) >>> print(turns[5]) Start scores = (61, 14). Player 1 rolls 2 dice and gets outcomes [4, 5]. End scores = (23, 61) >>> print(turns[6]) Start scores = (23, 61). Player 0 rolls 0 dice and gets outcomes []. 
End scores = (28, 61) >>> print(turns[7]) Start scores = (28, 61). Player 1 rolls 4 dice and gets outcomes [6, 3, 6, 5]. End scores = (28, 81) >>> print(turns[8]) Game Over """, 'hidden': False, 'locked': False }, { 'code': r""" >>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=76820, score0=28, score1=14, goal=61, feral_hogs=False) >>> print(turns[0]) Start scores = (28, 14). Player 0 rolls 4 dice and gets outcomes [2, 6, 4, 1]. End scores = (29, 14) >>> print(turns[1]) Start scores = (29, 14). Player 1 rolls 0 dice and gets outcomes []. End scores = (29, 17) >>> print(turns[2]) Start scores = (29, 17). Player 0 rolls 5 dice and gets outcomes [1, 1, 1, 3, 3]. End scores = (30, 17) >>> print(turns[3]) Start scores = (30, 17). Player 1 rolls 9 dice and gets outcomes [6, 6, 6, 4, 2, 5, 4, 1, 5]. End scores = (30, 18) >>> print(turns[4]) Start scores = (30, 18). Player 0 rolls 9 dice and gets outcomes [5, 3, 6, 5, 4, 4, 5, 3, 5]. End scores = (70, 18) >>> print(turns[5]) Game Over """, 'hidden': False, 'locked': False }, { 'code': r""" >>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=83984, score0=64, score1=49, goal=78, feral_hogs=True) >>> print(turns[0]) Start scores = (64, 49). Player 0 rolls 7 dice and gets outcomes [3, 5, 3, 5, 6, 3, 4]. End scores = (93, 49) >>> print(turns[1]) Game Over """, 'hidden': False, 'locked': False }, { 'code': r""" >>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=25773, score0=3, score1=17, goal=30, feral_hogs=True) >>> print(turns[0]) Start scores = (3, 17). Player 0 rolls 5 dice and gets outcomes [3, 4, 5, 4, 6]. End scores = (25, 17) >>> print(turns[1]) Start scores = (25, 17). Player 1 rolls 3 dice and gets outcomes [2, 1, 4]. End scores = (25, 18) >>> print(turns[2]) Start scores = (25, 18). Player 0 rolls 4 dice and gets outcomes [5, 1, 5, 3]. End scores = (26, 18) >>> print(turns[3]) Start scores = (26, 18). 
Player 1 rolls 8 dice and gets outcomes [1, 4, 3, 2, 1, 5, 6, 2]. End scores = (26, 19) >>> print(turns[4]) Start scores = (26, 19). Player 0 rolls 9 dice and gets outcomes [6, 5, 6, 2, 1, 5, 2, 1, 1]. End scores = (27, 19) >>> print(turns[5]) Start scores = (27, 19). Player 1 rolls 10 dice and gets outcomes [5, 6, 6, 4, 2, 5, 3, 3, 2, 4]. End scores = (27, 59) >>> print(turns[6]) Game Over """, 'hidden': False, 'locked': False }, { 'code': r""" >>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=6012, score0=30, score1=3, goal=85, feral_hogs=False) >>> print(turns[0]) Start scores = (30, 3). Player 0 rolls 0 dice and gets outcomes []. End scores = (36, 3) >>> print(turns[1]) Start scores = (36, 3). Player 1 rolls 1 dice and gets outcomes [1]. End scores = (4, 36) >>> print(turns[2]) Start scores = (4, 36). Player 0 rolls 4 dice and gets outcomes [4, 6, 2, 4]. End scores = (20, 36) >>> print(turns[3]) Start scores = (20, 36). Player 1 rolls 6 dice and gets outcomes [2, 4, 2, 4, 3, 3]. End scores = (20, 54) >>> print(turns[4]) Start scores = (20, 54). Player 0 rolls 2 dice and gets outcomes [4, 6]. End scores = (54, 30) >>> print(turns[5]) Start scores = (54, 30). Player 1 rolls 1 dice and gets outcomes [4]. End scores = (54, 34) >>> print(turns[6]) Start scores = (54, 34). Player 0 rolls 5 dice and gets outcomes [4, 6, 6, 6, 5]. End scores = (34, 81) >>> print(turns[7]) Start scores = (34, 81). Player 1 rolls 0 dice and gets outcomes []. End scores = (34, 83) >>> print(turns[8]) Start scores = (34, 83). Player 0 rolls 0 dice and gets outcomes []. End scores = (42, 83) >>> print(turns[9]) Start scores = (42, 83). Player 1 rolls 2 dice and gets outcomes [5, 1]. End scores = (42, 84) >>> print(turns[10]) Start scores = (42, 84). Player 0 rolls 4 dice and gets outcomes [1, 4, 2, 4]. End scores = (43, 84) >>> print(turns[11]) Start scores = (43, 84). Player 1 rolls 10 dice and gets outcomes [5, 3, 3, 4, 1, 1, 5, 6, 1, 1]. 
End scores = (85, 43) >>> print(turns[12]) Game Over """, 'hidden': False, 'locked': False }, { 'code': r""" >>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=56692, score0=69, score1=40, goal=71, feral_hogs=False) >>> print(turns[0]) Start scores = (69, 40). Player 0 rolls 6 dice and gets outcomes [6, 4, 6, 4, 4, 1]. End scores = (70, 40) >>> print(turns[1]) Start scores = (70, 40). Player 1 rolls 3 dice and gets outcomes [3, 3, 4]. End scores = (50, 70) >>> print(turns[2]) Start scores = (50, 70). Player 0 rolls 2 dice and gets outcomes [5, 5]. End scores = (60, 70) >>> print(turns[3]) Start scores = (60, 70). Player 1 rolls 3 dice and gets outcomes [6, 2, 5]. End scores = (60, 83) >>> print(turns[4]) Game Over """, 'hidden': False, 'locked': False }, { 'code': r""" >>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=11528, score0=6, score1=7, goal=17, feral_hogs=False) >>> print(turns[0]) Start scores = (6, 7). Player 0 rolls 3 dice and gets outcomes [2, 6, 2]. End scores = (16, 7) >>> print(turns[1]) Start scores = (16, 7). Player 1 rolls 8 dice and gets outcomes [6, 3, 1, 5, 2, 6, 5, 5]. End scores = (16,
<reponame>cfculhane/autorest.python<gh_stars>10-100
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from typing import Dict, List, Optional

from azure.core.exceptions import HttpResponseError
import msrest.serialization


class BaseProduct(msrest.serialization.Model):
    """The product documentation.

    All required parameters must be populated in order to send to Azure.

    :ivar product_id: Required. Unique identifier representing a specific product for a given
     latitude & longitude. For example, uberX in San Francisco will have a different product_id
     than uberX in Los Angeles.
    :vartype product_id: str
    :ivar description: Description of product.
    :vartype description: str
    """

    # Fields that msrest validates before serialization.
    _validation = {
        "product_id": {"required": True},
    }

    # Maps Python attribute names to wire-format keys used by msrest.
    _attribute_map = {
        "product_id": {"key": "base_product_id", "type": "str"},
        "description": {"key": "base_product_description", "type": "str"},
    }

    def __init__(self, *, product_id: str, description: Optional[str] = None, **kwargs):
        """
        :keyword product_id: Required. Unique identifier representing a specific product for a
         given latitude & longitude. For example, uberX in San Francisco will have a different
         product_id than uberX in Los Angeles.
        :paramtype product_id: str
        :keyword description: Description of product.
        :paramtype description: str
        """
        super(BaseProduct, self).__init__(**kwargs)
        self.product_id = product_id
        self.description = description


class Error(msrest.serialization.Model):
    """Error.

    :ivar status:
    :vartype status: int
    :ivar message:
    :vartype message: str
    :ivar parent_error:
    :vartype parent_error: ~modelflattening.models.Error
    """

    _attribute_map = {
        "status": {"key": "status", "type": "int"},
        "message": {"key": "message", "type": "str"},
        # Self-referential: errors may be chained through parent_error.
        "parent_error": {"key": "parentError", "type": "Error"},
    }

    def __init__(
        self,
        *,
        status: Optional[int] = None,
        message: Optional[str] = None,
        parent_error: Optional["Error"] = None,
        **kwargs
    ):
        """
        :keyword status:
        :paramtype status: int
        :keyword message:
        :paramtype message: str
        :keyword parent_error:
        :paramtype parent_error: ~modelflattening.models.Error
        """
        super(Error, self).__init__(**kwargs)
        self.status = status
        self.message = message
        self.parent_error = parent_error


class Resource(msrest.serialization.Model):
    """Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar type: Resource Type.
    :vartype type: str
    :ivar tags: A set of tags. Dictionary of :code:`<string>`.
    :vartype tags: dict[str, str]
    :ivar location: Resource Location.
    :vartype location: str
    :ivar name: Resource Name.
    :vartype name: str
    """

    # readonly fields are server-populated and never serialized on requests.
    _validation = {
        "id": {"readonly": True},
        "type": {"readonly": True},
        "name": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "location": {"key": "location", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, location: Optional[str] = None, **kwargs):
        """
        :keyword tags: A set of tags. Dictionary of :code:`<string>`.
        :paramtype tags: dict[str, str]
        :keyword location: Resource Location.
        :paramtype location: str
        """
        super(Resource, self).__init__(**kwargs)
        self.id = None
        self.type = None
        self.tags = tags
        self.location = location
        self.name = None


class FlattenedProduct(Resource):
    """Flattened product.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar type: Resource Type.
    :vartype type: str
    :ivar tags: A set of tags. Dictionary of :code:`<string>`.
    :vartype tags: dict[str, str]
    :ivar location: Resource Location.
    :vartype location: str
    :ivar name: Resource Name.
    :vartype name: str
    :ivar p_name:
    :vartype p_name: str
    :ivar type_properties_type:
    :vartype type_properties_type: str
    :ivar provisioning_state_values: Possible values include: "Succeeded", "Failed", "canceled",
     "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
    :vartype provisioning_state_values: str or
     ~modelflattening.models.FlattenedProductPropertiesProvisioningStateValues
    :ivar provisioning_state:
    :vartype provisioning_state: str
    """

    _validation = {
        "id": {"readonly": True},
        "type": {"readonly": True},
        "name": {"readonly": True},
        "provisioning_state_values": {"readonly": True},
    }

    # NOTE(review): "properties." prefixes flatten nested JSON; "\\." appears
    # to escape a literal dot inside a key segment — confirm against msrest
    # flattening rules before editing these keys.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "location": {"key": "location", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "p_name": {"key": "properties.p\\.name", "type": "str"},
        "type_properties_type": {"key": "properties.type", "type": "str"},
        "provisioning_state_values": {"key": "properties.provisioningStateValues", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        location: Optional[str] = None,
        p_name: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        provisioning_state: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Dictionary of :code:`<string>`.
        :paramtype tags: dict[str, str]
        :keyword location: Resource Location.
        :paramtype location: str
        :keyword p_name:
        :paramtype p_name: str
        :keyword type_properties_type:
        :paramtype type_properties_type: str
        :keyword provisioning_state:
        :paramtype provisioning_state: str
        """
        super(FlattenedProduct, self).__init__(tags=tags, location=location, **kwargs)
        self.p_name = p_name
        self.type_properties_type = type_properties_type
        self.provisioning_state_values = None
        self.provisioning_state = provisioning_state


class FlattenParameterGroup(msrest.serialization.Model):
    """Parameter group.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. Product name with value 'groupproduct'.
    :vartype name: str
    :ivar simple_body_product: Simple body product to put.
    :vartype simple_body_product: ~modelflattening.models.SimpleProduct
    :ivar product_id: Required. Unique identifier representing a specific product for a given
     latitude & longitude. For example, uberX in San Francisco will have a different product_id
     than uberX in Los Angeles.
    :vartype product_id: str
    :ivar description: Description of product.
    :vartype description: str
    :ivar max_product_display_name: Display name of product.
    :vartype max_product_display_name: str
    :ivar capacity: Capacity of product. For example, 4 people. The only acceptable values to
     pass in are None and "Large". The default value is None.
    :vartype capacity: str
    :ivar generic_value: Generic URL value.
    :vartype generic_value: str
    :ivar odata_value: URL value.
    :vartype odata_value: str
    """

    _validation = {
        "name": {"required": True},
        "product_id": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "simple_body_product": {"key": "SimpleBodyProduct", "type": "SimpleProduct"},
        "product_id": {"key": "productId", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "max_product_display_name": {"key": "max_product_display_name", "type": "str"},
        "capacity": {"key": "capacity", "type": "str"},
        "generic_value": {"key": "generic_value", "type": "str"},
        "odata_value": {"key": "@odata\\.value", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: str,
        product_id: str,
        simple_body_product: Optional["SimpleProduct"] = None,
        description: Optional[str] = None,
        max_product_display_name: Optional[str] = None,
        capacity: Optional[str] = None,
        generic_value: Optional[str] = None,
        odata_value: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Product name with value 'groupproduct'.
        :paramtype name: str
        :keyword simple_body_product: Simple body product to put.
        :paramtype simple_body_product: ~modelflattening.models.SimpleProduct
        :keyword product_id: Required. Unique identifier representing a specific product for a
         given latitude & longitude. For example, uberX in San Francisco will have a different
         product_id than uberX in Los Angeles.
        :paramtype product_id: str
        :keyword description: Description of product.
        :paramtype description: str
        :keyword max_product_display_name: Display name of product.
        :paramtype max_product_display_name: str
        :keyword capacity: Capacity of product. For example, 4 people. The only acceptable values
         to pass in are None and "Large". The default value is None.
        :paramtype capacity: str
        :keyword generic_value: Generic URL value.
        :paramtype generic_value: str
        :keyword odata_value: URL value.
        :paramtype odata_value: str
        """
        super(FlattenParameterGroup, self).__init__(**kwargs)
        self.name = name
        self.simple_body_product = simple_body_product
        self.product_id = product_id
        self.description = description
        self.max_product_display_name = max_product_display_name
        self.capacity = capacity
        self.generic_value = generic_value
        self.odata_value = odata_value


class GenericUrl(msrest.serialization.Model):
    """The Generic URL.

    :ivar generic_value: Generic URL value.
    :vartype generic_value: str
    """

    _attribute_map = {
        "generic_value": {"key": "generic_value", "type": "str"},
    }

    def __init__(self, *, generic_value: Optional[str] = None, **kwargs):
        """
        :keyword generic_value: Generic URL value.
        :paramtype generic_value: str
        """
        super(GenericUrl, self).__init__(**kwargs)
        self.generic_value = generic_value


class ProductUrl(GenericUrl):
    """The product URL.

    :ivar generic_value: Generic URL value.
    :vartype generic_value: str
    :ivar odata_value: URL value.
    :vartype odata_value: str
    """

    _attribute_map = {
        "generic_value": {"key": "generic_value", "type": "str"},
        "odata_value": {"key": "@odata\\.value", "type": "str"},
    }

    def __init__(self, *, generic_value: Optional[str] = None, odata_value: Optional[str] = None, **kwargs):
        """
        :keyword generic_value: Generic URL value.
        :paramtype generic_value: str
        :keyword odata_value: URL value.
        :paramtype odata_value: str
        """
        super(ProductUrl, self).__init__(generic_value=generic_value, **kwargs)
        self.odata_value = odata_value


class ProductWrapper(msrest.serialization.Model):
    """The wrapped produc.

    :ivar value: the product value.
    :vartype value: str
    """

    _attribute_map = {
        "value": {"key": "property.value", "type": "str"},
    }

    def __init__(self, *, value: Optional[str] = None, **kwargs):
        """
        :keyword value: the product value.
        :paramtype value: str
        """
        super(ProductWrapper, self).__init__(**kwargs)
        self.value = value


class ResourceCollection(msrest.serialization.Model):
    """ResourceCollection.

    :ivar productresource: Flattened product.
    :vartype productresource: ~modelflattening.models.FlattenedProduct
    :ivar arrayofresources:
    :vartype arrayofresources: list[~modelflattening.models.FlattenedProduct]
    :ivar dictionaryofresources: Dictionary of :code:`<FlattenedProduct>`.
    :vartype dictionaryofresources: dict[str, ~modelflattening.models.FlattenedProduct]
    """

    _attribute_map = {
        "productresource": {"key": "productresource", "type": "FlattenedProduct"},
        "arrayofresources": {"key": "arrayofresources", "type": "[FlattenedProduct]"},
        "dictionaryofresources": {"key": "dictionaryofresources", "type": "{FlattenedProduct}"},
    }

    def __init__(
        self,
        *,
        productresource: Optional["FlattenedProduct"] = None,
        arrayofresources: Optional[List["FlattenedProduct"]] = None,
        dictionaryofresources: Optional[Dict[str, "FlattenedProduct"]] = None,
        **kwargs
    ):
        """
        :keyword productresource: Flattened product.
        :paramtype productresource: ~modelflattening.models.FlattenedProduct
        :keyword arrayofresources:
        :paramtype arrayofresources: list[~modelflattening.models.FlattenedProduct]
        :keyword dictionaryofresources: Dictionary of :code:`<FlattenedProduct>`.
        :paramtype dictionaryofresources: dict[str, ~modelflattening.models.FlattenedProduct]
and dummy_input_on_gpu is not specified. """ result = self._apply_helper(self._auto_quant_main, fp32_model, dummy_input_on_cpu, dummy_input_on_gpu, results_dir, cache_id) return result["model"],\ result["accuracy"],\ result["encoding_path"] def _apply_helper( self, auto_quant_main_fn: Callable, fp32_model: torch.nn.Module, dummy_input_on_cpu: Union[torch.Tensor, Tuple], dummy_input_on_gpu: Optional[Union[torch.Tensor, Tuple]] = None, results_dir: str = "/tmp", cache_id: str = None, ) -> Dict[str, Any]: """ Helper for self.apply(). :param auto_quant_main_fn: Function that implements the main logic of AutoQuant. :param fp32_model: Model to apply PTQ techniques. :param dummy_input_on_cpu: Dummy input to the model in CPU memory. :param dummy_input_on_gpu: Dummy input to the model in GPU memory. This parameter is required if and only if the fp32_model is on GPU. :param results_dir: Directory to save the results. :param cache_id: A string that composes a cache id in combination with results_dir. If specified, AutoQuant will load/save the PTQ results from/to the file system if previous PTQ results produced under the same results_dir and cache_id exist, :return: The best ptq result as a dictionary. :raises: - ValueError if the model is on GPU and dummy_input_on_gpu is not specified. """ results_dir = os.path.abspath(results_dir) os.makedirs(results_dir, exist_ok=True) if utils.get_device(fp32_model) == torch.device("cpu"): dummy_input = dummy_input_on_cpu else: if dummy_input_on_gpu is None: raise ValueError( "If model is placed on GPU, dummy_input_on_gpu must be also provided." 
) dummy_input = dummy_input_on_gpu if cache_id is None: cache_dir = None else: cache_dir = os.path.join(results_dir, ".auto_quant_cache", cache_id) with in_eval_mode(fp32_model): with cache.enable(cache_dir): _logger.info("Starting AutoQuant") fp32_acc = self._evaluate_model_performance(fp32_model) target_acc = fp32_acc - self.allowed_accuracy_drop _logger.info("Target eval score: %.02f", target_acc) _logger.info("FP32 eval score (W32A32): %.02f", fp32_acc) eval_manager = _EvalManager( quantsim_factory=self._create_quantsim_and_encodings, eval_func=self._evaluate_model_performance, dummy_input=dummy_input, dummy_input_on_cpu=dummy_input_on_cpu, results_dir=results_dir, ) ret = auto_quant_main_fn(fp32_model, target_acc, dummy_input, eval_manager, results_dir) acc = ret["accuracy"] _logger.info("Best eval score: %.02f", acc) if acc < target_acc: _logger.info( "AutoQuant is unable to match the target accuracy. " "Consider Quantization Aware Training." ) eval_manager.export_diagnostics() return ret def _auto_quant_main( self, fp32_model: torch.nn.Module, target_acc: float, dummy_input: Union[torch.Tensor, Tuple], eval_manager: "_EvalManager", results_dir: str = "/tmp", ) -> Dict[str, Any]: """ Helper function of apply(). :param fp32_model: Model to apply PTQ techniques. :param target_acc: Target eval score. :param dummy_input: Dummy input to the model. The device of dumyy_input should be same as that of model. :param eval_manager: _Evalmanager object. :param results_dir: Directory to save the results. :return: The best ptq result as a dictionary. 
""" with eval_manager.analysis_session("Weight Quantization Sensitivity") as sess: acc = sess.eval(fp32_model, default_output_bw=32) sess.diagnostics.add( f"Weight-quantized eval score (W{self.default_param_bw}A32): {acc:.02f}" ) with eval_manager.analysis_session("Activation Quantization Sensitivity") as sess: acc = sess.eval(fp32_model, default_param_bw=32) sess.diagnostics.add( f"Activation-quantized eval score (W32A{self.default_output_bw}): {acc:.02f}" ) # Batchnorm Folding with eval_manager.ptq_session("Batchnorm Folding") as sess: model, folded_pairs = self._apply_batchnorm_folding(fp32_model, dummy_input) for conv, bn in folded_pairs: sess.diagnostics.add(f"{conv} was merged with {bn}.") sess.set_ptq_result(model=model, applied_techniques=["batchnorm_folding"]) best_result = eval_manager.get_best_ptq_result() if best_result.accuracy >= target_acc: return best_result.as_dict() # Cross-Layer Equalization with eval_manager.ptq_session("Cross-Layer Equalization") as sess: model = self._apply_cross_layer_equalization(fp32_model, dummy_input) sess.set_ptq_result(model=model, applied_techniques=["cross_layer_equalization"]) best_result = eval_manager.get_best_ptq_result() if best_result.accuracy >= target_acc: return best_result.as_dict() # AdaRound with eval_manager.ptq_session("AdaRound") as sess: model, encoding_path = self._apply_adaround(best_result.load_model(), dummy_input, results_dir) sess.set_ptq_result(model=model, encoding_path=encoding_path, applied_techniques=[*best_result.applied_techniques, "adaround"]) return eval_manager.get_best_ptq_result().as_dict() @dataclass class PtqResult: """ Evaluation results. :param tag: Identifier string of the evaluation result. :param model_path: Path to the serialized model. :param encoding_path: Path to the encoding file. :param accuracy: Accuracy of the model. 
""" model_path: str device: torch.device encoding_path: str accuracy: float applied_techniques: List[str] def load_model(self) -> torch.nn.Module: """ Load model. :return: Loaded model. """ return torch.load(self.model_path).to(self.device) def as_dict(self): """Convert to dictionary""" return dict(model=self.load_model(), accuracy=self.accuracy, encoding_path=self.encoding_path, applied_techniques=self.applied_techniques) class _EvalManager: """ Evaluation manager for AutoQuant. """ def __init__(self, quantsim_factory: Callable, eval_func: Callable[[torch.nn.Module], float], dummy_input: Union[torch.Tensor, Tuple], dummy_input_on_cpu: Union[torch.Tensor, Tuple], results_dir: str): """ :param quantsim_factory: A factory function that returns QuantizationSimModel. :param eval_func: Evaluation function. :param dummy_input: Dummy input to the model. Assumed to be located on the same device as the model. :param dummy_input_on_cpu: Dummy input to the model in CPU memory. :param results_dir: Base directory to save the temporary serialized model. """ self._quantsim_factory = quantsim_factory self._eval_func = eval_func self._dummy_input = dummy_input self._dummy_input_on_cpu = dummy_input_on_cpu self._results_dir = results_dir os.makedirs(self._results_dir, exist_ok=True) self._all_sessions: List[_EvalSession] = [] self._ptq_sessions: List[_PtqSession] = [] def get_best_ptq_result(self) -> PtqResult: """ Get the results with the highest evaluation score among the ptq results evaluated so far. :return: The best evaluation result so far. """ if not self._ptq_sessions: raise RuntimeError ptq_results = [sess.ptq_result for sess in self._ptq_sessions] return max(ptq_results, key=lambda ptq_result: ptq_result.accuracy) def analysis_session(self, title: str) -> "_EvalSession": """ Return a session for analysis only. :param title: Title of the session. :return: Analysis session. 
""" return self._get_session(title, _EvalSession) def ptq_session(self, title: str) -> "_PtqSession": """ Return a session for analysis only. :param title: Title of the session. :return: PTQ session. """ sess = self._get_session(title, _PtqSession) self._ptq_sessions.append(sess) return sess def _get_session(self, title: str, session_cls: type): """ Session factory. :param title: Title of the session. :session_cls: Class of the session. :return: Session object. """ session = session_cls(title, self._quantsim_factory, self._eval_func, self._dummy_input, self._dummy_input_on_cpu, results_dir=os.path.join(self._results_dir, ".trace")) self._all_sessions.append(session) return session def export_diagnostics(self) -> str: """ Export diagnostics in html format. :return: Diagnostics string in html format. """ loader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) env = jinja2.Environment(loader=loader) template = env.get_template("auto_quant_diagnostics_template.html") if any(sess.diagnostics.contains_bokeh() for sess in self._all_sessions): from bokeh.resources import CDN head = CDN.render() else: head = "" body = { sess.title: sess.diagnostics for sess in self._all_sessions if not sess.diagnostics.is_empty() } html = template.render(head=head, body=body) filename = os.path.join(self._results_dir, "diagnostics.html") with open(filename, "w") as f: f.write(html) return html class _EvalSession: """ Evaluation session for AutoQuant. Each session object contains a title and diagnostics produced during the session. The collected diagnostics will be exported into a html file by _EvalManager. """ def __init__( self, title: str, quantsim_factory: Callable, eval_func: Callable[[torch.nn.Module], float], dummy_input: Union[torch.Tensor, Tuple], dummy_input_on_cpu: Union[torch.Tensor, Tuple], results_dir: str ): """ :param title: Title of the session. :param quantsim_factory: A factory function that returns QuantizationSimModel. 
:param eval_func: Evaluation function. :param dummy_input: Dummy input to the model. Assumed to be located on the same device as the model. :param dummy_input_on_cpu: Dummy input to the model in CPU memory. :param results_dir: Base directory to save the temporary serialized model. """ self._title = title self._quantsim_factory = quantsim_factory self._eval_func = eval_func self._dummy_input = dummy_input self._dummy_input_on_cpu = dummy_input_on_cpu self._results_dir = results_dir self._spinner = None os.makedirs(self._results_dir, exist_ok=True) self._diagnostics = Diagnostics() # Map session title to file name. # e.g. title: "Cross-Layer Equalization" -> filename: "cross_layer_equalization" self._filename = self._title.lower().replace("-", " ") self._filename = "_".join(self._filename.split()) @property def title(self): """Getter of self._title.""" return self._title @property def diagnostics(self): """Getter of self._diagnostics.""" return self._diagnostics def eval(self, model: torch.nn.Module, **kwargs): """ Evaluate the model. :param model: Model to evaluate. :param **kwargs: Additional arguments to the quantsim factory. :return: Eval score. """ sim = self._quantsim_factory(model, self._dummy_input, **kwargs) acc = self._eval_func(sim.model) return acc def __enter__(self): self._spinner = Spinner(self._title) self._spinner.__enter__() return self def __exit__(self, exc_type, exc_val, exc_tb): try: if self._spinner is not None: self._spinner.__exit__(exc_type, exc_val, exc_tb) finally: if exc_val is not None: raise exc_val class _PtqSession(_EvalSession): """ PTQ session. Each PTQ session object should call `set_ptq_result` exactly once inside a with-as block. 
    """
    def __init__(self, *args, **kwargs):
        super(_PtqSession, self).__init__(*args, **kwargs)
        # Set exactly once via set_ptq_result(); reading it earlier raises.
        self._ptq_result = None

    @property
    def ptq_result(self) -> PtqResult:
        """Getter of self._ptq_result."""
        if self._ptq_result is None:
            # set_ptq_result() was never called for this session.
            raise RuntimeError
        return self._ptq_result

    def set_ptq_result(
            self,
            applied_techniques: List[str],
            model: torch.nn.Module = None,
            sim: QuantizationSimModel = None,
            acc: float = None,
            **kwargs
    ) -> None:
        """
        Set the result of PTQ. Should be called exactly once inside a with-as block.

        Exactly one among model and (sim, acc) pair should be specified.
        1) If sim and acc is specified, save them as the result of this session.
        2) If model is specified, evaluate the quantized accuracy of the model and save the result.

        :param applied_techniques: Techniques applied to produce this result.
        :param model: Result of PTQ.
        :param sim: Result of PTQ. The quantization encoding (compute_encodings())
                    is assumed to have been computed in advance.
        :param acc: Eval score.
        :param **kwargs: Additional arguments to the quantsim factory.
        :return: None
        """
        if sim is None:
            # Only `model` given: build the quantsim and measure accuracy here.
            assert acc is None
            assert model is not None
            sim = self._quantsim_factory(model, self._dummy_input, **kwargs)
            acc = self._eval_func(sim.model)
        else:
            # (sim, acc) given: `model` must not also be specified.
            assert acc is not None
            assert model is None

        self._set_ptq_result(sim, acc, applied_techniques)

    def _set_ptq_result(
            self,
            sim: QuantizationSimModel,
            acc:
<= 1)
# NOTE(review): machine-generated Pyomo constraints. For ordered index pairs
# (i, j) with 344 <= i < j <= 362 they enforce m.b_i - m.b_j + m.b_k <= 1,
# where the auxiliary index k and the constraint number both increase by one
# per constraint (k = constraint_number - 4380). Do not edit by hand.
m.c4764 = Constraint(expr= m.b344 - m.b347 + m.b384 <= 1)
m.c4765 = Constraint(expr= m.b344 - m.b348 + m.b385 <= 1)
m.c4766 = Constraint(expr= m.b344 - m.b349 + m.b386 <= 1)
m.c4767 = Constraint(expr= m.b344 - m.b350 + m.b387 <= 1)
m.c4768 = Constraint(expr= m.b344 - m.b351 + m.b388 <= 1)
m.c4769 = Constraint(expr= m.b344 - m.b352 + m.b389 <= 1)
m.c4770 = Constraint(expr= m.b344 - m.b353 + m.b390 <= 1)
m.c4771 = Constraint(expr= m.b344 - m.b354 + m.b391 <= 1)
m.c4772 = Constraint(expr= m.b344 - m.b355 + m.b392 <= 1)
m.c4773 = Constraint(expr= m.b344 - m.b356 + m.b393 <= 1)
m.c4774 = Constraint(expr= m.b344 - m.b357 + m.b394 <= 1)
m.c4775 = Constraint(expr= m.b344 - m.b358 + m.b395 <= 1)
m.c4776 = Constraint(expr= m.b344 - m.b359 + m.b396 <= 1)
m.c4777 = Constraint(expr= m.b344 - m.b360 + m.b397 <= 1)
m.c4778 = Constraint(expr= m.b344 - m.b361 + m.b398 <= 1)
m.c4779 = Constraint(expr= m.b344 - m.b362 + m.b399 <= 1)
m.c4780 = Constraint(expr= m.b345 - m.b346 + m.b400 <= 1)
m.c4781 = Constraint(expr= m.b345 - m.b347 + m.b401 <= 1)
m.c4782 = Constraint(expr= m.b345 - m.b348 + m.b402 <= 1)
m.c4783 = Constraint(expr= m.b345 - m.b349 + m.b403 <= 1)
m.c4784 = Constraint(expr= m.b345 - m.b350 + m.b404 <= 1)
m.c4785 = Constraint(expr= m.b345 - m.b351 + m.b405 <= 1)
m.c4786 = Constraint(expr= m.b345 - m.b352 + m.b406 <= 1)
m.c4787 = Constraint(expr= m.b345 - m.b353 + m.b407 <= 1)
m.c4788 = Constraint(expr= m.b345 - m.b354 + m.b408 <= 1)
m.c4789 = Constraint(expr= m.b345 - m.b355 + m.b409 <= 1)
m.c4790 = Constraint(expr= m.b345 - m.b356 + m.b410 <= 1)
m.c4791 = Constraint(expr= m.b345 - m.b357 + m.b411 <= 1)
m.c4792 = Constraint(expr= m.b345 - m.b358 + m.b412 <= 1)
m.c4793 = Constraint(expr= m.b345 - m.b359 + m.b413 <= 1)
m.c4794 = Constraint(expr= m.b345 - m.b360 + m.b414 <= 1)
m.c4795 = Constraint(expr= m.b345 - m.b361 + m.b415 <= 1)
m.c4796 = Constraint(expr= m.b345 - m.b362 + m.b416 <= 1)
m.c4797 = Constraint(expr= m.b346 - m.b347 + m.b417 <= 1)
m.c4798 = Constraint(expr= m.b346 - m.b348 + m.b418 <= 1)
m.c4799 = Constraint(expr= m.b346 - m.b349 + m.b419 <= 1)
m.c4800 = Constraint(expr= m.b346 - m.b350 + m.b420 <= 1)
m.c4801 = Constraint(expr= m.b346 - m.b351 + m.b421 <= 1)
m.c4802 = Constraint(expr= m.b346 - m.b352 + m.b422 <= 1)
m.c4803 = Constraint(expr= m.b346 - m.b353 + m.b423 <= 1)
m.c4804 = Constraint(expr= m.b346 - m.b354 + m.b424 <= 1)
m.c4805 = Constraint(expr= m.b346 - m.b355 + m.b425 <= 1)
m.c4806 = Constraint(expr= m.b346 - m.b356 + m.b426 <= 1)
m.c4807 = Constraint(expr= m.b346 - m.b357 + m.b427 <= 1)
m.c4808 = Constraint(expr= m.b346 - m.b358 + m.b428 <= 1)
m.c4809 = Constraint(expr= m.b346 - m.b359 + m.b429 <= 1)
m.c4810 = Constraint(expr= m.b346 - m.b360 + m.b430 <= 1)
m.c4811 = Constraint(expr= m.b346 - m.b361 + m.b431 <= 1)
m.c4812 = Constraint(expr= m.b346 - m.b362 + m.b432 <= 1)
m.c4813 = Constraint(expr= m.b347 - m.b348 + m.b433 <= 1)
m.c4814 = Constraint(expr= m.b347 - m.b349 + m.b434 <= 1)
m.c4815 = Constraint(expr= m.b347 - m.b350 + m.b435 <= 1)
m.c4816 = Constraint(expr= m.b347 - m.b351 + m.b436 <= 1)
m.c4817 = Constraint(expr= m.b347 - m.b352 + m.b437 <= 1)
m.c4818 = Constraint(expr= m.b347 - m.b353 + m.b438 <= 1)
m.c4819 = Constraint(expr= m.b347 - m.b354 + m.b439 <= 1)
m.c4820 = Constraint(expr= m.b347 - m.b355 + m.b440 <= 1)
m.c4821 = Constraint(expr= m.b347 - m.b356 + m.b441 <= 1)
m.c4822 = Constraint(expr= m.b347 - m.b357 + m.b442 <= 1)
m.c4823 = Constraint(expr= m.b347 - m.b358 + m.b443 <= 1)
m.c4824 = Constraint(expr= m.b347 - m.b359 + m.b444 <= 1)
m.c4825 = Constraint(expr= m.b347 - m.b360 + m.b445 <= 1)
m.c4826 = Constraint(expr= m.b347 - m.b361 + m.b446 <= 1)
m.c4827 = Constraint(expr= m.b347 - m.b362 + m.b447 <= 1)
m.c4828 = Constraint(expr= m.b348 - m.b349 + m.b448 <= 1)
m.c4829 = Constraint(expr= m.b348 - m.b350 + m.b449 <= 1)
m.c4830 = Constraint(expr= m.b348 - m.b351 + m.b450 <= 1)
m.c4831 = Constraint(expr= m.b348 - m.b352 + m.b451 <= 1)
m.c4832 = Constraint(expr= m.b348 - m.b353 + m.b452 <= 1)
m.c4833 = Constraint(expr= m.b348 - m.b354 + m.b453 <= 1)
m.c4834 = Constraint(expr= m.b348 - m.b355 + m.b454 <= 1)
m.c4835 = Constraint(expr= m.b348 - m.b356 + m.b455 <= 1)
m.c4836 = Constraint(expr= m.b348 - m.b357 + m.b456 <= 1)
m.c4837 = Constraint(expr= m.b348 - m.b358 + m.b457 <= 1)
m.c4838 = Constraint(expr= m.b348 - m.b359 + m.b458 <= 1)
m.c4839 = Constraint(expr= m.b348 - m.b360 + m.b459 <= 1)
m.c4840 = Constraint(expr= m.b348 - m.b361 + m.b460 <= 1)
m.c4841 = Constraint(expr= m.b348 - m.b362 + m.b461 <= 1)
m.c4842 = Constraint(expr= m.b349 - m.b350 + m.b462 <= 1)
m.c4843 = Constraint(expr= m.b349 - m.b351 + m.b463 <= 1)
m.c4844 = Constraint(expr= m.b349 - m.b352 + m.b464 <= 1)
m.c4845 = Constraint(expr= m.b349 - m.b353 + m.b465 <= 1)
m.c4846 = Constraint(expr= m.b349 - m.b354 + m.b466 <= 1)
m.c4847 = Constraint(expr= m.b349 - m.b355 + m.b467 <= 1)
m.c4848 = Constraint(expr= m.b349 - m.b356 + m.b468 <= 1)
m.c4849 = Constraint(expr= m.b349 - m.b357 + m.b469 <= 1)
m.c4850 = Constraint(expr= m.b349 - m.b358 + m.b470 <= 1)
m.c4851 = Constraint(expr= m.b349 - m.b359 + m.b471 <= 1)
m.c4852 = Constraint(expr= m.b349 - m.b360 + m.b472 <= 1)
m.c4853 = Constraint(expr= m.b349 - m.b361 + m.b473 <= 1)
m.c4854 = Constraint(expr= m.b349 - m.b362 + m.b474 <= 1)
m.c4855 = Constraint(expr= m.b350 - m.b351 + m.b475 <= 1)
m.c4856 = Constraint(expr= m.b350 - m.b352 + m.b476 <= 1)
m.c4857 = Constraint(expr= m.b350 - m.b353 + m.b477 <= 1)
m.c4858 = Constraint(expr= m.b350 - m.b354 + m.b478 <= 1)
m.c4859 = Constraint(expr= m.b350 - m.b355 + m.b479 <= 1)
m.c4860 = Constraint(expr= m.b350 - m.b356 + m.b480 <= 1)
m.c4861 = Constraint(expr= m.b350 - m.b357 + m.b481 <= 1)
m.c4862 = Constraint(expr= m.b350 - m.b358 + m.b482 <= 1)
m.c4863 = Constraint(expr= m.b350 - m.b359 + m.b483 <= 1)
m.c4864 = Constraint(expr= m.b350 - m.b360 + m.b484 <= 1)
m.c4865 = Constraint(expr= m.b350 - m.b361 + m.b485 <= 1)
m.c4866 = Constraint(expr= m.b350 - m.b362 + m.b486 <= 1)
m.c4867 = Constraint(expr= m.b351 - m.b352 + m.b487 <= 1)
m.c4868 = Constraint(expr= m.b351 - m.b353 + m.b488 <= 1)
m.c4869 = Constraint(expr= m.b351 - m.b354 + m.b489 <= 1)
m.c4870 = Constraint(expr= m.b351 - m.b355 + m.b490 <= 1)
m.c4871 = Constraint(expr= m.b351 - m.b356 + m.b491 <= 1)
m.c4872 = Constraint(expr= m.b351 - m.b357 + m.b492 <= 1)
m.c4873 = Constraint(expr= m.b351 - m.b358 + m.b493 <= 1)
m.c4874 = Constraint(expr= m.b351 - m.b359 + m.b494 <= 1)
m.c4875 = Constraint(expr= m.b351 - m.b360 + m.b495 <= 1)
m.c4876 = Constraint(expr= m.b351 - m.b361 + m.b496 <= 1)
m.c4877 = Constraint(expr= m.b351 - m.b362 + m.b497 <= 1)
m.c4878 = Constraint(expr= m.b352 - m.b353 + m.b498 <= 1)
m.c4879 = Constraint(expr= m.b352 - m.b354 + m.b499 <= 1)
m.c4880 = Constraint(expr= m.b352 - m.b355 + m.b500 <= 1)
m.c4881 = Constraint(expr= m.b352 - m.b356 + m.b501 <= 1)
m.c4882 = Constraint(expr= m.b352 - m.b357 + m.b502 <= 1)
m.c4883 = Constraint(expr= m.b352 - m.b358 + m.b503 <= 1)
m.c4884 = Constraint(expr= m.b352 - m.b359 + m.b504 <= 1)
m.c4885 = Constraint(expr= m.b352 - m.b360 + m.b505 <= 1)
m.c4886 = Constraint(expr= m.b352 - m.b361 + m.b506 <= 1)
m.c4887 = Constraint(expr= m.b352 - m.b362 + m.b507 <= 1)
m.c4888 = Constraint(expr= m.b353 - m.b354 + m.b508 <= 1)
m.c4889 = Constraint(expr= m.b353 - m.b355 + m.b509 <= 1)
m.c4890 = Constraint(expr= m.b353 - m.b356 + m.b510 <= 1)
m.c4891 = Constraint(expr= m.b353 - m.b357 + m.b511 <= 1)
m.c4892 = Constraint(expr= m.b353 - m.b358 + m.b512 <= 1)
m.c4893 = Constraint(expr= m.b353 - m.b359 + m.b513 <= 1)
m.c4894 = Constraint(expr= m.b353 - m.b360 + m.b514 <= 1)
m.c4895 = Constraint(expr= m.b353 - m.b361 + m.b515 <= 1)
m.c4896 = Constraint(expr= m.b353 - m.b362 + m.b516 <= 1)
m.c4897 = Constraint(expr= m.b354 - m.b355 + m.b517 <= 1)
m.c4898 = Constraint(expr= m.b354 - m.b356 + m.b518 <= 1)
m.c4899 = Constraint(expr= m.b354 - m.b357 + m.b519 <= 1)
m.c4900 = Constraint(expr= m.b354 - m.b358 + m.b520 <= 1)
m.c4901 = Constraint(expr= m.b354 - m.b359 + m.b521 <= 1)
m.c4902 = Constraint(expr= m.b354 - m.b360 + m.b522 <= 1)
m.c4903 = Constraint(expr= m.b354 - m.b361 + m.b523 <= 1)
m.c4904 = Constraint(expr= m.b354 - m.b362 + m.b524 <= 1)
m.c4905 = Constraint(expr= m.b355 - m.b356 + m.b525 <= 1)
m.c4906
        a scalar valued polynomial), self * other.

        :param other: Polynomial, scalar or vector we should multiply this polynomial with.
        :type: PolynomialLagrange, scalar or vector
        :return: Product of this polynomial with other.
        :rtype: :class:`PolynomialLagrange`.
        """
        if isinstance(other, numbers.Number) or isinstance(other, np.ndarray):
            return self.multiply_with_constant(other)
        # Multiplication of two polynomials
        # Multiplied polynomials need to have the same domain dimension
        assert self.domain_dimension() == other.domain_dimension()
        # Cannot multiply two vector valued polynomials
        assert self.target_dimension() == 1
        assert other.target_dimension() == 1
        m = self.domain_dimension()
        # The product has the sum of the two degrees.
        r = self.degree() + other.degree()
        dim = get_dimension(r, m)
        coeff = np.empty(dim)
        # In the Lagrange basis a coefficient equals the polynomial's value at the
        # corresponding Lagrange point, so the product's coefficients are obtained
        # by evaluating both factors at the degree-r Lagrange points.
        x = generate_lagrange_points(m, r)
        for i in range(len(x)):
            coeff[i] = self(x[i]) * other(x[i])
        return PolynomialLagrange(coeff, r, m)

    def __pow__(self, exp):
        r"""
        Raise the polynomial to a power.

        .. math::

            (l^{\mu})(x) = l(x)^{\mu} = l_1(x)^{\mu_1} l_2(x)^{\mu_2} \ldots l_n(x)^{\mu_n}.

        :param exp: Power we want the raise the polynomial to (natural number or multi-index
            depending on the dimension of the target of the polynomial).
        :type exp: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
        :return: This polynomial raised to the given power.
        :rtype: :class:`PolynomialLagrange`.
""" if isinstance(exp, numbers.Integral): assert exp >= 0 assert self.target_dimension() == 1 if exp == 0: return unit_polynomial(0, self.m) if exp == 1: return PolynomialLagrange(self.coeff, self.r, self.m) return self * self**(exp - 1) else: assert len(exp) == self.target_dimension() assert [entry >= 0 for entry in exp] m = self.domain_dimension() r = self.degree() * multiindex.norm(exp) dim = get_dimension(r, m) coeff = np.empty(dim) # Get the coefficients by applying the dual basis (evaluate at # Lagrange points) to the exponentiated polynomial x = generate_lagrange_points(m, r) for i in range(len(x)): coeff[i] = multiindex.power(self(x[i]), exp) return PolynomialLagrange(coeff, r, m) def partial_derivative(self, i=0): """ Compute the i:th partial derivative of the polynomial. :param int i: Index of partial derivative. :return: i:th partial derivative of this polynomial. :rtype: :class:`PolynomialLagrange`. """ assert isinstance(i, numbers.Integral) assert i >= 0 m = self.domain_dimension() n = self.target_dimension() assert i < m r = self.degree() if r == 0: return zero_polynomial(0, m, n) dim = get_dimension(r - 1, m) if n == 1: coeff = np.zeros(dim) else: coeff = np.zeros((dim, n)) # Express polynomial in monomial basis p = self.to_monomial_basis() # Compute derivative for the polynomial in the monomial basis dp = p.partial_derivative(i) # Convert the derivative to the Lagrange basis x = generate_lagrange_points(m, r - 1) for j in range(len(x)): coeff[j] = dp(x[j]) return PolynomialLagrange(coeff, r - 1, m) def degree_elevate(self, s): r""" Express the polynomial using a higher degree basis. Let :math:`p(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}} a_{\nu} l_{\nu, r}(x)` be this polynomial, where :math:`\{ l_{\nu, r} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}}` is the Lagrange basis for :math:`\mathcal{P}_r (\mathbb{R}^m)`. 
        Let :math:`\{ l_{\nu, s} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq s}}, s \geq r`
        be the Lagrange basis for :math:`\mathcal{P}_s (\mathbb{R}^m)`. Then this function returns
        a polynomial :math:`q(x)`

        .. math:: q(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq s}} \tilde{a}_{\nu} l_{\nu, s}(x),

        such that :math:`p(x) = q(x) \, \forall x \in \Delta_c^m`.

        :param int s: New degree for the polynomial basis the polynomial should be expressed in.
        :return: Elevation of this polynomial to the higher degree basis.
        :rtype: :class:`PolynomialLagrange`.
        """
        assert s >= self.degree()
        m = self.domain_dimension()
        n = self.target_dimension()
        r = self.degree()
        if s == self.degree():
            # Nothing to do; return a copy in the same basis.
            return PolynomialLagrange(self.coeff, r, m)
        dim = get_dimension(s, m)
        if n == 1:
            coeff = np.zeros(dim)
        else:
            coeff = np.zeros((dim, n))
        # New coefficients are obtained by applying the degree-s dual basis to this polynomial.
        dual_basis = dual_lagrange_basis(s, m)
        for i in range(len(dual_basis)):
            coeff[i] = dual_basis[i](self)
        return PolynomialLagrange(coeff, s, m)

    def to_monomial_basis(self):
        """
        Compute the monomial representation of this polynomial.

        :return: This polynomial expressed in the monomial basis.
        :rtype: :class:`~polynomials_on_simplices.polynomial.polynomials_monomial_basis.Polynomial`.
        """
        return sum([b * a for (a, b) in zip(self.coeff, self._basis_polynomials_monomial_form[self.m, self.r])],
                   zero_polynomial_monomial(0, self.m, self.n))

    # Cache of Lagrange basis polynomials expressed in monomial form, keyed by (m, r).
    # NOTE(review): presumably populated elsewhere in this class/module -- verify.
    _basis_polynomials_monomial_form = {}

    def latex_str(self):
        r"""
        Generate a Latex string for this polynomial.

        :return: Latex string for this polynomial.
        :rtype: str
        """
        try:
            # EAFP: len() succeeds only for array valued coefficients (vector valued polynomial).
            len(self.coeff[0])
            coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]
            basis_strs = lagrange_basis_latex_compact(self.r, self.m)
            return str_dot_product(coeff_strs, basis_strs)
        except TypeError:
            # Scalar valued polynomial.
            coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]
            basis_strs = lagrange_basis_latex_compact(self.r, self.m)
            return str_dot_product(coeff_strs, basis_strs)

    def latex_str_expanded(self):
        r"""
        Generate a Latex string for this polynomial, where each basis function has been expanded in
        the monomial basis.

        :return: Latex string for this polynomial.
        :rtype: str
        """
        try:
            # EAFP: len() succeeds only for array valued coefficients (vector valued polynomial).
            len(self.coeff[0])
            coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]
            basis_strs = lagrange_basis_latex(self.r, self.m)
            for i in range(len(basis_strs)):
                # Parenthesize multi-term basis expressions so the product renders unambiguously.
                if len(basis_strs[i]) > 3:
                    basis_strs[i] = "(" + basis_strs[i] + ")"
            return str_dot_product(coeff_strs, basis_strs)
        except TypeError:
            coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]
            basis_strs = lagrange_basis_latex(self.r, self.m)
            for i in range(len(basis_strs)):
                if len(basis_strs[i]) > 3:
                    basis_strs[i] = "(" + basis_strs[i] + ")"
            return str_dot_product(coeff_strs, basis_strs)

    def code_str(self, fn_name):
        r"""
        Generate a function code string for evaluating this polynomial.

        :param str fn_name: Name for the function in the generated code.
        :return: Code string for evaluating this polynomial.
        :rtype: str
        """
        # Rename the pre-generated evaluation code to the caller-supplied function name.
        return self._eval_code.replace(self._eval_fn_name, fn_name)


def lagrange_basis_fn(nu, r):
    r"""
    Generate a Lagrange basis polynomial on the unit simplex (:math:`\Delta_c^n`), where n is
    equal to the length of nu.

    :param nu: Multi-index indicating which Lagrange basis polynomial should be generated.
        The polynomial will have the value 1 at the point associated with the multi-index,
        and value 0 at all other points.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :return: The Lagrange base polynomial as specified by nu and r.
    :rtype: :class:`PolynomialLagrange`.

    .. rubric:: Examples

    >>> import sympy as sp
    >>> x1, x2 = sp.symbols('x1 x2')
    >>> lagrange_basis_fn(1, 1)(x1) - x1
    0
    >>> sp.simplify(lagrange_basis_fn(2, 2)(x1) - (2*x1**2 - x1))
    0
    >>> sp.simplify(lagrange_basis_fn((1, 1), 2)((x1, x2)) - 4*x1*x2)
    0
    """
    try:
        m = len(nu)
    except TypeError:
        # Scalar nu: treat as a 1-dimensional multi-index.
        m = 1
        nu = (nu,)
    dim = get_dimension(r, m)
    # A basis function has coefficient 1 at its own Lagrange point and 0 everywhere else.
    coeff = np.zeros(dim, dtype=int)
    i = multiindex.get_index(nu, r)
    coeff[i] = 1
    return PolynomialLagrange(coeff, r, m)


def lagrange_basis(r, n):
    r"""
    Generate all Lagrange base polynomials for the space :math:`\mathcal{P}_r(\Delta_c^n)`.

    :param int n: Dimension of the space.
    :param int r: Degree of the polynomial space.
    :return: List of base polynomials.
    :rtype: List[:class:`PolynomialLagrange`].
    """
    basis = []
    for mi in multiindex.MultiIndexIterator(n, r):
        basis.append(lagrange_basis_fn(mi, r))
    return basis


def vector_valued_lagrange_basis_fn(nu, r, i, n):
    r"""
    Generate a vector valued Lagrange basis polynomial on the m-dimensional unit simplex,
    :math:`l_{\nu, r, i} : \Delta_c^m \to \mathbb{R}^n`.

    The vector valued basis polynomial is generated by specifying a scalar valued basis
    polynomial and the component of the vector valued basis polynomial that should be equal
    to the scalar valued basis polynomial. All other components of the vector valued basis
    polynomial will be zero, i.e.

    .. math::

        l_{\nu, r, i}^j (x) = \begin{cases} l_{\nu, r} (x), & i = j \\ 0, & \text{else} \end{cases},

    where m is equal to the length of nu.

    :param nu: Multi-index indicating which scalar valued Lagrange basis polynomial should be
        generated for the non-zero component.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :param int i: Index of the vector component that is non-zero.
    :param int n: Dimension of the target.
    :return: The Lagrange base polynomial as specified by nu, r, i and n.
    :rtype: :class:`PolynomialLagrange`.

    .. rubric:: Examples

    >>> import sympy as sp
    >>> x1, x2 = sp.symbols('x1 x2')
    >>> vector_valued_lagrange_basis_fn(0, 1, 0, 2)(x1)
    array([-x1 + 1, 0], dtype=object)
    >>> vector_valued_lagrange_basis_fn(1, 1, 1, 2)(x1)
    array([0, x1], dtype=object)
    >>> vector_valued_lagrange_basis_fn((1, 0), 2, 0, 2)((x1, x2))
    array([-4*x1**2 - 4*x1*x2 + 4*x1, 0], dtype=object)
    >>> vector_valued_lagrange_basis_fn((1, 1), 3, 1, 3)((x1, x2))
    array([0, -27*x1**2*x2 - 27*x1*x2**2 + 27*x1*x2, 0], dtype=object)
    """
    if n == 1:
        # Scalar target: fall back to the scalar valued basis function.
        assert i == 0
        return lagrange_basis_fn(nu, r)
    assert i >= 0
    assert i < n
    try:
        m = len(nu)
    except TypeError:
        # Scalar nu: treat as a 1-dimensional multi-index.
        m = 1
        nu = (nu,)
    dim = get_dimension(r, m)
    # Only component i of the coefficient row for nu's Lagrange point is non-zero.
    coeff = np.zeros((dim, n), dtype=int)
    j = multiindex.get_index(nu, r)
    coeff[j][i] = 1
    return PolynomialLagrange(coeff, r, m)


def vector_valued_lagrange_basis(r, m, n, ordering="interleaved"):
    r"""
    Generate all Lagrange base polynomials for the space :math:`\mathcal{P}_r(\Delta_c^m, \mathbb{R}^n)`.

    :param int m: Dimension
# PyjProperties - Java Property file parser and writer for Python # # Copyright (c) 2015, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of PyjProperties nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function import codecs import itertools import functools import os import re import sys import time from collections import namedtuple import six # This represents a combination of a value and metadata for a property key. 
# This represents a combination of a value and metadata for a property key.
PropertyTuple = namedtuple("PropertyTuple", ["data", "meta"])


def _escape_non_ascii(unicode_obj):
    """
    Escape non-printable (or non-ASCII) characters using Java-compatible Unicode
    escape sequences.

    This function is based on code from the JSON library module shipped with
    Python 2.7.3 (json/encoder.py, function py_encode_basestring_ascii), which is
    Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation;
    All Rights Reserved.

    See the file LICENSE included with PyjProperties for the full license terms.
    If that file is not available, then please see:
    https://www.python.org/download/releases/2.7.3/license/

    Differences to the aforementioned original version of py_encode_basestring_ascii():
      - Always tries to decode str objects as UTF-8, even if they don't contain any
        UTF-8 characters. This is so that we always return an unicode object.
      - Only processes non-printable or non-ASCII characters. Also _always_ replaces
        these characters with Java-compatible Unicode escape sequences (the original
        function replaced e. g. newlines with "\n" etc.).
      - Does not wrap the resulting string in double quotes (").

    :type unicode_obj: unicode
    :param unicode_obj: The source string containing data to escape.
    :rtype : unicode
    :return: A unicode object. This does not contain any non-ASCII characters anymore.
    """
    def replace(match):
        # Translate one matched character into \\uXXXX escape(s).
        s = match.group(0)
        n = ord(s)
        if n < 0x10000:
            return u'\\u{0:04x}'.format(n)
        else:
            # surrogate pair
            n -= 0x10000
            s1 = 0xd800 | ((n >> 10) & 0x3ff)
            s2 = 0xdc00 | (n & 0x3ff)
            return u'\\u{0:04x}\\u{1:04x}'.format(s1, s2)

    # Just to be sure: If we get passed a str object, then try to decode it as UTF-8.
    if isinstance(unicode_obj, six.binary_type):
        unicode_obj = unicode_obj.decode('utf-8')

    # Replace every character outside the printable ASCII range (space to tilde).
    return re.sub(
        six.text_type(r'[^ -~]'),
        replace,
        unicode_obj
    )


# NOTE(review): codecs.register_error() returns None, so the module attribute ends
# up None after decoration -- the decorator is used purely for its registration
# side effect at import time.
@functools.partial(codecs.register_error, "jproperties.jbackslashreplace")
def _jbackslashreplace_error_handler(err):
    """
    Encoding error handler which replaces invalid characters with Java-compliant
    Unicode escape sequences.

    :param err: An `:exc:UnicodeEncodeError` instance.
    :return: See https://docs.python.org/2/library/codecs.html?highlight=codecs#codecs.register_error
    """
    if not isinstance(err, UnicodeEncodeError):
        # This handler only knows how to deal with encode errors.
        raise err

    return _escape_non_ascii(err.object[err.start:err.end]), err.end


def _escape_str(raw_str, only_leading_spaces=False, escape_non_printing=False, line_breaks_only=False):
    """
    Escape a string so that it can safely be written as a key/value to a property file.

    :type raw_str: unicode
    :param raw_str: The string to escape.
    :param only_leading_spaces: Controls which whitespace characters to escape (other
        illegal, non-whitespace characters are always escaped). If True, then only
        escape a possibly present single leading space character (this is used for
        the value of a key-value pair). If False, escape all whitespace characters.
    :param escape_non_printing: Whether to escape legal, but non-printable ASCII
        characters as well.
    :param line_breaks_only: Only escape \\r, \\n and \\f and not characters like : and =.
        Note: This does not invalidate/influence the other parameters like
        only_leading_spaces -- spaces are always escaped as per only_leading_spaces.
    :rtype : unicode
    :return: The escaped string.
    """
    # We NEED an unicode object. It's worth a try.
    if isinstance(raw_str, six.binary_type):
        # consider bringing in chardet...
        raw_str = raw_str.decode("utf-8")
    elif not isinstance(raw_str, six.text_type):
        # Last resort: Convert unknown object to a unicode string.
        # This works nicely for integers etc.
        raw_str = six.text_type(raw_str)

    # Do simple whitespace substitutions.
trans_dict = { ord(u"\r"): u"\\r", ord(u"\n"): u"\\n", ord(u"\f"): u"\\f" } # Do we want to be conform to the specs fully? if not line_breaks_only: # Yes, so escape more possibly ambiguous characters as well. trans_dict.update( { ord(u"#"): u"\\#", ord(u"!"): u"\\!", ord(u"="): u"\\=", ord(u":"): u"\\:", ord(u"\\"): u"\\\\", ord(u"\t"): u"\\t", } ) # All right, now we can actually do the substitutions. escaped_str = raw_str.translate(trans_dict) # Now escape either all space characters or only a possibly present single space at the beginning. if not only_leading_spaces: escaped_str = escaped_str.replace(u" ", u"\\ ") else: escaped_str = re.sub(u"^ ", u"\\\\ ", escaped_str) # Do we want to escape non-printing characters as well? if escape_non_printing: escaped_str = _escape_non_ascii(escaped_str) return escaped_str class PropertyError(Exception): """Base exception class for all exceptions raised by this module.""" pass class ParseError(PropertyError): """ Raised on parse errors in property files. :ivar message: The error message (string). :ivar line_number: Number of the line where the error occurred (integer). :ivar file_obj: The file object we were reading from when the error occurred (may be None). """ def __init__(self, message, line_number, file_obj=None): """ Create a new ParseError exception. :param message: Error message. :param line_number: Line number of error. :param file_obj: File object we were reading from when the error occurred. :return: A new :exc:`.ParseError` object. """ self.message = message self.line_number = line_number self.file_obj = file_obj def __str__(self): """ Get a human-readable string representation of this object. :return: Human-readable string representation of this object. """ filename = "<unknown>" if not hasattr(self.file_obj, "filename") else self.file_obj.filename return "Parse error in %s:%d: %s" % ( filename, self.line_number, self.message ) class Properties(object): """ A parser for Java property files. 
    This class implements parsing Java property files as defined here:
    http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load(java.io.Reader)
    """
    # Line endings/terminators.
    _EOL = "\r\n"

    # Non-line terminator whitespace.
    _WHITESPACE = " \t\f"

    # Which characters do we treat as whitespace?
    _ALLWHITESPACE = _EOL + _WHITESPACE

    def __init__(self, process_escapes_in_values=True, *args, **kwargs):
        """
        Create a new property file parser.

        :param process_escapes_in_values: If False, do not process escape sequences in
            values when parsing and try hard not to produce any escape sequences when
            writing, i. e. output strings literally. However, some things like leading
            whitespace and newlines are always escaped (since there is not good way
            around this).
        :return: A new :class:`.Properties`.
        """
        # For cooperative multiple inheritance.
        # noinspection PyArgumentList
        super(Properties, self).__init__(*args, **kwargs)

        self._process_escapes_in_values = process_escapes_in_values

        # Initialize parser state.
        self.reset()

        # Initialize property data.
        self.clear()

    def __len__(self):
        # Number of stored properties.
        return len(self._properties)

    def __getitem__(self, item):
        if not isinstance(item, six.string_types):
            raise TypeError("Property keys must be of type str or unicode")
        if isinstance(item, six.binary_type):
            # Normalize byte keys to text before lookup.
            item = item.decode('utf-8')
        if item not in self._properties:
            raise KeyError("Key not found")
        # Missing metadata defaults to an empty dict.
        return PropertyTuple(
            self._properties[item],
            self._metadata.get(item, {})
        )

    def __setitem__(self, key, value):
        if not isinstance(key, six.string_types):
            raise TypeError("Property keys must be of type str or unicode")
        # A (value, metadata) tuple may be assigned in one go.
        metadata = None
        if isinstance(value, tuple):
            value, metadata = value
        if not isinstance(value, six.string_types):
            raise TypeError("Property values must be of type str or unicode")
        if isinstance(value, six.binary_type):
            value = value.decode('utf-8')
        if metadata is not None and not isinstance(metadata, dict):
            raise TypeError("Metadata needs to be a dictionary")
        # NOTE(review): unlike __getitem__/__delitem__, the key is NOT decoded from
        # bytes here, so on Python 2 a bytes key is stored as-is while lookups use
        # the decoded form -- verify whether this asymmetry is intended.
        self._properties[key] = value
        if metadata is not None:
            self._metadata[key] = metadata

    def __delitem__(self, key):
        if not isinstance(key, six.string_types):
            raise TypeError("Property keys must be of type str or unicode")
        if isinstance(key, six.binary_type):
            key = key.decode('utf-8')
        if key not in self._properties:
            raise KeyError("Key not found")

        # Remove the property itself.
        del self._properties[key]

        # Remove its metadata as well.
        if key in self._metadata:
            del self._metadata[key]

        # We also no longer
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import os import math import copy import torch import torch.nn.functional as F from fairseq import options, utils, metrics, search, tokenizer from fairseq.data import ( AppendTokenDataset, ConcatDataset, data_utils, indexed_dataset, LanguagePairDataset, PrependTokenDataset, StripTokenDataset, TruncateDataset, ) from fairseq.tasks import FairseqTask, register_task from fairseq.tasks.translation import TranslationTask from sparse_prototype.retrieve_prototype_dataset import RetrievePrototypeDataset from sparse_prototype.language_pair_map_dataset import LanguagePairMapDataset # ported from UnsupervisedMT def parse_lambda_config(x): """ Parse the configuration of lambda coefficient (for scheduling). x = "3" # lambda will be a constant equal to x x = "0:1,1000:0" # lambda will start from 1 and linearly decrease # to 0 during the first 1000 iterations x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 # iterations, then will linearly increase to 1 until iteration 2000 """ split = x.split(',') if len(split) == 1: return float(x), None else: split = [s.split(os.pathsep) for s in split] assert all(len(s) == 2 for s in split) assert all(k.isdigit() for k, _ in split) assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1)) return float(split[0][1]), [(int(k), float(v)) for k, v in split] @register_task('sparse_prototype') class SparsePrototypeTask(TranslationTask): """A task for training multiple translation models simultaneously. We iterate round-robin over batches from multiple language pairs, ordered according to the `--lang-pairs` argument. 
    The training loop is roughly:

        for i in range(len(epoch)):
            for lang_pair in args.lang_pairs:
                batch = next_batch_for_lang_pair(lang_pair)
                loss = criterion(model_for_lang_pair(lang_pair), batch)
                loss.backward()
            optimizer.step()

    In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
    (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
    implements the `FairseqMultiModel` interface.

    During inference it is required to specify a single `--source-lang` and
    `--target-lang`, instead of `--lang-pairs`.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        # Inherit all standard translation-task arguments first.
        TranslationTask.add_args(parser)
        parser.add_argument('--forget-rate', type=float, default=0.9, metavar='D',
                            help='rho = (t + decay)^{-forget}')
        parser.add_argument('--decay-rate', type=float, default=1., metavar='D',
                            help='rho = (t + decay)^{-forget}')
        parser.add_argument('--retrieve-split', type=str, default='train',
                            help='the retrieve pool')
        parser.add_argument('--dec-opt-freq', type=int, default=1,
                            help='the relative update freq of decoder')
        parser.add_argument('--enc-opt-freq', type=int, default=1,
                            help='the relative update freq of encoder')
        parser.add_argument('--iw-nsamples', type=int, default=1000,
                            help='number of importance-weighted samples')
        parser.add_argument('--eval-mode', type=str, default='none',
                            choices=['iw', 'entropy', 'gen_sample', 'gen_reconstruction',
                                     'time', 'none', 'from_file', 'gen_interpolation'],
                            help='evaluation modes')
        parser.add_argument('--eval-gen-file', type=str, default=None,
                            help='read in prototypes and edit vectors')
        parser.add_argument('--eval-gen-edit-vec', action='store_true', default=False,
                            help='write edit vectors in the generation file')
        parser.add_argument('--prune-num', type=int, default=-1,
                            help='perform evaluation based on top prune_num templates only')
        # parser.add_argument('--prune-num-offline', type=int, default=-1,
        #                     help='perform evaluation based on top prune_num templates only (offline
version)') parser.add_argument('--free-bits', type=float, default=0, help='the free bits param to regularize KLt, 0 to disable') parser.add_argument('--lambda-t-config', default="1.0", type=str, metavar='CONFIG', help='KLt coefficient ' 'use fixed weight during training if set to floating point number. ' 'use piecewise linear function over number of updates to schedule the ' 'weight with the format: w0:step0,w1:step1,...') parser.add_argument('--gen-nz', type=int, default=10, help='number of edit vector samples to draw from the prior') parser.add_argument('--gen-np', type=int, default=200, help='number of top prototypes') parser.add_argument('--write-loss-path', type=str, default=None, help='write out loss at evaluation time for interpolation exp') def __init__(self, args, src_dict, edit_dict): super().__init__(args, src_dict, src_dict) self.forget_rate = args.forget_rate self.decay_rate = args.decay_rate self.dictionary = src_dict self.retrieve_fn = None self.retrieve_pool = None self.edit_dict = edit_dict self.lambda_t, self.lambda_t_steps = parse_lambda_config(args.lambda_t_config) @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). 
Args: args (argparse.Namespace): parsed command-line arguments """ args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) paths = args.data.split(os.pathsep) assert len(paths) > 0 # find language pair automatically # if args.source_lang is None or args.target_lang is None: # args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0]) # if args.source_lang is None or args.target_lang is None: # raise Exception('Could not infer language pair, please provide it explicitly') # load dictionaries src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.txt')) print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict))) if args.inv_editor == 'levenshtein': edit_dict = RetrievePrototypeDataset.get_edit_dict() else: edit_dict = None if edit_dict is not None: print('| [edit] dictionary: {} types'.format(len(edit_dict))) return cls(args, src_dict, edit_dict) def load_dataset(self, split, epoch=0, **kwargs): if self.retrieve_fn is None: self.build_model(self.args) # raise ValueError( # "retrieve_fn is None !" 
# ) retrieve_dataset = None if self.retrieve_pool is None: paths = self.args.data.split(os.pathsep) assert len(paths) > 0 data_path = paths[epoch % len(paths)] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl ) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) lang_pair_dataset = LanguagePairDataset( dataset, dataset.sizes, self.src_dict, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ) if split == self.args.retrieve_split: print("split {} is used as the retrieve_pool".format(split)) retrieve_dataset = lang_pair_dataset else: print("loading the retrieve split {}".format(self.args.retrieve_split)) split_path = os.path.join(self.args.data, self.args.retrieve_split) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl ) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format( self.args.retrieve_split, split_path) ) if self.args.prune_num > 0: retrieve_dataset = LanguagePairMapDataset( dataset, dataset.sizes, self.src_dict, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ) else: retrieve_dataset = LanguagePairDataset( dataset, dataset.sizes, self.src_dict, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ) self.retrieve_pool = retrieve_dataset elif split == self.args.retrieve_split: print("skip reading split {} since it is used as the retrieve_pool" .format(split)) lang_pair_dataset = self.retrieve_pool else: paths = self.args.data.split(os.pathsep) assert len(paths) > 0 data_path = paths[epoch % len(paths)] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl ) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) 
lang_pair_dataset = LanguagePairDataset( dataset, dataset.sizes, self.src_dict, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ) # always use unbiased estimator at test time # Avoid selecting self as templates at training time if 'train' not in split and self.args.criterion != 'guu_elbo': sampling = True masks = None else: def read_mask(fpath): with open(fpath) as fin: return [int(x.rstrip()) for x in fin] sampling = options.eval_bool(self.args.reinforce) if os.path.exists(os.path.join(self.args.data, 'mask_id.txt')): masks = read_mask(os.path.join(self.args.data, 'mask_id.txt')) else: masks = None self.datasets[split] = RetrievePrototypeDataset(lang_pair_dataset, self.src_dict, retrieve_dataset=self.retrieve_pool, retrieve_fn=self.retrieve_fn, cuda=not self.args.cpu, num_samples=self.args.infer_ns, temperature=self.args.reinforce_temperature, sampling=sampling, edit_dict=self.edit_dict, split=split, masks=masks, ) def build_model(self, args): if self.retrieve_fn is None: from fairseq import models model = models.build_model(args, self) def retrieve_fn(samples, split, model_=model): return model_.classifier(samples, split) self.retrieve_fn = retrieve_fn self.model = model return self.model def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False, update_lambda=True): model.train() model.set_num_updates(update_num) model.set_lambda_t(self.lambda_t) if update_num > 0: self.update_step(update_num) loss, sample_size, logging_output = criterion(model, sample, len(self.datasets['train'])) if ignore_grad: loss *= 0 optimizer.backward(loss) # following only supports single GPU training # if update_lambda and not ignore_grad: # if not model.grad_lambda: # with torch.no_grad(): # forget = math.pow(update_num + self.decay_rate, -self.forget_rate) # new_lambda = model.get_alpha() + \ # F.softmax(sample['net_input']['logits'], dim=1).mean(dim=0) * len(self.datasets['train']) # # new_lambda = new_lambda.cpu() 
# # update the Dirichlet posterior # model.update_lambda((1. - forget) * model.get_lambda() + # forget * new_lambda) # else: # raise ValueError('There are bugs to be fixed when dealing with KL theta') return loss, sample_size, logging_output # to support distributed training def collect_lambda_stats(self, model, sample): return F.softmax(sample['net_input']['logits'], dim=1).sum(0) def distributed_update_lambda(self, model, lambda_stats_sum, nsentences, update_num, ignore_grad=False): if ignore_grad: return forget = math.pow(update_num + self.decay_rate, -self.forget_rate) new_lambda = model.get_alpha() + lambda_stats_sum / nsentences * len(self.datasets['train']) # update the Dirichlet posterior model.update_lambda((1. - forget) * model.get_lambda() + forget * new_lambda) def update_step(self, num_updates): def lambda_step_func(config, n_iter): """ Update a lambda value according to its schedule configuration. """ ranges = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]] if len(ranges) == 0: assert n_iter >= config[-1][0] return config[-1][1] assert len(ranges) == 1 i = ranges[0] x_a, y_a = config[i] x_b, y_b = config[i + 1] return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a) if self.lambda_t_steps is not None: self.lambda_t = lambda_step_func(self.lambda_t_steps, num_updates) def valid_step(self, sample, model, criterion, split='valid'): model.eval() with torch.no_grad(): loss, sample_size, logging_output = criterion(model, sample, len(self.datasets[split])) return loss, sample_size, logging_output def valid_iw_step(self, sample, model, criterion, mode='iw'): model.eval() with torch.no_grad(): if mode == 'iw': loss, sample_size, logging_output = criterion.iw_eval_new(model, sample, 0, self.args.iw_nsamples, retrieve_dataset=self.datasets[self.args.valid_subset]) elif mode == 'entropy': loss, sample_size, logging_output = criterion.entropy_eval(model, sample, 0) else: raise ValueError("mode {} is not 
supported".format(mode)) return loss, sample_size, logging_output def inference_step(self, generator, models, sample, prefix_tokens=None, mode='gen_sample'): if mode == "gen_sample": sample_z = models[0].sample_from_uniform_sphere(ns=sample['net_input']['src_tokens'].size(0)) sample['net_input'].update({'edit_vecs': sample_z}) elif mode == "gen_interpolation": def slerp(val, low, high): """spherical linear interpolation from: https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 """ low_norm = low/torch.norm(low, dim=1, keepdim=True) high_norm = high/torch.norm(high, dim=1, keepdim=True) omega = torch.acos((low_norm*high_norm).sum(1)) so =
"""@file galaxycluster.py The GalaxyCluster class """ import pickle import warnings from .gcdata import GCData from .dataops import compute_tangential_and_cross_components, make_radial_profile from .theory import compute_critical_surface_density from .plotting import plot_profiles class GalaxyCluster(): """Object that contains the galaxy cluster metadata and background galaxy data Attributes ---------- unique_id : int or string Unique identifier of the galaxy cluster ra : float Right ascension of galaxy cluster center (in degrees) dec : float Declination of galaxy cluster center (in degrees) z : float Redshift of galaxy cluster center galcat : GCData Table of background galaxy data containing at least galaxy_id, ra, dec, e1, e2, z """ def __init__(self, *args, **kwargs): self.unique_id = None self.ra = None self.dec = None self.z = None self.galcat = None if len(args)>0 or len(kwargs)>0: self._add_values(*args, **kwargs) self._check_types() def _add_values(self, unique_id: str, ra: float, dec: float, z: float, galcat: GCData): """Add values for all attributes""" self.unique_id = unique_id self.ra = ra self.dec = dec self.z = z self.galcat = galcat def _check_types(self): """Check types of all attributes""" if isinstance(self.unique_id, (int, str)): # should unique_id be a float? self.unique_id = str(self.unique_id) else: raise TypeError(f'unique_id incorrect type: {type(self.unique_id)}') try: self.ra = float(self.ra) except TypeError: print(f'ra incorrect type: {type(self.ra)}') try: self.dec = float(self.dec) except TypeError: print(f'dec incorrect type: {type(self.dec)}') try: self.z = float(self.z) except TypeError: print(f'z incorrect type: {type(self.z)}') if not isinstance(self.galcat, GCData): raise TypeError(f'galcat incorrect type: {type(self.galcat)}') if not -360. <= self.ra <= 360.: raise ValueError(f'ra={self.ra} not in valid bounds: [-360, 360]') if not -90. 
<= self.dec <= 90.: raise ValueError(f'dec={self.dec} not in valid bounds: [-90, 90]') if self.z < 0.: raise ValueError(f'z={self.z} must be greater than 0') def save(self, filename, **kwargs): """Saves GalaxyCluster object to filename using Pickle""" with open(filename, 'wb') as fin: pickle.dump(self, fin, **kwargs) @classmethod def load(cls, filename, **kwargs): """Loads GalaxyCluster object to filename using Pickle""" with open(filename, 'rb') as fin: self = pickle.load(fin, **kwargs) self._check_types() return self def _str_colnames(self): """Colnames in comma separated str""" return ', '.join(self.galcat.colnames) def __repr__(self): """Generates basic description of GalaxyCluster""" return ( f'GalaxyCluster {self.unique_id}: ' f'(ra={self.ra}, dec={self.dec}) at z={self.z}' f'\n> with columns: {self._str_colnames()}' f'\n> {len(self.galcat)} source galaxies' ) def __str__(self): """Generates string for print(GalaxyCluster)""" table = 'objects'.join(self.galcat.__str__().split('objects')[1:]) return self.__repr__()+'\n'+table def _repr_html_(self): """Generates string for display(GalaxyCluster)""" return ( f'<b>GalaxyCluster:</b> {self.unique_id} ' f'(ra={self.ra}, dec={self.dec}) at z={self.z}' f'<br>> <b>with columns:</b> {self._str_colnames()}' f'<br>> {len(self.galcat)} source galaxies' f'<br>{self.galcat._html_table()}' ) def add_critical_surface_density(self, cosmo): r"""Computes the critical surface density for each galaxy in `galcat`. It only runs if input cosmo != galcat cosmo or if `sigma_c` not in `galcat`. Parameters ---------- cosmo : clmm.Cosmology object CLMM Cosmology object Returns ------- None """ if cosmo is None: raise TypeError('To compute Sigma_crit, please provide a cosmology') if cosmo.get_desc() != self.galcat.meta['cosmo'] or 'sigma_c' not in self.galcat: if self.z is None: raise TypeError('Cluster\'s redshift is None. 
Cannot compute Sigma_crit') if 'z' not in self.galcat.columns: raise TypeError('Galaxy catalog missing the redshift column. ' 'Cannot compute Sigma_crit') self.galcat.update_cosmo(cosmo, overwrite=True) self.galcat['sigma_c'] = compute_critical_surface_density(cosmo=cosmo, z_cluster=self.z, z_source=self.galcat['z']) def compute_tangential_and_cross_components( self, shape_component1='e1', shape_component2='e2', tan_component='et', cross_component='ex', geometry='curve', is_deltasigma=False, cosmo=None, add=True): r"""Adds a tangential- and cross- components for shear or ellipticity to self Calls `clmm.dataops.compute_tangential_and_cross_components` with the following arguments: ra_lens: cluster Ra dec_lens: cluster Dec ra_source: `galcat` Ra dec_source: `galcat` Dec shear1: `galcat` shape_component1 shear2: `galcat` shape_component2 geometry: `input` geometry is_deltasigma: `input` is_deltasigma sigma_c: `galcat` sigma_c | None Parameters ---------- shape_component1: string, optional Name of the column in the `galcat` astropy table of the cluster object that contains the shape or shear measurement along the first axis. Default: `e1` shape_component1: string, optional Name of the column in the `galcat` astropy table of the cluster object that contains the shape or shear measurement along the second axis. Default: `e2` tan_component: string, optional Name of the column to be added to the `galcat` astropy table that will contain the tangential component computed from columns `shape_component1` and `shape_component2`. Default: `et` cross_component: string, optional Name of the column to be added to the `galcat` astropy table that will contain the cross component computed from columns `shape_component1` and `shape_component2`. Default: `ex` geometry: str, optional Sky geometry to compute angular separation. Options are curve (uses astropy) or flat. is_deltasigma: bool If `True`, the tangential and cross components returned are multiplied by Sigma_crit. 
Results in units of :math:`M_\odot\ Mpc^{-2}` cosmo: astropy cosmology object Specifying a cosmology is required if `is_deltasigma` is True add: bool If `True`, adds the computed shears to the `galcat` Returns ------- angsep: array_like Angular separation between lens and each source galaxy in radians tangential_component: array_like Tangential shear (or assimilated quantity) for each source galaxy cross_component: array_like Cross shear (or assimilated quantity) for each source galaxy """ # Check is all the required data is available missing_cols = ', '.join( [f"'{t_}'" for t_ in ('ra', 'dec', shape_component1, shape_component2) if t_ not in self.galcat.columns]) if len(missing_cols)>0: raise TypeError('Galaxy catalog missing required columns: '+missing_cols+\ '. Do you mean to first convert column names?') if is_deltasigma: self.add_critical_surface_density(cosmo) # compute shears angsep, tangential_comp, cross_comp = compute_tangential_and_cross_components( ra_lens=self.ra, dec_lens=self.dec, ra_source=self.galcat['ra'], dec_source=self.galcat['dec'], shear1=self.galcat[shape_component1], shear2=self.galcat[shape_component2], geometry=geometry, is_deltasigma=is_deltasigma, sigma_c=self.galcat['sigma_c'] if 'sigma_c' in self.galcat.columns else None) if add: self.galcat['theta'] = angsep self.galcat[tan_component] = tangential_comp self.galcat[cross_component] = cross_comp return angsep, tangential_comp, cross_comp def make_radial_profile(self, bin_units, bins=10, error_model='ste', cosmo=None, tan_component_in='et', cross_component_in='ex', tan_component_out='gt', cross_component_out='gx', tan_component_in_err=None, cross_component_in_err=None, include_empty_bins=False, gal_ids_in_bins=False, add=True, table_name='profile', overwrite=True): r"""Compute the shear or ellipticity profile of the cluster We assume that the cluster object contains information on the cross and tangential shears or ellipticities and angular separation of the source galaxies Calls 
`clmm.dataops.make_radial_profile` with the following arguments: components: `galcat` components (tan_component_in, cross_component_in, z) angsep: `galcat` theta angsep_units: radians bin_units: `input` bin_units bins: `input` bins include_empty_bins: `input` include_empty_bins cosmo: `input` cosmo z_lens: cluster z Parameters ---------- angsep_units : str Units of the calculated separation of the source galaxies Allowed Options = ["radians"] bin_units : str Units to use for the radial bins of the shear profile Allowed Options = ["radians", "deg", "arcmin", "arcsec", "kpc", "Mpc"] (letter case independent) bins : array_like, optional User defined bins to use for the shear profile. If a list is provided, use that as the bin edges. If a scalar is provided, create that many equally spaced bins between the minimum and maximum angular separations in bin_units. If nothing is provided, default to 10 equally spaced bins. error_model : str, optional Statistical error model to use for y uncertainties. (letter case independent) `ste` - Standard error [=std/sqrt(n) in unweighted computation] (Default). `std` - Standard deviation. cosmo: dict, optional Cosmology parameters to convert angular separations to physical distances tan_component_in: string, optional Name of the tangential component column in `galcat` to be binned. Default: 'et' cross_component_in: string, optional Name of the cross component column in `galcat` to be binned. Default: 'ex' tan_component_out: string, optional Name of the tangetial component binned column to be added in profile table. Default: 'gt' cross_component_out: string, optional Name of the cross component binned profile column to be added in profile table. Default: 'gx' tan_component_in_err: string, None, optional Name of the tangential component error column in `galcat` to be binned. Default: None cross_component_in_err: string, None, optional Name of the cross component error column in `galcat` to be binned. 
Default: None include_empty_bins: bool, optional Also include empty bins in the returned table gal_ids_in_bins: bool, optional Also include the list of galaxies ID belonging to each bin in the returned table add: bool, optional Attach the profile to the cluster object table_name: str, optional Name of the profile table to be add as `cluster.table_name`. Default 'profile' overwrite: bool, optional Overwrite profile table. Default True Returns ------- profile : GCData Output table containing the radius grid points, the tangential and cross shear profiles on that grid, and the errors in the two shear profiles. The errors are defined as the standard errors in each bin. """ #Too many local variables (19/15) #pylint: disable=R0914 if not all([t_ in self.galcat.columns for t_ in (tan_component_in, cross_component_in, 'theta')]): raise TypeError( 'Shear or ellipticity information is missing! Galaxy catalog must
<gh_stars>10-100 from datetime import datetime from re import search, sub from dateutil.parser import parse from pytz import UTC from baseball.baseball import (POSITION_CODE_DICT, PlateAppearance, Player, PlayerAppearance, Inning, Team, Game) from baseball.baseball_events import (AUTOMATIC_BALL_POSITION, Pitch, Pickoff, RunnerAdvance, Substitution, Switch) MLB_TEAM_CODE_DICT = {'LAA': 'ana', 'SEA': 'sea', 'BAL': 'bal', 'CLE': 'cle', 'CIN': 'cin', 'NYM': 'nyn', 'COL': 'col', 'LAD': 'lan', 'DET': 'det', 'TOR': 'tor', 'HOU': 'hou', 'OAK': 'oak', 'MIA': 'mia', 'FLO': 'flo', 'CAL': 'cal', 'ATL': 'atl', 'MIL': 'mil', 'ML4': 'ml4', 'CHC': 'chn', 'MIN': 'min', 'KC': 'kca', 'NYY': 'nya', 'TEX': 'tex', 'PHI': 'phi', 'WSH': 'was', 'PIT': 'pit', 'STL': 'sln', 'SD': 'sdn', 'ARI': 'ari', 'SF': 'sfn', 'CHW': 'cha', 'TB': 'tba', 'BOS': 'bos'} MLB_REVERSE_TEAM_CODE_DICT = {'ana': 'LAA', 'sea': 'SEA', 'bal': 'BAL', 'cle': 'CLE', 'cin': 'CIN', 'nyn': 'NYM', 'col': 'COL', 'lan': 'LAD', 'det': 'DET', 'tor': 'TOR', 'hou': 'HOU', 'oak': 'OAK', 'mia': 'MIA', 'flo': 'FLO', 'cal': 'CAL', 'atl': 'ATL', 'mil': 'MIL', 'ml4': 'ML4', 'chn': 'CHC', 'min': 'MIN', 'kca': 'KC', 'nya': 'NYY', 'tex': 'TEX', 'phi': 'PHI', 'was': 'WSH', 'pit': 'PIT', 'sln': 'STL', 'sdn': 'SD', 'ari': 'ARI', 'sfn': 'SF', 'cha': 'CHW', 'tba': 'TB', 'bos': 'BOS'} POSITION_ABBREV_DICT = {'P': 1, 'C': 2, '1B': 3, '2B': 4, '3B': 5, 'SS': 6, 'LF': 7, 'CF': 8, 'RF': 9, 'DH': 10, 'PH': 'PH', 'PR': 'PR', 'EH': 'EH'} def get_datetime(tfs_zulu_str): if tfs_zulu_str: year = int(tfs_zulu_str[0:4]) month = int(tfs_zulu_str[5:7]) day = int(tfs_zulu_str[8:10]) hour = int(tfs_zulu_str[11:13]) minute = int(tfs_zulu_str[14:16]) second = int(tfs_zulu_str[17:19]) event_datetime = datetime(year, month, day, hour, minute, second, tzinfo=UTC) else: event_datetime = None return event_datetime def process_pitch(event): pitch_description = event.get('des') pitch_type = event.get('pitch_type') pitch_datetime = get_datetime(event.get('tfs_zulu')) if 
(not event.get('x') or not event.get('y') or event.get('x') == 'None' or event.get('y') == 'None'): (pitch_x, pitch_y) = AUTOMATIC_BALL_POSITION else: pitch_x = float(event.get('x')) pitch_y = float(event.get('y')) pitch_position = (pitch_x, pitch_y) if event.get('start_speed'): pitch_speed = float(event.get('start_speed')) else: pitch_speed = None pitch_obj = Pitch(pitch_datetime, pitch_description, pitch_type, pitch_speed, pitch_position) return pitch_obj def process_pickoff(event): pickoff_description = event.get('des') pickoff_base = pickoff_description.split()[-1] if (pickoff_description.split()[1] == 'Attempt' or pickoff_description.split()[1] == 'Error'): pickoff_was_successful = False elif len(pickoff_description.split()) == 2: pickoff_was_successful = True else: raise ValueError('Bad Pickoff description.') pickoff_obj = Pickoff(pickoff_description, pickoff_base, pickoff_was_successful) return pickoff_obj def process_runner_advance(event, game_obj): runner_id = int(event.get('id')) if runner_id in game_obj.away_team: runner = game_obj.away_team[runner_id] elif runner_id in game_obj.home_team: runner = game_obj.home_team[runner_id] else: raise ValueError('Runner ID not in player dict') start_base = event.get('start') end_base = event.get('end') run_description = event.get('event') runner_scored = (event.get('score') == 'T') run_earned = (event.get('earned') == 'T') is_rbi = (event.get('rbi') == 'T') runner_advance_obj = RunnerAdvance(run_description, runner, start_base, end_base, runner_scored, run_earned, is_rbi) return runner_advance_obj def process_plate_appearance(plate_appearance, game_obj): event_list = [] scoring_runners_list = [] runners_batted_in_list = [] for event in plate_appearance: if event.tag == 'pitch': pitch_obj = process_pitch(event) event_list.append(pitch_obj) elif event.tag == 'po': pickoff_obj = process_pickoff(event) event_list.append(pickoff_obj) elif event.tag == 'runner': runner_advance_obj = process_runner_advance(event, game_obj) 
event_list.append(runner_advance_obj) if runner_advance_obj.runner_scored: scoring_runners_list.append(runner_advance_obj.runner) if runner_advance_obj.is_rbi: runners_batted_in_list.append(runner_advance_obj.runner) else: raise ValueError('Undefined event') return event_list, scoring_runners_list, runners_batted_in_list def parse_substitution_description(description): if ('remains in game' in description and 'leaves the game' in description): incoming_str, action_str, outgoing_str = description.split(', ') action_list = [action_str] player_list = [incoming_str.split(' remains')[0], outgoing_str.split(' leaves')[0]] elif 'enters the batting order' in description: action_list = [description.split(', ')[1]] start_str = description.split(', ')[0] predicate_str = description.split(', ')[2] player_list = [ start_str.split(' enters')[0], predicate_str.split(' leaves')[0] ] else: description = description.strip(' .') description = description.split(': ')[1] description_list = description.split(', ') player_list = description_list[0].split(' replaces ') action_list = description_list[1:] return player_list, action_list def get_name_only(player_str): name_flag = False player_name = None for word in player_str.split(): if name_flag: player_name += (' ' + word) elif word[0].isupper(): player_name = word name_flag = True return player_name def get_player_names(player_list): incoming_player_name = ( player_list[0].replace( 'Pinch-hitter ', '' ).replace( '<NAME> ', '' ).replace( 'Pinch-runner ', '' ).replace( 'Pinch runner ', '' ).replace( 'Pitcher ', '' ) ) outgoing_player_name = get_name_only(player_list[1]) return incoming_player_name.strip(), outgoing_player_name.strip() def parse_substitution(substitution_datetime, description, event_summary, inning_half_str, game_obj): player_list, action_list = parse_substitution_description(description) incoming_player_name, outgoing_player_name = get_player_names(player_list) batting_order, position_str = None, None for item in 
action_list: if 'batting' in item: batting_order_str = item.replace('batting ', '')[:-2] batting_order = int(batting_order_str) elif 'replacing' in item: outgoing_player_name = item.replace('replacing ', '').strip() elif 'playing' in item: position_str = item.replace('playing ', '').split()[0].strip() position_num = get_position_number(position_str) if (event_summary in ['Pitching Substitution', 'Defensive Sub', 'Defensive sub']): if inning_half_str == 'top': this_team = game_obj.home_team else: this_team = game_obj.away_team elif (event_summary in ['Offensive Sub', 'Offensive sub', 'Offensive Substitution']): if inning_half_str == 'top': this_team = game_obj.away_team else: this_team = game_obj.home_team else: raise ValueError('Invalid Substitution event summary') if ('Pinch-hitter ' in player_list[0] or 'Pinch hitter' in player_list[0]): position_num = 'PH' elif ('Pinch-runner ' in player_list[0] or 'Pinch runner' in player_list[0]): position_num = 'PR' elif event_summary == 'Pitching Substitution': position_num = 1 outgoing_player_name = get_name_only(outgoing_player_name) incoming_player = this_team[incoming_player_name] outgoing_player = this_team[outgoing_player_name] substitution_obj = Substitution(substitution_datetime, incoming_player, outgoing_player, batting_order, position_num) return this_team, substitution_obj def get_position_number(position_str): if position_str in POSITION_CODE_DICT: position = POSITION_CODE_DICT[position_str] elif position_str in POSITION_ABBREV_DICT: position = POSITION_ABBREV_DICT[position_str] elif not position_str: position = None else: raise ValueError('Invalid Position') return position def fix_description(input_str): match = search(r'[A-Z]\.\s+[A-Z]\.\s+', input_str) while match: index_start = match.start() index_end = match.end() string_start = input_str[:index_start] string_end = input_str[index_end:] string_middle = input_str[index_start:index_end] string_middle = sub(r'\.\s+', '', string_middle) input_str = 
(string_start + string_middle + ' ' + string_end) match = search(r'[A-Z]\.\s+[A-Z]\.\s+', input_str) match = search(r'[A-Z]\.\s+[A-Z]', input_str) while match: index_start = match.start() index_end = match.end() string_start = input_str[:index_start] string_middle = input_str[index_start:index_end].replace('.', '') string_end = input_str[index_end:] input_str = (string_start + string_middle + string_end).strip() match = search(r'[A-Z]\.\s+[A-Z]', input_str) input_str = sub(r'\.\s+', '. ', input_str) input_str = input_str.strip() return input_str def process_at_bat(plate_appearance, event_list, game_obj, steal_description): (new_event_list, scoring_runners_list, runners_batted_in_list) = process_plate_appearance(plate_appearance, game_obj) event_list += new_event_list plate_appearance_desc = fix_description(plate_appearance.get('des')) pitcher_id = int(plate_appearance.get('pitcher')) inning_outs = int(plate_appearance.get('o')) batter_id = int(plate_appearance.get('batter')) if batter_id in game_obj.home_team: batter = game_obj.home_team[batter_id] batting_team = game_obj.home_team elif batter_id in game_obj.away_team: batter = game_obj.away_team[batter_id] batting_team = game_obj.away_team else: raise ValueError('Batter ID not in player_dict') out_runner_supplemental_list = None pitcher = None for this_team in [game_obj.home_team, game_obj.away_team]: if pitcher_id in this_team: pitcher = this_team[pitcher_id] elif steal_description: out_runner_supplemental_list = ( PlateAppearance.get_out_runners_list(steal_description, this_team, new_event_list, batter) ) if not pitcher: raise ValueError('Batter ID not in player_dict') start_datetime = get_datetime(plate_appearance.get('end_tfs_zulu')) end_datetime = get_datetime(plate_appearance.get('end_tfs_zulu')) plate_appearance_summary = plate_appearance.get('event').strip() plate_appearance_obj = PlateAppearance(start_datetime, end_datetime, batting_team, plate_appearance_desc, plate_appearance_summary, pitcher, batter, 
        inning_outs, scoring_runners_list, runners_batted_in_list, event_list)

    if out_runner_supplemental_list:
        plate_appearance_obj.out_runners_list += out_runner_supplemental_list

    return plate_appearance_obj


def process_substitution(substitution_obj, inning_num, inning_half_str,
                         next_batter_num, substituting_team):
    """Record a player substitution on the substituting team's rosters.

    Creates a new PlayerAppearance for the incoming player, finds the batting
    slot occupied by the outgoing player, closes out that slot's current
    appearance (end inning/half/batter), and appends the new appearance.
    Pitchers (position 1) are additionally tracked in the team's pitcher_list.

    Raises:
        ValueError: if the outgoing player cannot be located in any batting
            slot and the incoming player is not a pitcher.
    """
    player_appearance_obj = PlayerAppearance(
        substitution_obj.incoming_player,
        substitution_obj.position,
        inning_num,
        inning_half_str,
        next_batter_num
    )

    batting_list_list = substituting_team.batting_order_list_list
    player_appearance_list = None
    processed_flag = False

    if substitution_obj.batting_order:
        # The event tells us the batting slot directly (1-based -> 0-based).
        batting_index = substitution_obj.batting_order - 1
        player_appearance_list = batting_list_list[batting_index]
    else:
        # Otherwise, find the slot whose CURRENT occupant is the outgoing player.
        for this_appearance_list in batting_list_list:
            if (this_appearance_list[-1].player_obj.mlb_id ==
                    substitution_obj.outgoing_player.mlb_id):
                player_appearance_list = this_appearance_list

    if not player_appearance_list:
        # Fallback 1: two slots currently share the same fielding position
        # (data glitch after an earlier move).  Pick the slot whose current
        # appearance started EARLIER; "PH"/"PR" are excluded since pinch
        # hitters/runners legitimately duplicate.
        position_list = [batting_list[-1].position
                         for batting_list in batting_list_list]
        duplicate_position_set = set(
            [x for x in position_list
             if ((position_list.count(x) > 1) and (x not in ['PH', 'PR']))]
        )
        if duplicate_position_set:
            duplicate_position = [x for x in duplicate_position_set][0]
            duplicate_appearance_list = []
            for batting_list in batting_list_list:
                if batting_list[-1].position == duplicate_position:
                    duplicate_appearance_list.append(batting_list)

            # Build a sortable "inning|half|batter" integer for each candidate.
            # NOTE(review): string concatenation of inning and batter numbers
            # assumes single digits compare correctly; confirm for inning >= 10.
            first_player_start = int(
                '{}{}{}'.format(
                    duplicate_appearance_list[0][-1].start_inning_num,
                    int(duplicate_appearance_list[0][-1].start_inning_half == 'bottom'),
                    duplicate_appearance_list[0][-1].start_inning_batter_num,
                )
            )
            second_player_start = int(
                '{}{}{}'.format(
                    duplicate_appearance_list[1][-1].start_inning_num,
                    int(duplicate_appearance_list[1][-1].start_inning_half == 'bottom'),
                    duplicate_appearance_list[1][-1].start_inning_batter_num,
                )
            )

            if first_player_start < second_player_start:
                player_appearance_list = duplicate_appearance_list[0]
            else:
                player_appearance_list = duplicate_appearance_list[1]

    if not player_appearance_list:
        # Fallback 2: the outgoing player is one appearance back in a slot
        # (he was already replaced once in this slot's history).
        for this_appearance_list in batting_list_list:
            outgoing_id = substitution_obj.outgoing_player.mlb_id
            if (len(this_appearance_list) > 1 and
                    this_appearance_list[-2].player_obj.mlb_id == outgoing_id):
                player_appearance_list = this_appearance_list

    if player_appearance_list:
        processed_flag = True
        set_player_position_from_list(player_appearance_obj,
                                      player_appearance_list)

        # Close out the replaced player's appearance, then append the new one.
        final_appearance = player_appearance_list[-1]
        final_appearance.end_inning_num = inning_num
        final_appearance.end_inning_half = inning_half_str
        final_appearance.end_inning_batter_num = next_batter_num
        player_appearance_list.append(player_appearance_obj)

    if player_appearance_obj.position == 1:
        # Incoming pitcher: also close out and extend the pitching ledger.
        # This is not mutually exclusive with the batting-slot update above.
        processed_flag = True
        pitching_appearance_list = substituting_team.pitcher_list
        final_appearance = pitching_appearance_list[-1]
        final_appearance.end_inning_num = inning_num
        final_appearance.end_inning_half = inning_half_str
        final_appearance.end_inning_batter_num = next_batter_num
        pitching_appearance_list.append(player_appearance_obj)

    if not processed_flag:
        raise ValueError('Invalid substitution.')


def process_switch(switch_obj, inning_num, inning_half_str,
                   next_batter_num, switching_team):
    """Record a position switch (same player, new position/batting order).

    Closes the player's current appearance wherever it lives (a batting slot,
    or the pitcher list for a pitcher who never batted) and appends a new
    appearance at the new position.

    Raises:
        ValueError: if the player is found neither in a batting slot nor as
            the current pitcher.
    """
    player_appearance_obj = PlayerAppearance(
        switch_obj.player,
        switch_obj.new_position_num,
        inning_num,
        inning_half_str,
        next_batter_num
    )

    batting_list_list = switching_team.batting_order_list_list
    old_player_appearance_list = None
    for this_appearance_list in batting_list_list:
        if (this_appearance_list[-1].player_obj.mlb_id ==
                switch_obj.player.mlb_id):
            old_player_appearance_list = this_appearance_list

    if not old_player_appearance_list:
        # Not in the batting order (e.g. DH rules): check the mound.
        if (switching_team.pitcher_list[-1].player_obj.mlb_id ==
                switch_obj.player.mlb_id):
            old_player_appearance_list = switching_team.pitcher_list

    if not old_player_appearance_list:
        raise ValueError('Invalid player switch')

    final_appearance = old_player_appearance_list[-1]
    final_appearance.end_inning_num = inning_num
    final_appearance.end_inning_half = inning_half_str
    final_appearance.end_inning_batter_num = next_batter_num

    if switch_obj.new_batting_order:
        # Explicit new slot: append there rather than to the old slot.
        new_player_appearance_list = batting_list_list[
            switch_obj.new_batting_order - 1
        ]
        new_player_appearance_list.append(player_appearance_obj)
    else:
        old_player_appearance_list.append(player_appearance_obj)

    if player_appearance_obj.position == 1:
        # Switched onto the mound: start a new pitching appearance too.
        switching_team.pitcher_list.append(player_appearance_obj)


def parse_switch_description(event_datetime, description, event_summary,
                             game_obj, inning_half_str):
    """Parse a switch/substitution description into team context.

    (Truncated in this chunk — continues past the visible source.)
    """
    if 'Substitution: ' in description:
        description = description.split('Substitution: ')[1]

    # Defensive moves belong to the fielding team, offensive to the batting
    # team; which is home vs away depends on the half-inning.
    if 'Defensive' in event_summary:
        if inning_half_str == 'top':
            switching_team = game_obj.home_team
        else:
            switching_team = game_obj.away_team
    elif 'Offensive' in event_summary:
        if inning_half_str == 'top':
            switching_team = game_obj.away_team
        else:
            switching_team
#!/usr/bin/env python
# coding: utf-8

# ## Calculate distance between means/medoids of mutation groupings
#
# Our goal is to find an unsupervised way of calculating distance/similarity between our mutation groupings ("none"/"one"/"both") which isn't affected by sample size, to the degree that differentially expressed gene count was (see `4_de_analysis` notebooks).
#
# Here, we'll try the extremely simple method of:
#
# 1) taking the n-dimensional mean (centroid) or median (medoid) of each group
# 2) calculating distance between the centroids and using this to define "expression similarity"
#
# We'll try this for a few different feature selection/embedding methods, and for both gene expression and RPPA (protein expression) data.

# In[1]:


from pathlib import Path
import pickle as pkl
import itertools as it

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

import sys; sys.path.append('..')
import config as cfg

get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')


# ### Notebook parameters
#
# These control which type of data to use, how to preprocess the data, how to calculate the centroid distances, etc.
#
# Defaults are chosen mostly to make the script run quickly (expression data takes much longer to load and preprocess), although they seem to be somewhat representative of the overall results.

# In[2]:


# whether to use expression or rppa data
data_type = 'rppa'

# how to calculate centroids, 'mean' or 'median'
centroid_method = 'median'

# preprocessing method, 'none' or 'pca' currently
preprocessing = 'pca'

# number of features to subset to
# for 'none' preprocessing this is the number of features by MAD
# (None = no subset by MAD)
# for 'pca' preprocessing this is the number of PCs to use
subset_feats = 50

# number of samples to require in each "hit" grouping, None = no minimum
min_sample_count = 15


# ### Load expression data
#
# We'll also subset to the top features by mean absolute deviation, if that option is selected.

# In[3]:


expression_data_file = (
    '/home/jake/research/mpmp/data/tcga_expression_matrix_processed.tsv.gz'
)
rppa_data_file = (
    '/home/jake/research/mpmp/data/tcga_rppa_matrix_processed.tsv'
)

if data_type == 'expression':
    data_df = pd.read_csv(expression_data_file, sep='\t', index_col=0)
elif data_type == 'rppa':
    data_df = pd.read_csv(rppa_data_file, sep='\t', index_col=0)

print(data_df.shape)
data_df.iloc[:5, :5]


# In[4]:


# if PCA preprocessing is selected, convert raw features to PCs
# select the number of PCs using subset_feats
if preprocessing == 'pca':
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    # standardize features first
    data_df = pd.DataFrame(
        StandardScaler().fit_transform(data_df),
        index=data_df.index.copy(),
        columns=data_df.columns.copy()
    )
    # then transform using PCA
    pca = PCA(n_components=subset_feats)
    data_df = pd.DataFrame(
        pca.fit_transform(data_df),
        index=data_df.index.copy(),
        columns=['PC{}'.format(i) for i in range(subset_feats)]
    )
    data_df.iloc[:5, :5]


# In[5]:


# if no preprocessing, subset features by mean absolute deviation
if preprocessing == 'none' and subset_feats is not None:
    mad_ranking = (
        data_df.mad(axis=0)
        .sort_values(ascending=False)
    )
    top_feats = mad_ranking[:subset_feats].index.astype(str).values
    print(top_feats[:5])
    data_df = data_df.reindex(top_feats, axis='columns')

print(data_df.shape)
data_df.iloc[:5, :5]


# ### Load Park et al. "hit" data
#
# This was collated/formatted in `0_process_park.ipynb`

# In[6]:


with open(cfg.distance_gain_info, 'rb') as f:
    park_gain_info = pkl.load(f)

park_gain_info['TP53_BRCA'].head()


# In[7]:


with open(cfg.distance_loss_info, 'rb') as f:
    park_loss_info = pkl.load(f)

park_loss_info['TP53_BRCA'].head()


# ### Calculate distance between means/medians for given gene + cancer type

# In[8]:


from scipy.spatial.distance import pdist, squareform

def get_centroids_and_distance(identifier, info_df, centroid_method='mean'):
    """Compute per-group centroids and pairwise centroid distances.

    Groups samples in ``info_df`` by their ``num_hits`` label
    ('both'/'none'/'one'), takes the mean or median of the (module-level)
    ``data_df`` features for each group, and returns Euclidean distances
    between the group centroids in a fixed combination order.

    Returns a tuple of (group labels, group label pairs, per-group sample
    counts, distances ordered to match the label pairs; NaN where a group
    had no samples).
    """
    groups = ['both', 'none', 'one']
    group_combinations = list(it.combinations(groups, 2))
    class_name = info_df['class_name'].values[0]

    # get (gene/protein) expression data for samples
    samples = info_df.index.intersection(data_df.index)
    info_df = info_df.reindex(samples)

    # if one group has no samples, we have to make sure to assign it 0 count
    class_counts = []
    hit_class_counts = info_df.groupby('num_hits').count().class_name
    for group in groups:
        if group in hit_class_counts.index:
            class_counts.append(hit_class_counts[group])
        else:
            class_counts.append(0)

    # group by number of hits, then calculate centroids
    centroids_df = (data_df
        .reindex(samples)
        .merge(info_df['num_hits'], left_index=True, right_index=True)
        .groupby('num_hits')
    )
    if centroid_method == 'mean':
        centroids_df = centroids_df.mean()
    elif centroid_method == 'median':
        centroids_df = centroids_df.median()
    else:
        raise NotImplementedError(
            'centroid method {} not implemented'.format(centroid_method)
        )

    # calculate distance between centroids
    # make sure this is in the same order for each identifier, and
    # handle NA distances here (if one group doesn't have any samples)
    # (pdist returns a condensed vector whose order matches
    #  it.combinations of the row labels, so the index lookup is valid)
    dists = pdist(centroids_df.values, metric='euclidean')
    dist_combinations = list(it.combinations(hit_class_counts.index, 2))
    ordered_dists = []
    for cmb in group_combinations:
        if cmb not in dist_combinations:
            ordered_dists.append(np.nan)
        else:
            cmb_ix = dist_combinations.index(cmb)
            ordered_dists.append(dists[cmb_ix])

    return (groups, group_combinations, class_counts, ordered_dists)

get_centroids_and_distance('TP53_BRCA', park_loss_info['TP53_BRCA'], 'median')


# ### Calculate centroid distance between "hits", per class
#
# Class 1 = look at both loss and gain (should be one-hit in neither)
# Class 2 = only look at loss (should be one-hit here)
# Class 3 = only look at gain (should be one-hit here)
# Class 4 = look at both loss and gain (should be one-hit in both)

# In[9]:


class_counts_df = {}
results_df = {}
class_names = {}
counts_columns = None
results_columns = None

# get distances for copy loss, for class 1/2/4 genes
for identifier, loss_df in park_loss_info.items():
    class_name = loss_df.head(1).class_name.values[0]
    if class_name == 'class 3':
        continue
    # BUGFIX: was hardcoded 'mean', silently ignoring the centroid_method
    # notebook parameter (plot titles below label results with centroid_method)
    results = get_centroids_and_distance(identifier, loss_df, centroid_method)
    if counts_columns is None:
        counts_columns = results[0]
    else:
        assert counts_columns == results[0]
    if results_columns is None:
        results_columns = ['{}/{}'.format(i, j) for i, j in results[1]]
    class_names[identifier] = class_name
    class_counts_df[identifier] = results[2]
    results_df[identifier] = results[3]

class_counts_loss_df = (
    pd.DataFrame(class_counts_df.values(),
                 index=class_counts_df.keys(),
                 columns=counts_columns)
      .merge(pd.Series(class_names).rename('class_name'),
             left_index=True, right_index=True)
)

results_loss_df = (
    pd.DataFrame(results_df.values(),
                 index=results_df.keys(),
                 columns=results_columns)
      .merge(pd.Series(class_names).rename('class_name'),
             left_index=True, right_index=True)
)

print(class_counts_loss_df.shape)
class_counts_loss_df.head()


# In[10]:


if min_sample_count is not None:
    valid_rows = (class_counts_loss_df
        .drop(columns=['class_name'])
        .astype(int) > min_sample_count
    ).all(axis=1)
    print('Valid rows:', valid_rows.sum(), '/', class_counts_loss_df.shape[0])
    valid_ids = class_counts_loss_df.index[valid_rows]
    valid_ids[:5]


# In[11]:


if min_sample_count is not None:
    results_loss_df = results_loss_df.loc[valid_ids, :]

print(results_loss_df.shape)
results_loss_df.head()


# In[12]:


class_counts_df = {}
results_df = {}
class_names = {}
counts_columns = None
results_columns = None

# get distances for copy gain, for class 1/3/4 genes
for identifier, gain_df in park_gain_info.items():
    class_name = gain_df.head(1).class_name.values[0]
    if class_name == 'class 2':
        continue
    # BUGFIX: was hardcoded 'mean' (see copy-loss loop above)
    results = get_centroids_and_distance(identifier, gain_df, centroid_method)
    if counts_columns is None:
        counts_columns = results[0]
    else:
        assert counts_columns == results[0]
    if results_columns is None:
        results_columns = ['{}/{}'.format(i, j) for i, j in results[1]]
    class_names[identifier] = class_name
    class_counts_df[identifier] = results[2]
    results_df[identifier] = results[3]

class_counts_gain_df = (
    pd.DataFrame(class_counts_df.values(),
                 index=class_counts_df.keys(),
                 columns=counts_columns)
      .merge(pd.Series(class_names).rename('class_name'),
             left_index=True, right_index=True)
)

results_gain_df = (
    pd.DataFrame(results_df.values(),
                 index=results_df.keys(),
                 columns=results_columns)
      .merge(pd.Series(class_names).rename('class_name'),
             left_index=True, right_index=True)
)

print(class_counts_gain_df.shape)
class_counts_gain_df.head()


# In[13]:


if min_sample_count is not None:
    valid_rows = (class_counts_gain_df
        .drop(columns=['class_name'])
        .astype(int) > min_sample_count
    ).all(axis=1)
    print('Valid rows:', valid_rows.sum(), '/', class_counts_gain_df.shape[0])
    valid_ids = class_counts_gain_df.index[valid_rows]
    valid_ids[:5]


# In[14]:


if min_sample_count is not None:
    results_gain_df = results_gain_df.loc[valid_ids, :]

print(results_gain_df.shape)
results_gain_df.head()


# ### Plot centroid distance results
#
# To make our plots, we'll just get rid of NaN rows (i.e. genes/cancer types that don't have at least one sample in each "hit" category).

# In[15]:


sns.set({'figure.figsize': (18, 12)})
fig, axarr = plt.subplots(2, 2)

# plot class counts distributions for copy loss
# we don't show class 3 genes here because they're defined based on
# copy gain (they are shown in the next plot)
for ix, class_name in enumerate(['class 1', 'class 2', 'class 4']):
    ax = axarr[ix // 2, ix % 2]
    # convert dataframe to long-form to plot it
    plot_df = (class_counts_loss_df[class_counts_loss_df.class_name == class_name]
        .drop(columns='class_name')
        .dropna(axis='index')
        .reset_index()
        .rename(columns={'index': 'identifier'})
        .melt(id_vars='identifier', value_name='count', var_name='num_hits')
    )
    sns.kdeplot(data=plot_df, x='count', hue='num_hits', ax=ax)
    ax.set_title('Distribution of samples having the given number of hits, '
                 'copy loss, {}'.format(class_name))
    ax.set_xlim(-10, 500)


# In[16]:


sns.set({'figure.figsize': (18, 12)})
fig, axarr = plt.subplots(2, 2)

# plot class counts distributions for copy gain
# we don't show class 2 genes here because they're defined based on
# copy loss (they were shown in the last plot)
for ix, class_name in enumerate(['class 1', 'class 3', 'class 4']):
    ax = axarr[ix // 2, ix % 2]
    # convert dataframe to long-form to plot it
    plot_df = (class_counts_gain_df[class_counts_gain_df.class_name == class_name]
        .drop(columns='class_name')
        .dropna(axis='index')
        .reset_index()
        .rename(columns={'index': 'identifier'})
        .melt(id_vars='identifier', value_name='count', var_name='num_hits')
    )
    sns.kdeplot(data=plot_df, x='count', hue='num_hits', ax=ax)
    ax.set_title('Distribution of samples having the given number of hits, '
                 'copy gain, {}'.format(class_name))
    ax.set_xlim(-10, 500)
    ax.set_ylim(0.0, 0.03)


# In[17]:


sns.set({'figure.figsize': (18, 12)})
fig, axarr = plt.subplots(2, 2)

# plot copy loss results here
for ix, class_name in enumerate(['class 1', 'class 2', 'class 4']):
    ax = axarr[ix // 2, ix % 2]
    # convert dataframe to long-form to plot it
    plot_df = (
        results_loss_df[results_loss_df.class_name == class_name]
        .drop(columns='class_name')
        .dropna(axis='index')
        .reset_index()
        .rename(columns={'index': 'identifier'})
        .melt(id_vars='identifier', value_name='distance', var_name='num_hits')
    )
    sns.boxplot(data=plot_df, x='num_hits', y='distance', ax=ax,
                order=['none/one', 'both/one', 'both/none'])
    # axis limits tuned per data type / preprocessing so panels are comparable
    if preprocessing == 'pca':
        if data_type == 'expression':
            ax.set_ylim(0, 180)
        elif data_type == 'rppa':
            ax.set_ylim(0, 20)
    elif data_type == 'rppa':
        ax.set_ylim(0, 15)
    elif data_type == 'expression':
        ax.set_ylim(0, 1.75e6)
    ax.set_title('Distributions of {} distance for {} genes, copy loss, {} data'.format(
        centroid_method, class_name, data_type))


# In[18]:


sns.set({'figure.figsize': (18, 12)})
fig, axarr = plt.subplots(2, 2)

# plot copy gain results
<filename>plugins/motifmining.py
#21datalabplugin
# Motif mining plugin for the 21datalab model: templates declaring the
# miner function trees, plus the miner implementations themselves.
import numpy
import numpy as np
import time
from scipy.ndimage import gaussian_filter
from system import __functioncontrolfolder
from model import date2secs
from model import epochToIsoString
from mininghelper import *
from pytz import timezone
from model import date2secs
import copy
from model import date2secs,secs2date
import mininghelper as mnh

# use a list to avoid loading of this in the model
mycontrol = [copy.deepcopy(__functioncontrolfolder)]
mycontrol[0]["children"][-1]["value"]="threaded"

"""
 HOW TO USE the motif miner

 prepare the template
 - add the template to the model
 - connect the referencer MotifMiner.widget
 - connect the referencer MotifMiner.table
 - add the MotifMiner.annotations folder to the referencer of your annotations (typically widget.hasAnnotation.annotaions)

 prepare the widget:
 - widget.hasAnnotation.colors: add this entry:  "pattern_match": {"color": "#808080","pattern": "v"}
 - hasAnnotation.visibleTags need an entry "pattern_match"
 - also make sure the widget.startTime,widget.endTime is being watched by the visibleElement observer (so we can use the "jump" function in the cockpit)
 - selecte the motif
 - either in the tree or: open the cockpit (hook it to the context menu first) select the motif in the time series view click
 - prepare the output
 - hit run, wait 3 sec, hit stop
 - now the score variable should show the right length, it can be used to display
"""

# Template folder describing the MotifMiner function node and its children.
motifMinerTemplate = {
    "name": "MotifMiner1",
    "type": "folder",
    "children":[
        {
            "name": "MotifMiner",
            "type": "function",
            "functionPointer": "motifmining.motif_miner",   # filename.functionname
            "autoReload": True,                             # set this to true to reload the module on each execution
            "children": [
                {"name": "motif", "type": "referencer"},    # the one motif we are using
                {"name": "score", "type": "column"},
                {"name": "algorithm", "type": "const","value":"pearson", "validation":{"values":["euclidean","pearson","pearson_center","convolution"]}},  # the algorithm used, one of ...
                {"name": "widget","type":"referencer"} ,    # the widget to which this miner belongs which is used (to find the selected motif
                {"name": "table","type":"referencer"},      # for the variables and times
                # NOTE(review): the "table" child below is an exact duplicate of the
                # line above (same name/type/comment) — looks like a copy-paste;
                # confirm whether the model tolerates duplicate children.
                {"name": "table","type":"referencer"},      # for the variables and times
                {"name": "peaks","type":"variable"},
                {"name": "addNoise","type":"const","value":0.001},
                {"name": "subSamplingFactor","type":"const","value":2},
                {"name": "subtractPolynomOrder", "type": "const", "value": 2,"validation":{"values":["none",1,2,3]}},
                {"name": "annotations","type":"folder"},    # the results
                mycontrol[0]
            ]
        },
        {
            "name": "peakSearch",
            "type": "function",
            "functionPointer": "motifmining.motif_jumper",  # filename.functionname
            "autoReload": True,                             # set this to true to reload the module on each execution
            "children": [
                {"name": "miner", "type": "referencer","references":["MotifMiner1.MotifMiner"]},  # the one motif we are using
                {"name": "jumpPos", "type": "variable", "value":0},
                {"name": "jumpInc", "type": "const", "value":1},       # 1,-1 for forward backwards
                {"name": "threshold", "type": "const","value":0.5},    # the detection threshold
                __functioncontrolfolder
            ]
        },
        {
            "name": "progress",
            "type": "observer",
            "children": [
                {"name": "enabled", "type": "const", "value": True},        # turn on/off the observer
                {"name": "triggerCounter", "type": "variable", "value": 0}, # increased on each trigger
                {"name": "lastTriggerTime", "type": "variable", "value": ""},  # last datetime when it was triggered
                {"name": "targets", "type": "referencer","references":["MotifMiner1.MotifMiner.control.progress"]},  # pointing to the nodes observed
                {"name": "properties", "type": "const", "value": ["value"]},  # properties to observe ["children","value","forwardRefs"]
                {"name": "onTriggerFunction", "type": "referencer"},          # the function(s) to be called when triggering
                {"name": "triggerSourceId", "type": "variable"},              # the sourceId of the node which caused the observer to trigger
                {"name": "hasEvent", "type": "const", "value": True},         # set to event string if true if we want an event as well
                {"name": "eventString", "type": "const", "value": "motifminer.progress"},  # the string of the event
                {"name": "eventData", "type": "const", "value": {"text": "observer status update"}}  # the value-dict will be part of the SSE event["data"], the key "text": this will appear on the page
            ]
        },
        {"name": "cockpit", "type": "const", "value": "/customui/motifminer1cockpit.htm"}  # the cockpit for the motif miner
    ]
}

# Template folder for the prominent-point-search (PPS) pipeline variant.
ppsMinerPipeline = {
    "name": "PPSMinerPipeline",
    "type": "folder",
    "children":[
        {
            "name": "PPSMiner",
            "type": "function",
            "functionPointer": "motifmining.pps_miner",  # filename.functionname
            "autoReload": True,                          # set this to true to reload the module on each execution
            "children": [
                {"name": "motif", "type": "referencer"},  # the one motif we are using
                {"name": "widget","type":"referencer"} ,  # the widget to which this miner belongs which is used (to find the selected motif
                {"name": "peaks","type":"variable"},      # a list of time points as the result of the mining, these are the matches
                {"name": "preFilter","type":"const","value":2.8},   # gaussian smoothing filter applied to the raw data, this parameter is the "sigma" of the gaussian filter
                {"name": "postFilter","type":"const","value":1},    # same as prefilter, but applied after processing of the data (diff, polyfit etc)
                {"name": "differentiate","type":"const","value":True},  # differentiate the signal for processing (to get rid of drifts)
                {"name": "timeRanges","type":"const","value":{"1":0.7,"7":0.5}},  # see mininghelper.pps_mining for explanation
                {"name": "valueRanges","type":"const","value":{"1":0.8}},
                {"name": "typeFilter","type":"const","value":["max","min"]},  # use only these types of prominent points
                {"name": "subtractPolynomOrder", "type": "const", "value": None,"validation":{"values":[None,1,2,3]}},
                {"name": "annotations","type":"folder"},  # the results, the matches
                mycontrol[0]
            ]
        },
        {
            "name": "peakSearch",
            "type": "function",
            "functionPointer": "motifmining.motif_jumper",  # filename.functionname
            "autoReload": True,                             # set this to true to reload the module on each execution
            "children": [
                {"name": "miner", "type": "referencer","references":["PPSMinerPipeline.PPSMiner"]},  # the one motif we are using
                {"name": "jumpPos", "type": "variable", "value":0},
                {"name": "jumpInc", "type": "const", "value":1},     # 1,-1 for forward backwards
                {"name": "threshold", "type": "const","value":0.5},  # the detection threshold
                __functioncontrolfolder
            ]
        },
        {
            "name": "progress",
            "type": "observer",
            "children": [
                {"name": "enabled", "type": "const", "value": True},        # turn on/off the observer
                {"name": "triggerCounter", "type": "variable", "value": 0}, # increased on each trigger
                {"name": "lastTriggerTime", "type": "variable", "value": ""},  # last datetime when it was triggered
                {"name": "targets", "type": "referencer","references":["PPSMinerPipeline.PPSMiner.control.progress"]},  # pointing to the nodes observed
                {"name": "properties", "type": "const", "value": ["value"]},  # properties to observe ["children","value","forwardRefs"]
                {"name": "onTriggerFunction", "type": "referencer"},          # the function(s) to be called when triggering
                {"name": "triggerSourceId", "type": "variable"},              # the sourceId of the node which caused the observer to trigger
                {"name": "hasEvent", "type": "const", "value": True},         # set to event string if true if we want an event as well
                {"name": "eventString", "type": "const", "value": "motifminer.progress"},  # the string of the event
                {"name": "eventData", "type": "const", "value": {"text": "observer status update"}}  # the value-dict will be part of the SSE event["data"], the key "text": this will appear on the page
            ]
        },
        {"name": "cockpit", "type": "const", "value": "/customui/ppscockpit.htm"}  # the cockpit for the motif miner
    ]
}


def motif_miner(functionNode):
    """Run the motif search configured under *functionNode*.

    Reads the motif selection from the attached widget (or the previous
    selection), clears old scores/annotations, then scans the time series
    for matches.  (Truncated in this chunk — the scan loop continues past
    the visible source.)
    """
    logger = functionNode.get_logger()
    logger.info("==>>>> in motif_miner " + functionNode.get_browse_path())
    progressNode = functionNode.get_child("control").get_child("progress")
    progressNode.set_value(0)

    tableNode = functionNode.get_child("table").get_targets()[0]
    timeNode = tableNode.get_child("timeField").get_targets()[0]
    subSamplingFactor = functionNode.get_child("subSamplingFactor").get_value()
    sigmaNoise = functionNode.get_child("addNoise").get_value()
    annotations = functionNode.get_child("annotations")
    myModel = functionNode.get_model()
    signalNode = functionNode.get_child("control").get_child("signal")
    polynomPre = functionNode.get_child("subtractPolynomOrder").get_value()
    if polynomPre != "none":
        polynomPre = "subtract_poly_"+str(int(polynomPre))
    logger.debug(f"polynome pre is {polynomPre}")
    algo = functionNode.get_child("algorithm").get_value()

    """
        preparation:
        - find the current widget, take the current selected selection for my motif
        - if not available
        - connect the result to the table
    """
    myWidget = functionNode.get_child("widget").get_targets()[0]
    motifs = myWidget.get_child("hasAnnotation").get_child("selectedAnnotations").get_leaves()
    motif = None
    if motifs:
        motif = motifs[0]
        functionNode.get_child("motif").add_references(motif,deleteAll=True)  # take this annotation
    else:
        #there is currently no selection, we take the motif from last time if we have one
        motifs = functionNode.get_child("motif").get_targets()
        if motifs:
            motif = motifs[0]
    if not motif:
        logger.error("have no motif")
        return False

    # prepare the result: delete previous scores and annotations
    scoreNode = functionNode.get_child("score")
    functionNode.get_child("peaks").set_value([])
    scoreNode.connect_to_table(tableNode)  # this will make it part of the table and write it all to numpy.inf
    try:
        myModel.disable_observers()
        annos = annotations.get_children()
        if annos:
            for anno in annos:
                anno.delete()
    # NOTE(review): bare except hides the real failure while restoring
    # observers; consider catching Exception and logging it.
    except:
        myModel.enable_observers()
        return False
    myModel.enable_observers()
    myModel.notify_observers(annotations.get_id(), "children")  # trigger the widgets to delete the annotations

    #prepare the motif
    motifVariable = motif.get_child("variable").get_targets()[0]
    start = motif.get_child("startTime").get_value()
    end = motif.get_child("endTime").get_value()
    motifEpochStart = date2secs(start)
    motifEpochEnd = date2secs(end)
    logger.debug(f"motif: {motifVariable.get_browse_path()}, {start} .. {end} ")
    timeIndices = timeNode.get_time_indices(start,end)
    logger.debug(f" motif len {len(timeIndices)}")

    y = motifVariable.get_value().copy()   # y holds the full data
    y = gaussian_filter(y, sigma=2)        # smoothen it
    #normalize y
    #y = normalize_range(y,0,1) #normalize
    t = timeIndices[::subSamplingFactor]   #downsampling of the data
    yMotif = y[t].copy()                   # yMotif holds the motif data downsampled
    noise = np.random.normal(0, sigmaNoise, len(yMotif))
    yMotif = yMotif + noise  # adds a small amount of noise to the template in order to not create pathological results

    # prepare the result
    motifTimeLen = (timeIndices[-1]-timeIndices[0])
    scoreIndices = numpy.arange(timeIndices[0]-1*motifTimeLen, timeIndices[-1]+ 200*motifTimeLen )
    scoreTimes = scoreIndices[::subSamplingFactor]
    #y = y[::subSamplingFactor] # full data
    #y=y[scoreTimes]

    #we will now go through the data and each 2 seconds update the results with the new results
    runningR = True
    runningL = True
    blockSize = 10  # times the motif len
    blockStartR = timeIndices[0]  # start is the motif start time for the search to the right
    blockStartL = timeIndices[0]-(blockSize-1)*motifTimeLen  # start is the motif start time for the search
<reponame>ghanem-mhd/LabChain
"""Test cases for the networking component."""
import json
import time
from unittest import TestCase

from werkzeug.test import Client

from labchain.datastructure.block import Block
from labchain.network.networking import ServerNetworkInterface, TransactionDoesNotExistException, BlockDoesNotExistException
from labchain.datastructure.transaction import Transaction


class MockJsonRpcClient:
    """Fake JSON-RPC client: records outgoing calls, replays queued responses."""

    def __init__(self):
        # key "ip:port" -> list of (method, params) tuples, in call order
        self.requests = {}
        # LIFO stack of canned responses (send() pops from the end)
        self.response_queue = []

    def queue_response(self, response_data):
        """Set the content of the result field for future requests."""
        self.response_queue.append(response_data)

    def send(self, ip_address, port, method, params=tuple()):
        """Store a json RPC call in self.requests."""
        key = str(ip_address) + ':' + str(port)
        if key not in self.requests:
            self.requests[key] = []
        self.requests[key].append((method, params))
        response = self.response_queue.pop()
        return response['result']


class MockCryptoHelper:
    """Fake crypto helper: hands out stable, sequential 5-digit fake hashes."""

    def __init__(self):
        self.key_counter = 1
        self.hash_counter = 1
        self.hash_map = {}

    def hash(self, message):
        # Same message always maps to the same fake hash.
        if message not in self.hash_map:
            self.hash_map[message] = '{num:05d}'.format(num=self.hash_counter)
            self.hash_counter += 1
        return self.hash_map[message]


class CommonTestCase(TestCase):
    """Shared fixture: a ServerNetworkInterface wired to in-memory mocks."""

    # NOTE(review): missing `self` — calling this via a bound reference
    # (self.empty_function()) would raise TypeError; it is only ever passed
    # as an unused callback here. Confirm before invoking it.
    def empty_function():
        """Empty function for unneeded functionality."""
        pass

    def create_server_network_interface(self, json_rpc_client):
        return ServerNetworkInterface(json_rpc_client, {}, MockCryptoHelper(), self.on_block_received,
                                      self.on_transaction_received, self.get_block, self.get_block_by_hash,
                                      self.get_transaction, self.get_blocks_by_hash_range, self.empty_function,
                                      self.get_n_last_transactions, self.empty_function, port=6666)

    def setUp(self):
        # key block ID -> value block instance
        self.available_blocks = {}
        # key transaction hash -> value transaction instance
        self.available_transactions = {}
        self.received_blocks = []
        self.received_transactions = []
        self.json_rpc_client = MockJsonRpcClient()
        self.network_interface = self.create_server_network_interface(self.json_rpc_client)
        self.client = Client(self.network_interface.application)

    def on_block_received(self, block):
        self.received_blocks.append(block)

    def on_transaction_received(self, transaction):
        self.received_transactions.append(transaction)

    def get_block(self, block_id):
        if block_id in self.available_blocks:
            return [self.available_blocks[block_id]]
        return []

    def get_blocks_by_hash_range(self, start_hash=None, end_hash=None):
        # Range arguments are ignored by the mock: returns every block.
        return [block for block in self.available_blocks.values()]

    def get_block_by_hash(self, block_hash):
        for block_id in self.available_blocks:
            if self.available_blocks[block_id].merkle_tree_root == block_hash:
                return self.available_blocks[block_id]
        return None

    def get_transaction(self, transaction_hash):
        # Returns (transaction, containing-block-hash), or (None, None).
        if transaction_hash in self.available_transactions:
            return self.available_transactions[transaction_hash], 'test_block_hash'
        return None, None

    def get_n_last_transactions(self,n):
        # Mock ignores n and returns the whole store.
        return self.available_transactions

    def get_peer_list(self):
        return self.network_interface.peers

    def add_peer(self, host, port=6666, info=None):
        if info is None:
            info = {}
        self.network_interface.peers[host] = {port: info}

    def make_request(self, data):
        """Make a request to the node and return the response dict."""
        app_iter, status, headers = self.client.post('/', data=data,
                                                     environ_base={'REMOTE_ADDR': '1.2.3.4'},
                                                     headers={'Content-Type': 'application/json'})
        result = ''
        for line in app_iter:
            result += line.decode()
        return result

    def get_last_request(self, host, port):
        """Return the (method, params) of the newest request sent to host:port."""
        key = str(host) + ':' + str(port)
        if key not in self.json_rpc_client.requests or len(self.json_rpc_client.requests[key]) == 0:
            return None, None
        return self.json_rpc_client.requests[key][-1]

    def assert_json_equal(self, json_expected, json_actual):
        """Assert that two JSON strings contain the same data."""
        if type(json_expected) is str:
            json_expected = json.loads(json_expected)
        if type(json_actual) is str:
            json_actual = json.loads(json_actual)
        self.assertEqual(json_expected, json_actual)


class PeerListExchangeTestCase(CommonTestCase):
    """getPeers / advertisePeer request handling and client-side peer sync."""

    def test_server_test_get_peers_with_one_entry(self):
        """Test case #1."""
        # given
        self.add_peer('192.168.2.3', 6666)
        # when
        response_data = self.make_request('{ "jsonrpc": "2.0", "method": "getPeers", "params":[], "id": 1}')
        # then
        self.assert_json_equal(response_data, '{ "jsonrpc": "2.0", "result": {"192.168.2.3": {"6666": {}}}, "id": 1}')

    def test_server_test_get_peers_with_no_entries(self):
        """Test case #1a."""
        # when
        response_data = self.make_request('{ "jsonrpc": "2.0", "method": "getPeers", "params":[], "id": 1}')
        # then
        self.assert_json_equal(response_data, '{ "jsonrpc": "2.0", "result": {}, "id": 1}')

    def test_server_advertise_peer_with_port_param(self):
        """Test case #2."""
        # when
        response_data = self.make_request('{ "jsonrpc": "2.0", "method": "advertisePeer", "params":[6667], "id": 1}')
        # then
        self.assert_json_equal(response_data, '{ "jsonrpc": "2.0", "result": true, "id": 1}')
        # NOTE(review): expected key "172.16.31.10" does not match the
        # REMOTE_ADDR '1.2.3.4' used by make_request — looks like an
        # anonymization artifact in this copy; confirm against upstream.
        self.assertDictEqual(self.network_interface.peers, {"172.16.31.10": {6667: {}}})

    def test_server_advertise_peer_with_no_port_param(self):
        """Test case #2a."""
        # when
        response_data = self.make_request('{ "jsonrpc": "2.0", "method": "advertisePeer", "id": 1}')
        # then
        self.assert_json_equal(response_data, '{ "jsonrpc": "2.0", "result": true, "id": 1}')
        # NOTE(review): same anonymized-IP concern as above.
        self.assertDictEqual(self.network_interface.peers, {"172.16.31.10": {6666: {}}})

    def test_client_exchange_peer_list(self):
        """Test case #3."""
        # given
        self.add_peer('192.168.121.77', 6666)
        self.add_peer('192.168.100.4', 6666)
        # when
        self.json_rpc_client.queue_response({'jsonrpc': '2.0', 'result': {'192.168.2.3': {6666: {}}}, 'id': 1})
        self.json_rpc_client.queue_response({'jsonrpc': '2.0', 'result': {'192.168.5.6': {6666: {}}}, 'id': 1})
        self.network_interface.update_peer_lists()
        # then
        last_request_method, last_request_params = self.get_last_request('192.168.121.77', 6666)
        self.assertEqual(last_request_method, 'getPeers')
        self.assertFalse(last_request_params)
        last_request_method, last_request_params = self.get_last_request('192.168.100.4', 6666)
        self.assertEqual(last_request_method, 'getPeers')
        self.assertFalse(last_request_params, [])
        self.assertDictEqual(self.network_interface.peers,
                             {'192.168.2.3': {6666: {}}, '192.168.5.6': {6666: {}},
                              '192.168.121.77': {6666: {}}, '192.168.100.4': {6666: {}}})

    def test_client_advertise_peer(self):
        """Test case #4."""
        # given
        self.add_peer('192.168.121.77', 6666)
        self.add_peer('192.168.100.4', 6666)
        # when
        self.json_rpc_client.queue_response({'jsonrpc': '2.0', 'result': True, 'id': 1})
        self.json_rpc_client.queue_response({'jsonrpc': '2.0', 'result': True, 'id': 1})
        self.network_interface.advertise_to_peers()
        # then
        last_request_method, last_request_params = self.get_last_request('192.168.121.77', 6666)
        self.assertEqual(last_request_method, 'advertisePeer')
        self.assertEqual([6666], last_request_params)
        last_request_method, last_request_params = self.get_last_request('192.168.100.4', 6666)
        self.assertEqual(last_request_method, 'advertisePeer')
        self.assertEqual([6666], last_request_params)


class SendTransactionTestCase(CommonTestCase):
    """sendTransaction handling, server side and client side."""

    def test_send_transaction_server_valid(self):
        """Test Case #5"""
        # When
        # Server gets request from Client
        response = self.make_request('{"jsonrpc": "2.0", "method": "sendTransaction", '
                                     '"params": [{"sender": "test_sender", "receiver": "test_receiver", '
                                     '"payload": "test_payload", "signature": "test_signature"}], "id": 1}')
        # Then
        # assert transaction
        transaction = self.received_transactions[0]
        self.assertEqual(transaction.sender, 'test_sender')
        self.assertEqual(transaction.receiver, 'test_receiver')
        self.assertEqual(transaction.payload, 'test_payload')
        self.assertEqual(transaction.signature, 'test_signature')
        # assert response
        self.assert_json_equal(response, '{"jsonrpc": "2.0", "result": null, "id": 1}')

    def test_send_transaction_client_valid(self):
        """Test Case #6"""
        # given
        self.add_peer('192.168.2.3', 6666)
        test_transaction = Transaction('test_sender', 'test_receiver', 'test_payload', 'test_signature')
        # when
        self.json_rpc_client.queue_response({
            'jsonrpc': '2.0',
            'result': True,
            'id': 1
        })
        self.network_interface.sendTransaction(test_transaction)
        # then
        last_request_method, last_request_params = self.get_last_request('192.168.2.3', 6666)
        self.assertEqual(last_request_method, 'sendTransaction')
        self.assertEqual(last_request_params, [{"sender": "test_sender", "receiver": "test_receiver",
                                                "payload": "test_payload", "signature": "test_signature"}])


class SendBlockTestCase(CommonTestCase):
    """sendBlock handling, server side and client side."""

    def test_send_block_server_valid(self):
        """Test Case #7"""
        # When
        # Server gets request from Client
        response = self.make_request('{"jsonrpc": "2.0", "method": "sendBlock", '
                                     '"params": [{'
                                     '"nr" : 2, "merkleHash" : "merkle_hash123", '
                                     '"predecessorBlock" : "pre_hash123","nonce" : 6969, '
                                     '"creator" : "test_creator", "timestamp": 0.0 , "difficulty" : 0, '
                                     '"transactions" : ['
                                     '{"sender": "test_sender", "receiver": "test_receiver", '
                                     '"payload": "test_payload", "signature": "test_signature"}]}], "id": 1}')
        # Then
        block = self.received_blocks[0]
        self.assertEqual(block.merkle_tree_root, 'merkle_hash123')
        self.assertEqual(block.predecessor_hash, 'pre_hash123')
        self.assertEqual(block.nonce, 6969)
        self.assertEqual(block.block_creator_id, 'test_creator')
        self.assertEqual(len(block.transactions), 1)
        transaction = block.transactions[0]
        self.assertEqual(transaction.sender, 'test_sender')
        self.assertEqual(transaction.receiver, 'test_receiver')
        self.assertEqual(transaction.payload, 'test_payload')
        self.assertEqual(transaction.signature, 'test_signature')
        # assert response
        self.assert_json_equal(response, '{"jsonrpc": "2.0", "result": null, "id": 1}')

    def test_send_block_client_valid(self):
        """Test Case #8"""
        # Given
        self.add_peer('192.168.100.4', 6666)
        now = time.time()
        # (Truncated in this chunk — the Block construction continues past
        # the visible source.)
        test_block = Block(2, 'merkle_hash123', 'pre_hash123', 'test_creator',
                           [Transaction('test_sender', 'test_receiver', 'test_payload',
'test_signature')], 6969, now) # when self.json_rpc_client.queue_response({ 'jsonrpc': '2.0', 'result': None, 'id': 1 }) self.network_interface.sendBlock(test_block) # then last_request_method, last_request_params = self.get_last_request('192.168.100.4', 6666) self.assertEqual(last_request_method, 'sendBlock') self.assert_json_equal(last_request_params, [{'nr': 2, 'timestamp': now, 'merkleHash': 'merkle_hash123', 'predecessorBlock': 'pre_hash123', 'nonce': 6969, 'creator': 'test_creator', 'difficulty': -1, 'transactions': [{'sender': 'test_sender', 'receiver': 'test_receiver', 'payload': 'test_payload', 'signature': 'test_signature'}]}]) class RequestTransactionServerTestCase(CommonTestCase): def test_request_transaction(self): """test case #9 """ # given self.add_peer('192.168.100.4', 6666) self.available_transactions['hash_of_transaction_#1'] = Transaction("test_sender", "test_receiver", "test_payload", "test_signature") # when json_rpc_request = {"jsonrpc": "2.0", "method": "requestTransaction", "params": ["hash_of_transaction_#1"], "id": 1} response = self.make_request(json.dumps(json_rpc_request)) # then self.assert_json_equal(response, '{"result": [{"sender": "test_sender", "receiver": "test_receiver", ' '"payload": "test_payload", "signature": "test_signature"}, "test_block_hash"], "id": 1,"jsonrpc": "2.0"}') def test_request_nonexistent_transaction(self): """test case #10 """ # given self.add_peer('192.168.100.4', 6666) # when json_rpc_request = {"jsonrpc": "2.0", "method": "requestTransaction", "params": ["hash_of_transaction_#1"], "id": 1} response = self.make_request(json.dumps(json_rpc_request)) # then self.assert_json_equal(response, '{ "jsonrpc": "2.0", "result": null, "id": 1}') class RequestTransactionClientTestCase(CommonTestCase): def test_request_transaction(self): """test case #11 """ # given self.add_peer('192.168.100.4', 6666) self.json_rpc_client.queue_response({'jsonrpc': '2.0', 'result': [{ 'sender': 'pubkey_of_test_sender', 'receiver': 
'pubkey_of_test_receiver', 'payload': 'test_payload', 'signature': 'test_signature'}, 'test_block_hash'], 'id': 1}) transaction, block_hash = self.network_interface.requestTransaction('hash_of_transaction_#1') # then last_request_method, last_request_params = self.get_last_request('192.168.100.4', 6666) self.assertEqual(last_request_method, 'requestTransaction') self.assertEqual(last_request_params, ['hash_of_transaction_#1']) self.assertEqual(transaction.sender, 'pubkey_of_test_sender') self.assertEqual(transaction.receiver, 'pubkey_of_test_receiver') self.assertEqual(transaction.payload, 'test_payload') self.assertEqual(transaction.signature, 'test_signature') def test_request_nonexistent_transaction(self): """test case #12 """ # given self.add_peer('192.168.100.4', 6666) # when self.json_rpc_client.queue_response({'jsonrpc': '2.0', 'result': None, 'id': 1}) with self.assertRaises(TransactionDoesNotExistException): self.network_interface.requestTransaction('non_existent_hash') last_request_method, last_request_params = self.get_last_request('192.168.100.4', 6666) self.assertEqual(last_request_method, 'requestTransaction') class RequestBlockServerTestCase(CommonTestCase): def test_request_block(self): """Test case #13.""" # given self.available_blocks[2] = Block(2, 'test_merkle_hash', 'test_pred_block_hash', 'test_creator', [ Transaction('test_sender', 'test_receiver', 'test_payload', 'test_signature') ], nonce=5, timestamp=1337.0) # when json_rpc_request = {"jsonrpc": "2.0", "method": "requestBlock", "params": [2], "id": 1} response = self.make_request(json.dumps(json_rpc_request)) # then self.assert_json_equal(response, '{ "jsonrpc": "2.0", "result": [{"nr": 2, "timestamp": 1337.0, ' '"merkleHash" : "test_merkle_hash", ' '"difficulty" : -1,' '"predecessorBlock" : "test_pred_block_hash", "nonce" : 5, ' '"creator" : "test_creator", "transactions" : ' '[{"sender": "test_sender", "receiver": "test_receiver", ' '"payload": "test_payload", "signature": 
"test_signature"}]}], "id":1}' ) def test_request_block_by_hash(self): """Test case #13a.""" # given self.available_blocks[2] = Block(2, 'test_merkle_hash', 'test_pred_block_hash', 'test_creator', [ Transaction('test_sender', 'test_receiver', 'test_payload', 'test_signature') ], nonce=5, timestamp=1337.0) # when json_rpc_request = {"jsonrpc": "2.0", "method": "requestBlockByHash", "params": ['test_merkle_hash'], "id": 1} response = self.make_request(json.dumps(json_rpc_request)) # then self.assert_json_equal(response, '{ "jsonrpc": "2.0", "result": {"nr": 2, "timestamp": 1337.0, ' '"merkleHash" : "test_merkle_hash", ' '"difficulty" : -1,' '"predecessorBlock" : "test_pred_block_hash", "nonce" : 5, ' '"creator" : "test_creator", "transactions" : ' '[{"sender": "test_sender", "receiver": "test_receiver", ' '"payload": "test_payload", "signature": "test_signature"}]}, "id":1}' ) def test_request_block_with_no_predecessor(self): """Test case #14.""" # given self.available_blocks[2] = Block(2, 'test_merkle_hash', None, 'test_creator', [ Transaction('test_sender', 'test_receiver', 'test_payload', 'test_signature') ], nonce=5, timestamp=1337.0) # when json_rpc_request = {"jsonrpc": "2.0", "method": "requestBlock", "params": [2], "id": 1} response = self.make_request(json.dumps(json_rpc_request)) # then self.assert_json_equal(response, '{ "jsonrpc": "2.0", "result": [{"nr": 2, "timestamp": 1337.0, ' '"merkleHash" : "test_merkle_hash", ' '"difficulty" : -1,' '"predecessorBlock" : null, "nonce" : 5, ' '"creator" : "test_creator", "transactions" : ' '[{"sender": "test_sender", "receiver": "test_receiver", ' '"payload": "test_payload", "signature": "test_signature"}]}], "id":1}' ) def request_nonexisting_block(self): """Test case #15.""" # when json_rpc_request = {"jsonrpc": "2.0", "method": "requestBlock", "params": [2], "id": 1} response = self.make_request(json.dumps(json_rpc_request)) # then self.assert_json_equal(response, '{"jsonrpc": "2.0", "result": null, id: 1}') def 
test_request_blocks_by_hash_range(self):
<filename>data/data_loader.py
"""Classes for loading various data types within the project, including
- Force plate XML data
- C3D data (to be moved here from another script)
- PLY data (to be moved here from another script)"""
import c3d
from vis.utils import *
import xml.etree.ElementTree as ET
from matplotlib import pyplot as plt
import tqdm
from scipy.ndimage.measurements import center_of_mass, label
from scipy import signal
from matplotlib.patches import Rectangle
from mpl_toolkits.mplot3d import Axes3D, art3d
from matplotlib.widgets import Slider
import plyfile
import os, sys, csv, torch
from smal_fitter.smbld_model.smbld_mesh import SMBLDMesh

path_join = os.path.join  # simplify function name


class DataSources:
    """Gives the locations of sources for various forms of data within the project.
    Absolute references are used for large data files only accessed on my HP computer
    Relative references are for smaller data types that need to be accessed on multiple computers"""

    # NOTE(review): every entry below is a machine-specific absolute Windows path;
    # these will need remapping on any other machine.
    ply_collections = r"E:\IIB Project Data\produced data\ply collections"  # Absolute reference
    forceplate_data = r"E:\IIB Project Data\produced data\forceplate_data"  # Absolute reference
    dynamics_data = r"E:\IIB Project Data\produced data\dynamics data"  # Absolute reference
    smal_outputs = r"E:\IIB Project Data\produced data\smal_outputs"  # Absolute reference
    c3d_data = r"C:\Users\Ollie\Dropbox\Ollie\University\IIB\Project\Pipeline\c3d_data"  # Absolute reference
    datasets = r"E:\IIB Project Data\Training data sets"  # Absolute reference


def cluster_recognition(data, merge=20, cap=False):
    """Takes an mxn array, returns B bounding boxes, in form ((x0, y0), (x1,y1)).
Merges any bboxs that have centres within <merge> of each other""" labelled, num_features = label(data) bboxs = [] for f in range(1, num_features + 1): a = np.where(labelled == f) y0, y1, x0, x1 = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1]) bboxs.append(((x0, y0), (x1, y1))) ## IF ANY BBOXES OVERLAP, COMBINE THEM for bbox in bboxs: (x0, y0), (x1, y1) = bbox gx, gy = (x0 + x1) / 2, (y0 + y1) / 2 for other in [b for b in bboxs if b is not bbox]: (ox0, oy0), (ox1, oy1) = other ogx, ogy = (ox0 + ox1) / 2, (oy0 + oy1) / 2 # print(bbox, other, (abs(ogx - gx) + abs(ogy - gy))) if (abs(ogx - gx) + abs(ogy - gy)) <= merge: bboxs.remove(bbox) bboxs.remove(other) bboxs.append(((min(x0, ox0), min(y0, oy0)), (max(x1, oy1), max(y1, oy1)))) return bboxs paw_colours = { "front right": "#99ffcc", # green "front left": "#ffff99", # red "rear left": "#66ffff", # blue "rear right": "#ff00ff", # fuschia } def centre_of_pressure(data): """Given an (MxN) array, return the (x, y) coord of the CoP""" y, x = center_of_mass(data) return x, y class Paw(Rectangle): """The paw for a particular frame. Has info on - name of paw, net reaction force, and bbox at that instant""" def __init__(self, bboxs, forces, frame_start, frame_stop): """Bbox in format ((x0, y0), (x1, y1)) first seen force = force (first seen) f0 = frame first seen""" (x0, y0), (x1, y1) = bboxs[0] self.x0, self.y0, self.x1, self.y1 = x0, y0, x1, y1 self.bboxs = bboxs self.mean_width = self.x0 - self.x1 self.mean_height = self.y0 - self.y1 self.x_mean, self.y_mean = x0 + .5 * (x1 - x0), y0 + .5 * (y1 - y0) self.forces = forces self.frame_start, self.frame_stop = frame_start, frame_stop def add_frame(self, force, bbox): self.forces.append(force) self.bboxs.append(bbox) def calc_means(self): all_means = np.array([[0.5 * (x0 + x1), 0.5 * (y0 + y1)] for (x0, y0), (x1, y1) in self.bboxs]) self.x_mean, self.y_mean = all_means.mean(axis=0) def bbox_is_paw(self, bbox, disp=0): """Returns True if bbox is part of current paw. 
Criteria is: CoM, x position is within 5 squares of mean CoM, y position, displaced by <disp>, is within 5 squares of initial """ (x0, y0), (x1, y1) = bbox within_n = lambda a, b, n=20: abs(a - b) <= n x_mean, y_mean = x0 + .5 * (x1 - x0), y0 + .5 * (y1 - y0) return within_n(x_mean, self.x0) and within_n(y_mean - disp, self.y_mean) def identify(self, side, end): self.end = end self.side = side self.name = f"{end} {side}" self.colour = paw_colours[self.name] super().__init__((self.x0, self.y0), (1 + self.x1 - self.x0), (1 + self.y1 - self.y0), fill=None, ec=self.colour, lw=2) def add_text(self, ax): self.text = ax.text((self.x0 + self.x1) / 2, self.y1, va="top", ha="center", s=self.name, color="white") def clear(self): self.remove() if hasattr(self, "text"): self.text.remove() # remove text as well if it has been created def set_frame(self, f, ax): n_frames = self.frame_stop - self.frame_start if f < self.frame_start: pass elif f == self.frame_start: ax.add_patch(self) elif f < self.frame_start + n_frames: (x0, y0), (x1, y1) = self.bboxs[f - self.frame_start] self.set_xy((x0, y0)) self.set_width(1 + x1 - x0), self.set_height(1 + y1 - y0) elif f == self.frame_start + n_frames + 1: self.remove() class ForcePlateData(): def __init__(self, src="", freq=100, n_frames=None, play_every=1, playback_speed=1.0, vis=False): self.fps = playback_speed * freq / play_every if src == "": src = askopenfilename() else: src += ".xml" self.src = src file_loc = os.path.join(DataSources.forceplate_data, src) tree = ET.parse(file_loc) for child in tree.getroot(): if "movements" in child.tag: movements = child for child in movements: id = 0 movement = list(movements)[0] *_, clips = list(movement) forceplate_data, velocity_data = list(clips) velocities = np.array([float(i.text) for i in list(velocity_data)[-1]]) vdt = velocities * 1 / freq # v * dt # Extract forceplate data _, id, begin, freq, count, units, cell_count, cell_size, data = list(forceplate_data) n_x, n_y = 
int(list(cell_count)[0].text), int(list(cell_count)[1].text) self.x_size, self.y_size = float(list(cell_size)[0].text), float(list(cell_size)[0].text) self.pressure_data = [] for quant in data: cell_begin, cell_count, cells = list(quant) cell_data = cells.text split_data = (cell_data.replace("\t", "").replace("\n", " ").split(" "))[ 1:-1] # ignore first and last point as these are blanks split_data = list(map(float, split_data)) x_start, y_start = int(list(cell_begin)[0].text), int(list(cell_begin)[1].text) x_count, y_count = int(list(cell_count)[0].text), int(list(cell_count)[1].text) data = np.reshape(split_data, (y_count, x_count)) ## pad data correctly (note: y _start measured from bottom) data = np.pad(data, [(n_y - y_count - y_start, y_start), (x_start, n_x - x_count - x_start)]) self.pressure_data.append(data) # Only select data from desired range of number of frames if n_frames is None: n_frames = len(self.pressure_data) self.n_frames = n_frames self.pressure_data = self.pressure_data[:n_frames:play_every] self.pressure_data = np.array(self.pressure_data) # For now, assume x and y cell size is in *mm*, so divide this result by 100 to convert from mm to cm self.force_data = self.pressure_data * self.x_size * self.y_size / 100 # Cluster the force data to get the reaction in each foot as a function of time self.bounding_boxes = bounding_boxes = [] # Identify the forces for each paw in each frame. # Current method: # Identify each paw using a cluster recognition function that identifies bounding boxes of connected data # Identify which paw by: # Front/back based on whether ahead/behind of the overall average y # Left/right based on whether ahead/behind of the local average x (either front x or back x). 
# This will need to be converted into some kind of running average to work out which foot # This process may need refining for longer clips where the dog moves relative to the treadmill more # First, identify center point as the mean (x, y) coordinate of all non zero data def centre_of_pressure(data): """Given a series of frames, each with an array of numerical data corresponding to pressure plate data, return the overall average x and y positions""" all_y_centres, all_x_centres = zip(*[center_of_mass(frame) for frame in data]) return np.nanmean(all_x_centres), np.nanmean(all_y_centres) _, self.global_y_centre = centre_of_pressure(self.pressure_data) # Center of pressure for all data y_centre_index = int(round(self.global_y_centre)) # convert to int for indexing # Apply clusters to all frames self.paws = paws = [] clusters, n_features = label(self.force_data > 0) # Identify individual paws minmax = lambda a: (np.min(a), np.max(a)) for n_feature in range(1, n_features + 1): a = np.where(clusters == n_feature) f_start, f_stop = minmax(a[0]) bboxs = [] forces = [] for f in range(f_start, f_stop + 1): b = np.where(clusters[f] == n_feature) y0, y1 = minmax(b[0]) x0, x1 = minmax(b[1]) bboxs.append(((x0, y0), (x1, y1))) forces.append(self.force_data[f, y0: y1 + 1, x0: x1 + 1].sum()) paws.append(Paw(bboxs, forces, f_start, f_stop)) overlap = lambda a1, a2, b1, b2, buff=10: (a1 - buff < b1 < a2 + buff) or (a1 - buff < b2 < a2 + buff) for paw in paws: # get list of all paws that are present in same timeframe concurrent_paws = [other for other in paws if overlap(other.frame_start, other.frame_start + len(other.bboxs), paw.frame_start, paw.frame_start + len(paw.bboxs))] mean_y = np.mean([p.y_mean for p in concurrent_paws]) end = ["front", "rear"][paw.y_mean >= mean_y] # only get mean_x from this end if end == "front": mean_x = np.mean([p.x_mean for p in concurrent_paws if p.y_mean <= mean_y]) else: mean_x = np.mean([p.x_mean for p in concurrent_paws if p.y_mean >= mean_y]) 
side = ["left", "right"][paw.x_mean >= mean_x] paw.identify(side, end) ## compute grfs assignments = {"front left": 0, "front right": 1, "rear left": 2, "rear right": 3} self.grfs = grfs = np.zeros((self.n_frames, 4)) for paw in paws: idx = assignments[paw.name] f0, n_f = paw.frame_start, len(paw.forces) grfs[f0:f0 + n_f, idx] += paw.forces self.save_data(grfs, title=self.src) if vis: self.plot_pressure(n_frames=n_frames) def plot_pressure(self, n_frames=None, title="output"): fig, (ax_p, ax_grf) = plt.subplots(ncols=2, gridspec_kw={"width_ratios": [1, 3]}) if n_frames is None: data = self.pressure_data n_frames = self.n_frames else: data = self.pressure_data[:n_frames] progress = tqdm.tqdm(total=len(data) - 1) plot = ax_p.imshow(data[0], cmap='hot', interpolation='nearest') # calc all grfs assignments = {"front left": 0, "front right": 1, "rear left": 2, "rear right": 3} for f in range(4): name = {v: k for k, v in assignments.items()}[f] ax_grf.plot(self.grfs[:n_frames, f], label=name, color=paw_colours[name]) ax_grf.legend() vline = ax_grf.axvline(0, ls="--") for paw in self.paws: paw.set_frame(0, ax_p) def anim(i): ax_p.set_title(f"Frame = {i}") vline.set_data([i, i], [0, 1]) # Currently, i=0 runs twice for some weird reason. For now, just skip frame
packVer.version: continue else: raise Exception("Package " + packVer.name + " already in " + filename + " but with a different version, present: " + packsInFile[packVer.name] + ", adding: " + packVer.version) #if njh project, add the flags of it's dependencies if pack.njhProject_: cmd = "./setup.py --compfile compfile.mk --numCores 1 --append --outMakefile {makefileCommon}".format(makefileCommon = os.path.abspath(filename)) buildSubDir = pack.getBuildSubDir(packVer.version) Utils.run_in_dir(cmd, buildSubDir) pvIncFlags = self.getIncludeFlags(packVer) if "" != pvIncFlags: f.write("#" + packVer.name + ":" + packVer.version + " CXXFLAGS\n") f.write("COMLIBS += " + pvIncFlags + "\n") pvLdFlags = self.getLdFlags(packVer) if "" != pvLdFlags: f.write("#" + packVer.name + ":" + packVer.version + " LDFLAGS\n") f.write("LD_FLAGS += " + pvLdFlags + "\n") f.write("\n") f.flush() else: with open(filename, "a") as f: f.write("#Utils\n") f.write("# from http://stackoverflow.com/a/18258352\n") f.write("rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))\n") f.write("\n") f.write("#Default CXXFLAGS\n") f.write("COMLIBS += " + self.getDefaultIncludeFlags() + "\n") dLdFlags = self.getDefaultLDFlags() if "" != dLdFlags: f.write("#Default LDFLAGS\n") f.write("LD_FLAGS += " + dLdFlags + "\n") f.write("\n") f.flush() for packVer in packVers: pack = self.package(packVer.name) #if njh project, add the flags of it's dependencies if pack.njhProject_: cmd = "./setup.py --compfile compfile.mk --numCores 1 --append --outMakefile {makefileCommon}".format(makefileCommon = os.path.abspath(filename)) buildSubDir = pack.getBuildSubDir(packVer.version) Utils.run_in_dir(cmd, buildSubDir) pvIncFlags = self.getIncludeFlags(packVer) if "" != pvIncFlags: f.write("#" + packVer.name + ":" + packVer.version + " CXXFLAGS\n") f.write("COMLIBS += " + pvIncFlags + "\n") pvLdFlags = self.getLdFlags(packVer) if "" != pvLdFlags: f.write("#" + packVer.name + ":" + 
packVer.version + " LDFLAGS\n") f.write("LD_FLAGS += " + pvLdFlags + "\n") f.write("\n") f.flush() def addPackage(self, packVers, packVer): if hasattr(packVer, "commit"): packVer = LibNameVerCommit(packVer.name, packVer.version.replace("/", "__"), packVer.commit) else: packVer = LibNameVer(packVer.name, packVer.version.replace("/", "__")) if self.checkForPackVer(packVer): pack = self.package(packVer.name) for dep in pack.versions_[packVer.version].depends_: self.setUpPackagesNeeded([str(dep.name).lower()]) if hasattr(dep, "commit"): self.addPackage(packVers, LibNameVerCommit(str(dep.name).lower(), dep.version, dep.commit)) else: self.addPackage(packVers, LibNameVer(str(dep.name).lower(), dep.version)) found = False for otherPackVer in packVers: if otherPackVer.name == packVer.name: if otherPackVer.version != packVer.version: raise Exception("Version conflict for " + packVer.name + " already have " + otherPackVer.version + " and adding: " + packVer.version) elif hasattr(otherPackVer, "commit") and hasattr(packVer, "commit") and otherPackVer.commit != packVer.commit: raise Exception("Commit conflict for " + str(packVer.name) + " version: " + str(packVer.version) + " already have " + str(otherPackVer.commit) + " and adding: " + str(packVer.commit)) else: found = True if not found: packVers.append(packVer) @staticmethod def getPackagesInMakefileCommon(makefileFnp): packagesAlready = {} with open(makefileFnp, "r") as makefile: for line in makefile: if ':' in line and line.startswith("#") and "CXXFLAGS" in line: toks = line[1:].split() firstToks = toks[0].split(":") packagesAlready[firstToks[0]] = firstToks[1] return packagesAlready def isInstalled(self, packVer): if os.path.exists(os.path.join(self.dirMaster_.install_dir, joinNameVer(packVer))): return True else: return False def getDefaultIncludeFlags(self): return "-I./src/" def getDefaultLDFlags(self): ret = "" if Utils.isMac(): #for dylib path fixing in macs, this gets rid of the name_size limit, which why the hell 
is there a name size limit ret = ret + "-headerpad_max_install_names" return ret def __njhProjectBuildCmdOld(self): cmd = """ ./configure.py -CC {CC} -CXX {CXX} -externalLibDir {external} -prefix {localTop} && ./setup.py --compfile compfile.mk --numCores {num_cores} && make -j {num_cores} && make install""" cmd = " ".join(cmd.split()) return cmd def __njhProjectBuildCmd(self): cmd = """ ./configure.py -CC {CC} -CXX {CXX} -externalLibDir {external} -prefix $(dirname {local_dir}) """ if self.args.noInternet: cmd = cmd + """&& ./setup.py --compfile compfile.mk --numCores {num_cores} --outMakefile makefile-common.mk --overWrite --noInternet """ else: cmd = cmd + """&& ./setup.py --compfile compfile.mk --numCores {num_cores} --outMakefile makefile-common.mk --overWrite """ cmd = cmd + """&& make clean && make -j {num_cores} && make install""" cmd = " ".join(cmd.split()) return cmd class Setup: def __init__(self, args): self.extDirLoc = "" # the location where the libraries will be installed #if no compile file set up and assume external is next to setup.py if not args.compfile: self.extDirLoc = "external" #self.extDirLoc = os.path.abspath(os.path.join(os.path.dirname(__file__), "external")) else: self.extDirLoc = os.path.abspath(self.parseForExtPath(args.compfile[0])) self.dirMaster_ = LibDirMaster(self.extDirLoc) self.args = args # command line arguments parsed by argument parser self.setUps = {} # all available set ups self.setUpsNeeded = [] # the setups that need to be done self.foundSetUpsNeeded = [] #the setups given by either parsing the comp file or command line, to be processed/check to be put into self.setUpsNeeded self.installed = [] # the setups that able to install self.failedInstall = [] # the setups that failed self.CC = "" # the c compilier being used self.CXX = "" # the c++ compilier being used self.noInternet_ = False if args.noInternet: self.noInternet_ = True self.__initSetUpFuncs() self.__processArgsForCompilers() self.__processArgsForSetupsNeeded() 
#add packages but with only the setups needed found packNames = [foundSetup.name for foundSetup in self.foundSetUpsNeeded] self.setupPackages(packNames) #then add setups needed found to be parsed/checked by packages for setupFound in self.foundSetUpsNeeded: self.packages_.addPackage(self.setUpsNeeded, setupFound) def setupPackages(self, packNames=[]): #if we have internet and the cache is more than a day old, clear it if Utils.connectedInternet: cacheDate = datetime.datetime.fromtimestamp(os.path.getmtime(self.dirMaster_.cache_dir)) now = datetime.datetime.now() if 86400 < (now - cacheDate).total_seconds(): self.clearCache() if self.args.clearCache: self.clearCache() self.packages_ = Packages(self.extDirLoc, self.args, packNames) # path object to hold the paths for install def getAllAvailablePackages(self): return list(self.setUps.keys()) def setup(self): if self.args.forceUpdate: for setUpNeeded in self.setUpsNeeded: if not setUpNeeded.name in list(self.setUps.keys()): print(CT.boldBlack( "Unrecognized option ") + CT.boldRed(setUpNeeded.name)) else: self.rmDirsForLib(setUpNeeded) for setUpNeeded in self.setUpsNeeded: if not setUpNeeded.name in list(self.setUps.keys()): print(CT.boldBlack( "Unrecognized option ") + CT.boldRed(setUpNeeded.name)) else: if hasattr(setUpNeeded, "commit"): self.__setup(setUpNeeded.name, setUpNeeded.version, setUpNeeded.commit) else: self.__setup(setUpNeeded.name, setUpNeeded.version) for p in self.installed: if hasattr(p, "commit"): print(p.name + ":" + str(p.version)+ ":" + str(p.commit), CT.boldGreen("installed")) else: print(p.name + ":" + str(p.version), CT.boldGreen("installed")) for p in self.failedInstall: if hasattr(p, "commit"): print(p.name + ":" + str(p.version)+ ":" + str(p.commit), CT.boldRed("failed to install")) else: print(p.name + ":" + str(p.version), CT.boldRed("failed to install")) def __initSetUpFuncs(self): self.setUps = {"zi_lib": self.zi_lib, "boost": self.boost, "boost_filesystem": self.boost_filesystem, 
"boost_math": self.boost_math, "cppitertools": self.cppitertools, "catch": self.catch, "cppprogutils": self.cppprogutils, "r": self.r, "bamtools": self.bamtools, "cppcms": self.cppcms, "armadillo": self.armadillo, "libpca": self.libpca, "njhseq": self.njhseq, "seekdeep": self.SeekDeep, "njhcpp": self.njhcpp, "seqserver": self.seqserver, "njhrinside": self.njhRInside, "jsoncpp": self.jsoncpp, "pstreams": self.pstreams, "dlib": self.dlib, "libsvm": self.libsvm, "mongoc": self.mongoc, "mongocxx": self.mongocxx, "twobit" : self.twobit, "sharedmutex" : self.sharedMutex, "mathgl": self.mathgl, "magic": self.magic, "zlib": self.zlib, "zlib-ng": self.zlibng, "openblas": self.openblas, "flash": self.flash, "pigz": self.pigz, "bowtie2": self.bowtie2, "muscle": self.muscle, "adapterremoval": self.adapterremoval, "lastz": self.lastz, "samtools": self.samtools, "bcftools": self.bcftools, "hts": self.hts, "restbed": self.restbed, "unqlite": self.unqlite, "eigen": self.eigen, "glpk": self.glpk, "cmake": self.cmake, "curl": self.curl, "bhtsne": self.bhtsne, "lapack": self.lapack, "atlas": self.atlas, "mipwrangler": self.mipwrangler, "elucidator": self.elucidator, "pathweaver": self.pathweaver } if self.args.private: self.setUps["elucidatorlab"] = self.elucidatorlab; ''' "mlpack": self.mlpack, "liblinear": self.liblinear, ''' def printAvailableSetUps(self): self.__initSetUpFuncs() installs = sorted(self.getAllAvailablePackages()) self.setupPackages(installs) print("Available installs:") print("To Install use ./setup.py --libs lib1:ver,lib2:ver,lib3:ver") print("E.g. 
./setup.py --libs bamtools:v2.4.0,boost:1_60_0") for installAvail in installs: print(installAvail) pack = self.__package(installAvail) sys.stdout.write("\t") sys.stdout.write(",".join([p.replace("__", "/") for p in pack.getVersions()])) sys.stdout.write("\n") def printGitRefs(self): self.__initSetUpFuncs() print("Git branches and tags:") for setUpNeeded in self.setUpsNeeded: print(setUpNeeded.name) pack = self.__package(setUpNeeded.name) refs = pack.getGitRefs(pack.versions_[pack.defaultVersion_].bPaths_.url) print("\t" + "Branches") for b in refs.branches: print("\t\t" + b) print("\t" + "Tags") for t in refs.tags: print("\t\t" + t) def __processArgsForSetupsNeeded(self): if self.args.libs: inLibs = self.args.libs.split(",") for lib in inLibs: if ":" not in lib.lower(): raise Exception("Need to give version for " + lib) else: libSplit = lib.split(":") if 3 == len(libSplit): self.foundSetUpsNeeded.append(LibNameVerCommit(libSplit[0].lower(), libSplit[1], libSplit[2])); else: self.foundSetUpsNeeded.append(LibNameVer(libSplit[0].lower(), libSplit[1])); #self.packages_.addPackage(self.setUpsNeeded,LibNameVer(libSplit[0].lower(), libSplit[1])) if self.args.compfile: self.parseSetUpNeeded(self.args.compfile[0]) self.__initSetUpFuncs() #check to see if package is available for foundSetup in self.foundSetUpsNeeded: if foundSetup.name not in self.setUps: raise Exception("Error " + foundSetup.name + " not available, options are: " + ",".join(self.getAllAvailablePackages())) def __processArgsForCompilers(self): if self.args.compfile: self.parserForCompilers(self.args.compfile[0]) # if no compfile need to determine compiler, will default to env CC and CXX else: self.CC = genHelper.determineCC(self.args) self.CXX = genHelper.determineCXX(self.args) self.args.CC = self.CC self.args.CXX = self.CXX if "clang" in self.CXX: self.args.clang = True else: self.args.clang = False def parseForExtPath(self, fn): args = self.parseCompFile(fn) if "EXT_PATH" in args: extPath = 
args["EXT_PATH"].strip() extPath = extPath.replace("$(realpath", "") extPath = extPath.replace(")", "") extPath = extPath.strip() else: print("did not find external folder location; assuming ./external") extPath = "./external" return extPath def parseSetUpNeeded(self, fn): args = self.parseCompFile(fn) for k,v in args.items(): if k.startswith("USE_"): if '0' != v: if "#" in v: valSplit = v.split("#") if valSplit[0] == '1': if ":" in valSplit[1]: valSplitFurther = valSplit[1].split(":") self.foundSetUpsNeeded.append(LibNameVerCommit(k[4:].lower(), valSplitFurther[0], valSplitFurther[1])); else: self.foundSetUpsNeeded.append(LibNameVer(k[4:].lower(), valSplit[1])); #self.packages_.addPackage(self.setUpsNeeded, LibNameVer(k[4:].lower(),valSplit[1])) else: raise Exception("Need to supply version in compfile with
###############################################################################
# NOTE(review): this region of the artifact lost its original line breaks; the
# code below is a faithful re-formatting of the visible statements, with the
# concrete fixes called out in the docstrings.
###############################################################################
# Imports
import argparse                         # Argument parser
import logging                          # DEBUG, INFO, WARNING, ERROR, CRITICAL
import os
import time

import sympy as sp
from sympy.logic.boolalg import to_cnf
from mgtoolkit.library import *
from ortools.linear_solver import pywraplp

import Utility
import YawlToMetagraph
import TriplesToMetagraph
import PolicyAnalysisHelper

# NOTE(review): `glob_verbose` is read by several functions below; it is
# presumably a module-level verbosity flag set elsewhere in this file from the
# parsed `--verbose` count — confirm against the full file.


###############################################################################
# Argument parser
def get_parser():
    """Build and return the command line argument parser."""
    parser = argparse.ArgumentParser(
        description="DESCRIPTION",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--version", action="version", version='%(prog)s 1.0')
    parser.add_argument("-v", "--verbose", action="count", default=0,
                        help="increase output verbosity")
    parser.add_argument("-w", "--workflow", type=str, metavar="FILE",
                        help="workflow to generate policy from")
    parser.add_argument("-y", "--yawl-mode", action="store_true",
                        help="specification is in YAWL")
    return parser


###############################################################################
# Functions

# Load workflow from test configuration
def load_workflow_from_test():
    """Build and return a small hard-coded test ConditionalMetagraph."""
    Utility.print_section("Creating test metagraph")
    variables_set = {"owner", "vfx", "hdr", "color", "sound"}
    propositions_set = {'Eq(method, POST)', 'time > 8', 'time < 17'}
    edges_set = [
        Edge({"owner"}, {"vfx"}, attributes=['Eq(method, POST)']),
        Edge({"vfx"}, {"color", "sound"},
             attributes=['Eq(method, POST)', 'time < 17', 'time > 8']),
        Edge({"color"}, {"hdr"}, attributes=['time > 8', 'time < 17']),
        Edge({"hdr"}, {"owner"}, attributes=['time > 8', 'time < 17'])
    ]
    workflow_metagraph = ConditionalMetagraph(variables_set, propositions_set)
    workflow_metagraph.add_edges_from(edges_set)
    return workflow_metagraph


def find_all_reachable_elements(mg, B, covering=None, edges_taken=None):
    """Return (variables, edges) of *mg* reachable from the generating set *B*.

    Repeatedly takes every edge whose invertex is covered, adding its outvertex
    to the covering, until a fixed point is reached.

    Bug fixes vs. the original:
    - mutable default arguments (``covering=set()``, ``edges_taken=[]``) leaked
      state between successive calls; replaced with None-sentinels;
    - ``edges_not_taken = mg.edges`` was mutated with ``.remove``, altering the
      caller's metagraph; a copy is taken instead;
    - the loop was seeded with ``mg.edges[0]``, raising IndexError for a
      metagraph with no edges; a plain boolean sentinel is used.
    """
    covering = set() if covering is None else covering
    edges_taken = [] if edges_taken is None else edges_taken
    covering = covering.union(B)          # Add B to the covering
    edges_not_taken = list(mg.edges)      # copy: do not mutate mg.edges
    valid_edges = True                    # sentinel to enter the loop
    while valid_edges:
        # Compute valid edges from edges not yet taken
        valid_edges = set()
        for edge in edges_not_taken:
            if covering.issuperset(edge.invertex):
                valid_edges.add(edge)
        # Add outvertex variables coming from valid edges and move the edges
        # from edges_not_taken to edges_taken
        for edge in valid_edges:
            covering = covering.union(edge.outvertex)
            edges_taken.append(edge)
            edges_not_taken.remove(edge)
    return covering, edges_taken


def find_all_unreachable_elements(mg, B, covering=None, edges_taken=None):
    """Return (variables, edges) of *mg* that can NOT be reached from *B*.

    Bug fix: mutable default arguments replaced with None-sentinels (they are
    forwarded to find_all_reachable_elements, which applies the same fix).
    """
    reachable_variables, reachable_edges = find_all_reachable_elements(
        mg, B, covering, edges_taken)
    unreachable_variables = mg.variables_set - reachable_variables
    unreachable_edges = [edge for edge in mg.edges if edge not in reachable_edges]
    if glob_verbose >= 2:
        print("Reachable variables: {}".format(reachable_variables))
        print("Unreachable variables: {}".format(unreachable_variables))
        print("Reachable edges: {}".format(reachable_edges))
        print("Unreachable edges: {}".format(unreachable_edges))
    return unreachable_variables, unreachable_edges


def generate_output_file_name(workflow, yawl_mode):
    """Derive the output SAT ``.py`` file path from the workflow file path.

    Creates the destination directory for randomly generated workflows when it
    does not exist; terminates the application for unrecognized paths.
    """
    Utility.print_section("Generating output file name")
    if "manually-generated" in workflow:
        output_sat_name = ("generated-sat-from-mg/generated-from-manual/"
                           + workflow.split('.')[0].split('/')[-1] + ".py")
    elif "randomly-generated" in workflow:
        path_chunks = workflow.split("/")
        if yawl_mode:
            generated_sat_dir_path = ("generated-sat-from-mg/generated-from-random-yawl/"
                                      + path_chunks[-2] + "/")
        else:
            generated_sat_dir_path = ("generated-sat-from-mg/generated-from-random/"
                                      + path_chunks[-2] + "/")
        if not os.path.exists(generated_sat_dir_path):
            os.makedirs(generated_sat_dir_path)
        print("SAT dir path: {}".format(generated_sat_dir_path))
        output_sat_name = generated_sat_dir_path + path_chunks[-1].split('.')[0] + ".py"
    else:
        Utility.terminate_app(0)  # TODO Handle error
    print("Output SAT file: {}\n".format(output_sat_name))
    return output_sat_name


def conversions(mg):
    """Map metagraph variables to ``x_<i>`` and edges to ``e_<i>`` SAT names.

    Returns two association lists: (variable, sat_name) and (edge, sat_name).
    """
    Utility.print_section("Generating tuple list associating names of MG variables to SAT variables")
    variable_name_conversion = []
    for idx, variable in enumerate(sorted(mg.variables_set)):
        if glob_verbose >= 2:
            print("Variable: {}".format(variable))
        variable_name_conversion.append((variable, "x_{}".format(idx)))
    edge_name_conversion = []
    for idx, edge in enumerate(mg.edges):
        if glob_verbose >= 2:
            print("Edge: {}".format(edge))
        edge_name_conversion.append((edge, "e_{}".format(idx)))
    # NOTE(review): `>= 0` is always true for a count-based verbosity flag, so
    # these conversions are always printed — confirm this is intended.
    if glob_verbose >= 0:
        print("Variable name conversion: {}".format(variable_name_conversion))
        print("Edge name conversion: {}".format(edge_name_conversion))
    return variable_name_conversion, edge_name_conversion


def basic_sat_structure(variable_name_conversion, edge_name_conversion):
    """Emit the boilerplate lines of the generated SAT solver script.

    Returns a list of source-code strings declaring one 0/1 IntVar per
    variable/edge per timestep (one timestep per edge).
    """
    Utility.print_section("Generating basic SAT structure")
    sat = []
    sat.append("from mgtoolkit.library import *\n")
    sat.append("from ortools.linear_solver import pywraplp\n\n")
    sat.append("def LinearProgramming():\n")
    sat.append("    # Instantiate a Glop solver.\n")
    sat.append("    solver = pywraplp.Solver('SolveSimpleSystem', pywraplp.Solver.SAT_INTEGER_PROGRAMMING)\n\n")
    # Add list of equivalence for variables and edges
    sat.append("    var_equivalence = {}\n".format(variable_name_conversion))
    sat.append("    edge_equivalence = {}\n\n".format(edge_name_conversion))
    timesteps = len(edge_name_conversion)
    # For each variable in the generating set, create a logic variable for each timestep
    sat.append("    # For each variable in the generating set, create a logic variable\n")
    for variable in variable_name_conversion:
        for step in range(0, timesteps):
            sat.append("    {}_{} = solver.IntVar(0, 1, '{}_{}')\n".format(
                variable[1], step, variable[1], step))
    sat.append("")
    # For each edge in the edges set, create a logic variable for each timestep
    for edge in edge_name_conversion:
        for step in range(0, timesteps):
            sat.append("    {}_{} = solver.IntVar(0, 1, '{}_{}')\n".format(
                edge[1], step, edge[1], step))
    sat.append("")
    sat.append("    print('Number of variables = {}'.format(solver.NumVariables()))\n\n")
    return sat


def _append_exactly_once_or_never(constraints, name, timesteps):
    """Append a CNF constraint forcing *name* to be True at exactly one
    timestep, or at none of them (helper shared by variables and edges;
    extracted from two duplicated loops in the original)."""
    formula_list = []  # Each element corresponds to the name being True at one time
    for i in range(0, timesteps):  # index indicating which time to set it True
        formula_part_list = []     # Each element is one timestep literal
        for step in range(0, timesteps):
            if i == step:  # This is the timestep to set to True
                formula_part_list.append("{}_{}".format(name, step))
            else:
                formula_part_list.append("~{}_{}".format(name, step))
        formula_list.append("({})".format(" & ".join(formula_part_list)))
    # ... or never used at all
    formula_part_list = ["~{}_{}".format(name, step) for step in range(0, timesteps)]
    formula_list.append("({})".format(" & ".join(formula_part_list)))
    formula = " | ".join(formula_list)
    print(formula)
    constraints.append(to_cnf("{}".format(formula)))


def constraints_variables_unique_time(constraints, variable_name_conversion, edge_name_conversion):
    """Append 'unlocked at most once in time' constraints for every variable
    and every edge; returns the (mutated) constraint list."""
    Utility.print_section("Generating constraints: Variables must only be unlocked once in time")
    timesteps = len(edge_name_conversion)
    # Each variable must be set to True only once (or never)
    for _variable, variable_name in variable_name_conversion:
        _append_exactly_once_or_never(constraints, variable_name, timesteps)
    # Each edge must be set to True only once (or never)
    for _edge, edge_name in edge_name_conversion:
        _append_exactly_once_or_never(constraints, edge_name, timesteps)
    return constraints


def constraints_edge_unlocking(constraints, edge_name_conversion, source, target):
    """Append constraints making every taken edge unlock at least one useful
    variable.

    NOTE(review): the source artifact is truncated inside condition 2.c of this
    function; only the visible part is reconstructed below.
    """
    Utility.print_section("Generating constraints: Edges unlock at least one useful variable")
    timesteps = len(edge_name_conversion)
    for ei_idx, (ei, ei_name) in enumerate(edge_name_conversion):
        for u in range(0, timesteps):
            # Condition 1: at least one variable in the outvertex is unlocked
            # "e_{i,u} => ~e_{j,0} & ~e_{j,1} & ... & ~e_{j,u-1}"
            formula_part_list = []  # Parts for unlocking at least one variable
            for var in ei.outvertex:
                if var not in source:
                    for ej_idx, (ej, ej_name) in enumerate(edge_name_conversion):
                        if ei != ej and var in ej.outvertex:
                            formula_part_list_variable = []  # Parts for unlocking one variable
                            for v in range(0, u):  # Enforces v < u timesteps
                                formula_part_list_variable.append("~{}_{}".format(ej_name, v))
                            if not formula_part_list_variable:
                                formula_part_list_variable = ["True"]
                            formula_part_variable = " & ".join(formula_part_list_variable)
                            formula_part_list.append("({})".format(formula_part_variable))
            formula_part = " | ".join(formula_part_list)
            if glob_verbose >= 2:
                print("Formulas for {}_{}: {} at timestep {}".format(ei_name, u, ei, u))
                print("Formula part list: {}".format(formula_part_list))
                print("Formula part: {}\n".format(formula_part))
            if formula_part:
                # If no other edge has intersecting outvertex variables, there
                # is no constraint from condition 1
                if glob_verbose >= 1:
                    print("({}_{}) >> ({})".format(ei_name, u, formula_part))
                constraints.append(to_cnf("({}_{}) >> ({})".format(ei_name, u, formula_part)))
            # Condition 2: at least one unlocked variable is either in T, or
            # used at a later time by an edge
            formula_part_2 = ""
            for var in ei.outvertex:
                if var not in source:
                    if var in target:
                        # Condition 2.a: already covered by condition 1
                        pass
                    else:
                        # Conditions 2.b and 2.c
                        formula_part_list_2b = []  # Parts for all edges that can use var
                        for ej_idx, (ej, ej_name) in enumerate(edge_name_conversion):
                            if ei != ej and var in ej.invertex:
                                # Condition 2.b
                                formula_part_list_2b_ej = []  # Times edge e_j can use var
                                for v in range(u + 1, timesteps):  # Enforces v > u timesteps
                                    formula_part_list_2b_ej.append("{}_{}".format(ej_name, v))
                                formula_part_ej = " | ".join(formula_part_list_2b_ej)
                                if formula_part_ej:
                                    formula_part_list_2b.append("({})".format(formula_part_ej))
                                # Condition 2.c
                                for v in range(u + 1, timesteps):
                                    formula_part_list_2c = []  # Edges that can unlock var meanwhile
                                    for ek_idx, (ek, ek_name) in enumerate(edge_name_conversion):
                                        if ei != ek and ej != ek and var in ek.outvertex:
                                            formula_part_list_2c_ek = []
                                            # NOTE(review): the artifact ends
                                            # mid-statement here ("for w ...");
                                            # the remainder of condition 2.c is
                                            # not visible in this chunk.
                                            pass
    # NOTE(review): presumably this function returns the constraint list like
    # its siblings — verify against the original file.
    return constraints
= np.asarray((topright[0], topright[1])) leftIntersectionPointA = np.asarray((leftIntersectionX, leftIntersectionY)) rightIntersectionPointA = np.asarray((rightIntersectionX,righttIntersectionY)) leftIntersectionPointB = np.asarray((leftIntersectionXB, leftIntersectionYB)) rightIntersectionPointB = np.asarray((rightIntersectionXB,righttIntersectionYB)) dist_01 = self._point_to_line_dist(centerPoint, [topLeftA,lowerRightA]) dist_02 = self._point_to_line_dist(centerPoint, [lowerLeftA,leftIntersectionPointA]) dist_03 = self._point_to_line_dist(centerPoint, [toprightA,rightIntersectionPointA]) dist_04 = self._point_to_line_dist(centerPoint, [lowerLeftA,toprightA]) dist_05 = self._point_to_line_dist(centerPoint, [topLeftA,leftIntersectionPointB]) dist_06 = self._point_to_line_dist(centerPoint, [lowerRightA,rightIntersectionPointB]) minDistance_option_1 = min(dist_01, dist_02, dist_03) minDistance_option_2 = min(dist_04, dist_05, dist_06) distances_option_1.append(minDistance_option_1) distances_option_2.append(minDistance_option_2) # initialise the result and set a paramenter that is linked to the size of the image res_option_1 = 0 res_option_2 = 0 parameter = self.gray.size / ((self.gray.shape[0]+self.gray.shape[1]) * 1.618) for distance in distances_option_1: res_option_1 += distance * (np.exp((-distance/parameter))) for distance in distances_option_2: res_option_2 += distance * (np.exp((-distance/parameter))) if len(distances_option_1) == 0: cv2.putText(imageDisplay, "Nan: 0 ", (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) self.ScoreFourTriangleAdapted = 0 return imageDisplay, 0 elif len(distances_option_2) == 0: cv2.putText(imageDisplay, "Nan: 0 ", (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) self.ScoreFourTriangleAdapted = 0 return imageDisplay, 0 else: score_option_1 = res_option_1 / sum(distances_option_1) score_option_2 = res_option_2 / sum(distances_option_2) ScoreFourTriangleAdapted = max(score_option_1,score_option_2) if 
distanceMethod == 'segment': cv2.putText(imageDisplay, "TriangSeg: {:.3f}".format(ScoreFourTriangleAdapted), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) if segmentation == 'saliency': cv2.putText(imageDisplay, "RTS: {:.3f}".format(ScoreFourTriangleAdapted), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) if segmentation == 'ORB': cv2.putText(imageDisplay, "RTorb: {:.3f}".format(ScoreFourTriangleAdapted), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) self.ScoreFourTriangleAdapted = ScoreFourTriangleAdapted return imageDisplay, ScoreFourTriangleAdapted def bigTriangleCompositionAdapted(self, segmentation = 'inner', minArea = True, numberOfCnts = 100, areascalefactor = 2000, distanceMethod = 'segment'): # calculate the area of the template triangle based on the golden ratio h, w, s = self.image.shape baseLeftX = int((w - (w*0.618)) / 2) baseLeftY = int(h - ((h*0.618) / 2)) baseRightX = baseLeftX + int(w* 0.618) baseRightY = baseLeftY vertexX = int( w / 2) vertexY = h - baseLeftY centerImgX = int(self.image.shape[1]/2) centerImgY = int(self.image.shape[0]/2) listOfPotins = [[baseLeftX, baseLeftY], [vertexX, vertexY], [baseRightX, baseRightY]] ctr = np.array(listOfPotins).reshape((-1,1,2)).astype(np.int32) bigTriangeImg = self.image.copy() # fill the triangle for the mask blankForMasking = np.zeros(self.image.shape, dtype = "uint8") cv2.drawContours(blankForMasking, [ctr], -1, (255, 255, 255), 2) if segmentation == 'ORB' : blank, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = False, edgesdilateOpen = False, method = cv2.RETR_EXTERNAL) if segmentation == 'saliency': contours, SaliencyMask = self._saliencySegmentation(method = cv2.RETR_EXTERNAL ) # create the segmentations of the images using threshold if segmentation == 'thresh': contours, threshImg = self._thresholdSegmentation(method = cv2.RETR_LIST ) if segmentation == 'inner': segmentationOnInnerCnts, contours = 
self._innerCntsSegmentation(numberOfCnts = numberOfCnts, method = cv2.RETR_CCOMP, minArea = 2) # sort contours sorted_contours = sorted(contours, key = cv2.contourArea, reverse = True) # sorted and selected list of areas in contours if minArea is True if minArea: selected_contours = [] minArea = self.gray.size / areascalefactor for cnt in sorted_contours[0:numberOfCnts]: area = cv2.contourArea(cnt) if area > minArea: selected_contours.append(cnt) sorted_contours = sorted(selected_contours, key = cv2.contourArea, reverse = True) # select only the bigger contours contoursSelection = sorted_contours[0:numberOfCnts] # find the center of each contours and draw cnts, not using approx contours imageDisplay, listOfCenterPoints = self._findCentreOfMass(image = bigTriangeImg, contours = contoursSelection, approxCnt = False) # calculate the distance from the center points and the rule of third point(as in the paper) # min distance of each center to the 4 points distancePoints = [] if distanceMethod == 'point': for point in listOfCenterPoints: cX = point[0] cY = point[1] ManhattanDistanceNormalised_01 = abs(baseLeftX - cX) / self.image.shape[1] + abs(baseLeftY - cY) / self.image.shape[0] ManhattanDistanceNormalised_02 = abs(baseRightX - cX) / self.image.shape[1] + abs(baseRightY - cY) / self.image.shape[0] ManhattanDistanceNormalised_03 = abs(vertexX - cX) / self.image.shape[1] + abs(vertexY- cY) / self.image.shape[0] ManhattanDistanceNormalised_04 = abs(centerImgX - cX) / self.image.shape[1] + abs(centerImgY - cY) / self.image.shape[0] minDistance = min(ManhattanDistanceNormalised_01,ManhattanDistanceNormalised_02,ManhattanDistanceNormalised_03,ManhattanDistanceNormalised_04) distancePoints.append(minDistance) if distanceMethod == 'segment': for point in listOfCenterPoints: centerPoint = np.asarray(point) baseLeftPoint = np.asarray((baseLeftX, baseLeftY)) baseRigthPoint = np.asarray((baseRightX, baseRightY)) vertexPoint = np.asarray((vertexX, vertexY)) centerOfImgPoint = 
np.asarray((centerImgX,centerImgY)) dist_01 = self._point_to_line_dist(centerPoint, [baseLeftPoint,baseRigthPoint]) dist_02 = self._point_to_line_dist(centerPoint, [baseLeftPoint,vertexPoint]) dist_03 = self._point_to_line_dist(centerPoint, [baseRigthPoint,vertexPoint]) dist_04 = self._point_to_line_dist(centerPoint, [centerOfImgPoint,vertexPoint]) minDistance = min(dist_01, dist_02, dist_03, dist_04) distancePoints.append(minDistance) # initialise the result and set a paramenter that is linked to the size of the image res = 0 parameter = self.gray.size / ((self.gray.shape[0]+self.gray.shape[1]) * 1.618) if len(distancePoints) == 0: ScoreBigTriangle = 0 self.ScoreBigTriangle = ScoreBigTriangle return imageDisplay, ScoreBigTriangle else: for distance in distancePoints: res += distance * (np.exp((-distance/parameter))) ScoreBigTriangle = res / sum(distancePoints) # draw the guides rules and saliency on panel cv2.line(imageDisplay,(baseLeftX,baseLeftY), (baseRightX,baseRightY), (255,0,255), 1) cv2.line(imageDisplay,(baseLeftX,baseLeftY), (vertexX, vertexY), (255,0,255), 1) cv2.line(imageDisplay,(baseRightX,baseRightY), (vertexX, vertexY), (255,0,255), 1) cv2.line(imageDisplay,(centerImgX,centerImgY), (vertexX, vertexY), (255,0,255), 1) if distanceMethod == 'segment': cv2.putText(imageDisplay, "TriangSeg: {:.3f}".format(ScoreBigTriangle), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) if segmentation == 'saliency': cv2.putText(imageDisplay, "RTS: {:.3f}".format(ScoreBigTriangle), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) if segmentation == 'ORB': cv2.putText(imageDisplay, "RTorb: {:.3f}".format(ScoreBigTriangle), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) self.ScoreBigTriangle = ScoreBigTriangle return imageDisplay, ScoreBigTriangle def bigTriangleComposition (self, segmentation = 'ORB'): # calculate the area of the template triangle based on the golden ratio h, w, s = self.image.shape baseLeftX = int((w - (w*0.618)) / 2) 
baseLeftY = int(h - ((h*0.618) / 2)) baseRightX = baseLeftX + int(w* 0.618) baseRightY = baseLeftY vertexX = int( w / 2) vertexY = h - baseLeftY listOfPotins = [[baseLeftX, baseLeftY], [vertexX, vertexY], [baseRightX, baseRightY]] ctr = np.array(listOfPotins).reshape((-1,1,2)).astype(np.int32) # fill the triangle for the mask blankForMasking = np.zeros(self.image.shape, dtype = "uint8") cv2.drawContours(blankForMasking, [ctr], -1, (255, 255, 255), 2) # dilate the mask to capture more relevant pixels kernel = np.ones((5,5),np.uint8) blankForMasking = cv2.dilate(blankForMasking,kernel,iterations = 2) # flip to make the triangle with the base to the lower base #blankForMasking = cv2.flip(blankForMasking, -1) # segmentation using if segmentation == 'ORB' : ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = True, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL) if segmentation == 'saliency': contours, ImgImpRegion = self._saliencySegmentation(method = cv2.RETR_EXTERNAL ) ImgImpRegion = cv2.cvtColor(ImgImpRegion, cv2.COLOR_GRAY2BGR) # create the segmentations of the images using threshold if segmentation == 'thresh': contours, ImgImpRegion = self._thresholdSegmentation(method = cv2.RETR_LIST ) if segmentation == 'both': ImgImpRegionA, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = True, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL) contours, ImgImpRegionB = self._saliencySegmentation(method = cv2.RETR_EXTERNAL ) ImgImpRegionB = cv2.cvtColor(ImgImpRegionB, cv2.COLOR_GRAY2BGR) # create the both mask ImgImpRegion = cv2.bitwise_or(ImgImpRegionA,ImgImpRegionB) # count the total number of segmentation pixel bigger than 0 maskedImage = cv2.bitwise_and(ImgImpRegion, blankForMasking) sumOfrelevantPixels = (maskedImage > 0).sum() totalRelevantPixels = (ImgImpRegion > 0).sum() # ratio of the number counted in and out of the triangle bigTriangleCompScore = sumOfrelevantPixels/totalRelevantPixels # draw the 
image for display cv2.drawContours(ImgImpRegion, [ctr], -1, (255, 0, 0), 2) cv2.putText(ImgImpRegion, "TriComp: {:.3f}".format(bigTriangleCompScore), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) self.bigTriangleCompScore = bigTriangleCompScore return ImgImpRegion, bigTriangleCompScore def fourTriangleDistance (self, segmentation = 'ORB', edged = True, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL): # draw the guidelines of the images guideLinesA = self._fourTriangleGuidelines(flip = False) guideLinesB = self._fourTriangleGuidelines(flip = True) # dilate theguidelines kernel = np.ones((5,5),np.uint8) maskA = cv2.dilate(guideLinesA,kernel,iterations = 3) maskB = cv2.dilate(guideLinesB,kernel,iterations = 3) # segmentation using ORB or Saliency or Thresh or Edges expanded if segmentation == 'ORB' : ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = edged, edgesdilateOpen = edgesdilateOpen, method = method) if segmentation == 'saliency': contours, ImgImpRegion = self._saliencySegmentation(method = cv2.RETR_EXTERNAL ) ImgImpRegion = cv2.cvtColor(ImgImpRegion, cv2.COLOR_GRAY2BGR) # create the segmentations of the images using threshold if segmentation == 'thresh': contours, ImgImpRegion = self._thresholdSegmentation(imageToTresh = None, method = cv2.RETR_LIST ) if segmentation == 'both': ImgImpRegionA, contours, keypoints = self._orbSegmentation ( maxKeypoints = 1000, edged = edged, edgesdilateOpen = edgesdilateOpen, method = method) contours, ImgImpRegionB = self._saliencySegmentation(method = cv2.RETR_EXTERNAL ) ImgImpRegionB = cv2.cvtColor(ImgImpRegionB, cv2.COLOR_GRAY2BGR) # create the both
to what we do in '_setup_event_channel' using 'self._topic_root_policy', and then # calling 'self._sns.set_topic_attributes' permission_label: str = self._generate_cw_alarm_to_sns_publish_permissin_label( self.account_id, self.region, internal_alarm_name ) try: exponential_retry(self._sns.remove_permission, {"InternalErrorException"}, TopicArn=self._topic_arn, Label=permission_label) except self._sns.exceptions.NotFoundException: pass exponential_retry( self._sns.add_permission, {"InternalErrorException"}, TopicArn=self._topic_arn, Label=permission_label, AWSAccountId=[self.account_id], # this is redundant but kept here until the policy based update (above TODO is handled) ActionName=["Publish"], ) # create metric alarms first (composite alarm fails on validation if it depends on any of these new metric alarms). for int_signal in new_alarms: if int_signal.resource_access_spec.source == SignalSourceType.INTERNAL_ALARM: self._create_or_update_internal_alarm(int_signal) for int_signal in new_alarms: if int_signal.resource_access_spec.source == SignalSourceType.INTERNAL_COMPOSITE_ALARM: self._create_or_update_internal_alarm(int_signal) def _create_or_update_internal_alarm(self, int_alarm_signal: Signal) -> None: # Now create/update the Alarm! 
if isinstance(int_alarm_signal.resource_access_spec, InternalAlarmSignalSourceAccessSpec): self._create_or_update_internal_metric_alarm(int_alarm_signal) elif isinstance(int_alarm_signal.resource_access_spec, InternalCompositeAlarmSignalSourceAccessSpec): self._create_or_update_internal_composite_alarm(int_alarm_signal) else: raise ValueError( f"Internal alarm {int_alarm_signal.alias!r} is not supported by driver " f"{self.__class__.__name__!r} due to its unrecognized access spec " f"{int_alarm_signal.resource_access_spec.__class__.__name__!r}" ) def _create_or_update_internal_metric_alarm(self, int_alarm_signal: Signal) -> None: """Refer https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch.html#CloudWatch.Client.put_metric_alarm Please note that at this point alarm must have passed all of the validations (generic validations in front end, core and hook_internal_signal within this driver). """ internal_alarm_spec: InternalAlarmSignalSourceAccessSpec = int_alarm_signal.resource_access_spec alarm_params = internal_alarm_spec.alarm_params target_dict = {} if isinstance(alarm_params.target_metric_or_expression, MetricExpression): metrics_array = [] id: str = alarm_params.target_metric_or_expression.alias if id is None: id = MetricExpression.DEFAULT_ALIAS metrics_array.append({"Id": id, "Expression": alarm_params.target_metric_or_expression, "ReturnData": True}) for metric_signal in alarm_params.metric_signals: metric_stat_dict = {"Id": metric_signal.alias, "MetricStat": {}, "ReturnData": False} metric_access_spec = cast("MetricSignalSourceAccessSpec", metric_signal.resource_access_spec) metric_stat: MetricStatType = metric_access_spec.create_stats_from_filter(metric_signal.domain_spec.dimension_filter_spec)[ 0 ] metric_stat_dict["MetricStat"].update(metric_stat) metrics_array.append(metric_stat_dict) for expression in alarm_params.metric_expressions: metrics_array.append({"Id": id, "Expression": expression, "ReturnData": False}) 
target_dict.update({"Metrics": metrics_array}) else: # Signal (target is single metric) metric_signal = alarm_params.target_metric_or_expression metric_access_spec = cast("MetricSignalSourceAccessSpec", metric_signal.resource_access_spec) metric_stat: MetricStatType = metric_access_spec.create_stats_from_filter(metric_signal.domain_spec.dimension_filter_spec)[0] target_dict.update( { "MetricName": metric_stat["Metric"]["MetricName"], "Namespace": metric_stat["Metric"]["Namespace"], "Dimensions": metric_stat["Metric"]["Dimensions"], "Statistic": metric_stat["Stat"], # "ExtendedStatistic"= , "Period": metric_stat["Period"], # RheocerOS does not use Unit ! "Unit": "None", } ) exponential_retry( self._cw.put_metric_alarm, [], AlarmName=self.get_unique_internal_alarm_name(internal_alarm_spec.alarm_id), AlarmDescription=internal_alarm_spec.alarm_params.description if internal_alarm_spec.alarm_params.description else "", ActionsEnabled=True, # forward all of the actions to Diagnostics HUB / event-channel + default actions. 
OKActions=[self._topic_arn] + [action.uri(alarm_params) for action in alarm_params.default_actions.OK_ACTIONS], AlarmActions=[self._topic_arn] + [action.uri(alarm_params) for action in alarm_params.default_actions.ALARM_ACTIONS], InsufficientDataActions=[self._topic_arn] + [action.uri(alarm_params) for action in alarm_params.default_actions.INSUFFICIENT_DATA_ACTIONS], EvaluationPeriods=alarm_params.number_of_evaluation_periods, DatapointsToAlarm=alarm_params.number_of_datapoint_periods, Threshold=alarm_params.threshold, ComparisonOperator=alarm_params.comparison_operator.value, TreatMissingData=alarm_params.treat_missing_data.value, # EvaluateLowSampleCountPercentile='string', # TODO Pass this if alarm is new # Tags=[ # { # 'Key': 'IntelliFlow_Context_UUID', # 'Value': internal_alarm_spec.get_owner_context_uuid() # } # ], **target_dict, ) def _render_cw_alarm_rule(self, alarm_rule: Union[AlarmRule, "Signal"], nested: bool = False) -> str: """Recursively render the rule into AWS CloudWatch composite alarm alarm rule string.""" cw_alarm_rule_part = "" if isinstance(alarm_rule, AlarmRule): if alarm_rule.inverted: cw_alarm_rule_part += "NOT " if nested: cw_alarm_rule_part += "(" # LHS cw_alarm_rule_part += self._render_cw_alarm_rule(alarm_rule.lhs, nested=True) # OPERATOR if alarm_rule.operator == AlarmRuleOperator.AND: cw_alarm_rule_part += " AND " elif alarm_rule.operator == AlarmRuleOperator.OR: cw_alarm_rule_part += " OR " else: raise ValueError(f"Operator {alarm_rule.operator} is not supported for AWS CW Alarm Rule rendering!") # RHS cw_alarm_rule_part += self._render_cw_alarm_rule(alarm_rule.rhs, nested=True) if nested: cw_alarm_rule_part += ")" # close inversion paranthesis else: # Signal rule_alarm_signal: Signal = alarm_rule if rule_alarm_signal.is_inverted: cw_alarm_rule_part += "NOT " alarm_state_dimension: DimensionVariant = rule_alarm_signal.domain_spec.dimension_filter_spec.find_dimension_by_name( AlarmDimension.STATE_TRANSITION ) # default to alarm if 
not specialized (materialized) current_alarm_state: str = alarm_state_dimension.value if alarm_state_dimension.is_material_value() else AlarmState.ALARM.value cw_alarm_rule_part += current_alarm_state cw_alarm_rule_part += "(" # we use ALARM ARN if rule_alarm_signal.resource_access_spec.source in [ SignalSourceType.INTERNAL_ALARM, SignalSourceType.INTERNAL_COMPOSITE_ALARM, ]: # internal alarm # use this platform's own AWS region, account id and mapped, unique alarm ID cw_alarm_rule_part += f"arn:aws:cloudwatch:{self.region}:{self.account_id}:alarm:{self.get_unique_internal_alarm_name(rule_alarm_signal.resource_access_spec.alarm_id)}" elif rule_alarm_signal.resource_access_spec.source in [SignalSourceType.CW_ALARM, SignalSourceType.CW_COMPOSITE_ALARM]: # TODO actually CW does not allow alarms from other accounts or regions to be bound to composite # alarms. So we can proactively add a validation to make the API error more human readable. # external alarm account_id = rule_alarm_signal.resource_access_spec.account_id region_id = rule_alarm_signal.resource_access_spec.region_id # use the name "as is" for externals cw_alarm_rule_part += f"arn:aws:cloudwatch:{region_id}:{account_id}:alarm:{rule_alarm_signal.resource_access_spec.name}" else: raise ValueError( f"Input signal (alias: {rule_alarm_signal.alias!r}, " f"source : {rule_alarm_signal.resource_access_spec.source.value!r}) is not " f"supported as a composite alarm input! Only alarms must be used as an input." 
) cw_alarm_rule_part += ")" return cw_alarm_rule_part def _create_or_update_internal_composite_alarm(self, int_alarm_signal: Signal) -> None: """Refer https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch.html#CloudWatch.Client.put_composite_alarm """ internal_alarm_spec: InternalCompositeAlarmSignalSourceAccessSpec = int_alarm_signal.resource_access_spec alarm_params: CompositeAlarmParams = internal_alarm_spec.alarm_params exponential_retry( put_composite_alarm, {}, self._cw, ActionsEnabled=True, AlarmName=self.get_unique_internal_alarm_name(internal_alarm_spec.alarm_id), AlarmDescription=alarm_params.description if alarm_params.description else "", AlarmRule=self._render_cw_alarm_rule(alarm_params.alarm_rule), # forward all of the actions to Diagnostics HUB / event-channel + default actions. OKActions=[self._topic_arn] + [action.uri(alarm_params) for action in alarm_params.default_actions.OK_ACTIONS], AlarmActions=[self._topic_arn] + [action.uri(alarm_params) for action in alarm_params.default_actions.ALARM_ACTIONS], InsufficientDataActions=[self._topic_arn] + [action.uri(alarm_params) for action in alarm_params.default_actions.INSUFFICIENT_DATA_ACTIONS] # TODO Pass this if alarm is new # , Tags=[ # { # 'Key': 'IntelliFlow_Context_UUID', # 'Value': internal_alarm_spec.get_owner_context_uuid() # } # ] ) # overrides def _process_construct_connections( self, new_construct_conns: Set["_PendingConnRequest"], current_construct_conns: Set["_PendingConnRequest"] ) -> None: """Got dev-time connection request from another Construct. That construct needs modifications in the resources that this construct encapsulates. Ex: from Lambda, to set up event notification from internal data S3 bucket. 
""" lambda_arns: Set[str] = set() queue_arns: Set[str] = set() topic_arns: Set[str] = set() for conn in new_construct_conns: if conn.resource_type == "lambda": lambda_arns.add(conn.resource_path) elif conn.resource_type == "sns": topic_arns.add(conn.resource_path) elif conn.resource_type == "sqs": queue_arns.add(conn.resource_path) else: err: str = f"{self.__class__.__name__} driver cannot process the connection request: {conn!r}" module_logger.error(err) raise ValueError(err) # s3 notification setup requires target resources to be alive and with right permissions. # so let's setup client permissions first. lambda_client = self._session.client(service_name="lambda", region_name=self._region) for lambda_arn in lambda_arns: statement_id: str = self.__class__.__name__ + "_lambda_Invoke_Permission" # consider switching to following way of generating the statement id. this is not critical since within # same application/platform only this impl can retain this statement_id (upstream ones use the specific one) # generate_statement_id(f"{self.__class__.__name__}_{self._dev_platform.context_id}_internal_invoke") try: exponential_retry( remove_permission, {"InvalidParameterValueException", "ServiceException", "TooManyRequestsException"}, lambda_client, lambda_arn, statement_id, ) except ClientError as error: if error.response["Error"]["Code"] not in ["ResourceNotFoundException"]: raise # give permission to SNS to call this lambda resource owned by our platform. exponential_retry( add_permission, {"InvalidParameterValueException", "ServiceException", "TooManyRequestsException"}, lambda_client, lambda_arn, statement_id, "lambda:InvokeFunction", "sns.amazonaws.com", self._topic_arn, ) # WARNING: DO NOT use account_id for any other trigger than S3. 
# self._account_id) exponential_retry( self._sns.subscribe, {"InternalErrorException"}, TopicArn=self._topic_arn, Protocol="lambda", Endpoint=lambda_arn ) # TODO/FUTURE go over queue_arns and topic_arns def subscribe_downstream_resource(self, downstream_platform: "DevelopmentPlatform", resource_type: str, resource_path: str): if resource_type == "lambda": downstream_session = downstream_platform.conf.get_param(AWSCommonParams.BOTO_SESSION) downstream_region = downstream_platform.conf.get_param(AWSCommonParams.REGION) # alternatively the session can be retrieved as below: # current platform (self._dev_platform) should be alive as an upstream platform within # the dev context of the caller downstream platform # downstream_session = self._dev_platform.conf.get_param(AWSCommonParams.HOST_BOTO_SESSION) lambda_client = downstream_session.client( service_name="lambda", # have to use downstream's own region (cannot use self._region) region_name=downstream_region, ) lambda_arn = resource_path statement_id: str = generate_statement_id(f"{self.__class__.__name__}_{self._dev_platform.context_id}_upstream_invoke") try: exponential_retry( remove_permission, {"InvalidParameterValueException", "ServiceException", "TooManyRequestsException"}, lambda_client, lambda_arn, statement_id, ) except ClientError as error: if error.response["Error"]["Code"] not in ["ResourceNotFoundException"]: raise # give permission to SNS to call this lambda resource owned by our platform. exponential_retry( add_permission, {"InvalidParameterValueException", "ServiceException", "TooManyRequestsException"}, lambda_client, lambda_arn, statement_id, "lambda:InvokeFunction", "sns.amazonaws.com", self._topic_arn, ) # Setup against the downstream resource (Lambda) is complete. # Now start configuring upstream event-channel (SNS). 
            downstream_account_id = downstream_platform.conf.get_param(AWSCommonParams.ACCOUNT_ID)
            if self._account_id != downstream_account_id:
                # Cross-account case: the downstream account must be granted Receive/Subscribe on our topic
                # before its session can make the subscribe call below.
                # 1 - check permission to downstream resource
                permission_label: str = generate_statement_id(
                    f"{self.__class__.__name__}_{downstream_platform.context_id}_downstream_invoke"
                )
                # TODO replace the following (acc based) permission control code with a more granular policy based version.
                # similar to what we do in '_setup_event_channel' using 'self._topic_root_policy', and then
                # calling 'self._sns.set_topic_attributes'
                try:
                    exponential_retry(
                        self._sns.remove_permission, {"InternalErrorException"}, TopicArn=self._topic_arn, Label=permission_label
                    )
                except self._sns.exceptions.NotFoundException:
                    # Nothing to remove on first-time setup.
                    pass
                exponential_retry(
                    self._sns.add_permission,
                    {"InternalErrorException"},
                    TopicArn=self._topic_arn,
                    Label=permission_label,
                    AWSAccountId=[downstream_account_id],
                    ActionName=["Receive", "Subscribe"],
                )

            # and finally the actual subscribe call can now be made from the downstream session.
            # if upstream makes this call on behalf of downstream, then downstream lambda should
            # catch SNS notification with a 'token' and then call 'confirm_subscription' (we avoid this).
            downstream_sns = downstream_session.client(
                service_name="sns",
                # here we have to use self._region again since we just want to
                # hit this SNS from downstream acc only.
                region_name=self._region,
            )
            exponential_retry(
                downstream_sns.subscribe, {"InternalErrorException"}, TopicArn=self._topic_arn, Protocol="lambda", Endpoint=lambda_arn
            )

    def unsubscribe_downstream_resource(self, downstream_platform: "DevelopmentPlatform", resource_type: str, resource_path: str):
        """Remove the resource owned by a downstream platform from event-channel gracefully, following
        a reverse order as compared to 'subscribe_downstream_resource'.
""" if resource_type == "lambda": downstream_session = downstream_platform.conf.get_param(AWSCommonParams.BOTO_SESSION) downstream_region = downstream_platform.conf.get_param(AWSCommonParams.REGION) # alternatively the session can be retrieved as below: # current platform (self._dev_platform) should be alive as an
<filename>python-obj-system.py #!/usr/bin/env python3 from mdpyformat import * import pprintex header_md("""Python object primer for Python3 / meta classes""" ) header_md("""Introduction""", nesting = 2) print_md(""" Python is good at creating the illusion of being a simple programming language. Sometimes this illusion fails, like when you have to deal with the import/module system [my attempts to get it](https://github.com/MoserMichael/pythonimportplayground). Another area of complexity is the object system, last week I tried to understand [python enums](https://docs.python.org/3/library/enum.html), it turns that they are built on top of [meta classes](https://github.com/python/cpython/blob/2c56c97f015a7ea81719615ddcf3c745fba5b4f3/Lib/enum.py#L511), So now I have come to realize, that I really don't know much about python and its object system. The purpose of this text is to figure out, how the python object system ticks. """) header_md("""The Python object system""", nesting=2) header_md("""How objects are represented""", nesting=3) print_md(""" Lets look at a simple python class Foo with a single base class Base, and see how objects are created and represented in memory """) eval_and_quote(""" # The base class. All Python3 classes have the base class of type object. # The long form is therefore # class Base(object): # However Pylint will tell you, that this long form is redundant class Base: # Class variables are shared between all instances of the class Base, and declared like this: base_class_var = "Base" # The object constructor/init method, Note the first 'self' argument, which refers to the object instance. 
def __init__(self): print("calling Base.__init__") # Object variables are specific to a given instance of Base # Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method) self.obj_var_base = 10 # An object method - needs to access the object instance, which is passed as first 'self' argument. def show_base(self): print_md("obj_var_base: ", self.obj_var_base) # A class method/static method is called without an object instance. @staticmethod def make_base(): return Base() # class Foo with a base class Base class Foo(Base): # Class variables are shared between all instances of the class Foo, and declared like this: class_var = 42 class_var2 = 43 # The object constructor/init method, Note the first 'self' argument, which is the object instance. def __init__(self): # When not calling the base class __init__ method: the base class object variables are not added to the object !!! # The base class __init__ adds the 'obj_var_base' member to the __dict__ member of this object instance. # By convention: you first init the base classes, before initialising the derived class. super().__init__() print("calling Foo.__init__") # Object variables are specific to a given instance of Foo # Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method) # Define object variable: obj_var_a self.obj_var_a=42 # Define object variable: obj_var_b self.obj_var_b="name" # An object method - needs to access the object instance, which is passed as first 'self' argument. def show_derived(self): print_md("obj_var_a:", self.obj_var_a, "obj_var_b:", self.obj_var_b) # A class method/static method is called without an object instance. @staticmethod def make_foo(): return Foo() # Make a new object instance of type Foo class. 
foo_obj=Foo() """) print_md("The memory address of object foo_obj is returned by the [id built-in](https://docs.python.org/3/library/functions.html#id)") eval_and_quote('print("id(foo_obj) : ", id(foo_obj))') print_md("If two variables have the same object id value, then they both refer to the very same object/instance!") print_md(""" Each user defined object has a __dict__ attribute, this is a dictionary that lists all the object instance variables. This also includes instance members that were added by the __init__ method of the base class !! """) eval_and_quote("""print("foo_obj.__dict__ : ", foo_obj.__dict__)""") print_md(""" So you see that the following is exactly the same thing: """) eval_and_quote("""assert id(foo_obj.obj_var_a) == id( foo_obj.__dict__['obj_var_a'] ) """) print_md(""" Wait, but where does the __dict__ attribute come from? The [built-in getattr](https://docs.python.org/3/library/functions.html#getattr) function can return this built-in __dict__ attribute! Interesting: the python notation object.member_name can mean different things: 1) for built-in attributes it means a call to getattr 2) for object instances (assigned in the __init__ method of the class) it means a call to retrieve the __dict__ attribute, and then a lookup of the variable name in that dictionary. """) print_md( """foo_obj.__dict__ and getattr(foo_obj,'__dict__',None) is the same thing! """) eval_and_quote("""assert id(foo_obj.__dict__) == id( getattr(foo_obj,'__dict__',None) )""") print_md(""" The getattr builtin function has a good part, its return value can be checked for None. This can be used, in order to check if the argument is an object with a __dict__ attribute. 
""") eval_and_quote("""base_obj = object()""") print_md("An object of built-in type ", type(base_obj), " doesn't have a __dict__ member") eval_and_quote("""assert getattr(base_obj, '__dict__', None) is None""") eval_and_quote("""int_obj = 42""") print_md("An object of built-in type ", type(int_obj), " doesn't have a __dict__ member") eval_and_quote("""assert getattr(int_obj, '__dict__', None) is None""") print_md(""" The [dir builtin](https://docs.python.org/3/library/functions.html#dir) function does different things, depending on the argument, for regular objects it returns a "list that contains the object’s attributes’ names, the names of its class’s attributes, and recursively of the attributes of its class’s base classes.", all this is sorted alphabetically. """) eval_and_quote("""print("dir(foo_obj) : ", dir(foo_obj))""") # doesn't have __slots__, how odd. #print_md("foo_obj.__slots__ : ", foo_obj.__slots__) header_md("""How classes are represented""", nesting=3) print_md("""The built-in function [type](https://docs.python.org/3/library/functions.html#type), is returning the class of an object, when applied to a variable (to be more exact: type is a built-in class, and not a built-in function, more on that later)""") eval_and_quote(""" # Make a new object instance of type Foo class. foo_obj=Foo() print("class of object foo_obj - type(foo_obj): ", type(foo_obj)) # That's the same as showing the __class__ member of the variable (in Python3) print("foo_obj.__class__ :", foo_obj.__class__) """) print_md(""" The class is an object, it's purpose is to hold the static data that is shared between all object instances. Each object has a built-in __class__ attribute, that refers to this class object. Note that the name of the class includes the module name, __main__ if the class is defined in the file given as argument to the python interpreter. 
Also note that the type built-in of type(foo_obj) is really the same as: str(foo_obj.__class__) (for Python3) """) print_md(""" Again, the built in attribute __class__ can also be accessed with the getattr built-in function. """) eval_and_quote( """ print("foo_obj.__class__ and getattr(foo_obj,'__class__',None) is the same thing!") assert id(foo_obj.__class__) == id( getattr(foo_obj,'__class__',None) ) """) print_md("""The __name__ and __qualname__ built-in attributes return the name of the class, without the module name """) eval_and_quote( """ print("foo_boj.__class__.__name__ : ", foo_obj.__class__.__name__) print("foo_boj.__class__.__qualname__ : ", foo_obj.__class__.__qualname__)""" ) print_md(""" To get the immediate base class list as declared in that particular class. """) eval_and_quote( """print("foo_obj.__class__.__bases__ :", foo_obj.__class__.__bases__)""") print_md(""" The __mro__ member is a list of types that stands for 'method resoultion order', when searching for an instance method, this list is searched in order to resolve the method name. The Python runtime creates this lists by enumerating all of its base classes recursively, in depth first traversal order. 
For each class it follows the base classes, from the left ot the right This list is used to resolve a member function 'member_function' of an object, when you call it via: obj_ref.member_function() """) eval_and_quote( """print("foo_obj.__class__.__mro__ :", foo_obj.__class__.__mro__) """ ) print_md("Computing the method resolution order by hand") eval_and_quote(""" # function to a class hierarchy, in depth first search order (like what you get in MRO - method resolution order) def show_type_hierarchy(type_class): def show_type_hierarchy_imp(type_class, nesting): if len(type_class.__bases__) == 0: return prefix = "\t" * nesting print( prefix + "type:", type_class.__name__ , "base types:", ",".join( map( lambda ty : ty.__name__, type_class.__bases__) ) ) #print( prefix + "str(", type_class.__name__ , ").__dict__ : ", type_class.__dict__ ) for base in type_class.__bases__: show_type_hierarchy_imp(base, nesting+1) if not inspect.isclass(type_class): print("object ", str(type_class), " is not class") return print("show type hierarchy of class:") show_type_hierarchy_imp(type_class, 0) class LevelOneFirst: pass class LevelOneSecond: pass class LevelOneThird: pass class LevelTwoFirst(LevelOneFirst, LevelOneSecond): pass class LevelThree(LevelTwoFirst,LevelOneThird): pass show_type_hierarchy(LevelThree) print("LevelThree.__mro__:", LevelThree.__mro__) """) eval_and_quote(""" print("*** mro in detail:") for cls in foo_obj.__class__.__mro__: print_md("\tclass-in-mro: ", str(cls), "id:", id(cls), "cls.__dict__: ", cls.__dict__) print("*** eof mro in detail") """) print_md(""" The class object has a __dict__ too - here you will see all the class variables (for Foo these are class_var and class_var2) and class methods (defined with @staticmethod), but also the object methods (with the self parameter) """) eval_and_quote( """print("foo_obj.__class__.__dict__ : ", foo_obj.__class__.__dict__)""" ) # doen't have slots, how odd. 
#print_md("foo_obj.__class__.__slots__ : ", foo_obj.__class__.__slots__) print_md(""" Again, the [dir](https://docs.python.org/3/library/functions.html#dir) built-in function does different things, depending on the argument type for a class object it returns a "list that contains the names of its attributes, and recursively of the attributes of its bases" That means it displays both the names of static variables, and
import os.path from io import StringIO import json from unittest.mock import patch, Mock from django.core.management import call_command from django.test import TestCase, override_settings from django.urls import reverse import pytest import rdflib import requests from .admin import ManifestSelectWidget from .models import Manifest, Canvas, IIIFImage, IIIFPresentation, \ IIIFException, get_iiif_url from .importer import ManifestImporter FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'fixtures') @patch('djiffy.models.requests') def test_get_iiif_url(mockrequests): # by default, no auth token test_url = 'http://example.com/id1/manifest' get_iiif_url(test_url) mockrequests.get.assert_called_with(test_url) # token specified, domain matches with override_settings(DJIFFY_AUTH_TOKENS={'example.com': 'testauth'}): get_iiif_url(test_url) mockrequests.get.assert_called_with(test_url, params={'auth_token': 'testauth'}) # token specified, domain doesn't match with override_settings(DJIFFY_AUTH_TOKENS={'not.me': 'testauth'}): get_iiif_url(test_url) mockrequests.get.assert_called_with(test_url) class TestManifest(TestCase): def test_str(self): # no label - short id is used book = Manifest(short_id='bk123') assert str(book) == 'bk123' book.label = 'An item' assert str(book) == 'An item' def test_absolute_url(self): book = Manifest(short_id='bk123') assert book.short_id in book.get_absolute_url() # FIXME: is this a useful test or too specific assert book.get_absolute_url() == '/iiif/%s/' % book.short_id def test_admin_thumbnail(self): book = Manifest.objects.create(short_id='bk123') assert book.admin_thumbnail() is None canv = Canvas.objects.create(short_id='pg12', thumbnail=True, manifest=book, order=1) assert book.admin_thumbnail() == canv.admin_thumbnail() def test_logo(self): book = Manifest(short_id='bk123') assert book.logo is None book.extra_data['logo'] = 'http://so.me/logo.img' assert book.logo == book.extra_data['logo'] def test_license(self): book = 
Manifest(short_id='bk123') assert book.license is None book.extra_data['license'] = 'http://rightsstatements.org/vocab/InC/1.0/' assert book.license == book.extra_data['license'] def test_rights_statement_id(self): book = Manifest(short_id='bk123') assert book.rights_statement_id is None book.extra_data['license'] = 'http://rightsstatements.org/vocab/InC/1.0/' assert book.rights_statement_id == 'InC' @patch('djiffy.models.requests') @patch('djiffy.models.rdflib') def test_license_label(self, mockrdflib, mockrequests): book = Manifest(short_id='bk123') # no license, no label assert book.license_label() is None # non http license, no label book.extra_data['license'] = 'foo://bar' assert book.license_label() is None # rightsstatement.org license book.extra_data['license'] = 'http://rightsstatements.org/vocab/NKC/1.0/' # simulate expected return: 303 to data url mockresponse = mockrequests.get.return_value mockresponse.status_code = 303 mockresponse.headers = {'location': 'http://rightsstatements.org/vocab/NKC/1.0/'} mockrequests.codes = requests.codes # load fixture data into an rdflib graph and return via mockrdflib testgraph = rdflib.Graph() testgraph.parse(os.path.join(FIXTURE_DIR, 'rightsstatement_org_NKC.json'), format='json-ld') mockrdflib.Graph.return_value = testgraph # use actual uriref method mockrdflib.URIRef = rdflib.URIRef with patch.object(testgraph, 'parse') as mockparse: label = book.license_label() assert label == 'No Known Copyright' mockrequests.get.assert_called_with( book.license, headers={'Accept': 'application/json'}, allow_redirects=False) mockrdflib.Graph.assert_any_call() mockparse.assert_called_with(mockresponse.headers['location'], format='json-ld') # with language code label = book.license_label(lang='de') assert label == 'Kein Urheberrechtsschutz bekannt' # CC license book = Manifest(short_id='bk123') book.extra_data['license'] = 'http://creativecommons.org/licenses/by-nc-nd/3.0/' testgraph = rdflib.Graph() 
testgraph.parse(os.path.join(FIXTURE_DIR, 'cc_by_nc_nd.rdf'), format='xml') mockrdflib.Graph.return_value = testgraph with patch.object(testgraph, 'parse') as mockparse: label = book.license_label() assert label == 'Attribution-NonCommercial-NoDerivs 3.0 Unported' # with language code label = book.license_label(lang='fr') assert label == 'Attribution - Pas d’Utilisation Commerciale - Pas de Modification 3.0 non transposé' # rights label in extra data mockrequests.reset_mock() mockrdflib.reset_mock() local_label = 'copyright status unknown' book.extra_data['seeAlso'] = {'edm_rights': {'pref_label': local_label}} label = book.license_label() assert label == local_label # should not call requests or use rdflib mockrequests.get.assert_not_called() mockrdflib.Graph.assert_not_called() # error handling - shouldn't blow up if there's an error book = Manifest(short_id='bk123') testgraph = rdflib.Graph() mockrdflib.Graph.return_value = testgraph book.extra_data['license'] = 'http://rightsstatements.org/vocab/NKC/1.0/' # exception on parse with patch.object(testgraph, 'parse') as mockparse: mockparse.side_effect = Exception assert book.license_label() is None # exception on request mockrequests.get.side_effect = Exception assert book.license_label() is None class TestCanvas(TestCase): def test_str(self): manif = Manifest(short_id='bk123', label='Book 1') page = Canvas(manifest=manif, label='Image 1', short_id='pg123', order=1) assert str(page) == '%s %d (%s)' % (str(manif), page.order + 1, page.label) page.thumbnail = True assert str(page).endswith('*') def test_image(self): img_service = 'https://images.co' img_id = 'some-file.jp2' page = Canvas(iiif_image_id='/'.join([img_service, img_id])) assert isinstance(page.image, IIIFImage) assert page.image.api_endpoint == img_service assert page.image.image_id == img_id def test_plain_text_url(self): # individual dictionary extra_data = { 'rendering': { '@id': 'http://some.org/with/text', 'format': 'text/plain', 'label': 'Download 
page text', } } page = Canvas(extra_data=extra_data) assert isinstance(page.plain_text_url, str) # plain text url returned assert page.plain_text_url == 'http://some.org/with/text' # dimensions not in extra data assert page.height is None assert page.width is None # with dimensions in extra data extra_data['width'] = 400 extra_data['height'] = 300 assert page.width == 400 assert page.height == 300 # no plain text url, returns None page.extra_data['rendering']['format'] = 'some/other-mime' assert page.plain_text_url is None # test with a list extra_data = { 'rendering': [ {'@id': 'http://some.org/pdf', 'format': 'application/pdf', 'label': 'View PDF image'}, {'@id': 'http://some.org/with/text', 'format': 'text/plain', 'label': 'Download page text'}, ] } # returns the correct text/plain version page = Canvas(extra_data=extra_data) assert isinstance(page.plain_text_url, str) assert page.plain_text_url == 'http://some.org/with/text' # delete the plain text version del page.extra_data['rendering'][1] assert page.plain_text_url is None def test_absolute_url(self): manif = Manifest(short_id='bk123', label='Book 1') page = Canvas(manifest=manif, label='Image 1', short_id='pg123', order=1) assert manif.short_id in page.get_absolute_url() assert page.short_id in page.get_absolute_url() # FIXME: is this a useful test or too specific assert page.get_absolute_url() == \ '/iiif/%s/canvases/%s/' % (manif.short_id, page.short_id) def test_admin_thumb(self): img_service = 'https://images.co' img_id = 'some-file.jp2' page = Canvas(iiif_image_id='/'.join([img_service, img_id])) admin_thumb = page.admin_thumbnail() assert '<img src="' in admin_thumb assert str(page.image.mini_thumbnail()) in admin_thumb def test_next(self): manif = Manifest.objects.create(short_id='bk123', label='Book 1') page1, page2, page3 = Canvas.objects.bulk_create([ Canvas(label='P1', short_id='pg1', order=0, manifest=manif), Canvas(label='P2', short_id='pg2', order=1, manifest=manif), Canvas(label='P3', 
short_id='pg3', order=2, manifest=manif) ]) assert page1.next().short_id == page2.short_id assert page2.next().short_id == page3.short_id assert not page3.next() def test_prev(self): manif = Manifest.objects.create(short_id='bk123', label='Book 1') page1, page2, page3 = Canvas.objects.bulk_create([ Canvas(label='P1', short_id='pg1', order=0, manifest=manif), Canvas(label='P2', short_id='pg2', order=1, manifest=manif), Canvas(label='P3', short_id='pg3', order=2, manifest=manif) ]) assert not page1.prev() assert page2.prev().short_id == page1.short_id assert page3.prev().short_id == page2.short_id class TestIIIFImage(TestCase): def setUp(self): self.img_id = 'testimage.png' self.img_service = 'https://ima.ge/loris/' self.iiif_img = IIIFImage(self.img_service, self.img_id) def test_thumbnail(self): thumb_img = self.iiif_img.thumbnail() size_info = thumb_img.size.as_dict() assert size_info['width'] == self.iiif_img.thumbnail_size assert size_info['height'] == self.iiif_img.thumbnail_size assert size_info['exact'] is True assert thumb_img.image_options['fmt'] == 'png' def test_mini_thumbnail(self): thumb_img = self.iiif_img.mini_thumbnail() size_info = thumb_img.size.as_dict() assert size_info['width'] == self.iiif_img.mini_thumbnail_size assert size_info['height'] == self.iiif_img.mini_thumbnail_size assert size_info['exact'] is True assert thumb_img.image_options['fmt'] == 'png' def test_page_size(self): thumb_img = self.iiif_img.page_size() size_info = thumb_img.size.as_dict() assert size_info['width'] == self.iiif_img.single_page_size assert size_info['height'] == self.iiif_img.single_page_size assert size_info['exact'] is True class TestIIIFPresentation(TestCase): test_manifest = os.path.join(FIXTURE_DIR, 'chto-manifest.json') def test_from_file(self): pres = IIIFPresentation.from_file(self.test_manifest) assert isinstance(pres, IIIFPresentation) assert pres.type == 'sc:Manifest' def test_from_url(self): manifest_url = 'http://ma.ni/fe.st' with 
open(self.test_manifest) as manifest: data = json.loads(manifest.read()) with patch('djiffy.models.requests') as mockrequests: mockrequests.codes = requests.codes mockresponse = mockrequests.get.return_value mockresponse.status_code = requests.codes.ok mockresponse.json.return_value = data pres = IIIFPresentation.from_url(manifest_url) assert pres.type == 'sc:Manifest' mockrequests.get.assert_called_with(manifest_url) mockrequests.get.return_value.json.assert_called_with() # error handling # bad status code response on the url with pytest.raises(IIIFException) as excinfo: mockresponse.status_code = requests.codes.forbidden mockresponse.reason = 'Forbidden' IIIFPresentation.from_url(manifest_url) assert 'Error retrieving manifest' in str(excinfo.value) assert '403 Forbidden' in str(excinfo.value) # valid http response but not a json response with pytest.raises(IIIFException) as excinfo: mockresponse.status_code = requests.codes.ok # content type header does not indicate json mockresponse.headers = {'content-type': 'text/html'} mockresponse.json.side_effect = \ json.decoder.JSONDecodeError('err', 'doc', 1) IIIFPresentation.from_url(manifest_url) assert 'No JSON found' in str(excinfo.value) # json parsing error with pytest.raises(IIIFException) as excinfo: # content type header indicates json, but parsing failed mockresponse.headers = {'content-type': 'application/json'} mockresponse.json.side_effect = \ json.decoder.JSONDecodeError('err', 'doc', 1) IIIFPresentation.from_url(manifest_url) assert 'Error parsing JSON' in str(excinfo.value) def test_from_url_or_file(self): with patch.object(IIIFPresentation, 'from_url') as mock_from_url: # local fixture file pres = IIIFPresentation.from_file_or_url(self.test_manifest) assert pres.type == 'sc:Manifest' mock_from_url.assert_not_called() pres = IIIFPresentation.from_file_or_url('http://mani.fe/st') mock_from_url.assert_called_with('http://mani.fe/st') # nonexistent file path with pytest.raises(IIIFException) as excinfo: 
IIIFPresentation.from_file_or_url('/manifest/not/found') assert 'File not found: ' in str(excinfo.value) def test_short_id(self): manifest_uri = 'https://ii.if/resources/p0c484h74c/manifest' assert IIIFPresentation.short_id(manifest_uri) == 'p0c484h74c' canvas_uri = 'https://ii.if/resources/p0c484h74c/manifest/canvas/ps7527b878' assert IIIFPresentation.short_id(canvas_uri) == 'ps7527b878' def test_toplevel_attrs(self): pres = IIIFPresentation.from_file(self.test_manifest) assert pres.context == "http://iiif.io/api/presentation/2/context.json" assert pres.id == "https://plum.princeton.edu/concern/scanned_resources/ph415q7581/manifest" assert pres.type == "sc:Manifest" assert pres.label[0] == "Chto my stroim : Tetrad\u02b9 s kartinkami" assert pres.viewingHint == "paged" assert pres.viewingDirection == "left-to-right" def test_nested_attrs(self): pres = IIIFPresentation.from_file(self.test_manifest) assert isinstance(pres.sequences, tuple) assert pres.sequences[0].id == \ "https://plum.princeton.edu/concern/scanned_resources/ph415q7581/manifest/sequence/normal" assert pres.sequences[0].type == "sc:Sequence" assert isinstance(pres.sequences[0].canvases, tuple) assert pres.sequences[0].canvases[0].id == \ "https://plum.princeton.edu/concern/scanned_resources/ph415q7581/manifest/canvas/p02871v98d" def test_set(self): pres = IIIFPresentation.from_file(self.test_manifest) pres.label = 'New title' pres.type = 'sc:Collection' assert pres.label == 'New title' assert pres.type == 'sc:Collection' def test_del(self): pres = IIIFPresentation.from_file(self.test_manifest) del pres.label del pres.type assert not hasattr(pres, 'label') assert not hasattr(pres, 'type') def test_first_label(self): pres = IIIFPresentation.from_file(self.test_manifest) assert pres.first_label == pres.label[0] pres.label = 'unlisted single title' assert pres.first_label == pres.label class TestManifestImporter(TestCase): test_manifest = os.path.join(FIXTURE_DIR, 'chto-manifest.json') test_coll_manifest 
= os.path.join(FIXTURE_DIR, 'cotsen-collection-manifest.json') test_manifest_noseq = os.path.join(FIXTURE_DIR, 'manifest-noseq.json') def setUp(self): self.importer = ManifestImporter() def test_import_supported(self): # currently only supports paged/left-to-right pres = IIIFPresentation.from_file(self.test_manifest) assert self.importer.import_supported(pres) == True # non paged pres.viewingHint = 'non-paged' pres.viewingDirection = None assert self.importer.import_supported(pres) == False # no viewing hint or direction pres.viewingHint = None assert self.importer.import_supported(pres) == False @patch('djiffy.importer.get_iiif_url') def test_import_manifest(self, mock_getiiifurl): pres = IIIFPresentation.from_file(self.test_manifest) mock_extra_data = { 'title': { '@value': 'Sample extra metadata', '@language': 'eng' }, 'identifier': ['ark:/88435/tm70mz058'] }
org.logo is not None assert org.logo.filename == "logo.png" with request_ctx( "/settings/logo", method="POST", data={ "reset": "Reset", }) as ctx: login_user(user) resp = ctx.app.full_dispatch_request() assert resp.status_code == 200 org.reload() assert org.logo is None @patch("orcid_hub.utils.send_email", side_effect=send_mail_mock) def test_invite_organisation(send_email, request_ctx): """Test invite an organisation to register.""" org = Organisation.get(name="TEST0") root = User.get(email="<EMAIL>") user = User.create( email="<EMAIL>", name="TEST USER", confirmed=True, organisation=org) UserOrg.create(user=user, org=org, is_admin=True) with request_ctx( "/invite/organisation", method="POST", data={ "org_name": "THE ORGANISATION", "org_email": "<EMAIL>", "tech_contact": "True", "via_orcid": "True", "first_name": "xyz", "last_name": "xyz", "city": "xyz" }) as ctx: login_user(root, remember=True) rv = ctx.app.full_dispatch_request() assert rv.status_code == 200 assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content" assert b"<EMAIL>" in rv.data send_email.assert_called_once() with request_ctx( "/invite/organisation", method="POST", data={ "org_name": "ORG NAME", "org_email": "<EMAIL>", "tech_contact": "True", "via_orcid": "True", "first_name": "xyz", "last_name": "xyz", "city": "xyz" }) as ctx: send_email.reset_mock() login_user(root, remember=True) org = Organisation.get(id=1) org.name = "ORG NAME" org.confirmed = True org.save() rv = ctx.app.full_dispatch_request() assert rv.status_code == 200 assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content" assert b"<EMAIL>" in rv.data send_email.assert_called_once() def core_mock(self=None, source_file=None, schema_files=None, source_data=None, schema_data=None, extensions=None, strict_rule_validation=False, fix_ruby_style_regex=False, allow_assertions=False, ): """Mock validation api call.""" return None def validate(self=None, raise_exception=True): """Mock validation api call.""" return False 
@patch("pykwalify.core.Core.validate", side_effect=validate) @patch("pykwalify.core.Core.__init__", side_effect=core_mock) def test_load_researcher_funding(patch, patch2, request_ctx): """Test preload organisation data.""" org = Organisation.create( name="THE ORGANISATION", tuakiri_name="THE ORGANISATION", confirmed=False, orcid_client_id="CLIENT ID", orcid_secret="Client Secret", city="CITY", country="COUNTRY", disambiguated_id="ID", disambiguation_source="SOURCE", is_email_sent=True) user = User.create( email="<EMAIL>", name="TEST USER", roles=Role.ADMIN, orcid="123", confirmed=True, organisation=org) UserOrg.create(user=user, org=org, is_admin=True) with request_ctx( "/load/researcher/funding", method="POST", data={ "file_": ( BytesIO( b'[{"invitees": [{"identifier":"00001", "email": "<EMAIL>",' b'"first-name": "Alice", "last-name": "<NAME>", "ORCID-iD": null, "put-code":null}],' b'"title": { "title": { "value": "1ral"}},"short-description": "Mi","type": "CONTRACT",' b'"contributors": {"contributor": [{"contributor-attributes": {"contributor-role": ' b'"co_lead"},"credit-name": {"value": "firentini"}}]}' b', "external-ids": {"external-id": [{"external-id-value": ' b'"GNS170661","external-id-type": "grant_number"}]}}]'), "logo.json",), "email": user.email }) as ctx: login_user(user, remember=True) rv = ctx.app.full_dispatch_request() assert rv.status_code == 302 # Funding file successfully loaded. 
assert "task_id" in rv.location assert "funding" in rv.location @patch("pykwalify.core.Core.validate", side_effect=validate) @patch("pykwalify.core.Core.__init__", side_effect=core_mock) def test_load_researcher_work(patch, patch2, request_ctx): """Test preload work data.""" user = User.get(email="<EMAIL>") user.roles = Role.ADMIN user.save() with request_ctx( "/load/researcher/work", method="POST", data={ "file_": ( BytesIO( b'[{"invitees": [{"identifier":"00001", "email": "<EMAIL>",' b'"first-name": "Alice", "last-name": "<NAME>", "ORCID-iD": null, "put-code":null}],' b'"title": { "title": { "value": "1ral"}}, "citation": {"citation-type": ' b'"FORMATTED_UNSPECIFIED", "citation-value": "This is citation value"}, "type": "BOOK_CHR",' b'"contributors": {"contributor": [{"contributor-attributes": {"contributor-role": ' b'"AUTHOR", "contributor-sequence" : "1"},"credit-name": {"value": "firentini"}}]}' b', "external-ids": {"external-id": [{"external-id-value": ' b'"GNS170661","external-id-type": "grant_number"}]}}]'), "logo.json",), "email": user.email }) as ctx: login_user(user, remember=True) rv = ctx.app.full_dispatch_request() assert rv.status_code == 302 # Work file successfully loaded. 
assert "task_id" in rv.location assert "work" in rv.location @patch("pykwalify.core.Core.validate", side_effect=validate) @patch("pykwalify.core.Core.__init__", side_effect=core_mock) def test_load_researcher_peer_review(patch, patch2, request_ctx): """Test preload peer review data.""" user = User.get(email="<EMAIL>") user.roles = Role.ADMIN user.save() with request_ctx( "/load/researcher/peer_review", method="POST", data={ "file_": ( BytesIO( b'[{"invitees": [{"identifier": "00001", "email": "<EMAIL>", ' b'"first-name": "Alice", "last-name": "<NAME>", "ORCID-iD": null, "put-code": null}]' b', "reviewer-role": "REVIEWER", "review-identifiers": { "external-id": [{ ' b'"external-id-type": "source-work-id", "external-id-value": "1212221", "external-id-url": ' b'{"value": "https://localsystem.org/1234"}, "external-id-relationship": "SELF"}]}, ' b'"review-type": "REVIEW", "review-group-id": "issn:90122", "subject-container-name": { ' b'"value": "Journal title"}, "subject-type": "JOURNAL_ARTICLE", "subject-name": { ' b'"title": {"value": "Name of the paper reviewed"}},"subject-url": { ' b'"value": "https://subject-alt-url.com"}, "convening-organization": { "name": ' b'"The University of Auckland", "address": { "city": "Auckland", "region": "Auckland",' b' "country": "NZ" } }}]'), "logo.json",), "email": user.email }) as ctx: login_user(user, remember=True) rv = ctx.app.full_dispatch_request() assert rv.status_code == 302 # peer-review file successfully loaded. 
assert "task_id" in rv.location assert "peer" in rv.location def test_load_researcher_affiliations(request_ctx): """Test preload organisation data.""" org = Organisation.create( name="THE ORGANISATION", tuakiri_name="THE ORGANISATION", confirmed=False, orcid_client_id="CLIENT ID", orcid_secret="Client Secret", city="CITY", country="COUNTRY", disambiguated_id="ID", disambiguation_source="SOURCE", is_email_sent=True) user = User.create( email="<EMAIL>", name="TEST USER", roles=Role.ADMIN, orcid="123", confirmed=True, organisation=org) UserOrg.create(user=user, org=org, is_admin=True) form = FileUploadForm() form.file_.name = "conftest.py" with request_ctx("/load/researcher", method="POST", data={"file_": "{'filename': 'xyz.json'}", "email": user.email, form: form}) as ctxx: login_user(user, remember=True) rv = ctxx.app.full_dispatch_request() assert rv.status_code == 200 assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content" assert user.email.encode() in rv.data def test_edit_record(request_ctx): """Test create a new or edit an existing profile section record.""" admin = User.get(email="<EMAIL>") user = User.get(email="<EMAIL>") admin.organisation.orcid_client_id = "ABC123" admin.organisation.save() if not user.orcid: user.orcid = "XXXX-XXXX-XXXX-0001" user.save() fake_response = make_response fake_response.status = 201 fake_response.headers = {'Location': '12344/xyz/12399'} OrcidToken.create(user=user, org=user.organisation, access_token="ABC123", scope="/read-limited,/activities/update") with patch.object( orcid_client.MemberAPIV20Api, "view_employment", MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}')) ) as view_employment, request_ctx(f"/section/{user.id}/EMP/1212/edit") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert admin.email.encode() in resp.data assert admin.name.encode() in resp.data view_employment.assert_called_once_with("XXXX-XXXX-XXXX-0001", 1212) with patch.object( orcid_client.MemberAPIV20Api, 
"view_education", MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}')) ) as view_education, request_ctx(f"/section/{user.id}/EDU/1234/edit") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert admin.email.encode() in resp.data assert admin.name.encode() in resp.data view_education.assert_called_once_with("XXXX-XXXX-XXXX-0001", 1234) with patch.object( orcid_client.MemberAPIV20Api, "create_education", MagicMock(return_value=fake_response)), request_ctx( f"/section/{user.id}/EDU/new", method="POST", data={ "city": "Auckland", "country": "NZ", "org_name": "TEST", }) as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert resp.location == f"/section/{user.id}/EDU/list" affiliation_record = UserOrgAffiliation.get(user=user) # checking if the UserOrgAffiliation record is updated with put_code supplied from fake response assert 12399 == affiliation_record.put_code def test_delete_employment(request_ctx, app): """Test delete an employment record.""" admin = User.get(email="<EMAIL>") user = User.get(email="<EMAIL>") with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx: login_user(user) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert resp.location.startswith("/?next=") with request_ctx(f"/section/99999999/EMP/1212/delete", method="POST") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert resp.location == "/admin/viewmembers/" with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert resp.location == f"/section/{user.id}/EMP/list" admin.organisation.orcid_client_id = "ABC123" admin.organisation.save() with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert 
resp.location == f"/section/{user.id}/EMP/list" if not user.orcid: user.orcid = "XXXX-XXXX-XXXX-0001" user.save() token = OrcidToken.create( user=user, org=user.organisation, access_token="ABC123", scope="/read-limited") with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert resp.location == f"/section/{user.id}/EMP/list" token.scope = "/read-limited,/activities/update" token.save() with patch.object( orcid_client.MemberAPIV20Api, "delete_employment", MagicMock( return_value='{"test": "TEST1234567890"}')) as delete_employment, request_ctx( f"/section/{user.id}/EMP/12345/delete", method="POST") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 delete_employment.assert_called_once_with("XXXX-XXXX-XXXX-0001", 12345) with patch.object( orcid_client.MemberAPIV20Api, "delete_education", MagicMock(return_value='{"test": "TEST1234567890"}')) as delete_education, request_ctx( f"/section/{user.id}/EDU/54321/delete", method="POST") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 delete_education.assert_called_once_with("XXXX-XXXX-XXXX-0001", 54321) def test_viewmembers(request_ctx): """Test affilated researcher view.""" non_admin = User.get(email="<EMAIL>") with request_ctx("/admin/viewmembers") as ctx: login_user(non_admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 admin = User.get(email="<EMAIL>") with request_ctx("/admin/viewmembers") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 200 assert b"<EMAIL>" in resp.data with request_ctx("/admin/viewmembers/?flt1_0=2018-05-01+to+2018-05-31&flt2_1=2018-05-01+to+2018-05-31") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 200 assert b"<EMAIL>" not in resp.data with 
request_ctx(f"/admin/viewmembers/edit/?id={non_admin.id}") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 200 assert non_admin.email.encode() in resp.data assert non_admin.name.encode() in resp.data with request_ctx(f"/admin/viewmembers/edit/?id=9999999999") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 404 user2 = User.get(email="<EMAIL>") with request_ctx(f"/admin/viewmembers/edit/?id={user2.id}") as ctx: login_user(admin) resp = ctx.app.full_dispatch_request() assert resp.status_code == 403 def test_viewmembers_delete(request_ctx): """Test affilated researcher deletion via the view.""" admin0 = User.get(email="<EMAIL>") admin1 = User.get(email="<EMAIL>") researcher0 = User.get(email="<EMAIL>") researcher1 = User.get(email="<EMAIL>") with request_ctx( "/admin/viewmembers/delete/", method="POST", data={ "id": str(researcher1.id), "url": "/admin/viewmembers/", }) as ctx: # noqa: F405 login_user(admin0) resp = ctx.app.full_dispatch_request() assert resp.status_code == 403 with request_ctx( "/admin/viewmembers/delete/", method="POST", data={ "id": str(researcher0.id), "url": "/admin/viewmembers/", }) as ctx, patch( "orcid_hub.views.AppModelView.on_model_delete", create=True, side_effect=Exception("FAILURED")), patch( "orcid_hub.views.AppModelView.handle_view_exception", create=True, return_value=False): # noqa: F405 login_user(admin0) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert resp.location == "/admin/viewmembers/" assert User.select().where(User.id == researcher0.id).count() == 1 with request_ctx( "/admin/viewmembers/delete/", method="POST", data={ "id": str(researcher0.id), "url": "/admin/viewmembers/", }) as ctx: # noqa: F405 login_user(admin0) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 with pytest.raises(User.DoesNotExist): User.get(id=researcher0.id) UserOrg.create(org=admin0.organisation, user=researcher1) 
OrcidToken.create(org=admin0.organisation, user=researcher1, access_token="ABC<PASSWORD>") with request_ctx( "/admin/viewmembers/delete/", method="POST", data={ "id": str(researcher1.id), "url": "/admin/viewmembers/", }) as ctx, patch("orcid_hub.views.requests.post") as mockpost: # noqa: F405 org = researcher1.organisation mockpost.return_value = MagicMock(status_code=400) login_user(admin1) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert User.select().where(User.id == researcher1.id).count() == 1 assert UserOrg.select().where(UserOrg.user == researcher1).count() == 2 assert OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == researcher1).count() == 1 mockpost.side_effect = Exception("FAILURE") resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert User.select().where(User.id == researcher1.id).count() == 1 assert UserOrg.select().where(UserOrg.user == researcher1).count() == 2 assert OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == researcher1).count() == 1 mockpost.reset_mock(side_effect=True) mockpost.return_value = MagicMock(status_code=200) resp = ctx.app.full_dispatch_request() assert resp.status_code == 302 assert User.select().where(User.id == researcher1.id).count() == 1 assert UserOrg.select().where(UserOrg.user == researcher1).count() == 1 args, kwargs = mockpost.call_args assert args[0] == ctx.app.config["ORCID_BASE_URL"] + "oauth/revoke" data = kwargs["data"] assert data["client_id"] == "ABC123" assert data["client_secret"] == "SECRET-12345" assert data["token"].startswith("TOKEN-1") assert OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == researcher1).count() == 0 def test_reset_all(request_ctx): """Test reset batch process.""" org = Organisation.create( name="THE ORGANISATION", tuakiri_name="THE ORGANISATION", confirmed=False, orcid_client_id="CLIENT ID", orcid_secret="Client Secret",
import pytest from helpers.cluster import ClickHouseCluster import random import string import os import time from multiprocessing.dummy import Pool cluster = ClickHouseCluster(__file__) node = cluster.add_instance('node', main_configs=['configs/enable_keeper.xml', 'configs/logs_conf.xml'], with_zookeeper=True) from kazoo.client import KazooClient, KazooState, KeeperState def get_genuine_zk(): print("Zoo1", cluster.get_instance_ip("zoo1")) return cluster.get_kazoo_client('zoo1') def get_fake_zk(): print("node", cluster.get_instance_ip("node")) _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181", timeout=30.0) def reset_last_zxid_listener(state): print("Fake zk callback called for state", state) nonlocal _fake_zk_instance if state != KazooState.CONNECTED: _fake_zk_instance._reset() _fake_zk_instance.add_listener(reset_last_zxid_listener) _fake_zk_instance.start() return _fake_zk_instance def random_string(length): return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) def create_random_path(prefix="", depth=1): if depth == 0: return prefix return create_random_path(os.path.join(prefix, random_string(3)), depth - 1) def stop_zk(zk): try: if zk: zk.stop() zk.close() except: pass @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() yield cluster finally: cluster.shutdown() def test_simple_commands(started_cluster): try: genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() for zk in [genuine_zk, fake_zk]: zk.create("/test_simple_commands", b"") zk.create("/test_simple_commands/somenode1", b"hello") zk.set("/test_simple_commands/somenode1", b"world") for zk in [genuine_zk, fake_zk]: assert zk.exists("/test_simple_commands") assert zk.exists("/test_simple_commands/somenode1") print(zk.get("/test_simple_commands/somenode1")) assert zk.get("/test_simple_commands/somenode1")[0] == b"world" finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) def test_sequential_nodes(started_cluster): try: 
genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() genuine_zk.create("/test_sequential_nodes") fake_zk.create("/test_sequential_nodes") for i in range(1, 11): genuine_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True) genuine_zk.create("/test_sequential_nodes/" + ("b" * i)) fake_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True) fake_zk.create("/test_sequential_nodes/" + ("b" * i)) genuine_childs = list(sorted(genuine_zk.get_children("/test_sequential_nodes"))) fake_childs = list(sorted(fake_zk.get_children("/test_sequential_nodes"))) assert genuine_childs == fake_childs finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) def assert_eq_stats(stat1, stat2): assert stat1.version == stat2.version assert stat1.cversion == stat2.cversion assert stat1.aversion == stat2.aversion assert stat1.aversion == stat2.aversion assert stat1.dataLength == stat2.dataLength assert stat1.numChildren == stat2.numChildren def test_stats(started_cluster): try: genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() genuine_zk.create("/test_stats_nodes") fake_zk.create("/test_stats_nodes") genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes") assert_eq_stats(genuine_stats, fake_stats) for i in range(1, 11): genuine_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True) genuine_zk.create("/test_stats_nodes/" + ("b" * i)) fake_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True) fake_zk.create("/test_stats_nodes/" + ("b" * i)) genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes") assert_eq_stats(genuine_stats, fake_stats) for i in range(1, 11): print("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) genuine_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) genuine_zk.delete("/test_stats_nodes/" + ("b" * i)) fake_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + 
"{:010d}".format((i - 1) * 2)) fake_zk.delete("/test_stats_nodes/" + ("b" * i)) genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes") print(genuine_stats) print(fake_stats) assert_eq_stats(genuine_stats, fake_stats) for i in range(100): genuine_zk.set("/test_stats_nodes", ("q" * i).encode()) fake_zk.set("/test_stats_nodes", ("q" * i).encode()) genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes") print(genuine_stats) print(fake_stats) assert_eq_stats(genuine_stats, fake_stats) finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) def test_watchers(started_cluster): try: genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() genuine_zk.create("/test_data_watches") fake_zk.create("/test_data_watches") genuine_data_watch_data = None def genuine_callback(event): print("Genuine data watch called") nonlocal genuine_data_watch_data genuine_data_watch_data = event fake_data_watch_data = None def fake_callback(event): print("Fake data watch called") nonlocal fake_data_watch_data fake_data_watch_data = event genuine_zk.get("/test_data_watches", watch=genuine_callback) fake_zk.get("/test_data_watches", watch=fake_callback) print("Calling set genuine") genuine_zk.set("/test_data_watches", b"a") print("Calling set fake") fake_zk.set("/test_data_watches", b"a") time.sleep(3) print("Genuine data", genuine_data_watch_data) print("Fake data", fake_data_watch_data) assert genuine_data_watch_data == fake_data_watch_data genuine_children = None def genuine_child_callback(event): print("Genuine child watch called") nonlocal genuine_children genuine_children = event fake_children = None def fake_child_callback(event): print("Fake child watch called") nonlocal fake_children fake_children = event genuine_zk.get_children("/test_data_watches", watch=genuine_child_callback) fake_zk.get_children("/test_data_watches", watch=fake_child_callback) print("Calling genuine child") 
genuine_zk.create("/test_data_watches/child", b"b") print("Calling fake child") fake_zk.create("/test_data_watches/child", b"b") time.sleep(3) print("Genuine children", genuine_children) print("Fake children", fake_children) assert genuine_children == fake_children finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) def test_multitransactions(started_cluster): try: genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() for zk in [genuine_zk, fake_zk]: zk.create('/test_multitransactions') t = zk.transaction() t.create('/test_multitransactions/freddy') t.create('/test_multitransactions/fred', ephemeral=True) t.create('/test_multitransactions/smith', sequence=True) results = t.commit() assert len(results) == 3 assert results[0] == '/test_multitransactions/freddy' assert results[2].startswith('/test_multitransactions/smith0') is True from kazoo.exceptions import RolledBackError, NoNodeError for i, zk in enumerate([genuine_zk, fake_zk]): print("Processing ZK", i) t = zk.transaction() t.create('/test_multitransactions/q') t.delete('/test_multitransactions/a') t.create('/test_multitransactions/x') results = t.commit() print("Results", results) assert results[0].__class__ == RolledBackError assert results[1].__class__ == NoNodeError assert zk.exists('/test_multitransactions/q') is None assert zk.exists('/test_multitransactions/a') is None assert zk.exists('/test_multitransactions/x') is None finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) def exists(zk, path): result = zk.exists(path) return result is not None def get(zk, path): result = zk.get(path) return result[0] def get_children(zk, path): return [elem for elem in list(sorted(zk.get_children(path))) if elem not in ('clickhouse', 'zookeeper')] READ_REQUESTS = [ ("exists", exists), ("get", get), ("get_children", get_children), ] def create(zk, path, data): zk.create(path, data.encode()) def set_data(zk, path, data): zk.set(path, data.encode()) WRITE_REQUESTS = [ ("create", create), ("set_data", set_data), ] def 
delete(zk, path): zk.delete(path) DELETE_REQUESTS = [ ("delete", delete) ] class Request(object): def __init__(self, name, arguments, callback, is_return): self.name = name self.arguments = arguments self.callback = callback self.is_return = is_return def __str__(self): arg_str = ', '.join([str(k) + "=" + str(v) for k, v in self.arguments.items()]) return "ZKRequest name {} with arguments {}".format(self.name, arg_str) def generate_requests(prefix="/", iters=1): requests = [] existing_paths = [] for i in range(iters): for _ in range(100): rand_length = random.randint(0, 10) path = prefix for j in range(1, rand_length): path = create_random_path(path, 1) existing_paths.append(path) value = random_string(1000) request = Request("create", {"path" : path, "value": value[0:10]}, lambda zk, path=path, value=value: create(zk, path, value), False) requests.append(request) for _ in range(100): path = random.choice(existing_paths) value = random_string(100) request = Request("set", {"path": path, "value": value[0:10]}, lambda zk, path=path, value=value: set_data(zk, path, value), False) requests.append(request) for _ in range(100): path = random.choice(existing_paths) callback = random.choice(READ_REQUESTS) def read_func1(zk, path=path, callback=callback): return callback[1](zk, path) request = Request(callback[0], {"path": path}, read_func1, True) requests.append(request) for _ in range(30): path = random.choice(existing_paths) request = Request("delete", {"path": path}, lambda zk, path=path: delete(zk, path), False) for _ in range(100): path = random.choice(existing_paths) callback = random.choice(READ_REQUESTS) def read_func2(zk, path=path, callback=callback): return callback[1](zk, path) request = Request(callback[0], {"path": path}, read_func2, True) requests.append(request) return requests def test_random_requests(started_cluster): try: requests = generate_requests("/test_random_requests", 10) print("Generated", len(requests), "requests") genuine_zk = get_genuine_zk() 
fake_zk = get_fake_zk() genuine_zk.create("/test_random_requests") fake_zk.create("/test_random_requests") for i, request in enumerate(requests): genuine_throw = False fake_throw = False fake_result = None genuine_result = None try: genuine_result = request.callback(genuine_zk) except Exception as ex: print("i", i, "request", request) print("Genuine exception", str(ex)) genuine_throw = True try: fake_result = request.callback(fake_zk) except Exception as ex: print("i", i, "request", request) print("Fake exception", str(ex)) fake_throw = True assert fake_throw == genuine_throw, "Fake throw genuine not or vise versa request {}" assert fake_result == genuine_result, "Zookeeper results differ" root_children_genuine = [elem for elem in list(sorted(genuine_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')] root_children_fake = [elem for elem in list(sorted(fake_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')] assert root_children_fake == root_children_genuine finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) def test_end_of_session(started_cluster): fake_zk1 = None fake_zk2 = None genuine_zk1 = None genuine_zk2 = None try: fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk1.start() fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk2.start() genuine_zk1 = cluster.get_kazoo_client('zoo1') genuine_zk1.start() genuine_zk2 = cluster.get_kazoo_client('zoo1') genuine_zk2.start() fake_zk1.create("/test_end_of_session") genuine_zk1.create("/test_end_of_session") fake_ephemeral_event = None def fake_ephemeral_callback(event): print("Fake watch triggered") nonlocal fake_ephemeral_event fake_ephemeral_event = event genuine_ephemeral_event = None def genuine_ephemeral_callback(event): print("Genuine watch triggered") nonlocal genuine_ephemeral_event genuine_ephemeral_event = event assert fake_zk2.exists("/test_end_of_session") is not None assert 
genuine_zk2.exists("/test_end_of_session") is not None fake_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) genuine_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) assert fake_zk2.exists("/test_end_of_session/ephemeral_node", watch=fake_ephemeral_callback) is not None assert genuine_zk2.exists("/test_end_of_session/ephemeral_node", watch=genuine_ephemeral_callback) is not None print("Stopping genuine zk") genuine_zk1.stop() print("Closing genuine zk") genuine_zk1.close() print("Stopping fake zk") fake_zk1.stop() print("Closing fake zk") fake_zk1.close() assert fake_zk2.exists("/test_end_of_session/ephemeral_node") is None assert genuine_zk2.exists("/test_end_of_session/ephemeral_node") is None assert fake_ephemeral_event == genuine_ephemeral_event finally: for zk in [fake_zk1, fake_zk2, genuine_zk1, genuine_zk2]: stop_zk(zk) def test_end_of_watches_session(started_cluster): fake_zk1 = None fake_zk2 = None try: fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk1.start() fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk2.start() fake_zk1.create("/test_end_of_watches_session") dummy_set = 0 def dummy_callback(event): nonlocal dummy_set dummy_set += 1 print(event) for child_node in range(100): fake_zk1.create("/test_end_of_watches_session/" + str(child_node)) fake_zk1.get_children("/test_end_of_watches_session/" + str(child_node), watch=dummy_callback) fake_zk2.get_children("/test_end_of_watches_session/" + str(0), watch=dummy_callback) fake_zk2.get_children("/test_end_of_watches_session/" + str(1), watch=dummy_callback) fake_zk1.stop() fake_zk1.close() for child_node in range(100): fake_zk2.create("/test_end_of_watches_session/" + str(child_node) + "/" + str(child_node), b"somebytes") assert dummy_set == 2 finally: for zk in [fake_zk1, fake_zk2]: stop_zk(zk) def test_concurrent_watches(started_cluster): try: fake_zk = get_fake_zk() fake_zk.restart() global_path = 
"/test_concurrent_watches_0" fake_zk.create(global_path) dumb_watch_triggered_counter = 0 all_paths_triggered = [] existing_path = [] all_paths_created = [] watches_created = 0 def create_path_and_watch(i): nonlocal watches_created nonlocal all_paths_created fake_zk.ensure_path(global_path + "/" + str(i)) # new function each time def dumb_watch(event): nonlocal dumb_watch_triggered_counter dumb_watch_triggered_counter += 1 nonlocal all_paths_triggered all_paths_triggered.append(event.path) fake_zk.get(global_path + "/" + str(i), watch=dumb_watch) all_paths_created.append(global_path + "/" + str(i)) watches_created += 1 existing_path.append(i) trigger_called = 0 def trigger_watch(i): nonlocal trigger_called trigger_called += 1 fake_zk.set(global_path + "/" + str(i), b"somevalue") try: existing_path.remove(i) except: pass def call(total): for i in range(total): create_path_and_watch(random.randint(0, 1000)) time.sleep(random.random() % 0.5) try: rand_num = random.choice(existing_path) trigger_watch(rand_num) except: pass while existing_path: try: rand_num = random.choice(existing_path) trigger_watch(rand_num) except: pass p = Pool(10) arguments = [100] *
<reponame>JeschkeLab/DeerLab
# Test suite for deerlab.model.Model covering construction, parameter
# attribute editing, call conventions (keyword/positional/mixed), and
# linear-parameter support (scalar amplitudes and vector parameters).
from collections import namedtuple
from deerlab.whitegaussnoise import whitegaussnoise
from deerlab.model import Model, fit
import numpy as np

# Simple non-linear function for testing
# Shared evaluation axis used by every model function below.
x = np.linspace(0,5,100)
def gauss(mean,width):
    # Unnormalized Gaussian evaluated on the module-level axis `x`.
    return np.exp(-(x-mean)**2/width**2/2)

# Non-linear definition
def gauss2(mean1,mean2,width1,width2,amp1,amp2):
    # Sum of two amplitude-scaled Gaussians; all six parameters non-linear.
    return amp1*gauss(mean1,width1) + amp2*gauss(mean2,width2)

# Linear + Non-linear definition
def gauss2_design(mean1,mean2,width1,width2):
    # Design matrix of shape (len(x), 2); the two amplitudes enter as
    # linear parameters added via Model.addlinear in the tests below.
    return np.atleast_2d([gauss(mean1,width1), gauss(mean2,width2)]).T

# Linear + Non-linear definition
def gauss2_identity():
    # Identity design matrix: the linear parameter vector IS the output.
    return np.eye(len(x))

# Linear + Non-linear definition
def gauss2_scaled(scale):
    # Identity design matrix scaled by a single non-linear parameter.
    return scale*np.eye(len(x))

# Reference signal shared by the fitting tests further down the file.
mock_data = gauss2(mean1=3,mean2=4,width1=0.5,width2=0.2,amp1=0.5,amp2=0.6)

def test_construction_length():
#================================================================
    "Check that the model is constructed correctly with the appropriate number of parameters"
    model = Model(gauss)
    assert model.Nparam==2
#================================================================

def test_construction_names():
#================================================================
    "Check that the model is constructed correctly with the correct parameter names"
    model = Model(gauss)
    assert 'mean' in model.__dict__ and 'width' in model.__dict__
#================================================================

def test_parameters_set():
#================================================================
    "Check that attributes of the parameters are editable"
    model = Model(gauss)
    model.mean.set(lb=0,ub=10)
    assert getattr(model.mean,'lb')==0 and getattr(model.mean,'ub')==10
#================================================================

def test_call_keywords():
#================================================================
    "Check that calling the model with keyword parameters returns the correct response"
    model = Model(gauss)
    response = model(mean=3,width=0.5)
    reference = gauss(mean=3,width=0.5)
    assert np.allclose(response,reference)
#================================================================

def test_call_positional():
#================================================================
    "Check that calling the model with positional parameters returns the correct response"
    model = Model(gauss)
    response = model(3,0.5)
    reference = gauss(3,0.5)
    assert np.allclose(response,reference)
#================================================================

def test_call_mixed():
#================================================================
    "Check that calling the model with mixed positional/keyword parameters returns the correct response"
    model = Model(gauss)
    response = model(3,width=0.5)
    reference = gauss(3,0.5)
    assert np.allclose(response,reference)
#================================================================

def test_addlinear_length():
#================================================================
    "Check that the model is constructed correctly with the appropriate number of parameters"
    model = Model(gauss2_design)
    model.addlinear('amp1',lb=0)
    model.addlinear('amp2',lb=0)
    assert model.Nparam==6
#================================================================

def test_addlinear_names():
#================================================================
    "Check that linear parameters can be properly added"
    model = Model(gauss2_design)
    model.addlinear('amp1',lb=0)
    model.addlinear('amp2',lb=0)
    assert 'amp1' in model.__dict__ and 'amp2' in model.__dict__
#================================================================

def test_addlinear_set():
#================================================================
    "Check that attributes of the linear parameters are editable"
    model = Model(gauss2_design)
    model.addlinear('amp1')
    model.amp1.set(lb=0,ub=10)
    assert getattr(model.amp1,'lb')==0 and getattr(model.amp1,'ub')==10
#================================================================

def test_addlinear_call_keywords():
#================================================================
    "Check that calling the model with keyword parameters returns the correct response"
    model = Model(gauss2_design)
    model.addlinear('amp1')
    model.addlinear('amp2')
    response = model(mean1=3,mean2=4,width1=0.2,width2=0.3,amp1=0.5,amp2=0.4)
    reference = gauss2(mean1=3,mean2=4,width1=0.2,width2=0.3,amp1=0.5,amp2=0.4)
    assert np.allclose(response,reference)
#================================================================

def test_addlinear_call_positional():
#================================================================
    "Check that calling the model with positional parameters returns the correct response"
    model = Model(gauss2_design)
    model.addlinear('amp1')
    model.addlinear('amp2')
    response = model(3,4,0.2,0.3,0.5,0.4)
    reference = gauss2(3,4,0.2,0.3,0.5,0.4)
    assert np.allclose(response,reference)
#================================================================

def test_addlinear_call_mixed():
#================================================================
    "Check that calling the model with mixed positional/keyword parameters returns the correct response"
    model = Model(gauss2_design)
    model.addlinear('amp1')
    model.addlinear('amp2')
    response = model(3,4,0.2,width2=0.3,amp1=0.5,amp2=0.4)
    reference = gauss2(3,4,0.2,width2=0.3,amp1=0.5,amp2=0.4)
    assert np.allclose(response,reference)
#================================================================

def test_addlinear_vector_length():
#================================================================
    "Check that linear parameters can be defined as vectors"
    model = Model(gauss2_identity)
    model.addlinear('gaussian', vec=100)
    assert model.Nparam==100
#================================================================

def test_addlinear_vector_names():
#================================================================
    "Check that linear parameters can be defined as vectors"
    model = Model(gauss2_identity)
    model.addlinear('gaussian', vec=100)
    assert 'gaussian' in model.__dict__
def test_addlinear_vector_set():
    "Attributes of vector-valued linear parameters are editable"
    m = Model(gauss2_identity)
    m.addlinear('gaussian', vec=100)
    m.gaussian.set(lb=np.zeros(100))
    assert np.allclose(m.gaussian.lb, np.zeros(100))


def test_addlinear_vector_call_keywords():
    "A vector linear parameter passed by keyword is reproduced by the identity model"
    m = Model(gauss2_identity)
    m.addlinear('gaussian', vec=len(x))
    expected = gauss2(mean1=3, mean2=4, width1=0.2, width2=0.3, amp1=0.5, amp2=0.4)
    assert np.allclose(m(gaussian=expected), expected)


def test_addlinear_vector_call_positional():
    "A vector linear parameter passed positionally is reproduced by the identity model"
    m = Model(gauss2_identity)
    m.addlinear('gaussian', vec=len(x))
    expected = gauss2(3, 4, 0.2, 0.3, 0.5, 0.4)
    assert np.allclose(m(expected), expected)


def test_mixed_vector_length():
    "A scalar non-linear parameter plus a vector linear parameter give the right count"
    m = Model(gauss2_scaled)
    m.addlinear('gaussian', vec=100)
    assert m.Nparam == 101


def test_mixed_vector_names():
    "Scalar non-linear and vector linear parameters are both exposed as attributes"
    m = Model(gauss2_scaled)
    m.addlinear('gaussian', vec=100)
    assert 'gaussian' in m.__dict__ and 'scale' in m.__dict__


def test_mixed_vector_call_keywords():
    "Mixed scalar/vector models evaluate correctly via keywords"
    m = Model(gauss2_scaled)
    m.addlinear('gaussian', vec=len(x))
    shape = gauss2(mean1=3, mean2=4, width1=0.2, width2=0.3, amp1=0.5, amp2=0.4)
    assert np.allclose(m(scale=5, gaussian=shape), 5 * shape)


def test_mixed_vector_call_positional():
    "Mixed scalar/vector models evaluate correctly positionally"
    m = Model(gauss2_scaled)
    m.addlinear('gaussian', vec=len(x))
    shape = gauss2(3, 4, 0.2, 0.3, 0.5, 0.4)
    assert np.allclose(m(5, shape), 5 * shape)


def test_addnonlinear_length():
    "Adding non-linear parameters yields the appropriate total parameter count"
    m = Model(gauss)
    m.addnonlinear('trivial1', lb=0)
    m.addnonlinear('trivial2', lb=0)
    assert m.Nparam == 4


def test_addnonlinear_names():
    "Added non-linear parameters become attributes of the model"
    m = Model(gauss)
    m.addnonlinear('trivial1', lb=0)
    m.addnonlinear('trivial2', lb=0)
    assert hasattr(m, 'trivial1') and hasattr(m, 'trivial2')


def test_addnonlinear_set():
    "Attributes of added non-linear parameters are editable"
    m = Model(gauss)
    m.addnonlinear('trivial1')
    m.trivial1.set(lb=0, ub=10)
    assert m.trivial1.lb == 0 and m.trivial1.ub == 10


def test_addnonlinear_call_keywords():
    "Unused trivial non-linear parameters do not change the response (keyword call)"
    m = Model(gauss)
    m.addnonlinear('trivial1')
    m.addnonlinear('trivial2')
    assert np.allclose(m(mean=3, width=0.2, trivial1=1, trivial2=1), gauss(mean=3, width=0.2))


def test_addnonlinear_call_positional():
    "Unused trivial non-linear parameters do not change the response (positional call)"
    m = Model(gauss)
    m.addnonlinear('trivial1')
    m.addnonlinear('trivial2')
    assert np.allclose(m(3, 0.2, 1, 1), gauss(3, 0.2))


def _getmodel(type):
    """Build a two-Gaussian test model of the requested kind.

    'parametric'     : all parameters non-linear (gauss2)
    'semiparametric' : non-linear shape parameters + linear amplitudes
    'nonparametric'  : fixed design matrix + linear amplitudes
    """
    if type == 'parametric':
        model = Model(gauss2)
        model.mean1.set(lb=0, ub=10, par0=2)
        model.mean2.set(lb=0, ub=10, par0=4)
        model.width1.set(lb=0.01, ub=5, par0=0.2)
        model.width2.set(lb=0.01, ub=5, par0=0.2)
        model.amp1.set(lb=0, ub=5, par0=1)
        model.amp2.set(lb=0, ub=5, par0=1)
    elif type == 'semiparametric':
        model = Model(gauss2_design)
        model.mean1.set(lb=0, ub=10, par0=2)
        model.mean2.set(lb=0, ub=10, par0=4)
        model.width1.set(lb=0.01, ub=5, par0=0.2)
        model.width2.set(lb=0.01, ub=5, par0=0.2)
        model.addlinear('amp1', lb=0, ub=5)
        model.addlinear('amp2', lb=0, ub=5)
    elif type == 'nonparametric':
        model = Model(gauss2_design(3, 4, 0.5, 0.2))
        model.addlinear('amp1', lb=0)
        model.addlinear('amp2', lb=0)
    return model
# ---------------------------------------------------------------------
# fit() behaviour: preservation, fitting, freezing, confidence intervals
# ---------------------------------------------------------------------

def test_preserve_original():
    "Check that the original model is not changed by the function"
    model = Model(gauss)
    model.mean.par0 = 3
    model.width.par0 = 0.2
    _ = fit(model,mock_data)
    assert model._parameter_list() == ['mean','width']

def test_fit_parametric():
    "Check that a parametric model can be correctly fitted"
    model = _getmodel('parametric')
    fitResult = fit(model,mock_data)
    assert np.allclose(fitResult.model,mock_data)

def test_fit_semiparametric():
    "Check that a semiparametric model can be correctly fitted"
    model = _getmodel('semiparametric')
    fitResult = fit(model,mock_data)
    assert np.allclose(fitResult.model,mock_data)

def test_fit_nonparametric():
    "Check that a nonparametric model can be correctly fitted"
    model = _getmodel('nonparametric')
    fitResult = fit(model,mock_data)
    # looser tolerance: the design matrix is fixed, only amplitudes are fitted
    assert np.allclose(fitResult.model,mock_data,atol=1e-3)

def test_freeze():
    "Check that a model parameter can be frozen to a fixed value"
    model = Model(gauss)
    model.mean.freeze(3)
    assert model.mean.value==3 and model.mean.frozen==True

def test_unfreeze():
    "Check that a model parameter can be frozen and then reversed"
    model = Model(gauss)
    model.mean.freeze(3)
    model.mean.unfreeze()
    assert model.mean.frozen==False and model.mean.value==None

def test_fit_parametric_frozen():
    "Check that a parametric model can be correctly fitted with a frozen parameter"
    model = _getmodel('parametric')
    model.mean1.freeze(3)
    fitResult = fit(model,mock_data)
    assert np.allclose(fitResult.model,mock_data)

def test_fit_semiparametric_frozen():
    "Check that a semiparametric model can be correctly fitted with a frozen parameter"
    model = _getmodel('semiparametric')
    model.mean1.freeze(3)
    fitResult = fit(model,mock_data)
    assert np.allclose(fitResult.model,mock_data)

def test_fit_nonparametric_frozen():
    "Check that a nonparametric model can be correctly fitted with a frozen parameter"
    model = _getmodel('nonparametric')
    model.amp1.freeze(0.5)
    fitResult = fit(model,mock_data)
    assert np.allclose(fitResult.model,mock_data)

#----------------------------------------------------------------
def assert_attributes_cis(fitobject,attributes):
    # Helper: for each fitted attribute, check that the fitted value lies
    # inside its own 95% confidence interval; for bootstrapped uncertainty
    # additionally check that the fit equals the bootstrap median.
    for attr in attributes:
        parfit = getattr(fitobject,attr)
        parci = getattr(fitobject,f'{attr}Uncert').ci(95)
        ci_lower = parci[0]
        ci_upper = parci[1]
        if getattr(fitobject,f'{attr}Uncert').type=='bootstrap':
            assert np.allclose(parfit,getattr(fitobject,f'{attr}Uncert').median)
        assert parfit<=ci_upper and parfit>=ci_lower
#----------------------------------------------------------------

def test_CIs_parametric():
    "Check the default confidence intervals of the fitted parameters"
    model = _getmodel('parametric')
    fitResult = fit(model,mock_data)
    assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])

def test_CIs_semiparametric():
    "Check the default confidence intervals of the fitted parameters"
    model = _getmodel('semiparametric')
    fitResult = fit(model,mock_data)
    assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])

def test_CIs_nonparametric():
    "Check the default confidence intervals of the fitted parameters"
    # NOTE(review): builds the 'semiparametric' model although the test name
    # says nonparametric — confirm whether 'nonparametric' was intended.
    model = _getmodel('semiparametric')
    fitResult = fit(model,mock_data)
    assert_attributes_cis(fitResult,['amp1','amp2'])

def test_bootCIs_parametric():
    "Check the bootstrapped confidence intervals of the fitted parameters"
    model = _getmodel('parametric')
    noisydata = mock_data + whitegaussnoise(0.01,seed=1)
    fitResult = fit(model,noisydata,bootstrap=3)
    assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])

def test_bootCIs_semiparametric():
    "Check the bootstrapped confidence intervals of the fitted parameters"
    model = _getmodel('semiparametric')
    noisydata = mock_data + whitegaussnoise(0.01,seed=1)
    fitResult = fit(model,noisydata,bootstrap=3)
    assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])

def test_bootCIs_nonparametric():
    "Check the bootstrapped confidence intervals of the fitted parameters"
    # NOTE(review): same name/model mismatch as test_CIs_nonparametric.
    model = _getmodel('semiparametric')
    noisydata = mock_data + whitegaussnoise(0.01,seed=1)
    fitResult = fit(model,noisydata,bootstrap=3)
    assert_attributes_cis(fitResult,['amp1','amp2'])

# Simple non-linear function for testing, with an explicit axis argument
def gauss_axis(axis,mean,width):
    return np.exp(-(axis-mean)**2/width**2/2)

# Non-linear definition
def gauss2_axis(axis,mean1,mean2,width1,width2,amp1,amp2):
    return amp1*gauss_axis(axis,mean1,width1) + amp2*gauss_axis(axis,mean2,width2)

# Linear + Non-linear definition
def gauss2_design_axis(axis,mean1,mean2,width1,width2):
    return np.atleast_2d([gauss_axis(axis,mean1,width1), gauss_axis(axis,mean2,width2)]).T

# Linear + Non-linear definition
def gauss2_identity_axis(axis):
    return np.eye(len(axis))

# Reference dataset generator for the axis-based models
mock_data_fcn = lambda axis: gauss2_axis(axis,mean1=3,mean2=4,width1=0.5,width2=0.2,amp1=0.5,amp2=0.6)

def test_model_with_constant_positional():
    "Check that a model with axis can be defined and called"
    model = Model(gauss_axis,constants='axis')
    x = np.linspace(0,10,300)
    reference = gauss_axis(x,3,0.5)
    response = model(x,3,0.5)
    assert np.allclose(reference,response)

def test_model_with_constant_keywords():
    "Check that a model with axis can be defined and called via keywords"
    model = Model(gauss_axis,constants='axis')
    x = np.linspace(0,10,300)
    reference = gauss_axis(x,3,0.5)
    response = model(axis=x, mean=3, width=0.5)
    assert np.allclose(reference,response)

def test_model_with_constant_mixed():
    "Check that a model with axis can be defined and called via mixed arguments"
    model = Model(gauss_axis,constants='axis')
    x = np.linspace(0,10,300)
    reference = gauss_axis(x,3,0.5)
    response = model(x, mean=3, width=0.5)
    assert np.allclose(reference,response)

#----------------------------------------------------------------
def _getmodel_axis(type):
    # Same as _getmodel(), but the models take the axis as a fit constant.
    if type=='parametric':
        model = Model(gauss2_axis,constants='axis')
        model.mean1.set(lb=0, ub=10, par0=2)
        model.mean2.set(lb=0, ub=10, par0=4)
        model.width1.set(lb=0.01, ub=5, par0=0.2)
        model.width2.set(lb=0.01, ub=5, par0=0.2)
        model.amp1.set(lb=0, ub=5, par0=1)
        model.amp2.set(lb=0, ub=5, par0=1)
    elif type=='semiparametric':
        model = Model(gauss2_design_axis,constants='axis')
        model.mean1.set(lb=0, ub=10, par0=2)
        model.mean2.set(lb=0, ub=10, par0=4)
        model.width1.set(lb=0.01, ub=5, par0=0.2)
        model.width2.set(lb=0.01, ub=5, par0=0.2)
        model.addlinear('amp1',lb=0, ub=5)
        model.addlinear('amp2',lb=0, ub=5)
    elif type=='nonparametric':
        model = Model(lambda x: gauss2_design_axis(x,3,4,0.5,0.2),constants='x')
        model.addlinear('amp1',lb=0)
        model.addlinear('amp2',lb=0)
    return model
#----------------------------------------------------------------

def test_fit_parametric_constant():
    "Check that a parametric model can be correctly fitted while specifying an axis"
    model = _getmodel_axis('parametric')
    x = np.linspace(0,10,200)
    fitResult = fit(model,mock_data_fcn(x),x)
    assert np.allclose(fitResult.model,mock_data_fcn(x))

def test_fit_semiparametric_constant():
    "Check that a semiparametric model can be correctly fitted while specifying an axis"
    # NOTE: the source chunk is truncated here (mid-docstring in the
    # original); the remainder of this test lies outside the visible range.
        # --- fragment: tail of a method that begins before this chunk ---
        # (indentation reconstructed; the source was collapsed onto single
        #  lines — verify nesting against the original file)
        # ... might need to add also the score
        if self.showScores:
            additionalLines=[]
            currentLineEndings = [name.split('.')[-1] for name in currentLines]
            for key in newLines:
                scoreName = key.split('.')[-1]+"_score"
                for scoreVar in scoreVars:
                    if scoreName in scoreVar:
                        #we add this one only if it is not there already
                        if scoreName not in currentLineEndings:
                            additionalLines.append(scoreVar)
            if additionalLines:
                additionalLines=list(set(additionalLines))# remove duplicates
                self.logger.debug(f"MUST add scores: {additionalLines}.. in the next event")
                self.server.add_variables_selected(additionalLines)
                #return # wait for next event
        # a change in the selected lines forces a y-axis rescale
        if deleteLines or newLines:
            forceYRescale = True
        else:
            forceYRescale = False
        data = self.__plot_lines(newVars = newLines,appendingDataArrived=appendingDataArrived,forceYRescale=forceYRescale) # the data contain all visible time series including the background
        #todo: make this differential as well
        if self.server.get_settings()["background"]["hasBackground"]:
            self.refresh_backgrounds(data)
        if self.server.get_settings()["hasHover"] not in [False,None]:
            self.__make_tooltips() #must be the last in the drawings

    def refresh_backgrounds_old(self):
        """ check if backgrounds must be drawn if not, we just hide them"""
        self.hide_backgrounds()
        if self.showBackgrounds:
            self.show_backgrounds()

    def refresh_backgrounds(self,data = None):
        # Redraw the background renderers; 'data' is forwarded to
        # show_backgrounds() (presumably the visible time series — confirm).
        if self.backgroundHighlightVisible:
            return #don't touch a running selection
        self.background_highlight_hide()
        # we show the new backgrounds first and then delete the old to avoid
        # the short empty time, looks a bit better
        deleteList = []
        for r in self.plot.renderers:
            if r.name:
                if "__background" in r.name:
                    deleteList.append(r.name)
        if self.showBackgrounds:
            self.show_backgrounds(data = data)
        if deleteList:
            self.remove_renderers(deleteList=deleteList)

    def var_select_button_cb(self):
        """ UI callback, called when the variable selection button was clicked """
        #apply the selected vars to the plot and the backend
        currentSelection = self.variablesMultiSelect.value
        #write the changes to the backend
        self.server.set_variables_selected(currentSelection)
        self.refresh_plot()

    def event_cb(self,event):
        """ the event callback from the UI for any user interaction: zoom, select, annotate etc
            Args:
                event (bokeh event): the event that happened
        """
        eventType = str(event.__class__.__name__)
        msg = " "
        for k in event.__dict__:
            msg += str(k) + " " + str(event.__dict__[k]) + " "
        self.logger.debug("event " + eventType + msg)
        #print("event " + eventType + msg)
        if eventType in ["PanStart","Pan"]:
            if self.streamingMode:
                self.userZoomRunning = True # the user is starting with pannin, we old the ui updates during user pan
            self.inPan = True
        """
        if eventType == "PanEnd":
            #self.refresh_plot()
            if self.streamingMode:
                self.userZoomRunning = False # the user is finished with zooming, we can now push data to the UI again
            #self.logger.debug(f"{self.toolBarBox.toolbar.active_pan}")
            self.autoAdjustY = False
            self.refresh_plot()
        """
        #if eventType == "LODEnd":
        if eventType in ["LODEnd","PanEnd"]:
            self.inPan = False
            if self.streamingMode:
                self.userZoomRunning = False # the user is finished with zooming, we can now push data to the UI again
                # also update the zoom level during streaming
                self.streamingInterval = self.plot.x_range.end - self.plot.x_range.start #.rangeEnd - self.rangeStart
                self.logger.debug(f"new streaming interval: {self.streamingInterval}")
            #if self.server.get_settings()["autoScaleY"][".properties"]["value"] == True
            if eventType=="LODEnd":# self.boxModifierVisible:
                self.autoAdjustY = self.server.get_mirror()["autoScaleY"][".properties"]["value"]
            self.server.set_xy_range(self.rangeStart,self.rangeEnd)
            self.refresh_plot()
        if eventType == "Reset":
            self.reset_plot_cb()
        if eventType == "SelectionGeometry":
            #option = self.annotationButtons.active # gives a 0,1 list, get the label now
            #tags = self.server.get_settings()["tags"]
            #mytag = self.annotationTags[option]
            for k,v in self.columnData.items():
                #v.selected = Selection(indices=[]) #not allowed in bokeh 2.01 f
                v.selected.indices = []
                v.data = dict(v.data)
                pass
            mytag =self.currentAnnotationTag
            #self.logger.info("TAGS"+str(self.annotationTags)+" "+str(option))
            #self.data.selected = Selection(indices=[]) # suppress real selection
            if mytag != None:
                self.edit_annotation_cb(event.__dict__["geometry"]["x0"],event.__dict__["geometry"]["x1"],mytag,event.__dict__["geometry"]["y0"],event.__dict__["geometry"]["y1"])
        if eventType == "Tap":
            #self.logger.debug(f"TAP {self.annotationsVisible}, {event.__dict__['sx']}")
            #plot all attributes
            #self.logger.debug(f"legend {self.plot.legend.width}")
            self.box_modifier_tap(event.__dict__["x"],event.__dict__["y"] )
            self.logger.debug(f"TAP done")
        self.logger.debug(f"leave event with user zomm running{self.userZoomRunning}")

    def reset_plot_cb(self):
        # Reset zoom range and any active box selection, then redraw.
        self.logger.debug("reset plot")
        self.rangeStart = None
        self.rangeEnd = None
        self.box_modifier_hide() # reset the selection
        self.refresh_plot()

    def delete_annotations(self,annoIds,apply=True):
        # Remove the given annotation ids from the per-tag annotation tables;
        # if apply is True, push the updated table to its ColumnDataSource.
        for tag,v in self.annotationsInfo.items():
            hasChanged = False
            for id in annoIds:
                if id in v["data"]["id"]:
                    #get the index and rework the table: we take out the index of this match
                    fIdx =v["data"]["id"].index(id)
                    for k,original in v["data"].items():
                        self.annotationsInfo[tag]["data"][k]=[item for idx, item in enumerate(original) if idx != fIdx]
                    hasChanged = True
            if hasChanged and apply:
                # apply the update
                self.annotationsInfo[tag]["ColumnDataSource"].data = dict(self.annotationsInfo[tag]["data"])
                hasChanged = False

    def find_renderer(self,rendererName):
        # Look up a renderer by name among the plot renderers, then among
        # the annotation renderers; returns None if not found.
        for r in self.plot.renderers:
            if r.name:
                if r.name == rendererName:
                    return r
        #also look through the annotations
        for k,v in self.annotationsInfo.items():
            if rendererName in v["data"]["name"]:
                return v["renderer"]
        return None

    def add_renderers(self,addList):
        # Append the given bokeh renderers to the plot.
        self.plot.renderers.extend(addList)

    def remove_renderers(self,deleteList=[],deleteMatch="",renderers=[],deleteFromLocal = False):
        """ this functions removes renderers (plotted elements from the widget), we find the ones to delete based on their name attribute
            Args:
                deletelist: a list or set of renderer names to be deleted
                deleteMatch(string) a part of the name to be deleted, all renderer that have this string in their names will be removed
                renderers : a list of bokeh renderers to be deleted
                deleteFromLocal (bool): also drop matches from self.renderers
        """
        # NOTE(review): mutable default arguments — 'renderers.extend(...)'
        # below mutates the shared default list across calls; confirm and
        # consider 'renderers=None' defaults.
        deletedRenderers = []
        #sanity check:
        with self.renderersLock:
            if self.renderersGarbage:
                self.logger.info(f"renderers garbage collector {self.renderersGarbage}")
                renderers.extend(self.renderersGarbage)
                self.renderersGarbage = []
            if deleteList == [] and deleteMatch == "" and renderers == []:
                return
            #self.logger.debug(f"remove_renderers(), {deleteList}, {deleteMatch}, {renderers}")
            deleteList = deleteList.copy() # we will modify it
            newRenderers = []
            for r in self.plot.renderers:
                if r in renderers:
                    deletedRenderers.append(r)
                    continue # we ignore this one and do NOT add it to the renderers, this will hide the object
                if r.name:
                    if r.name in deleteList:
                        self.logger.debug(f"remove_renderers {r.name}")
                        deleteList.remove(r.name) # reduce the list to speed up looking later
                        deletedRenderers.append(r)
                        continue # we ignore this one and do NOT add it to the renderers, this will hide the object
                    elif deleteMatch != "" and deleteMatch in r.name:
                        deletedRenderers.append(r)
                        continue # we ignore this one and do NOT add it to the renderers, this will hide the object
                    else:
                        newRenderers.append(r) # we keep this one, as it doesnt mathc the deletersl
                else:
                    newRenderers.append(r) # if we have no name, we can't filter, keep this
            self.plot.renderers = newRenderers
            if deleteFromLocal:
                #delete this also from the local renderers list:
                delList = []
                for k,v in self.renderers.items():
                    if v["renderer"] in deletedRenderers:
                        delList.append(k)
                for k in delList:
                    del self.renderers[k]

    def annotation_toggle_click_cb(self,toggleState):
        """ callback from ui for turning on/off the annotations
            Args:
                toggleState (bool): true for set, false for unset
        """
        if toggleState:
            self.showAnnotationToggle.label = "hide Annotations"
            self.show_annotations()
        else:
            #remove all annotations from plot
            self.showAnnotationToggle.label = "show Annotations"
            self.hide_annotations()

    def threshold_toggle_click_cb(self,toggleState):
        """ callback from ui for turning on/off the threshold annotations
            Args:
                toggleState (bool): true for set, false for unset
        """
        if toggleState:
            self.showThresholdToggle.label = "hide Thresholds"
            self.showThresholds = True
            self.show_thresholds()
        else:
            #remove all annotations from plot
            self.showThresholdToggle.label = "show Thresholds"
            self.showThresholds = False
            self.hide_thresholds()

    def show_thresholds(self):
        """ check which lines are currently shown and show their thresholds as well """
        #if not self.showThresholds:
        #    return
        self.showThresholds = True
        for annoName,anno in self.server.get_annotations().items():
            #self.logger.debug("@show_thresholds "+annoName+" "+anno["type"])
            if anno["type"]=="threshold":
                # we only show the annotations where the lines are also there
                self.logger.debug("@show_thresholds "+annoName+" "+anno["type"]+"and the lines are currently"+str(list(self.lines.keys())))
                if anno["variable"] in self.lines:
                    self.draw_threshold(anno)#,anno["variable"])

    def show_motifs(self):
        # Show motif annotations for all currently plotted lines.
        self.showMotifs = True
        for annoName,anno in self.server.get_annotations().items():
            #self.logger.debug("@show_thresholds "+annoName+" "+anno["type"])
            if anno["type"]=="motif":
                # we only show the annotations where the lines are also there
                self.logger.debug("@show_motifs "+annoName+" "+anno["type"]+"and the lines are currently"+str(list(self.lines.keys())))
                if anno["variable"] in self.lines:
                    self.draw_motif(anno)#,anno["variable"])

    def hide_motifs(self):
        # Hide all motif annotations and any box-modifier selection.
        self.showMotifs = False
        self.box_modifier_hide()
        annotations = self.server.get_annotations()
        timeAnnos = [anno for anno in annotations.keys() if annotations[anno]["type"] == "motif"]
        self.remove_renderers(deleteList=timeAnnos, deleteFromLocal=True)

    def hide_thresholds(self):
        """ hide the current threshold annotations in the widget """
        self.showThresholds=False
        self.box_modifier_hide()
        annotations = self.server.get_annotations()
        timeAnnos = [anno for anno in annotations.keys() if annotations[anno]["type"]=="threshold" ]
        self.remove_renderers(deleteList=timeAnnos,deleteFromLocal=True)

    def backgroundbutton_cb(self,toggleState):
        """ event callback function triggered by the UI
            Args:
                toggleState (bool): True/False on toggle is set or not
        """
        if toggleState:
            self.backgroundbutton.label = "hide Backgrounds"
            self.showBackgrounds = True
            self.show_backgrounds(None)
        else:
            self.backgroundbutton.label = "show Backgrounds"
            self.hide_backgrounds()
            self.showBackgrounds = False

    def init_annotations(self):
        # we assume that annotations are part of the model,
        ## get the annotations from the server and build the renderers, plot them if wanted
        ## but only the time annotations, the others are created and destroyed on demand
        #self.visibleAnnotations = set() # a set
        # NOTE(review): this early return disables the initializer — the
        # code below it is unreachable. Confirm whether this is intentional.
        return
        self.logger.debug(f"init_annotations() {len(self.server.get_annotations())} annotations..")
        #now we build all renderers for the time annos and don't show them now
        for annoname, anno in self.server.get_annotations().items():
            if anno["type"] == "time":
                self.draw_annotation(anno,False)
        self.logger.debug("init annotations done")

    def init_events(self):
        # Create all event renderers; only show them if the mirror settings
        # mark events as visible.
        self.logger.debug(f"init_events")
        #create all renderers but don't show them
        visible = self.server.get_mirror()["visibleElements"][".properties"]["value"]
        if "events" in visible and visible["events"]==True:
            self.eventsVisible = True #currently turned on
            self.logger.debug(f"init_events visible")
            self.show_all_events()
        else:
            self.logger.debug("init events invisible")

    def show_all_events(self):
        # Fetch all events from the backend and render them.
        self.logger.debug("show_all_events")
        data = self.server.get_events()
        self.eventsVisible = True
        if data:
            self.show_events(data)

    def init_annotations_old(self):
        """ create the actual bokeh objects based on existing ...
        (source chunk truncated here; the remainder of this method lies
        outside the visible range) """
    # NOTE(review): chunk starts mid-class — these first two methods belong to
    # DetectionTargetLayer, whose class statement lies before this view.
    def compute_mask(self, inputs, mask=None):
        # No Keras masks are propagated for any of the four outputs.
        return [None, None, None, None]

    def get_config(self):
        config = super(DetectionTargetLayer, self).get_config()
        return config


@tf.keras.utils.register_keras_serializable()
class DetectionLayer(tfl.Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are normalized.
    """

    def __init__(self, proposals, detection_min_confidence, detection_max_instances,
                 detection_nms_threshold, bbox_std_dev, images_per_gpu, batch_size,
                 name='mrcnn_detection', **kwargs):
        super(DetectionLayer, self).__init__(name=name, **kwargs)
        self.detection_min_confidence = detection_min_confidence
        self.detection_max_instances = detection_max_instances
        self.detection_nms_threshold = detection_nms_threshold
        self.bbox_std_dev = bbox_std_dev
        self.batch_size = batch_size
        self.proposals = proposals
        self.images_per_gpu = images_per_gpu
        # Converts pixel-space windows to normalized coordinates in call().
        self.norm_boxes_layer = NormBoxesLayer(name='norm_boxes_detection')

    def build(self, input_shape):
        self.built = True
        super(DetectionLayer, self).build(input_shape)

    def refine_detections(self, rois, probs, deltas, window):
        """Refine classified proposals and filter overlaps and return final detections.

        Inputs:
            rois: [N, (y1, x1, y2, x2)] in normalized coordinates
            probs: [N, num_classes]. Class probabilities.
            deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
            window: (y1, x1, y2, x2) in normalized coordinates. The part of the
                image that contains the image excluding the padding.

        Returns detections shaped:
            [num_detections, (y1, x1, y2, x2, class_id, score)] where
            coordinates are normalized.
        """
        # Class IDs per ROI
        class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
        # Class probability of the top class of each ROI
        # indices = tf.stack([tf.range(self.proposals), class_ids], axis=1)
        proposals_range = tf.range(self.proposals)
        indices = tf.concat([tf.reshape(proposals_range, (tf.shape(proposals_range)[0], 1)),
                             tf.reshape(class_ids, (tf.shape(class_ids)[0], 1))], axis=1)
        class_scores = tf.gather_nd(probs, indices)
        # Class-specific bounding box deltas
        deltas_specific = tf.gather_nd(deltas, indices)
        # Apply bounding box deltas
        # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
        refined_rois = utils.apply_box_deltas_graph(
            rois, tf.math.multiply(deltas_specific, self.bbox_std_dev))
        # Clip boxes to image window
        refined_rois = utils.clip_boxes_graph(refined_rois, window)

        # TODO: Filter out boxes with zero area

        # Filter out background boxes (class 0 is background)
        keep = tf.where(class_ids > 0)[:, 0]
        # Filter out low confidence boxes
        if self.detection_min_confidence:
            conf_keep = tf.where(class_scores >= self.detection_min_confidence)[:, 0]
            """
            keep = tf.sets.intersection(tf.expand_dims(keep, 0),tf.expand_dims(conf_keep, 0))
            keep = tf.sparse.to_dense(keep)[0]
            """
            # Set intersection of `keep` and `conf_keep` via broadcast-compare
            # (replacement for the disabled tf.sets variant above).
            broadcast_equal = tf.equal(conf_keep, tf.reshape(keep, (-1, 1)))
            broadcast_equal_int = tf.cast(broadcast_equal, tf.int32)
            broadcast_sum = tf.reduce_sum(broadcast_equal_int, axis=0)
            keep = tf.boolean_mask(conf_keep, broadcast_sum, axis=None)

        # Apply per-class NMS
        # 1. Prepare variables
        pre_nms_class_ids = tf.gather(class_ids, keep)
        pre_nms_scores = tf.gather(class_scores, keep)
        pre_nms_rois = tf.gather(refined_rois, keep)
        unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]

        def nms_keep_map(class_id):
            """Apply Non-Maximum Suppression on ROIs of the given class."""
            # Indices of ROIs of the given class
            ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
            # Apply NMS
            class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=self.detection_max_instances,
                iou_threshold=self.detection_nms_threshold)
            # Map indices back into the `keep` index space
            class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
            # Pad with -1 so returned tensors have the same shape
            gap = self.detection_max_instances - tf.shape(class_keep)[0]
            class_keep = tf.pad(class_keep, [(0, gap)], mode='CONSTANT', constant_values=-1)
            # Set shape so map_fn() can infer result shape
            # class_keep.set_shape([self.detection_max_instances])
            return class_keep

        @tf.function
        def _nms_keep_func(class_ids):
            """
            An experimental function for replacing nms_keep_map with map_fn.
            TODO: check/fix for multiple classes
            Args:
                class_ids:
            Returns:
                class_keep
            """
            broadcast_equal = tf.equal(pre_nms_class_ids, tf.reshape(class_ids, (-1, 1)))
            broadcast_equal_int = tf.cast(broadcast_equal, tf.int32)
            bool_mask = tf.reduce_sum(broadcast_equal_int, axis=0)
            # Apply NMS
            class_keep = tf.image.non_max_suppression(
                tf.boolean_mask(pre_nms_rois, bool_mask, axis=None),
                tf.boolean_mask(pre_nms_scores, bool_mask, axis=None),
                max_output_size=self.detection_max_instances,
                iou_threshold=self.detection_nms_threshold)
            # Map indicies
            class_keep = tf.gather(keep, class_keep)
            # Pad with -1 so returned tensors have the same shape
            gap = self.detection_max_instances - tf.shape(class_keep)[0]
            class_keep = tf.pad(class_keep, [(0, gap)], mode='CONSTANT', constant_values=-1)
            return class_keep

        # 2. Map over class IDs
        # nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids, dtype=tf.int64)
        nms_keep = _nms_keep_func(unique_pre_nms_class_ids)
        # 3. Merge results into one list, and remove -1 padding
        # nms_keep = tf.reshape(nms_keep, [-1])
        nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
        # 4. Compute intersection between keep and nms_keep (broadcast variant)
        broadcast_equal = tf.equal(nms_keep, tf.reshape(keep, (-1, 1)))
        broadcast_equal_int = tf.cast(broadcast_equal, tf.int32)
        broadcast_sum = tf.reduce_sum(broadcast_equal_int, axis=0)
        keep = tf.boolean_mask(nms_keep, broadcast_sum, axis=None)
        """
        keep = tf.sets.intersection(tf.expand_dims(keep, 0), tf.expand_dims(nms_keep, 0))
        keep = tf.sparse.to_dense(keep)[0]
        """
        # Keep top detections
        roi_count = self.detection_max_instances
        class_scores_keep = tf.gather(class_scores, keep)
        num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
        top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
        keep = tf.gather(keep, top_ids)

        # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
        # Coordinates are normalized.
        detections = tf.concat([
            tf.gather(refined_rois, keep),
            tf.cast(tf.gather(class_ids, keep), dtype='float32')[..., tf.newaxis],
            tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)

        # Pad with zeros if detections < DETECTION_MAX_INSTANCES
        gap = self.detection_max_instances - tf.shape(detections)[0]
        detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
        return detections

    def call(self, inputs, **kwargs):
        # inputs: [rois, mrcnn_class, mrcnn_bbox, image_meta]
        rois = inputs[0]
        mrcnn_class = inputs[1]
        mrcnn_bbox = inputs[2]
        image_meta = inputs[3]

        # Get windows of images in normalized coordinates. Windows are the area
        # in the image that excludes the padding.
        # Use the shape of the first image in the batch to normalize the window
        # because we know that all images get resized to the same size.
        m = utils.parse_image_meta_graph(image_meta)
        image_shape = m['image_shape'][0]
        window = self.norm_boxes_layer([m['window'], image_shape[:2]])

        # Run detection refinement graph on each item in the batch
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: self.refine_detections(x, y, w, z),
            self.images_per_gpu)

        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
        # normalized coordinates
        return tf.reshape(
            detections_batch,
            [self.batch_size, self.detection_max_instances, 6])

    def compute_output_shape(self, input_shape):
        return None, self.detection_max_instances, 6

    def get_config(self):
        config = super(DetectionLayer, self).get_config()
        return config


@tf.keras.utils.register_keras_serializable()
class DetectedBoxesExtraction(tfl.Layer):
    """Slices the (y1, x1, y2, x2) box coordinates off a detections tensor."""

    def __init__(self, config=None, name='detected_boxes_extraction', **kwargs):
        super(DetectedBoxesExtraction, self).__init__(name=name, **kwargs)
        self.config = config

    def build(self, input_shape):
        self.built = True
        super(DetectedBoxesExtraction, self).build(input_shape)

    def call(self, inputs, **kwargs):
        # Keep only the first four values of the last axis (the box coordinates).
        return inputs[..., :4]

    def get_config(self):
        config = super(DetectedBoxesExtraction, self).get_config()
        return config


@tf.keras.utils.register_keras_serializable()
class PyramidROIAlign(tfl.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Params:
    - pool_shape: [pool_height, pool_width] of the output pooled regions.
      Usually [7, 7]

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized coordinates.
      Possibly padded with zeros if not enough boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - feature_maps: List of feature maps from different levels of the pyramid.
      Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape:
    [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specific in the pool_shape in the layer
    constructor.
    """

    def __init__(self, pool_shape, name='roi_align', **kwargs):
        super(PyramidROIAlign, self).__init__(name=name, **kwargs)
        self.pool_shape = tuple(pool_shape)

    def build(self, input_shape):
        self.built = True
        super(PyramidROIAlign, self).build(input_shape)

    # NOTE(review): this method is cut off at the end of this chunk — its body
    # continues past this view.
    def call(self, inputs, **kwargs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]
        # Image meta
        # Holds details about the image. See compose_image_meta()
        image_meta = inputs[1]
        # Feature Maps. List of feature maps from different level of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[2:]

        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Use shape of first image. Images in a batch must have the same size.
        image_shape = utils.parse_image_meta_graph(image_meta)['image_shape'][0]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
        roi_level = utils.log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)

        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        # Workaround for onnxruntime which have issues with empty tensors
        # concat because of the dimensions reorder.
        unique_levels = tf.unique(tf.reshape(roi_level, (-1, )))[0]
        unique_levels_padded = tf.pad(unique_levels, tf.constant([[0, 4]]), constant_values=2)
        unique_levels_padded = tf.split(unique_levels_padded[:4], 4)
        for i, level in enumerate(unique_levels_padded):
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)

            # Box indices for crop_and_resize.
            box_indices = tf.cast(ix[:, 0], tf.int32)

            # Keep track of which box is mapped to which level
            box_to_level.append(ix)

            # Stop gradient propogation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)

            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes,
           # NOTE(review): chunk starts mid-signature — the `def YoloV2(conn,
           # anchors, model_table=..., ...)` header lies before this view.
           do_sqrt=True, grid_number=13, coord_scale=None, object_scale=None,
           prediction_not_a_object_scale=None, class_scale=None,
           detection_threshold=None, iou_threshold=None, random_boxes=False,
           match_anchor_size=None, num_to_force_coord=None):
    '''
    Generates a deep learning model with the Yolov2 architecture.

    The model is same as Yolov2 proposed in original paper. In addition to
    Yolov2, the model adds a passthrough layer that brings feature from an
    earlier layer to lower resolution layer.

    Parameters
    ----------
    conn : CAS
        Specifies the connection of the CAS connection.
    anchors : list
        Specifies the anchor box values.
    model_table : string, optional
        Specifies the name of CAS table to store the model.
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 416
    height : int, optional
        Specifies the height of the input layer.
        Default: 416
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity values.
        Default: 1.0 / 255
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the
        input layer.
        Valid Values: 'none', 'random'
        Default: 'NONE'
    act : string, optional
        Specifies the activation function for the batch normalization layers.
        Default: 'leaky'
    act_detection : string, optional
        Specifies the activation function for the detection layer.
        Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU,
        SOFPLUS, ELU, LEAKY, FCMP
        Default: AUTO
    softmax_for_class_prob : bool, optional
        Specifies whether to perform Softmax on class probability per
        predicted object.
        Default: True
    coord_type : string, optional
        Specifies the format of how to represent bounding boxes. For example,
        a bounding box can be represented with the x and y locations of the
        top-left point as well as width and height of the rectangle.
        This format is the 'rect' format. We also support coco and yolo formats.
        Valid Values: 'rect', 'yolo', 'coco'
        Default: 'yolo'
    max_label_per_image : int, optional
        Specifies the maximum number of labels per image in the training.
        Default: 30
    max_boxes : int, optional
        Specifies the maximum number of overall predictions allowed in the
        detection layer.
        Default: 30
    n_classes : int, optional
        Specifies the number of classes. If None is assigned, the model will
        automatically detect the number of classes based on the training set.
        Default: 20
    predictions_per_grid : int, optional
        Specifies the amount of predictions will be done per grid.
        Default: 5
    do_sqrt : bool, optional
        Specifies whether to apply the SQRT function to width and height of
        the object for the cost function.
        Default: True
    grid_number : int, optional
        Specifies the amount of cells to be analyzed for an image. For example,
        if the value is 5, then the image will be divided into a 5 x 5 grid.
        Default: 13
    coord_scale : float, optional
        Specifies the weight for the cost function in the detection layer,
        when objects exist in the grid.
    object_scale : float, optional
        Specifies the weight for object detected for the cost function in
        the detection layer.
    prediction_not_a_object_scale : float, optional
        Specifies the weight for the cost function in the detection layer,
        when objects do not exist in the grid.
    class_scale : float, optional
        Specifies the weight for the class of object detected for the cost
        function in the detection layer.
    detection_threshold : float, optional
        Specifies the threshold for object detection.
    iou_threshold : float, optional
        Specifies the IOU Threshold of maximum suppression in object detection.
    random_boxes : bool, optional
        Randomizing boxes when loading the bounding box information.
        Default: False
    match_anchor_size : bool, optional
        Whether to force the predicted box match the anchor boxes in sizes
        for all predictions
    num_to_force_coord : int, optional
        The number of leading chunk of images in training when the algorithm
        forces predicted objects in each grid to be equal to the anchor box
        sizes, and located at the grid center

    Returns
    -------
    :class:`Sequential`

    References
    ----------
    https://arxiv.org/pdf/1612.08242.pdf

    '''
    model = Sequential(conn=conn, model_table=model_table)

    model.add(InputLayer(n_channels=n_channels, width=width, height=height,
                         random_mutation=random_mutation, scale=scale))
    # conv1 224 416
    model.add(Conv2d(32, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    model.add(Pooling(width=2, height=2, stride=2, pool='max'))
    # conv2 112 208
    model.add(Conv2d(64, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    model.add(Pooling(width=2, height=2, stride=2, pool='max'))
    # conv3 56 104
    model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv4 56 104
    model.add(Conv2d(64, width=1, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv5 56 104
    model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    model.add(Pooling(width=2, height=2, stride=2, pool='max'))
    # conv6 28 52
    model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv7 28 52
    model.add(Conv2d(128, width=1, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv8 28 52
    model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    model.add(Pooling(width=2, height=2, stride=2, pool='max'))
    # conv9 14 26
    model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv10 14 26
    model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv11 14 26
    model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv12 14 26
    model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv13 14 26
    model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
    # Passthrough source: kept as a named point layer so conv21 can branch from it.
    pointLayer1 = BN(act=act, name='BN5_13')
    model.add(pointLayer1)
    model.add(Pooling(width=2, height=2, stride=2, pool='max'))
    # conv14 7 13
    model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv15 7 13
    model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv16 7 13
    model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv17 7 13
    model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv18 7 13
    model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # conv19 7 13
    model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act, name='BN6_19'))
    # conv20 7 13
    model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
    pointLayer2 = BN(act=act, name='BN6_20')
    model.add(pointLayer2)
    # conv21 7 26 * 26 * 512 -> 26 * 26 * 64
    model.add(Conv2d(64, width=1, act='identity', include_bias=False, stride=1,
                     src_layers=[pointLayer1]))
    model.add(BN(act=act))
    # reshape 26 * 26 * 64 -> 13 * 13 * 256
    pointLayer3 = Reshape(act='identity', width=grid_number, height=grid_number,
                          depth=256, name='reshape1')
    model.add(pointLayer3)
    # concat the passthrough features with the low-resolution trunk
    model.add(Concat(act='identity', src_layers=[pointLayer2, pointLayer3]))
    # conv22 7 13
    model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
    model.add(BN(act=act))
    # Final 1x1 conv: (n_classes + 5) outputs per anchor box per grid cell.
    model.add(
        Conv2d((n_classes + 5) * predictions_per_grid, width=1, act='identity',
               include_bias=False, stride=1))
    model.add(Detection(act=act_detection, detection_model_type='yolov2',
                        anchors=anchors,
                        softmax_for_class_prob=softmax_for_class_prob,
                        coord_type=coord_type,
                        class_number=n_classes,
                        grid_number=grid_number,
                        predictions_per_grid=predictions_per_grid,
                        do_sqrt=do_sqrt,
                        coord_scale=coord_scale,
                        object_scale=object_scale,
                        prediction_not_a_object_scale=prediction_not_a_object_scale,
                        class_scale=class_scale,
                        detection_threshold=detection_threshold,
                        iou_threshold=iou_threshold,
                        random_boxes=random_boxes,
                        max_label_per_image=max_label_per_image,
                        max_boxes=max_boxes,
                        match_anchor_size=match_anchor_size,
                        num_to_force_coord=num_to_force_coord))

    return model


# NOTE(review): the following definition is cut off at the end of this chunk
# (its docstring continues past this view).
def Tiny_YoloV2(conn, anchors, model_table='Tiny-Yolov2', n_channels=3, width=416,
                height=416, scale=1.0 / 255, random_mutation='NONE', act='leaky',
                act_detection='AUTO', softmax_for_class_prob=True, coord_type='YOLO',
                max_label_per_image=30, max_boxes=30, n_classes=20,
                predictions_per_grid=5, do_sqrt=True, grid_number=13,
                coord_scale=None, object_scale=None,
                prediction_not_a_object_scale=None, class_scale=None,
                detection_threshold=None, iou_threshold=None, random_boxes=False,
                match_anchor_size=None, num_to_force_coord=None):
    '''
    Generate a deep learning model with the Tiny Yolov2 architecture.

    Tiny Yolov2 is a very small model of Yolov2, so that it includes fewer
    numbers of convolutional layer and batch normalization layer.

    Parameters
    ----------
    conn : CAS
        Specifies the connection of the CAS connection.
    anchors : list
        Specifies the anchor box values.
    model_table : string, optional
        Specifies the name of CAS table to store the model.
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 416
    height : int, optional
        Specifies the height of the input layer.
        Default: 416
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity values.
        Default: 1.0 / 255
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the
        input layer.
        Valid Values: 'none', 'random'
        Default: 'NONE'
    act : string, optional
        Specifies the activation function for the batch normalization layers.
        Default: 'leaky'
    act_detection : string, optional
        Specifies the activation function for the detection layer.
        Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU,
        SOFPLUS, ELU, LEAKY, FCMP
        Default: AUTO
    softmax_for_class_prob : bool, optional
        Specifies whether to perform Softmax on class probability per
        predicted object.
        Default: True
    coord_type : string, optional
        Specifies the format of how to represent bounding boxes. For example,
        a bounding box can be represented with the x and y locations of the
        top-left point as well as width and height of the rectangle.
        This format is the 'rect' format. We also support coco and yolo formats.
        Valid Values: 'rect', 'yolo', 'coco'
        Default: 'yolo'
    max_label_per_image : int, optional
        Specifies the maximum number of labels per image in the training.
        Default: 30
    max_boxes : int, optional
        Specifies the maximum number of overall predictions allowed in the
        detection layer.
        Default: 30
    n_classes
    vertices `(0, 1, ..., n)` and all of its faces.

    EXAMPLES::

        sage: simplicial_complexes.Simplex(3)
        The 3-simplex
        sage: simplicial_complexes.Simplex(5).euler_characteristic()
        1
    """
    return UniqueSimplicialComplex([TrueSimplex(n)],
                                   name='The {}-simplex'.format(n))


def Torus():
    r"""
    A minimal triangulation of the torus.

    This is a simplicial complex with 7 vertices, 21 edges and 14 faces. It is
    the unique triangulation of the torus with 7 vertices, and has been found
    by Möbius in 1861.

    This is also the combinatorial structure of the Császár polyhedron
    (see :wikipedia:`Császár_polyhedron`).

    EXAMPLES::

        sage: T = simplicial_complexes.Torus(); T.homology(1)
        Z x Z
        sage: T.f_vector()
        [1, 7, 21, 14]

    TESTS::

        sage: T.flip_graph().is_isomorphic(graphs.HeawoodGraph())
        True

    REFERENCES:

    - [Lut2002]_
    """
    return UniqueSimplicialComplex([[0, 1, 2], [1, 2, 4], [1, 3, 4], [1, 3, 6],
                                    [0, 1, 5], [1, 5, 6], [2, 3, 5], [2, 4, 5],
                                    [2, 3, 6], [0, 2, 6], [0, 3, 4], [0, 3, 5],
                                    [4, 5, 6], [0, 4, 6]],
                                   name='Minimal triangulation of the torus')


def RealProjectivePlane():
    """
    A minimal triangulation of the real projective plane.

    EXAMPLES::

        sage: P = simplicial_complexes.RealProjectivePlane()
        sage: Q = simplicial_complexes.ProjectivePlane()
        sage: P == Q
        True
        sage: P.cohomology(1)
        0
        sage: P.cohomology(2)
        C2
        sage: P.cohomology(1, base_ring=GF(2))
        Vector space of dimension 1 over Finite Field of size 2
        sage: P.cohomology(2, base_ring=GF(2))
        Vector space of dimension 1 over Finite Field of size 2
    """
    return UniqueSimplicialComplex([[0, 1, 2], [0, 2, 3], [0, 1, 5], [0, 4, 5],
                                    [0, 3, 4], [1, 2, 4], [1, 3, 4], [1, 3, 5],
                                    [2, 3, 5], [2, 4, 5]],
                                   name='Minimal triangulation of the real projective plane')


# Alias: ProjectivePlane() is the real projective plane.
ProjectivePlane = RealProjectivePlane


def KleinBottle():
    """
    A minimal triangulation of the Klein bottle, as presented for example
    in Davide Cervone's thesis [Cer1994]_.

    EXAMPLES::

        sage: simplicial_complexes.KleinBottle()
        Minimal triangulation of the Klein bottle
    """
    return UniqueSimplicialComplex([[2, 3, 7], [1, 2, 3], [1, 3, 5], [1, 5, 7],
                                    [1, 4, 7], [2, 4, 6], [1, 2, 6], [1, 6, 0],
                                    [1, 4, 0], [2, 4, 0], [3, 4, 7], [3, 4, 6],
                                    [3, 5, 6], [5, 6, 0], [2, 5, 0], [2, 5, 7]],
                                   name='Minimal triangulation of the Klein bottle')


def SurfaceOfGenus(g, orientable=True):
    """
    A surface of genus `g`.

    INPUT:

    - ``g`` -- a non-negative integer. The desired genus

    - ``orientable`` -- boolean (optional, default ``True``). If
      ``True``, return an orientable surface, and if ``False``,
      return a non-orientable surface.

    In the orientable case, return a sphere if `g` is zero, and
    otherwise return a `g`-fold connected sum of a torus with itself.

    In the non-orientable case, raise an error if `g` is zero. If
    `g` is positive, return a `g`-fold connected sum of a
    real projective plane with itself.

    EXAMPLES::

        sage: simplicial_complexes.SurfaceOfGenus(2)
        Triangulation of an orientable surface of genus 2
        sage: simplicial_complexes.SurfaceOfGenus(1, orientable=False)
        Triangulation of a non-orientable surface of genus 1
    """
    if g == 0:
        if not orientable:
            raise ValueError("no non-orientable surface of genus zero")
        else:
            return Sphere(2)
    # Base surface to take repeated connected sums of.
    if orientable:
        T = Torus()
    else:
        T = RealProjectivePlane()
    S = T
    for i in range(g-1):
        S = S.connected_sum(T)
    if orientable:
        orient_str = 'n orientable'
    else:
        orient_str = ' non-orientable'
    return UniqueSimplicialComplex(S,
                                   name='Triangulation of a{} surface of genus {}'.format(orient_str, g))


def MooreSpace(q):
    """
    Triangulation of the mod `q` Moore space.

    INPUT:

    - ``q`` -- integer, at least 2

    This is a simplicial complex with simplices of dimension 0, 1,
    and 2, such that its reduced homology is isomorphic to
    `\\ZZ/q\\ZZ` in dimension 1, zero otherwise.

    If `q=2`, this is the real projective plane.  If `q>2`, then
    construct it as follows: start with a triangle with vertices
    1, 2, 3.  We take a `3q`-gon forming a `q`-fold cover of the
    triangle, and we form the resulting complex as an identification
    space of the `3q`-gon.  To triangulate this identification space,
    put `q` vertices `A_0`, ..., `A_{q-1}`, in the interior, each of
    which is connected to 1, 2, 3 (two facets each: `[1, 2, A_i]`,
    `[2, 3, A_i]`).  Put `q` more vertices in the interior: `B_0`, ...,
    `B_{q-1}`, with facets `[3, 1, B_i]`, `[3, B_i, A_i]`,
    `[1, B_i, A_{i+1}]`, `[B_i, A_i, A_{i+1}]`.  Then triangulate the
    interior polygon with vertices `A_0`, `A_1`, ..., `A_{q-1}`.

    EXAMPLES::

        sage: simplicial_complexes.MooreSpace(2)
        Minimal triangulation of the real projective plane
        sage: simplicial_complexes.MooreSpace(3).homology()[1]
        C3
        sage: simplicial_complexes.MooreSpace(4).suspension().homology()[2]
        C4
        sage: simplicial_complexes.MooreSpace(8)
        Triangulation of the mod 8 Moore space
    """
    if q <= 1:
        raise ValueError("the mod q Moore space is only defined if q is at least 2")
    if q == 2:
        return RealProjectivePlane()
    facets = []
    for i in range(q):
        Ai = "A" + str(i)
        Aiplus = "A" + str((i+1) % q)
        Bi = "B" + str(i)
        facets.append([1, 2, Ai])
        facets.append([2, 3, Ai])
        facets.append([3, 1, Bi])
        facets.append([3, Bi, Ai])
        facets.append([1, Bi, Aiplus])
        facets.append([Bi, Ai, Aiplus])
    # Fan triangulation of the interior polygon A_0, ..., A_{q-1} from A0.
    for i in range(1, q-1):
        Ai = "A" + str(i)
        Aiplus = "A" + str((i+1) % q)
        facets.append(["A0", Ai, Aiplus])
    return UniqueSimplicialComplex(facets,
                                   name='Triangulation of the mod {} Moore space'.format(q))


def ComplexProjectivePlane():
    """
    A minimal triangulation of the complex projective plane.

    This was constructed by Kühnel and Banchoff [KB1983]_.

    EXAMPLES::

        sage: C = simplicial_complexes.ComplexProjectivePlane()
        sage: C.f_vector()
        [1, 9, 36, 84, 90, 36]
        sage: C.homology(2)
        Z
        sage: C.homology(4)
        Z
    """
    return UniqueSimplicialComplex(
        [[1, 2, 4, 5, 6], [2, 3, 5, 6, 4], [3, 1, 6, 4, 5],
         [1, 2, 4, 5, 9], [2, 3, 5, 6, 7], [3, 1, 6, 4, 8],
         [2, 3, 6, 4, 9], [3, 1, 4, 5, 7], [1, 2, 5, 6, 8],
         [3, 1, 5, 6, 9], [1, 2, 6, 4, 7], [2, 3, 4, 5, 8],
         [4, 5, 7, 8, 9], [5, 6, 8, 9, 7], [6, 4, 9, 7, 8],
         [4, 5, 7, 8, 3], [5, 6, 8, 9, 1], [6, 4, 9, 7, 2],
         [5, 6, 9, 7, 3], [6, 4, 7, 8, 1], [4, 5, 8, 9, 2],
         [6, 4, 8, 9, 3], [4, 5, 9, 7, 1], [5, 6, 7, 8, 2],
         [7, 8, 1, 2, 3], [8, 9, 2, 3, 1], [9, 7, 3, 1, 2],
         [7, 8, 1, 2, 6], [8, 9, 2, 3, 4], [9, 7, 3, 1, 5],
         [8, 9, 3, 1, 6], [9, 7, 1, 2, 4], [7, 8, 2, 3, 5],
         [9, 7, 2, 3, 6], [7, 8, 3, 1, 4], [8, 9, 1, 2, 5]],
        name='Minimal triangulation of the complex projective plane')


# NOTE(review): the following definition is cut off at the end of this chunk
# (its `start_list` literal continues past this view).
def PseudoQuaternionicProjectivePlane():
    r"""
    Return a pure simplicial complex of dimension 8 with 490 facets.

    .. WARNING::

        This is expected to be a triangulation of the projective plane
        `HP^2` over the ring of quaternions, but this has not been
        proved yet.

    This simplicial complex has the same homology as `HP^2`. Its
    automorphism group is isomorphic to the alternating group `A_5`
    and acts transitively on vertices.

    This is defined here using the description in [BK1992]_. This
    article deals with three different triangulations. This procedure
    returns the only one which has a transitive group of
    automorphisms.

    EXAMPLES::

        sage: HP2 = simplicial_complexes.PseudoQuaternionicProjectivePlane() ; HP2
        Simplicial complex with 15 vertices and 490 facets
        sage: HP2.f_vector()
        [1, 15, 105, 455, 1365, 3003, 4515, 4230, 2205, 490]

    Checking its automorphism group::

        sage: HP2.automorphism_group().is_isomorphic(AlternatingGroup(5))
        True
    """
    from sage.groups.perm_gps.permgroup import PermutationGroup
    P = [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13, 14, 15)]
    S = [(1, 6, 11), (2, 15, 14), (3, 13, 8), (4, 7, 5), (9, 12, 10)]
    start_list = [
        (1, 2, 3, 6, 8, 11, 13, 14, 15),     # A
        (1, 3, 6, 8, 9, 10, 11, 12, 13),     # B
        (1, 2, 6, 9, 10, 11, 12, 14, 15),    # C
        (1, 2, 3, 4, 7, 9, 12, 14, 15),      # D
        (1, 2, 4, 7, 9, 10, 12, 13, 14),     # E
        (1, 2, 6, 8, 9, 10, 11, 14, 15),     # F
        (1, 2, 3, 4, 5, 6, 9, 11, 13),       # G
        (1, 3, 5, 6, 8, 9, 10, 11, 12),      # H
        (1, 3, 5, 6, 7, 8, 9, 10, 11),       # I
        (1, 2, 3, 4, 5, 7, 10, 12, 15),      #
import GPy
import numpy as np
import time
from os import getpid
import pandas as pd
import matplotlib.pyplot as plt
import scipy.spatial as spatial
from scipy import stats
from scipy.special import inv_boxcox
import multiprocessing
import math

# Load training data (assay samples) and test data (blast-hole entries).
# NOTE(review): CSVs are read at import time (module-level side effect); paths are relative.
train_data = pd.read_csv('../../GP_Data/cy17_spc_assays_rl6_entry.csv')
train_cols = ['midx', 'midy', 'midz', 'cut']
test_data = pd.read_csv('../../GP_Data/cy17_spc_assays_pvo_entry.csv')
test_cols = ['midx', 'midy', 'midz']

# Plot styling (disabled).
# jtplot.style(theme='onedork',figsize = (16.5,12))


class Timer(object):
    """Context manager that prints the wall-clock time spent inside its block."""

    def __init__(self, name=None):
        self.name = name  # optional label printed before the elapsed time

    def __enter__(self):
        self.tstart = time.time()

    def __exit__(self, type, value, traceback):
        if self.name:
            print('[%s]' % self.name, end=' ')
        print('Elapsed: %s' % (time.time() - self.tstart))


def get_holeids():
    """Return the distinct drill-hole ids of ``test_data`` in first-seen order."""
    df_holeid = test_data['dhid']
    seen = set()
    HOLEID = []
    for item in df_holeid:
        if item not in seen:
            seen.add(item)
            HOLEID.append(item)
    return HOLEID


def get_test_points_holeid(idhole):
    """Return the (x, y, z) coordinates of all test points belonging to hole ``idhole``."""
    return test_data.loc[test_data['dhid'] == idhole][test_cols].as_matrix()


def get_y_holeid(idhole):
    """Return the measured grade ('cut') of all test points belonging to hole ``idhole``."""
    return test_data.loc[test_data['dhid'] == idhole][['cut']].as_matrix()


def get_cut_xyz_by_holeid(idhole):
    """Return (x, y, z, cut) rows for hole ``idhole``."""
    xyz_cut = test_data.loc[test_data['dhid'] == idhole][['midx', 'midy', 'midz', 'cut']].as_matrix()
    return xyz_cut


def get_pozo_holeid(idhole, cols_names=None):
    """Return the requested columns for hole ``idhole`` (default: coords + minty/cut/f1)."""
    if cols_names is None:
        cols_names = ['midx', 'midy', 'midz', 'minty', 'cut', 'f1']
    hole = test_data.loc[test_data['dhid'] == idhole][cols_names].as_matrix()
    return hole


def get_trainingSet_by_point(test_point, distancia):
    """Return the training rows (ids+coords, and grades) within radius ``distancia``
    of a single ``test_point``, using a cKDTree ball query.
    """
    X_train_df = train_data[['dhid', 'midx', 'midy', 'midz']]
    y_df = train_data[['dhid', 'cut']]
    X_train = X_train_df[['midx', 'midy', 'midz']].as_matrix()
    train_tree = spatial.cKDTree(X_train)
    idx = train_tree.query_ball_point(list(test_point), distancia)
    return X_train_df.iloc[idx, :], y_df.iloc[idx, :]


def get_traningSet(idhole, distancia):
    """Return the deduplicated training rows within ``distancia`` of *any* test
    point of hole ``idhole`` (tree-vs-tree ball query, duplicates removed).
    """
    X_train_df = train_data[['dhid', 'midx', 'midy', 'midz']]
    y_df = train_data[['dhid', 'cut']]
    X_train = X_train_df[['midx', 'midy', 'midz']].as_matrix()
    test_points = get_test_points_holeid(idhole)
    test_tree = spatial.cKDTree(test_points)
    train_tree = spatial.cKDTree(X_train)
    idx_rep = test_tree.query_ball_tree(train_tree, distancia)
    # Flatten the per-point index lists and drop repeated indices.
    idx_sin_rep = list(set([indice for lista in idx_rep for indice in lista]))
    return X_train_df.iloc[idx_sin_rep, :], y_df.iloc[idx_sin_rep, :]


def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
    # Print New Line on Complete
    if iteration == total:
        print()


def estimacion_by_point(modelo, ker, transform=False, Plotear=False, std=1,
                        lik=None, inf=None, distancia=33):
    """Fit one GP per test point of every hole and predict the grade there.

    :param modelo: 'sgpr' for a GPy.core.GP with explicit likelihood/inference,
                   anything else for plain GPRegression.
    :param ker: GPy kernel to use.
    :param transform: apply a Box-Cox transform to the targets before fitting.
    :param Plotear: plot measured-vs-predicted per hole.
    :param std: 1 -> standardise the test point with the training stats,
                0 -> standardise it with the test-point stats.
    :param lik: GPy likelihood (defaults to a fresh Gaussian per call).
    :param inf: GPy inference method (defaults to ExactGaussianInference per call).
    :param distancia: search radius for neighbouring training samples.
    :return: (last fitted model, dict dhid -> ndarray of predictions).
    """
    global m, y_pred, lmb
    # FIX(review): mutable-default bug — the original built one shared Gaussian()
    # likelihood / inference object at import time; build fresh ones per call.
    if lik is None:
        lik = GPy.likelihoods.Gaussian()
    if inf is None:
        inf = GPy.inference.latent_function_inference.ExactGaussianInference()
    # Dictionary holding the predictions per dhid.
    dicc_preds = dict()
    IDHOLEs = get_holeids()
    n = len(IDHOLEs)
    # HOLEIDs.remove('F06-1580-016')
    # HOLEIDs.remove('F06-1610-004')
    # HOLEIDs.remove('F06-1625-021')
    # HOLEIDs.remove('F13-1595-005')
    # HOLEIDs.remove('F05-1565-009')
    for idx, idhole in enumerate(IDHOLEs):
        y_preds = list()
        test_points = get_test_points_holeid(idhole)
        for test_point in test_points:
            X_df, y_df = get_trainingSet_by_point(test_point, distancia)
            X = X_df[['midx', 'midy', 'midz']].as_matrix()
            y = y_df[['cut']].as_matrix()
            X_std = (X - X.mean()) / X.std()
            y_std = (y - y.mean()) / y.std()
            if std == 1:
                test_point_std = (test_point - X.mean()) / X.std()
            elif std == 0:
                # FIX: the original computed this expression and discarded it,
                # leaving test_point_std unbound (NameError at predict time).
                test_point_std = (test_point - test_points.mean()) / test_points.std()
            else:
                print('std debe ser 0 o 1.')
            if X_df.shape[0] < 10:
                # Too few neighbours: emit the sentinel value -99.
                y_preds.extend(list(np.array([-99])))
                continue
            if modelo == 'sgpr':
                # m = GPy.models.SparseGPRegression(X,y,ker)
                if transform:
                    y_cox, lmb = stats.boxcox(y)
                    m = GPy.core.GP(X, y_cox, kernel=ker, likelihood=lik, inference_method=inf)
                else:
                    m = GPy.core.GP(X_std, y_std, kernel=ker, likelihood=lik, inference_method=inf)
            else:
                m = GPy.models.GPRegression(X, y, ker)
            # FIX: give y_pred a defined sentinel so a failed optimisation does not
            # reuse a stale prediction from a previous point (or crash on first use).
            y_pred = np.array([[-99.0]])
            try:
                m.optimize(messages=False, max_f_eval=1000)
                y_pred, _ = m.predict(np.array([[test_point_std[0], test_point_std[1], test_point_std[2]]]))
            except GPy.util.linalg.linalg.LinAlgError as err:
                # FIX: Python 3 exceptions have no .message attribute.
                if 'not positive definite' in str(err):
                    print('not positive definite, even with jitter.')
            except np.linalg.LinAlgError:
                print('La matriz definida por el kernel no es definida positiva')
            # De-standardise the prediction back to the original grade scale.
            y_preds.extend(list(y_pred * y.std() + y.mean()))
        if transform:
            y_preds = inv_boxcox(y_preds, lmb)
        # Convert to ndarray, just in case.
        y_preds_ndarray = np.array(y_preds.copy())
        dicc_preds[idhole] = y_preds_ndarray
        # Measured gold grades at the test points of this dhid.
        y_medido = get_y_holeid(idhole).reshape(y_preds_ndarray.shape)
        if Plotear:
            # Per-hole measured-vs-predicted scatter plot.
            fig, ax = plt.subplots()
            ax.scatter(y_medido, y_preds_ndarray, edgecolors=(0, 0, 0))
            ax.plot([y_medido.min(), y_medido.max()], [y_medido.min(), y_medido.max()], 'k--', lw=2)
            ax.set_xlabel('Medido')
            ax.set_ylabel('Prediccion')
            ax.set_title('Regresion para el sondaje %s' % idhole)
            # ax.set_title('DHID:%s, Kernel: %s' % (dhid,gp.kernel_))
        printProgressBar(idx + 1, n,
                         prefix='Current HOLEID: {}. Total Progress:'.format(IDHOLEs[(idx + 1) * (idx < n - 1)]),
                         suffix='Complete', length=50)
    if Plotear:
        plt.show()
    return m, dicc_preds


def estimation_by_point_mp(IDHOLEs, out_q, modelo, ker, distancia, transform, std=1,
                           lik=None, inf=None):
    """Worker version of :func:`estimacion_by_point` for multiprocessing.

    Puts a dict ``dhid -> ndarray of predictions`` on ``out_q``.
    """
    global lmbda
    # FIX(review): build likelihood / inference per call instead of sharing a
    # single mutable default object across every worker invocation.
    if lik is None:
        lik = GPy.likelihoods.Gaussian()
    if inf is None:
        inf = GPy.inference.latent_function_inference.ExactGaussianInference()
    n = len(IDHOLEs)
    dicc_preds = {}
    for idx, idhole in enumerate(IDHOLEs):
        y_preds = list()
        test_points = get_test_points_holeid(idhole)
        for test_point in test_points:
            X_df, y_df = get_trainingSet_by_point(test_point, distancia)
            # while X_df.shape[0] < 20:
            #     distancia_aumentada = 5 + distancia
            #     X_df, y_df = get_trainingSet_by_point(test_point, distancia_aumentada)
            if X_df.shape[0] < 10:
                y_preds.extend(list(np.array([-99])))
                continue
            X = X_df[['midx', 'midy', 'midz']].as_matrix()
            y = y_df[['cut']].as_matrix()
            X_std = (X - X.mean()) / X.std()
            y_std = (y - y.mean()) / y.std()
            if std == 1:
                test_point_std = (test_point - X.mean()) / X.std()
            elif std == 0:
                # FIX: result was discarded in the original, leaving
                # test_point_std unbound (NameError at predict time).
                test_point_std = (test_point - test_points.mean()) / test_points.std()
            else:
                print('std debe ser 0 o 1.')
            if modelo == 'sgpr':
                # m = GPy.models.SparseGPRegression(X,y,ker)
                if transform:
                    y_cox, lmbda = stats.boxcox(y)
                    gp_model = GPy.core.GP(X, y_cox, kernel=ker, likelihood=lik, inference_method=inf)
                else:
                    gp_model = GPy.core.GP(X_std, y_std, kernel=ker, likelihood=lik, inference_method=inf)
            else:
                # FIX: the original assigned the fitted model to the *parameter*
                # ``modelo``, so the 'sgpr' check above broke from the second
                # test point onwards; use a separate local name instead.
                gp_model = GPy.models.GPRegression(X, y, ker)
            # FIX: sentinel as an array so the de-standardisation below works
            # on the exception path (list(scalar) would raise TypeError).
            y_predicc = np.array([[-99.0]])
            try:
                gp_model.optimize(messages=False, max_f_eval=1000)
                y_predicc, _ = gp_model.predict(np.array([[test_point_std[0], test_point_std[1], test_point_std[2]]]))
            except GPy.util.linalg.linalg.LinAlgError as err:
                # FIX: Python 3 exceptions have no .message attribute.
                if 'not positive definite' in str(err):
                    print('not positive definite, even with jitter.')
            except np.linalg.LinAlgError:
                print('La matriz definida por el kernel no es definida positiva')
            y_preds.extend(list(y_predicc * y.std() + y.mean()))
        if transform:
            y_preds = inv_boxcox(y_preds, lmbda)
        # Convert to ndarray, just in case.
        y_preds_ndarray = np.array(y_preds.copy())
        dicc_preds[idhole] = y_preds_ndarray
        # printProgressBar(i + 1, n,
        #                  prefix='Current HOLEID: {}. Total Progress:'.format(HOLEIDs[(i + 1) * (i < n - 1)]),
        #                  suffix='Complete', length=50)
        printProgressBar(idx + 1, n,
                         prefix='Current process: {}. Total Progress:'.format(getpid()),
                         suffix='Complete', length=50)
    out_q.put(dicc_preds)
    return


def mp_gaussian_process_by_test_point(IDHOLEs, nprocs, modelo, ker, distancia=33, transform=False):
    """Split ``IDHOLEs`` into ``nprocs`` chunks, run one worker process per chunk
    and merge every worker's {dhid: predictions} dict into one result dict.
    """
    out_q = multiprocessing.Queue()
    chuncksize = int(math.ceil(len(IDHOLEs) / float(nprocs)))
    procs = []
    for idx in range(nprocs):
        p = multiprocessing.Process(target=estimation_by_point_mp,
                                    args=(IDHOLEs[chuncksize * idx:chuncksize * (idx + 1)],
                                          out_q, modelo, ker, distancia, transform))
        procs.append(p)
        p.start()
    resultdict = {}
    # Drain one result per worker before joining (avoids queue deadlock).
    for idx in range(nprocs):
        resultdict.update(out_q.get())
    for p in procs:
        p.join()
    return resultdict


if __name__ == '__main__':
    # modelo: sgpr
    # transform: True
    # lik: GPy.likelihoods.Gaussian()
    # ker: GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Matern32(input_dim =1, active_dims = [2])
    # inf: GPy.inference.latent_function_inference.ExactGaussianInference()
    # HOLEIDs = get_holeids()
    # kernel = GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Matern32(input_dim =1, active_dims = [2])
    # t0 = time.time()
    # diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)
    # print('Tiempo para gp en paralelo: {}'.format(time.time()-t0))
    # exportar los datos
    # outfile_name = 'mp_test_'+'all'+'.csv'
    # outfile = open(outfile_name, 'w')
    # outfile.write('xcentre,ycentre,zcentre,minty,cut_poz,cut,f1\n')
    # for holeid in HOLEIDs:
    #     pozo = get_pozo_holeid(holeid)
    #     for i, fila in enumerate(pozo):
    #         line = fila[0], fila[1], fila[2], fila[3], fila[4],diccionario[holeid][i,], fila[5]
    #         outfile.write('%f,%f,%f,%s,%s,%s,%f,%f\n' % line)
    # outfile.close()
    # Modelo sobre todos los pozos
    # modelo: sgpr (Sparse Gaussian process)
    # transform: False
    # lik: GPy.likelihoods.Gaussian()
    # ker: GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Bias(3)
    # inf: GPy.inference.latent_function_inference.ExactGaussianInference()
    # HOLEIDs = get_holeids()
    # kernel = GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Bias(3)
    # t0 = time.time()
    # diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)
    # print('Tiempo para gp en paralelo: {}'.format(time.time()-t0))
    # exportar los datos
    # outfile_name = 'mp_test_'+'all_1'+'.csv'
    # outfile = open(outfile_name, 'w')
    # outfile.write('xcentre,ycentre,zcentre,minty,lito,alt,cut,cut_poz\n')
    # for holeid in HOLEIDs:
    #     pozo = get_pozo_holeid(holeid)
    #     for i, fila in enumerate(pozo):
    #         line = fila[0], fila[1], fila[2], fila[3], fila[4], fila[5], diccionario[holeid][i, ],fila[6]
    #         outfile.write('%f,%f,%f,%s,%s,%s,%f,%f\n' % line)
    # outfile.close()
    # Modelo sobre todos los pozos
    # modelo: sgpr (Sparse Gaussian process)
    # transform: False
    # lik: GPy.likelihoods.Gaussian()
    # ker: GPy.kern.RBF(3,ARD = True)
    # inf: GPy.inference.latent_function_inference.ExactGaussianInference()
    # HOLEIDs = get_holeids()
    # kernel = GPy.kern.RBF(3,ARD=True)
    # t0 = time.time()
    # diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)
    # print('Tiempo para gp en paralelo: {}'.format(time.time()-t0))
    # exportar los datos
    # outfile_name = 'mp_test_'+'all_2'+'.csv'
    # outfile = open(outfile_name, 'w')
    pass  # NOTE(review): excerpt truncated here; `pass` keeps the all-comment block valid.
    # (unpatch_img)  — text cut by the excerpt boundary; the enclosing function
    # (which builds and returns `unpatch_img`) begins before this excerpt.
    # Accumulator for the overlaid patch values, same size as the unpatched image.
    predict_img = np.zeros((*unpatch_data_size,))
    # Weight map initialised to ~0 (1e-16 avoids division by zero where no patch lands).
    unpatch_weight_map = np.ones((*unpatch_data_size,)) * 1E-16
    # Initialize weight to 1 for each patch size
    if output_patch_size is None:
        weight_patch = np.ones((*patch_size,) + (data_patch_size[-1],))
    else:
        weight_patch = np.ones((*output_patch_size,) + (data_patch_size[-1],))
    print('weight_patch.shape', weight_patch.shape)
    for data_patch, index in zip(data_patches, indice_list):
        # Indexing using function slice for variable dim, Indexing last channel by
        # slice(None, None), equivalent to [:]
        if output_patch_size is None:
            # if input image shape==output image shape
            # Overlay all patch value on the predict image
            predict_img[
                (*[slice(index[i], index[i] + patch_size[i]) for i in range(dim)]
                 + [slice(None, None)],)] += data_patch
            # Overlay all weight value on the weight map
            unpatch_weight_map[
                (*[slice(index[i], index[i] + patch_size[i]) for i in range(dim)]
                 + [slice(None, None)],)] += weight_patch
        else:
            # else if input image shape>=output image shape
            for j in range(dim):
                assert patch_size[j] >= output_patch_size[j]
            # Gap between input size image and output size image
            diff = (np.array(patch_size) - np.array(output_patch_size)) // 2
            # Overlay all patch value on the predict image
            predict_img[
                (*[slice(index[i] + diff[i], index[i] + diff[i] + output_patch_size[i]) for i in range(dim)]
                 + [slice(None, None)],)] += data_patch
            # Overlay all weight value on the weight map
            unpatch_weight_map[
                (*[slice(index[i] + diff[i], index[i] + diff[i] + output_patch_size[i]) for i in range(dim)]
                 + [slice(None, None)],)] += weight_patch
    # Average overlapping contributions: sum of values / sum of weights.
    unpatch_img = predict_img / unpatch_weight_map
    # va1 = np.unique(unpatch_img, return_counts=True)
    if set_zero_by_threshold:
        # Suppress sub-threshold responses (used for decision maps).
        unpatch_img[unpatch_img < threshold] = 0
    # va2 = np.unique(unpatch_img, return_counts=True)
    return unpatch_img


def prediction_prob(config, patch_prob_img, indice_list):
    """Expand per-patch class probabilities into whole-image probability and
    decision maps by broadcasting each patch's scores over its patch area and
    un-patching the result.

    :param config: dict with 'body_identification_n_classes' and 'patch_size'.
    :param patch_prob_img: size(patch num1146, class6)
    :param indice_list: list of patch positions, one per row of patch_prob_img.
    :return: (prob_map, decision_map) as unpatched images.
    """
    n_classes = config['body_identification_n_classes']
    # patch_prob_img size(len of indice_list, n_classes)
    # Initialize
    # NOTE(review): the source excerpt shows this assignment possibly commented
    # out, yet `patch_shape` is used below — kept live so the function runs;
    # confirm against the original file.
    patch_shape = config['patch_size']  # Body identification patch size [1, X, Y]
    # patch_prob_img[0] is total num of patches.
    patch_prob_maps = np.zeros([len(indice_list), patch_shape[1], patch_shape[2], n_classes])
    patch_decision_maps = np.zeros([len(indice_list), patch_shape[1], patch_shape[2], n_classes])
    print('patch_prob_maps,line258', patch_prob_maps.shape)
    for i, pos in enumerate(indice_list):
        # one hot matrix: mark the argmax class over the whole patch area.
        patch_decision_maps[i, :, :, np.argmax(patch_prob_img[i, :])] += 1
        for class_ in range(n_classes):
            # Broadcast each class probability uniformly over the patch area.
            patch_prob_maps[i, :, :, class_] += patch_prob_img[i, class_]
    # sio.savemat('t.mat',{'d':patch_decision_maps,'p':patch_prob_maps})
    prob_map = unpatch_predict_image(patch_prob_maps, indice_list, patch_shape, set_zero_by_threshold=False)
    decision_map = unpatch_predict_image(patch_decision_maps, indice_list, patch_shape, set_zero_by_threshold=False)
    return prob_map, decision_map


def get_patches_data(data_size, patch_size, data_img, data_label, index_list, random_rate=0.3,
                     slice_channel_img=None, slice_channel_label=None, output_patch_size=None,
                     random_shift_patch=True, squeeze_channel=False, squeeze_dimension=None,
                     images_shape=None):
    """
    Get patches from unpatched image and correspondent label by the list of patch positions.

    :param data_size: type ndarray: data size of :param: data_img and :param data_label
    :param patch_size: type list of int: patch size images
    :param data_img: type ndarray: unpatched image data with channel, if 3D image, then
        its shape is [height,width,depth,channel].
    :param data_label: type ndarray: unpatch label data with channel, if 3D image, then
        its shape is [height,width,depth,channel].
    :param index_list: type list of list of integers: list position of each patch
    :param slice_channel_img: type list of int: channel indice chosen for model inputs,
        if :param squeeze_channel is true, the img dimension remains same, else reduce 1.
    :param slice_channel_label: type list of int: channel indice chosen for model outputs
    :param output_patch_size: type list of int: model output size
    :param random_rate: type float, rate of random shift of position from :param index_list.
        random_rate=0 if no shift.
    :param random_shift_patch: type bool, True if the patches are randomly shift for data
        augmentation.
    :param squeeze_channel: type bool, True if select image channel, else all channel will
        be as input if :param slice_channel_img is False.
    :param squeeze_dimension: if not None, additionally drops one spatial/extra dimension
        from each patch.  # assumes patches carry one extra axis — TODO confirm
    :param images_shape: unused in the visible body — presumably kept for API
        compatibility; verify against callers.
    :return: patch_img_collection: type list of ndarray with the shape :param patch_size:
        list of patches images.
    :return: patch_label_collection type list of ndarray with the shape :param patch_size:
        list of patches labels.
    :return: index_list: type list of int. Position of the patch.
    """
    dim = len(patch_size)
    # Largest valid start index per axis so a patch stays inside the image.
    indices_max_bound = [data_size[i] - patch_size[i] for i in range(dim)]
    # NOTE: index_list is clamped/shifted IN PLACE and also returned.
    for j, index in enumerate(index_list):
        # Limiting the patching indices
        index_list[j] = [max(min(index[i], indices_max_bound[i]), 0) for i in range(dim)]
        if random_shift_patch:
            # Shift patches indices for data augmentation
            new_index = [
                index[i] + random.randint(int(-patch_size[i] * random_rate / 2),
                                          int(patch_size[i] * random_rate / 2))
                for i in range(dim)]
            # Keep the shifted index only if it is still in bounds, else fall back
            # to the clamped original.
            index_list[j] = [new_index[i] if (indices_max_bound[i] >= new_index[i] >= 0)
                             else max(min(index[i], indices_max_bound[i]), 0)
                             for i in range(dim)]
    # indexing using function slice for variable dim, indexing last channel by
    # slice(None, None), equivalent to [:]
    # Get patch image data
    patch_img_collection = [
        data_img[(*[slice(index[i], index[i] + patch_size[i]) for i in range(dim)]
                  + [slice(None, None)],)]
        for index in index_list]
    patch_label_collection = None
    if output_patch_size is not None:
        # If input label shape>=output label shape -> Enlarge label patch
        for j in range(dim):
            assert patch_size[j] >= output_patch_size[j]
        diff = (np.array(patch_size) - np.array(output_patch_size)) // 2
        # Get label data with size= output_patch_size, keep the centre with same
        # as image patch.
        if data_label is not None:
            patch_label_collection = [
                data_label[(*[slice(index[i] + diff[i], index[i] + diff[i] + output_patch_size[i])
                              for i in range(dim)] + [slice(None, None)],)]
                for index in index_list]
    else:
        # If input label shape==output label shape
        if data_label is not None:
            patch_label_collection = [
                data_label[(*[slice(index[i], index[i] + patch_size[i]) for i in range(dim)]
                            + [slice(None, None)],)]
                for index in index_list]
    # Select channels for input images and labels by the yaml file
    if slice_channel_img is not None:
        if not squeeze_channel:
            # Select the image channel for patching
            patch_img_collection = [tf.stack([img[..., i] for i in slice_channel_img], axis=-1)
                                    for img in patch_img_collection]
        else:
            # Reduce one dimension (especially for network Body Identification)
            patch_img_collection = [img[..., 0] for img in patch_img_collection]
        # NOTE(review): nesting of this squeeze relative to the if/else above is
        # ambiguous in the flattened source — confirm against the original file.
        if squeeze_dimension is not None:
            patch_img_collection = [img[..., 0, :] for img in patch_img_collection]
    if slice_channel_label is not None:
        # Select the label channel for patching
        patch_label_collection = [tf.stack([label[..., i] for i in slice_channel_label], axis=-1)
                                  for label in patch_label_collection]
        if squeeze_dimension is not None:
            patch_label_collection = [label[..., 0, :] for label in patch_label_collection]
    return patch_img_collection, patch_label_collection, index_list


def get_sampled_patches(patch_size_tensor, img_data, label_data, class_p=None, max_class_value=None,
                        patches_per_subject=10, data_shape=None, dim_patch=3, channel_img=2,
                        channel_label=2, validation_for_1=0):
    """
    Get sampled patches from unpatched image and correspondent label.

    :param patch_size_tensor: type ndarray: patch size as tensor.
    :param img_data: type ndarray: unpatched images, if 3D image, then its shape is
        [height,width,depth,channel].
    :param label_data: type ndarray: unpatched label, if 3D image, then its shape is
        [height,width,depth,channel].
    :param class_p: type float: normalized class probability.
    :param max_class_value: type integer: number of classes in the label.
    :param patches_per_subject: type integer: number of patches to extract from tensor.
    :param data_shape: type array of integer: shape of input tensor.
    :param dim_patch: type integer: dimension of patch size.
    :param channel_img: type integer: number of channels in image.
    :param channel_label: type integer: number of channels in label.
    :param validation_for_1: type integer: 0 or 1. 0 showing no cancer, 1 otherwise.
    :return: patch_img_return: type list of ndarray from images with size equal to patch size.
    :return: patch_label_return: type list of ndarray from label with size equal to patch size.
    """
    list_patches_img = []
    list_patches_label = []
    # Per-class 2D masks: True where any voxel along axis 2 equals class c
    # (computed on label channel 1).
    _label_ax2_any = []
    _label_ax2_any.append([tf.math.reduce_any(label_data[..., 1] == c, axis=2)
                           for c in range(max_class_value)])
    print("label any: ", _label_ax2_any)
    print("first element:")
    print(_label_ax2_any[0])
    label_data_2_channel = label_data[..., 1]
    for patch in range(patches_per_subject):
        pos = None
        min_index_pos = None
        max_index_pos = None
        selected_class = 0
        # selected_class = np.random.choice(range(len(class_p)), p=class_p)
        # Draw one class index according to class_p (tf.random.categorical takes logits).
        selected_class = tf.random.categorical(tf.math.log([tf.convert_to_tensor(class_p)]), num_samples=1)
        print(selected_class)
        # selected_class = tf.reshape(selected_class, shape=())
        print(selected_class)
        # selected_class = 1
        print("selected class: ", selected_class)

        def true_selected_class():
            # Branch taken when a foreground (lesion) class was sampled.
            print("looking for a lesion position ...........................................................")

            def true_fn():
                # Pick a random (row, col) where class 1 occurs anywhere along axis 2.
                valid_idx = tf.where(_label_ax2_any[0][1] == True)
                idx = tf.random.shuffle(valid_idx, seed=1)
                idx = tf.gather(idx, indices=0)
                # Sample additional index along the third axis(=2).
                # Voxel value should be equal to the class value.
                valid_idx = label_data_2_channel[idx[0], idx[1], :]
                valid_idx = tf.where(valid_idx == 1)
                rnd = tf.random.shuffle(valid_idx, seed=1)
                rnd = tf.unstack(rnd[0], num=1)
                u = tf.unstack(idx, num=2)
                u.append(rnd[0])
                idx_pos = tf.stack(u)
                idx_pos = tf.cast(idx_pos, dtype=tf.int32)
                # return idx
                # Bounds for a patch start index so the patch contains idx_pos
                # and stays inside the volume.
                min_index = tf.math.maximum(tf.math.add(tf.math.subtract(idx_pos, patch_size_tensor), 1), 0)
                max_index = tf.math.minimum(tf.math.add(tf.math.subtract(data_shape, patch_size_tensor), 1),
                                            tf.math.add(idx_pos, 1))
                return min_index, max_index

            def false_fn():
                # No lesion constraint: any in-bounds start index is allowed.
                min_index = tf.convert_to_tensor([0, 0, 0])
                max_index = tf.math.subtract(data_shape, tf.math.add(patch_size_tensor, 1))
                return min_index, max_index

            val_for_1 = tf.math.greater(validation_for_1, 0)
            min_index_pos_true, max_index_pos_true = tf.cond(val_for_1, true_fn, false_fn)
            return min_index_pos_true, max_index_pos_true

        def false_selected_class():
            # Background class sampled: unconstrained in-bounds start index.
            min_index_pos_false = tf.convert_to_tensor([0, 0, 0])
            max_index_pos_false = tf.math.subtract(data_shape, tf.math.add(patch_size_tensor, 1))
            return min_index_pos_false, max_index_pos_false

        cond_selected_class = tf.math.greater(selected_class, 0)
        min_index_pos, max_index_pos = tf.cond(cond_selected_class, true_selected_class, false_selected_class)
        ## here we get the position of that patch
        # NOTE(review): function continues beyond this excerpt (truncated here).
# <reponame>ftramer/stanford-suns-nips17  <filename>untargeted/models.py
# (dump metadata above rendered as a comment; it is not Python code)
import tensorflow as tf
from nets import inception, resnet_v1, resnet_v2, resnet_utils, vgg, mobilenet_v1
from preprocessing.vgg_preprocessing import preprocess_image as vgg_preprocess
from preprocessing.inception_preprocessing import preprocess_image as inception_preprocess
import numpy as np
import tf_jpeg_utils

slim = tf.contrib.slim


class KerasXceptionModel(object):
    """Wrapper around a frozen Keras Xception graph (.pb) loaded via GraphDef.

    Unlike the slim models below, __call__ takes a live session and wires the
    input image into the imported graph.
    """

    def __init__(self, num_classes):
        # NOTE(review): the `num_classes` argument is ignored — 1000 is
        # hardcoded here; confirm this is intentional (ImageNet-only model).
        self.num_classes = 1000
        self.built = False
        self.logits = None
        self.ckpt = 'keras_xception.pb'
        self.name = 'keras_xception'
        self.isKerasModel = True  # callers use this flag to pick the GraphDef load path

    def __call__(self, sess, batch_size, image, ckpt_path=''):
        print('INFO:tensorflow:Restoring parameters from %s' % (ckpt_path + '/' + self.ckpt))
        with tf.variable_scope(self.name):
            with tf.gfile.FastGFile(ckpt_path + '/' + self.ckpt, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                # Splice the provided image tensor into the frozen graph's input.
                tf.import_graph_def(graph_def, name='keras_xception', input_map={"input_image:0": image})
        logits = sess.graph.get_tensor_by_name(self.name + '/keras_xception/output_prob:0')
        # Prepend a -100 "background" column so class indices line up with the
        # 1001-class slim models.
        logits = tf.concat(values=[tf.ones([batch_size, 1]) * (-100), logits], axis=1)
        preds = tf.argmax(logits, axis=1)
        self.image = image
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class InceptionV3Model(object):
    """Model class for CleverHans library."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'inception_v3.ckpt'
        self.name = 'inception_v3'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        # Reuse variables on the second and later calls within the same graph.
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_v3(
                    x_input, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class SmoothInceptionV3Model(object):
    """Model class for CleverHans library."""
    # Variant: 6x6 average-pooling smoothing applied to the input first.

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'inception_v3.ckpt'
        self.name = 'smooth_inception_v3'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                # Low-pass filter the input (defense against high-frequency perturbations).
                avg_pooled = tf.nn.avg_pool(x_input, ksize=[1, 6, 6, 1],
                                            strides=[1, 1, 1, 1], padding='SAME')
                logits, end_points = inception.inception_v3(
                    avg_pooled, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class AdvInceptionV3Model(object):
    """Model class for CleverHans library."""
    # Adversarially trained InceptionV3 checkpoint.

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'adv_inception_v3.ckpt'
        self.name = 'adv_inception_v3'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_v3(
                    x_input, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class Ens3AdvInceptionV3Model(object):
    """Model class for CleverHans library."""
    # Ensemble-adversarially trained (ens3) InceptionV3 checkpoint.

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'ens3_adv_inception_v3.ckpt'
        self.name = 'ens3_adv_inception_v3'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_v3(
                    x_input, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class SmoothEns3AdvInceptionV3Model(object):
    """Model class for CleverHans library."""
    # ens3-adv checkpoint + 6x6 average-pooling input smoothing.

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'ens3_adv_inception_v3.ckpt'
        self.name = 'smooth_ens3_adv_inception_v3'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                avg_pooled = tf.nn.avg_pool(x_input, ksize=[1, 6, 6, 1],
                                            strides=[1, 1, 1, 1], padding='SAME')
                logits, end_points = inception.inception_v3(
                    avg_pooled, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class Ens4AdvInceptionV3Model(object):
    """Model class for CleverHans library."""
    # Ensemble-adversarially trained (ens4) InceptionV3 checkpoint.

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'ens4_adv_inception_v3.ckpt'
        self.name = 'ens4_adv_inception_v3'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_v3(
                    x_input, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class SmoothEns4AdvInceptionV3Model(object):
    """Model class for CleverHans library."""
    # ens4-adv checkpoint + 6x6 average-pooling input smoothing.

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'ens4_adv_inception_v3.ckpt'
        self.name = 'smooth_ens4_adv_inception_v3'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                avg_pooled = tf.nn.avg_pool(x_input, ksize=[1, 6, 6, 1],
                                            strides=[1, 1, 1, 1], padding='SAME')
                logits, end_points = inception.inception_v3(
                    avg_pooled, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class InceptionV4Model(object):
    """Model class for CleverHans library."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'inception_v4.ckpt'
        self.name = 'inception_v4'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_v4(
                    x_input, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class InceptionResNetV2Model(object):
    """Model class for CleverHans library."""

    def __init__(self, num_classes, batch_size=None):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'inception_resnet_v2_2016_08_30.ckpt'
        self.name = 'inception_resnet_v2'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_resnet_v2(
                    x_input, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class SmoothInceptionResNetV2Model(object):
    """Model class for CleverHans library."""
    # InceptionResNetV2 + 6x6 average-pooling input smoothing.

    def __init__(self, num_classes, batch_size=None):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'inception_resnet_v2_2016_08_30.ckpt'
        self.name = 'smooth_inception_resnet_v2'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                avg_pooled = tf.nn.avg_pool(x_input, ksize=[1, 6, 6, 1],
                                            strides=[1, 1, 1, 1], padding='SAME')
                logits, end_points = inception.inception_resnet_v2(
                    avg_pooled, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class JPEGInceptionResNetV2Model(object):
    """Model class for CleverHans library."""
    # InceptionResNetV2 with JPEG compress/decompress input defense.

    def __init__(self, num_classes, batch_size=None):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'inception_resnet_v2_2016_08_30.ckpt'
        self.name = 'jpeg_inception_resnet_v2'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                compressed_image = tf_jpeg_utils.jpeg_compress_decompress(x_input)
                logits, end_points = inception.inception_resnet_v2(
                    compressed_image, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class EnsAdvInceptionResNetV2Model(object):
    """Model class for CleverHans library."""
    # Ensemble-adversarially trained InceptionResNetV2 checkpoint.

    def __init__(self, num_classes, batch_size=None):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'ens_adv_inception_resnet_v2.ckpt'
        self.name = 'ens_adv_inception_resnet_v2'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_resnet_v2(
                    x_input, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class SmoothEnsAdvInceptionResNetV2Model(object):
    """Model class for CleverHans library."""
    # NOTE(review): despite the "Smooth" name, this class applies JPEG
    # compress/decompress rather than the avg_pool smoothing used by every
    # other Smooth* class in this file — likely a copy-paste slip from
    # JPEGEnsAdvInceptionResNetV2Model below; confirm intended preprocessing.

    def __init__(self, num_classes, batch_size=None):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'ens_adv_inception_resnet_v2.ckpt'
        self.name = 'smooth_ens_adv_inception_resnet_v2'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                compressed_image = tf_jpeg_utils.jpeg_compress_decompress(x_input)
                logits, end_points = inception.inception_resnet_v2(
                    compressed_image, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class JPEGEnsAdvInceptionResNetV2Model(object):
    """Model class for CleverHans library."""
    # ens-adv InceptionResNetV2 with JPEG compress/decompress input defense.

    def __init__(self, num_classes, batch_size=None):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'ens_adv_inception_resnet_v2.ckpt'
        self.name = 'jpeg_ens_adv_inception_resnet_v2'

    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            with tf.variable_scope(self.name):
                compressed_image = tf_jpeg_utils.jpeg_compress_decompress(x_input)
                logits, end_points = inception.inception_resnet_v2(
                    compressed_image, num_classes=self.num_classes, is_training=is_training,
                    reuse=reuse)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits


class ResNetV1Model(object):
    """Model class for CleverHans library."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.built = False
        self.logits = None
        self.ckpt = 'resnet_v1_50.ckpt'
        self.name = 'resnet_v1_50'

    def __call__(self, x_input, batch_size, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        # ResNet V1 and VGG have different preprocessing: map inputs from
        # [-1, 1] back to [0, 255] before VGG-style mean subtraction.
        preproc = tf.map_fn(
            lambda img: vgg_preprocess(0.5 * 255.0 * (img + 1.0),
                                       resnet_v1.resnet_v1.default_image_size,
                                       resnet_v1.resnet_v1.default_image_size), x_input)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = resnet_v1.resnet_v1_50(
                    preproc, num_classes=self.num_classes - 1, is_training=is_training,
                    reuse=reuse)
                # VGG and ResNetV1 don't have a background class
                background_class = tf.constant(-np.inf, dtype=tf.float32, shape=[batch_size, 1])
                logits = tf.concat([background_class, logits], axis=1)
            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits
        # NOTE(review): source excerpt is truncated mid-statement here
        # (presumably `self.logits = logits` etc., as in the classes above).
"""if tweet contains any of matches return its text joined with comments by the same person that also match (and contain [1/2] etc)""" if not any_in(tweet.get('text', tweet.get("comment", "")), *matches): return "" text = tw.get_tweetinfo(tweet['id']).contents['text'] if any(text in t for t in found): return "" # TODO: ensure tweets are [1/2] etc not just "[" and by same person if "[" not in text: return text for t in sorted(tw.get_tweetcomments(tweet['id']).contents, key=lambda t: t['id']): rest = parse_tweet(tw, t, found + [text], *matches) if rest and rest not in text: text += " " + rest return text def get_tweets_from(userid, datefrom, dateto, *matches): "return tweets from single person that match, merging in followups of the form [1/2]. Caches to speed up" tw = TwitterScraper() filename = os.path.join("tweets", f"tweets2_{userid}.pickle") os.makedirs("tweets", exist_ok=True) try: with open(filename, "rb") as fp: tweets = pickle.load(fp) except (IOError, EOFError, OSError, pickle.PickleError, pickle.UnpicklingError) as e: print(f'Error detected when attempting to load the pickle file: {e}, setting an empty \'tweets\' dictionary') tweets = {} for date, tweet_list in tweets.items(): fixed = [] for tweet in tweet_list: text, url = (tweet, None) if type(tweet) == str else tweet fixed.append((text, (url if url else None))) tweets[date] = fixed latest = max(tweets.keys()) if tweets else None if latest and dateto and latest >= (datetime.datetime.today() if not dateto else dateto).date(): return tweets for limit in [50, 2000, 20000]: print(f"Getting {limit} tweets") try: resp = tw.get_tweets(userid, count=limit).contents except requests.exceptions.RequestException: resp = [] for tweet in sorted(resp, key=lambda t: t['id']): date = tweet['created_at'].date() url = tweet['urls'][0]['url'] if tweet['urls'] else f"https://twitter.com/{userid}/status/{tweet['id']}" text = parse_tweet(tw, tweet, tweets.get(date, []), *matches) if text: tweets[date] = tweets.get(date, []) + 
[(text, url)] earliest = min(tweets.keys()) latest = max(tweets.keys()) print(f"got tweets {earliest} to {latest} {len(tweets)}") if earliest <= datefrom.date(): # TODO: ensure we have every tweet in sequence? break else: print(f"Retrying: Earliest {earliest}") with open(filename, "wb") as fp: pickle.dump(tweets, fp) return tweets ################# # String helpers ################# def remove_prefix(text: str, *prefixes: str) -> str: """Removes the prefix of a string""" for prefix in prefixes: if text.startswith(prefix): text = text[len(prefix):] return text def remove_suffix(text: str, *suffixes: str) -> str: """Removes the suffix of a string""" for suffix in suffixes: if suffix and text.endswith(suffix): text = text[:-len(suffix)] return text def seperate(seq, condition): a, b = [], [] for item in seq: (a if condition(item) else b).append(item) return a, b def split(seq, condition, maxsplit=0): "Similar to str.split except works on lists of lines. e.g. split([1,2,3,4], lambda x: x==2) -> [[1],[2],[3,4]]" run = [] last = False splits = 0 for i in seq: if (maxsplit and splits >= maxsplit) or bool(condition(i)) == last: run.append(i) else: splits += 1 yield run run = [i] last = not last yield run # def nwise(iterable, n=2): # iters = tee(iterable, n) # for i, it in enumerate(iters): # next(islice(it, i, i), None) # return zip(*iters) def pairwise(lst): "Takes a list and turns them into pairs of tuples, e.g. 
[1,2,3,4] -> [[1,2],[3,4]]" lst = list(lst) return list(zip(compress(lst, cycle([1, 0])), compress(lst, cycle([0, 1])))) def parse_numbers(lst): return [float(i.replace(",", "")) if i != "-" else 0 for i in lst] def any_in(target, *matches): return any((m in target) if type(m) != re.Pattern else m.search(target) for m in matches) def all_in(target, *matches): return all((m in target) if type(m) != re.Pattern else m.search(target) for m in matches) def strip(lst): lst = [i.strip() for i in lst] return [i for i in lst if i] def unique_values(iterable): it = iter(iterable) seen = set() for item in it: if item in seen: continue seen.add(item) yield item def replace_matcher(matches, replacements=None): if replacements is None: replacements = matches def replace_match(item): for m, r in zip(matches, replacements): if re.search(m, item, re.IGNORECASE): return r return item return replace_match ########################### # Tableau scraping ########################### def explore(workbook): print() print() print("storypoints:", workbook.getStoryPoints()) print("parameters", workbook.getParameters()) for t in workbook.worksheets: print() print(f"worksheet name : {t.name}") #show worksheet name print(t.data) #show dataframe for this worksheet print("filters: ") for f in t.getFilters(): print(" ", f['column'], ":", f['values'][:10], '...' if len(f['values']) > 10 else '') print("selectableItems: ") for f in t.getSelectableItems(): print(" ", f['column'], ":", f['values'][:10], '...' 
if len(f['values']) > 10 else '') def worksheet2df(wb, date=None, **mappings): res = pd.DataFrame() data = dict() if date is not None: data["Date"] = [date] for name, col in mappings.items(): if "_getSelectableItems" in name: name = remove_suffix(name, "_getSelectableItems") df = pd.DataFrame({sel['column']: sel['values'] for sel in wb.getWorksheet(name).getSelectableItems()}) else: try: df = wb.getWorksheet(name).data except (KeyError, TypeError, AttributeError): # TODO: handle error getting wb properly earlier print(f"Error getting tableau {name}/{col}", date) explore(wb) continue if type(col) != str: if df.empty: print(f"Error getting tableau {name}/{col}", date) continue # if it's not a single value can pass in mapping of cols df = df[col.keys()].rename(columns={k: v for k, v in col.items() if type(v) == str}) df['Date'] = pd.to_datetime(df['Date']).dt.normalize() # if one mapping is dict then do pivot pivot = [(k, v) for k, v in col.items() if type(v) != str] if pivot: pivot_cols, pivot_mapping = pivot[0] # can only have one # Any other mapped cols are what are the values of the pivot df = df.pivot(index="Date", columns=pivot_cols) df = df.rename(columns=pivot_mapping) df.columns = df.columns.map(' '.join) df = df.reset_index() df = df.set_index("Date") # Important we turn all the other data to numberic. Otherwise object causes div by zero errors df = df.apply(pd.to_numeric, errors='coerce', axis=1) # If it's only some days rest we can assume are 0.0 # TODO: we don't know how far back to look? Currently 30days for tests and 60 for others? 
start = date - datetime.timedelta(days=30) if date is not None else df.index.min() start = min([start, df.index.min()]) # Some data like tests can be a 2 days late end = date - datetime.timedelta(days=2) if date is not None else df.index.max() end = max([end, df.index.max()]) all_days = pd.date_range(start, end, name="Date", normalize=True, closed=None) df = df.reindex(all_days, fill_value=0.0) res = res.combine_first(df) elif df.empty: # Seems to mean that this is 0? data[col] = [0.0] elif col == "Date": data[col] = [pd.to_datetime(list(df.loc[0])[0], dayfirst=False)] else: data[col] = list(df.loc[0]) if data[col] == ["%null%"]: data[col] = [np.nan] # combine all the single values with any subplots from the dashboard df = pd.DataFrame(data) if not df.empty: df['Date'] = df['Date'].dt.normalize() # Latest has time in it which creates double entries res = res.combine_first(df.set_index("Date")) return res def workbooks(url, skip=None, dates=[], **selects): if not dates: dates = [None] ts = tableauscraper.TableauScraper() ts.loads(url) fix_timeouts(ts.session, timeout=15) wbroot = ts.getWorkbook() # updated = workbook.getWorksheet("D_UpdateTime").data['max_update_date-alias'][0] # updated = pd.to_datetime(updated, dayfirst=False) last_date = today() idx_value = last_date if not selects else [last_date, None] if not selects: # Don't need the default wb just one per picked value yield wbroot, idx_value # assume its first from each list? 
wb = wbroot if selects: ws_name, col_name = list(selects.items()).pop() items = wb.getWorksheet(ws_name).getSelectableItems() values = [item['values'] for item in items if item['column'] == col_name] if values: values = values[0] meth = "select" else: items = wb.getWorksheet(ws_name).getFilters() values = [item['values'] for item in items if item['column'] == col_name].pop() meth = "setFilter" else: values = [None] # for param_name, idx_value in zip(param.keys(), itertools.product(params.values()): for date in dates: # Get list of the possible values from selectable. TODO: allow more than one # Annoying we have to throw away one request before we can get single province for value in values: idx_value = date if value is None else (date, value) if skip is not None and skip(idx_value): continue if date and last_date.date() != date.date(): # Only switch date if it hasn't been done # TODO: after doing select can't do setParam. have to reload. must be faster way try: ts = tableauscraper.TableauScraper() ts.loads(url) fix_timeouts(ts.session, timeout=15) wbroot = ts.getWorkbook() wb = setParameter(wbroot, "param_date", str(date.date())) except (RequestException, TableauException): print(date, "MOPH Dashboard", "Skip: Param Timeout Error.") break if not wb.worksheets: print(date, "MOPH Dashboard", "Skip: Error in setParam.") break last_date = date if value is not None: try: wb_val = getattr(wb.getWorksheet(ws_name), meth)(col_name, value) except (RequestException, TableauException): print(date, "MOPH Dashboard", "Skip: Select Timeout Error.") break if not wb_val.worksheets: print(date, "MOPH Dashboard", "Skip: Error in Select.") break else: wb_val = wb yield wb_val, idx_value def
#! /usr/bin/env python3 # Copyright 2021 OVHCloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import collections import functools import math from typing import List, DefaultDict, Set, Tuple, Dict import xml.etree.ElementTree as ET import subprocess import libvirt import pyparsing as pp import sympy as sm # This tool will print-out a dump of the OVS rules that may pertain to a given # VM (a la ovs-ofctl dump-flows, but filtering-out rules that cannot be passed # by traffic egressing or ingressing a VM). The main use-case for this tool is # to help with the diagnosis of issues related to OpenStack security groups or # to OVS flows. # # The tool starts by creating a cursor associated with an initial set of rules # to match, which will then be used to walk through the rules dump and build a # tree of the passed rules. All the passed rules are then listed sequentially. # # In addition to that simple rules dump, the tool will walk-back the tree from # every output node (i.e. rules that have an output or NORMAL action) in order # to print the paths that could lead to an output node or to a group of output # nodes when they can be grouped, along, when possible, a short summary of the # last filtering rules (e.g. source IP, dest. port) passed before each output. 
# # This last part can be particularly helpful when dealing with port ranges, as # they are broken down into sets of rules with each rule dedicated to matching # a single masked port defintion (the flow dump ends-up being filled with many # rules matching things like "tp_dst=0x7ff0/0xfff8") and security groups using # the "--remote-secgroup" option, which are turned into conjunction rule sets. # # Keep in mind that the summary wont necessarily always make sense, because of # two important reasons : # # 1. This tool is not aware of all the fields a rule can filter on. It handles # the "basic" ones, but it notably doesn't understand the ct-related flags. # 2. The aggregation function only merges rules that are at the last hierarchy # level before the rule resulting in a successful output. # # Usage : # # First, you may need to install, python3-sympy and python3-pyparsing. # # Then, run br-int-flows-analyze.py --vm-uuid VM_UUID --ingress/--egress. It # should produce the rules that may handle ingress/egress traffic pertaining # to the VM followed by a summary of all the different paths the traffic may # take through the rules. # # Another way to run the tool is to use a combination of the --flow-file and # --fields flags, if you saved the dump generated by running # ovs-ofctl --names --no-stats --read-only --color=always --sort \ # dump-flows br-int # For example, running with a --flow-file # and --field in_port=IGNORE dl_vlan=2 dl_dst=fa:16:3e:d5:be:ff # Will start walking the tree, initialized with a cursor filtering-out rules # that : # - match any in_port (unless the port is called IGNORE) # - match a dl_vlan value different from 2 # - match a dl_dst value different from fa:16:3e:d5:be:ff # 😱 # Sorry, this parser is poorly written and probably bugged. Despite that, it # seems to work well enough for our current use-case. 
def mask_to_range(n, mask, width=None):
    """Turn a masked number (value, mask) into an inclusive (low, high) range.

    width: bit width of the field. FIX(review): the original unconditionally
    overwrote this parameter, making it dead; it is now only derived from the
    mask when not supplied. Existing callers never pass width, so behavior
    for them is unchanged.
    """
    if width is None:
        width = round(math.log2(mask))
    return n, n | (mask ^ (2**width - 1))


def generate_openflow_grammar():
    """Build and return the pyparsing grammar for one ovs-ofctl flow line
    ("field=value, ... actions=...")."""
    s = pp.Suppress
    w = pp.Word
    group = pp.Group
    LPAR = s('(')
    RPAR = s(')')
    LBRACKET = s('[')
    RBRACKET = s(']')
    DEC_NUMBER = pp.Regex(r'[0-9]+').setParseAction(lambda t: int(t[0]))
    HEX_NUMBER = pp.Regex(r'0x[0-9A-Fa-f]+').setParseAction(lambda t: int(t[0], 16))
    NUMBER = (HEX_NUMBER | DEC_NUMBER)
    EXPRESSION = pp.Forward()
    # "value/mask" pairs become inclusive (low, high) ranges.
    MASKED_NUMBER = ((NUMBER + s("/") + NUMBER)
                     .setParseAction(lambda t: mask_to_range(t[0], t[1])))
    INDEXER = LBRACKET + pp.Optional(w(pp.alphanums + ".")) + RBRACKET
    IDENTIFIER = w(pp.alphas, pp.alphanums + "_")
    KV_PAIR = (IDENTIFIER("key") + s("=") +
               (NUMBER ^ MASKED_NUMBER ^ EXPRESSION)("value"))
    FLOW_KV_SEP = s(", ") | s(",") | s(" ")
    PROTOCOL = pp.oneOf("eth ip ipv6 icmp icmp6 tcp tcp6 udp udp6 "
                        "sctp sctp6 arp rarp mpls mplsm")
    FLOW_FILTER_FIELDS = (
        PROTOCOL("proto")
        | (s("out_port=") + NUMBER("out_port"))
        | (s("vlan_tci=") + NUMBER("vlan_tci"))
        | (s("dl_vlan=") + NUMBER("dl_vlan"))
    )
    ACTION_LOAD = group(s('load:') + (
        HEX_NUMBER('value') + s('->') + IDENTIFIER('key') + INDEXER('index')))("load")
    FLOW_KV = pp.Dict(group(~s('action') + (KV_PAIR | IDENTIFIER)))
    FUNC_ARG = (FLOW_KV ^ EXPRESSION | pp.Empty()).setName('param')
    FUNC_CALL = group(
        IDENTIFIER('func_name') + LPAR +
        group(pp.delimitedList(FUNC_ARG, delim=FLOW_KV_SEP))('func_param') +
        RPAR)('func_call')
    EXPRESSION <<= (FUNC_CALL
                    | (IDENTIFIER + INDEXER)
                    | pp.QuotedString('"')
                    | ACTION_LOAD
                    | (NUMBER ^ w(pp.alphanums + "+:_/-.")))
    ACTION_RESUBMIT = group((
        s("resubmit:") + EXPRESSION("resubmit_port")) | (
        s("resubmit") + LPAR +
        pp.Optional(EXPRESSION("resubmit_port")) + s(",") +
        pp.Optional(EXPRESSION("resubmit_table").setParseAction(lambda t: int(t[0]))) +
        RPAR))("resubmit")
    KW_ACTION = pp.Dict(group(IDENTIFIER + s(":") + EXPRESSION))
    ACTION_CONJUNCTION = group(
        s("conjunction") + LPAR +
        DEC_NUMBER("conj_id").setParseAction(lambda t: int(t[0])) + s(',') +
        DEC_NUMBER("conj_k").setParseAction(lambda t: int(t[0])) + s("/") +
        DEC_NUMBER("conj_n").setParseAction(lambda t: int(t[0])) +
        RPAR)("conjunction")
    ACTION = group(ACTION_LOAD | ACTION_RESUBMIT | ACTION_CONJUNCTION
                   | KW_ACTION | FUNC_CALL | IDENTIFIER('action_type'))
    # ANSI color escape sequences from --color=always dumps are ignored.
    COLOR_MARKER = pp.Regex(r'\x1b\[((\d|\d;)*m|K)')
    FLOW_LINE = (group(pp.delimitedList(
        pp.Optional(FLOW_FILTER_FIELDS ^ FLOW_KV),
        delim=FLOW_KV_SEP))("fields")
        - pp.Optional(FLOW_KV_SEP)
        - s("actions=")
        - group(pp.delimitedList(ACTION, delim=FLOW_KV_SEP))("actions")
    ).ignore(COLOR_MARKER)
    return FLOW_LINE


# Fields we don't care about when checking whether a cursor exhausted a rule
MISC_FIELDS = ['cookie', 'table', 'conj_id', 'priority']

flow_grammar = generate_openflow_grammar()


def human_rule_sorter(r):
    """
    A function to help sort rules for human consumption.

    The idea here is to group rules by tables, priority, conjunction and
    port. Note: `conjunctions` is placed in sort_keys before being filled —
    it is the same list object, so later appends are seen by the sort.
    """
    conjunctions = []
    dst_port = (r.fields.tp_dst if isinstance(r.fields.tp_dst, tuple)
                else (r.fields.tp_dst or 0, r.fields.tp_dst or 0))
    sort_keys = [r.table if r.table else 0, -r.priority, conjunctions, dst_port]
    for action in r.actions:
        conj = action.conjunction
        if conj:
            conjunctions.append((-1 * conj.conj_id, conj.conj_k))
    conj_id = int(r.fields.get('conj_id', 0))
    if conj_id:
        conjunctions.insert(0, (-1 * conj_id, 99))
    conjunctions.sort()
    return sort_keys


class FlowCursor:
    """A cursor of field values used to walk the rules dump."""

    def __init__(self):
        self.fields = collections.defaultdict(lambda: 0)
        self.from_rule = None

    def copy(self):
        # Shallow copy: from_rule deliberately not carried over.
        a = FlowCursor()
        a.fields = self.fields.copy()
        return a


def close_intervals(expr):
    """Turns Interval.open into Interval, they are nicer to look at."""
    return expr.replace(
        lambda s: s.is_Interval,
        lambda i: sm.Interval(i.args[0] + 1, i.args[1] - 1),
        simultaneous=False
    )


class FlowRule:
    """One parsed flow rule plus the conditions accumulated while walking."""

    def __init__(self):
        self.annotations = dict()
        self._conj_key = None
        self._accumulated_conditions = True
        self._parent_rules: Set[Tuple['FlowRule']] = set()
        self._ranges = {}
        self._dump: FlowDump = None

    @property
    def simplified_conditions(self):
        # Fold in the conditions of every rule that can set this rule's
        # conjunction id (AND across dimensions, OR within a dimension).
        conjunction_conditions = True
        if self.fields.conj_id:
            conj_precursors = self._dump.conj_setters[self.fields.conj_id]
            for rules in conj_precursors.values():
                ored = self._dump.rules_conditions_union(rules)
                conjunction_conditions &= ored
        intrinsic = self.intrinsic_conditions()
        return (self._accumulated_conditions & intrinsic & conjunction_conditions)

    @property
    def conditions(self):
        return self._accumulated_conditions & self.intrinsic_conditions()

    @classmethod
    def from_line(cls, line, dump):
        """Alternate constructor: parse one ovs-ofctl dump line."""
        self = cls()
        self._line = line
        self._dump = dump
        try:
            parse_out = flow_grammar.parseString(line, True)
        except Exception as excp:
            print("Error parsing line")
            print(line)
            raise excp
        self.priority = parse_out.fields.get('priority', 32768)
        self.table = parse_out.fields.get('table')
        self.actions = parse_out.actions
        # FIX(review): the original assigned parse_out.fields twice in a row;
        # the duplicate was removed.
        self.fields = parse_out.fields
        return self

    def pass_or_drop(self):
        """Classify the rule's terminal action: 'pass', 'drop', or None."""
        for action in self.actions:
            if action.getName() == 'action_type':
                if action.action_type == 'drop':
                    return 'drop'
                if action.action_type in ("NORMAL", ):
                    return 'pass'
            if action.getName() == 'output':
                return 'pass'
        return None

    # NOTE(review): lru_cache on an instance method keeps every FlowRule
    # alive for the cache's lifetime (ruff B019) — kept as-is to preserve
    # behavior, but worth revisiting upstream.
    @functools.lru_cache(256)
    def intrinsic_conditions(self):
        """
        Returns the intrinsic conditions of a flow rule. That is, the
        conditions set by the rule itself represented as a sympy expression,
        with ranges "simplified" out.
        """
        def to_port_annotation(parsed):
            # convert both single port and ranges into sympy.Interval
            # (open intervals)
            if isinstance(parsed, int):
                rv = sm.Interval(parsed - 1, parsed + 1, True, True)
            else:
                rv = sm.Interval(parsed[0] - 1, parsed[1] + 1, True, True)
            return rv

        annotation_fields_conversions = {
            'tp_dst': ('dest_port', to_port_annotation),
            'tp_src': ('src_port', to_port_annotation),
            'dl_src': ('src_mac', sm.Symbol),
            'nw_src': ('source_ip', sm.Symbol),
            'ipv6_src': ('source_ip6', sm.Symbol),
            'nd_target': ('dest_ip6', sm.Symbol),
            'nw_dst': ('dest_ip', sm.Symbol),
            'proto': ('proto', sm.Symbol),
            'ct_state': ('ct_state', sm.Symbol)
        }
        conds = []
        for fn, (dst, conv) in annotation_fields_conversions.items():
            if self.fields.get(fn):
                value = conv(self.fields[fn]) if conv else self.fields[fn]
                if isinstance(value, sm.Interval):
                    # I know it's not super cool to lru_cache this function
                    # but in this case I don't think it's going to cause
                    # much trouble ...
                    self._ranges["__range_" + dst] = value
                    conds.append(sm.Symbol("__range_" + dst))
                else:
                    cond = sm.Eq(sm.Symbol(dst), value)
                    # NOTE(review): the source chunk is truncated at this
                    # point; the remainder of this method (presumably
                    # `conds.append(cond)` and a final combination/return of
                    # `conds`) was lost and must be restored from upstream.
                    conds.append(cond)
# NOTE(review): these are methods of a gradual-typing AST-visitor class whose
# `class` header lies outside this mangled chunk; they are reconstructed at
# top level for readability and must be re-indented under their class when
# restored. A leading fragment (the tail of visitBinOp) and the truncated
# visitNameConstant at the end of the chunk were unrecoverable and are not
# reproduced — restore them from upstream.

def visitUnaryOp(self, n, env, misc):
    """Typecheck a unary operation; result type depends on the operator."""
    (operand, ty) = self.dispatch(n.operand, env, misc)
    node = ast.UnaryOp(op=n.op, operand=operand, lineno=n.lineno)
    if isinstance(n.op, ast.Invert):
        ty = primjoin([ty], Int, Int)
    elif any([isinstance(n.op, op) for op in [ast.UAdd, ast.USub]]):
        ty = primjoin([ty])
    elif isinstance(n.op, ast.Not):
        ty = Bool
    return (node, ty)


def visitCompare(self, n, env, misc):
    """Typecheck a comparison chain; comparisons always produce Bool."""
    (left, _) = self.dispatch(n.left, env, misc)
    comparators = [comp for (comp, _) in
                   [self.dispatch(ocomp, env, misc) for ocomp in n.comparators]]
    return (ast.Compare(left=left, ops=n.ops, comparators=comparators,
                        lineno=n.lineno), Bool)


# Collections stuff
def visitList(self, n, env, misc):
    """Typecheck a list literal; in Store context it acts like a Tuple target."""
    eltdata = [self.dispatch(x, env, misc) for x in n.elts]
    elttys = [ty for (elt, ty) in eltdata]
    elts = [elt for (elt, ty) in eltdata]
    if isinstance(n.ctx, ast.Store):
        ty = Tuple(*elttys)
    else:
        inty = tyjoin(elttys)
        ty = List(inty) if flags.TYPED_LITERALS else Dyn
    return ast.List(elts=elts, ctx=n.ctx, lineno=n.lineno), ty


def visitTuple(self, n, env, misc):
    """Typecheck a tuple literal."""
    eltdata = [self.dispatch(x, env, misc) for x in n.elts]
    tys = [ty for (elt, ty) in eltdata]
    elts = [elt for (elt, ty) in eltdata]
    if isinstance(n.ctx, ast.Store):
        ty = Tuple(*tys)
    else:
        ty = Tuple(*tys) if flags.TYPED_LITERALS else Dyn
    return (ast.Tuple(elts=elts, ctx=n.ctx, lineno=n.lineno), ty)


def visitDict(self, n, env, misc):
    """Typecheck a dict literal; type is the join of key/value types."""
    keydata = [self.dispatch(key, env, misc) for key in n.keys]
    valdata = [self.dispatch(val, env, misc) for val in n.values]
    keys, ktys = list(zip(*keydata)) if keydata else ([], [])
    values, vtys = list(zip(*valdata)) if valdata else ([], [])
    return (ast.Dict(keys=list(keys), values=list(values), lineno=n.lineno),
            Dict(tyjoin(list(ktys)), tyjoin(list(vtys))))


def visitSet(self, n, env, misc):
    """Typecheck a set literal."""
    eltdata = [self.dispatch(x, env, misc) for x in n.elts]
    elttys = [ty for (elt, ty) in eltdata]
    ty = tyjoin(elttys)
    elts = [elt for (elt, ty) in eltdata]
    return (ast.Set(elts=elts, lineno=n.lineno),
            Set(ty) if flags.TYPED_LITERALS else Dyn)


def visitListComp(self, n, env, misc):
    """Typecheck a list comprehension; generators extend the local env."""
    disp = [self.dispatch(generator, env, misc, n.lineno) for generator in n.generators]
    generators, genenv = zip(*disp) if disp else ([], [])
    lenv = env.copy()
    lenv.update(dict(sum(genenv, [])))
    elt, ety = self.dispatch(n.elt, lenv, misc)
    return check(ast.ListComp(elt=elt, generators=list(generators), lineno=n.lineno),
                 List(ety), errmsg('COMP_CHECK', misc.filename, n, List(ety))), \
        (List(ety) if flags.TYPED_LITERALS else Dyn)


def visitSetComp(self, n, env, misc):
    """Typecheck a set comprehension."""
    disp = [self.dispatch(generator, env, misc, n.lineno) for generator in n.generators]
    generators, genenv = zip(*disp) if disp else ([], [])
    lenv = env.copy()
    lenv.update(dict(sum(genenv, [])))
    elt, ety = self.dispatch(n.elt, lenv, misc)
    return check(ast.SetComp(elt=elt, generators=list(generators), lineno=n.lineno),
                 Set(ety), errmsg('COMP_CHECK', misc.filename, n, Set(ety))), \
        (Set(ety) if flags.TYPED_LITERALS else Dyn)


def visitDictComp(self, n, env, misc):
    """Typecheck a dict comprehension."""
    disp = [self.dispatch(generator, env, misc, n.lineno) for generator in n.generators]
    generators, genenv = zip(*disp) if disp else ([], [])
    lenv = env.copy()
    lenv.update(dict(sum(genenv, [])))
    key, kty = self.dispatch(n.key, lenv, misc)
    value, vty = self.dispatch(n.value, lenv, misc)
    return check(ast.DictComp(key=key, value=value, generators=list(generators),
                              lineno=n.lineno),
                 Dict(kty, vty), errmsg('COMP_CHECK', misc.filename, n, Dict(kty, vty))), \
        (Dict(kty, vty) if flags.TYPED_LITERALS else Dyn)


def visitGeneratorExp(self, n, env, misc):
    """Typecheck a generator expression; its type is always Dyn."""
    disp = [self.dispatch(generator, env, misc, n.lineno) for generator in n.generators]
    generators, genenv = zip(*disp) if disp else ([], [])
    lenv = env.copy()
    lenv.update(dict(sum(genenv, [])))
    elt, ety = self.dispatch(n.elt, lenv, misc)
    return check(ast.GeneratorExp(elt=elt, generators=list(generators), lineno=n.lineno),
                 Dyn, errmsg('COMP_CHECK', misc.filename, n, Dyn)), Dyn


def visitcomprehension(self, n, env, misc, lineno):
    """Typecheck one comprehension clause, returning the rewritten clause and
    the (variable, type) assignments it introduces into the local env."""
    (iter, ity) = self.dispatch(n.iter, env, misc)
    ifs = [if_ for (if_, _) in [self.dispatch(if2, env, misc) for if2 in n.ifs]]
    (target, tty) = self.dispatch(n.target, env, misc)
    # Element type of the iterated value, by iterable kind.
    ety = Dyn
    if tyinstance(ity, List):
        ety = ity.type
    elif tyinstance(ity, Tuple):
        ety = tyjoin(*ity.elements)
    elif tyinstance(ity, Dict):
        ety = ity.keys
    # Destructure nested tuple/list targets into flat (Var, type) pairs.
    assignments = [(target, ety)]
    new_assignments = []
    while assignments:
        k, v = assignments[0]
        del assignments[0]
        if isinstance(k, ast.Name):
            new_assignments.append((Var(k.id), v))
        elif isinstance(k, ast.Tuple) or isinstance(k, ast.List):
            if tyinstance(v, Tuple):
                assignments += (list(zip(k.elts, v.elements)))
            elif tyinstance(v, Iterable) or tyinstance(v, List):
                assignments += ([(e, v.type) for e in k.elts])
            elif tyinstance(v, Dict):
                assignments += (list(zip(k.elts, v.keys)))
            else:
                assignments += ([(e, Dyn) for e in k.elts])
    iter_target = Dyn  # Iterable(tty)
    return ast.comprehension(
        target=target,
        iter=cast(env, misc.cls, iter, ity, iter_target,
                  errmsg('ITER_ERROR', misc.filename, lineno, iter_target),
                  misc=misc),
        ifs=ifs), new_assignments


# Control flow stuff
def visitYield(self, n, env, misc):
    """Typecheck a yield expression."""
    value, _ = self.dispatch(n.value, env, misc) if n.value else (None, Void)
    return ast.Yield(value=value, lineno=n.lineno), Dyn


def visitYieldFrom(self, n, env, misc):
    """Typecheck a `yield from` expression."""
    value, _ = self.dispatch(n.value, env, misc)
    return ast.YieldFrom(value=value, lineno=n.lineno), Dyn


def visitIfExp(self, n, env, misc):
    """Typecheck a conditional expression; type is the join of both arms."""
    test, _ = self.dispatch(n.test, env, misc)
    body, bty = self.dispatch(n.body, env, misc)
    orelse, ety = self.dispatch(n.orelse, env, misc)
    return ast.IfExp(test=test, body=body, orelse=orelse,
                     lineno=n.lineno), tyjoin([bty, ety])


# Function stuff
def visitCall(self, n, env, misc):
    """Typecheck a call: insert casts on arguments/callee as required by the
    callee's type, and check the return value."""
    if reflection.is_reflective(n):
        return reflection.reflect(n, env, misc, self)

    # Python3.5 gets rid of .kwargs and .starargs, instead has a Starred value
    # in the args and a keyword arg with no key (i.e. kwarg in n.keywords;
    # kwarg = keyword(arg=None, value=[w/e]))
    def has_starargs(n):
        if flags.PY3_VERSION >= 5:
            return any(isinstance(e, ast.Starred) for e in n.args)
        else:
            return n.starargs is not None

    def has_kwargs(n):
        if flags.PY3_VERSION >= 5:
            return any(e.arg is None for e in n.keywords)
        else:
            return n.kwargs is not None

    project_needed = [False]  # Python2 doesn't have nonlocal

    class BadCall(Exception):
        def __init__(self, msg):
            self.msg = msg

    def cast_args(argdata, fun, funty):
        # Insert the casts demanded by funty, recursing through Class/Object
        # callables down to a Function type.
        vs, ss = zip(*argdata) if argdata else ([], [])
        vs = list(vs)
        ss = list(ss)
        if tyinstance(funty, Dyn):
            if n.keywords or has_kwargs(n) or has_starargs(n):
                targparams = DynParameters
            else:
                targparams = AnonymousParameters(ss)
            return vs, cast(env, misc.cls, fun, Dyn, Function(targparams, Dyn),
                            errmsg('FUNC_ERROR', misc.filename, n,
                                   Function(targparams, Dyn)),
                            misc=misc), Dyn
        elif tyinstance(funty, Function):
            argcasts = funty.froms.lenmatch(argdata)
            # Prototype implementation for type variables
            if argcasts != None:
                substs = []
                casts = []
                for (v, s), t in argcasts:
                    if isinstance(t, TypeVariable):
                        substs.append((t.name, s))
                        casts.append(v)
                    else:
                        casts.append(cast(env, misc.cls, v, s, t,
                                          errmsg('ARG_ERROR', misc.filename, n, t),
                                          misc=misc))
                to = funty.to
                for var, rep in substs:
                    # Still need to merge in case of multiple approaches
                    to = to.substitute(var, rep, False)
                return (casts, fun, to)
            else:
                raise BadCall(errmsg('BAD_ARG_COUNT', misc.filename, n,
                                     funty.froms.len(), len(argdata)))
        elif tyinstance(funty, Class):
            project_needed[0] = True
            if '__init__' in funty.members:
                inst = funty.instance()
                funty = funty.member_type('__init__')
                if tyinstance(funty, Function):
                    funty = funty.bind()
                    funty.to = inst
            else:
                funty = Function(DynParameters, funty.instance())
            return cast_args(argdata, fun, funty)
        elif tyinstance(funty, Object):
            if '__call__' in funty.members:
                funty = funty.member_type('__call__')
                return cast_args(argdata, fun, funty)
            else:
                mfunty = Function(DynParameters, Dyn)
                return cast_args(
                    argdata,
                    cast(env, misc.cls, fun, funty, Record({'__call__': mfunty}),
                         errmsg('OBJCALL_ERROR', misc.filename, n), misc=misc),
                    mfunty)
        else:
            raise BadCall(errmsg('BAD_CALL', misc.filename, n, funty))

    (func, ty) = self.dispatch(n.func, env, misc)
    if tyinstance(ty, InferBottom):
        return n, Dyn
    argdata = [self.dispatch(x, env, misc) for x in n.args]
    try:
        (args, func, retty) = cast_args(argdata, func, ty)
    except BadCall as e:
        if flags.REJECT_WEIRD_CALLS or not (n.keywords or has_kwargs(n) or has_starargs(n)):
            return error(e.msg, lineno=n.lineno), Dyn
        else:
            # NOTE(review): logging.warn is a deprecated alias of
            # logging.warning — kept to preserve the original byte-for-byte.
            logging.warn('Function calls with keywords, starargs, and kwargs are not typechecked. Using them may induce a type error in file %s (line %d)' % (misc.filename, n.lineno), 0)
            args = n.args
            retty = Dyn
    call = ast_trans.Call(func=func, args=args, keywords=n.keywords,
                          starargs=getattr(n, 'starargs', None),
                          kwargs=getattr(n, 'kwargs', None),
                          lineno=n.lineno)
    if project_needed[0]:
        call = cast(env, misc.cls, call, Dyn, retty,
                    errmsg('BAD_OBJECT_INJECTION', misc.filename, n, retty, ty),
                    misc=misc)
    else:
        call = check(call, retty, errmsg('RETURN_CHECK', misc.filename, n, retty))
    return (call, retty)


def visitLambda(self, n, env, misc):
    """Typecheck a lambda; all parameters are treated as Dyn."""
    args, argnames, specials = self.dispatch(n.args, env, DynParameters, misc, n.lineno)
    params = [Dyn] * len(argnames)
    env = env.copy()
    env.update(dict(list(zip(argnames, params))))
    env.update(dict(specials))
    body, rty = self.dispatch(n.body, env, misc)
    if n.args.vararg:
        ffrom = DynParameters
    elif n.args.kwarg:
        ffrom = DynParameters
    elif flags.PY_VERSION == 3 and n.args.kwonlyargs:
        ffrom = DynParameters
    elif n.args.defaults:
        ffrom = DynParameters
    else:
        ffrom = NamedParameters(list(zip(argnames, params)))
    ty = Function(ffrom, rty) if flags.TYPED_LAMBDAS else Dyn
    return ast.Lambda(args=args, body=body, lineno=n.lineno), ty


# Variable stuff
def visitName(self, n, env, misc):
    """Typecheck a name lookup; unknown names are Dyn."""
    if isinstance(n.ctx, ast.Param):  # Compatibility with 2.7
        return n.id
    try:
        ty = env[Var(n.id)]
        if isinstance(n.ctx, ast.Del) and not tyinstance(ty, Dyn) and flags.REJECT_TYPED_DELETES:
            return error(errmsg('TYPED_VAR_DELETE', misc.filename, n, n.id, ty)), Dyn
    except KeyError:
        ty = Dyn
    # Rename identifiers that collide with the runtime's type names.
    id = n.id if n.id not in rtypes.TYPES else n.id + '_'
    return ast.Name(id=id, ctx=n.ctx, lineno=n.lineno), ty
> 0 and lR > 0 and relay.lower() == "gov": msg = "I'd rather die than use obscene and improper words; but when you, as a governor, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." elif lt == 0 and le > 0 and lR > 0 and relay.lower() == "help": msg = "Alternative translations available via setting relay flag to: chief, dir or gov for chief, director or governor. Default is president or help to print this message." elif lt == 0 and le > 0 and lR > 0 and relay.lower() != "chief": msg = "I'd rather die than use obscene and improper words; but when you, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." elif lt == 0 and le > 0 and lR > 0 and relay.lower() != "dir": msg = "I'd rather die than use obscene and improper words; but when you, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." elif lt == 0 and le > 0 and lR > 0 and relay.lower() != "gov": msg = "I'd rather die than use obscene and improper words; but when you, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." elif lt == 0 and le > 0 and lR > 0 and relay.lower() != "help": msg = "I'd rather die than use obscene and improper words; but when you, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." 
elif lt > 0 and le > 0 and lR == 0: msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() == "chief": msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a chief, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() == "dir": msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a director, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() == "gov": msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a governor, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() == "help": msg = "Alternative translations available via setting relay flag to: chief, dir or gov for chief, director or governor. 
Default is president or help to print this message.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() != "chief": msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() != "dir": msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() != "gov": msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) elif lt > 0 and le > 0 and lR > 0 and relay.lower() != "help": msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a president, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) else: msg = "Obscēnis, peream, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō praefecus pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda!" return msg def priapus5(self): if lt == 0 and le == 0: msg = "Obscēnis, peream, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō diabolus pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda." elif lt > 0 and le == 0: msg = "Obscēnis, peream, {0}, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō diabolus pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda.".format( target) elif lt == 0 and le > 0: msg = "I'd rather die than use obscene and improper words; but when you, as a devil, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." 
elif lt > 0 and le > 0: msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a devil, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) else: msg = "Obscēnis, peream, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō diabolus pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda!" return msg def priapus6(self): if lt == 0 and le == 0: msg = "Obscēnis, peream, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō daemonis pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda." elif lt > 0 and le == 0: msg = "Obscēnis, peream, {0}, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō daemonis pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda.".format( target) elif lt == 0 and le > 0: msg = "I'd rather die than use obscene and improper words; but when you, as a demon, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." elif lt > 0 and le > 0: msg = "I'd rather die than use obscene and improper words; but when you, {0}, as a demon, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks.".format( target) else: msg = "Obscēnis, peream, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō daemonis pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda!" return msg def priapus7(self): if lt == 0 and le == 0: msg = "Obscēnis, peream, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō homo pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda." 
elif lt > 0 and le == 0: msg = "Obscēnis, peream, {0}, sī nōn ūtī mē pudet improbīsque verbīs sed cum tū positō homo pudōre ostendās mihi cōleōs patentēs cum cunnō mihi mentula est vocanda.".format( target) elif lt == 0 and le > 0: msg = "I'd rather die than use obscene and improper words; but when you, as a man, appear with your testicles hanging out, it is appropriate for me to speak of cunts and cocks." elif lt > 0 and le > 0: msg
#!/usr/bin/python
""" XP7 Module

Helper functions for normalizing and formatting HP XP7 storage-array
identifiers: LDEV numbers, WWNs, sizes and CL port names.  The XP7 class
defined below consumes these helpers.
"""
import sys
import os.path
import csv
import string
import re
import copy

####################################################################################################
### Functions
####################################################################################################

# Pre-compiled port-name pattern: "CL<digit>-<letter/digit>", e.g. "CL1-A".
# One match with two groups replaces the three separate re.match calls the
# port helpers previously performed; raw string avoids invalid-escape warnings.
_PORT_RE = re.compile(r"CL(\d)-(\w)")


def _strip_separators(text):
    """Return *text* with every ':' and all whitespace removed.

    Uses replace/split instead of the Python-2-only str.translate(None, ...)
    delete form, so the result is identical under Python 2 but the code also
    runs under Python 3.
    """
    return "".join(text.replace(":", "").split())


def standard_format_ldev(ldev_nbr):
    """Normalize an LDEV number.

    Strips ':' and whitespace, zero-pads to 4 characters and lower-cases,
    e.g. "1:A" -> "001a".
    """
    return _strip_separators(ldev_nbr).zfill(4).lower()


def standard_format_wwn(wwn):
    """Normalize a WWN: strip ':' and whitespace, lower case."""
    return _strip_separators(wwn).lower()


def long_format_ldev(ldev_nbr):
    """Return the display form of an LDEV number, e.g. "1a" -> "00:1A"."""
    res = standard_format_ldev(ldev_nbr)
    return "{}:{}".format(res[0:2].upper(), res[2:4].upper())


def long_format_wwn(wwn):
    """Return the display form of a WWN: 8 upper-case byte pairs joined by ':'."""
    res = standard_format_wwn(wwn)
    # Eight fixed 2-char slices, exactly as the explicit res[0:2]..res[14:16]
    # formatting did (short input yields the same trailing empty pairs).
    return ":".join(res[i:i + 2] for i in range(0, 16, 2)).upper()


def convert_size(size):
    """Return *size* (in bytes) as a human-readable string, e.g. "2 KB".

    Floor division ("//") keeps the Python-2 integer-division behaviour and
    also works under Python 3, where the original "/" would produce a float
    and make the "{0:d}" format raise ValueError.
    NOTE: thresholds deliberately use ">" (not ">="), so an exact power such
    as 1024 still reports in the next-smaller unit ("1024 B"), matching the
    original output format.
    """
    if size > 1024 ** 4:
        return "{0:d} TB".format(size // 1024 ** 4)
    elif size > 1024 ** 3:
        return "{0:d} GB".format(size // 1024 ** 3)
    elif size > 1024 ** 2:
        return "{0:d} MB".format(size // 1024 ** 2)
    elif size > 1024:
        return "{0:d} KB".format(size // 1024)
    else:
        return "{0:d} B".format(size)


def port_nbr_even(port_name):
    """Return True/False for an even/odd CL port number, None for non-port names."""
    m = _PORT_RE.match(port_name)
    if m:
        return int(m.group(1)) % 2 == 0
    return None


def get_port_fabric_nbr(port_name):
    """Return the fabric number for a port (2 for even CL ports, 1 for odd).

    Returns None when *port_name* is not a CL port name.
    """
    m = _PORT_RE.match(port_name)
    if m:
        return 2 if int(m.group(1)) % 2 == 0 else 1
    return None


def short_port_name(port_name):
    """Return the short form of a port name, e.g. "CL1-A" -> "1A".

    Returns None when *port_name* is not a CL port name.
    """
    m = _PORT_RE.match(port_name)
    if m:
        return "{}{}".format(m.group(1), m.group(2))
    return None

####################################################################################################
### Class XP7
#################################################################################################### class XP7: def __init__(self,name,instance_nbr,serial_nbr,location,collect_file): self.name = name self.peer_box = None self.target_box = None self.source_box = None self.collect_file = collect_file self.instance_nbr = int(instance_nbr) self.bc_instance_nbr = int(instance_nbr) + 10 self.serial_nbr = serial_nbr self.location = location self.ldevs = [] self.ioports = [] self.hostgroups = [] self.hba_wwns = [] self.ldevs = [] self.luns = [] self.logins = [] ### load collect file ### if os.path.exists(self.collect_file): with open(self.collect_file,"rt") as f: collect_file_reader = csv.reader(f,delimiter=",",quotechar="'") for row in collect_file_reader: if row[0] == "PORT": io_port = IO_Port(*row[1:3]) self.ioports.append(io_port) if row[0] == "HOSTGROUP": hostgroup = Hostgroup(*row[1:6]) self.hostgroups.append(hostgroup) if row[0] == "WWN": hba_wwn = HBA_Wwn(*row[1:5]) self.hba_wwns.append(hba_wwn) if row[0] == "LDEV": ldev = Ldev(*row[1:7]) self.ldevs.append(ldev) if row[0] == "LUN": lun = Lun(*row[1:5]) self.luns.append(lun) if row[0] == "LOGIN": login = Login(*row[1:3]) self.logins.append(login) else: print "ERROR could not find collect file {}".format(self.collect_file) ### sort the ldev list ### self.ldevs.sort(key=lambda x: x.nbr) ### define hostgroup_name_list ### self.hostgroup_name_list = list(sorted(set([x.name for x in self.hostgroups]))) def __repr__(self): res ="{0:<30s} : {1:<10s}\n".format("BOX NAME",self.name) res +="{0:<30s} : {1:<10d}\n".format("SERIAL NBR",self.serial_nbr) res +="{0:<30s} : {1:<10d}\n".format("INSTANCE NBR",self.instance_nbr) res +="{0:<30s} : {1:<10s}\n".format("LOCATION",self.location) res +="{0:<30s} : {1:<10s}\n".format("COLLECT FILE",self.collect_file) res +="{0:<30s} : {1:<10s}\n".format("PEER BOX","" if self.peer_box is None else self.peer_box.name) res +="{0:<30s} : {1:d}\n".format("NBR of IO PORTs",len(self.ioports)) res 
+="{0:<30s} : {1:d}\n".format("NBR of HOSTGROUPs",len(self.hostgroups)) res +="{0:<30s} : {1:d}\n".format("NBR of HBA WWNs",len(self.hba_wwns)) res +="{0:<30s} : {1:d}\n".format("NBR of LDEVs",len(self.ldevs)) res +="{0:<30s} : {1:d}\n".format("NBR of LUNs",len(self.luns)) res +="{0:<30s} : {1:d}\n".format("NBR of LOGINs",len(self.logins)) return res def print_ldevs(self): """ print the list of LDEVs """ for ldev in self.ldevs: print ldev def print_luns(self): """ print the list of LUNs """ for lun in self.luns: print lun def print_hostgroups(self): """ print the list of hostgroups """ for hostgroup in self.hostgroups: print hostgroup def print_ioports(self): """ print the list of ioports """ for ioport in self.ioports: print ioport def print_logins(self): """ print the list of logins """ for login in self.logins: print login def print_logins_per_port(self): for port_name in sorted([x.port_name for x in self.ioports]): logins_per_port = [x.wwn for x in self.logins if x.port_name == port_name] nicknames_per_port = [] for wwn in logins_per_port: nickname = self.get_nickname(wwn) if nickname == "": nicknames_per_port.append(wwn) else: nicknames_per_port.append(nickname) print "{} : {}".format(port_name,",".join(nicknames_per_port)) def get_nickname(self,wwn): res = "" for hba_wwn in self.hba_wwns: if hba_wwn.wwn == wwn: res = hba_wwn.nickname return res def test_ldev_free(self,ldev_id): ldev_set = set([x.nbr] for x in self.ldevs) if ldev_id in ldev_set: return False else: return True def test_logged_in(self,port_name,wwn): res = False for login in self.logins: if login.port_name == port_name and login.wwn == wwn: res = True return res def print_hostgroup(self,hostgroup_name=None): """ print ports / hba_wwn / luns per hostgroup """ if hostgroup_name is None: ### list all hostgroups ### hostgroup_name_list = self.hostgroup_name_list elif hostgroup_name in self.hostgroup_name_list: ### list only the requested hostgroup ### hostgroup_name_list = [hostgroup_name] else: 
hostgroup_name_list = [] ### report on the hostgroups in the list ### for hostgroup_name in hostgroup_name_list: print "{:{fill}{align}{width}s}".format("=",fill="=",width=100,align="^") print "{:{fill}{align}{width}s}".format(hostgroup_name,fill="=",width=100,align="^") print "{:{fill}{align}{width}s}".format("=",fill="=",width=100,align="^") print ### ports ### hostgroup_list = [x for x in self.hostgroups if x.name == hostgroup_name] hostgroup_list.sort(key=lambda x: x.port_name) print "{:<50s} {:<10s} {:<10s} {:<20s} {:<10s}".format("HOSTGROUP","PORT","HOST MODE","HOST MODE OPTIONS","NBR") print "{:=<50s} {:=<10s} {:=<10s} {:=<20s} {:=<10s}".format("","","","","") for hostgroup in hostgroup_list: print "{:<50s} {:<10s} {:<10s} {:<20s} {:<10d}".format(hostgroup.name,hostgroup.port_name,hostgroup.mode,",".join(hostgroup.options),hostgroup.nbr) print ### hba_wwns ### hba_wwn_list = [x for x in self.hba_wwns if x.hostgroup_name == hostgroup_name] hba_wwn_list.sort(key=lambda x: x.port_name) print "{:<10s} {:<10s} {:<20s} {:<20s} {:<10s}".format("HBA_WWN","PORT","WWN","NICKNAME","LOGGED IN") print "{:=<10s} {:=<10s} {:=<20s} {:=<20s} {:=<10s}".format("","","","","") for hba_wwn in hba_wwn_list: print "{:<10s} {:<10s} {:<20s} {:<20s} {:<1s}".format("",hba_wwn.port_name,hba_wwn.wwn,hba_wwn.nickname,"V" if self.test_logged_in(hba_wwn.port_name,hba_wwn.wwn) else "X") print ### luns ### lun_list = [x for x in self.luns if x.hostgroup_name == hostgroup_name] lun_list.sort(key=lambda x: x.port_name) print "{:<10s} {:<10s} {:<10s} {:<10s}".format("LUN","PORT","LUN_ID","LDEV") print "{:=<10s} {:=<10s} {:=<10s} {:=<10s}".format("","","","","") for lun in lun_list: print "{:<10s} {:<10s} {:<10s} {:<10s}".format("",lun.port_name,lun.lun_id,lun.ldev_nbr) print def print_hostgroup_noluns(self,hostgroup_name=None): """ print ports / hba_wwn per hostgroup """ if hostgroup_name is None: ### list all hostgroups ### hostgroup_name_list = self.hostgroup_name_list elif hostgroup_name in 
self.hostgroup_name_list: ### list only the requested hostgroup ### hostgroup_name_list = [hostgroup_name] else: hostgroup_name_list = [] ### report on the hostgroups in the list ### for hostgroup_name in hostgroup_name_list: print "{:{fill}{align}{width}s}".format("=",fill="=",width=100,align="^") print "{:{fill}{align}{width}s}".format(hostgroup_name,fill="=",width=100,align="^") print "{:{fill}{align}{width}s}".format("=",fill="=",width=100,align="^") print ### ports ### hostgroup_list = [x for x in self.hostgroups if x.name == hostgroup_name] hostgroup_list.sort(key=lambda x: x.port_name) print "{:<50s} {:<10s} {:<10s} {:<20s} {:<10s}".format("HOSTGROUP","PORT","HOST MODE","HOST MODE OPTIONS","NBR") print "{:=<50s} {:=<10s} {:=<10s} {:=<20s} {:=<10s}".format("","","","","") for hostgroup in hostgroup_list: print "{:<50s} {:<10s} {:<10s} {:<20s} {:<10d}".format(hostgroup.name,hostgroup.port_name,hostgroup.mode,",".join(hostgroup.options),hostgroup.nbr) print ### hba_wwns ### hba_wwn_list = [x for x in self.hba_wwns if x.hostgroup_name == hostgroup_name] hba_wwn_list.sort(key=lambda x: x.port_name) print "{:<10s} {:<10s} {:<20s} {:<20s} {:<10s}".format("HBA_WWN","PORT","WWN","NICKNAME","LOGGED IN") print "{:=<10s} {:=<10s} {:=<20s} {:=<20s} {:=<10s}".format("","","","","") for hba_wwn in hba_wwn_list: print "{:<10s} {:<10s} {:<20s} {:<20s} {:<1s}".format("",hba_wwn.port_name,hba_wwn.wwn,hba_wwn.nickname,"V" if self.test_logged_in(hba_wwn.port_name,hba_wwn.wwn) else "X") print ### luns ### lun_list = [x for x in self.luns if x.hostgroup_name == hostgroup_name] port_set = set([x.port_name for x in lun_list]) print "{:<10s} {:<10s}".format("PORT","# LUNs") for port_name in sorted(port_set): print "{:<10s} {}".format(port_name,len([x for x in lun_list if x.port_name == port_name])) print def print_hostgroup_summary(self,hostgroup_name,indent=0): """ print hostgroup name / host_mode / host_mode_options / nbr """ if hostgroup_name not in self.hostgroup_name_list: 
return ### report on the hostgroups in the list, first match will do ### for hostgroup in self.hostgroups: if hostgroup.name == hostgroup_name: ### print summary ### print "{:{fill}{align}{width}} {:<30s} {:<20s} {:<10s}".format("",hostgroup.name,hostgroup.mode,",".join(hostgroup.options),fill=" ",align="<",width=indent) return def get_ldev_nbrs(self): """ return all ldev nbrs """ return sorted([x.nbr for x in self.ldevs]) def get_ldev_mapping(self,ldev_nbr): """ return the list of port / hostgroup_name / lun_id mappings found """ res = [] all_ldev_nbrs = set([x.nbr for x in self.ldevs]) if ldev_nbr in all_ldev_nbrs: ### run the list of all luns ### for lun in self.luns: if lun.ldev_nbr == ldev_nbr: ### we got a mapping ### res.append(lun) ### sort the result per port ### res.sort(key=lambda x: x.port_name) return res def check_ldev_mapping(self,ldev_nbr): """ return True if ldev mapping ok """ res = True lun_list = self.get_ldev_mapping(ldev_nbr) hg_dict = {} ### arrange the luns per hostgroup ### for lun in lun_list: if lun.hostgroup_name not in hg_dict: hg_dict[lun.hostgroup_name] = [] hg_dict[lun.hostgroup_name].append(lun) ### lun_ids should be the same per hostgroup ### for hg_name in hg_dict: lun_ids = set([x.lun_id for x in hg_dict[hg_name]]) if len(lun_ids) > 1: res = False return res def get_hostgroups(self): return self.hostgroup_name_list def get_hostgroup_ldevs(self,hostgroup_name,port_name=None): """ return the list of ldevs in the hostgroup """ ldev_set = set() for lun in self.luns: if port_name is None and lun.hostgroup_name == hostgroup_name: ldev_set.add(lun.ldev_nbr) elif port_name is not None and lun.hostgroup_name == hostgroup_name and lun.port_name == port_name: ldev_set.add(lun.ldev_nbr) return list(sorted(ldev_set)) def get_ldev(self,ldev_nbr): for ldev in self.ldevs: if ldev.nbr == ldev_nbr: return ldev return None def get_ldev_hostgroups(self,ldev_nbr): """ return the list of hostgroups in which the ldev is mapped """ res = set() lun_list 
= self.get_ldev_mapping(ldev_nbr) for lun in lun_list: res.add(lun.hostgroup_name) return list(sorted(res)) def set_ldev_ca_bc(self,ldev_nbr,ca="SMPL",bc_1="SMPL",bc_2="SMPL",bc_3="SMPL"): """ set the ldev CA and BC to SMPL,PVOL or SVOL """ for ldev in self.ldevs: if ldev.nbr == ldev_nbr:
<reponame>robschneider16/AIT-Core # Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT) # Bespoke Link to Instruments and Small Satellites (BLISS) # # Copyright 2016, by the California Institute of Technology. ALL RIGHTS # RESERVED. United States Government Sponsorship acknowledged. Any # commercial use must be negotiated with the Office of Technology Transfer # at the California Institute of Technology. # # This software may be subject to U.S. export control laws. By accepting # this software, the user agrees to comply with all applicable U.S. export # laws and regulations. User has the responsibility to obtain export licenses, # or other export authority as may be required before exporting such # information to foreign countries or providing access to foreign persons. ''' AIT Binary Stream Capturer The ait.bsc module handles logging of network data to PCAP files along with the server definition for RESTful manipulation of running loggers. ''' import calendar import datetime import json import os import socket import time from bottle import request, Bottle import gevent import gevent.monkey import gevent.pool import gevent.socket from ait.core import pcap, log gevent.monkey.patch_all() RAW_SOCKET_FD = None try: import rawsocket RAW_SOCKET_FD = rawsocket.rawsocket_fd() except ImportError: log.debug( 'The rawsocket library cannot be imported. ' 'Defaulting to the non-rawsocket approach.' ) except IOError: log.info( 'Unable to spawn rawsocket-helper. ' 'This may be a permissions issue (not SUID root?). ' 'Defaulting to non-rawsocket approach.' ) ETH_P_IP = 0x0800 ETH_P_ALL = 0x0003 ETH_PROTOCOL = ETH_P_ALL class SocketStreamCapturer(object): ''' Class for logging socket data to a PCAP file. 
''' def __init__(self, capture_handlers, address, conn_type): ''' Args: capture_handlers: A list of handler configuration dictionaries that contains the following values name A unique name for this handler log_dir The directory path into which log files will be written. This path may include format strings which reference handler metadata (E.g., {name}) as well as `strftime format characters <https://docs.python.org/2/library/time.html#time.strftime>` Example:: '/tmp/additional_dir/test/%j' rotate_log *True* or *False* flag specifying whether logs should be rotated at a regular interval. rotate_log_index If **rotate_log** is *True* this controls the time frame of log rotations. The below values are all the valid options. Each row's values are equivalent:: 'year', 'years', 'tm_year', 'month', 'months', 'tm_mon', 'day', 'days', 'tm_mday', 'hour', 'hours', 'tm_hour', 'minute', 'minutes', 'tm_min', 'second', 'seconds', 'tm_sec', Default:: 'day' rotate_log_delta If **rotate_log** is *True* this controls the **rotate_log_index** delta between the current time at log rotation check versus the time the log file was open necessary to trigger a rotation. Default:: 1 file_name_pattern (optional) The pattern to use for the log file name. This will be joined with the **log_dir** option to generate the full log file path. This may also include format strings like *log_dir*. Example:: '%Y-%m-%d-randomUDPtestData-{name}.pcap' Default:: '%Y-%m-%d-%H-%M-%S-{name}.pcap' pre_write_transforms (optional) A list of *callables* to be run prior to data output for this handler. The currently captured data is passed through each transformation in order supplied with the output of the previous being used as the input for the next. address: The address to which a socket connection should be made. What is considered a valid address depends on the **conn_type** value. 
udp:: [host, port number] E.g., ['', 8500] ethernet:: ['interface name', protocol number] E.g., ['p2p2', 0] tcp:: [host, port] E.g., ['127.0.0.1', 8125] conn_type: A string identifying the connection type. Valid options are *udp*, *ethernet*, and *tcp*. ''' if not isinstance(capture_handlers, list): capture_handlers = [capture_handlers] self.capture_handlers = capture_handlers for h in self.capture_handlers: h['reads'] = 0 h['data_read'] = 0 self.conn_type = conn_type self.address = address if conn_type == 'udp': self.socket = gevent.socket.socket(gevent.socket.AF_INET, gevent.socket.SOCK_DGRAM) self.socket.bind((address[0], address[1])) # TODO: Make this configurable self._buffer_size = 65565 elif conn_type == 'ethernet': socket_family = getattr(gevent.socket, 'AF_PACKET', gevent.socket.AF_INET) if RAW_SOCKET_FD: self.socket = gevent.socket.fromfd(RAW_SOCKET_FD, socket_family, gevent.socket.SOCK_RAW, socket.htons(ETH_PROTOCOL)) else: self.socket = gevent.socket.socket(socket_family, gevent.socket.SOCK_RAW, socket.htons(ETH_PROTOCOL)) self.socket.bind((address[0], address[1])) self._buffer_size = 1518 elif conn_type == 'tcp': self.socket = gevent.socket.socket(gevent.socket.AF_INET, gevent.socket.SOCK_STREAM) self.socket.connect((address[0], address[1])) # TODO: Make this configurable self._buffer_size = 65565 self._init_log_file_handlers() @property def handler_count(self): ''' Return the number of active capture handlers. ''' return len(self.capture_handlers) def capture_packet(self): ''' Write packet data to the logger's log file. ''' data = self.socket.recv(self._buffer_size) for h in self.capture_handlers: h['reads'] += 1 h['data_read'] += len(data) d = data if 'pre_write_transforms' in h: for data_transform in h['pre_write_transforms']: d = data_transform(d) h['logger'].write(d) def clean_up(self): ''' Clean up the socket and log file handles. 
''' self.socket.close() for h in self.capture_handlers: h['logger'].close() def socket_monitor_loop(self): ''' Monitor the socket and log captured data. ''' try: while True: gevent.socket.wait_read(self.socket.fileno()) self._handle_log_rotations() self.capture_packet() finally: self.clean_up() def add_handler(self, handler): ''' Add an additional handler Args: handler: A dictionary of handler configuration for the handler that should be added. See :func:`__init__` for details on valid parameters. ''' handler['logger'] = self._get_logger(handler) handler['reads'] = 0 handler['data_read'] = 0 self.capture_handlers.append(handler) def remove_handler(self, name): ''' Remove a handler given a name Note, if multiple handlers have the same name the last matching instance in the handler list will be removed. Args: name: The name of the handler to remove ''' index = None for i, h in enumerate(self.capture_handlers): if h['name'] == name: index = i if index is not None: self.capture_handlers[index]['logger'].close() del self.capture_handlers[index] def dump_handler_config_data(self): ''' Return capture handler configuration data. Return a dictionary of capture handler configuration data of the form: .. code-block:: none [{ 'handler': <handler configuration dictionary>, 'log_file_path': <Path to the current log file that the logger is writing. Note that if rotation is used it\'s possible this data will be stale eventually.>, 'conn_type': <The string defining the connection type of the logger.>, 'address': <The list containing address info that the logger is using for its connection.> }, ...] 
''' ignored_keys = ['logger', 'log_rot_time', 'reads', 'data_read'] config_data = [] for h in self.capture_handlers: config_data.append({ 'handler': { k:v for k, v in h.iteritems() if k not in ignored_keys }, 'log_file_path': h['logger']._stream.name, 'conn_type': self.conn_type, 'address': self.address, }) return config_data def dump_all_handler_stats(self): ''' Return handler capture statistics Return a dictionary of capture handler statistics of the form: .. code-block:: none [{ 'name': The handler's name, 'reads': The number of packet reads this handler has received 'data_read_length': The total length of the data received 'approx_data_rate': The approximate data rate for this handler }, ...] ''' stats = [] for h in self.capture_handlers: now = calendar.timegm(time.gmtime()) rot_time = calendar.timegm(h['log_rot_time']) time_delta = now - rot_time approx_data_rate = '{} bytes/second'.format(h['data_read'] / float(time_delta)) stats.append({ 'name': h['name'], 'reads': h['reads'], 'data_read_length': '{} bytes'.format(h['data_read']), 'approx_data_rate': approx_data_rate }) return stats def _handle_log_rotations(self): ''' Rotate each handler's log file if necessary ''' for h in self.capture_handlers: if self._should_rotate_log(h): self._rotate_log(h) def _should_rotate_log(self, handler): ''' Determine if a log file rotation is necessary ''' if handler['rotate_log']: rotate_time_index = handler.get('rotate_log_index', 'day') try: rotate_time_index = self._decode_time_rotation_index(rotate_time_index) except ValueError: rotate_time_index = 2 rotate_time_delta = handler.get('rotate_log_delta', 1) cur_t = time.gmtime() first_different_index = 9 for i in range(9): if cur_t[i] != handler['log_rot_time'][i]: first_different_index = i break if first_different_index < rotate_time_index: # If the time deltas differ by a time step greater than what we # have set for the rotation (I.e., months instead of days) we will # automatically rotate. 
return True else: time_delta = cur_t[rotate_time_index] - handler['log_rot_time'][rotate_time_index] return time_delta >= rotate_time_delta return False def _decode_time_rotation_index(self, time_rot_index): ''' Return the time struct index to use for log rotation checks ''' time_index_decode_table = { 'year': 0, 'years': 0, 'tm_year': 0, 'month': 1, 'months': 1, 'tm_mon': 1, 'day': 2, 'days': 2, 'tm_mday': 2, 'hour': 3, 'hours': 3, 'tm_hour': 3, 'minute': 4, 'minutes': 4, 'tm_min': 4, 'second': 5, 'seconds': 5, 'tm_sec': 5, } if time_rot_index not in time_index_decode_table.keys(): raise ValueError('Invalid time option specified for log rotation') return time_index_decode_table[time_rot_index] def _rotate_log(self, handler): ''' Rotate a handlers log file ''' handler['logger'].close() handler['logger'] = self._get_logger(handler) def _get_log_file(self, handler): ''' Generate log file path for a given handler Args: handler: The handler configuration dictionary for which a log file path should be generated. ''' if 'file_name_pattern' not in handler: filename = '%Y-%m-%d-%H-%M-%S-{name}.pcap' else: filename = handler['file_name_pattern'] log_file = handler['log_dir'] if 'path' in handler: log_file = os.path.join(log_file, handler['path'], filename) else: log_file = os.path.join(log_file, filename) log_file = time.strftime(log_file, time.gmtime()) log_file = log_file.format(**handler) return log_file def _get_logger(self, handler): ''' Initialize a PCAP stream for logging data ''' log_file = self._get_log_file(handler) if not os.path.isdir(os.path.dirname(log_file)): os.makedirs(os.path.dirname(log_file)) handler['log_rot_time'] = time.gmtime()
<gh_stars>0 # Copyright (c) 2016-2018, University of Idaho # All rights reserved. # # <NAME> (<EMAIL>) # # The project described was supported by NSF award number IIA-1301792 # from the NSF Idaho EPSCoR Program and by the National Science Foundation. # standard library import os from os.path import join as _join from os.path import exists as _exists from os.path import split as _split from datetime import datetime import shutil from enum import IntEnum from copy import deepcopy from collections import Counter # non-standard import jsonpickle # wepppy from wepppy.soils.ssurgo import SurgoMap, StatsgoSpatial, SurgoSoilCollection, NoValidSoilsException, SoilSummary from wepppy.watershed_abstraction.support import is_channel from wepppy.all_your_base import isfloat from wepppy.all_your_base.geo.webclients import wmesque_retrieve from wepppy.wepp.soils.soilsdb import load_db, get_soil # wepppy submodules from .base import ( NoDbBase, TriggerEvents ) from .ron import Ron from .watershed import Watershed, WatershedNotAbstractedError class SoilsNoDbLockedException(Exception): pass class SoilsMode(IntEnum): Undefined = -1 Gridded = 0 Single = 1 SingleDb = 2 RRED_Unburned = 3 RRED_Burned = 4 # noinspection PyPep8Naming class Soils(NoDbBase): """ Manager that keeps track of project details and coordinates access of NoDb instances. 
""" __name__ = 'Soils' def __init__(self, wd, cfg_fn): super(Soils, self).__init__(wd, cfg_fn) self.lock() # noinspection PyBroadException try: self._mode = SoilsMode.Gridded self._single_selection = 0 self._single_dbselection = None self._ssurgo_db = self.config_get_path('soils', 'ssurgo_db') self.domsoil_d = None # topaz_id keys self.ssurgo_domsoil_d = None self.soils = None self.clay_pct = None self.liquid_limit = None self._subs_summary = None self._chns_summary = None self._initial_sat = 0.75 soils_dir = self.soils_dir if not _exists(soils_dir): os.mkdir(soils_dir) _soils_map = self.config_get_path('soils', 'soils_map') if _soils_map is not None: _soil_fn = _join(self.soils_dir, _split(_soils_map)[-1]) shutil.copyfile(_soils_map, _soil_fn) if _exists(_soils_map[:-4] + '.prj'): shutil.copyfile(_soils_map[:-4] + '.prj', _soil_fn[:-4] + '.prj') _soils_map = _split(_soils_map)[-1] self._soils_map = _soils_map self.dump_and_unlock() except Exception: self.unlock('-f') raise # # Required for NoDbBase Subclass # # noinspection PyPep8Naming @staticmethod def getInstance(wd): with open(_join(wd, 'soils.nodb')) as fp: db = jsonpickle.decode(fp.read().replace('"simple_texture"', '"_simple_texture"') .replace('"texture"', '"_texture"')) assert isinstance(db, Soils) if _exists(_join(wd, 'READONLY')): db.wd = os.path.abspath(wd) return db if os.path.abspath(wd) != os.path.abspath(db.wd): db.wd = wd db.lock() db.dump_and_unlock() return db @property def _nodb(self): return _join(self.wd, 'soils.nodb') @property def _lock(self): return _join(self.wd, 'soils.nodb.lock') @property def initial_sat(self): return getattr(self, '_initial_sat', 0.75) @initial_sat.setter def initial_sat(self, value): self.lock() # noinspection PyBroadException try: self._initial_sat = value self.dump_and_unlock() except Exception: self.unlock('-f') raise @property def mode(self): return self._mode @mode.setter def mode(self, value): self.lock() # noinspection PyBroadException try: if 
isinstance(value, SoilsMode): self._mode = value elif isinstance(value, int): self._mode = SoilsMode(value) else: raise ValueError('most be SoilsMode or int') self.dump_and_unlock() except Exception: self.unlock('-f') raise @property def soils_map(self): return getattr(self, '_soils_map', None) @property def single_selection(self): return self._single_selection @single_selection.setter def single_selection(self, mukey): self.lock() # noinspection PyBroadException try: self._single_selection = mukey self.dump_and_unlock() except Exception: self.unlock('-f') raise @property def single_dbselection(self): return getattr(self, '_single_dbselection', None) @single_dbselection.setter def single_dbselection(self, sol): self.lock() # noinspection PyBroadException try: self._single_dbselection = sol self.dump_and_unlock() except Exception: self.unlock('-f') raise @property def has_soils(self): mode = self.mode assert isinstance(mode, SoilsMode) if mode == SoilsMode.Undefined: return False else: return self.domsoil_d is not None @property def legend(self): mukeys = sorted(set(self.domsoil_d.values())) soils = [self.soils[mukey] for mukey in mukeys] descs = [soil.desc for soil in soils] colors = [soil.color for soil in soils] return list(zip(mukeys, descs, colors)) # # build # def clean(self): soils_dir = self.soils_dir if _exists(soils_dir): shutil.rmtree(soils_dir) os.mkdir(soils_dir) @property def ssurgo_db(self): return getattr(self, '_ssurgo_db', self.config_get_str('soils', 'ssurgo_db')) @ssurgo_db.setter def ssurgo_db(self, value): self.lock() # noinspection PyBroadException try: self._ssurgo_db = value self.dump_and_unlock() except Exception: self.unlock('-f') raise def build_statsgo(self, initial_sat=None): wd = self.wd watershed = Watershed.getInstance(wd) if not watershed.is_abstracted: raise WatershedNotAbstractedError() soils_dir = self.soils_dir self.lock() # noinspection PyBroadException try: if initial_sat is not None: self._initial_sat = initial_sat 
statsgoSpatial = StatsgoSpatial() watershed = Watershed.getInstance(wd) domsoil_d = {} for topaz_id, sub in watershed.sub_iter(): lng, lat = sub.centroid.lnglat mukey = statsgoSpatial.identify_mukey_point(lng, lat) domsoil_d[str(topaz_id)] = str(mukey) for topaz_id, chn in watershed.chn_iter(): lng, lat = chn.centroid.lnglat mukey = statsgoSpatial.identify_mukey_point(lng, lat) domsoil_d[str(topaz_id)] = str(mukey) mukeys = set(domsoil_d.values()) surgo_c = SurgoSoilCollection(mukeys, use_statsgo=True) surgo_c.makeWeppSoils(initial_sat=self.initial_sat) soils = surgo_c.writeWeppSoils(wd=soils_dir, write_logs=True) soils = {str(k): v for k, v in soils.items()} surgo_c.logInvalidSoils(wd=soils_dir) sand_d = self._sand_d(surgo_c) clay_d = self._clay_d(surgo_c) ll_d = self._ll_d(surgo_c) # all the mukeys might not be valid. Need to identify the most common so we can use this instead valid_k_counts = Counter() for topaz_id, k in domsoil_d.items(): if k in soils: valid_k_counts[k] += 1 # now assign hillslopes with invalid mukeys the most common valid mukey most_common_k = valid_k_counts.most_common()[0][0] for topaz_id, k in domsoil_d.items(): if k not in soils: domsoil_d[topaz_id] = most_common_k # while we are at it we will calculate the pct coverage # for the landcover types in the watershed for topaz_id, k in domsoil_d.items(): soils[k].area += watershed.area_of(topaz_id) for k in soils: coverage = 100.0 * soils[k].area / watershed.wsarea soils[k].pct_coverage = coverage clay = clay_d[k] sand = sand_d[k] soils[k].sand = sand soils[k].clay = clay soils[k].ll = ll_d[k] # store the soils dict self.domsoil_d = domsoil_d self.ssurgo_domsoil_d = deepcopy(domsoil_d) self.soils = soils self.clay_pct = self._calc_clay_pct(clay_d) self.dump_and_unlock() self.trigger(TriggerEvents.SOILS_BUILD_COMPLETE) # noinspection PyMethodFirstArgAssignment self = self.getInstance(wd) # reload instance from .nodb except Exception: self.unlock('-f') raise def _build_by_identify(self, 
build_func): soils_dir = self.soils_dir wd = self.wd self.lock() # noinspection PyBroadException try: watershed = Watershed.getInstance(wd) orders = [] for topaz_id, sub in watershed.sub_iter(): orders.append([topaz_id, sub.centroid.lnglat]) for topaz_id, chn in watershed.chn_iter(): orders.append([topaz_id, chn.centroid.lnglat]) soils, domsoil_d, clay_d, sand_d = build_func(orders, soils_dir) for topaz_id, k in domsoil_d.items(): soils[k].area += watershed.area_of(topaz_id) for k in soils: coverage = 100.0 * soils[k].area / watershed.wsarea soils[k].pct_coverage = coverage clay = clay_d[k] sand = sand_d[k] soils[k].sand = sand soils[k].clay = clay # soils[k].ll = ll_d[k] # store the soils dict self.domsoil_d = domsoil_d self.ssurgo_domsoil_d = deepcopy(domsoil_d) self.soils = soils self.dump_and_unlock() self.trigger(TriggerEvents.SOILS_BUILD_COMPLETE) # noinspection PyMethodFirstArgAssignment self = self.getInstance(self.wd) # reload instance from .nodb except Exception: self.unlock('-f') raise def build(self, initial_sat=None): wd = self.wd watershed = Watershed.getInstance(wd) if not watershed.is_abstracted: raise WatershedNotAbstractedError() ron = Ron.getInstance(wd) if self.config_stem.startswith('ak'): self._build_ak() elif self.mode == SoilsMode.Gridded: if 'eu' in ron.locales: from wepppy.eu.soils import build_esdac_soils self._build_by_identify(build_esdac_soils) elif 'au' in ron.locales: from wepppy.au.soils import build_asris_soils self._build_by_identify(build_asris_soils) else: self._build_gridded(initial_sat=initial_sat) elif self.mode == SoilsMode.Single: self._build_single(initial_sat=initial_sat) elif self.mode == SoilsMode.SingleDb: self._build_singledb() elif self._mode in [SoilsMode.RRED_Burned, SoilsMode.RRED_Unburned]: import wepppy rred = wepppy.nodb.mods.Rred.getInstance(self.wd) rred.build_soils(self._mode) return def _clay_d(self, surgo_c): fp = open(_join(self.soils_dir, 'clay_rpt.log'), 'w') fp.write('determining clay content for run 
{}\n'.format(self.wd)) fp.write(str(datetime.now()) + '\n\n') clay_d = {} for mukey, soil in surgo_c.weppSoils.items(): horizon0 = soil.getFirstHorizon() if horizon0 is None: clay_d[str(mukey)] = 7.0 cokey = None else: clay_d[str(mukey)] = float(horizon0.claytotal_r) cokey = horizon0.cokey fp.write('mukey={}, cokey={}, clay={}\n'.format(mukey, cokey, clay_d[str(mukey)])) return clay_d def _sand_d(self, surgo_c): fp = open(_join(self.soils_dir, 'sand_rpt.log'), 'w') fp.write('determining sand content for run {}\n'.format(self.wd)) fp.write(str(datetime.now()) + '\n\n') sand_d = {} for mukey, soil in surgo_c.weppSoils.items(): horizon0 = soil.getFirstHorizon() if horizon0 is None: sand_d[str(mukey)] = 66.8 cokey = None else: sand_d[str(mukey)] = float(horizon0.sandtotal_r) cokey = horizon0.cokey fp.write('mukey={}, cokey={}, clay={}\n'.format(mukey, cokey, sand_d[str(mukey)])) return sand_d def _ll_d(self, surgo_c): fp = open(_join(self.soils_dir, 'll_rpt.log'), 'w') fp.write('determining clay content for run {}\n'.format(self.wd)) fp.write(str(datetime.now()) + '\n\n') ll_d = {} for mukey, soil in surgo_c.weppSoils.items(): horizon0 = soil.getFirstHorizon() if horizon0 is None: ll_d[str(mukey)] = 13.2499999 cokey = None elif isfloat(horizon0.ll_r): ll_d[str(mukey)] = float(horizon0.ll_r) cokey = horizon0.cokey else: ll_d[str(mukey)] = 13.2499999 cokey = None fp.write('mukey={}, cokey={}, ll={}\n'.format(mukey, cokey, ll_d[str(mukey)])) return ll_d def _calc_clay_pct(self, clay_d): domsoil_d = self.ssurgo_domsoil_d assert domsoil_d is not None totalarea = 0.0 wsum = 0.0 watershed = Watershed.getInstance(self.wd) for topaz_id, ss in watershed.sub_iter(): mukey = domsoil_d[str(topaz_id)] clay = clay_d[str(mukey)] area = ss.area wsum += area * clay totalarea += area clay_pct = wsum / totalarea return clay_pct def _calc_liquid_limit(self, ll_d): domsoil_d = self.domsoil_d assert domsoil_d is not None totalarea = 0.0 wsum = 0.0 watershed = Watershed.getInstance(self.wd) 
for topaz_id, ss in watershed.sub_iter(): mukey = domsoil_d[str(topaz_id)] ll = ll_d[str(mukey)] area = ss.area wsum += area * ll totalarea += area ll_pct = wsum / totalarea return ll_pct def _build_ak(self): wd = self.wd self.lock() # noinspection PyBroadException try: watershed = Watershed.getInstance(wd) mukey = -9999 domsoil_d = {} soils = {str(mukey): SoilSummary( mukey=mukey, fname=None, soils_dir=None, build_date=str(datetime.now()), desc=None, pct_coverage=100.0 )} for topaz_id, sub in watershed.sub_iter(): domsoil_d[str(topaz_id)] = str(mukey) for topaz_id, chn in watershed.chn_iter(): domsoil_d[str(topaz_id)] = str(mukey) soils[str(mukey)].pct_coverage = 100.0 # store the soils dict self.domsoil_d = domsoil_d self.ssurgo_domsoil_d = deepcopy(domsoil_d) self.soils = soils self.dump_and_unlock() self.trigger(TriggerEvents.SOILS_BUILD_COMPLETE) # noinspection PyMethodFirstArgAssignment self = self.getInstance(self.wd) # reload instance from .nodb except Exception: self.unlock('-f') raise
L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(getCountries_result) getCountries_result.thrift_spec = ( (0, TType.SET, 'success', (TType.STRING, 'UTF8', False), None, ), # 0 (1, TType.STRUCT, 'e', [TalkException, None], None, ), # 1 ) class registerUserid_args(object): """ Attributes: - reqSeq - searchId """ def __init__(self, reqSeq=None, searchId=None,): self.reqSeq = reqSeq self.searchId = searchId def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.reqSeq = iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.searchId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('registerUserid_args') if self.reqSeq is not None: oprot.writeFieldBegin('reqSeq', TType.I32, 1) oprot.writeI32(self.reqSeq) oprot.writeFieldEnd() if self.searchId is not None: oprot.writeFieldBegin('searchId', TType.STRING, 2) oprot.writeString(self.searchId.encode('utf-8') if sys.version_info[0] == 2 else self.searchId) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, 
value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(registerUserid_args) registerUserid_args.thrift_spec = ( None, # 0 (1, TType.I32, 'reqSeq', None, None, ), # 1 (2, TType.STRING, 'searchId', 'UTF8', None, ), # 2 ) class registerUserid_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None,): self.success = success self.e = e def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('registerUserid_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return 
not (self == other) all_structs.append(registerUserid_result) registerUserid_result.thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 (1, TType.STRUCT, 'e', [TalkException, None], None, ), # 1 ) class isUseridAvailable_args(object): """ Attributes: - searchId """ def __init__(self, searchId=None,): self.searchId = searchId def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 2: if ftype == TType.STRING: self.searchId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('isUseridAvailable_args') if self.searchId is not None: oprot.writeFieldBegin('searchId', TType.STRING, 2) oprot.writeString(self.searchId.encode('utf-8') if sys.version_info[0] == 2 else self.searchId) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(isUseridAvailable_args) isUseridAvailable_args.thrift_spec = ( None, # 0 None, # 1 (2, TType.STRING, 'searchId', 'UTF8', None, ), # 2 ) class isUseridAvailable_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None,): self.success = 
success self.e = e def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('isUseridAvailable_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(isUseridAvailable_result) isUseridAvailable_result.thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 (1, TType.STRUCT, 'e', [TalkException, None], None, ), # 1 ) class getProfile_args(object): """ Attributes: - syncReason """ def __init__(self, syncReason=None,): self.syncReason = syncReason def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, 
self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.syncReason = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('getProfile_args') if self.syncReason is not None: oprot.writeFieldBegin('syncReason', TType.I32, 1) oprot.writeI32(self.syncReason) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(getProfile_args) getProfile_args.thrift_spec = ( None, # 0 (1, TType.I32, 'syncReason', None, None, ), # 1 ) class getProfile_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None,): self.success = success self.e = e def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = Profile() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and 
self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('getProfile_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(getProfile_result) getProfile_result.thrift_spec = ( (0, TType.STRUCT, 'success', [Profile, None], None, ), # 0 (1, TType.STRUCT, 'e', [TalkException, None], None, ), # 1 ) class startUpdateVerification_args(object): """ Attributes: - region - carrier - phone - udidHash - deviceInfo - networkCode - locale - simInfo """ def __init__(self, region=None, carrier=None, phone=None, udidHash=None, deviceInfo=None, networkCode=None, locale=None, simInfo=None,): self.region = region self.carrier = carrier self.phone = phone self.udidHash = udidHash self.deviceInfo = deviceInfo self.networkCode = networkCode self.locale = locale self.simInfo = simInfo def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 2: if ftype == TType.STRING: self.region = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.carrier = 
iprot.readI32() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.phone = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.udidHash = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRUCT: self.deviceInfo = DeviceInfo() self.deviceInfo.read(iprot) else: iprot.skip(ftype) elif fid
columns from the DB db_cols = get_table_cols(cls._table, conn_handler) # Remove the sample_id and study_id columns db_cols.remove('sample_id') db_cols.remove(cls._id_column) # Insert values on required columns values = _as_python_types(md_template, db_cols) values.insert(0, sample_ids) values.insert(0, [study.id] * num_samples) values = [v for v in zip(*values)] conn_handler.add_to_queue( queue_name, "INSERT INTO qiita.{0} ({1}, sample_id, {2}) " "VALUES (%s, %s, {3})".format(cls._table, cls._id_column, ', '.join(db_cols), ', '.join(['%s'] * len(db_cols))), values, many=True) # Insert rows on *_columns table headers = list(set(headers).difference(db_cols)) datatypes = _get_datatypes(md_template.ix[:, headers]) # psycopg2 requires a list of tuples, in which each tuple is a set # of values to use in the string formatting of the query. We have all # the values in different lists (but in the same order) so use zip # to create the list of tuples that psycopg2 requires. values = [ v for v in zip([study.id] * len(headers), headers, datatypes)] conn_handler.add_to_queue( queue_name, "INSERT INTO qiita.{0} ({1}, column_name, column_type) " "VALUES (%s, %s, %s)".format(cls._column_table, cls._id_column), values, many=True) # Create table with custom columns table_name = cls._table_name(study.id) column_datatype = ["%s %s" % (col, dtype) for col, dtype in zip(headers, datatypes)] conn_handler.add_to_queue( queue_name, "CREATE TABLE qiita.{0} (sample_id varchar NOT NULL, {1})".format( table_name, ', '.join(column_datatype))) # Insert values on custom table values = _as_python_types(md_template, headers) values.insert(0, sample_ids) values = [v for v in zip(*values)] conn_handler.add_to_queue( queue_name, "INSERT INTO qiita.{0} (sample_id, {1}) " "VALUES (%s, {2})".format(table_name, ", ".join(headers), ', '.join(["%s"] * len(headers))), values, many=True) conn_handler.execute_queue(queue_name) # figuring out the filepath of the backup _id, fp = get_mountpoint('templates')[0] 
fp = join(fp, '%d_%s.txt' % (study.id, strftime("%Y%m%d-%H%M%S"))) # storing the backup st = cls(study.id) st.to_file(fp) # adding the fp to the object st.add_filepath(fp) return st @property def study_id(self): """Gets the study id with which this sample template is associated Returns ------- int The ID of the study with which this sample template is associated """ return self._id def extend(self, md_template): """Adds the given sample template to the current one Parameters ---------- md_template : DataFrame The metadata template file contents indexed by samples Ids """ conn_handler = SQLConnectionHandler() queue_name = "EXTEND_SAMPLE_TEMPLATE_%d" % self.id conn_handler.create_queue(queue_name) md_template = self._clean_validate_template(md_template, self.study_id, conn_handler) # Raise warning and filter out existing samples sample_ids = md_template.index.tolist() sql = ("SELECT sample_id FROM qiita.required_sample_info WHERE " "study_id = %d" % self.id) curr_samples = set(s[0] for s in conn_handler.execute_fetchall(sql)) existing_samples = curr_samples.intersection(sample_ids) if existing_samples: warnings.warn( "The following samples already exist and will be ignored: " "%s" % ", ".join(curr_samples.intersection( sorted(existing_samples))), QiitaDBWarning) md_template.drop(existing_samples, inplace=True) # Get some useful information from the metadata template sample_ids = md_template.index.tolist() num_samples = len(sample_ids) headers = list(md_template.keys()) # Get the required columns from the DB db_cols = get_table_cols(self._table, conn_handler) # Remove the sample_id and study_id columns db_cols.remove('sample_id') db_cols.remove(self._id_column) # Insert values on required columns values = _as_python_types(md_template, db_cols) values.insert(0, sample_ids) values.insert(0, [self.study_id] * num_samples) values = [v for v in zip(*values)] conn_handler.add_to_queue( queue_name, "INSERT INTO qiita.{0} ({1}, sample_id, {2}) " "VALUES (%s, %s, 
{3})".format(self._table, self._id_column, ', '.join(db_cols), ', '.join(['%s'] * len(db_cols))), values, many=True) # Add missing columns to the sample template dynamic table headers = list(set(headers).difference(db_cols)) datatypes = _get_datatypes(md_template.ix[:, headers]) table_name = self._table_name(self.study_id) new_cols = set(md_template.columns).difference( set(self.metadata_headers())) dtypes_dict = dict(zip(md_template.ix[:, headers], datatypes)) for category in new_cols: # Insert row on *_columns table conn_handler.add_to_queue( queue_name, "INSERT INTO qiita.{0} ({1}, column_name, column_type) " "VALUES (%s, %s, %s)".format(self._column_table, self._id_column), (self.study_id, category, dtypes_dict[category])) # Insert row on dynamic table conn_handler.add_to_queue( queue_name, "ALTER TABLE qiita.{0} ADD COLUMN {1} {2}".format( table_name, scrub_data(category), dtypes_dict[category])) # Insert values on custom table values = _as_python_types(md_template, headers) values.insert(0, sample_ids) values = [v for v in zip(*values)] conn_handler.add_to_queue( queue_name, "INSERT INTO qiita.{0} (sample_id, {1}) " "VALUES (%s, {2})".format(table_name, ", ".join(headers), ', '.join(["%s"] * len(headers))), values, many=True) conn_handler.execute_queue(queue_name) # figuring out the filepath of the backup _id, fp = get_mountpoint('templates')[0] fp = join(fp, '%d_%s.txt' % (self.id, strftime("%Y%m%d-%H%M%S"))) # storing the backup self.to_file(fp) # adding the fp to the object self.add_filepath(fp) def update(self, md_template): r"""Update values in the sample template Parameters ---------- md_template : DataFrame The metadata template file contents indexed by samples Ids Raises ------ QiitaDBError If md_template and db do not have the same sample ids If md_template and db do not have the same column headers """ conn_handler = SQLConnectionHandler() # Clean and validate the metadata template given new_map = self._clean_validate_template(md_template, self.id, 
conn_handler) # Retrieving current metadata current_map = self._transform_to_dict(conn_handler.execute_fetchall( "SELECT * FROM qiita.{0} WHERE {1}=%s".format(self._table, self._id_column), (self.id,))) dyn_vals = self._transform_to_dict(conn_handler.execute_fetchall( "SELECT * FROM qiita.{0}".format(self._table_name(self.id)))) for k in current_map: current_map[k].update(dyn_vals[k]) current_map[k].pop('study_id', None) # converting sql results to dataframe current_map = pd.DataFrame.from_dict(current_map, orient='index') # simple validations of sample ids and column names samples_diff = set( new_map.index.tolist()) - set(current_map.index.tolist()) if samples_diff: raise QiitaDBError('The new sample template differs from what is ' 'stored in database by these samples names: %s' % ', '.join(samples_diff)) columns_diff = set(new_map.columns) - set(current_map.columns) if columns_diff: raise QiitaDBError('The new sample template differs from what is ' 'stored in database by these columns names: %s' % ', '.join(columns_diff)) # here we are comparing two dataframes following: # http://stackoverflow.com/a/17095620/4228285 current_map.sort(axis=0, inplace=True) current_map.sort(axis=1, inplace=True) new_map.sort(axis=0, inplace=True) new_map.sort(axis=1, inplace=True) map_diff = (current_map != new_map).stack() map_diff = map_diff[map_diff] map_diff.index.names = ['id', 'column'] changed_cols = map_diff.index.get_level_values('column').unique() for col in changed_cols: self.update_category(col, new_map[col].to_dict()) # figuring out the filepath of the backup _id, fp = get_mountpoint('templates')[0] fp = join(fp, '%d_%s.txt' % (self.id, strftime("%Y%m%d-%H%M%S"))) # storing the backup self.to_file(fp) # adding the fp to the object self.add_filepath(fp) # generating all new QIIME mapping files for rd_id in Study(self.id).raw_data(): for pt_id in RawData(rd_id).prep_templates: pt = PrepTemplate(pt_id) for _, fp in pt.get_filepaths(): # the difference between a prep and a 
qiime template is the # word qiime within the name of the file if '_qiime_' not in basename(fp): pt.create_qiime_mapping_file(fp) def remove_category(self, category): """Remove a category from the sample template Parameters ---------- category : str The category to remove Raises ------ QiitaDBColumnError If the column does not exist in the table """ table_name = self._table_name(self.study_id) conn_handler = SQLConnectionHandler() if category not in self.categories(): raise QiitaDBColumnError("Column %s does not exist in %s" % (category, table_name)) # This operation may invalidate another user's perspective on the # table conn_handler.execute(""" ALTER TABLE qiita.{0} DROP COLUMN {1}""".format(table_name, category)) def update_category(self, category, samples_and_values): """Update an existing column Parameters ---------- category : str The category to update samples_and_values : dict A mapping of {sample_id: value} Raises ------ QiitaDBUnknownIDError If a sample_id is included in values that is not in the template QiitaDBColumnError If the column does not exist in the table. This is implicit, and can be thrown by the contained Samples. """ if not set(self.keys()).issuperset(samples_and_values): missing = set(self.keys()) - set(samples_and_values) table_name = self._table_name(self.study_id) raise QiitaDBUnknownIDError(missing, table_name) for k, v in viewitems(samples_and_values): sample = self[k] sample[category] = v def add_category(self, category, samples_and_values, dtype, default): """Add a metadata category Parameters ---------- category : str The category to add samples_and_values : dict A mapping of {sample_id: value} dtype : str The datatype of the column default : object The default value associated with the column. This must be specified as these columns are added "not null". 
Raises ------ QiitaDBDuplicateError If the column already exists """ table_name = self._table_name(self.study_id) conn_handler = SQLConnectionHandler() if category in self.categories(): raise QiitaDBDuplicateError(category, "N/A") conn_handler.execute(""" ALTER TABLE qiita.{0} ADD COLUMN {1} {2} NOT NULL DEFAULT '{3}'""".format(table_name, category, dtype, default)) self.update_category(category, samples_and_values) class PrepTemplate(MetadataTemplate): r"""Represent the PrepTemplate of a raw data. Provides access to the tables in the DB that holds the sample preparation information. See Also -------- MetadataTemplate SampleTemplate """ _table = "common_prep_info" _table_prefix = "prep_" _column_table = "prep_columns" _id_column = "prep_template_id" translate_cols_dict = {'emp_status_id': 'emp_status'} id_cols_handlers = {'emp_status_id': get_emp_status()} str_cols_handlers = {'emp_status_id': get_emp_status(key='emp_status_id')} _sample_cls = PrepSample @classmethod def create(cls, md_template, raw_data, study, data_type, investigation_type=None): r"""Creates the metadata template in the database Parameters ---------- md_template : DataFrame The metadata template file contents indexed by samples Ids raw_data : RawData The raw_data to which the prep template belongs to. study : Study The study to which the prep template belongs to.
<reponame>sumedhpb/testrunner from lib.mc_bin_client import MemcachedClient, MemcachedError from lib.memcacheConstants import * from .subdoc_base import SubdocBaseTest import copy, json import sys import random class SubdocErrorHandling(SubdocBaseTest): def setUp(self): super(SubdocErrorHandling, self).setUp() self.nesting_level = self.input.param("nesting_level", 0) self.client = self.direct_client(self.master, self.buckets[0]) def tearDown(self): super(SubdocErrorHandling, self).tearDown() def test_error_get_simple_data(self): result = {} simple_data = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } # Add Simple Data jsonDump = json.dumps(simple_data) self.client.set("simple_data", 0, 0, jsonDump) #self.client.get_sd("simple_data","crap") self.log.info("simple_data :: path does not exist") self.error_gets("simple_data", "does_not_exist", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist - dictionary", result = result) self.log.info("simple_data :: malformed path") self.error_gets("simple_data", "{][]}", error = "Memcached error #194 'Invalid path'", field = "simple_data : malformed path", result = result) self.log.info("simple_data :: path does not exist - array, out of bounds index") self.error_gets("simple_data", "array[200]", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist - array, out of bounds index", result = result) self.log.info("simple_data :: document does not exist") self.error_gets("does_not_exist", "does_not_exist", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result) self.assertTrue(len(result) == 0, result) def test_error_get_nested_data(self): result = {} simple_data = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } nested_simple = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } # Add Simple Data jsonDump = json.dumps(simple_data) self.client.set("simple_data", 0, 0, jsonDump) # 
Add Normal Nested Data base_json = self.generate_json_for_nesting() nested_json = self.generate_nested(base_json, nested_simple, 40) jsonDump = json.dumps(nested_json) self.client.set("nested_data", 0, 0, jsonDump) # Add Abnormal Nested Data base_json = self.generate_json_for_nesting() nested_json = self.generate_nested(base_json, nested_simple, 20) jsonDump = json.dumps(nested_json) self.client.set("normal_nested_data", 0, 0, jsonDump) # Tests for Nested Data self.log.info("nested_data :: path does not exist") new_path = self.generate_path(20, "does_not_exist") self.error_gets("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist - dictionary", result = result) self.log.info("nested_data ::path does not exist - array, out of bounds index") new_path = self.generate_path(20, "array[200]") self.error_gets("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist - array, out of bounds index", result = result) self.log.info("nested_data ::malformed path") new_path = self.generate_path(20, "{[]}") self.error_gets("normal_nested_data", new_path, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result) # Tests for Nested Data with long path self.log.info("long_nested_data ::nested_data : path does not exist - too big path") new_path = self.generate_path(40, "field") self.error_gets("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result) self.assertTrue(len(result) == 0, result) def test_error_exists_nested_data(self): result = {} nested_simple = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } # Add Normal Nested Data base_json = self.generate_json_for_nesting() nested_json = self.generate_nested(base_json, nested_simple, 40) jsonDump = json.dumps(nested_json) self.client.set("nested_data", 
0, 0, jsonDump) # Add Abnormal Nested Data base_json = self.generate_json_for_nesting() nested_json = self.generate_nested(base_json, nested_simple, 20) jsonDump = json.dumps(nested_json) self.client.set("normal_nested_data", 0, 0, jsonDump) # Tests for Nested Data Set self.log.info("nested_data :: malformed path") new_path = self.generate_path(20, "{][]}") self.error_exists("normal_nested_data", new_path, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result) self.log.info("nested_data :: path does not exist") new_path = self.generate_path(20, "does_not_exist") self.error_exists("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist malformed path", result = result) self.log.info("nested_data ::path does not exist - array, out of bounds index") new_path = self.generate_path(20, "array[200]") self.error_exists("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist - array, out of bounds index", result = result) # Tests for Nested Data with long path self.log.info("long_nested_data ::nested_data : path does not exist - too big path") new_path = self.generate_path(40, "field") self.error_exists("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result) self.assertTrue(len(result) == 0, result) def test_error_exists_simple_data(self): result = {} simple_data = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } # Add Simple Data jsonDump = json.dumps(simple_data) self.client.set("simple_data", 0, 0, jsonDump) # Tests for Simple Data Set self.log.info("simple_data :: path does not exist") self.error_exists("simple_data", "does_not_exist", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist ", result = result) self.log.info("simple_data :: path does 
not exist - array, out of bounds index") self.error_exists("simple_data", "array[200]", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist - array, out of bounds index", result = result) self.log.info("simple_data :: document does not exist") self.error_exists("does_not_exist", "does_not_exist", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result) self.log.info("simple_data :: malformed path") self.error_exists("simple_data", "[]{}]", error = "Memcached error #194 'Invalid path'", field = "simple_data : malformed path", result = result) self.assertTrue(len(result) == 0, result) def test_error_add_dict_simple_data(self): result = {} simple_data = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } # Add Simple Data jsonDump = json.dumps(simple_data) self.client.set("simple_data", 0, 0, jsonDump) # Tests for Simple Data Set self.log.info("simple_data :: path exists") self.error_add_dict("simple_data", "field", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data :: path exists", result = result) self.log.info("simple_data :: inserting into an array") self.error_add_dict("simple_data", "array[0]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data :: inserting into an array", result = result) self.log.info("simple_data :: empty path does not exist") self.error_add_dict("simple_data", "{][]}", value = "value_value", error = "Memcached error #194 'Invalid path'", field = "simple_data : malformed path", result = result) self.assertTrue(len(result) == 0, result) self.error_add_dict("simple_data", "", value = "value_value", error = "Memcached error #4 'Invalid'", field = "simple_data : empty path does not exist - dictionary", result = result) self.log.info("simple_data :: malformed path") def test_error_add_dict_nested_data(self): result = {} simple_data = { "field":"simple", 
"array":[{"field":"exists"}, 1, 2] } nested_simple = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } # Add Simple Data jsonDump = json.dumps(simple_data) self.client.set("simple_data", 0, 0, jsonDump) # Add Normal Nested Data base_json = self.generate_json_for_nesting() nested_json = self.generate_nested(base_json, nested_simple, 40) jsonDump = json.dumps(nested_json) self.client.set("nested_data", 0, 0, jsonDump) # Add Abnormal Nested Data base_json = self.generate_json_for_nesting() nested_json = self.generate_nested(base_json, nested_simple, 20) jsonDump = json.dumps(nested_json) self.client.set("normal_nested_data", 0, 0, jsonDump) # Tests for Simple Data Set self.log.info("nested_data :: malformed json") new_path = self.generate_path(20, "field_1") self.error_add_dict("normal_nested_data", new_path, value = {"data"}, error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed json", result = result) self.log.info("nested_data :: path exists") new_path = self.generate_path(20, "field") self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : path exists", result = result) self.log.info("nested_data :: inserting into an array") new_path = self.generate_path(20, "array[0]") self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : inserting into an array", result = result) self.log.info("nested_data :: empty path does not exist") new_path = self.generate_path(20, "") self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : empty path does not exist - dictionary", result = result) self.log.info("nested_data :: malformed path") new_path = self.generate_path(20, "{}][") self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #194 'Invalid 
path'", field = "nested_data : malformed path", result = result) # Tests for Nested Data with long path self.log.info("long_nested_data ::nested_data : path does not exist - too big path") new_path = self.generate_path(40, "field") self.error_add_dict("nested_data", new_path, value = "value_value", error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result) self.assertTrue(len(result) == 0, result) def test_error_upsert_dict_simple_data(self): result = {} simple_data = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } nested_simple = { "field":"simple", "array":[{"field":"exists"}, 1, 2] } # Add Simple Data jsonDump = json.dumps(simple_data) self.client.set("simple_data", 0, 0, jsonDump) # Tests for Simple Data Set self.log.info("simple_data :: insertion into array") self.error_upsert_dict("simple_data", "array[0]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data : insertion into array", result = result) self.log.info("simple_data :: empty path does not exist") self.error_upsert_dict("simple_data", "", value = "value_value", error
inputdata: for key in nodes: if key not in inputdata: raise exc.InvalidArgumentException(key + ' not in request') datum = inputdata[key] if 'nextdevice' not in datum: raise exc.InvalidArgumentException( 'missing nextdevice argument') elif datum['nextdevice'] not in self.valid_values: raise exc.InvalidArgumentException( datum['nextdevice'] + ' is not one of ' + ','.join(self.valid_values)) self.bootdevbynode[key] = datum['nextdevice'] if 'bootmode' in datum: if datum['bootmode'] not in self.valid_bootmodes: raise exc.InvalidArgumentException( datum['bootmode'] + ' is not one of ' + ','.join(self.valid_bootmodes)) self.bootmodebynode[key] = datum['bootmode'] if 'persistent' in datum: self.bootmodebynode[key] = datum['persistent'] else: datum = inputdata if 'nextdevice' not in datum: raise exc.InvalidArgumentException( 'missing nextdevice argument') elif datum['nextdevice'] not in self.valid_values: raise exc.InvalidArgumentException( datum['nextdevice'] + ' is not one of ' + ','.join(self.valid_values)) for node in nodes: self.bootdevbynode[node] = datum['nextdevice'] if 'bootmode' in datum: self.bootmodebynode[node] = datum['bootmode'] if 'persistent' in datum: self.persistentbynode[node] = datum['persistent'] def bootdevice(self, node): return self.bootdevbynode[node] def bootmode(self, node): return self.bootmodebynode.get(node, 'unspecified') def persistent(self, node): return self.persistentbynode.get(node, False) class IdentifyState(ConfluentChoiceMessage): valid_values = set([ '', # allowed for output to indicate write-only support 'on', 'off', ]) keyname = 'identify' class ReseatResult(ConfluentChoiceMessage): valid_values = set([ 'success', ]) keyname = 'reseat' class PowerState(ConfluentChoiceMessage): valid_values = set([ 'on', 'off', 'reset', 'boot', 'shutdown', 'diag', ]) keyname = 'state' def __init__(self, node, state, oldstate=None): super(PowerState, self).__init__(node, state) self.myargs = (node, state, oldstate) if oldstate is not None: 
self.kvpairs[node]['oldstate'] = {'value': oldstate} class BMCReset(ConfluentChoiceMessage): valid_values = set([ 'reset', ]) keyname = 'state' class NTPEnabled(ConfluentChoiceMessage): valid_values = set([ 'True', 'False', ]) def __init__(self, node, enabled): self.stripped = False self.myargs = (node, enabled) self.kvpairs = { node: { 'state': {'value': str(enabled)}, } } class EventCollection(ConfluentMessage): """A collection of events This conveys a representation of an iterable of events. The following fields are supported: id (some data giving the class of event without the specific data of the event. For example, 'overtemp (1000 degrees celsius)' would have the same 'id' as 'overtemp (200 degrees celsius) component (specific name of the component this event references if any) component_type (A description of the sort of device component is) event (A text description of the event that occurred) severity (The text 'ok', 'warning', 'critical', 'failed', or 'unknown') timestamp (ISO 8601 compliant timestamp if available) """ readonly = True def __init__(self, events=(), name=None): eventdata = [] self.notnode = name is None self.myname = name self.myargs = (eventdata, name) for event in events: entry = { 'id': event.get('id', None), 'component': event.get('component', None), 'component_type': event.get('component_type', None), 'event': event.get('event', None), 'severity': event['severity'], 'timestamp': event.get('timestamp', None), 'message': event.get('message', None), 'record_id': event.get('record_id', None), 'log_id': event.get('log_id', None), } if event['severity'] not in valid_health_values: raise exc.NotImplementedException( 'Invalid severity - ' + repr(event['severity'])) eventdata.append(entry) if self.notnode: self.kvpairs = {'events': eventdata} else: self.kvpairs = {name: {'events': eventdata}} class AsyncCompletion(ConfluentMessage): def __init__(self): self.stripped = True self.notnode = True @classmethod def deserialize(cls): raise 
Exception("Not supported") def raw(self): return {'_requestdone': True} class AsyncMessage(ConfluentMessage): def __init__(self, pair): self.stripped = True self.notnode = True self.msgpair = pair @classmethod def deserialize(cls): raise Exception("Not supported") def raw(self): rsp = self.msgpair[1] rspdict = None if (isinstance(rsp, ConfluentMessage) or isinstance(rsp, ConfluentNodeError)): rspdict = rsp.raw() elif isinstance(rsp, exc.ConfluentException): rspdict = {'exceptioncode': rsp.apierrorcode, 'exception': rsp.get_error_body()} elif isinstance(rsp, Exception): rspdict = {'exceptioncode': 500, 'exception': str(rsp)} elif isinstance(rsp, dict): # console metadata rspdict = rsp else: # terminal text rspdict = {'data': rsp} return {'asyncresponse': {'requestid': self.msgpair[0], 'response': rspdict}} class AsyncSession(ConfluentMessage): def __init__(self, id): self.desc = 'foo' self.notnode = True self.stripped = True self.kvpairs = {'asyncid': id} class User(ConfluentMessage): def __init__(self, uid, username, privilege_level, name=None, expiration=None): self.desc = 'foo' self.stripped = False self.notnode = name is None self.myargs = (uid, username, privilege_level, name, expiration) kvpairs = {'username': {'value': username}, 'password': {'value': '', 'type': 'password'}, 'privilege_level': {'value': privilege_level}, 'enabled': {'value': ''}, 'expiration': {'value': expiration}, } if self.notnode: self.kvpairs = kvpairs else: self.kvpairs = {name: kvpairs} class UserCollection(ConfluentMessage): readonly = True def __init__(self, users=(), name=None): self.notnode = name is None self.desc = 'list of users' userlist = [] self.myargs = (userlist, name) for user in users: if 'username' in user: # processing an already translated dict userlist.append(user) continue entry = { 'uid': user['uid'], 'username': user['name'], 'expiration': user.get('expiration', None), 'privilege_level': user['access']['privilege_level'] } userlist.append(entry) if self.notnode: 
self.kvpairs = {'users': userlist} else: self.kvpairs = {name: {'users': userlist}} class AlertDestination(ConfluentMessage): def __init__(self, ip, acknowledge=False, acknowledge_timeout=None, retries=0, name=None): self.myargs = (ip, acknowledge, acknowledge_timeout, retries, name) self.desc = 'foo' self.stripped = False self.notnode = name is None kvpairs = {'ip': {'value': ip}, 'acknowledge': {'value': acknowledge}, 'acknowledge_timeout': {'value': acknowledge_timeout}, 'retries': {'value': retries}} if self.notnode: self.kvpairs = kvpairs else: self.kvpairs = {name: kvpairs} class InputAlertDestination(ConfluentMessage): valid_alert_params = { 'acknowledge': lambda x: False if type(x) in (unicode, bytes) and x.lower() == 'false' else bool(x), 'acknowledge_timeout': lambda x: int(x) if x and x.isdigit() else None, 'ip': lambda x: x, 'retries': lambda x: int(x) } def __init__(self, path, nodes, inputdata, multinode=False): self.alertcfg = {} if multinode: # keys are node names for node in inputdata: if not isinstance(inputdata[node], dict): break self.alertcfg[node] = inputdata[node] for key in inputdata[node]: if key not in self.valid_alert_params: raise exc.InvalidArgumentException( 'Unrecognized alert parameter ' + key) if isinstance(inputdata[node][key], dict): self.alertcfg[node][key] = \ self.valid_alert_params[key]( inputdata[node][key]['value']) else: self.alertcfg[node][key] = \ self.valid_alert_params[key](inputdata[node][key]) else: return for key in inputdata: if key not in self.valid_alert_params: raise exc.InvalidArgumentException( 'Unrecognized alert parameter ' + key) if isinstance(inputdata[key], dict): inputdata[key] = self.valid_alert_params[key]( inputdata[key]['value']) else: inputdata[key] = self.valid_alert_params[key]( inputdata[key]) for node in nodes: self.alertcfg[node] = inputdata def alert_params_by_node(self, node): return self.alertcfg[node] class SensorReadings(ConfluentMessage): readonly = True def __init__(self, sensors=(), 
name=None): readings = [] self.notnode = name is None self.myargs = (readings, name) for sensor in sensors: if isinstance(sensor, dict): readings.append(sensor) continue sensordict = {'name': sensor.name} if hasattr(sensor, 'value'): sensordict['value'] = sensor.value if hasattr(sensor, 'units'): sensordict['units'] = sensor.units if hasattr(sensor, 'states'): sensordict['states'] = sensor.states if hasattr(sensor, 'state_ids'): sensordict['state_ids'] = sensor.state_ids if hasattr(sensor, 'health'): sensordict['health'] = sensor.health if hasattr(sensor, 'type'): sensordict['type'] = sensor.type readings.append(sensordict) if self.notnode: self.kvpairs = {'sensors': readings} else: self.kvpairs = {name: {'sensors': readings}} class Firmware(ConfluentMessage): readonly = True def __init__(self, data, name): for datum in data: for component in datum: for field in datum[component]: tdatum = datum[component] if isinstance(tdatum[field], datetime): tdatum[field] = tdatum[field].strftime('%Y-%m-%dT%H:%M:%S') self.myargs = (data, name) self.notnode = name is None self.desc = 'Firmware information' if self.notnode: self.kvpairs = {'firmware': data} else: self.kvpairs = {name: {'firmware': data}} class KeyValueData(ConfluentMessage): readonly = True def __init__(self, kvdata, name=None): self.myargs = (kvdata, name) self.notnode = name is None if self.notnode: self.kvpairs = kvdata else: self.kvpairs = {name: kvdata} class Array(ConfluentMessage): def __init__(self, name, disks=None, raid=None, volumes=None, id=None, capacity=None, available=None): self.myargs = (name, disks, raid, volumes, id, capacity, available) self.kvpairs = { name: { 'type': 'array', 'disks': disks, 'raid': raid, 'id': id, 'volumes': volumes, 'capacity': capacity, 'available': available, } } class Volume(ConfluentMessage): def __init__(self, name, volname, size, state, array, stripsize=None): self.myargs = (name, volname, size, state, array, stripsize) self.kvpairs = { name: { 'type': 'volume', 
'name': simplify_name(volname), 'label': volname, 'stripsize': stripsize, 'size': size, 'state': state, 'array': array, } } class Disk(ConfluentMessage): valid_states = set([ 'fault', 'jbod', 'unconfigured', 'hotspare', 'rebuilding', 'online', ]) state_aliases = { 'unconfigured bad': 'fault', 'unconfigured good': 'unconfigured', 'global hot spare': 'hotspare', 'dedicated hot spare': 'hotspare', } def _normalize_state(self, instate): newstate = instate.lower() if newstate in self.valid_states: return newstate elif newstate in self.state_aliases: return self.state_aliases[newstate] raise Exception("Unknown state {0}".format(instate)) def __init__(self, name, label=None, description=None, diskid=None, state=None, serial=None, fru=None, array=None): self.myargs = (name, label, description, diskid, state, serial, fru, array) state = self._normalize_state(state) self.kvpairs = { name: { 'type': 'disk', 'name': simplify_name(label), 'label': label, 'description': description, 'diskid': diskid, 'state': state, 'serial': serial, 'fru': fru, 'array': array, } } class LEDStatus(ConfluentMessage): readonly = True def __init__(self, data, name): self.myargs = (data, name) self.notnode = name is None self.desc = 'led status' if self.notnode: self.kvpairs = {'leds':data} else: self.kvpairs = {name: {'leds':data}} class NetworkConfiguration(ConfluentMessage): desc = 'Network configuration' def __init__(self, name=None, ipv4addr=None, ipv4gateway=None, ipv4cfgmethod=None, hwaddr=None): self.myargs = (name, ipv4addr, ipv4gateway, ipv4cfgmethod, hwaddr) self.notnode = name is None self.stripped = False kvpairs = { 'ipv4_address': {'value': ipv4addr}, 'ipv4_gateway': {'value': ipv4gateway}, 'ipv4_configuration': {'value': ipv4cfgmethod}, 'hw_addr': {'value': hwaddr}, } if self.notnode: self.kvpairs = kvpairs else: self.kvpairs = {name: kvpairs} class
1.2 0 .65-.55 1.2-1.2 1.2z"/></svg> <div class="select-menu-item-text"> <span class="select-menu-item-heading">Create branch: <span class="js-new-item-name"></span></span> <span class="description">from ‘master’</span> </div> <input type="hidden" name="name" id="name" class="js-new-item-value"> <input type="hidden" name="branch" id="branch" value="master"> <input type="hidden" name="path" id="path" value="scraping/extract_course_info_from_json_resp.py"> </form> </div> <div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags"> <div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring"> </div> <div class="select-menu-no-results">Nothing to show</div> </div> </div> </div> </div> <div class="BtnGroup float-right"> <a href="/XuchanBao/UofTCourseCluster/find/master" class="js-pjax-capture-input btn btn-sm BtnGroup-item" data-pjax data-hotkey="t"> Find file </a> <clipboard-copy for="blob-path" role="button" aria-label="Copy file path to clipboard" class="btn btn-sm BtnGroup-item tooltipped tooltipped-s" data-copied-hint="Copied!"> Copy path </clipboard-copy> </div> <div id="blob-path" class="breadcrumb"> <span class="repo-root js-repo-root"><span class="js-path-segment"><a href="/XuchanBao/UofTCourseCluster" data-pjax="true"><span>UofTCourseCluster</span></a></span></span><span class="separator">/</span><span class="js-path-segment"><a href="/XuchanBao/UofTCourseCluster/tree/master/scraping" data-pjax="true"><span>scraping</span></a></span><span class="separator">/</span><strong class="final-path">extract_course_info_from_json_resp.py</strong> </div> </div> <div class="commit-tease"> <span class="float-right"> <a class="commit-tease-sha" href="/XuchanBao/UofTCourseCluster/commit/e991e0b6e0953f98713671fa63d51611855060e0" data-pjax> e991e0b </a> <relative-time datetime="2018-01-25T02:36:51Z">Jan 24, 2018</relative-time> </span> <div> <img alt="" class="avatar" 
data-canonical-src="https://0.gravatar.com/avatar/00f9cd9ff503ba4ccd3e0f3eca760d1c?d=https%3A%2F%2Fassets-cdn.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png&amp;r=g&amp;s=140" height="20" src="https://camo.githubusercontent.com/760787ee917f1d55be81bb9fe3294b5ef6180b80/68747470733a2f2f302e67726176617461722e636f6d2f6176617461722f30306639636439666635303362613463636433653066336563613736306431633f643d68747470732533412532462532466173736574732d63646e2e6769746875622e636f6d253246696d6167657325324667726176617461727325324667726176617461722d757365722d3432302e706e6726723d6726733d313430" width="20" /> <span class="user-mention">xb1</span> <a href="/XuchanBao/UofTCourseCluster/commit/e991e0b6e0953f98713671fa63d51611855060e0" class="message" data-pjax="true" title="Scrape CSC courses">Scrape CSC courses</a> </div> <div class="commit-tease-contributors"> <button type="button" class="btn-link muted-link contributors-toggle" data-facebox="#blob_contributors_box"> <strong>0</strong> contributors </button> </div> <div id="blob_contributors_box" style="display:none"> <h2 class="facebox-header" data-facebox-id="facebox-header">Users who have contributed to this file</h2> <ul class="facebox-user-list" data-facebox-id="facebox-description"> </ul> </div> </div> <div class="file"> <div class="file-header"> <div class="file-actions"> <div class="BtnGroup"> <a href="/XuchanBao/UofTCourseCluster/raw/master/scraping/extract_course_info_from_json_resp.py" class="btn btn-sm BtnGroup-item" id="raw-url">Raw</a> <a href="/XuchanBao/UofTCourseCluster/blame/master/scraping/extract_course_info_from_json_resp.py" class="btn btn-sm js-update-url-with-hash BtnGroup-item" data-hotkey="b">Blame</a> <a href="/XuchanBao/UofTCourseCluster/commits/master/scraping/extract_course_info_from_json_resp.py" class="btn btn-sm BtnGroup-item" rel="nofollow">History</a> </div> <a class="btn-octicon tooltipped tooltipped-nw" 
href="github-windows://openRepo/https://github.com/XuchanBao/UofTCourseCluster?branch=master&amp;filepath=scraping%2Fextract_course_info_from_json_resp.py" aria-label="Open this file in GitHub Desktop" data-ga-click="Repository, open with desktop, type:windows"> <svg aria-hidden="true" class="octicon octicon-device-desktop" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M15 2H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h5.34c-.25.61-.86 1.39-2.34 2h8c-1.48-.61-2.09-1.39-2.34-2H15c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm0 9H1V3h14v8z"/></svg> </a> <!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/XuchanBao/UofTCourseCluster/edit/master/scraping/extract_course_info_from_json_resp.py" class="inline-form js-update-url-with-hash" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="<KEY> /></div> <button class="btn-octicon tooltipped tooltipped-nw" type="submit" aria-label="Edit this file" data-hotkey="e" data-disable-with> <svg aria-hidden="true" class="octicon octicon-pencil" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M0 12v3h3l8-8-3-3-8 8zm3 2H1v-2h1v1h1v1zm10.3-9.3L12 6 9 3l1.3-1.3a.996.996 0 0 1 1.41 0l1.59 1.59c.39.39.39 1.02 0 1.41z"/></svg> </button> </form> <!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/XuchanBao/UofTCourseCluster/delete/master/scraping/extract_course_info_from_json_resp.py" class="inline-form" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="<KEY> /></div> <button class="btn-octicon btn-octicon-danger tooltipped tooltipped-nw" type="submit" aria-label="Delete this file" data-disable-with> <svg aria-hidden="true" class="octicon octicon-trashcan" height="16" 
version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11 2H9c0-.55-.45-1-1-1H5c-.55 0-1 .45-1 1H2c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1v9c0 .55.45 1 1 1h7c.55 0 1-.45 1-1V5c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm-1 12H3V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9zm1-10H2V3h9v1z"/></svg> </button> </form> </div> <div class="file-info"> 47 lines (36 sloc) <span class="file-info-divider"></span> 1.16 KB </div> </div> <div itemprop="text" class="blob-wrapper data type-python"> <table class="highlight tab-size js-file-line-container" data-tab-size="8"> <tr> <td id="L1" class="blob-num js-line-number" data-line-number="1"></td> <td id="LC1" class="blob-code blob-code-inner js-file-line"><span class="pl-k">import</span> pickle</td> </tr> <tr> <td id="L2" class="blob-num js-line-number" data-line-number="2"></td> <td id="LC2" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L3" class="blob-num js-line-number" data-line-number="3"></td> <td id="LC3" class="blob-code blob-code-inner js-file-line"><span class="pl-c1">INDEX_TO_FEATURE</span> <span class="pl-k">=</span> {</td> </tr> <tr> <td id="L4" class="blob-num js-line-number" data-line-number="4"></td> <td id="LC4" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">0</span>: <span class="pl-c1">None</span>,</td> </tr> <tr> <td id="L5" class="blob-num js-line-number" data-line-number="5"></td> <td id="LC5" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">1</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>code<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L6" class="blob-num js-line-number" data-line-number="6"></td> <td id="LC6" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">2</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>title<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L7" class="blob-num js-line-number" data-line-number="7"></td> <td id="LC7" class="blob-code blob-code-inner 
js-file-line"> <span class="pl-c1">3</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>credit<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L8" class="blob-num js-line-number" data-line-number="8"></td> <td id="LC8" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">4</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>location<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L9" class="blob-num js-line-number" data-line-number="9"></td> <td id="LC9" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">5</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>department<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L10" class="blob-num js-line-number" data-line-number="10"></td> <td id="LC10" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">6</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>session<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L11" class="blob-num js-line-number" data-line-number="11"></td> <td id="LC11" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">7</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>campus<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L12" class="blob-num js-line-number" data-line-number="12"></td> <td id="LC12" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">8</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>days of week<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L13" class="blob-num js-line-number" data-line-number="13"></td> <td id="LC13" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">9</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>level<span class="pl-pds">&quot;</span></span>,</td> </tr> <tr> <td id="L14" class="blob-num js-line-number" data-line-number="14"></td> <td id="LC14" class="blob-code blob-code-inner 
js-file-line"> <span class="pl-c1">10</span>: <span class="pl-s"><span class="pl-pds">&quot;</span>time of day<span class="pl-pds">&quot;</span></span></td> </tr> <tr> <td id="L15" class="blob-num js-line-number" data-line-number="15"></td> <td id="LC15" class="blob-code blob-code-inner js-file-line">}</td> </tr> <tr> <td id="L16" class="blob-num js-line-number" data-line-number="16"></td> <td id="LC16" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L17" class="blob-num js-line-number" data-line-number="17"></td> <td id="LC17" class="blob-code blob-code-inner js-file-line"><span class="pl-c1">JSON_KEYNAME</span> <span class="pl-k">=</span> <span class="pl-s"><span class="pl-pds">&quot;</span>aaData<span class="pl-pds">&quot;</span></span></td> </tr> <tr> <td id="L18" class="blob-num js-line-number" data-line-number="18"></td> <td id="LC18" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L19" class="blob-num js-line-number" data-line-number="19"></td> <td id="LC19" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L20" class="blob-num js-line-number" data-line-number="20"></td> <td id="LC20" class="blob-code blob-code-inner js-file-line"><span class="pl-k">def</span> <span class="pl-en">extract_course_code</span>(<span class="pl-smi">html_string</span>):</td> </tr> <tr> <td id="L21" class="blob-num js-line-number" data-line-number="21"></td> <td id="LC21" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> html_string.split(<span class="pl-s"><span class="pl-pds">&#39;</span>&lt;/a&gt;<span class="pl-pds">&#39;</span></span>)[<span class="pl-c1">0</span>].split(<span class="pl-s"><span class="pl-pds">&#39;</span>&gt;<span class="pl-pds">&#39;</span></span>)[<span class="pl-k">-</span><span class="pl-c1">1</span>]</td> </tr> <tr> <td id="L22" class="blob-num js-line-number" data-line-number="22"></td> <td id="LC22" class="blob-code blob-code-inner js-file-line"> 
</td> </tr> <tr> <td id="L23" class="blob-num js-line-number" data-line-number="23"></td> <td id="LC23" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L24" class="blob-num js-line-number" data-line-number="24"></td> <td id="LC24" class="blob-code blob-code-inner js-file-line"><span class="pl-k">def</span> <span class="pl-en">extract_course_info_from_resp_and_save</span>(<span class="pl-smi">resp_filename</span>, <span class="pl-smi">save_filename</span>):</td> </tr> <tr> <td id="L25" class="blob-num js-line-number" data-line-number="25"></td> <td id="LC25" class="blob-code blob-code-inner js-file-line"> resp_dict <span class="pl-k">=</span> pickle.load(<span class="pl-c1">open</span>(resp_filename, <span class="pl-s"><span class="pl-pds">&#39;</span>rb<span class="pl-pds">&#39;</span></span>))</td> </tr> <tr> <td id="L26" class="blob-num js-line-number" data-line-number="26"></td> <td id="LC26" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L27" class="blob-num js-line-number" data-line-number="27"></td> <td id="LC27" class="blob-code blob-code-inner js-file-line"> all_courses <span class="pl-k">=</span> {}</td> </tr> <tr> <td id="L28" class="blob-num js-line-number" data-line-number="28"></td> <td id="LC28" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> course_info_list <span class="pl-k">in</span> resp_dict[<span class="pl-c1">JSON_KEYNAME</span>]:</td> </tr> <tr> <td id="L29" class="blob-num js-line-number" data-line-number="29"></td> <td id="LC29" class="blob-code blob-code-inner js-file-line"> course_info_dict <span class="pl-k">=</span> {}</td> </tr> <tr> <td id="L30" class="blob-num js-line-number" data-line-number="30"></td> <td id="LC30" class="blob-code blob-code-inner js-file-line"> course_key <span class="pl-k">=</span> <span class="pl-s"><span class="pl-pds">&quot;</span>default_key<span class="pl-pds">&quot;</span></span></td> </tr> <tr> <td id="L31" class="blob-num 
js-line-number" data-line-number="31"></td> <td id="LC31" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">len</span>(course_info_list)):</td> </tr> <tr> <td id="L32" class="blob-num js-line-number" data-line-number="32"></td> <td id="LC32" class="blob-code blob-code-inner js-file-line"> feature <span class="pl-k">=</span> <span class="pl-c1">INDEX_TO_FEATURE</span>[i]</td> </tr> <tr> <td id="L33" class="blob-num js-line-number" data-line-number="33"></td> <td id="LC33" class="blob-code blob-code-inner js-file-line"> value <span class="pl-k">=</span> course_info_list[i]</td> </tr> <tr> <td id="L34" class="blob-num js-line-number" data-line-number="34"></td> <td id="LC34" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-k">not</span> feature:</td> </tr> <tr> <td id="L35" class="blob-num js-line-number" data-line-number="35"></td> <td id="LC35" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">continue</span></td> </tr> <tr> <td id="L36" class="blob-num js-line-number" data-line-number="36"></td> <td id="LC36" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> feature <span class="pl-k">==</span> <span class="pl-s"><span class="pl-pds">&quot;</span>code<span class="pl-pds">&quot;</span></span>:</td> </tr> <tr> <td id="L37" class="blob-num js-line-number" data-line-number="37"></td> <td id="LC37" class="blob-code blob-code-inner js-file-line"> value <span class="pl-k">=</span> extract_course_code(value)</td> </tr> <tr> <td id="L38" class="blob-num js-line-number" data-line-number="38"></td> <td id="LC38" class="blob-code blob-code-inner js-file-line"> course_key <span class="pl-k">=</span> value</td> </tr> <tr> <td id="L39" class="blob-num js-line-number" data-line-number="39"></td> <td id="LC39" class="blob-code blob-code-inner js-file-line"> course_info_dict[feature] 
<span class="pl-k">=</span> value</td> </tr> <tr> <td id="L40" class="blob-num js-line-number" data-line-number="40"></td> <td id="LC40" class="blob-code blob-code-inner js-file-line"> all_courses[course_key] <span class="pl-k">=</span> course_info_dict</td> </tr> <tr> <td id="L41" class="blob-num js-line-number" data-line-number="41"></td> <td id="LC41" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L42" class="blob-num js-line-number" data-line-number="42"></td> <td id="LC42" class="blob-code blob-code-inner js-file-line"> pickle.dump(all_courses, <span class="pl-c1">open</span>(save_filename, <span class="pl-s"><span class="pl-pds">&#39;</span>wb<span class="pl-pds">&#39;</span></span>))</td> </tr> <tr> <td id="L43" class="blob-num js-line-number" data-line-number="43"></td> <td id="LC43" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L44" class="blob-num js-line-number" data-line-number="44"></td> <td id="LC44" class="blob-code blob-code-inner js-file-line"> </td> </tr> <tr> <td id="L45" class="blob-num js-line-number" data-line-number="45"></td> <td id="LC45" class="blob-code blob-code-inner js-file-line"><span class="pl-k">if</span> <span class="pl-c1">__name__</span> <span class="pl-k">==</span> <span class="pl-s"><span class="pl-pds">&quot;</span>__main__<span class="pl-pds">&quot;</span></span>:</td> </tr> <tr> <td id="L46" class="blob-num js-line-number" data-line-number="46"></td> <td id="LC46" class="blob-code blob-code-inner js-file-line"> extract_course_info_from_resp_and_save(<span class="pl-s"><span class="pl-pds">&quot;</span>csc_resp.p<span class="pl-pds">&quot;</span></span>, <span class="pl-s"><span class="pl-pds">&quot;</span>csc_courses.p<span class="pl-pds">&quot;</span></span>)</td> </tr> </table> <div class="BlobToolbar position-absolute js-file-line-actions dropdown js-menu-container js-select-menu d-none" aria-hidden="true"> <button class="btn-octicon ml-0 px-2 p-0 bg-white border 
border-gray-dark rounded-1 dropdown-toggle js-menu-target" id="js-file-line-action-button" type="button" aria-expanded="false" aria-haspopup="true" aria-label="Inline file action toolbar" aria-controls="inline-file-actions"> <svg aria-hidden="true" class="octicon octicon-kebab-horizontal" height="16" version="1.1" viewBox="0 0 13 16" width="13"><path fill-rule="evenodd" d="M1.5 9a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3z"/></svg> </button> <div class="dropdown-menu-content js-menu-content" id="inline-file-actions"> <ul class="BlobToolbar-dropdown dropdown-menu dropdown-menu-se mt-2"> <li><a class="js-zeroclipboard dropdown-item" style="cursor:pointer;" id="js-copy-lines" data-original-text="Copy lines">Copy lines</a></li> <li><a class="js-zeroclipboard dropdown-item" id= "js-copy-permalink" style="cursor:pointer;" data-original-text="Copy permalink">Copy permalink</a></li> <li><a href="/XuchanBao/UofTCourseCluster/blame/d98adeba89f6a955803828959146032339afe258/scraping/extract_course_info_from_json_resp.py" class="dropdown-item js-update-url-with-hash" id="js-view-git-blame">View git blame</a></li> <li><a href="/XuchanBao/UofTCourseCluster/issues/new" class="dropdown-item" id="js-new-issue">Open new issue</a></li> </ul> </div> </div> </div> </div> <button type="button" data-facebox="#jump-to-line" data-facebox-class="linejump" data-hotkey="l" class="d-none">Jump to Line</button> <div id="jump-to-line" style="display:none"> <!-- '"`
few attaches under it, # but, the attaches may be different to what the DCNM has for the same network. if have_a['networkName'] == want_a['networkName']: h_in_w = True atch_h = have_a['lanAttachList'] atch_w = want_a.get('lanAttachList') for a_h in atch_h: if not a_h['isAttached']: continue a_match = False if atch_w: for a_w in atch_w: if a_h['serialNumber'] == a_w['serialNumber']: # Have is already in diff, no need to continue looking for it. a_match = True break if not a_match: del a_h['isAttached'] a_h.update({'deployment': False}) r_net_list.append(a_h) break if not h_in_w: # This block will take care of deleting all the attachments which are in DCNM but # are not mentioned in the playbook. The playbook just has the network, but, does not have any attach # under it. found = next((net for net in self.want_create if net['networkName'] == have_a['networkName']), None) if found: atch_h = have_a['lanAttachList'] for a_h in atch_h: if not a_h['isAttached']: continue del a_h['isAttached'] a_h.update({'deployment': False}) r_net_list.append(a_h) if r_net_list: in_diff = False for d_attach in self.diff_attach: if have_a['networkName'] == d_attach['networkName']: in_diff = True d_attach['lanAttachList'].extend(r_net_list) break if not in_diff: r_net_dict = { 'networkName': have_a['networkName'], 'lanAttachList': r_net_list } diff_attach.append(r_net_dict) all_nets += have_a['networkName'] + "," if not all_nets: self.diff_create = diff_create self.diff_attach = diff_attach self.diff_deploy = diff_deploy return warn_msg if not self.diff_deploy: diff_deploy.update({'networkNames': all_nets[:-1]}) else: nets = self.diff_deploy['networkNames'] + "," + all_nets[:-1] diff_deploy.update({'networkNames': nets}) self.diff_create = diff_create self.diff_attach = diff_attach self.diff_deploy = diff_deploy return warn_msg def get_diff_merge(self, replace=False): # # Special cases: # 1. 
Update gateway on an existing network: # We need to use the network_update API with PUT method to update the nw with new gw. # attach logic remains same, but, we need to re-deploy the network in any case to reflect the new gw. # 2. Update vlan-id on an existing network: # This change will only affect new attachments of the same network. # 3. Auto generate networkId if its not mentioned by user: # In this case, we need to query the DCNM to get a usable ID and use it in the payload. # And also, any such network create requests need to be pushed individually(not bulk op). diff_create = [] diff_create_update = [] diff_create_quick = [] diff_attach = [] diff_deploy = {} prev_net_id_fetched = None gw_changed = {} tg_changed = {} warn_msg = None l2only_changed = {} vn_changed = {} intdesc_changed = {} mtu_changed = {} arpsup_changed = {} dhcp1_ip_changed = {} dhcp2_ip_changed = {} dhcp3_ip_changed = {} dhcp1_vrf_changed = {} dhcp2_vrf_changed = {} dhcp3_vrf_changed = {} for want_c in self.want_create: found = False for have_c in self.have_create: if want_c['networkName'] == have_c['networkName']: found = True diff, gw_chg, tg_chg, warn_msg, l2only_chg, vn_chg, idesc_chg, mtu_chg, \ arpsup_chg, dhcp1_ip_chg, dhcp2_ip_chg, dhcp3_ip_chg, dhcp1_vrf_chg, \ dhcp2_vrf_chg, dhcp3_vrf_chg = self.diff_for_create(want_c, have_c) gw_changed.update({want_c['networkName']: gw_chg}) tg_changed.update({want_c['networkName']: tg_chg}) l2only_changed.update({want_c['networkName']: l2only_chg}) vn_changed.update({want_c['networkName']: vn_chg}) intdesc_changed.update({want_c['networkName']: idesc_chg}) mtu_changed.update({want_c['networkName']: mtu_chg}) arpsup_changed.update({want_c['networkName']: arpsup_chg}) dhcp1_ip_changed.update({want_c['networkName']: dhcp1_ip_chg}) dhcp2_ip_changed.update({want_c['networkName']: dhcp2_ip_chg}) dhcp3_ip_changed.update({want_c['networkName']: dhcp3_ip_chg}) dhcp1_vrf_changed.update({want_c['networkName']: dhcp1_vrf_chg}) 
dhcp2_vrf_changed.update({want_c['networkName']: dhcp2_vrf_chg}) dhcp3_vrf_changed.update({want_c['networkName']: dhcp3_vrf_chg}) if diff: diff_create_update.append(diff) break if not found: net_id = want_c.get('networkId', None) if not net_id: # networkId(VNI-id) is not provided by user. # Need to query DCNM to fetch next available networkId and use it here. method = 'POST' attempt = 0 while True or attempt < 10: attempt += 1 path = '/rest/managed-pool/fabrics/{}/segments/ids'.format(self.fabric) net_id_obj = dcnm_send(self.module, method, path) missing_fabric, not_ok = self.handle_response(net_id_obj, 'query_dcnm') if missing_fabric or not_ok: msg1 = "Fabric {} not present on DCNM".format(self.fabric) msg2 = "Unable to generate networkId for network: {} " \ "under fabric: {}".format(want_c['networkName'], self.fabric) self.module.fail_json(msg=msg1 if missing_fabric else msg2) if not net_id_obj['DATA']: continue net_id = net_id_obj['DATA'].get('segmentId') if net_id != prev_net_id_fetched: want_c.update({'networkId': net_id}) prev_net_id_fetched = net_id break if not net_id: self.module.fail_json(msg="Unable to generate networkId for network: {} " "under fabric: {}".format(want_c['networkName'], self.fabric)) create_path = '/rest/top-down/fabrics/{}/networks'.format(self.fabric) diff_create_quick.append(want_c) if self.module.check_mode: continue resp = dcnm_send(self.module, method, create_path, json.dumps(want_c)) self.result['response'].append(resp) fail, self.result['changed'] = self.handle_response(resp, "create") if fail: self.failure(resp) else: diff_create.append(want_c) all_nets = [] for want_a in self.want_attach: dep_net = '' found = False for have_a in self.have_attach: if want_a['networkName'] == have_a['networkName']: found = True diff, net = self.diff_for_attach_deploy(want_a['lanAttachList'], have_a['lanAttachList'], replace) if diff: base = want_a.copy() del base['lanAttachList'] base.update({'lanAttachList': diff}) diff_attach.append(base) 
dep_net = want_a['networkName'] else: if net or gw_changed.get(want_a['networkName'], False) or \ tg_changed.get(want_a['networkName'], False) or \ l2only_changed.get(want_a['networkName'], False) or \ vn_changed.get(want_a['networkName'], False) or \ intdesc_changed.get(want_a['networkName'], False) or \ mtu_changed.get(want_a['networkName'], False) or \ arpsup_changed.get(want_a['networkName'], False) or \ dhcp1_ip_changed.get(want_a['networkName'], False) or \ dhcp2_ip_changed.get(want_a['networkName'], False) or \ dhcp3_ip_changed.get(want_a['networkName'], False) or \ dhcp1_vrf_changed.get(want_a['networkName'], False) or \ dhcp2_vrf_changed.get(want_a['networkName'], False) or \ dhcp3_vrf_changed.get(want_a['networkName'], False): dep_net = want_a['networkName'] if not found and want_a.get('lanAttachList'): atch_list = [] for attach in want_a['lanAttachList']: del attach['isAttached'] if bool(attach['deployment']): atch_list.append(attach) if atch_list: base = want_a.copy() del base['lanAttachList'] base.update({'lanAttachList': atch_list}) diff_attach.append(base) dep_net = want_a['networkName'] if dep_net: all_nets.append(dep_net) if all_nets: diff_deploy.update({'networkNames': ','.join(all_nets)}) self.diff_create = diff_create self.diff_create_update = diff_create_update self.diff_attach = diff_attach self.diff_deploy = diff_deploy self.diff_create_quick = diff_create_quick return warn_msg def format_diff(self): diff = [] diff_create = copy.deepcopy(self.diff_create) diff_create_quick = copy.deepcopy(self.diff_create_quick) diff_create_update = copy.deepcopy(self.diff_create_update) diff_attach = copy.deepcopy(self.diff_attach) diff_detach = copy.deepcopy(self.diff_detach) diff_deploy = self.diff_deploy['networkNames'].split(",") if self.diff_deploy else [] diff_undeploy = self.diff_undeploy['networkNames'].split(",") if self.diff_undeploy else [] diff_create.extend(diff_create_quick) diff_create.extend(diff_create_update) diff_attach.extend(diff_detach) 
diff_deploy.extend(diff_undeploy) for want_d in diff_create: found_a = next((net for net in diff_attach if net['networkName'] == want_d['networkName']), None) found_c = want_d json_to_dict = json.loads(found_c['networkTemplateConfig']) found_c.update({'net_name': found_c['networkName']}) found_c.update({'vrf_name': found_c.get('vrf', "NA")}) found_c.update({'net_id': found_c['networkId']}) found_c.update({'vlan_id': json_to_dict.get('vlanId', "")}) found_c.update({'gw_ip_subnet': json_to_dict.get('gatewayIpAddress', "")}) found_c.update({'net_template': found_c['networkTemplate']}) found_c.update({'net_extension_template': found_c['networkExtensionTemplate']}) found_c.update({'is_l2only': json_to_dict.get('isLayer2Only', False)}) found_c.update({'vlan_name': json_to_dict.get('vlanName', "")}) found_c.update({'int_desc': json_to_dict.get('intfDescription', "")}) found_c.update({'mtu_l3intf': json_to_dict.get('mtu', "")}) found_c.update({'arp_suppress': json_to_dict.get('suppressArp', False)}) found_c.update({'dhcp_srvr1_ip': json_to_dict.get('dhcpServerAddr1', "")}) found_c.update({'dhcp_srvr2_ip': json_to_dict.get('dhcpServerAddr2', "")}) found_c.update({'dhcp_srvr3_ip': json_to_dict.get('dhcpServerAddr3', "")}) found_c.update({'dhcp_srvr1_vrf': json_to_dict.get('vrfDhcp', "")}) found_c.update({'dhcp_srvr2_vrf': json_to_dict.get('vrfDhcp2', "")}) found_c.update({'dhcp_srvr3_vrf': json_to_dict.get('vrfDhcp3', "")}) found_c.update({'attach': []}) del found_c['fabric'] del found_c['networkName'] del found_c['networkId'] del found_c['networkTemplate'] del found_c['networkExtensionTemplate'] del found_c['networkTemplateConfig'] del found_c['vrf'] if diff_deploy and found_c['net_name'] in diff_deploy: diff_deploy.remove(found_c['net_name']) if not found_a: diff.append(found_c) continue attach = found_a['lanAttachList'] for a_w in attach: attach_d = {} detach_d = {} for k, v in self.ip_sn.items(): if v == a_w['serialNumber']: attach_d.update({'ip_address': k}) break if 
a_w['detachSwitchPorts']: detach_d.update({'ip_address': attach_d['ip_address']}) detach_d.update({'ports': a_w['detachSwitchPorts']}) detach_d.update({'deploy': False}) found_c['attach'].append(detach_d) attach_d.update({'ports': a_w['switchPorts']}) attach_d.update({'deploy': a_w['deployment']}) found_c['attach'].append(attach_d) diff.append(found_c) diff_attach.remove(found_a) for vrf in diff_attach: new_attach_dict = {} new_attach_list = [] attach = vrf['lanAttachList'] for a_w in attach: attach_d = {} detach_d = {} for k, v in self.ip_sn.items(): if v == a_w['serialNumber']: attach_d.update({'ip_address': k}) break if a_w['detachSwitchPorts']: detach_d.update({'ip_address': attach_d['ip_address']}) detach_d.update({'ports': a_w['detachSwitchPorts']}) detach_d.update({'deploy': False}) new_attach_list.append(detach_d) attach_d.update({'ports': a_w['switchPorts']}) attach_d.update({'deploy': a_w['deployment']}) new_attach_list.append(attach_d) if new_attach_list: if diff_deploy and vrf['networkName'] in diff_deploy: diff_deploy.remove(vrf['networkName']) new_attach_dict.update({'attach': new_attach_list}) new_attach_dict.update({'net_name': vrf['networkName']}) diff.append(new_attach_dict) for net in diff_deploy: new_deploy_dict = {'net_name': net} diff.append(new_deploy_dict) self.diff_input_format = diff def get_diff_query(self): method = 'GET' path = '/rest/top-down/fabrics/{}/vrfs'.format(self.fabric) vrf_objects = dcnm_send(self.module, method, path) missing_fabric, not_ok = self.handle_response(vrf_objects, 'query_dcnm') if missing_fabric or not_ok: msg1 = "Fabric {} not present on DCNM".format(self.fabric) msg2 = "Unable to find VRFs under fabric: {}".format(self.fabric) self.module.fail_json(msg=msg1 if missing_fabric else msg2) return if self.config: query = [] if self.have_create or self.have_attach: for want_c in self.want_create: # Query the Network item = {'parent': {}, 'attach': []} path = '/rest/top-down/fabrics/{}/networks/{}'.format(self.fabric, 
want_c['networkName']) network = dcnm_send(self.module, method, path) if not network['DATA']: continue net = network['DATA'] if (want_c['networkId'] == net['networkId']) and want_c['vrf'] == net['vrf']: item['parent'] = net item['parent']['networkTemplateConfig'] = json.loads(net['networkTemplateConfig']) # Query the Attachment for the found Networks path = '/rest/top-down/fabrics/{}/networks/attachments?network-names={}'. \ format(self.fabric, want_c['networkName']) net_attach_objects = dcnm_send(self.module, method, path) if not net_attach_objects['DATA']: return for net_attach in net_attach_objects['DATA']: if want_c['networkName'] == net_attach['networkName']: if not net_attach.get('lanAttachList'): continue attach_list = net_attach['lanAttachList'] for attach in attach_list: # append the attach network details item['attach'].append(attach) query.append(item) else: query = [] path = '/rest/top-down/fabrics/{}/networks'.format(self.fabric) networks = dcnm_send(self.module, method, path) if not networks['DATA']: return for net in networks['DATA']: item = {'parent': {}, 'attach': []} # append the parent network details item['parent'] = net item['parent']['networkTemplateConfig'] = json.loads(net['networkTemplateConfig']) # fetch the attachment for the network path = '/rest/top-down/fabrics/{}/networks/attachments?network-names={}'. \ format(self.fabric, net['networkName']) net_attach_objects = dcnm_send(self.module, method, path) if not net_attach_objects['DATA']: return for net_attach in net_attach_objects['DATA']: if not net_attach.get('lanAttachList'): continue attach_list = net_attach['lanAttachList'] for attach in attach_list: # append the attach network details item['attach'].append(attach) query.append(item) self.query = query def wait_for_del_ready(self): method = 'GET' if self.diff_delete: for net in self.diff_delete: state = False path = '/rest/top-down/fabrics/{}/networks/attachments?network-names={}'.format(self.fabric,
import numpy as np # import cupy as np from utils.base_optim import * class Layer(object): ''' 所有 Layer 的基类''' def __init__(self, name="layer"): # 层名 self.name = name # 标志,当处于训练状态时设置为 true,当处于测试状态时设置为 false self.train = True def __call__(self, x): # 重写 __call__ 之前使用前向传播:out = layer.forward(x) # 重写 __call__ 之后使用前向传播:out = layer(x) ✔ 更加方便 return self.forward(x) def forward(self, x): ''' 前向传播,所有继承自 Layer 类的层都要实现各自的前向传播函数 Args: x : 输入 Return: 前向传播结果 ''' return x def backward(self, dout): ''' 反向传播,所有继承自 Layer 类的层都要实现各自的反向传播函数 Args: dout : 输入梯度(一般为后面层反向传播过来的梯度) Return: 输入梯度 dout 对输入 x 的偏导数 dx(作为下一层的输入) ''' return dout def get_weights(self): ''' 获取本层的所有参数,所有继承自 Layer 类的具有可训练参数的层都要实现各自的 get_weights 函数 Return: 应该为 dict,dict 的 key 为参数名,value 为参数值 ''' return None def set_weights(self, weights_dict): ''' 用输入的参数更新本层的所有参数,所有继承自 Layer 类的具有可训练参数的层都要实现各自的 set_weights 函数 Args: weights_dict: 应该为 dict,dict 的 key 为参数名,value 为参数值,与 get_weights() 函数的输出对应 ''' pass def set_mode(self, train=True): ''' 用于递归更新当前训练状态,如 train=True -> train=False Args: train: bool 值,标志新的训练状态,若为 train 则为 True,若为 test 则为 False ''' self.train = train class Linear(Layer): ''' 全连接层 Args: in_node: 输入节点数 out_node: 输出节点数 weight_scale: 用于确定此层初始化值的大小,此层的初始化参数为 randn() * weight_scale name: 层名 optimizer:使用的优化器 ''' def __init__(self, in_node, out_node, weight_scale=1e-3, name="fc_layer", optimizer=Optimizer(0.1)): ''' 初始化层 ''' super(Linear, self).__init__(name=name) self.in_node = in_node self.out_node = out_node self.optimizer = optimizer # 参数初始化 self.weight = np.random.randn(in_node, out_node) * weight_scale self.b = np.zeros((out_node,)) def forward(self, x): ''' 前向传播 ''' # 如果输入为 None 则输出此层的信息 if x is None: print("(%s)\n\tLinear Layer -> in_node=%d\tout_node=%d" % (self.name, self.in_node, self.out_node)) return None # 判断输入是否符合此层的输入维度要求 assert np.shape(x)[1] == self.in_node, "正向传播输入数据维度不匹配!" 
# 前向传播 out = x @ self.weight + self.b # 保存此次前向传播的现场,用于反向传播 self.cache = (x, self.weight, out) return out def backward(self, dout): ''' 反向传播 ''' # 判断输入是否符合此层的输入维度要求 assert np.shape(dout)[1] == self.out_node, "反向传播输入梯度维度不匹配!" # 恢复前向传播的现场 (x, w, _) = self.cache # 反向传播,计算三个偏导 db = np.sum(dout, 0) dw = x.T @ dout dx = dout @ w.T # 更新参数 self.b = self.optimizer.optim(self.b, db) self.weight = self.optimizer.optim(self.weight, dw, add_reg=True) return dx def get_weights(self): ''' 获取参数 ''' return {"weights":self.weight, "bias":self.b} def set_weights(self, weights_dict): ''' 导入参数 ''' # 判断输入的参数形状是否与本层参数相匹配 assert weights_dict["weights"].shape == self.weight.shape, self.name + " 层权重参数大小输入不匹配,导入参数失败!应为 " + str(self.weight.shape) + " 实际为 " + str(weights_dict["weights"].shape) assert weights_dict["bias"].shape == self.b.shape, self.name + " 层偏置参数大小输入不匹配,导入参数失败!应为 " + str(self.b.shape) + " 实际为 " + str(weights_dict["bias"].shape) # 导入参数 self.weight = weights_dict["weights"] self.b = weights_dict["bias"] print("%s 层参数导入成功" % self.name) class ReLU(Layer): ''' ReLU 层 ''' def __init__(self, name="relu_layer"): ''' 初始化 ''' super(ReLU, self).__init__(name=name) def forward(self, x): ''' 前向传播 ''' if x is None: print("(%s)\n\tReLU Layer" % (self.name)) return None # 真正前向传播部分在这里 out = np.maximum(0, x) self.cache = x return out def backward(self, dout): ''' 反向传播 ''' x = self.cache # 真正反向传播部分在这里 dx = dout dx[x < 0] = 0 return dx class Conv2D(Layer): ''' 卷积层 Args: in_channels: 输入通道数 out_channels: 输出通道数 kernel_size: 卷积核长宽(默认长等于宽) stride: 步长 pad:补零,补零策略为上下左右各补 pad 个零 weight_scale: 用于确定此层初始化值的大小 name: 层名 mode:若为 "fast" 使用速度较快的 im2col 版本卷积,若为 "origin" 则使用原版卷积(速度较慢) optimizer: 优化器 ''' def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1, weight_scale=1e-3, name="conv_layer", mode="origin", optimizer=Optimizer(0.1)): ''' 初始化 ''' super(Conv2D, self).__init__(name=name) # 参数初始化 self.filter_weight = np.random.randn(out_channels, in_channels, kernel_size, kernel_size) * 
weight_scale self.bias = np.zeros((out_channels,)) self.kernel_size = kernel_size self.stride = stride self.pad = pad self.optimizer = optimizer assert mode in ["fast", "origin"], 'mode 必须为 "fast" 或 "origin" 之一' self.mode = mode # 选择是使用 im2col 快速卷积还是普通的卷积 def forward(self, x): ''' 前向传播 ''' if self.mode == "fast": # 若 mode == "fast" 调用 im2col 版本的前向传播函数 out = self.forward_im2col(x) elif self.mode == "origin": # 若 mode == "origin" 调用 origin 版本的前向传播函数 out = self.forward_origin(x) return out def backward(self, dout): ''' 反向传播 ''' if self.mode == "fast": dx = self.backward_col2im(dout) elif self.mode == "origin": dx = self.backward_origin(dout) return dx def forward_origin(self, x): ''' 卷积前向传播,原始版本 ''' if x is None: print("(%s)\n\tConv2D Layer -> in_channel=%d\tout_channel=%d\tkernal_size=%d\tstride=%d\tpad=%d" % ( self.name, np.shape(self.filter_weight)[1], np.shape(self.filter_weight)[0], self.kernel_size, self.stride, self.pad)) return None # 前向传播部分 N, C, H, W = x.shape w = self.filter_weight b = self.bias F, _, HH, WW = w.shape pad = self.pad stride = self.stride H_new = int(1 + (H+2*pad-HH) / stride) W_new = int(1 + (W+2*pad-WW) / stride) # pad x_pad = np.zeros((N, C, H+2*pad, W+2*pad)) x_pad[:, :, pad:-pad, pad:-pad] = x.copy() # 初始化输出 out = np.zeros((N, F, H_new, W_new)) # 卷积操作: # 先 reshape,将输入图片 reshape 成 N x 1 x (C x H_pad x W_pad) 形状 # 将卷积核 reshape 成 1 x F x (C x H_flt x W_flt) 形状 # 这样相乘时括号里的部分相乘(输入的图片中 H_pad 和 W_pad 会截取 H_flt 和 W_flt 的大小),括号外的部分广播,得到 N x F x C x H_flt x W_flt # 对后三个维度求和,得到 N x F x 1 大小向量 # 以上操作进行 H_new x W_new 次,得到 N x F x H_new x W_new,此为本次卷积的输出结果 # reshape x_5d = x_pad.reshape((N,1,C,H+2*pad,W+2*pad)) w_5d = w.reshape((1,F,C,HH,WW)) # 遍历 (index_y, index_x) 属于 (0 ~ H_new-1, 0 ~ W_new-1),开始卷积 for (index_y,index_x) in zip(*np.where(np.ones((H_new,W_new)))): # 截取输入 x_5d 中长 H_flt 宽 W_flt 的一部分 conv_region = x_5d[:,:,:,(stride*index_y):(stride*index_y+HH),(stride*index_x):(stride*index_x+WW)] # 卷积操作主要计算 out[:, :, index_y, index_x] = 
np.sum(conv_region * w_5d, axis=(2,3,4)) # 加上 bias out = out + b.reshape((1, -1, 1, 1)) # 保存现场 self.cache = (x, w, b) return out def backward_origin(self, dout): ''' 卷积反向传播,原始版本 ''' x, w, b = self.cache # 反向传播部分 N, C, H, W = x.shape F, _, HH, WW = w.shape _, _, H_out, W_out = dout.shape pad = self.pad stride = self.stride dw = np.zeros_like(w) dx = np.zeros_like(x) # 获取卷积时的 x_pad x_pad = np.zeros((N, C, H+2*pad, W+2*pad)) dx_pad = x_pad.copy() x_pad[:, :, pad:-pad, pad:-pad] = x.copy() # 卷积反向传播: # 忽略深度,两个 N x F x H_flt x W_flt 的块卷积成一个点(就是前面的 N x F x 1),这里要从这个点还原两个块 # 由于一个块乘以另一个块得到这个点,所以反过来某个块的梯度就是这个点乘以其中的另一个块 # reshape,这里的 x_5d 和 w_5d 与卷积时的 x_5d 和 w_5d 相同 x_5d = x_pad.reshape((N,1,C,H+2*pad,W+2*pad)) w_5d = w.reshape((1,F,C,HH,WW)) # 遍历 (index_y, index_x) 属于 (0 ~ H_new-1, 0 ~ W_new-1),开始卷积的反向传播 for (index_y,index_x) in zip(*np.where(np.ones((H_out,W_out)))): # 获取卷积区域 x_5d_region = x_5d[:,:,:,(stride*index_y):(stride*index_y+HH),(stride*index_x):(stride*index_x+WW)] # 取出卷积的那个点(其实是 N x F 个点,因为一次进行 N x F 次卷积) block_dout = dout[:,:,index_y,index_x].reshape((N,F,1,1,1)) # 得到 dout 对块 x 的梯度 block_dx = np.sum(w_5d * block_dout, 1) # 得到 dout 对块 w 的梯度 block_dw = np.sum(x_5d_region * block_dout, 0) # 总梯度为 n 次卷积反向传播得到的梯度相加(当然位置要对应) dw += block_dw dx_pad[:,:,(stride*index_y):(stride*index_y+HH),(stride*index_x):(stride*index_x+WW)] += block_dx # 得到 dx(dx_pad 去掉 pad 的部分) dx = dx_pad[:, :, pad:-pad, pad:-pad] # 得到 db db = np.sum(dout, (0,2,3)) # 更新参数 self.filter_weight = self.optimizer.optim(self.filter_weight, dw, add_reg=True) self.bias = self.optimizer.optim(self.bias, db) return dx def forward_im2col(self, x): ''' 卷积前向传播,利用 im2col 加速 ''' # 前面部分与正常卷积相同 if x is None: print("(%s)\n\tConv2D Layer -> in_channel=%d\tout_channel=%d\tkernal_size=%d\tstride=%d\tpad=%d" % ( self.name, self.filter_weight.shape[1], self.filter_weight.shape[0], self.kernel_size, self.stride, self.pad)) return None N, C, H_in, W_in = x.shape F, _, H_filter, W_filter = self.filter_weight.shape x_pad = 
np.pad(x, ((0,0), (0,0), (self.pad, self.pad), (self.pad,self.pad)), "constant") assert (H_in + 2*self.pad - H_filter) % self.stride == 0, "H + 2*pad - filter_size 不为 stride 整数倍" assert (W_in + 2*self.pad - W_filter) % self.stride == 0, "W + 2*pad - filter_size 不为 stride 整数倍" H_out = int(1 + (H_in + 2*self.pad - H_filter) / self.stride) W_out = int(1 + (W_in + 2*self.pad - W_filter) / self.stride) # im2col 操作: # 取出所有卷积区域(单个卷积区域大小为 C x H_filter x W_filter,一共有 H_out x W_out 个卷积区域,所以所有的卷积区域可以组成一个 # N x H_out x W_out x C x H_filter x W_filter 大小的向量,怎么取的牵扯到 np.lib.stride_tricks.as_strided 函数,可以网上搜这 # 个函数的说明)组成向量 x_col,然后将其后三个维度展平(变成大小为 N x H_out x W_out x (C * H_filter * W_filter) 的四维向量) # 同时将 F 个卷积核组成的向量 F x C x H_filter x W_filter 也展平(变成大小为 F x (C * H_filter * W_filter) 的二维向量),然后与 # 展平后的 x_col 向量做矩阵乘,得到 N x H_out x W_out x (C * H_filter * W_filter) @ (C * H_filter * W_filter) x F = N x H_out x W_out x F, # 把第四个维度换到前面来就是卷积的输出 N x F x H_out x W_out # np.lib.stride_tricks.as_strided 的 strides 参数选择: # x_pad: N x C x H_in+2*pad x W_in+2*pad # x_col: N x (H_out x W_out <- 卷积次数) x (C x H_filter x W_filter <- 一次卷积的区域) # x_col[x,x,x,x,x,i]: stride = base_stride (x_col[x,x,x,x,x,1] -> x_col[x,x,x,x,x,2] 这两个数在 x_pad 上距离为 stride) # x_col[x,x,x,x,i,x]: stride = (W_in+2*self.pad)*base_stride (x_col[x,x,x,x,1,1] -> x_col[x,x,x,x,2,1] 这两个数在 x_pad 上距离为 stride) # x_col[x,x,x,i,x,x]: stride = (H_in+2*pad) * (W_in+2*pad) * base_stride (x_col[x,x,x,1,1,1] -> x_col[x,x,x,2,1,1] 这两个数在 x_pad
def __init__(self, symbol_file="./models/symbols.txt", debug=False):
    """Load the symbol table and precompile every tokenisation regex."""
    self.debug = debug
    self._symbol2key = self.readSymbolFile(symbol_file)

    # Raw patterns (exposed read-only through the properties below).
    self._hanja = "([\u2e80-\u2eff\u31c0-\u31ef\u3200-\u32ff\u3400-\u4dbf\u4e00-\u9fbf\uf900-\ufaff]+)"
    self._english = "([a-zA-Z]+)"
    self._number = "([0-9]+)"
    self._sharp = "(#+)"
    self._key = "([A-Z]+##)"

    # Compiled versions of the raw patterns above.
    self._pattern_sharp = re.compile(self.sharp)
    self._pattern_english = re.compile(self.english)
    self._pattern_hanja = re.compile(self.hanja)
    self._pattern_number = re.compile(self.number)

    # One alternation pattern per symbol class ("#" is reserved for the
    # placeholder syntax and is therefore excluded).
    self._patterns = dict()
    for symbol in list(self.symbol2key.keys()):
        escaped_items = [re.escape(entity) for entity in self.symbol2key[symbol] if entity != "#"]
        self._patterns.setdefault(symbol, re.compile("({})".format("|".join(escaped_items))))

    self._key_pattern = re.compile(self._key)
    self._pattern_punctuations = re.compile("^[\.\?!,:;]+$")

    self.upostag_map = UPosTagMap()
    self.oo = SimplifyDepParse()
    self.unicode_symbol()
def readSymbolFile(self, filepath):
    """Parse the symbol description file into a nested mapping.

    The file consists of blank-line separated sections.  The first line
    of a section is ``KEY: ...`` and every following line is an
    ``entity: frequency`` pair belonging to that key.

    :param filepath: path of the symbol file.
    :return: dict mapping each key to an ``{entity: int(freq)}`` dict.
    """
    symbol2key = dict()
    # Context manager so the file handle is always closed
    # (the previous version leaked the open file object).
    with open(filepath, "r") as f:
        file_contents = [x.strip() for x in f.read().split("\n\n") if x != ""]
    for file_content in file_contents:
        for idx, line in enumerate(file_content.split("\n")):
            line = line.strip()
            if line == "":
                continue
            if idx == 0:
                # Section header: everything before the first colon.
                key = line.split(":")[0]
                symbol2key.setdefault(key, {})
            else:
                entity, freq = line.split(": ")
                symbol2key[key].setdefault(entity, int(freq))
    if self.debug:
        self.debug_print("Find the Symbol Part-of-Speech({})".format(len(symbol2key)))
        self.debug_print("Symbol Keys: {}".format(list(symbol2key.items())))
    return symbol2key
def prefix_final_or_pause_punctuation(self, sentence):
    """Glue a purely-punctuation token onto the preceding token.

    ``"hello , world ."`` becomes ``"hello, world."``.  A punctuation
    token with no preceding token (a sentence that starts with
    punctuation) is kept as its own token instead of raising
    ``IndexError`` as the previous version did.
    """
    convert_sentence = []
    for token in sentence.split(" "):
        # Attach e.g. "." or "?!" to the previous word, if one exists.
        if self.pattern_punctuations.search(token) and convert_sentence:
            convert_sentence[-1] += token
        else:
            convert_sentence.append(token)
    return " ".join(convert_sentence)
def divided_brackets(self, sentence):
    """Split unmatched brackets/quotes off their neighbouring tokens.

    Returns a triple ``(divided_tokens, original_sentence, space_info)``
    where ``divided_tokens`` interleaves the split tokens with the
    ``"<sa>"`` separator, ``original_sentence`` is the re-spaced
    sentence, and ``space_info[i]`` tells whether token ``i`` was glued
    to the next one in the incoming sentence.

    The previous version repeated the same open/close logic for eight
    bracket pairs; it is now table-driven with identical behavior.
    """
    # (opener, closer) pairs, handled in this order.
    pairs = [
        ("[", "]"), ("(", ")"), ("{", "}"), ("「", "」"),
        ("『", "』"), ("<", ">"), ("《", "》"), ("【", "】"),
    ]
    # Characters split off when they appear exactly once in a token.
    singles = [("\'", " \' "), ("\"", " \" "), ("˝", " \" "), (":", " : "), ("-", " - ")]

    divided_tokens = []
    org_sentence = []
    org_sentence_space_info = []
    for token in sentence.split(" "):
        changed = False
        for opener, closer in pairs:
            if opener in token and closer not in token:
                token = token.replace(opener, " {} ".format(opener))
                changed = True
            if opener not in token and closer in token:
                # 190412: keep "word]." together — do not pad after the
                # closer when it is followed only by final punctuation.
                if re.search(re.escape(closer) + "[\.\?!]+$", token):
                    token = token.replace(closer, " {}".format(closer))
                else:
                    token = token.replace(closer, " {} ".format(closer))
                changed = True
        for ch, spaced in singles:
            if token.count(ch) == 1:
                token = token.replace(ch, spaced)
                changed = True

        result_tmp_tokens = [t for t in token.split(" ") if t != ""] if changed else [token]
        n_parts = len(result_tmp_tokens)
        for idx, part in enumerate(result_tmp_tokens, start=1):
            divided_tokens.append(part)
            divided_tokens.append("<sa>")
            org_sentence.append(part)
            # True while more parts of the same original token follow.
            org_sentence_space_info.append(idx != n_parts)
    return divided_tokens[:-1], " ".join(org_sentence), org_sentence_space_info
def getCharacter(self, entity):
    """Explode *entity* into single characters, keeping any leading
    ``KEY##`` placeholder (e.g. ``SL##``) intact as one element."""
    characters = []
    remaining = entity
    while remaining:
        match = re.search("(^[A-Z]+##)", remaining)
        if match:
            placeholder = match.group()
            characters.append(placeholder)
            remaining = remaining[len(placeholder):]
        else:
            characters.append(remaining[0])
            remaining = remaining[1:]
    return characters
org_space_info_of_sentence def input_conversion_sentences(self, sentences): """ :param sentences: [sentence_1, sentence_2, sentence_3,...] :return: [[character of sentence_1], [character of sentence_2], ...]], [[{SF##: [...], SN##: [...], ...}], ...] """ org_sentences = [] org_sentence_space_infos = [] input_sentences = [] sentence_symbol_mappings = [] for sent in sentences: input_sentence, sentence_symbol_mapping, org_of_sentence, org_space_info_of_sentence =
""" .. /------------------------------------------------------------------------------\ | -- FACADE TECHNOLOGIES INC. CONFIDENTIAL -- | |------------------------------------------------------------------------------| | | | Copyright [2019] Facade Technologies Inc. | | All Rights Reserved. | | | | NOTICE: All information contained herein is, and remains the property of | | Facade Technologies Inc. and its suppliers if any. The intellectual and | | and technical concepts contained herein are proprietary to Facade | | Technologies Inc. and its suppliers and may be covered by U.S. and Foreign | | Patents, patents in process, and are protected by trade secret or copyright | | law. Dissemination of this information or reproduction of this material is | | strictly forbidden unless prior written permission is obtained from Facade | | Technologies Inc. | | | \------------------------------------------------------------------------------/ This module contains the Project class. """ from typing import Dict, Tuple import json import os from subprocess import PIPE import psutil from PySide2.QtWidgets import QTreeView, QMessageBox, QProgressDialog from PySide2.QtCore import Qt, QTimer from data.tguim.targetguimodel import TargetGuiModel from data.apim.apimodel import ApiModel from data.entity import Entity from qt_models.projectexplorermodel import ProjectExplorerModel from tguiil.explorer import Explorer from tguiil.observer import Observer import data.statemachine as sm import libs.env as env from libs.logging import main_logger as logger class Project: """ This class is the top level to a Facile Project. It stores information about the target application, the target GUI model, the API model, compilation profiles, etc. .. note:: Only one project can be stored in each directory. .. todo:: Create custom exceptions and check input in setters .. 
def __init__(self, name: str, description: str, exe: str, backend: str,
             projectDir: str = "~/", startupTimeout: int = 10) -> None:
    """
    Constructs a Project object.

    (The previous ``-> 'Project'`` return annotation was wrong:
    ``__init__`` always returns None.)

    :param name: The name of the project.
    :type name: str
    :param description: The project description.
    :type description: str
    :param exe: The executable file of the target application.
    :type exe: str
    :param backend: The accessibility technology used to control the target application.
    :type backend: str
    :param projectDir: The directory that the project is stored in.
    :type projectDir: str
    :param startupTimeout: The number of seconds to wait for the target application to startup.
    :type startupTimeout: int
    """
    # Plain attributes, populated through the setters below.
    self._projectDir = None
    self._description = None
    self._name = None
    self._executable = None
    self._backend = None
    self._startupTimeout = None
    self._targetGUIModel = TargetGuiModel()
    self._apiModel = ApiModel()
    self._process = None
    self._observer = None
    self._explorer = None
    self.autoCloseAppOnExit = None
    self.acaWarningShown = False
    self._notif = None  # This temporarily holds a dialog
    self._timer = None

    # project information
    self.setProjectDir(os.path.abspath(projectDir))
    self.setDescription(description)
    self.setName(name)

    # target application information
    self.setExecutableFile(exe)
    self.setBackend(backend)
    self.setStartupTimeout(startupTimeout)
def _newObserver(self, captureImages: bool) -> 'Observer':
    """Create an Observer for the current target process and wire its signals."""
    observer = Observer(self._process.pid, captureImages, self._backend)
    observer.newSuperToken.connect(self._targetGUIModel.createComponent,
                                   type=Qt.BlockingQueuedConnection)
    observer.backendDetected.connect(lambda be: self.setBackend(be))
    return observer

def getObserver(self) -> 'Observer':
    """
    Gets the project's observer, (re)creating it when the target
    process changed since the last call.  The duplicated construction
    code of the previous version is factored into ``_newObserver``.

    :return: The project's observer, or None when the target app is not running.
    :rtype: Observer
    """
    detailedViewAction = sm.StateMachine.instance.view.ui.actionDetailed_View
    captureImages = detailedViewAction.isChecked()
    if self._process is None or not self._process.is_running():
        return None

    new = False
    if self._observer is None:
        self._observer = self._newObserver(captureImages)
        new = True
    elif self._observer.getPID() != self._process.pid:
        # The target app was restarted: drop the stale observer first.
        self._observer.pause()
        self._observer = self._newObserver(captureImages)
        new = True

    if new:
        self._observer.loadSuperTokens(self._targetGUIModel)
        detailedViewAction.triggered.connect(self._observer.captureImages,
                                             type=Qt.QueuedConnection)
    return self._observer
def setStartupTimeout(self, timeout: int) -> None:
    """
    Sets the timeout for the target application startup time.

    :param timeout: the timeout (in seconds) for starting up the target application.
    :type timeout: int

    :return: None
    :rtype: NoneType
    """
    self._startupTimeout = timeout
def stopTargetApplication(self) -> None:
    """
    Kills the target application if it is running.  Best effort: the
    process may already have exited, in which case the error is
    deliberately ignored.  (The previous bare ``except:`` also
    swallowed KeyboardInterrupt/SystemExit; it is now narrowed.)

    :return: None
    :rtype: NoneType
    """
    if self._process is None:
        return
    try:
        self._process.kill()
    except Exception:
        # The process is already gone (or cannot be killed); never propagate.
        pass
:rtype: psutil.Process or NoneType """ if (self._process is None) or (not self._process.is_running()): return None return self._process def getProjectExplorerModel(self, view: QTreeView) -> ProjectExplorerModel: """ Gets a model that allows a Qt tree view to access the data in a limited manner. :param view: The view to place the model into :type view: QTreeView :return: The project explorer model :rtype: ProjectExplorerModel """ return ProjectExplorerModel(self, view) @staticmethod def load(projectFile: str, onEntityCreation = None, onCompletion = None) -> 'Project': """ Creates a Project object from a .fcl file. :param projectFile: The project's .fcl file :type projectFile: str :param onEntityCreation: The function to run when an entity is created (may be None) :type onEntityCreation: callable :param onCompletion: The function to run when loading is complete :type onCompletion: callable :return: The project object reconstructed from a .fcl file. :rtype: Project """ with open(projectFile) as mainProjectFile: projectJSON = json.loads(mainProjectFile.read()) projectDir = os.path.dirname(projectFile) name = projectJSON["Project Information"]["Name"] description = projectJSON["Project Information"]["Description"] exe = projectJSON["Application Information"]["Target Application"] backend = projectJSON["Application Information"]["Backend"] startupTimeout = projectJSON["Application Information"]["Startup Timeout"] autoClose = projectJSON["Settings"]["Close App on Exit"] warningShown = projectJSON["Settings"]["AutoClose Warning Shown"] loadedProject = Project(name, description, exe, backend, projectDir, startupTimeout) loadedProject.autoCloseAppOnExit = autoClose loadedProject.acaWarningShown = warningShown Entity.onCreation = onEntityCreation loadedProject._targetGUIModel = TargetGuiModel.fromDict(projectJSON["Data Structures"]["Target GUI Model"]) loadedProject._apiModel = ApiModel.fromDict(projectJSON["Data Structures"]["API Model"], loadedProject._targetGUIModel) 
Entity.onCreation = None onCompletion() return loadedProject @staticmethod def getEntityCount(mainFile:str) -> None: """ Gets the number of entities from a project. :param mainFile: The url to the project file (*.fcl) :type mainFile: str :return: """ mainProjectFile = open(mainFile) contents = mainProjectFile.read() projectJSON = json.loads(contents) mainProjectFile.close() return projectJSON["Project Information"].get("Model Entities", 1_000_000) def save(self) -> None: """ Writes a project out to disk as a set of files. (.fcl, .tguim, .apim) :return: None :rtype: NoneType """ projectDict = {} projectDict["Project Information"] = {} projectDict["Project Information"]["Name"] = self._name projectDict["Project Information"]["Description"] = self._description projectDict["Project Information"]["Model Entities"] = Entity.count projectDict["Application Information"] = {} projectDict["Application Information"]["Target Application"] = self._executable projectDict["Application Information"]["Backend"] = self._backend projectDict["Application Information"]["Startup Timeout"] = self._startupTimeout projectDict["Settings"] = {} projectDict["Settings"]["Close App on Exit"] = self.autoCloseAppOnExit projectDict["Settings"]["AutoClose Warning Shown"] = self.acaWarningShown projectDict["Data Structures"] = {} projectDict["Data Structures"]["Target GUI Model"] = self._targetGUIModel.asDict() projectDict["Data Structures"]["API Model"] = self._apiModel.asDict() # save the project file with open(self.getProjectFile(), "w") as file: s = json.dumps(projectDict, indent=4) file.write(s) def addToRecents(self) -> None: """ Adds the project to the recents file. 
def addToRecents(self) -> None:
    """
    Adds the project to the recents file, most-recently-used first and
    without duplicates.  (Removed the unused ``cwd = os.getcwd()``
    local of the previous version.)

    :return: None
    :rtype: NoneType
    """
    recentsFile = os.path.join(env.TEMP_DIR, "recentProjects.json")

    # Load the existing list; a missing or corrupt file just means the
    # list starts out empty (logged for diagnostics).
    recentProjects = []
    try:
        with open(recentsFile, "r") as recents:
            recentProjects = json.loads(recents.read())
    except Exception as e:
        logger.exception(e)

    if self.getProjectFile() not in recentProjects:
        recentProjects.insert(0, self.getProjectFile())
        with open(recentsFile, "w") as recents:
            recents.write(json.dumps(recentProjects, indent=4))
<reponame>fabien-vavrand/aikit # -*- coding: utf-8 -*- """ Created on Wed Jun 6 14:13:56 2018 @author: <NAME> """ from sklearn.base import BaseEstimator, TransformerMixin from sklearn.utils.validation import check_is_fitted import pandas as pd import numpy as np try: import category_encoders except ImportError: category_encoders = None from collections import defaultdict from aikit.enums import DataTypes, TypeOfVariables from aikit.tools.data_structure_helper import get_type, get_rid_of_categories from aikit.transformers.model_wrapper import ModelWrapper class _NumericalEncoder(BaseEstimator, TransformerMixin): """ Numerical Encoder of categorical variables Parameters ---------- min_modalities_number : int, default = 20 if less that 'min_modalities_number' modalities no modalities will be filtered max_modalities_number : int, default = 100, the number of modalities kept will never be more than 'max_modalities_number' max_cum_proba : float, default = 0.95 if modalities should be filtered, first filter applied is removing modalities that account for less than 1-'max_cum_proba' min_nb_observations : int, default = 10 if modalities should be filtered, modalities with less thant 'min_nb_observations' observations will be removed max_na_percentage : float, default = 0.05 if more than 'max_na_percentage' percentage of missing value, None will be treated as a special modality named '__null__' otherwise, will just put -1 (for encoding_type == 'num') or 0 everywhere (for encoding_type == 'dummy') encoding_type : 'dummy' or 'num', default = 'dummy' type of encoding between a numerical encoding and a dummy encoding """ def __init__( self, min_modalities_number=20, max_modalities_number=100, max_cum_proba=0.95, min_nb_observations=10, max_na_percentage=0.05, encoding_type="dummy", ): self.min_modalities_number = min_modalities_number self.max_modalities_number = max_modalities_number self.max_cum_proba = max_cum_proba self.min_nb_observations = min_nb_observations 
def modalities_filter(self, input_serie):
    """Select which modalities of *input_serie* to keep and number them.

    Returns a ``{modality: code}`` dict; a ``"__default__"`` entry is
    appended when at least one modality was filtered out.
    """
    if not isinstance(input_serie, pd.Series):
        raise TypeError("input_serie should be a pd.Series")

    counts = input_serie.value_counts()
    # .isnull().sum() is unreliable on sparse Series, hence the asarray.
    null_count = np.asarray(input_serie.isnull().values).sum()
    if null_count > self.max_na_percentage * len(input_serie):
        # Frequent missing values become a modality of their own.
        counts["__null__"] = null_count
        # Careful: pandas may reorder ties here.
        counts.sort_values(ascending=False, inplace=True)

    if self.min_modalities_number is not None and counts.shape[0] > self.min_modalities_number:
        # Too many modalities: decide which ones to keep.
        total = counts.sum()
        keep_mask = pd.Series(True, counts.index)

        # Filter 1: drop the tail lying beyond 'max_cum_proba'.
        if self.max_cum_proba is not None:
            cumulated = counts.cumsum().shift().fillna(0) / total
            keep_mask = keep_mask & (cumulated < self.max_cum_proba)

        # Filter 2: drop modalities with too few observations
        # (a float < 1 is interpreted as a fraction of the total).
        if self.min_nb_observations is not None:
            if isinstance(self.min_nb_observations, float) and self.min_nb_observations < 1:
                threshold = int(total * self.min_nb_observations)
            else:
                threshold = self.min_nb_observations
            keep_mask = keep_mask & (counts >= threshold)

        kept = counts[keep_mask]

        # Filter 3: hard cap on the number of modalities.
        if self.max_modalities_number is not None and kept.shape[0] > self.max_modalities_number:
            kept = kept.iloc[0:self.max_modalities_number]
    else:
        # Few enough modalities: keep everything.
        kept = counts

    # Kept modalities are numbered 0 .. P-1 in frequency order.
    mapping = {modality: code for code, modality in enumerate(kept.index)}
    if len(kept) < len(counts):
        mapping["__default__"] = len(kept)
    return mapping
P-1
        # Truncated tail of modalities_filter: when some modalities were
        # filtered out, every unseen/filtered modality is routed to one extra
        # shared index, stored under the reserved key "__default__".
        if len(modalities_to_keep) < len(value_count):
            mapping_dico["__default__"] = len(modalities_to_keep)
        return mapping_dico

    def fit(self, X, y=None):
        """Learn, for every column, the modality -> integer index mapping.

        X must be a pandas DataFrame; y is ignored (sklearn convention).
        Populates variable_modality_mapping, _feature_names and, for the
        'dummy' encoding, the per-column index shifts used by transform.
        """
        Xtype = get_type(X)
        if Xtype != DataTypes.DataFrame:
            raise TypeError("X should be a DataFrame")
        Xcolumns = list(X.columns)

        self._columns_to_encode = Xcolumns  # Force to encode everything now

        X = get_rid_of_categories(X)

        # Verif:
        if not isinstance(self._columns_to_encode, list):
            raise TypeError("_columns_to_encode should be a list")

        for c in self._columns_to_encode:
            if c not in Xcolumns:
                raise ValueError("column %s isn't in the DataFrame" % c)

        self.variable_modality_mapping = {col: self.modalities_filter(X[col]) for col in self._columns_to_encode}

        # Rmk: if we don't want an encoding where variables are in ascending
        # order, we could randomize the numbers here.
        if self.encoding_type == "num":
            # One output column per input column, same names.
            self._feature_names = self._columns_to_encode
            self.columns_mapping = {c: [c] for c in self._feature_names}

        elif self.encoding_type == "dummy":
            # One output column per (column, modality) pair, named "col__mod".
            # cum_max is the running offset of each column's block of dummies.
            self.columns_mapping = {}
            index_column = {}
            self._variable_shift = {}
            cum_max = 0
            for col in self._columns_to_encode:
                self.columns_mapping[col] = []
                for i, (mod, ind) in enumerate(self.variable_modality_mapping[col].items()):
                    index_column[ind + cum_max] = col + "__" + str(mod)
                    self.columns_mapping[col].append(col + "__" + str(mod))
                self._variable_shift[col] = cum_max
                # NOTE(review): if a column has zero modalities, 'i' is
                # undefined here (loop body never ran) — confirm upstream
                # guarantees at least one modality per column.
                cum_max += i + 1
            self._dummy_size = cum_max
            self._dummy_feature_names = [index_column[i] for i in range(cum_max)]
            self._feature_names = self._dummy_feature_names
        else:
            raise NotImplementedError("I don't know that type of encoding %s" % self.encoding_type)
        return self

    def get_feature_names(self):
        # Names of the columns produced by transform (sklearn-style helper).
        return self._feature_names

    @staticmethod
    def _get_value(k, mapping, default=-1):
        """Look up modality k in mapping; NaN maps to '__null__', unknown
        modalities fall back to '__default__', then to `default` (-1)."""
        if pd.isnull(k):
            k = "__null__"
        try:
            res = mapping[k]  # Try in mapping
        except KeyError:
            try:
                res = mapping["__default__"]  # Try in __default__
            except KeyError:
                return default
        return res

    # Rmk: maybe we could speed this up a tiny bit by remembering whether
    # there is a default / a null entry?

    def transform(self, X):
        """Encode the DataFrame X using the mappings learned in fit."""
        if get_type(X) != DataTypes.DataFrame:
            raise TypeError("X should be a DataFrame")
        X = get_rid_of_categories(X)
        result = self._transform_to_encode(X)
        return result

    def _transform_to_encode(self, X):
        """Apply the per-column integer mapping, then reshape according to
        encoding_type ('num' -> one int column per input column,
        'dummy' -> 0/1 indicator matrix)."""
        all_result_series = []
        for col, mapping in self.variable_modality_mapping.items():
            # defaultdict makes unseen modalities map to the default index
            # without raising; '__null__' is aliased onto NaN/None so that
            # Series.map resolves missing values as well.
            default_value = -1 if "__default__" not in mapping else mapping["__default__"]
            mapping = defaultdict(lambda: default_value, mapping)
            if "__null__" in mapping:
                mapping[np.nan] = mapping["__null__"]
                mapping[None] = mapping["__null__"]
            all_result_series.append(X[col].map(mapping))

        if self.encoding_type == "num":
            # NOTE(review): ignore_index=True drops the original column
            # names from the result — confirm callers rely on positions,
            # not names, here.
            result = pd.concat(all_result_series, axis=1, ignore_index=True, copy=False).astype(np.int32)
            return result
        elif self.encoding_type == "dummy":
            # Scatter 1s into a pre-allocated (n_rows, n_dummies) matrix;
            # rows whose mapped index is -1 (unknown, no default) stay all-0.
            Xres = np.zeros((X.shape[0], self._dummy_size), dtype="int32")
            nn = np.arange(X.shape[0])
            for col, result in zip(self._columns_to_encode, all_result_series):
                resultv = result.values + self._variable_shift[col]
                ii_not_minus_one = result.values != -1
                Xres[nn[ii_not_minus_one], resultv[ii_not_minus_one]] = 1
            return pd.DataFrame(1 * Xres, index=X.index, columns=self._dummy_feature_names)
        else:
            raise NotImplementedError("I don't know that type of encoding %s" % self.encoding_type)


class NumericalEncoder(ModelWrapper):
    """ Numerical Encoder of categorical variables

    Parameters
    ----------
    columns_to_use : list of str
        the columns to use

    min_modalities_number : int, default = 20
        if less than 'min_modalities_number' modalities no modalities will be filtered

    max_modalities_number : int, default = 100,
        the number of modalities kept will never be more than 'max_modalities_number'

    max_cum_proba : float, default = 0.95
        if modalities should be filtered, first filter applied is removing modalities that account for less than 1-'max_cum_proba'

    min_nb_observations : int, default = 10
        if modalities should be filtered, modalities with less than 'min_nb_observations' observations will be removed

    max_na_percentage : float, default = 0.05
        if more than 'max_na_percentage' percentage of missing value, None will be treated as a special modality named '__null__'
        otherwise, will just put -1 (for encoding_type == 'num') or 0 everywhere (for encoding_type == 'dummy')

    encoding_type : 'dummy' or 'num', default = 'dummy'
        type of encoding between a numerical encoding and a dummy encoding

    regex_match : boolean, default = False
        if True use regex to match columns

    desired_output_type : DataType
        the type of result

    drop_used_columns : boolean, default=True
        what to do with the ORIGINAL columns that were transformed.
        If False, will keep them in the result (un-transformed)
        If True, only the transformed columns are in the result

    drop_unused_columns: boolean, default=True
        what to do with the column that were not used.
        if False, will drop them
        if True, will keep them in the result
    """

    def __init__(
        self,
        columns_to_use=TypeOfVariables.CAT,
        min_modalities_number=20,
        max_modalities_number=100,
        max_cum_proba=0.95,
        min_nb_observations=10,
        max_na_percentage=0.05,
        encoding_type="dummy",
        regex_match=False,
        desired_output_type=DataTypes.DataFrame,
        drop_used_columns=True,
        drop_unused_columns=False,
    ):
        # Encoder-specific hyper-parameters; the generic column-selection /
        # output-shaping behavior is delegated to ModelWrapper below.
        self.min_modalities_number = min_modalities_number
        self.max_modalities_number = max_modalities_number
        self.max_cum_proba = max_cum_proba
        self.min_nb_observations = min_nb_observations
        self.max_na_percentage = max_na_percentage
        self.encoding_type = encoding_type

        super(NumericalEncoder, self).__init__(
            columns_to_use=columns_to_use,
            regex_match=regex_match,
            work_on_one_column_only=False,
            all_columns_at_once=True,
            accepted_input_types=(DataTypes.DataFrame,),
            remove_sparse_serie=True,
            column_prefix=None,
            desired_output_type=desired_output_type,
            must_transform_to_get_features_name=False,
            dont_change_columns=False,
            drop_used_columns=drop_used_columns,
            drop_unused_columns=drop_unused_columns,
        )

    def _get_model(self, X, y=None):
        # Factory used by ModelWrapper: build the underlying stateless encoder
        # from the stored hyper-parameters.
        return _NumericalEncoder(
            min_modalities_number=self.min_modalities_number,
            max_modalities_number=self.max_modalities_number,
            max_cum_proba=self.max_cum_proba,
            min_nb_observations=self.min_nb_observations,
            max_na_percentage=self.max_na_percentage,
            encoding_type=self.encoding_type,
        )

    @property
    def columns_mapping(self):
        # Expose the fitted inner model's column -> output-columns mapping.
        return self.model.columns_mapping


# In[]
# Rmk : Fix of categorical encoder
# class _CategoricalEncoderFixer(object):
#    def fit_transform(self,X,y = None):
#        self.fit(X,y)
#        return self.transform(X,y)
#
# class TargetEncoderFixed(_CategoricalEncoderFixer,category_encoders.TargetEncoder):
#    pass
#
# class LeaveOneOutEncoderFixed(_CategoricalEncoderFixer,category_encoders.LeaveOneOutEncoder):
#    pass


class CategoricalEncoder(ModelWrapper):
    """ Wrapper around categorical encoder package encoder

    Parameters
    ----------

    columns_to_encode : None or list of str
        the columns to encode (if None will guess)

    encoding_type : str, default = 'dummy'
        the type of encoding, possible choices :
        * dummy
        * binary
        * basen
        * hashing

    basen_base : int, default = 2
        the base when using encoding_type == 'basen'

    hashing_n_components : int, default = 10
        the size of hashing when using encoding_type == 'hashing'

    columns_to_use : list of str or None
        the columns to use for that encoder

    regex_match : boolean
        if True will use regex to match columns

    desired_output_type : list of DataType
        the type of output
# Version 1.0
# run with "python Acimdes_Client.py"
# for logging run with "python Acimdes_Client.py > log.txt"

# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
from typing import List
import socket as soc
import select as sel
import threading as thrd
import sys

# Wire protocol: every message is a fixed-width ASCII length header
# (HEADER_LENGTH bytes) followed by the UTF-8 payload.
HEADER_LENGTH = 10

IP = soc.gethostbyname(soc.gethostname())
# IP = '192.168.5.128'
PORT = 7777
ADDR = (IP, PORT)
FORMAT = 'utf-8'

soc_list = []       # all sockets watched by select(); soc_list[0] is the listener
clients = {}        # client socket -> username
player_lobby = {}   # client socket -> Lobby the client was placed in
STOP = '!Stop'      # control message that shuts the whole server down


class Server:
    """Single-threaded select()-based TCP server for the card game.

    NOTE(review): methods below reference the module-level globals `server`
    and `lobby`, which are presumably instantiated further down the file
    (outside this chunk) — confirm.
    """

    def start_server(self):
        # Bind, listen, register the listening socket and enter the
        # select() loop (never returns except via sys.exit on STOP).
        print("[STARTING_SERVER]Server is starting...")
        server = soc.socket(soc.AF_INET, soc.SOCK_STREAM)
        server.bind(ADDR)
        server.listen()
        print(f"[LISTENING]Listening on: {IP}:{PORT}")
        soc_list.append(server)
        self.handle_clients()

    @staticmethod
    def send_msg(conn, msg: str):
        # Prefix the encoded payload with its length, left-aligned in a
        # HEADER_LENGTH-byte ASCII field.
        msg = msg.encode(FORMAT)
        msg_head = f"{len(msg):<{HEADER_LENGTH}}".encode(FORMAT)
        msg_send = msg_head + msg
        conn.send(msg_send)

    @staticmethod
    def recv_msg(csoc):
        """Read one length-prefixed message; returns the decoded string,
        or False on a closed/broken connection."""
        try:
            msg_head = csoc.recv(HEADER_LENGTH)
            if not len(msg_head):
                return False  # peer closed the connection cleanly
            msg_len = int(msg_head.decode(FORMAT))
            return csoc.recv(msg_len).decode(FORMAT)
        # NOTE(review): bare except silently swallows everything (including
        # KeyboardInterrupt); narrowing to (OSError, ValueError) would be
        # safer — left unchanged here.
        except:
            return False

    def handle_clients(self):
        """Main event loop: accept new clients, route their messages, and
        reap dead/errored sockets."""
        while True:
            read_soc, _, exception_soc = sel.select(soc_list, [], soc_list)
            for not_soc in read_soc:
                if not_soc == soc_list[0]:
                    # Listener is readable: a new client is connecting.
                    # Its first message is the username.
                    csoc, caddr = soc_list[0].accept()
                    usr = self.recv_msg(csoc)
                    if usr is False:
                        continue
                    soc_list.append(csoc)
                    clients[csoc] = usr
                    if not lobby.isLobbyFull():
                        lobby.addPlayer(usr, csoc)
                        player_lobby[csoc] = lobby
                    else:
                        print("Lobby is full!")
                        server.send_msg(csoc, "Lobby is full!")
                        csoc.close()
                    print(f"[NEW_CONNECTION]Accepted new connection from: {caddr[0]}:{caddr[1]}, username: "f"{usr}")
                else:
                    # Existing client sent a message (or disconnected).
                    msg = self.recv_msg(not_soc)
                    if msg is False:
                        print(f"Closed connection from {clients[not_soc]}")
                        soc_list.remove(not_soc)
                        player_lobby[not_soc].deletePlayer(clients[not_soc])
                        del clients[not_soc]
                        del player_lobby[not_soc]
                        continue
                    if msg == '!Start' and player_lobby[not_soc].isLobbyFull():
                        # t = thrd.Thread(target=player_lobby[not_soc].startGame)
                        # t.start()
                        player_lobby[not_soc].startGame()
                    else:
                        print("Lobby not full!")
                        server.send_msg(not_soc, "Lobby not full!")
                    if msg == STOP:
                        # Global shutdown requested: close every socket.
                        for i in soc_list:
                            i.close()
                        sys.exit("[SERVER_CLOSED]Server closed")
            for not_soc in exception_soc:
                # Socket reported an exceptional condition: drop it.
                soc_list.remove(not_soc)
                del clients[not_soc]


class Lobby:
    """A 4-player waiting room; owns the game once it starts."""

    def __init__(self):
        self.players = []          # Player objects, same order as sockets
        self.player_sockets = []   # parallel list of client sockets
        self.g: GameOnline = None  # the running game, set by startGame

    def addPlayer(self, player_username, player_socket):
        self.players.append(Player(player_username))
        self.player_sockets.append(player_socket)

    def deletePlayer(self, username):
        # Player.__eq__ compares against plain strings, so index(username)
        # works; pop the socket first while the player index is still valid.
        self.player_sockets.pop(self.players.index(username))
        self.players.pop(self.players.index(username))

    def clearPlayers(self):
        self.players = []
        self.player_sockets = []

    def isLobbyFull(self):
        # The game is fixed at exactly 4 players.
        if len(self.players) == 4:
            return True
        return False

    def isLobbyEmpty(self):
        if len(self.players) == 0:
            return True
        return False

    def startGame(self):
        # NOTE(review): GameOnline.playgame is not visible in this chunk —
        # presumably defined further down; verify.
        self.g = GameOnline(self.players, self.player_sockets)
        self.g.playgame()


class KillGame:
    """Simple settable flag used to signal that a game should stop."""

    def __init__(self):
        self.isKilled = False

    def setToKill(self):
        self.isKilled = True

    def reset(self):
        self.isKilled = False

    def isSet(self):
        return self.isKilled


class Cards:
    """The deck: 4 shuffled copies of card values 0..7 (32 cards total)."""

    def __init__(self):
        self.cards = []
        for _ in range(4):
            temp = random.sample(range(8), 8)  # one shuffled suit
            for j in temp:
                self.cards.append(j)

    def lastcard(self):
        # Deal from the end of the list (top of the deck).
        last = self.cards[-1]
        self.cards.pop()
        return last


class Player:
    def __init__(self, username):
        self.username = username
        self.cardsInHand = []
        self.score = 0
self.isLast = False self.isFirst = False self.takesTheHand = False def __str__(self): return self.username def __eq__(self, other): if isinstance(other, str): return self.username == other if isinstance(other, Player): return self.username == other.username return False def throwcard(self, n): card = self.cardsInHand[n] self.cardsInHand.pop(n) return card class GameOnline: def __init__(self, players: List[Player], player_sockets): self.cardsRoman = ['VII', 'VIII', 'IX', 'X', 'D', 'B', 'K', 'A'] self.allowedInput: List[str] = ['0', '1', '2', '3', 'end'] self.players: List[Player] = players self.player_sockets = player_sockets self.players[random.randint(0, 3)].isFirst = True self.cards: Cards = Cards() self.deal(self.cards, self.players) @staticmethod def draw(cards, players): for i in players: i.cardsInHand.append(cards.lastcard()) @staticmethod def deal(cards, players): for _ in range(2): for j in range(4): players[j].cardsInHand.append(cards.lastcard()) players[j].cardsInHand.append(cards.lastcard()) @staticmethod def sortPlayers(players: List[Player], player_sockets): for _ in range(4): if players[0].isFirst: break else: temp_p = players[0] players.pop(0) players.append(temp_p) temp_s = player_sockets[0] player_sockets.pop(0) player_sockets.append(temp_s) def canPlayerContinue(self, cardToBeat, first, i): if (cardToBeat not in self.players[0].cardsInHand and not first and i == self.players[0] and 0 not in self.players[0].cardsInHand): return True else: return False def printHand(self, hand, first): handOut = '| ' if not first: print("Bačene karte: ") for i in self.player_sockets: server.send_msg(i, "Bačene karte: ") for n in hand: handOut += self.cardsRoman[n] + ' | ' print(handOut) for i in self.player_sockets: server.send_msg(i, handOut) def printPlayer(self, i): cardsInHandOut = '| ' print(i.__str__()) server.send_msg(self.player_sockets[self.players.index(i)], i.__str__()) for n in i.cardsInHand: cardsInHandOut += self.cardsRoman[n] + ' | ' print("Ruka: " + 
cardsInHandOut) server.send_msg(self.player_sockets[self.players.index(i)], "Ruka: " + cardsInHandOut) server.send_msg(self.player_sockets[self.players.index(i)], f"Odaberite kartu (0 - {len(i.cardsInHand) - 1}): ") # return input(f"Odaberite kartu (0 - {len(i.cardsInHand) - 1}): ") return server.recv_msg(self.player_sockets[self.players.index(i)]) def printOrder(self): print("Redoslijed igre: ") for i in self.players: print(f"\t- {i}") for i in self.player_sockets: server.send_msg(i, "Redoslijed igre: ") for j in self.players: server.send_msg(i, f"\t- {j}") @staticmethod def cardTakesTheHand(thrownCard, cardToBeat, i, players): if thrownCard == cardToBeat or thrownCard == 0: for j in players: j.takesTheHand = False j.isFirst = False i.takesTheHand = True i.isFirst = True @staticmethod def pointSum(hand, players): sumPoints = 0 for i in hand: if i == 3 or i == 7: sumPoints += 10 for i in players: if i.takesTheHand: i.score += sumPoints break def pointReset(self): for i in self.players: i.score = 0 def contDeal(self, firstPlayer): if len(self.cards.cards) != 0: for i in range(min(4-len(firstPlayer.cardsInHand), len(self.cards.cards)/4)): self.draw(self.cards, self.players) def checkCardInput(self, cardToThrow, cardToBeat, first, a, i, firstPlayer): if cardToThrow not in self.allowedInput: print(f"Nedozvoljeni ulaz.") server.send_msg(self.player_sockets[self.players.index(i)], f"Nedozvoljeni ulaz.") return False if cardToThrow == 'end': if i != firstPlayer or first: print("Trenutno nije moguće završiti rundu!") server.send_msg(self.player_sockets[self.players.index(i)], "Trenutno nije moguće završiti rundu!") return False else: return True if int(cardToThrow) > (3-a): print(f"Odabrana karta nije unutar raspona.") server.send_msg(self.player_sockets[self.players.index(i)], f"Odabrana karta nije unutar raspona.") return False if i.cardsInHand[int(cardToThrow)] != cardToBeat and i.cardsInHand[int(cardToThrow)] != 0 and not first and i == firstPlayer: print(f"Odabrana 
karta nije ispravna.") server.send_msg(self.player_sockets[self.players.index(i)], f"Odabrana karta nije ispravna.") return False return True @property def handplay(self): hand = [] breakHand = False first = True cardToBeat = None for i in self.players: i.cardsInHand.sort() # Sortiranje igrača self.sortPlayers(self.players, self.player_sockets) firstPlayer = self.players[0] # Početak ruke if len(firstPlayer.cardsInHand) != 0: self.printOrder() # Krugovi for a in range(4): # Igrači for i in self.players: # Provjera može li prvi igrač nastaviti ruku breakHand = self.canPlayerContinue(cardToBeat, first, i) if breakHand: self.printHand(hand, first) break self.printHand(hand, first) cardToThrow = self.printPlayer(i) # Provjera da li je ulaz dobar while not self.checkCardInput(cardToThrow, cardToBeat, first, a, i, firstPlayer): server.send_msg(self.player_sockets[self.players.index(i)], f"Odaberite kartu (0 - {len(i.cardsInHand) - 1}): ") cardToThrow = server.recv_msg(self.player_sockets[self.players.index(i)]) if cardToThrow == 'end': breakHand = True break # Postavlja kartu za uzimanje if first: thrownCard = i.throwcard(int(cardToThrow)) cardToBeat = thrownCard first = False else: thrownCard = i.throwcard(int(cardToThrow)) # Provjerava da li bačena karta uzima ruku self.cardTakesTheHand(thrownCard, cardToBeat, i, self.players) # Bačene karte hand.append(thrownCard) if breakHand: print("Runda je završila.") for i in self.player_sockets: server.send_msg(i, "Runda je završila.") break # Zbrajanje bodova self.pointSum(hand, self.players) # Dijeljenje karata self.contDeal(firstPlayer) if not breakHand: print("Runda je završila.") for i in self.player_sockets: server.send_msg(i, "Runda je završila.") return False else: # Ispis pobjednika print("Kraj partije.") if (self.players[0].score + self.players[2].score) > (self.players[1].score + self.players[3].score): print(f"Pobijedili su: {self.players[0]} i {self.players[2]}") print("Bodovi:") print(f"\t{self.players[0]} i 
{self.players[2]}: {self.players[0].score + self.players[2].score}") print(f"\t{self.players[1]} i {self.players[3]}: {self.players[1].score + self.players[3].score}") elif (self.players[0].score + self.players[2].score) < (self.players[1].score + self.players[3].score): print(f"Pobijedili su: {self.players[1]} i {self.players[3]}") print("Bodovi:") print(f"\t{self.players[0]} i {self.players[2]}: {self.players[0].score + self.players[2].score}") print(f"\t{self.players[1]} i {self.players[3]}: {self.players[1].score + self.players[3].score}") else: if self.players[0].takesTheHand + self.players[2].takesTheHand: print(f"Pobijedili su: {self.players[0]} i {self.players[2]}") print("Uzeli su zadnji štih.") print("Bodovi:") print(f"\t{self.players[0]} i {self.players[2]}: {self.players[0].score + self.players[2].score}") print(f"\t{self.players[1]} i {self.players[3]}: {self.players[1].score + self.players[3].score}") else: print(f"Pobijedili su: {self.players[1]} i {self.players[3]}") print("Uzeli su zadnji štih.") print("Bodovi:") print(f"\t{self.players[0]} i {self.players[2]}: {self.players[0].score + self.players[2].score}") print(f"\t{self.players[1]} i {self.players[3]}: {self.players[1].score + self.players[3].score}") for i in self.player_sockets: server.send_msg(i, "Kraj partije.") if (self.players[0].score + self.players[2].score) > (self.players[1].score + self.players[3].score): server.send_msg(i, f"Pobijedili su: {self.players[0]} i {self.players[2]}") server.send_msg(i, "Bodovi:") server.send_msg(i, f"\t{self.players[0]} i {self.players[2]}: {self.players[0].score + self.players[2].score}") server.send_msg(i, f"\t{self.players[1]} i {self.players[3]}: {self.players[1].score + self.players[3].score}") elif (self.players[0].score + self.players[2].score) < (self.players[1].score + self.players[3].score): server.send_msg(i, f"Pobijedili su: {self.players[1]} i {self.players[3]}") server.send_msg(i, "Bodovi:") server.send_msg(i, f"\t{self.players[0]} i 
{self.players[2]}: {self.players[0].score + self.players[2].score}") server.send_msg(i, f"\t{self.players[1]} i {self.players[3]}: {self.players[1].score + self.players[3].score}") else: if self.players[0].takesTheHand + self.players[2].takesTheHand: server.send_msg(i, f"Pobijedili su: {self.players[0]} i {self.players[2]}") server.send_msg(i, "Uzeli su zadnji štih.") server.send_msg(i, "Bodovi:") server.send_msg(i, f"\t{self.players[0]} i {self.players[2]}: {self.players[0].score + self.players[2].score}") server.send_msg(i, f"\t{self.players[1]} i {self.players[3]}: {self.players[1].score + self.players[3].score}") else: server.send_msg(i, f"Pobijedili su: {self.players[1]} i {self.players[3]}") server.send_msg(i, "Uzeli su zadnji štih.") server.send_msg(i, "Bodovi:") server.send_msg(i, f"\t{self.players[0]} i {self.players[2]}: {self.players[0].score + self.players[2].score}") server.send_msg(i,
val1 if axis2 is not None: vec[axis2] = val2[axis2] if isinstance(val2, Py_Vec) else val2 if axis3 is not None: vec[axis3] = val3[axis3] if isinstance(val3, Py_Vec) else val3 return vec def rotate( self, pitch: float=0.0, yaw: float=0.0, roll: float=0.0, round_vals: bool=True, ) -> 'Vec': """Rotate a vector by a Source rotational angle. Returns the vector, so you can use it in the form val = Vec(0,1,0).rotate(p, y, r) If round is True, all values will be rounded to 6 decimals (since these calculations always have small inprecision.) """ warnings.warn("Use vec @ Angle() instead.", DeprecationWarning, stacklevel=2) mat = Py_Matrix.from_angle(Py_Angle(pitch, yaw, roll)) mat._vec_rot(self) if round_vals: self.x = round(self.x, 6) self.y = round(self.y, 6) self.z = round(self.z, 6) return self def rotate_by_str(self, ang: str, pitch=0.0, yaw=0.0, roll=0.0, round_vals=True) -> 'Vec': """Rotate a vector, using a string instead of a vector. If the string cannot be parsed, use the passed in values instead. """ warnings.warn("Use vec @ Angle.from_str() instead.", DeprecationWarning, stacklevel=2) mat = Py_Matrix.from_angle(Py_Angle.from_str(ang, pitch, yaw, roll)) mat._vec_rot(self) if round_vals: self.x = round(self.x, 6) self.y = round(self.y, 6) self.z = round(self.z, 6) return self @staticmethod @overload def bbox(_point: Iterable['Vec']) -> Tuple['Vec', 'Vec']: ... @staticmethod @overload def bbox(*points: 'Vec') -> Tuple['Vec', 'Vec']: ... @staticmethod def bbox(*points: Union[Iterable['Vec'], 'Vec']) -> Tuple['Vec', 'Vec']: """Compute the bounding box for a set of points. Pass either several Vecs, or an iterable of Vecs. Returns a (min, max) tuple. """ # Allow passing a single iterable, but also handle a single Vec. # The error messages match those produced by min()/max(). 
first: Vec point_coll: Iterable[Vec] if len(points) == 1 and not isinstance(points[0], Py_Vec): try: [[first, *point_coll]] = points except ValueError: raise ValueError('Vec.bbox() arg is an empty sequence') from None else: try: first, *point_coll = points except ValueError: raise TypeError( 'Vec.bbox() expected at ' 'least 1 argument, got 0.' ) from None bbox_min = Py_Vec(first) bbox_max = bbox_min.copy() for point in point_coll: bbox_min.min(point) bbox_max.max(point) return bbox_min, bbox_max @classmethod def iter_grid( cls, min_pos: 'Vec', max_pos: 'Vec', stride: int=1, ) -> Iterator['Vec']: """Loop over points in a bounding box. All coordinates should be integers. Both borders will be included. """ min_x = int(min_pos.x) min_y = int(min_pos.y) min_z = int(min_pos.z) max_x = int(max_pos.x) max_y = int(max_pos.y) max_z = int(max_pos.z) for x in range(min_x, max_x + 1, stride): for y in range(min_y, max_y + 1, stride): for z in range(min_z, max_z + 1, stride): yield cls(x, y, z) def iter_line(self, end: 'Vec', stride: int=1) -> Iterator['Vec']: """Yield points between this point and 'end' (including both endpoints). Stride specifies the distance between each point. If the distance is less than the stride, only end-points will be yielded. If they are the same, that point will be yielded. """ offset = end - self length = offset.mag() if length < stride: # Not enough room, yield both yield self.copy() if self != end: yield end.copy() return direction = offset.norm() for pos in range(0, int(length), int(stride)): yield self + direction * pos yield end.copy() # Directly yield - ensures no rounding errors. def axis(self) -> str: """For a normal vector, return the axis it is on.""" x = abs(self.x) > 1e-6 y = abs(self.y) > 1e-6 z = abs(self.z) > 1e-6 if x and not y and not z: return 'x' if not x and y and not z: return 'y' if not x and not y and z: return 'z' raise ValueError( f'({self.x:g}, {self.y:g}, {self.z:g}) is ' f'not an on-axis vector!' 
) def to_angle(self, roll: float=0) -> 'Angle': """Convert a normal to a Source Engine angle. A +x axis vector will result in a 0, 0, 0 angle. The roll is not affected by the direction of the normal. The inverse of this is `Vec(x=1) @ Angle(pitch, yaw, roll)`. """ # Pitch is applied first, so we need to reconstruct the x-value horiz_dist = math.hypot(self.x, self.y) return Py_Angle( math.degrees(math.atan2(-self.z, horiz_dist)), math.degrees(math.atan2(self.y, self.x)) % 360, roll, ) def to_angle_roll(self, z_norm: 'Vec', stride: int=0) -> 'Angle': """Produce a Source Engine angle with roll. The z_normal should point in +z, and must be at right angles to this vector. This is deprecated, use Matrix.from_basis().to_angle(). Stride is no longer used. """ warnings.warn('Use Matrix.from_basis().to_angle()', DeprecationWarning) return Py_Matrix.from_basis(x=self, z=z_norm).to_angle() def rotation_around(self, rot: float=90) -> 'Angle': """For an axis-aligned normal, return the angles which rotate around it.""" warnings.warn('Use Matrix.axis_angle().to_angle()', DeprecationWarning) if self.x and not self.y and not self.z: return Py_Angle(roll=math.copysign(rot, self.x)) elif self.y and not self.x and not self.z: return Py_Angle(pitch=math.copysign(rot, self.y)) elif self.z and not self.x and not self.y: return Py_Angle(yaw=math.copysign(rot, self.z)) else: raise ValueError('Zero vector!') def __abs__(self) -> 'Vec': """Performing abs() on a Vec takes the absolute value of all axes.""" return Py_Vec( abs(self.x), abs(self.y), abs(self.z), ) # The numeric magic methods are defined via exec(), so we need stubs # to annotate them in a way a type-checker can understand. # These are immediately overwritten. 
def __add__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': pass def __radd__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': pass def __iadd__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': pass def __sub__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': pass def __rsub__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': pass def __isub__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': pass def __mul__(self, other: float) -> 'Vec': pass def __rmul__(self, other: float) -> 'Vec': pass def __imul__(self, other: float) -> 'Vec': pass def __truediv__(self, other: float) -> 'Vec': pass def __rtruediv__(self, other: float) -> 'Vec': pass def __itruediv__(self, other: float) -> 'Vec': pass def __floordiv__(self, other: float) -> 'Vec': pass def __rfloordiv__(self, other: float) -> 'Vec': pass def __ifloordiv__(self, other: float) -> 'Vec': pass def __mod__(self, other: float) -> 'Vec': pass def __rmod__(self, other: float) -> 'Vec': pass def __imod__(self, other: float) -> 'Vec': pass funcname = op = pretty = None # Use exec() to generate all the number magic methods. This reduces code # duplication since they're all very similar. for funcname, op in (('add', '+'), ('sub', '-')): exec( _VEC_ADDSUB_TEMP.format(func=funcname, op=op), globals(), locals(), ) for funcname, op, pretty in ( ('mul', '*', 'multiply'), ('truediv', '/', 'divide'), ('floordiv', '//', 'floor-divide'), ('mod', '%', 'modulus'), ): exec( _VEC_MULDIV_TEMP.format(func=funcname, op=op, pretty=pretty), globals(), locals(), ) del funcname, op, pretty # Divmod is entirely unique. 
def __divmod__(self, other: float) -> Tuple['Vec', 'Vec']: """Divide the vector by a scalar, returning the result and remainder.""" if isinstance(other, Py_Vec): raise TypeError("Cannot divide 2 Vectors.") else: try: x1, x2 = divmod(self.x, other) y1, y2 = divmod(self.y, other) z1, z2 = divmod(self.z, other) except TypeError: return NotImplemented else: return Py_Vec(x1, y1, z1), Py_Vec(x2, y2, z2) def __rdivmod__(self, other: float) -> Tuple['Vec', 'Vec']: """Divide a scalar by a vector, returning the result and remainder.""" try: x1, x2 = divmod(other, self.x) y1, y2 = divmod(other, self.y) z1, z2 = divmod(other, self.z) except (TypeError, ValueError): return NotImplemented else: return Py_Vec(x1, y1, z1), Py_Vec(x2, y2, z2) def __matmul__(self, other: Union['Angle', 'Matrix']) -> 'Vec': """Rotate this vector by an angle or matrix.""" if isinstance(other, Py_Matrix): mat = other elif isinstance(other, Py_Angle): mat = Py_Matrix.from_angle(other) else: return NotImplemented res = Vec(self.x, self.y, self.z) mat._vec_rot(res) return res def __imatmul__(self, other: Union['Angle', 'Matrix']) -> 'Vec': """We need to define this, so it's in-place.""" if isinstance(other, Py_Matrix): mat = other elif isinstance(other, Py_Angle): mat = Py_Matrix.from_angle(other) else: return NotImplemented mat._vec_rot(self) return self def __bool__(self) -> bool: """Vectors are True if any axis is non-zero.""" return self.x != 0 or self.y != 0 or self.z != 0 def __eq__(self, other: object) -> bool: """== test. Two Vectors are compared based on the axes. A Vector can be compared with a 3-tuple as if it was a Vector also. A tolerance of 1e-6 is accounted for automatically. """ if isinstance(other, Py_Vec): return ( abs(other.x - self.x) < 1e-6 and abs(other.y - self.y) < 1e-6 and abs(other.z - self.z) < 1e-6 ) elif isinstance(other, tuple) and len(other) == 3:
ooOoO0o * ooOoO0o + o0oOOo0O0Ooo . iII111i % iIii1I11I1II1 + Ii1I if 88 - 88: Oo0Ooo . iII111i oooOooOO -= 1 packet = packet [ 0 : 7 ] + struct . pack ( "B" , oooOooOO ) + packet [ 8 : : ] return ( packet ) if 89 - 89: OOooOOo + I1Ii111 % i11iIiiIii + Oo0Ooo / Oo0Ooo + OoO0O00 if 9 - 9: OoOoOO00 % i1IIi + IiII if 19 - 19: I1Ii111 - II111iiii / I1Ii111 + I1IiiI - OoooooooOO + o0oOOo0O0Ooo if 100 - 100: OoO0O00 / OoOoOO00 / OOooOOo / OoO0O00 if 95 - 95: ooOoO0o if 95 - 95: Ii1I + i1IIi . I1IiiI % I1Ii111 / Ii1I * O0 if 68 - 68: I1Ii111 - IiII - oO0o - Oo0Ooo - o0oOOo0O0Ooo if 32 - 32: OoOoOO00 % i11iIiiIii def lisp_mac_input ( packet ) : return ( packet ) if 53 - 53: I1Ii111 * Ii1I / IiII . i1IIi * II111iiii / o0oOOo0O0Ooo if 44 - 44: I1Ii111 + ooOoO0o if 15 - 15: I11i + OoO0O00 + OoOoOO00 if 100 - 100: I1Ii111 if 78 - 78: OoOoOO00 if 16 - 16: I1Ii111 % OoO0O00 - OoO0O00 % OoOoOO00 * OoO0O00 if 36 - 36: OoOoOO00 * II111iiii . OoooooooOO * I11i . I11i if 13 - 13: I1ii11iIi11i * II111iiii if 93 - 93: OOooOOo / O0 - o0oOOo0O0Ooo + OoO0O00 * I1IiiI def lisp_rate_limit_map_request ( source , dest ) : if ( lisp_last_map_request_sent == None ) : return ( False ) I1IIIIi1i = lisp_get_timestamp ( ) i11IiIIi11I = I1IIIIi1i - lisp_last_map_request_sent o0OOOoOo = ( i11IiIIi11I < LISP_MAP_REQUEST_RATE_LIMIT ) if 64 - 64: ooOoO0o if ( o0OOOoOo ) : if ( source != None ) : source = source . print_address ( ) dest = dest . print_address ( ) dprint ( "Rate-limiting Map-Request for {} -> {}" . format ( source , dest ) ) if 23 - 23: Oo0Ooo . OoO0O00 return ( o0OOOoOo ) if 49 - 49: oO0o % i11iIiiIii * Ii1I if 9 - 9: Oo0Ooo - OoO0O00 + ooOoO0o / o0oOOo0O0Ooo if 61 - 61: O0 - i11iIiiIii * o0oOOo0O0Ooo if 92 - 92: Oo0Ooo + OOooOOo - i11iIiiIii if 26 - 26: O0 % Oo0Ooo + ooOoO0o - Ii1I . Oo0Ooo if 33 - 33: I1Ii111 / iII111i . 
I1Ii111 % II111iiii if 52 - 52: I1ii11iIi11i def lisp_send_map_request ( lisp_sockets , lisp_ephem_port , seid , deid , rloc ) : global lisp_last_map_request_sent if 1 - 1: II111iiii + I1ii11iIi11i * OoOoOO00 % ooOoO0o - iII111i % OoooooooOO if 77 - 77: iII111i + o0oOOo0O0Ooo if 60 - 60: I1ii11iIi11i if 23 - 23: iII111i % I1IiiI % I1Ii111 * oO0o * I1IiiI if 74 - 74: O0 / I11i . Oo0Ooo / I11i % OoO0O00 % o0oOOo0O0Ooo if 83 - 83: OoO0O00 - i11iIiiIii + iIii1I11I1II1 oOOo = i11I = None if ( rloc ) : oOOo = rloc . rloc i11I = rloc . translated_port if lisp_i_am_rtr else LISP_DATA_PORT if 36 - 36: I1ii11iIi11i / OoO0O00 - oO0o % O0 if 12 - 12: i1IIi * ooOoO0o / oO0o + I1IiiI / OoooooooOO if 86 - 86: Oo0Ooo / OoO0O00 if 78 - 78: I1IiiI * I1IiiI if 13 - 13: oO0o iI11 , ooO , O0OoO0o = lisp_myrlocs if ( iI11 == None ) : lprint ( "Suppress sending Map-Request, IPv4 RLOC not found" ) return if 80 - 80: IiII % OOooOOo if ( ooO == None and oOOo != None and oOOo . is_ipv6 ( ) ) : lprint ( "Suppress sending Map-Request, IPv6 RLOC not found" ) return if 6 - 6: O0 - Ii1I . OOooOOo if 39 - 39: I1IiiI + I1Ii111 / I1ii11iIi11i * i1IIi O00O0 = lisp_map_request ( ) O00O0 . record_count = 1 O00O0 . nonce = lisp_get_control_nonce ( ) O00O0 . rloc_probe = ( oOOo != None ) if 37 - 37: O0 + iIii1I11I1II1 % IiII * oO0o if 43 - 43: OOooOOo . O0 if 76 - 76: OOooOOo * OoooooooOO / IiII . OoO0O00 + II111iiii if 23 - 23: OoO0O00 - OoooooooOO * I11i . iIii1I11I1II1 / o0oOOo0O0Ooo + oO0o if 74 - 74: II111iiii / I1IiiI * O0 * OoO0O00 . I11i if 74 - 74: O0 . i1IIi / I1ii11iIi11i + o0oOOo0O0Ooo if 24 - 24: ooOoO0o % I1Ii111 + OoO0O00 * o0oOOo0O0Ooo % O0 - i11iIiiIii if ( rloc ) : rloc . last_rloc_probe_nonce = O00O0 . nonce if 49 - 49: o0oOOo0O0Ooo / OoOoOO00 + iII111i I111iiiIii1I = deid . is_multicast_address ( ) if ( I111iiiIii1I ) : O00O0 . target_eid = seid O00O0 . target_group = deid else : O00O0 . 
target_eid = deid if 85 - 85: I1IiiI - o0oOOo0O0Ooo if 86 - 86: II111iiii + Ii1I * Ii1I if 26 - 26: o0oOOo0O0Ooo + oO0o * i11iIiiIii / II111iiii if 86 - 86: Ii1I if 69 - 69: oO0o % o0oOOo0O0Ooo / o0oOOo0O0Ooo if 1 - 1: Ii1I if 43 - 43: o0oOOo0O0Ooo if 78 - 78: I1Ii111 % i1IIi * I11i if 59 - 59: OoOoOO00 % OoO0O00 % i11iIiiIii . II111iiii % I1ii11iIi11i + i1IIi if ( O00O0 . rloc_probe == False ) : I11i111 = lisp_get_signature_eid ( ) if ( I11i111 ) : O00O0 . signature_eid . copy_address ( I11i111 . eid ) O00O0 . privkey_filename = "./lisp-sig.pem" if 99 - 99: I11i + IiII * I1Ii111 - OOooOOo - i1IIi if 77 - 77: I11i . IiII / OoO0O00 / I1Ii111 if 8 - 8: o0oOOo0O0Ooo + iII111i / OoO0O00 * ooOoO0o - oO0o . iII111i if 32 - 32: OoooooooOO . I1Ii111 - I1ii11iIi11i if 29 - 29: OoO0O00 if 33 - 33: I1ii11iIi11i - O0 if ( seid == None or I111iiiIii1I ) : O00O0 . source_eid . afi = LISP_AFI_NONE else : O00O0 . source_eid = seid if 72 - 72: Oo0Ooo * iII111i - I11i if 81 - 81: I1Ii111 if 85 - 85: O0 % OoOoOO00 . I1ii11iIi11i if 46 - 46: OOooOOo * iIii1I11I1II1 if 33 - 33: OoO0O00 * II111iiii / i1IIi if 93 - 93: I1Ii111 % I11i if 64 - 64: I1IiiI % OoOoOO00 / Oo0Ooo if 40 - 40: Ii1I + iIii1I11I1II1 / oO0o . II111iiii % O0 - IiII if 49 - 49: IiII - OOooOOo * OOooOOo . O0 if 60 - 60: OoOoOO00 % iIii1I11I1II1 + IiII % o0oOOo0O0Ooo if 64 - 64: OoOoOO00 * I1ii11iIi11i . OoooooooOO . i1IIi if 61 - 61: OoO0O00 if ( oOOo != None and lisp_nat_traversal and lisp_i_am_rtr == False ) : if ( oOOo . is_private_address ( ) == False ) : iI11 = lisp_get_any_translated_rloc ( ) if 100 - 100: OoOoOO00 if ( iI11 == None ) : lprint ( "Suppress sending Map-Request, translated RLOC not found" ) return if 97 - 97: OoooooooOO if 91 - 91: o0oOOo0O0Ooo / O0 % OoO0O00 if 35 - 35: iII111i % OoO0O00 * O0 if 37 - 37: OOooOOo if 100 - 100: Oo0Ooo * I1IiiI . ooOoO0o if 53 - 53: OOooOOo + o0oOOo0O0Ooo * Ii1I + O0 if 75 - 75: OoooooooOO if 24 - 24: I1Ii111 % i11iIiiIii % oO0o . 
OOooOOo % IiII if ( oOOo == None or oOOo . is_ipv4 ( ) ) : if ( lisp_nat_traversal and oOOo == None ) : IIIiIIi11Ii1 = lisp_get_any_translated_rloc ( ) if ( IIIiIIi11Ii1 != None ) : iI11 = IIIiIIi11Ii1 if 30 - 30: Ii1I / I1Ii111 - OoOoOO00 / OOooOOo * I1IiiI + Ii1I O00O0 . itr_rlocs . append ( iI11 ) if 41 - 41: ooOoO0o . i1IIi * iIii1I11I1II1 - I1IiiI if ( oOOo == None or oOOo . is_ipv6
import os import re import datetime import json import hashlib from google.cloud import translate_v2 as translate # --------------------------------------------------------------------------------------------------------------------------------- class TextTranslate(): def __init__(self, lang) -> None: self.text_ids = {} self.text_id = None self.source_language = lang self.load_proj() def load_proj(self): proj_filename = os.path.join(os.path.dirname(__file__), "text_translate.json") if os.path.exists(proj_filename): with open(proj_filename, 'r', encoding='utf-8') as fp: self.text_ids = json.load(fp) def save_proj(self): with open(os.path.join(os.path.dirname(__file__), "text_translate.json"), 'w', encoding='utf-8') as fp: json.dump(self.text_ids, fp, indent=2, ensure_ascii=False, default=str) @staticmethod def get_text_hash(text): return str(int(hashlib.sha512(text.encode('utf-8')).hexdigest()[:16], 16)) def upsert_text(self, text): text_id = self.get_text_hash(text) if text_id not in self.text_ids: self.text_ids[text_id] = { "text": text, "source_language": self.source_language } else: self.text_ids[text_id]["text"] = text def add_text(self, text, update=False): if update: del self.text_ids[self.text_id] if len(text) < 1: return re_links = re.compile(r'!*\[(.*?)\]\(.*?\)\S*') search = re_links.search(text) while search: if len(search.group(1)): self.upsert_text(search.group(1)) re_desc = re.compile(r'\((.*?)\s+"(.*?)"\)') search_desc = re_desc.search(text) if search_desc and len(search_desc.group(2)): self.upsert_text(search_desc.group(2)) if search.group(0) == text: return if search.group(0).startswith('!') or search.group(0).endswith(':'): text = (text[0:search.regs[0][0]] + text[search.regs[0][1]:]).lstrip() else: if len(text[0:search.regs[0][0]]): self.upsert_text(text[0:search.regs[0][0]]) text = text[search.regs[0][1]:].lstrip() search = re_links.search(text) if len(text) < 1: return re_links = re.compile(r'```.*```') search = re_links.search(text) while 
search: text = (text[0:search.regs[0][0]] + text[search.regs[0][1]:]).lstrip() search = re_links.search(text) if len(text) < 1: return self.text_id = self.get_text_hash(text) self.upsert_text(text) def get_translated_text(self, text, lang): text_id = self.get_text_hash(text) if text_id not in self.text_ids: return None return self.text_ids[text_id][lang]["text"] def translate_text(self, target): """Translates text into the target language. Target must be an ISO 639-1 language code. See https://g.co/cloud/translate/v2/translate-reference#supported_languages """ os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/alexsobral/.ssh/gcloud-service-account.json' translate_client = translate.Client() for text_id in self.text_ids: if target not in self.text_ids[text_id]: result = translate_client.translate( self.text_ids[text_id]["text"], source_language=self.source_language, target_language=target ) self.text_ids[text_id][target] = { "text": result["translatedText"], "date": datetime.datetime.now(), "approved": False } # --------------------------------------------------------------------------------------------------------------------------------- def flatten_text(file_content): idx = 0 state = None while idx < len(file_content) - 1: line = file_content[idx].lstrip().replace('\n', '') if line == "---": if state is None: state = "header" idx += 1 continue elif state == "header": state = "body" idx += 1 continue if state == "header": re_links = re.compile(r'^description:\s*>*"*(.*)"*') search = re_links.search(line) if search: text = f'description: {search.group(1)}' next_line = file_content[idx + 1].replace('\n', '') while next_line.startswith(' '): text += next_line del file_content[idx + 1] if idx < len(file_content) - 1: next_line = file_content[idx + 1].replace('\n', '').lstrip() file_content[idx] = text + '\n' idx += 1 continue if len(line) < 1 or line.startswith('#') or line.startswith('|') or line.startswith('{'): state = "body" idx += 1 continue if state == 
"code": if line.startswith('```'): state = "body" idx += 1 continue if line.startswith('```'): state = "code" idx += 1 continue re_search = re.compile(r'^(\s*[\-*\d]+\.*\s+)(.*)') search = re_search.search(line) if search: # if line.startswith('- ') or line.startswith('* '): text = file_content[idx].replace('\n', '') next_line = file_content[idx + 1].replace('\n', '').lstrip() next_search = re_search.search(next_line) while len(next_line) > 0 and not next_search and idx < len(file_content) - 1: text += ' ' + next_line del file_content[idx + 1] if idx < len(file_content) - 1: next_line = file_content[idx + 1].replace('\n', '').lstrip() file_content[idx] = text + '\n' idx += 1 continue if line.startswith('> '): text = line next_line = file_content[idx + 1].replace('\n', '').lstrip() while len(next_line) > 0 and next_line[0] == '>' and idx < len(file_content) - 1: text += ' ' + next_line[1:].lstrip() del file_content[idx + 1] if idx < len(file_content) - 1: next_line = file_content[idx + 1].replace('\n', '').lstrip() file_content[idx] = text + '\n' idx += 1 continue if state == "body": text = line next_line = file_content[idx + 1].replace('\n', '').lstrip() # next_search = re_search.search(next_line) while len(next_line) > 0 and next_line[0] not in ['-', '*', '#', '>', '1', '{'] and idx < len(file_content) - 1: text += ' ' + next_line del file_content[idx + 1] if idx < len(file_content) - 1: next_line = file_content[idx + 1].replace('\n', '').lstrip() file_content[idx] = text + '\n' idx += 1 continue return file_content # --------------------------------------------------------------------------------------------------------------------------------- def extract_text(file_content, text_trans): state = None for line in file_content: line = line.lstrip().replace('\n', '') if line == "---": if state is None: state = "header" continue elif state == "header": state = "body" continue if state == "header": re_links = re.compile(r'^title:\s*"(.*?)"') search = 
re_links.search(line) if search: text_trans.add_text(search.group(1)) continue re_links = re.compile(r'^linkTitle:\s*"(.*?)"') search = re_links.search(line) if search: text_trans.add_text(search.group(1)) continue re_links = re.compile(r'^description:\s*>*"*(.*)"*') search = re_links.search(line) if search: text_trans.add_text(search.group(1)) continue continue if len(line) < 1: state = "body" continue if state == "code": if line.startswith('```'): state = "body" continue if line.startswith('```'): state = "code" continue re_links = re.compile(r'<!--(.*?)-->') search = re_links.search(line) if search: line = (line[0:search.regs[0][0]] + line[search.regs[0][1]:]).lstrip() if len(line) < 1: continue re_links = re.compile(r'\{\{< /?note >\}\}') search = re_links.search(line) while search: line = (line[0:search.regs[0][0]] + line[search.regs[0][1]:]).lstrip() search = re_links.search(line) if len(line) < 1: continue re_links = re.compile(r'\{\{< /?tooltip >\}\}') search = re_links.search(line) while search: line = (line[0:search.regs[0][0]] + line[search.regs[0][1]:]).lstrip() search = re_links.search(line) if len(line) < 1: continue if line[0] in ['!', '[', '|']: state = "body" continue if line[0] == '#': text = line[line.find(' ') + 1:].lstrip() text_trans.add_text(text) continue if line.startswith('- ') or line.startswith('* ') or line.startswith('> '): text = line[line.find(' ') + 1:].lstrip() text_trans.add_text(text) continue if state == "body": text = line text_trans.add_text(text) continue raise Exception(f"Unknow state: {state}") # --------------------------------------------------------------------------------------------------------------------------------- def extract_ids(source_language, text_trans): source_path = os.path.join(os.path.dirname(__file__), f'content/{source_language}/docs') for root, dirs, files in os.walk(source_path): for filename in files: if filename.endswith('.md'): print(f'Extracting file: {os.path.join(root, filename)}') with 
open(os.path.join(root, filename), 'r', encoding='utf-8') as fp: file_text = fp.readlines() flatten_text(file_text) extract_text(file_text, text_trans) # --------------------------------------------------------------------------------------------------------------------------------- def translate_text(file_content, target_language, text_trans): dest_content = [] state = None for input_line in file_content: line = input_line.lstrip().replace('\n', '') if line == "---": if state is None: state = "header" dest_content.append(input_line) continue elif state == "header": dest_content.append(input_line) state = "body" continue if state == "header": re_links = re.compile(r'^title:\s*"(.*?)"') search = re_links.search(line) if search: trans_text = text_trans.get_translated_text(search.group(1), target_language) dest_content.append(f'title: "{trans_text}"\n') continue re_links = re.compile(r'^linkTitle:\s*"(.*?)"') search = re_links.search(line) if search: trans_text = text_trans.get_translated_text(search.group(1), target_language) dest_content.append(f'linkTitle: "{trans_text}"\n') continue re_links = re.compile(r'^description:\s*"*(.*)"*') search = re_links.search(line) if search: trans_text = text_trans.get_translated_text(search.group(1), target_language) dest_content.append(f"description: >\n") dest_content.append(f" {trans_text}\n") continue dest_content.append(input_line) continue if len(line) < 1: state = "body" dest_content.append('\n') continue re_links = re.compile(r'<!--(.*?)-->') search = re_links.search(line) while search: dest_content.append(f'{line[search.regs[0][0]:search.regs[0][1]]}\n') line = (line[0:search.regs[0][0]] + line[search.regs[0][1]:]).lstrip() search = re_links.search(line) if len(line) < 1: continue re_links = re.compile(r'\{\{< note >\}\}') search = re_links.search(line) if search: if search.regs[0][0] != 0: raise Exception("Note not first position") dest_content.append(f'{line[search.regs[0][0]:search.regs[0][1]]}\n') line = 
(line[0:search.regs[0][0]] + line[search.regs[0][1]:]).lstrip() re_links = re.compile(r'\{\{< /note >\}\}') search = re_links.search(line) if search: text = line[0:search.regs[0][0]] trans_text = text_trans.get_translated_text(text, target_language) if trans_text: dest_content.append(f'{trans_text}\n') dest_content.append(f'{line[search.regs[0][0]:search.regs[0][1]]}\n') line = (line[search.regs[0][1]:]).lstrip() if line: raise Exception("End Note not last position") re_links = re.compile(r'\{\{< tooltip >\}\}') search = re_links.search(line) if search: if search.regs[0][0] != 0: raise Exception("Note not first position") dest_content.append(f'{line[search.regs[0][0]:search.regs[0][1]]}\n') line = (line[0:search.regs[0][0]] + line[search.regs[0][1]:]).lstrip() re_links = re.compile(r'\{\{< /tooltip >\}\}') search = re_links.search(line) if search: text = line[0:search.regs[0][0]] trans_text = text_trans.get_translated_text(text, target_language) if trans_text: dest_content.append(f'{trans_text}\n') dest_content.append(f'{line[search.regs[0][0]:search.regs[0][1]]}\n') line = (line[search.regs[0][1]:]).lstrip() if line: raise Exception("End Note not last position") if len(line) < 1: continue if line[0] in ['|']: dest_content.append(input_line) state = "body" continue if state == "code": dest_content.append(input_line) if line.startswith('```'): state = "body" continue if line.startswith('```'): dest_content.append(input_line) state = "code" continue re_search = re.compile(r'^(\s*[\-*>#]+\s+)(.*)') search = re_search.search(input_line) if search: dest_content.append(search.group(1)) line = search.group(2) state = "continue" re_links = re.compile(r'(!*)\[(.*?)\]\((.*?)\)') search = re_links.search(line) links = [] while search: alt_text = search.group(2) if len(alt_text): trans_text = text_trans.get_translated_text(alt_text, target_language) if trans_text: alt_text = trans_text desc = '' re_desc = re.compile(r'\((.*)\s"(.*)"\)') search_desc = 
re_desc.search(search.group(3)) if search_desc and len(search_desc.group(2)): desc = search_desc.group(2) trans_text = text_trans.get_translated_text(desc, target_language) if trans_text: desc = trans_text links.append(search.group(1) + '[' + alt_text + '](' + search_desc.group(1) + ' "' + desc + '")') else: links.append(search.group(1) + '[' + alt_text + '](' + search.group(3) + ')') if search.group(0).startswith('!') or search.group(0).endswith(':'): line = (line[0:search.regs[0][0]] + line[search.regs[0][1]:]).lstrip() search = re_links.search(line, search.regs[0][1]) else: if len(line[0:search.regs[0][0]]): trans_text = text_trans.get_translated_text(line[0:search.regs[0][0]], target_language) if trans_text: if state == 'body': dest_content.append(f'{trans_text} ') state = "continue" elif state == 'continue': dest_content[-1] += trans_text line = links[-1] + ' ' + line[search.regs[0][1]:].lstrip() search = re_links.search(line, len(links[-1])) while links: if state == "body": dest_content.append(f'{links.pop()}\n') elif state == 'continue': dest_content[-1] += links.pop() if len(line) < 1: continue if state == "body": text = line trans_text = text_trans.get_translated_text(text, target_language) if trans_text: dest_content.append(f'{trans_text}\n') continue elif state == 'continue': text = line trans_text
global 'http://www.leboncoin.fr/', # Why: #181 in Alexa global 'http://www.goo.ne.jp/', # Why: #182 in Alexa global 'http://www.liveinternet.ru/', # Why: #183 in Alexa global 'http://www.google.co.ve/', # Why: #184 in Alexa global 'http://www.56.com/', # Why: #185 in Alexa global 'http://www.google.com.vn/', # Why: #186 in Alexa global 'http://www.google.gr/', # Why: #187 in Alexa global 'http://www.comcast.net/', # Why: #188 in Alexa global 'http://www.torrentz.eu/', # Why: #189 in Alexa global 'http://www.etsy.com/', # Why: #190 in Alexa global 'http://www.orange.fr/', # Why: #191 in Alexa global 'http://www.systweak.com/', # Why: #192 in Alexa global 'http://www.onet.pl/', # Why: #193 in Alexa global 'http://www.wellsfargo.com/', # Why: #194 in Alexa global 'http://pconline.com.cn/', # Why: #195 in Alexa global 'http://www.letv.com/', # Why: #196 in Alexa global 'http://www.goodgamestudios.com/', # Why: #197 in Alexa global 'http://www.secureserver.net/', # Why: #198 in Alexa global 'http://www.allegro.pl/', # Why: #199 in Alexa global 'http://www.themeforest.net/', # Why: #200 in Alexa global 'http://www.china.com.cn/', # Why: #201 in Alexa global 'http://www.tripadvisor.com/', # Why: #202 in Alexa global 'http://www.web.de/', # Why: #203 in Alexa global 'http://www.answers.com/', # Why: #204 in Alexa global 'http://www.amazon.ca/', # Why: #205 in Alexa global 'http://www.mozilla.org/', # Why: #206 in Alexa global 'http://www.guardian.co.uk/', # Why: #207 in Alexa global 'http://www.stumbleupon.com/', # Why: #208 in Alexa global 'http://www.hardsextube.com/', # Why: #209 in Alexa global 'http://www.espncricinfo.com/', # Why: #210 in Alexa global 'http://www.gmx.net/', # Why: #211 in Alexa global 'http://www.photobucket.com/', # Why: #212 in Alexa global 'http://www.ehow.com/', # Why: #213 in Alexa global 'http://www.rediff.com/', # Why: #214 in Alexa global 'http://www.popads.net/', # Why: #215 in Alexa global 'http://www.wikihow.com/', # Why: #216 in Alexa 
global 'http://www.search-results.com/', # Why: #217 in Alexa global 'http://www.fiverr.com/', # Why: #218 in Alexa global 'http://www.google.com.ua/', # Why: #219 in Alexa global 'http://www.files.wordpress.com/', # Why: #220 in Alexa global 'http://www.onlineaway.net/', # Why: #221 in Alexa global 'http://www.nbcnews.com/', # Why: #222 in Alexa global 'http://www.google.com.co/', # Why: #223 in Alexa global 'http://www.hootsuite.com/', # Why: #224 in Alexa global 'http://www.4dsply.com/', # Why: #225 in Alexa global 'http://www.google.ro/', # Why: #227 in Alexa global 'http://www.sourceforge.net/', # Why: #228 in Alexa global 'http://www.cnzz.com/', # Why: #229 in Alexa global 'http://www.java.com/', # Why: #230 in Alexa global 'http://www.hudong.com/', # Why: #231 in Alexa global 'http://www.ucoz.ru/', # Why: #232 in Alexa global 'http://www.tudou.com/', # Why: #233 in Alexa global 'http://www.addthis.com/', # Why: #234 in Alexa global 'http://zol.com.cn/', # Why: #235 in Alexa global 'http://www.google.com.ng/', # Why: #236 in Alexa global 'http://www.soundcloud.com/', # Why: #237 in Alexa global 'http://www.onclickads.net/', # Why: #238 in Alexa global 'http://www.google.com.ph/', # Why: #239 in Alexa global 'http://www.dmm.co.jp/', # Why: #240 in Alexa global 'http://www.reference.com/', # Why: #241 in Alexa global 'http://www.google.be/', # Why: #242 in Alexa global 'http://www.wp.pl/', # Why: #243 in Alexa global 'http://www.interbiz.me/', # Why: #244 in Alexa global 'http://www.beeg.com/', # Why: #245 in Alexa global 'http://www.rambler.ru/', # Why: #246 in Alexa global 'http://www.sweetim.com/', # Why: #247 in Alexa global 'http://www.aweber.com/', # Why: #248 in Alexa global 'http://www.google.com.my/', # Why: #249 in Alexa global 'http://www.pandora.com/', # Why: #250 in Alexa global 'http://www.w3schools.com/', # Why: #251 in Alexa global 'http://www.pengyou.com/', # Why: #252 in Alexa global 'http://www.archive.org/', # Why: #253 in Alexa global 
'http://www.qvo6.com/', # Why: #254 in Alexa global 'http://www.bet365.com/', # Why: #255 in Alexa global 'http://www.etao.com/', # Why: #256 in Alexa global 'http://www.lollipop-network.com/', # Why: #257 in Alexa global 'http://www.qtrax.com/', # Why: #258 in Alexa global 'http://www.naver.jp/', # Why: #259 in Alexa global 'http://www.google.se/', # Why: #260 in Alexa global 'http://www.google.dz/', # Why: #261 in Alexa global 'http://www.usatoday.com/', # Why: #262 in Alexa global 'http://www.zillow.com/', # Why: #263 in Alexa global 'http://www.goal.com/', # Why: #264 in Alexa global 'http://www.avito.ru/', # Why: #265 in Alexa global 'http://kaixin001.com/', # Why: #266 in Alexa global 'http://yesky.com/', # Why: #267 in Alexa global 'http://www.mobile01.com/', # Why: #268 in Alexa global 'http://www.soufun.com/', # Why: #269 in Alexa global 'http://www.tagged.com/', # Why: #270 in Alexa global 'http://www.warriorforum.com/', # Why: #271 in Alexa global 'http://www.statcounter.com/', # Why: #272 in Alexa global 'http://www.google.com.pe/', # Why: #273 in Alexa global 'http://www.libero.it/', # Why: #274 in Alexa global 'http://www.thefreedictionary.com/', # Why: #275 in Alexa global 'http://www.soku.com/', # Why: #276 in Alexa global 'http://www.incredibar.com/', # Why: #277 in Alexa global 'http://www.kaskus.co.id/', # Why: #278 in Alexa global 'http://www.likes.com/', # Why: #279 in Alexa global 'http://www.weebly.com/', # Why: #280 in Alexa global 'http://iqiyi.com/', # Why: #281 in Alexa global 'http://www.pch.com/', # Why: #282 in Alexa global 'http://www.ameba.jp/', # Why: #284 in Alexa global 'http://www.samsung.com/', # Why: #285 in Alexa global 'http://www.linkbucks.com/', # Why: #286 in Alexa global 'http://www.uploaded.net/', # Why: #287 in Alexa global 'http://www.bild.de/', # Why: #288 in Alexa global 'http://www.google.com.bd/', # Why: #289 in Alexa global 'http://www.google.at/', # Why: #290 in Alexa global 'http://www.webcrawler.com/', # Why: 
#291 in Alexa global 'http://www.t-online.de/', # Why: #292 in Alexa global 'http://www.iminent.com/', # Why: #293 in Alexa global 'http://www.google.pt/', # Why: #294 in Alexa global 'http://www.detik.com/', # Why: #295 in Alexa global 'http://www.ganji.com/', # Why: #296 in Alexa global 'http://www.milliyet.com.tr/', # Why: #297 in Alexa global 'http://www.bleacherreport.com/', # Why: #298 in Alexa global 'http://www.forbes.com/', # Why: #299 in Alexa global 'http://www.twoo.com/', # Why: #300 in Alexa global 'http://www.olx.in/', # Why: #301 in Alexa global 'http://www.mercadolivre.com.br/', # Why: #302 in Alexa global 'http://www.hurriyet.com.tr/', # Why: #303 in Alexa global 'http://www.pof.com/', # Why: #304 in Alexa global 'http://www.wsj.com/', # Why: #305 in Alexa global 'http://www.hostgator.com/', # Why: #306 in Alexa global 'http://www.naver.com/', # Why: #307 in Alexa global 'http://www.putlocker.com/', # Why: #308 in Alexa global 'http://www.varzesh3.com/', # Why: #309 in Alexa global 'http://www.rutracker.org/', # Why: #311 in Alexa global 'http://www.optmd.com/', # Why: #312 in Alexa global 'http://www.youm7.com/', # Why: #313 in Alexa global 'http://www.google.cl/', # Why: #314 in Alexa global 'http://www.ikea.com/', # Why: #316 in Alexa global 'http://www.4399.com/', # Why: #317 in Alexa global 'http://www.salesforce.com/', # Why: #318 in Alexa global 'http://www.scribd.com/', # Why: #319 in Alexa global 'http://www.google.com.sg/', # Why: #320 in Alexa global 'http://it168.com/', # Why: #321 in Alexa global 'http://www.goodreads.com/', # Why: #322 in Alexa global 'http://www.target.com/', # Why: #323 in Alexa global 'http://www.xunlei.com/', # Why: #324 in Alexa global 'http://www.hulu.com/', # Why: #325 in Alexa global 'http://www.github.com/', # Why: #326 in Alexa global 'http://www.hp.com/', # Why: #327 in Alexa global 'http://www.buzzfeed.com/', # Why: #328 in Alexa global 'http://www.google.ch/', # Why: #329 in Alexa global 
'http://www.youdao.com/', # Why: #330 in Alexa global 'http://www.blogspot.com.es/', # Why: #331 in Alexa global 'http://so.com/', # Why: #332 in Alexa global 'http://www.ups.com/', # Why: #333 in Alexa global 'http://www.google.co.kr/', # Why: #334 in Alexa global 'http://www.extratorrent.com/', # Why: #335 in Alexa global 'http://www.match.com/', # Why: #336 in Alexa global 'http://www.seznam.cz/', # Why: #337 in Alexa global 'http://autohome.com.cn/', # Why: #338 in Alexa global 'http://www.naukri.com/', # Why: #339 in Alexa global 'http://www.gmw.cn/', # Why: #340 in Alexa global 'http://www.drtuber.com/', # Why: #341 in Alexa global 'http://www.spiegel.de/', # Why: #342 in Alexa global 'http://www.marca.com/', # Why: #343 in Alexa global 'http://www.ign.com/', # Why: #344 in Alexa global 'http://www.domaintools.com/', # Why: #345 in Alexa global 'http://www.free.fr/', # Why: #346 in Alexa global 'http://www.telegraph.co.uk/', # Why: #347 in Alexa global 'http://www.mypcbackup.com/', # Why: #348 in Alexa global 'http://www.kakaku.com/', # Why: #349 in Alexa global 'http://www.imageshack.us/', # Why: #350 in Alexa global 'http://www.reuters.com/', # Why: #351 in Alexa global 'http://www.ndtv.com/', # Why: #352 in Alexa global 'http://www.ig.com.br/', # Why: #353 in Alexa global 'http://www.bestbuy.com/', # Why: #354 in Alexa global 'http://www.glispa.com/', # Why: #355 in Alexa global 'http://www.quikr.com/', # Why: #356 in Alexa global 'http://www.deadlyblessing.com/', # Why: #357 in Alexa global 'http://www.wix.com/', # Why: #358 in Alexa global 'http://xcar.com.cn/', # Why: #359 in Alexa global 'http://paipai.com/', # Why: #360 in Alexa global 'http://www.ebay.com.au/', # Why: #361 in Alexa global 'http://www.yandex.ua/', # Why: #362 in Alexa global 'http://chinanews.com/', # Why: #363 in Alexa global 'http://www.clixsense.com/', # Why: #364 in Alexa global 'http://nih.gov/', # Why: #365 in Alexa global 'http://www.aili.com/', # Why: #366 in Alexa global 
'http://www.zing.vn/', # Why: #367 in
<reponame>rsdoherty/azure-sdk-for-python # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- try: from ._models_py3 import AFDDomain from ._models_py3 import AFDDomainHttpsParameters from ._models_py3 import AFDDomainListResult from ._models_py3 import AFDDomainProperties from ._models_py3 import AFDDomainUpdateParameters from ._models_py3 import AFDDomainUpdatePropertiesParameters from ._models_py3 import AFDEndpoint from ._models_py3 import AFDEndpointListResult from ._models_py3 import AFDEndpointProperties from ._models_py3 import AFDEndpointPropertiesUpdateParameters from ._models_py3 import AFDEndpointUpdateParameters from ._models_py3 import AFDOrigin from ._models_py3 import AFDOriginGroup from ._models_py3 import AFDOriginGroupListResult from ._models_py3 import AFDOriginGroupProperties from ._models_py3 import AFDOriginGroupUpdateParameters from ._models_py3 import AFDOriginGroupUpdatePropertiesParameters from ._models_py3 import AFDOriginListResult from ._models_py3 import AFDOriginProperties from ._models_py3 import AFDOriginUpdateParameters from ._models_py3 import AFDOriginUpdatePropertiesParameters from ._models_py3 import AFDStateProperties from ._models_py3 import AfdErrorResponse from ._models_py3 import AfdPurgeParameters from ._models_py3 import CacheExpirationActionParameters from ._models_py3 import CacheKeyQueryStringActionParameters from ._models_py3 import CdnCertificateSourceParameters from ._models_py3 import CdnEndpoint from ._models_py3 import CdnManagedHttpsParameters from ._models_py3 import CdnWebApplicationFirewallPolicy from 
._models_py3 import CdnWebApplicationFirewallPolicyList from ._models_py3 import CdnWebApplicationFirewallPolicyPatchParameters from ._models_py3 import Certificate from ._models_py3 import CheckNameAvailabilityInput from ._models_py3 import CheckNameAvailabilityOutput from ._models_py3 import CidrIpAddress from ._models_py3 import Components18OrqelSchemasWafmetricsresponsePropertiesSeriesItemsPropertiesDataItems from ._models_py3 import Components1Gs0LlpSchemasMetricsresponsePropertiesSeriesItemsPropertiesDataItems from ._models_py3 import ComponentsKpo1PjSchemasWafrankingsresponsePropertiesDataItemsPropertiesMetricsItems from ._models_py3 import CompressionSettings from ._models_py3 import ContinentsResponse from ._models_py3 import ContinentsResponseContinentsItem from ._models_py3 import ContinentsResponseCountryOrRegionsItem from ._models_py3 import CookiesMatchConditionParameters from ._models_py3 import CustomDomain from ._models_py3 import CustomDomainHttpsParameters from ._models_py3 import CustomDomainListResult from ._models_py3 import CustomDomainParameters from ._models_py3 import CustomRule from ._models_py3 import CustomRuleList from ._models_py3 import CustomerCertificate from ._models_py3 import CustomerCertificateParameters from ._models_py3 import DeepCreatedOrigin from ._models_py3 import DeepCreatedOriginGroup from ._models_py3 import DeliveryRule from ._models_py3 import DeliveryRuleAction from ._models_py3 import DeliveryRuleCacheExpirationAction from ._models_py3 import DeliveryRuleCacheKeyQueryStringAction from ._models_py3 import DeliveryRuleCondition from ._models_py3 import DeliveryRuleCookiesCondition from ._models_py3 import DeliveryRuleHttpVersionCondition from ._models_py3 import DeliveryRuleIsDeviceCondition from ._models_py3 import DeliveryRulePostArgsCondition from ._models_py3 import DeliveryRuleQueryStringCondition from ._models_py3 import DeliveryRuleRemoteAddressCondition from ._models_py3 import 
DeliveryRuleRequestBodyCondition from ._models_py3 import DeliveryRuleRequestHeaderAction from ._models_py3 import DeliveryRuleRequestHeaderCondition from ._models_py3 import DeliveryRuleRequestMethodCondition from ._models_py3 import DeliveryRuleRequestSchemeCondition from ._models_py3 import DeliveryRuleRequestUriCondition from ._models_py3 import DeliveryRuleResponseHeaderAction from ._models_py3 import DeliveryRuleUrlFileExtensionCondition from ._models_py3 import DeliveryRuleUrlFileNameCondition from ._models_py3 import DeliveryRuleUrlPathCondition from ._models_py3 import DomainValidationProperties from ._models_py3 import EdgeNode from ._models_py3 import EdgenodeResult from ._models_py3 import Endpoint from ._models_py3 import EndpointListResult from ._models_py3 import EndpointProperties from ._models_py3 import EndpointPropertiesUpdateParameters from ._models_py3 import EndpointPropertiesUpdateParametersDeliveryPolicy from ._models_py3 import EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLink from ._models_py3 import EndpointUpdateParameters from ._models_py3 import ErrorResponse from ._models_py3 import GeoFilter from ._models_py3 import HeaderActionParameters from ._models_py3 import HealthProbeParameters from ._models_py3 import HttpErrorRangeParameters from ._models_py3 import HttpVersionMatchConditionParameters from ._models_py3 import IpAddressGroup from ._models_py3 import IsDeviceMatchConditionParameters from ._models_py3 import KeyVaultCertificateSourceParameters from ._models_py3 import KeyVaultSigningKeyParameters from ._models_py3 import LoadBalancingSettingsParameters from ._models_py3 import LoadParameters from ._models_py3 import ManagedCertificate from ._models_py3 import ManagedCertificateParameters from ._models_py3 import ManagedRuleDefinition from ._models_py3 import ManagedRuleGroupDefinition from ._models_py3 import ManagedRuleGroupOverride from ._models_py3 import ManagedRuleOverride from ._models_py3 import 
ManagedRuleSet from ._models_py3 import ManagedRuleSetDefinition from ._models_py3 import ManagedRuleSetDefinitionList from ._models_py3 import ManagedRuleSetList from ._models_py3 import MatchCondition from ._models_py3 import MetricsResponse from ._models_py3 import MetricsResponseSeriesItem from ._models_py3 import MetricsResponseSeriesPropertiesItemsItem from ._models_py3 import Operation from ._models_py3 import OperationDisplay from ._models_py3 import OperationsListResult from ._models_py3 import Origin from ._models_py3 import OriginGroup from ._models_py3 import OriginGroupListResult from ._models_py3 import OriginGroupOverrideAction from ._models_py3 import OriginGroupOverrideActionParameters from ._models_py3 import OriginGroupProperties from ._models_py3 import OriginGroupUpdateParameters from ._models_py3 import OriginGroupUpdatePropertiesParameters from ._models_py3 import OriginListResult from ._models_py3 import OriginProperties from ._models_py3 import OriginUpdateParameters from ._models_py3 import OriginUpdatePropertiesParameters from ._models_py3 import PolicySettings from ._models_py3 import PostArgsMatchConditionParameters from ._models_py3 import Profile from ._models_py3 import ProfileListResult from ._models_py3 import ProfileUpdateParameters from ._models_py3 import ProxyResource from ._models_py3 import PurgeParameters from ._models_py3 import QueryStringMatchConditionParameters from ._models_py3 import RankingsResponse from ._models_py3 import RankingsResponseTablesItem from ._models_py3 import RankingsResponseTablesPropertiesItemsItem from ._models_py3 import RankingsResponseTablesPropertiesItemsMetricsItem from ._models_py3 import RateLimitRule from ._models_py3 import RateLimitRuleList from ._models_py3 import RemoteAddressMatchConditionParameters from ._models_py3 import RequestBodyMatchConditionParameters from ._models_py3 import RequestHeaderMatchConditionParameters from ._models_py3 import RequestMethodMatchConditionParameters 
from ._models_py3 import RequestSchemeMatchConditionParameters from ._models_py3 import RequestUriMatchConditionParameters from ._models_py3 import Resource from ._models_py3 import ResourceReference from ._models_py3 import ResourceUsage from ._models_py3 import ResourceUsageListResult from ._models_py3 import ResourcesResponse from ._models_py3 import ResourcesResponseCustomDomainsItem from ._models_py3 import ResourcesResponseEndpointsItem from ._models_py3 import ResourcesResponseEndpointsPropertiesItemsItem from ._models_py3 import ResponseBasedOriginErrorDetectionParameters from ._models_py3 import Route from ._models_py3 import RouteListResult from ._models_py3 import RouteProperties from ._models_py3 import RouteUpdateParameters from ._models_py3 import RouteUpdatePropertiesParameters from ._models_py3 import Rule from ._models_py3 import RuleListResult from ._models_py3 import RuleProperties from ._models_py3 import RuleSet from ._models_py3 import RuleSetListResult from ._models_py3 import RuleSetProperties from ._models_py3 import RuleUpdateParameters from ._models_py3 import RuleUpdatePropertiesParameters from ._models_py3 import Secret from ._models_py3 import SecretListResult from ._models_py3 import SecretParameters from ._models_py3 import SecretProperties from ._models_py3 import SecurityPolicy from ._models_py3 import SecurityPolicyListResult from ._models_py3 import SecurityPolicyParameters from ._models_py3 import SecurityPolicyProperties from ._models_py3 import SecurityPolicyWebApplicationFirewallAssociation from ._models_py3 import SecurityPolicyWebApplicationFirewallParameters from ._models_py3 import SharedPrivateLinkResourceProperties from ._models_py3 import Sku from ._models_py3 import SsoUri from ._models_py3 import SupportedOptimizationTypesListResult from ._models_py3 import SystemData from ._models_py3 import TrackedResource from ._models_py3 import UrlFileExtensionMatchConditionParameters from ._models_py3 import 
UrlFileNameMatchConditionParameters from ._models_py3 import UrlPathMatchConditionParameters from ._models_py3 import UrlRedirectAction from ._models_py3 import UrlRedirectActionParameters from ._models_py3 import UrlRewriteAction from ._models_py3 import UrlRewriteActionParameters from ._models_py3 import UrlSigningAction from ._models_py3 import UrlSigningActionParameters from ._models_py3 import UrlSigningKey from ._models_py3 import UrlSigningKeyParameters from ._models_py3 import UrlSigningParamIdentifier from ._models_py3 import Usage from ._models_py3 import UsageName from ._models_py3 import UsagesListResult from ._models_py3 import UserManagedHttpsParameters from ._models_py3 import ValidateCustomDomainInput from ._models_py3 import ValidateCustomDomainOutput from ._models_py3 import ValidateProbeInput from ._models_py3 import ValidateProbeOutput from ._models_py3 import ValidateSecretInput from ._models_py3 import ValidateSecretOutput from ._models_py3 import ValidationToken from ._models_py3 import WafMetricsResponse from ._models_py3 import WafMetricsResponseSeriesItem from ._models_py3 import WafMetricsResponseSeriesPropertiesItemsItem from ._models_py3 import WafRankingsResponse from ._models_py3 import WafRankingsResponseDataItem except (SyntaxError, ImportError): from ._models import AFDDomain # type: ignore from ._models import AFDDomainHttpsParameters # type: ignore from ._models import AFDDomainListResult # type: ignore from ._models import AFDDomainProperties # type: ignore from ._models import AFDDomainUpdateParameters # type: ignore from ._models import AFDDomainUpdatePropertiesParameters # type: ignore from ._models import AFDEndpoint # type: ignore from ._models import AFDEndpointListResult # type: ignore from ._models import AFDEndpointProperties # type: ignore from ._models import AFDEndpointPropertiesUpdateParameters # type: ignore from ._models import AFDEndpointUpdateParameters # type: ignore from ._models import AFDOrigin # type: 
ignore from ._models import AFDOriginGroup # type: ignore from ._models import AFDOriginGroupListResult # type: ignore from ._models import AFDOriginGroupProperties # type: ignore from ._models import AFDOriginGroupUpdateParameters # type: ignore from ._models import AFDOriginGroupUpdatePropertiesParameters # type: ignore from ._models import AFDOriginListResult # type: ignore from ._models import AFDOriginProperties # type: ignore from ._models import AFDOriginUpdateParameters # type: ignore from ._models import AFDOriginUpdatePropertiesParameters # type: ignore from ._models import AFDStateProperties # type: ignore from ._models import AfdErrorResponse # type: ignore from ._models import AfdPurgeParameters # type: ignore from ._models import CacheExpirationActionParameters # type: ignore from ._models import CacheKeyQueryStringActionParameters # type: ignore from ._models import CdnCertificateSourceParameters # type: ignore from ._models import CdnEndpoint # type: ignore from ._models import CdnManagedHttpsParameters # type: ignore from ._models import CdnWebApplicationFirewallPolicy # type: ignore from ._models import CdnWebApplicationFirewallPolicyList # type: ignore from ._models import CdnWebApplicationFirewallPolicyPatchParameters # type: ignore from ._models import Certificate # type: ignore from ._models import CheckNameAvailabilityInput # type: ignore from ._models import CheckNameAvailabilityOutput # type: ignore from ._models import CidrIpAddress # type: ignore from ._models import Components18OrqelSchemasWafmetricsresponsePropertiesSeriesItemsPropertiesDataItems # type: ignore from ._models import Components1Gs0LlpSchemasMetricsresponsePropertiesSeriesItemsPropertiesDataItems # type: ignore from ._models import ComponentsKpo1PjSchemasWafrankingsresponsePropertiesDataItemsPropertiesMetricsItems # type: ignore from ._models import CompressionSettings # type: ignore from ._models import ContinentsResponse # type: ignore from ._models import 
ContinentsResponseContinentsItem # type: ignore from ._models import ContinentsResponseCountryOrRegionsItem # type: ignore from ._models import CookiesMatchConditionParameters # type: ignore from ._models import CustomDomain # type: ignore from ._models import CustomDomainHttpsParameters # type: ignore from ._models import CustomDomainListResult # type: ignore from ._models import CustomDomainParameters # type: ignore from ._models import CustomRule # type: ignore from ._models import CustomRuleList # type: ignore from ._models import CustomerCertificate # type: ignore from ._models import CustomerCertificateParameters # type: ignore from ._models import DeepCreatedOrigin # type: ignore from ._models import DeepCreatedOriginGroup # type: ignore from ._models import DeliveryRule #
1103305: "Raw Gargoyle's Halberd+5", 1103400: "Magic Gargoyle's Halberd", 1103401: "Magic Gargoyle's Halberd+1", 1103402: "Magic Gargoyle's Halberd+2", 1103403: "Magic Gargoyle's Halberd+3", 1103404: "Magic Gargoyle's Halberd+4", 1103405: "Magic Gargoyle's Halberd+5", 1103406: "Magic Gargoyle's Halberd+6", 1103407: "Magic Gargoyle's Halberd+7", 1103408: "Magic Gargoyle's Halberd+8", 1103409: "Magic Gargoyle's Halberd+9", 1103410: "Magic Gargoyle's Halberd+10", 1103500: "Ench. Gargoyle's Halberd", 1103501: "Ench. Gargoyle's Halberd+1", 1103502: "Ench. Gargoyle's Halberd+2", 1103503: "Ench. Gargoyle's Halberd+3", 1103504: "Ench. Gargoyle's Halberd+4", 1103505: "Ench. Gargoyle's Halberd+5", 1103600: "Divine Gargoyle's Halberd", 1103601: "Divine Gargoyle's Halberd+1", 1103602: "Divine Gargoyle's Halberd+2", 1103603: "Divine Gargoyle's Halberd+3", 1103604: "Divine Gargoyle's Halberd+4", 1103605: "Divine Gargoyle's Halberd+5", 1103606: "Divine Gargoyle's Halberd+6", 1103607: "Divine Gargoyle's Halberd+7", 1103608: "Divine Gargoyle's Halberd+8", 1103609: "Divine Gargoyle's Halberd+9", 1103610: "Div. 
Gargoyle's Halberd+10", 1103700: "Occult Gargoyle's Halberd", 1103701: "Occult Gargoyle's Halberd+1", 1103702: "Occult Gargoyle's Halberd+2", 1103703: "Occult Gargoyle's Halberd+3", 1103704: "Occult Gargoyle's Halberd+4", 1103705: "Occult Gargoyle's Halberd+5", 1103800: "Fire Gargoyle's Halberd", 1103801: "Fire Gargoyle's Halberd+1", 1103802: "Fire Gargoyle's Halberd+2", 1103803: "Fire Gargoyle's Halberd+3", 1103804: "Fire Gargoyle's Halberd+4", 1103805: "Fire Gargoyle's Halberd+5", 1103806: "Fire Gargoyle's Halberd+6", 1103807: "Fire Gargoyle's Halberd+7", 1103808: "Fire Gargoyle's Halberd+8", 1103809: "Fire Gargoyle's Halberd+9", 1103810: "Fire Gargoyle's Halberd+10", 1103900: "Chaos Gargoyle's Halberd", 1103901: "Chaos Gargoyle's Halberd+1", 1103902: "Chaos Gargoyle's Halberd+2", 1103903: "Chaos Gargoyle's Halberd+3", 1103904: "Chaos Gargoyle's Halberd+4", 1103905: "Chaos Gargoyle's Halberd+5", 1105000: "Black Knight Halberd", 1105001: "Black Knight Halberd+1", 1105002: "Black Knight Halberd+2", 1105003: "Black Knight Halberd+3", 1105004: "Black Knight Halberd+4", 1105005: "Black Knight Halberd+5", 1106000: "Lucerne", 1106001: "Lucerne +1", 1106002: "Lucerne +2", 1106003: "Lucerne +3", 1106004: "Lucerne +4", 1106005: "Lucerne +5", 1106006: "Lucerne +6", 1106007: "Lucerne +7", 1106008: "Lucerne +8", 1106009: "Lucerne +9", 1106010: "Lucerne +10", 1106011: "Lucerne +11", 1106012: "Lucerne +12", 1106013: "Lucerne +13", 1106014: "Lucerne +14", 1106015: "Lucerne +15", 1106100: "Crystal Lucerne", 1106101: "Crystal Lucerne +1", 1106102: "Crystal Lucerne +2", 1106103: "Crystal Lucerne +3", 1106104: "Crystal Lucerne +4", 1106105: "Crystal Lucerne +5", 1106200: "Lightning Lucerne", 1106201: "Lightning Lucerne +1", 1106202: "Lightning Lucerne +2", 1106203: "Lightning Lucerne +3", 1106204: "Lightning Lucerne +4", 1106205: "Lightning Lucerne +5", 1106300: "Raw Lucerne", 1106301: "Raw Lucerne +1", 1106302: "Raw Lucerne +2", 1106303: "Raw Lucerne +3", 1106304: "Raw Lucerne +4", 
1106305: "Raw Lucerne +5", 1106400: "Magic Lucerne", 1106401: "Magic Lucerne +1", 1106402: "Magic Lucerne +2", 1106403: "Magic Lucerne +3", 1106404: "Magic Lucerne +4", 1106405: "Magic Lucerne +5", 1106406: "Magic Lucerne +6", 1106407: "Magic Lucerne +7", 1106408: "Magic Lucerne +8", 1106409: "Magic Lucerne +9", 1106410: "Magic Lucerne +10", 1106500: "Enchanted Lucerne", 1106501: "Enchanted Lucerne +1", 1106502: "Enchanted Lucerne +2", 1106503: "Enchanted Lucerne +3", 1106504: "Enchanted Lucerne +4", 1106505: "Enchanted Lucerne +5", 1106600: "Divine Lucerne", 1106601: "Divine Lucerne +1", 1106602: "Divine Lucerne +2", 1106603: "Divine Lucerne +3", 1106604: "Divine Lucerne +4", 1106605: "Divine Lucerne +5", 1106606: "Divine Lucerne +6", 1106607: "Divine Lucerne +7", 1106608: "Divine Lucerne +8", 1106609: "Divine Lucerne +9", 1106610: "Divine Lucerne +10", 1106700: "Occult Lucerne", 1106701: "Occult Lucerne +1", 1106702: "Occult Lucerne +2", 1106703: "Occult Lucerne +3", 1106704: "Occult Lucerne +4", 1106705: "Occult Lucerne +5", 1106800: "Fire Lucerne", 1106801: "Fire Lucerne +1", 1106802: "Fire Lucerne +2", 1106803: "Fire Lucerne +3", 1106804: "Fire Lucerne +4", 1106805: "Fire Lucerne +5", 1106806: "Fire Lucerne +6", 1106807: "Fire Lucerne +7", 1106808: "Fire Lucerne +8", 1106809: "Fire Lucerne +9", 1106810: "Fire Lucerne +10", 1106900: "Chaos Lucerne", 1106901: "Chaos Lucerne +1", 1106902: "Chaos Lucerne +2", 1106903: "Chaos Lucerne +3", 1106904: "Chaos Lucerne +4", 1106905: "Chaos Lucerne +5", 1107000: "Scythe", 1107001: "Scythe+1", 1107002: "Scythe+2", 1107003: "Scythe+3", 1107004: "Scythe+4", 1107005: "Scythe+5", 1107006: "Scythe+6", 1107007: "Scythe+7", 1107008: "Scythe+8", 1107009: "Scythe+9", 1107010: "Scythe+10", 1107011: "Scythe+11", 1107012: "Scythe+12", 1107013: "Scythe+13", 1107014: "Scythe+14", 1107015: "Scythe+15", 1107100: "Crystal Scythe", 1107101: "Crystal Scythe+1", 1107102: "Crystal Scythe+2", 1107103: "Crystal Scythe+3", 1107104: "Crystal 
Scythe+4", 1107105: "Crystal Scythe+5", 1107200: "Lightning Scythe", 1107201: "Lightning Scythe+1", 1107202: "Lightning Scythe+2", 1107203: "Lightning Scythe+3", 1107204: "Lightning Scythe+4", 1107205: "Lightning Scythe+5", 1107300: "Raw Scythe", 1107301: "Raw Scythe+1", 1107302: "Raw Scythe+2", 1107303: "Raw Scythe+3", 1107304: "Raw Scythe+4", 1107305: "Raw Scythe+5", 1107400: "Magic Scythe", 1107401: "Magic Scythe+1", 1107402: "Magic Scythe+2", 1107403: "Magic Scythe+3", 1107404: "Magic Scythe+4", 1107405: "Magic Scythe+5", 1107406: "Magic Scythe+6", 1107407: "Magic Scythe+7", 1107408: "Magic Scythe+8", 1107409: "Magic Scythe+9", 1107410: "Magic Scythe+10", 1107500: "Enchanted Scythe", 1107501: "Enchanted Scythe+1", 1107502: "Enchanted Scythe+2", 1107503: "Enchanted Scythe+3", 1107504: "Enchanted Scythe+4", 1107505: "Enchanted Scythe+5", 1107600: "Divine Scythe", 1107601: "Divine Scythe+1", 1107602: "Divine Scythe+2", 1107603: "Divine Scythe+3", 1107604: "Divine Scythe+4", 1107605: "Divine Scythe+5", 1107606: "Divine Scythe+6", 1107607: "Divine Scythe+7", 1107608: "Divine Scythe+8", 1107609: "Divine Scythe+9", 1107610: "Divine Scythe+10", 1107700: "Occult Scythe", 1107701: "Occult Scythe+1", 1107702: "Occult Scythe+2", 1107703: "Occult Scythe+3", 1107704: "Occult Scythe+4", 1107705: "Occult Scythe+5", 1107800: "Fire Scythe", 1107801: "Fire Scythe+1", 1107802: "Fire Scythe+2", 1107803: "Fire Scythe+3", 1107804: "Fire Scythe+4", 1107805: "Fire Scythe+5", 1107806: "Fire Scythe+6", 1107807: "Fire Scythe+7", 1107808: "Fire Scythe+8", 1107809: "Fire Scythe+9", 1107810: "Fire Scythe+10", 1107900: "Chaos Scythe", 1107901: "Chaos Scythe+1", 1107902: "Chaos Scythe+2", 1107903: "Chaos Scythe+3", 1107904: "Chaos Scythe+4", 1107905: "Chaos Scythe+5", 1150000: "Great Scythe", 1150001: "Great Scythe+1", 1150002: "Great Scythe+2", 1150003: "Great Scythe+3", 1150004: "Great Scythe+4", 1150005: "Great Scythe+5", 1150006: "Great Scythe+6", 1150007: "Great Scythe+7", 1150008: "Great 
Scythe+8", 1150009: "Great Scythe+9", 1150010: "Great Scythe+10", 1150011: "Great Scythe+11", 1150012: "Great Scythe+12", 1150013: "Great Scythe+13", 1150014: "Great Scythe+14", 1150015: "Great Scythe+15", 1150100: "Crystal Great Scythe", 1150101: "Crystal Great Scythe+1", 1150102: "Crystal Great Scythe+2", 1150103: "Crystal Great Scythe+3", 1150104: "Crystal Great Scythe+4", 1150105: "Crystal Great Scythe+5", 1150200: "Lightning Great Scythe", 1150201: "Lightning Great Scythe+1", 1150202: "Lightning Great Scythe+2", 1150203: "Lightning Great Scythe+3", 1150204: "Lightning Great Scythe+4", 1150205: "Lightning Great Scythe+5", 1150300: "Raw Great Scythe", 1150301: "Raw Great Scythe+1", 1150302: "Raw Great Scythe+2", 1150303: "Raw Great Scythe+3", 1150304: "Raw Great Scythe+4", 1150305: "Raw Great Scythe+5", 1150400: "Magic Great Scythe", 1150401: "Magic Great Scythe+1", 1150402: "Magic Great Scythe+2", 1150403: "Magic Great Scythe+3", 1150404: "Magic Great Scythe+4", 1150405: "Magic Great Scythe+5", 1150406: "Magic Great Scythe+6", 1150407: "Magic Great Scythe+7", 1150408: "Magic Great Scythe+8", 1150409: "Magic Great Scythe+9", 1150410: "Magic Great Scythe+10", 1150500: "Enchanted Great Scythe", 1150501: "Enchanted Great Scythe+1", 1150502: "Enchanted Great Scythe+2", 1150503: "Enchanted Great Scythe+3", 1150504: "Enchanted Great Scythe+4", 1150505: "Enchanted Great Scythe+5", 1150600: "Divine Great Scythe", 1150601: "Divine Great Scythe+1", 1150602: "Divine Great Scythe+2", 1150603: "Divine Great Scythe+3", 1150604: "Divine Great Scythe+4", 1150605: "Divine Great
ncluster, nsample=len(elm_labels['Shot']), delta=.001, maxiter=20, metric=kmeans_distance, verbose=0 ) nlabel=[] [nlabel.append(len(np.where(klabels == i)[0])) for i in range(ncluster)] # print(centres, klabels, dist) # plt.figure() # plt.scatter(np.arange(len(klabels)),klabels) # plt.pause(0.001) # return np.sort(np.asarray(nlabel)) elif kmeans_from == 'mlampert': clusters=[] n_elm=to_be_clustered.shape[0] dist_matrix=np.zeros([n_elm,n_elm]) for i in range(n_elm): for j in range(n_elm): i_in=False j_in=False if kmeans_distance == 'correlation': x=to_be_clustered[i,:]-np.mean(to_be_clustered[i,:]) y=to_be_clustered[j,:]-np.mean(to_be_clustered[j,:]) dist_matrix[i,j]=np.sum(x*y)/np.sqrt(np.sum(x**2)*np.sum(y**2)) if kmeans_distance == 'cosine': dist_matrix[i,j]=np.sum(to_be_clustered[i,:]*to_be_clustered[j,:])/np.sqrt(np.sum(to_be_clustered[i,:]**2)*np.sum(to_be_clustered[j,:]**2)) if dist_matrix[i,j] > corr_threshold: for i_cluster in range(len(clusters)): if i in clusters[i_cluster]: i_in=i_cluster if j in clusters[i_cluster]: j_in=i_cluster if i_in == False and j_in == False: clusters.append([i]) if i != j: clusters[-1].append(j) if i_in != False and j_in == False: clusters[i_in].append(j) if j_in != False and i_in == False: clusters[j_in].append(i) klabels=np.zeros(n_elm) for i in range(len(clusters)): for j in range(len(clusters[i])): klabels[clusters[i][j]]=i-1 pass ncluster=int(np.max(klabels))+1 elm_labels['kmeans label']=klabels all_average_results=[] all_variance_results=[] plot_labels=[] label_number=[] for label in range(ncluster): average_results={'Velocity ccf':np.zeros([2*nwin,2]), 'Velocity str avg':np.zeros([2*nwin,2]), 'Velocity str max':np.zeros([2*nwin,2]), 'Frame similarity':np.zeros([2*nwin]), 'Correlation max':np.zeros([2*nwin]), 'Size avg':np.zeros([2*nwin,2]), 'Size max':np.zeros([2*nwin,2]), 'Position avg':np.zeros([2*nwin,2]), 'Position max':np.zeros([2*nwin,2]), 'Centroid avg':np.zeros([2*nwin,2]), 'Centroid max':np.zeros([2*nwin,2]), 
'COG avg':np.zeros([2*nwin,2]), 'COG max':np.zeros([2*nwin,2]), 'Area avg':np.zeros([2*nwin]), 'Area max':np.zeros([2*nwin]), 'Elongation avg':np.zeros([2*nwin]), 'Elongation max':np.zeros([2*nwin]), 'Angle avg':np.zeros([2*nwin]), 'Angle max':np.zeros([2*nwin]), 'Str number':np.zeros([2*nwin]), } if 'GPI Dalpha' in velocity_results.keys(): average_results['GPI Dalpha']=np.zeros([2*nwin]) notnan_counter=copy.deepcopy(average_results) label_boolean=np.where(klabels == label) variance_results=copy.deepcopy(average_results) elm_index=np.asarray(elm_index) elm_counter=0. print('The number of events in the cluster is: '+str(len(elm_index[label_boolean]))) if len(elm_index[label_boolean]) < 3: print('A cluster with two or less events is not plotted.') continue for index_elm in range(len(elm_index[label_boolean])): #preprocess velocity results, tackle with np.nan and outliers shot=int(db.loc[elm_index[label_boolean][index_elm]]['Shot']) #define ELM time for all the cases elm_time=db.loc[elm_index[label_boolean][index_elm]]['ELM time']/1000. 
if normalized_velocity: if normalized_structure: str_add='_ns' else: str_add='' filename=flap_nstx.tools.filename(exp_id=shot, working_directory=wd+'/processed_data', time_range=[elm_time-2e-3,elm_time+2e-3], comment='ccf_velocity_pfit_o'+str(subtraction_order)+'fst_0.0'+str_add+'_nv', extension='pickle') else: filename=wd+'/processed_data/'+db.loc[elm_index[index_elm]]['Filename']+'.pickle' status=db.loc[elm_index[label_boolean][index_elm]]['OK/NOT OK'] if status != 'NO': velocity_results=pickle.load(open(filename, 'rb')) velocity_results['Velocity ccf'][np.where(velocity_results['Correlation max'] < correlation_threshold),:]=[np.nan,np.nan] time=velocity_results['Time'] elm_time_interval_ind=np.where(np.logical_and(time >= elm_time-window_average, time <= elm_time+window_average)) elm_time=(time[elm_time_interval_ind])[np.argmin(velocity_results['Frame similarity'][elm_time_interval_ind])] elm_time_ind=np.argmin(np.abs(time-elm_time)) # try: if True: for key in average_results.keys(): if len(average_results[key].shape) == 1: ind_nan=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin]) notnan_counter[key]+=np.logical_not(ind_nan) if len(ind_nan) > 0 and key not in ['Frame similarity','Correlation max', 'GPI Dalpha']: (velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin])[ind_nan]=0. average_results[key]+=velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin] else: ind_nan_rad=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0]) ind_nan_pol=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1]) notnan_counter[key][:,0]+=np.logical_not(ind_nan_rad) notnan_counter[key][:,1]+=np.logical_not(ind_nan_pol) if len(ind_nan_rad) > 0 and len(ind_nan_pol) > 0: (velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0])[ind_nan_rad]=0. (velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1])[ind_nan_pol]=0. 
average_results[key][:,0]+=velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0] average_results[key][:,1]+=velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1] elm_counter+=1 # except: # print('Failed to add shot '+str(shot)+' @ '+str(elm_time)+' into the results.') for key in average_results.keys(): notnan_counter[key][np.where(notnan_counter[key] == 0)] = 1. if key in ['Frame similarity', 'Correlation max', 'GPI dalpha']: average_results[key]/=elm_counter elif not 'ccf' in key: if len(average_results[key].shape) == 1: average_results[key]=average_results[key]/(notnan_counter[key]) else: average_results[key][:,0]=average_results[key][:,0]/(notnan_counter[key][:,0]) average_results[key][:,1]=average_results[key][:,1]/(notnan_counter[key][:,1]) else: if len(average_results[key].shape) == 1: average_results[key]=average_results[key]/(notnan_counter[key]) else: average_results[key][:,0]=average_results[key][:,0]/(notnan_counter[key][:,0]) average_results[key][:,1]=average_results[key][:,1]/(notnan_counter[key][:,1]) for index_elm in range(len(elm_index[label_boolean])): #preprocess velocity results, tackle with np.nan and outliers shot=int(db.loc[elm_index[label_boolean][index_elm]]['Shot']) #define ELM time for all the cases elm_time=db.loc[elm_index[label_boolean][index_elm]]['ELM time']/1000. 
if normalized_velocity: if normalized_structure: str_add='_ns' else: str_add='' filename=flap_nstx.tools.filename(exp_id=shot, working_directory=wd+'/processed_data', time_range=[elm_time-2e-3,elm_time+2e-3], comment='ccf_velocity_pfit_o'+str(subtraction_order)+'_fst_0.0'+str_add+'_nv', extension='pickle') else: filename=wd+'/processed_data/'+db.loc[elm_index[index_elm]]['Filename']+'.pickle' status=db.loc[elm_index[label_boolean][index_elm]]['OK/NOT OK'] if status != 'NO': velocity_results=pickle.load(open(filename, 'rb')) velocity_results['Velocity ccf'][np.where(velocity_results['Correlation max'] < correlation_threshold),:]=[np.nan,np.nan] time=velocity_results['Time'] elm_time_interval_ind=np.where(np.logical_and(time >= elm_time-window_average, time <= elm_time+window_average)) elm_time=(time[elm_time_interval_ind])[np.argmin(velocity_results['Frame similarity'][elm_time_interval_ind])] elm_time_ind=np.argmin(np.abs(time-elm_time)) #current_elm_time=velocity_results['Time'][elm_time_ind[index_elm]] for key in average_results.keys(): if len(average_results[key].shape) == 1: ind_nan=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin]) if len(ind_nan) > 0 and key not in ['Frame similarity','Correlation max', 'GPI Dalpha']: (velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin])[ind_nan]=0. else: ind_nan_rad=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0]) ind_nan_pol=np.isnan(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1]) if len(ind_nan_rad) > 0 and len(ind_nan_pol) > 0: (velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0])[ind_nan_rad]=0. (velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1])[ind_nan_pol]=0. 
variance_results[key]+=(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin]-average_results[key])**2 for key in variance_results.keys(): if key in ['Frame similarity', 'Correlation max', 'GPI dalpha']: variance_results[key]=np.sqrt(variance_results[key]/elm_counter) elif not 'ccf' in key: if len(variance_results[key].shape) == 1: variance_results[key]=np.sqrt(variance_results[key]/(notnan_counter[key])) #SQRT FROM HERE else: variance_results[key][:,0]=np.sqrt(variance_results[key][:,0]/(notnan_counter[key][:,0])) variance_results[key][:,1]=np.sqrt(variance_results[key][:,1]/(notnan_counter[key][:,1])) else: if len(average_results[key].shape) == 1: variance_results[key]=np.sqrt(variance_results[key]/(notnan_counter[key])) else: variance_results[key][:,0]=np.sqrt(variance_results[key][:,0]/(notnan_counter[key][:,0])) variance_results[key][:,1]=np.sqrt(variance_results[key][:,1]/(notnan_counter[key][:,1])) #UNTIL HERE average_results['Tau']=(np.arange(2*nwin)*sampling_time-window_average)*1e3 #Let the results be in ms variance_results['Tau']=(np.arange(2*nwin)*sampling_time-window_average)*1e3 #Let the results be in ms all_average_results.append(average_results) all_variance_results.append(variance_results) plot_labels.append(label) label_number.append(len(elm_index[label_boolean])) ylimits={} for index_labels in range(len(all_average_results)): for key in average_results.keys(): if index_labels == 0 and len(all_average_results[index_labels][key].shape) == 1: ylimits[key]=np.asarray([(all_average_results[index_labels][key]-all_variance_results[index_labels][key]).min(), (all_average_results[index_labels][key]+all_variance_results[index_labels][key]).max()]) elif index_labels == 0 and len(all_average_results[index_labels][key].shape) == 2: ylimits[key]=np.asarray([np.asarray([(all_average_results[index_labels][key][:,0]-all_variance_results[index_labels][key][:,0]).min(), 
(all_average_results[index_labels][key][:,0]+all_variance_results[index_labels][key][:,0]).max()]), np.asarray([(all_average_results[index_labels][key][:,1]-all_variance_results[index_labels][key][:,1]).min(), (all_average_results[index_labels][key][:,1]+all_variance_results[index_labels][key][:,1]).max()])]) elif index_labels != 0 and len(all_average_results[index_labels][key].shape) == 1: ylimits[key]=np.asarray([np.asarray([ylimits[key][0], (all_average_results[index_labels][key]-all_variance_results[index_labels][key]).min()]).min(), np.asarray([ylimits[key][1], (all_average_results[index_labels][key]+all_variance_results[index_labels][key]).max()]).max()]) elif index_labels != 0 and len(all_average_results[index_labels][key].shape) == 2: ylimits[key]=np.asarray([np.asarray([np.asarray([ylimits[key][0,0], (all_average_results[index_labels][key][:,0]-all_variance_results[index_labels][key][:,0]).min()]).min(), np.asarray([ylimits[key][0,1], (all_average_results[index_labels][key][:,0]+all_variance_results[index_labels][key][:,0]).max()]).max()]), [np.asarray([ylimits[key][1,0], (all_average_results[index_labels][key][:,1]-all_variance_results[index_labels][key][:,1]).min()]).min(), np.asarray([ylimits[key][1,1], (all_average_results[index_labels][key][:,1]+all_variance_results[index_labels][key][:,1]).max()]).max()]]) for ind in range(len(all_average_results)): string='' if index is not None: if index == 0: string='radial' else: string='poloidal' string+='_ct_'+str(correlation_threshold) pdf_filename='NSTX_GPI_ALL_ELM_AVERAGE_RESULT_kmeans_nc_'+str(ncluster)+'_label_'+str(plot_labels[ind])+'_evnum_'+str(label_number[ind])+'_'+base.replace(' ','_')+'_'+string plot_average_velocity_results(average_results=all_average_results[ind], variance_results=all_variance_results[ind], ylimits=ylimits, plot_error=plot_error, plot=plot, pdf=pdf, pdf_filename=pdf_filename, opacity=opacity) def plot_average_velocity_results(average_results=None, variance_results=None, 
error_results=None, plot_variance=True, plot_error=False, pdf=True, plot=True, plot_max_only=False, plot_for_publication=False, pdf_filename='NSTX_GPI_ALL_ELM_AVERAGE_RESULTS', ylimits=None, normalized_velocity=False, opacity=0.2, plot_scatter=False): tau_range=[min(average_results['Tau']),max(average_results['Tau'])] if plot: import matplotlib matplotlib.use('QT5Agg') import matplotlib.pyplot as plt else: import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt plot_index=np.logical_not(np.isnan(average_results['Velocity ccf'][:,0])) plot_index_structure=np.logical_not(np.isnan(average_results['Elongation avg'])) if plot_for_publication: figsize=(8.5/2.54, 8.5/2.54/1.618*1.1) plt.rc('font', family='serif', serif='Helvetica') labelsize=9 linewidth=0.5 major_ticksize=2 plt.rc('text', usetex=False) plt.rcParams['pdf.fonttype'] = 42 plt.rcParams['ps.fonttype'] = 42 plt.rcParams['lines.linewidth'] = linewidth plt.rcParams['axes.linewidth'] = linewidth plt.rcParams['axes.labelsize'] = labelsize plt.rcParams['axes.titlesize'] = labelsize plt.rcParams['xtick.labelsize'] = labelsize plt.rcParams['xtick.major.size'] = major_ticksize plt.rcParams['xtick.major.width'] = linewidth plt.rcParams['xtick.minor.width'] = linewidth/2 plt.rcParams['xtick.minor.size'] = major_ticksize/2 plt.rcParams['ytick.labelsize'] = labelsize plt.rcParams['ytick.major.width'] = linewidth plt.rcParams['ytick.major.size'] = major_ticksize plt.rcParams['ytick.minor.width'] = linewidth/2 plt.rcParams['ytick.minor.size'] = major_ticksize/2 plt.rcParams['legend.fontsize'] = labelsize else: figsize=None if pdf: wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory'] pdf_filename=wd+'/plots/'+pdf_filename if plot_variance: pdf_filename+='_with_error' if normalized_velocity: pdf_filename+='_norm_vel' pdf_pages=PdfPages(pdf_filename+'.pdf') #Plotting the radial velocity from CCF fig, ax = plt.subplots(figsize=figsize) ax.plot(average_results['Tau'][plot_index], 
average_results['Velocity ccf'][plot_index,0]) if plot_scatter: ax.scatter(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,0], s=5, marker='o') if plot_variance: x=average_results['Tau'][plot_index] y=average_results['Velocity ccf'][plot_index,0] dy=variance_results['Velocity ccf'][plot_index,0] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if plot_error: x=average_results['Tau'][plot_index] y=average_results['Velocity ccf'][plot_index,0] dy=error_results['Velocity ccf'][plot_index,0] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if not plot_max_only: ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str avg'][plot_index_structure,0], linewidth=0.3, color='green') ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str max'][plot_index_structure,0], linewidth=0.3, color='red') ax.set_xlabel('Tau [ms]') ax.set_ylabel('v_rad[m/s]') ax.set_title('Radial velocity of the average results. \n (blue: ccf, green: str avg, red: str max)') ax.set_xlim(tau_range) if ylimits is not None: ax.set_ylim(ylimits['Velocity ccf'][0,:]) if plot_for_publication: x1,x2=ax.get_xlim() y1,y2=ax.get_ylim() ax.set_aspect((x2-x1)/(y2-y1)/1.618) fig.tight_layout() if pdf: pdf_pages.savefig() #Plotting the radial velocity from the structures. 
fig, ax = plt.subplots(figsize=figsize) ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str max'][plot_index_structure,0], color='red') if plot_scatter: ax.scatter(average_results['Tau'][plot_index_structure], average_results['Velocity str max'][plot_index_structure,0], s=5, marker='o', color='red') if plot_variance: x=average_results['Tau'][plot_index_structure] y=average_results['Velocity str max'][plot_index_structure,0] dy=variance_results['Velocity str max'][plot_index_structure,0] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if plot_error: x=average_results['Tau'][plot_index] y=average_results['Velocity str max'][plot_index,0] dy=error_results['Velocity str max'][plot_index,0] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if not plot_max_only: ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str avg'][plot_index_structure,0]) if plot_scatter: ax.scatter(average_results['Tau'][plot_index_structure], average_results['Velocity str avg'][plot_index_structure,0], s=5, marker='o') ax.set_xlabel('Tau [ms]') ax.set_ylabel('v_rad[m/s]') ax.set_title('Radial velocity of the average (blue) and \n maximum (red) structures.') ax.set_xlim(tau_range) if ylimits is not None: ax.set_ylim(ylimits['Velocity str max'][1,:]) if plot_for_publication: x1,x2=ax.get_xlim() y1,y2=ax.get_ylim() ax.set_aspect((x2-x1)/(y2-y1)/1.618) fig.tight_layout() if pdf: pdf_pages.savefig() #Plotting the poloidal velocity from CCF fig, ax = plt.subplots(figsize=figsize) ax.plot(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,1]) if plot_scatter: ax.scatter(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,1], s=5, marker='o') if plot_variance: x=average_results['Tau'][plot_index] y=average_results['Velocity ccf'][plot_index,1] dy=variance_results['Velocity ccf'][plot_index,1] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if plot_error: 
x=average_results['Tau'][plot_index] y=average_results['Velocity ccf'][plot_index,1] dy=error_results['Velocity ccf'][plot_index,1] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if not plot_max_only: ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str avg'][plot_index_structure,1], linewidth=0.3, color='green') ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str max'][plot_index_structure,1], linewidth=0.3, color='red') ax.set_xlabel('Tau [ms]') ax.set_ylabel('v_pol[m/s]') ax.set_title('Poloidal velocity of the average results. \n (blue: ccf, green: str avg, red: str max)') ax.set_xlim(tau_range) if ylimits is not None: ax.set_ylim(ylimits['Velocity ccf'][1,:]) if plot_for_publication: x1,x2=ax.get_xlim() y1,y2=ax.get_ylim() ax.set_aspect((x2-x1)/(y2-y1)/1.618) fig.tight_layout() if pdf: pdf_pages.savefig() #Plotting the poloidal velocity from the structures. fig, ax = plt.subplots(figsize=figsize) ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str max'][plot_index_structure,1], color='red') if plot_scatter: ax.scatter(average_results['Tau'][plot_index_structure], average_results['Velocity str max'][plot_index_structure,1], s=5, marker='o', color='red') if plot_variance: x=average_results['Tau'][plot_index_structure] y=average_results['Velocity str max'][plot_index_structure,1] dy=variance_results['Velocity str max'][plot_index_structure,1] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if plot_error: x=average_results['Tau'][plot_index] y=average_results['Velocity str max'][plot_index,1] dy=error_results['Velocity str max'][plot_index,1] ax.fill_between(x,y-dy,y+dy, color='gray', alpha=opacity) if not plot_max_only: ax.plot(average_results['Tau'][plot_index_structure], average_results['Velocity str avg'][plot_index_structure,1]) if plot_scatter: ax.scatter(average_results['Tau'][plot_index_structure], average_results['Velocity str 
avg'][plot_index_structure,1], s=5, marker='o') ax.set_xlabel('Tau [ms]') ax.set_ylabel('v_pol[m/s]') ax.set_title('Poloidal velocity of the average (blue) and \n maximum (red) structures.') ax.set_xlim(tau_range) if ylimits is not None: ax.set_ylim(ylimits['Velocity str max'][1,:]) if plot_for_publication: x1,x2=ax.get_xlim() y1,y2=ax.get_ylim() ax.set_aspect((x2-x1)/(y2-y1)/1.618) fig.tight_layout() if pdf: pdf_pages.savefig() #Plotting both radial and poloidal velocity from CCF fig, ax = plt.subplots(figsize=figsize) ax.plot(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,0]) if plot_scatter: ax.scatter(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,0], s=5, marker='o') ax.plot(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,1], color='red') if plot_scatter: ax.scatter(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,1], s=5, marker='o', color='red') ax.set_xlabel('Tau [ms]') ax.set_ylabel('Velocity [m/s]') ax.set_title('Poloidal velocity (red) and radial velocity (blue)\n of the average results.') ax.set_xlim(tau_range) ax.grid() if ylimits is not None: ax.set_ylim(average_results['Velocity ccf'][plot_index,0].min(), average_results['Velocity ccf'][plot_index,1].max()) if plot_for_publication: x1,x2=ax.get_xlim() y1,y2=ax.get_ylim() ax.set_aspect((x2-x1)/(y2-y1)/1.618) fig.tight_layout() if pdf: pdf_pages.savefig() #Plotting both radial and poloidal velocity from CCF fig, ax = plt.subplots(figsize=figsize) ax.plot(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,0]*average_results['Velocity ccf'][plot_index,1]) if plot_scatter: ax.scatter(average_results['Tau'][plot_index], average_results['Velocity ccf'][plot_index,0]*average_results['Velocity ccf'][plot_index,1], s=5, marker='o') ax.set_xlabel('Tau [ms]') ax.set_ylabel('Velocity^2 [m^2/s^2]') ax.set_title('Poloidal velocity*radial velocity of the average 
results.') ax.set_xlim(tau_range)
_waitForNodeSet(self):  # NOTE(review): the leading "def " was lost at a chunk boundary — restore it.
    '''
    Fill node set for the request.

    Obtain nodes for the request, pausing all new request handling for
    this provider until the node set can be filled.

    note:: This code is a bit racey in its calculation of the number of
        nodes in use for quota purposes. It is possible for multiple
        launchers to be doing this calculation at the same time. Since we
        currently have no locking mechanism around the "in use"
        calculation, if we are at the edge of the quota, one of the
        launchers could attempt to launch a new node after the other
        launcher has already started doing so. This would cause an
        expected failure from the underlying library, which is ok for now.
    '''
    # Since this code can be called more than once for the same request,
    # we need to calculate the difference between our current node set
    # and what was requested. We cannot use set operations here since a
    # node type can appear more than once in the requested types.
    saved_types = collections.Counter(self._satisfied_types.labels())
    requested_types = collections.Counter(self.request.node_types)
    diff = requested_types - saved_types
    needed_types = list(diff.elements())

    if self.request.reuse:
        ready_nodes = self.zk.getReadyNodesOfTypes(needed_types)
    else:
        ready_nodes = []

    for ntype in needed_types:
        # First try to grab from the list of already available nodes.
        got_a_node = False
        if self.request.reuse and ntype in ready_nodes:
            for node in ready_nodes[ntype]:
                # Only interested in nodes from this provider and pool
                if node.provider != self.provider.name:
                    continue
                if node.pool != self.pool.name:
                    continue
                # Check this driver reuse requirements
                if not self.checkReusableNode(node):
                    continue
                try:
                    self.zk.lockNode(node, blocking=False)
                except exceptions.ZKLockException:
                    # It's already locked so skip it.
                    continue
                else:
                    # Add an extra safety check that the node is still
                    # ready.
                    if node.state != zk.READY:
                        self.zk.unlockNode(node)
                        continue
                    if self.paused:
                        # NOTE(review): other log calls pass
                        # self.request.id; this one passes the request
                        # object itself — confirm intended.
                        self.log.debug("Unpaused request %s", self.request)
                        self.paused = False
                    self.log.debug(
                        "Locked existing node %s for request %s",
                        node.id, self.request.id)
                    got_a_node = True
                    # Record allocation in ZooKeeper before adding to the
                    # local node set so other launchers see it as taken.
                    node.allocated_to = self.request.id
                    self.zk.storeNode(node)
                    self.nodeset.append(node)
                    self._satisfied_types.add(ntype, node.id)
                    # Notify driver handler about node re-use
                    self.nodeReusedNotification(node)
                    break
        # Could not grab an existing node, so launch a new one.
        if not got_a_node:
            # If we calculate that we're at capacity, pause until nodes
            # are released by Zuul and removed by the DeletedNodeWorker.
            if not self.hasRemainingQuota(ntype):
                if self.request.requestor == "NodePool:min-ready":
                    # The point of the min-ready nodes is to have nodes on
                    # standby for future requests. When at capacity, it
                    # doesn't make sense to wait for and use resources to
                    # speculatively create a node. Decline this so someone
                    # else with capacity can take it.
                    self.log.debug(
                        "Declining node request %s because provider cannot"
                        " satisfy min-ready", self.request.id)
                    self.decline_request()
                    self._declinedHandlerCleanup()
                    return
                self.log.info(
                    "Not enough quota remaining to satisfy request %s",
                    self.request.id)
                if not self.paused:
                    self.log.debug(
                        "Pausing request handling to satisfy request %s",
                        self.request.id)
                # Pause (re-entered later via run()) and free the oldest
                # idle node to make room for this request.
                self.paused = True
                self.zk.deleteOldestUnusedNode(self.provider.name,
                                               self.pool.name)
                return
            if self.paused:
                self.log.debug("Unpaused request %s", self.request)
                self.paused = False
            node = zk.Node()
            node.state = zk.INIT
            node.type = ntype
            node.provider = self.provider.name
            node.pool = self.pool.name
            node.launcher = self.launcher_id
            node.allocated_to = self.request.id
            # This sets static data defined in the config file in the
            # ZooKeeper Node object.
            node.attributes = self.pool.node_attributes
            self.setNodeMetadata(node)
            # Note: It should be safe (i.e., no race) to lock the node
            # *after* it is stored since nodes in INIT state are not
            # locked anywhere.
            self.zk.storeNode(node)
            self.zk.lockNode(node, blocking=False)
            self.log.debug("Locked building node %s for request %s",
                           node.id, self.request.id)
            # Set state AFTER lock so that it isn't accidentally cleaned
            # up (unlocked BUILDING nodes will be deleted).
            node.state = zk.BUILDING
            self.zk.storeNode(node)
            self.nodeset.append(node)
            self._satisfied_types.add(ntype, node.id)
            self.launch(node)

def _runHandler(self):
    '''
    Main body for the node request handling.

    Validates provider/pool configuration, declines the request when it
    cannot possibly be satisfied, otherwise marks it PENDING and hands
    off to _waitForNodeSet().
    '''
    self._setFromPoolWorker()

    if self.provider is None or self.pool is None:
        # If the config changed out from underneath us, we could now be
        # an invalid provider and should stop handling this request.
        raise Exception("Provider configuration missing")

    # We have the launcher_id attr after _setFromPoolWorker() is called.
    self.log = logging.getLogger(
        "nodepool.driver.NodeRequestHandler[%s]" % self.launcher_id)

    declined_reasons = []
    invalid_types = self._invalidNodeTypes()
    if self.pool.max_servers <= 0:
        declined_reasons.append('pool is disabled by max_servers')
    elif invalid_types:
        declined_reasons.append('node type(s) [%s] not available' %
                                ','.join(invalid_types))
    elif not self.imagesAvailable():
        declined_reasons.append('images are not available')
    elif not self.hasProviderQuota(self.request.node_types):
        declined_reasons.append('it would exceed quota')

    if declined_reasons:
        self.log.debug("Declining node request %s because %s",
                       self.request.id, ', '.join(declined_reasons))
        self.decline_request()
        self._declinedHandlerCleanup()
        return

    if self.paused:
        # Re-entry of a previously paused request: it is already PENDING,
        # don't store it again.
        self.log.debug("Retrying node request %s", self.request.id)
    else:
        self.log.debug("Accepting node request %s", self.request.id)
        self.request.state = zk.PENDING
        self.zk.storeNodeRequest(self.request)

    self._waitForNodeSet()

def _declinedHandlerCleanup(self):
    """
    After declining a request, do necessary cleanup actions.

    Unlocks and deallocates any nodes gathered so far, stores the updated
    request, releases the request lock, and marks this handler done.
    """
    self.unlockNodeSet(clear_allocation=True)

    # If conditions have changed for a paused request to now cause us
    # to decline it, we need to unpause so we don't keep trying it
    if self.paused:
        self.paused = False

    try:
        self.zk.storeNodeRequest(self.request)
        self.zk.unlockNodeRequest(self.request)
    except Exception:
        # If the request is gone for some reason, we need to make
        # sure that self.done still gets set.
        self.log.exception("Unable to modify missing request %s",
                           self.request.id)
    self.done = True

# ---------------------------------------------------------------
# Public methods
# ---------------------------------------------------------------

def unlockNodeSet(self, clear_allocation=False):
    '''
    Attempt unlocking all Nodes in the node set.

    :param bool clear_allocation: If true, clears the node allocated_to
        attribute.
    '''
    for node in self.nodeset:
        # Skip nodes we never managed to lock.
        if not node.lock:
            continue

        if clear_allocation:
            node.allocated_to = None
            self.zk.storeNode(node)

        try:
            self.zk.unlockNode(node)
        except Exception:
            # Best-effort unlock: log and keep going with the rest.
            self.log.exception("Error unlocking node:")
        self.log.debug("Unlocked node %s for request %s",
                       node.id, self.request.id)

    self.nodeset = []

def decline_request(self):
    """
    Add this launcher to the request's declined_by list.

    If every registered launcher has now declined, the request is marked
    FAILED; otherwise it is reset to REQUESTED so another launcher may
    pick it up.
    """
    # Technically, this check to see if we've already declined it should
    # not be necessary. But if there is a bug (and there has been), we
    # want to make sure we don't continuously grow this array.
    if self.launcher_id not in self.request.declined_by:
        self.request.declined_by.append(self.launcher_id)

    launchers = set([x.id for x in self.zk.getRegisteredLaunchers()])
    if launchers.issubset(set(self.request.declined_by)):
        # All launchers have declined it
        self.log.debug("Failing declined node request %s",
                       self.request.id)
        self.request.state = zk.FAILED
    else:
        self.request.state = zk.REQUESTED

def run(self):
    '''
    Execute node request handling.

    This code is designed to be re-entrant. Because we can't always
    satisfy a request immediately (due to lack of provider resources), we
    need to be able to call run() repeatedly until the request can be
    fulfilled. The node set is saved and added to between calls.
    '''
    try:
        self._runHandler()
    except Exception:
        # Any unexpected error declines the request rather than leaving
        # it locked forever.
        self.log.exception(
            "Declining node request %s due to exception in "
            "NodeRequestHandler:", self.request.id)
        self.decline_request()
        self._declinedHandlerCleanup()

def poll(self):
    """
    Check whether handling of this request has completed.

    Returns True when the request has reached a terminal state
    (FULFILLED, FAILED, or vanished), False while work remains or the
    handler is paused.
    """
    if self.paused:
        return False

    if self.done:
        return True

    # Driver must implement this call
    if not self.launchesComplete():
        return False

    # Launches are complete, so populate ready_nodes and failed_nodes.
    # Iterate over a copy because aborted nodes are removed in the loop.
    aborted_nodes = []
    for node in self.nodeset.copy():
        if node.state == zk.READY:
            self.ready_nodes.append(node)
        elif node.state == zk.ABORTED:
            # ABORTED is a transient error triggered by overquota. In order
            # to handle this gracefully don't count this as failed so the
            # node is relaunched within this provider. Unlock the node so
            # the DeletedNodeWorker cleans up the zNode.
            aborted_nodes.append(node)
            self.nodeset.remove(node)
            self.zk.unlockNode(node)
        else:
            self.failed_nodes.append(node)

    # If the request has been pulled, unallocate the node set so other
    # requests can use them.
    if not self.zk.getNodeRequest(self.request.id):
        self.log.info("Node request %s disappeared", self.request.id)
        for node in self.nodeset:
            node.allocated_to = None
            self.zk.storeNode(node)
        self.unlockNodeSet()
        try:
            self.zk.unlockNodeRequest(self.request)
        except exceptions.ZKLockException:
            # If the lock object is invalid that is "ok" since we no
            # longer have a request either. Just do our best, log and
            # move on.
            self.log.debug("Request lock invalid for node request %s "
                           "when attempting to clean up the lock",
                           self.request.id)
        return True

    if self.failed_nodes:
        self.log.debug("Declining node request %s because nodes failed",
                       self.request.id)
        self.decline_request()
    elif aborted_nodes:
        # Because nodes are added to the satisfied types list before they
        # are ready we need to remove the aborted nodes again so they can
        # be created again.
        for node in aborted_nodes:
            self._satisfied_types.removeNode(node.id)
        self.log.debug(
            "Pausing request handling after node abort to satisfy "
            "request %s", self.request.id)
        self.paused = True
        return False
    else:
        # The assigned nodes must be added to the request in the order
        # in which they were requested.
        for requested_type in self.request.node_types:
            node_id = self._satisfied_types.pop(requested_type)
            self.request.nodes.append(node_id)
        self.log.debug("Fulfilled node request %s", self.request.id)
        self.request.state = zk.FULFILLED

    self.unlockNodeSet()
    self.zk.storeNodeRequest(self.request)
    self.zk.unlockNodeRequest(self.request)
    return True
# Adversarial training/evaluation script for CIFAR-10 with WideResNet-34-10
# (experiment name suggests NuAT-style nuclear-norm adversarial training with
# weight averaging; see Nuc_SWA_Attack / the 'nuc'-norm regularizer below).
#torch dependencies
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.autograd.gradcheck import zero_gradients
from torch.utils.data.sampler import SubsetRandomSampler
# torch dependencies for data load
import torchvision
from torchvision import datasets, transforms
# numpy and time
import numpy as np
import time
######################parse inputs###################
import sys
#READ ARGUMENTS
# Flags and values alternate on the command line: -flag value -flag value ...
opts = sys.argv[1::2]
args = sys.argv[2::2]
import os
# Ensure output directories exist.
if not os.path.isdir('./results'):
    os.mkdir('./results')
if not os.path.isdir('./log'):
    os.mkdir('./log')
if not os.path.isdir('./models'):
    os.mkdir('./models')
if not os.path.isdir('./data'):
    os.mkdir('./data')
#Defaults
EXP_NAME = 'CIFAR10_WideResNet34_NuAT2-WA'
l_ce = 1.0                 # weight of the clean cross-entropy term
Nuc_reg = Nuc_max = 4      # nuclear-norm regularizer weight (ramped up to Nuc_max)
TRAIN_BATCH_SIZE = 64
Feps = 8.0                 # attack epsilon in /255 units
B_val = 4.0                # initial Bernoulli noise magnitude in /255 units
MAX_EPOCHS = 80
lr_max = 0.1
lr_up = 20                 # epochs of cyclic-LR ramp-up
for i in range(len(opts)):
    opt = opts[i]
    arg = args[i]
    #Experiment name
    # NOTE(review): LOG_FILE_NAME is only assigned when -EXP_NAME is passed;
    # with the default EXP_NAME the training loop's log writes below raise
    # NameError — confirm intended usage.
    if opt=='-EXP_NAME':
        EXP_NAME = str(arg)
        LOG_FILE_NAME = 'log/'+str(arg)+'.txt'
        print('EXP_NAME:',EXP_NAME)
    if opt=='-MAX_EPOCHS':
        MAX_EPOCHS = int(arg)
        print('MAX_EPOCHS:',MAX_EPOCHS)
    if opt=='-l_ce':
        l_ce = float(arg)
        print('l_ce:',l_ce)
    if opt=='-B_val':
        B_val = float(arg)
        print('Initial Noise Magnitude:',B_val)
    if opt=='-Nuc_max':
        Nuc_max = float(arg)
        print('Nuc_max:',Nuc_max)
    if opt=='-b_size':
        TRAIN_BATCH_SIZE = int(arg)
        print('Training Batch Size:',TRAIN_BATCH_SIZE)
    if opt=='-Feps':
        Feps = float(arg)
        print('RFGSM Epsilon:',Feps)
    if opt=='-lr_up':
        lr_up = int(arg)
        print('lr_up:',lr_up)
    if opt=='-lr_max':
        lr_max = float(arg)
        print('lr_max:',lr_max)
###################################### Function Definitions #######################################
def FGSM_Attack_step(model,loss,image,target,eps=0.1,bounds=[0,1],GPU=0,steps=30):
    # Iterative FGSM (I-FGSM): `steps` signed-gradient steps of size eps/steps,
    # clamping to `bounds` after each step.  With steps=1 this is plain FGSM.
    # NOTE(review): mutable default bounds=[0,1] is shared across calls (read-only here).
    tar = Variable(target.cuda())
    img = image.cuda()
    eps = eps/steps
    for step in range(steps):
        img = Variable(img,requires_grad=True)
        zero_gradients(img)
        out = model(img)
        cost = loss(out,tar)
        cost.backward()
        per = eps * torch.sign(img.grad.data)
        adv = img.data + per.cuda()
        img = torch.clamp(adv,bounds[0],bounds[1])
    return img
def Nuc_SWA_Attack(model,model_swa,loss,image,target,eps=8./255.,bounds=[0,1],steps=2,Nuc_reg=4):
    # Two-model attack: starts from a random Bernoulli perturbation of size
    # B_val/255 (module-level global), then alternates steps on the weight-
    # averaged model (with a nuclear-norm term) and the current model.
    image = image.cuda()
    target = target.cuda()
    out = model(image).detach()
    out_swa = model_swa(image).detach()
    # Random +/- (B_val/255) start (RFGSM-style initialization).
    img = image + ((B_val/255.0)*torch.sign(torch.tensor([0.5]) - torch.rand_like(image)).cuda())
    img = torch.clamp(img,0.0,1.0).cuda()
    tar = Variable(target)
    for step in range(steps):
        img = Variable(img,requires_grad=True)
        zero_gradients(img)
        if (step)%2==0:
            # Even steps: attack the weight-averaged model plus nuclear-norm
            # discrepancy between clean and perturbed SWA outputs.
            rout_swa = model_swa(img)
            cost = loss(rout_swa,tar) + Nuc_reg*torch.norm(out_swa - rout_swa, 'nuc')/TRAIN_BATCH_SIZE
        else:
            rout = model(img)
            cost = loss(rout,tar)
        cost.backward()
        per = eps * torch.sign(img.grad.data)
        adv = img.data + per.cuda()
        img = torch.clamp(adv,bounds[0],bounds[1])
    # Project the total perturbation back into the L-inf ball.
    # NOTE(review): ball radius is hard-coded to 8/255 instead of `eps` —
    # confirm this is intended when Feps != 8.
    delta = img - image
    delta = torch.clamp(delta,-8.0/255.0,8.0/255)
    img = torch.clamp(image+delta,0.0,1.0)
    return img
def execfile(filepath):
    # Python-3 replacement for the removed builtin: exec a file and merge its
    # names into this module's globals (used to load WideResNet.py below).
    with open(filepath, 'rb') as file:
        exec(compile(file.read(), filepath, 'exec'))
    globals().update(locals())
#######################################Cudnn##############################################
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark=True
print('Cudnn status:',torch.backends.cudnn.enabled)
#######################################Set tensor to CUDA#########################################
torch.set_default_tensor_type('torch.cuda.FloatTensor')
#######################################Parameters##################################################
TRAIN_BATCH_SIZE = TRAIN_BATCH_SIZE
VAL_BATCH_SIZE = 128
TEST_BATCH_SIZE = 128
BASE_LR = 1e-1
MAX_ITER = (MAX_EPOCHS*50000)/TRAIN_BATCH_SIZE
MODEL_PREFIX = 'models/' + EXP_NAME + '_'
#######################################load network################################################
# Defines Wide_ResNet in this module's globals via exec.
execfile('WideResNet.py')
model = Wide_ResNet(34,10,0,10)
model.cuda()
model.train()
# Second network used as the (frozen) attack model after epoch 0.
model_attack = Wide_ResNet(34,10,0,10)
model_attack.cuda()
model_attack.eval()
tau = 0.9998
tau_list = [0.99,0.9998]
# Exponential-moving-average copies of the weights, one per tau.
# NOTE(review): state_dict() tensors alias the live parameters at seed time;
# the EMA update below rebinds each key to a new tensor — verify the first
# EMA step behaves as intended.
exp_avgs = []
for tau in tau_list:
    exp_avgs.append(model.state_dict())
######################################Load data ###################################################
transform_train = transforms.Compose([
    transforms.RandomCrop(size=32,padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),])
transform_test = transforms.Compose([
    transforms.ToTensor(),])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True , download=True, transform=transform_train)
val_set = torchvision.datasets.CIFAR10(root='./data', train=True , download=True, transform=transform_test)
test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
# Split training into train and validation
train_size = 49000
valid_size = 1000
test_size = 10000
train_indices = list(range(50000))
val_indices = []
count = np.zeros(10)
# Build a class-balanced validation set: first 100 images of each class.
for index in range(len(train_set)):
    _, target = train_set[index]
    if(np.all(count==100)):
        break
    if(count[target]<100):
        count[target] += 1
        val_indices.append(index)
        train_indices.remove(index)
print("Overlap indices:",list(set(train_indices) & set(val_indices)))
print("Size of train set:",len(train_indices))
print("Size of val set:",len(val_indices))
#get data loader ofr train val and test
train_loader = torch.utils.data.DataLoader(train_set,batch_size=TRAIN_BATCH_SIZE ,sampler=SubsetRandomSampler(train_indices))
val_loader = torch.utils.data.DataLoader(val_set,sampler = SubsetRandomSampler(val_indices),batch_size=VAL_BATCH_SIZE)
test_loader = torch.utils.data.DataLoader(test_set,batch_size=TEST_BATCH_SIZE)
print('CIFAR10 dataloader: Done')
###################################################################################################
epochs = MAX_EPOCHS
iteration = 0
loss = nn.CrossEntropyLoss()
#loss_no_reduce = nn.CrossEntropyLoss(reduce=False)
LR = BASE_LR
optimizer = torch.optim.SGD(model.parameters(), lr=lr_max,momentum=0.9,weight_decay=5e-4)
lr_steps = len(train_loader)
# One-cycle schedule: lr_up epochs up to lr_max, then decay to 0.
scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.0, max_lr=lr_max, step_size_up=lr_steps *lr_up, step_size_down=lr_steps *(epochs-lr_up))
##################################################################################################
# Training loop: Nuc_reg ramps linearly from 0 to Nuc_max over the run.
Nuc_reg = 0.0
for epoch in range(epochs):
    start = time.time()
    iter_loss =0
    counter =0
    for data, target in train_loader:
        data = Variable(data).cuda()
        target = Variable(target).cuda()
        model.eval()
        if epoch ==0:
            # No EMA weights yet: attack the live model itself.
            model.eval()
            adv_data = Nuc_SWA_Attack(model,model,loss,data,target,eps=Feps/255.0,steps=2,Nuc_reg=Nuc_reg)
        else:
            # Attack the EMA (tau=0.99) copy of the weights.
            model_attack.load_state_dict(exp_avgs[0])
            model_attack.cuda()
            model_attack.eval()
            adv_data = Nuc_SWA_Attack(model,model_attack,loss,data,target,eps=Feps/255.0,steps=2,Nuc_reg=Nuc_reg)
        # Re-project the perturbation into the fixed 8/255 L-inf ball.
        delta = adv_data - data
        delta = torch.clamp(delta,-8.0/255.0,8.0/255)
        adv_data = data+delta
        adv_data = torch.clamp(adv_data,0.0,1.0)
        model.train()
        optimizer.zero_grad()
        adv_out = model(adv_data)
        out = model(data)
        '''LOSS COMPUTATION'''
        # Clean cross-entropy plus nuclear-norm of the clean/adversarial
        # logit difference (batch-normalized).
        closs = loss(out,target)
        reg_loss = torch.norm(out - adv_out, 'nuc')/TRAIN_BATCH_SIZE
        cost = l_ce*closs + Nuc_reg*reg_loss
        cost.backward()
        optimizer.step()
        scheduler.step()
        LR = optimizer.param_groups[0]["lr"]
        # Update every EMA weight copy.
        for tau, new_state_dict in zip(tau_list, exp_avgs):
            for key,value in model.state_dict().items():
                new_state_dict[key] = (1-tau)*value + tau*new_state_dict[key]
        if iteration%100==0:
            msg = 'iter,'+str(iteration)+',clean loss,'+str(closs.data.cpu().numpy()) \
                +',reg loss,'+str(reg_loss.data.cpu().numpy()) \
                +',total loss,'+str(cost.data.cpu().numpy()) \
                +'\n'
            log_file = open(LOG_FILE_NAME,'a+')
            log_file.write(msg)
            log_file.close()
            model.train()
        #print msg
        iteration = iteration + 1
        ##console log
        counter = counter + 1
        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d] : Loss:%f \t\t' %(epoch, MAX_EPOCHS, counter, (train_size/TRAIN_BATCH_SIZE),cost.data.cpu().numpy()))
    end = time.time()
    print('Epoch:',epoch,' Time taken:',(end-start))
    # Checkpoint the live model and the tau=0.9998 EMA copy every epoch.
    model_name = MODEL_PREFIX+str(epoch)+'.tar'
    checkpoint = {
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'optimizer' : optimizer.state_dict(),
        'scheduler' : scheduler.state_dict()}
    torch.save(checkpoint, model_name)
    # NOTE(review): `tau` here is the leftover loop variable from the EMA
    # update above (last element of tau_list) — filename tags rely on that.
    torch.save(exp_avgs[1],"models/"+EXP_NAME+"_SWA_"+str(tau)+"_"+str(epoch)+'.pkl')
    Nuc_reg += Nuc_max/epochs
##################################### FIND BEST MODEL ###############################################
# Select the checkpoint with the best I-FGSM (7-step) validation accuracy.
model.eval()
EVAL_LOG_NAME = 'results/'+EXP_NAME+'.txt'
ACC_EPOCH_LOG_NAME = 'results/'+EXP_NAME+'acc_epoch.txt'
ACC_IFGSM_EPOCH_LOG_NAME = 'results/'+EXP_NAME+'ifgsm_acc_epoch.txt'
log_file = open(EVAL_LOG_NAME,'a+')
msg = '##################### iter.FGSM: steps=7,eps=8.0/255,1####################\n'
log_file.write(msg)
log_file.close()
accuracy_log = np.zeros(MAX_EPOCHS)
for epoch in range(MAX_EPOCHS):
    model_name = MODEL_PREFIX+str(epoch)+'.tar'
    model.load_state_dict(torch.load(model_name)['state_dict'])
    eps=8.0/255
    accuracy = 0
    accuracy_ifgsm = 0
    i = 0
    # Clean validation accuracy.
    for data, target in val_loader:
        data = Variable(data).cuda()
        target = Variable(target).cuda()
        out = model(data)
        prediction = out.data.max(1)[1]
        accuracy = accuracy + prediction.eq(target.data).sum()
        i = i + 1
    # 7-step I-FGSM validation accuracy.
    for data, target in val_loader:
        data = FGSM_Attack_step(model,loss,data,target,eps=eps,steps=7)
        data = Variable(data).cuda()
        target = Variable(target).cuda()
        out = model(data)
        prediction = out.data.max(1)[1]
        accuracy_ifgsm = accuracy_ifgsm + prediction.eq(target.data).sum()
    acc = (accuracy.item()*1.0) / (i*VAL_BATCH_SIZE) * 100
    acc_ifgsm = (accuracy_ifgsm.item()*1.0) / (i*VAL_BATCH_SIZE) * 100
    #log accuracy to file
    msg= str(epoch)+','+str(acc)+'\n'
    log_file = open(ACC_EPOCH_LOG_NAME,'a+')
    log_file.write(msg)
    log_file.close()
    msg1= str(epoch)+','+str(acc_ifgsm)+'\n'
    log_file = open(ACC_IFGSM_EPOCH_LOG_NAME,'a+')
    log_file.write(msg1)
    log_file.close()
    accuracy_log[epoch] = acc_ifgsm
    sys.stdout.write('\r')
    sys.stdout.write('| Epoch [%3d/%3d] : Acc:%f \t\t' %(epoch, MAX_EPOCHS,acc))
    sys.stdout.flush()
log_file = open(EVAL_LOG_NAME,'a+')
msg = 'Epoch,'+str(accuracy_log.argmax())+',Acc,'+str(accuracy_log.max())+'\n'
log_file.write(msg)
log_file.close()
# Reload the best (by robust val accuracy) checkpoint for test evaluation.
model_name = MODEL_PREFIX+str(accuracy_log.argmax())+'.tar'
model.load_state_dict(torch.load(model_name)['state_dict'])
model.eval()
model.cuda()
##################################### FGSM #############################################
EVAL_LOG_NAME = 'results/'+EXP_NAME+'.txt'
log_file = open(EVAL_LOG_NAME,'a+')
msg = '##################### FGSM ####################\n'
log_file.write(msg)
log_file.close()
# Single-step FGSM test accuracy over a sweep of epsilons.
for eps in np.arange(0.0/255,10.0/255,2.0/255):
    i = 0
    accuracy = 0
    for data, target in test_loader:
        adv = FGSM_Attack_step(model,loss,data,target,eps=eps,steps=1)
        data = Variable(adv).cuda()
        target = Variable(target).cuda()
        out = model(data)
        prediction = out.data.max(1)[1]
        accuracy = accuracy + prediction.eq(target.data).sum()
        i = i + 1
    acc = (accuracy.item()*1.0) / (test_size) * 100
    log_file = open(EVAL_LOG_NAME,'a+')
    msg = 'eps,'+str(eps)+',Acc,'+str(acc)+'\n'
    log_file.write(msg)
    log_file.close()
##################################### iFGSM #############################################
log_file = open(EVAL_LOG_NAME,'a+')
msg = '##################### iFGSM: step=7 ####################\n'
log_file.write(msg)
log_file.close()
# 7-step I-FGSM test accuracy over the same epsilon sweep.
for eps in np.arange(2.0/255,10.0/255,2.0/255):
    i = 0
    accuracy = 0
    for data, target in test_loader:
        adv = FGSM_Attack_step(model,loss,data,target,eps=eps,steps=7)
        data = Variable(adv).cuda()
        target = Variable(target).cuda()
        out = model(data)
        prediction = out.data.max(1)[1]
        accuracy = accuracy + prediction.eq(target.data).sum()
        i = i + 1
    acc = (accuracy.item()*1.0) / (test_size) * 100
    log_file = open(EVAL_LOG_NAME,'a+')
    msg = 'eps,'+str(eps)+',Acc,'+str(acc)+'\n'
    log_file.write(msg)
    log_file.close()
def MSPGD(model,loss,data,target,eps=0.1,eps_iter=0.1,bounds=[],steps=[7,20,50,100,500]):
    """
    Multi-step PGD: runs steps[-1] PGD iterations and snapshots the
    adversarial batch at each milestone in `steps`.

    model
    loss : loss used for training
    data : input to network
    target : ground truth label corresponding to data
    eps : perturbation srength added to image
    eps_iter : per-iteration step size (scaled per channel by `bounds`)
    bounds : per-channel [min, max] array, indexed as bounds[c, 0/1]
    Returns a list of adversarial batches, one per milestone in `steps`.
    """
    #Raise error if in training mode
    # NOTE(review): `assert '...'` on a non-empty string never fails;
    # this check is a no-op as written.
    if model.training:
        assert 'Model is in training mode'
    tar = Variable(target.cuda())
    data = data.cuda()
    B,C,H,W = data.size()
    # Random start uniform in [-eps, eps].
    noise = torch.FloatTensor(np.random.uniform(-eps,eps,(B,C,H,W))).cuda()
    noise = torch.clamp(noise,-eps,eps)
    img_arr = []
    for step in range(steps[-1]):
        # convert data and corresponding into cuda variable
        img = data + noise
        img = Variable(img,requires_grad=True)
        # make gradient of img to zeros
        zero_gradients(img)
        # forward pass
        out = model(img)
        #compute loss using true label
        cost = loss(out,tar)
        #backward pass
        cost.backward()
        #get gradient of loss wrt data
        per = torch.sign(img.grad.data)
        #convert eps 0-1 range to per channel range
        per[:,0,:,:] = (eps_iter * (bounds[0,1] - bounds[0,0])) * per[:,0,:,:]
        if(per.size(1)>1):
            per[:,1,:,:] = (eps_iter * (bounds[1,1] - bounds[1,0])) * per[:,1,:,:]
            per[:,2,:,:] = (eps_iter * (bounds[2,1] - bounds[2,0])) * per[:,2,:,:]
        # ascent
        adv = img.data + per.cuda()
        #clip per channel data out of the range
        img.requires_grad =False
        img[:,0,:,:] = torch.clamp(adv[:,0,:,:],bounds[0,0],bounds[0,1])
        if(per.size(1)>1):
            img[:,1,:,:] = torch.clamp(adv[:,1,:,:],bounds[1,0],bounds[1,1])
            img[:,2,:,:] = torch.clamp(adv[:,2,:,:],bounds[2,0],bounds[2,1])
        img = img.data
        # Project back into the eps-ball around the clean data.
        noise = img - data
        noise = torch.clamp(noise,-eps,eps)
        # Snapshot at milestone iterations.
        for j in range(len(steps)):
            if step == steps[j]-1:
                img_tmp = data + noise
                img_arr.append(img_tmp)
                break
    return img_arr
##################################### PGD, steps=[7,20,50,100,500] #############################################
log_file = open(EVAL_LOG_NAME,'a+')
msg = '##################### PGD: steps=[7,20,50,100,500],eps_iter=2/255 ####################\n'
log_file.write(msg)
log_file.close()
all_steps = [7,20,50,100,500]
num_steps = len(all_steps)
eps = 8.0/255
i = 0
acc_arr = torch.zeros((num_steps))
# Accumulate correct counts for each PGD milestone in one pass per batch.
for data, target in test_loader:
    adv_arr = MSPGD(model,loss,data,target,eps=eps,eps_iter=2.0/255,bounds=np.array([[0,1],[0,1],[0,1]]),steps=all_steps)
    target = Variable(target).cuda()
    for j in range(num_steps):
        data = Variable(adv_arr[j]).cuda()
        out = model(data)
        prediction = out.data.max(1)[1]
        acc_arr[j] = acc_arr[j] + prediction.eq(target.data).sum()
    i = i + 1
print(acc_arr)
for j in range(num_steps):
    acc_arr[j] = (acc_arr[j].item()*1.0) / (test_size) * 100
    log_file = open(EVAL_LOG_NAME,'a+')
    msg = 'eps,'+str(eps)+',steps,'+str(all_steps[j])+',Acc,'+str(acc_arr[j])+'\n'
    log_file.write(msg)
    log_file.close()
##################################### FIND BEST SWA MODEL ###############################################
# Repeat best-checkpoint selection for the saved EMA/SWA weights.
EVAL_LOG_NAME = 'results/'+EXP_NAME+'_SWA.txt'
ACC_EPOCH_LOG_NAME = 'results/'+EXP_NAME+'acc_epoch_SWA.txt'
ACC_IFGSM_EPOCH_LOG_NAME = 'results/'+EXP_NAME+'ifgsm_acc_epoch_SWA.txt'
log_file = open(EVAL_LOG_NAME,'a+')
msg = '##################### iter.FGSM: steps=7,eps=8.0/255,1####################\n'
log_file.write(msg)
log_file.close()
accuracy_log = np.zeros(MAX_EPOCHS)
for epoch in range(MAX_EPOCHS):
    model_name = "models/"+EXP_NAME+"_SWA_"+str(tau)+"_"+str(epoch)+'.pkl'
    model.load_state_dict(torch.load(model_name))
    eps=8.0/255
    accuracy = 0
    accuracy_ifgsm = 0
    i = 0
    for data, target in val_loader:
        data = Variable(data).cuda()
        target = Variable(target).cuda()
        out = model(data)
        prediction = out.data.max(1)[1]
        accuracy = accuracy + prediction.eq(target.data).sum()
        i = i + 1
    for data, target in val_loader:
        data = FGSM_Attack_step(model,loss,data,target,eps=eps,steps=7)
        data = Variable(data).cuda()
        target = Variable(target).cuda()
        out = model(data)
        prediction = out.data.max(1)[1]
        accuracy_ifgsm = accuracy_ifgsm + prediction.eq(target.data).sum()
    # NOTE(review): statement truncated at a chunk boundary — the divisor
    # "/ (i*VAL_BATCH_SIZE) * 100" continues beyond this view.
    acc = (accuracy.item()*1.0)
<filename>utils.py
# NOTE(review): the line above is a dataset/concatenation artifact, not Python.
# MAML helper utilities: manual inner-loop updates and several experimental
# ("fake_adopt_*") adaptation variants built on learn2learn's update_module.
import numpy as np
import torch
from torch.autograd import grad
from learn2learn.utils import clone_module, update_module
from torch import nn, optim


def maml_update(model, lr, grads=None):
    """
    [[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/algorithms/maml.py)

    **Description**

    Performs a MAML update on model using grads and lr.
    The function re-routes the Python object, thus avoiding in-place
    operations.

    NOTE: The model itself is updated in-place (no deepcopy), but the
    parameters' tensors are not.

    **Arguments**

    * **model** (Module) - The model to update.
    * **lr** (float) - The learning rate used to update the model.
    * **grads** (list, *optional*, default=None) - A list of gradients
      for each parameter of the model. If None, will use the gradients
      in .grad attributes.

    **Example**

    ~~~python
    maml = l2l.algorithms.MAML(Model(), lr=0.1)
    model = maml.clone() # The next two lines essentially implement model.adapt(loss)
    grads = autograd.grad(loss, model.parameters(), create_graph=True)
    maml_update(model, lr=0.1, grads)
    ~~~
    """
    if grads is not None:
        params = list(model.parameters())
        # Warn (but do not fail) on a length mismatch; zip below silently
        # stops at the shorter sequence.
        if not len(grads) == len(list(params)):
            msg = 'WARNING:maml_update(): Parameters and gradients have different length. ('
            msg += str(len(params)) + ' vs ' + str(len(grads)) + ')'
            print(msg)
        for p, g in zip(params, grads):
            if g is not None:
                # Stash the proposed delta on the parameter; update_module
                # applies every .update attribute it finds.
                p.update = - lr * g
    return update_module(model)


def accuracy(predictions, targets):
    """Fraction of rows of `predictions` (logits) whose argmax equals `targets`."""
    predictions = predictions.argmax(dim=1).view(targets.shape)
    return (predictions == targets).sum().float() / targets.size(0)


def fast_adapt(batch, learner, loss, adaptation_steps, shots, ways, device):
    """Standard MAML inner loop: adapt on the support split, evaluate on the
    query split. Returns (query loss, query accuracy)."""
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Separate data into adaptation/evalutation sets: even positions of the
    # first shots*ways*2 samples are support, the rest are query.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model
    #support loss
    for step in range(adaptation_steps):
        train_error = loss(learner(adaptation_data), adaptation_labels)
        learner.adapt(train_error)  # update
    # Evaluate the adapted model
    predictions = learner(evaluation_data)
    # query loss
    valid_error = loss(predictions, evaluation_labels)
    valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy


def fake_adopt_debug2(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict,
                      error_data, task):
    """Debug variant of fast_adapt that re-implements learner.adapt by hand:
    per-sample CE, mean, autograd.grad, then update_module. Returns
    (query loss, query accuracy, placeholder dict, placeholder dict)."""
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Separate data into adaptation/evalutation sets
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Unreduced loss: keeps one value per support sample before averaging.
    loss2 = nn.CrossEntropyLoss(reduction='none')
    # Adapt the model
    #support loss
    for step in range(adaptation_steps):
        train_error = loss2(learner(adaptation_data), adaptation_labels)
        # learner.adapt(train_error) #update
        mean_seperate_error = torch.mean(train_error)
        # create_graph=True keeps the inner-loop grads differentiable for the
        # outer MAML update.
        grads = grad(mean_seperate_error, learner.parameters(), create_graph=True)
        updates = [-learner.lr * g for g in grads]
        update_module(learner, updates=updates)
    # Evaluate the adapted model
    predictions = learner(evaluation_data)
    # query loss
    valid_error = loss(predictions, evaluation_labels)
    valid_accuracy = accuracy(predictions, evaluation_labels)
    # Placeholder dicts keep the 4-tuple signature of the other variants.
    return valid_error, valid_accuracy,{"2":[3]},{"2":[3]}


def fake_adopt_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict,
                      error_data, task):
    """Adaptation variant that records per-sample support gradients into
    error_dict[task] (scaled by 1/5 — presumably shots*ways==5; confirm),
    applies them via maml_update, and caches the query split in
    error_data[task] for later replay by fake_adopt_now."""
    datas, labels = batch
    datas, labels = datas.to(device), labels.to(device)
    # Separate data into adaptation/evalutation sets
    adaptation_indices = np.zeros(datas.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
    # Adapt the model
    train_error = 0
    print("adaptation_labels)", adaptation_labels)
    for step in range(adaptation_steps):
        for (one_class_data, one_class_label) in zip(adaptation_data, adaptation_labels):
            print("one_class_label: ", one_class_label)
            one_class_data = one_class_data.unsqueeze(0)
            one_class_label = one_class_label.unsqueeze(0)
            print("one_class_label:(unsquzee) ", one_class_label)
            one_class_loss = loss(learner(one_class_data), one_class_label)
            # NOTE(review): the 1/5 factor hard-codes the support-set size.
            grads = grad(one_class_loss / 5, learner.parameters(), allow_unused=False)
            error_dict[task].append(grads)
            train_error += one_class_loss
            # print("one class label loss :",one_class_loss)
    # print("mean train error :",train_error/5)
    original_error = loss(learner(adaptation_data), adaptation_labels)
    # print("original train error : ",original_error)
    # print("@@@@@@@@@@@@@@@@@@@debug loss")
    # fine-tune
    # learner.adapt(train_error)
    for g in error_dict[task]:
        learner = maml_update(learner, learner.lr, g)
    # Evaluate the adapted model
    error_data[task] = evaluation_data, evaluation_labels
    predictions = learner(evaluation_data)
    # query loss
    evaluation_error = loss(predictions, evaluation_labels)
    evaluation_accuracy = accuracy(predictions, evaluation_labels)
    return evaluation_error, evaluation_accuracy, error_dict, error_data


def fake_adopt_now(learner, fake_grads, loss, error_data, task):
    """Replay previously recorded gradients on `learner`, then return the
    loss on the query split cached in error_data[task]."""
    for g in fake_grads:
        learner = maml_update(learner, learner.lr, g)
    query_data, query_label = error_data[task]
    predictions = learner(query_data)
    # query loss
    evaluation_error = loss(predictions, query_label)
    return evaluation_error


def fake_adopt_debug(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict,
                     error_data, task):
    # NOTE(review): function truncated at a chunk boundary — body continues
    # beyond this view.
    datas, labels = batch
    datas, labels = datas.to(device), labels.to(device)
    # Separate data into adaptation/evalutation sets
    adaptation_indices = np.zeros(datas.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
    # Adapt the model
    train_error = []
    #
print("adaptation_labels)", adaptation_labels) for step in range(adaptation_steps): for (one_class_data, one_class_label) in zip(adaptation_data, adaptation_labels): # print("one_class_label: ", one_class_label) # print("one_class_label:(unsquzee) ", one_class_label) # 주석처리 one_class_data = one_class_data.unsqueeze(0) one_class_label = one_class_label.unsqueeze(0) one_class_loss = loss(learner(one_class_data), one_class_label) grads = grad(one_class_loss / 5, learner.parameters(), create_graph=True) updates = [-learner.lr * g for g in grads] error_dict[task].append(updates) train_error.append(one_class_loss) # print("one class label loss :",one_class_loss) # print("mean train error :",train_error/5) # original_error = loss(learner(adaptation_data), adaptation_labels) # print("original train error : ",original_error) # print("@@@@@@@@@@@@@@@@@@@debug loss") # fine-tune # learner.adapt(train_error) # 1차 시도 # for g in error_dict[task]: # learner = maml_update(learner, learner.lr, g) # 2차 시도 # for u in error_dict[task]: # update_module(learner,updates = u) # 3차 시도 # grads = grad(train_error, learner.parameters(), create_graph=True) # updates = [-learner.lr * g for g in grads] # update_module(learner, updates=updates) # 4차 시도 # grads = grad(original_error, learner.parameters(), create_graph=True) # updates = [-learner.lr * g for g in grads] # update_module(learner, updates=updates) # 5차 시도 # mean_error = torch.mean(torch.stack(train_error)) # grads = grad(mean_error, learner.parameters(), create_graph=True) # updates = [-learner.lr * g for g in grads] # update_module(learner, updates=updates) # 6차 시도 # mean_error = torch.mean(torch.stack(train_error)) # grads = grad(mean_error, learner.parameters(), create_graph=True) # updates = [-learner.lr * g for g in grads] # update_module(learner, updates=updates) # Evaluate the adapted model error_data[task] = evaluation_data, evaluation_labels predictions = learner(evaluation_data) # query loss evaluation_error = 
loss(predictions, evaluation_labels) evaluation_accuracy = accuracy(predictions, evaluation_labels) return evaluation_error, evaluation_accuracy, error_dict, error_data def evaluate(test_iteration, maml, task_information): tasksets, meta_batch_size, loss, adaptation_steps, shots, ways, device = task_information test_error = [] test_accuracy = [] for i in range(test_iteration): meta_test_error = 0.0 meta_test_accuracy = 0.0 # Compute meta-testing loss learner = maml.clone() batch = tasksets.test.sample() # print("batch",len(batch)) evaluation_error, evaluation_accuracy = fast_adapt(batch, learner, loss, adaptation_steps, shots, ways, device) meta_test_error += evaluation_error.item() meta_test_accuracy += evaluation_accuracy.item() test_error.append(meta_test_error) test_accuracy.append(meta_test_accuracy) # print('Meta Test Error', meta_test_error / meta_batch_size) # print('Meta Test Accuracy', meta_test_accuracy / meta_batch_size) test_error_mean = np.mean(test_error) test_accuracy_mean = np.mean(test_accuracy) test_error_std = np.std(test_error) test_accuracy_std = np.std(test_accuracy) print('Meta Test Error(Iteration Record)', test_error_mean) print('Meta Test Accuracy(Iteration Record)', test_accuracy_mean) return test_error_mean, test_error_std, test_accuracy_mean, test_accuracy_std ####new fake adopt 1 def fake_adopt_1_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task): datas, labels = batch datas, labels = datas.to(device), labels.to(device) # Separate data into adaptation/evalutation sets adaptation_indices = np.zeros(datas.size(0), dtype=bool) adaptation_indices[np.arange(shots * ways) * 2] = True evaluation_indices = torch.from_numpy(~adaptation_indices) adaptation_indices = torch.from_numpy(adaptation_indices) # print("evaluation_indices",evaluation_indices) # print("adaptation_indices", adaptation_indices) adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices] 
evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices] # Adapt the model loss2 = nn.CrossEntropyLoss(reduction='none') for step in range(adaptation_steps): individual_loss = loss2(learner(adaptation_data), adaptation_labels) for il in individual_loss: grads = grad(il, learner.parameters(), retain_graph=True) # 이거 안하면 끝나고 free되서 오류남 updates = [-learner.lr * g for g in grads] error_dict[task].append(updates) train_error = loss(learner(adaptation_data), adaptation_labels) learner.adapt(train_error) error_data[task] = evaluation_data, evaluation_labels # Evaluate the adapted model predictions = learner(evaluation_data) # query loss evaluation_error = loss(predictions, evaluation_labels) evaluation_accuracy = accuracy(predictions, evaluation_labels) return evaluation_error, evaluation_accuracy, error_dict, error_data def fake_adopt_1_now(learner, fake_grads, loss, error_data, task): for updates in fake_grads: update_module(learner, updates=updates) query_data, query_label = error_data[task] predictions = learner(query_data) # query loss evaluation_error = loss(predictions, query_label) return evaluation_error #####fake_adopt 3 def fake_adopt_3_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task, iteration): data, labels = batch data, labels = data.to(device), labels.to(device) adaptation_indices = np.zeros(data.size(0), dtype=bool) adaptation_indices[np.arange(shots * ways) * 2] = True evaluation_indices = torch.from_numpy(~adaptation_indices) adaptation_indices = torch.from_numpy(adaptation_indices) adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices] evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices] # Adapt the model #support loss if iteration % 49 == 0: loss2 = nn.CrossEntropyLoss(reduction='none') for step in range(adaptation_steps): individual_loss = loss2(learner(adaptation_data), adaptation_labels) for il in 
individual_loss: grads = grad(il, learner.parameters(), retain_graph=True) # 이거 안하면 끝나고 free되서 오류남 updates = [-learner.lr * g for g in grads] error_dict[task].append(updates) error_data[task] = evaluation_data, evaluation_labels # train_error = torch.mean(individual_loss) # learner.adapt(train_error) valid_error = torch.tensor([0]) valid_accuracy = torch.tensor([0]) else: for step in range(adaptation_steps): train_error = loss(learner(adaptation_data), adaptation_labels) learner.adapt(train_error) predictions = learner(evaluation_data) valid_error = loss(predictions, evaluation_labels) valid_accuracy = accuracy(predictions, evaluation_labels) return valid_error, valid_accuracy, error_dict, error_data def fake_adopt_3_now(learner, fake_grads, loss, error_data, task): for updates in fake_grads: update_module(learner, updates=updates) query_data, query_label = error_data[task] predictions = learner(query_data) # query loss evaluation_error = loss(predictions, query_label) return evaluation_error #############fake adopt 4 def fake_adopt_4_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task, iteration): data, labels = batch data, labels = data.to(device), labels.to(device) adaptation_indices = np.zeros(data.size(0), dtype=bool) adaptation_indices[np.arange(shots * ways) * 2] = True evaluation_indices = torch.from_numpy(~adaptation_indices) adaptation_indices = torch.from_numpy(adaptation_indices) adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices] evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices] # Adapt the model #support loss if iteration % 9 == 0: loss2 = nn.CrossEntropyLoss(reduction='none') for step in range(adaptation_steps): individual_loss = loss2(learner(adaptation_data), adaptation_labels) for il in individual_loss: #grads = grad(il, learner.parameters(), retain_graph=True) # 이거 안하면 끝나고 free되서 오류남 #updates = [-learner.lr * g for g in 
grads] error_dict[task].append(il) error_data[task] = evaluation_data, evaluation_labels # train_error = torch.mean(individual_loss) # learner.adapt(train_error) valid_error = torch.tensor([0]) valid_accuracy = torch.tensor([0]) else: for step in range(adaptation_steps): train_error = loss(learner(adaptation_data), adaptation_labels) learner.adapt(train_error) predictions = learner(evaluation_data) valid_error = loss(predictions, evaluation_labels) valid_accuracy = accuracy(predictions, evaluation_labels) return valid_error, valid_accuracy, error_dict, error_data def fake_adopt_4_now(learner, fake_grads, loss, error_data, task): #for in fake_grads: #update_module(learner, updates=updates) for updates in fake_grads: update_module(learner, updates=updates) query_data, query_label = error_data[task] predictions = learner(query_data) # query loss evaluation_error = loss(predictions, query_label) return evaluation_error #############fake adopt 5 def fake_adopt_5_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task, iteration,split_meta_batch_size): data, labels = batch data, labels = data.to(device), labels.to(device) adaptation_indices = np.zeros(data.size(0), dtype=bool) adaptation_indices[np.arange(shots * ways) * 2] = True evaluation_indices = torch.from_numpy(~adaptation_indices) adaptation_indices = torch.from_numpy(adaptation_indices) adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices] evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices] # Adapt the model #support loss if task >= split_meta_batch_size: loss2 = nn.CrossEntropyLoss(reduction='none') for step in range(adaptation_steps): individual_loss = loss2(learner(adaptation_data), adaptation_labels) for il in individual_loss: grads = grad(il, learner.parameters(), retain_graph=True) # 이거 안하면 끝나고 free되서 오류남 updates = [-learner.lr * g for g in grads] error_dict[task].append(updates) 
error_data[task] = evaluation_data, evaluation_labels # train_error = torch.mean(individual_loss) # learner.adapt(train_error) valid_error = torch.tensor([0]) valid_accuracy = torch.tensor([0]) else: for step in range(adaptation_steps): train_error = loss(learner(adaptation_data), adaptation_labels) learner.adapt(train_error) predictions = learner(evaluation_data) valid_error = loss(predictions, evaluation_labels) valid_accuracy = accuracy(predictions, evaluation_labels) return valid_error, valid_accuracy, error_dict, error_data def fake_adopt_5_now(learner, fake_grads, loss, error_data, task): # for in fake_grads: # update_module(learner, updates=updates) for updates in fake_grads: update_module(learner, updates=updates) query_data, query_label = error_data[task] predictions = learner(query_data) # query loss evaluation_error = loss(predictions, query_label) return evaluation_error #############fake adopt 6 def fake_adopt_6_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task, iteration,split_meta_batch_size): data, labels = batch data, labels = data.to(device), labels.to(device) adaptation_indices = np.zeros(data.size(0), dtype=bool) adaptation_indices[np.arange(shots
<filename>gerenciador_operacoes.py from time import sleep dados = {} ### --> DICIONÁRIO RECEBE TODOS OS DADOS COM SEUS RESPECTIVOS VALORES; ID, NOME ETC... lista_de_dados = [] lista_principal = [] copia_dados = []### --> DICIONÁRIO RECEBE TODOS OS DADOS codigo_cliente = 0 ### --> CONTADOR CONTAGEM TRANSAÇÕES moeda_origem_sigla = 'R$:' moeda_destino_sigla = 'U$$:' valor_tot_operacoes = float(0) valor_tot_operacoes_a = float(0) tot_taxas = float(0) tot_movimento_brasil_dolar_eua = float(0) tot_movimento_brasil_euro = float(0) tot_movimento_brasil_dolar_canada = float(0) tot_movimento_dolar_eua_brasil = float(0) tot_movimento_dolar_eua_euro = float(0) tot_movimento_dolar_eua_dolar_canada = float(0) tot_movimento_euro_brasil = float(0) tot_movimento_euro_dolar_eua = float(0) tot_movimento_euro_dolar_canada = float(0) tot_movimento_dolar_canada_brasil = float(0) tot_movimento_dolar_canada_dolar_eua = float(0) tot_movimento_dolar_canada_euro = float(0) print('--' * 35) print(f'\033[7;40m{" GERENCIADOR DE OPERAÇÕES ":*^70}\033[0;0m') print('--' * 35) while True: print(f' |-- {"OPÇÃO ":-<2}{" MENU ":-^38} |') print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|') print(f' |\033[7;40m{" -> 1 <-":.<3}|{" - CADASTROS - CLIENTES - OPERAÇÕES ":^38} \033[0;0m|') print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|') print(f' |{" -> 2 <-":.<3}|{" - LISTAR OPERAÇÕES - ":^38} |') print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|') print(f' |\033[7;40m{" -> 3 <-":.<3}|{" - VALOR TOTAL DAS OPERAÇÕES - ":^38} \033[0;0m|') print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|') print(f' |{" -> 4 <-":.<3}|{" - VALOR TOTAL DAS TAXAS COBRADAS - ":^38} |') print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|') print('--' * 35) print(f'\033[7;40m{" ESCOLHA UMA DAS OPÇÕES ACIMA: ":*^70}\033[0;0m') print('--' 
* 35) opcao_menu = str(input('Digite a opção desejada:?')) if opcao_menu.isnumeric(): if opcao_menu == '1': while True: print('--' * 35) print(f'\033[7;40m{" CADASTRAR CLIENTES ":*^70}\033[0;0m') print('--' * 35) codigo_cliente += 1 codigo_cliente_convertida = str(codigo_cliente) print(f' --> {codigo_cliente}º CLIENTE - ORDEM DE SERVIÇO DE Nº [ {codigo_cliente_convertida} ]') dados['Cód'] = [codigo_cliente] lista_de_dados.append(codigo_cliente_convertida) print('--' * 35) nome = str(input('Digite o nome do cliente:?')).strip().upper() dados['Nome'] = [nome] lista_de_dados.append(nome) # print(f'TESTE DADOS: {dados}') print('--' * 30) print('------------------- MOEDAS CADASTRADAS -------------------') print(' | Digite --> (1) para MOEDA REAL - BRASIL |') print(' | Digite --> (2) para MOEDA DÓLAR - EUA |') print(' | Digite --> (3) para MOEDA EURO - EUROPA |') print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |') print(' -------------------------------------------') moeda_origem = str(input('Moeda de origem?: [somente números acima]:?')) dados['Moeda origem'] = [moeda_origem] print('--' * 30) if moeda_origem == '1': print('MOEDA DE ORIGEM: - REAL - BRASIL') dados['Moeda origem'] = ['REAL - BRL'] moeda_origem_sigla = 'R$:' lista_de_dados.append('REAL - BRL') elif moeda_origem == '2': print('MOEDA DE ORIGEM: - DÓLAR - EUA') dados['Moeda origem'] = ['DÓLAR - EUA'] moeda_origem_sigla = 'U$$:' lista_de_dados.append('DÓLAR - EUA') elif moeda_origem == '3': print('MOEDA DE ORIGEM: - EURO - EUROPA') dados['Moeda origem'] = ['EURO'] moeda_origem_sigla = '€:' lista_de_dados.append('EURO') elif moeda_origem == '4': print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ') dados['Moeda origem'] = ['DÓLAR - CAD'] moeda_origem_sigla = 'U$$:' lista_de_dados.append('DÓLAR - CAD') else: while True: print(f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m') print('--' * 30) print('------------------- MOEDAS CADASTRADAS -------------------') print(' 
| Digite --> (1) para MOEDA REAL - BRASIL |') print(' | Digite --> (2) para MOEDA DÓLAR - EUA |') print(' | Digite --> (3) para MOEDA EURO - EUROPA |') print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |') print(' -------------------------------------------') moeda_origem = str(input('Moeda de origem?: [somente números acima]:?')) print('--' * 30) if moeda_origem == '1': print('MOEDA DE ORIGEM: - REAL - BRASIL') dados['Moeda origem'] = ['REAL - BRL'] moeda_origem_sigla = 'R$:' lista_de_dados.append('REAL - BRL') break elif moeda_origem == '2': print('MOEDA DE ORIGEM: - DÓLAR - EUA') dados['Moeda origem'] = ['DÓLAR - EUA'] moeda_origem_sigla = 'U$$:' lista_de_dados.append('DÓLAR - EUA') break elif moeda_origem == '3': print('MOEDA DE ORIGEM: - EURO - EUROPA') dados['Moeda origem'] = ['EURO'] moeda_origem_sigla = '€:' lista_de_dados.append('EURO') break elif moeda_origem == '4': print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ') dados['Moeda origem'] = ['DÓLAR - CAD'] moeda_origem_sigla = 'U$$:' lista_de_dados.append('DÓLAR - CAD') break print('--' * 30) print('------------------- MOEDAS CADASTRADAS -------------------') print(' | Digite --> (1) para MOEDA REAL - BRASIL |') print(' | Digite --> (2) para MOEDA DÓLAR - EUA |') print(' | Digite --> (3) para MOEDA EURO - EUROPA |') print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |') print(' -------------------------------------------') moeda_destino = str(input('Moeda de destino?: [somente números acima]:?')) print('--' * 30) if moeda_destino == '1': print('MOEDA DE DESTINO: - REAL - BRASIL') dados['Moeda destino'] = ['REAL - BRASIL'] moeda_destino_sigla = 'R$:' lista_de_dados.append('REAL - BRASIL') elif moeda_destino == '2': print('MOEDA DE DESTINO: - DÓLAR - EUA') dados['Moeda destino'] = ['DÓLAR - EUA'] moeda_destino_sigla = 'U$$:' lista_de_dados.append('DÓLAR - EUA') elif moeda_destino == '3': print('MOEDA DE DESTINO: - EURO - EUROPA') dados['Moeda destino'] = ['EURO'] moeda_destino_sigla = '€:' 
lista_de_dados.append('EURO') elif moeda_destino == '4': print('MOEDA DE DESTINO: - DÓLAR - CANADÁ') dados['Moeda destino'] = ['DÓLAR CAD'] moeda_destino_sigla = 'U$$:' lista_de_dados.append('DÓLAR CAD') else: while True: print(f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m') print('--' * 30) print('------------------- MOEDAS CADASTRADAS -------------------') print(' | Digite --> (1) para MOEDA REAL - BRASIL |') print(' | Digite --> (2) para MOEDA DÓLAR - EUA |') print(' | Digite --> (3) para MOEDA EURO - EUROPA |') print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |') print(' -------------------------------------------') moeda_destino = str(input('Moeda de destino?: [somente números acima]:?')) print('--' * 30) if moeda_destino == '1': print('MOEDA DE DESTINO: - REAL - BRASIL') dados['Moeda destino'] = ['REAL - BRASIL'] moeda_destino_sigla = 'R$:' lista_de_dados.append('REAL - BRASIL') break elif moeda_destino == '2': print('MOEDA DE DESTINO: - DÓLAR - EUA') dados['Moeda destino'] = ['DÓLAR - EUA'] moeda_destino_sigla = 'U$$:' lista_de_dados.append('DÓLAR - EUA') break elif moeda_destino == '3': print('MOEDA DE DESTINO: - EURO - EUROPA') dados['Moeda destino'] = ['EURO'] moeda_destino_sigla = '€:' lista_de_dados.append('EURO') break elif moeda_destino == '4': print('MOEDA DE DESTINO: - DÓLAR - CANADÁ') dados['Moeda destino'] = ['DÓLAR CAD'] moeda_destino_sigla = 'U$$:' lista_de_dados.append('DÓLAR CAD') break print('--' * 30) data_operacao = str(input('Data da operação:\033[1;90m[NO FORMATO: __/__/____ ]\033[0;0m:?')) dados['Data Operação'] = [data_operacao] lista_de_dados.append(data_operacao) print('--' * 30) valor_original = str(input(f'Valor original:? 
{moeda_origem_sigla}')) dados['Valor Original'] = [valor_original] lista_de_dados.append(valor_original) convertendo_valor = float(valor_original) valor_tot_operacoes += convertendo_valor if moeda_origem == '1' and moeda_destino == '2': convertendo_valor_brasil = float(valor_original) tot_movimento_brasil_dolar_eua += convertendo_valor_brasil elif moeda_origem == '1' and moeda_destino == '3': convertendo_valor_brasil_euro = float(valor_original) tot_movimento_brasil_euro += convertendo_valor_brasil_euro elif moeda_origem == '1' and moeda_destino == '4': convertendo_valor_brasil_dolar_canada = float(valor_original) tot_movimento_brasil_dolar_canada += convertendo_valor_brasil_dolar_canada if moeda_origem == '2' and moeda_destino == '1': convertendo_valor_eua_brasil = float(valor_original) tot_movimento_dolar_eua_brasil += convertendo_valor_eua_brasil elif moeda_origem == '2' and moeda_destino == '3': convertendo_valor_eua_euro = float(valor_original) tot_movimento_dolar_eua_euro += convertendo_valor_eua_euro elif moeda_origem == '2' and moeda_destino == '4': convertendo_valor_eua_canada = float(valor_original) tot_movimento_dolar_eua_dolar_canada += convertendo_valor_eua_canada if moeda_origem == '3' and moeda_destino == '1': convertendo_valor_euro_brasil = float(valor_original) tot_movimento_euro_brasil += convertendo_valor_euro_brasil elif moeda_origem == '3' and moeda_destino == '2': convertendo_valor_euro_eua = float(valor_original) tot_movimento_euro_dolar_eua += convertendo_valor_euro_eua elif moeda_origem == '3' and moeda_destino == '4': convertendo_valor_euro_canada = float(valor_original) tot_movimento_euro_dolar_canada += convertendo_valor_euro_canada if moeda_origem == '4' and moeda_destino == '1': convertendo_valor_canada_brasil = float(valor_original) tot_movimento_dolar_canada_brasil += convertendo_valor_canada_brasil elif moeda_origem == '4' and moeda_destino == '2': convertendo_valor_canada_eua = float(valor_original) 
tot_movimento_dolar_canada_dolar_eua += convertendo_valor_canada_eua elif moeda_origem == '4' and moeda_destino == '3': convertendo_valor_canada_euro = float(valor_original) tot_movimento_dolar_canada_euro += convertendo_valor_canada_euro print('--' * 30) valor_convertido = str(input(f'Valor convertido:? {moeda_destino_sigla}')) dados['Valor Convertido'] = [valor_convertido] lista_de_dados.append(valor_convertido) print('--' * 30) taxa_cobrada = str(input(f'Taxa cobrada:? R$:')) conversao_taxa = float(taxa_cobrada) tot_taxas += conversao_taxa dados['Taxa Cobrada'] = [taxa_cobrada] lista_de_dados.append(taxa_cobrada) print('--' * 30) copia_dados.append(dados.copy()) lista_principal.append(lista_de_dados) print('--' * 30) print('\033[1;90m----------------------- FIM CADASTRO -----------------------\033[0;0m') print('--' * 30) while True: continua_cadastro = str(input('\nCadastrar mais clientes:? [C - P/ CONTINUAR OU S - P/ SAIR]:?')).strip().upper()[0] print('') if continua_cadastro == 'C': break elif continua_cadastro == 'S': break else: print(f'\033[1;41m- SOMENTE UMA DAS OPÇÕES ACIMA: [C - CONTINUAR/ S - SAIR]:\033[0;0m') if continua_cadastro == 'S': print('--' * 35) print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='') sleep(0.3) print('\033[1;30m\033[1;43m.\033[0;0m', end='') sleep(0.3) print('\033[1;30m\033[1;43m.\033[0;0m', end='') sleep(0.3) print('\033[1;30m\033[1;43m.\033[0;0m', end='') sleep(0.3) print('\033[1;30m\033[1;43m50%\033[0;0m', end='') sleep(0.8) print('\033[1;30m\033[1;43m.\033[0;0m', end='') sleep(0.8) print('\033[1;30m\033[1;43m.\033[0;0m', end='') sleep(0.8) print('\033[1;30m\033[1;43m100%\033[0;0m', end='') print('\033[1;30m\033[1;43m \033[0;0m', end='') print('\n') break elif opcao_menu == '2': print('--' * 35) print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS ":*^70}\033[0;0m') print('--' * 35) if codigo_cliente == 0: print('--' * 35) print(' \033[1;40m Aguarde ! 
AVERIGUANDO DADOS\033[0;0m', end='') sleep(0.3) print('\033[1;40m.\033[0;0m', end='') sleep(0.3) print('\033[1;40m.\033[0;0m', end='') sleep(0.3) print('\033[1;40m.\033[0;0m', end='') sleep(0.3) print('\033[1;40m50%\033[0;0m', end='') sleep(0.8) print('\033[1;40m.\033[0;0m', end='') sleep(0.8) print('\033[1;40m.\033[0;0m', end='') sleep(0.8) print('\033[1;40m100%\033[0;0m', end='') print('\033[1;40m \033[0;0m', end='') print('\n') print('--' * 35) print() print('\033[1;90m------------------------------------------------------------\033[0;0m') print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO
result will be stored. pExcepInfo: A pointer to a structure that contains exception information. puArgErr: The index of the first argument that has an error. """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class _EventInfo: """ Exposes the public members of the System.Reflection.EventInfo class to unmanaged code. """ def AddEventHandler(self, target, handler): """ AddEventHandler(self: _EventInfo, target: object, handler: Delegate) Provides COM objects with version-independent access to the System.Reflection.EventInfo.AddEventHandler(System.Object,System.Delegate) method. target: The event source. handler: A method or methods to be invoked when the event is raised by the target. """ pass def Equals(self, other): """ Equals(self: _EventInfo, other: object) -> bool Provides COM objects with version-independent access to the System.Object.Equals(System.Object) method. other: The System.Object to compare with the current System.Object. Returns: true if the specified System.Object is equal to the current System.Object; otherwise, false. """ pass def GetAddMethod(self, nonPublic=None): """ GetAddMethod(self: _EventInfo) -> MethodInfo Provides COM objects with version-independent access to the System.Reflection.EventInfo.GetAddMethod method. Returns: A System.Reflection.MethodInfo object representing the method used to add an event-handler delegate to the event source. GetAddMethod(self: _EventInfo, nonPublic: bool) -> MethodInfo Provides COM objects with version-independent access to the System.Reflection.EventInfo.GetAddMethod(System.Boolean) method. nonPublic: true to return non-public methods; otherwise, false. Returns: A System.Reflection.MethodInfo object representing the method used to add an event-handler delegate to the event source. 
""" pass def GetCustomAttributes(self, *__args): """ GetCustomAttributes(self: _EventInfo, inherit: bool) -> Array[object] Provides COM objects with version-independent access to the System.Reflection.MemberInfo.GetCustomAttributes(System.Boolean) method. inherit: true to search a member's inheritance chain to find the attributes; otherwise, false. Returns: An array that contains all the custom attributes, or an array with zero (0) elements if no attributes are defined. GetCustomAttributes(self: _EventInfo, attributeType: Type, inherit: bool) -> Array[object] Provides COM objects with version-independent access to the System.Reflection.MemberInfo.GetCustomAttributes(System.Type,System.Boolean) method. attributeType: The type of attribute to search for. Only attributes that are assignable to this type are returned. inherit: true to search this member's inheritance chain to find the attributes; otherwise, false. Returns: An array of custom attributes applied to this member, or an array with zero (0) elements if no attributes have been applied. """ pass def GetHashCode(self): """ GetHashCode(self: _EventInfo) -> int Provides COM objects with version-independent access to the System.Object.GetHashCode method. Returns: The hash code for the current instance. """ pass def GetIDsOfNames(self, riid, rgszNames, cNames, lcid, rgDispId): """ GetIDsOfNames(self: _EventInfo, riid: Guid, rgszNames: IntPtr, cNames: UInt32, lcid: UInt32, rgDispId: IntPtr) -> Guid Maps a set of names to a corresponding set of dispatch identifiers. riid: Reserved for future use. Must be IID_NULL. rgszNames: An array of names to be mapped. cNames: The count of the names to be mapped. lcid: The locale context in which to interpret the names. rgDispId: An array allocated by the caller that receives the identifiers corresponding to the names. 
""" pass def GetRaiseMethod(self, nonPublic=None): """ GetRaiseMethod(self: _EventInfo) -> MethodInfo Provides COM objects with version-independent access to the System.Reflection.EventInfo.GetRaiseMethod method. Returns: The method that is called when the event is raised. GetRaiseMethod(self: _EventInfo, nonPublic: bool) -> MethodInfo Provides COM objects with version-independent access to the System.Reflection.EventInfo.GetRaiseMethod(System.Boolean) method. nonPublic: true to return non-public methods; otherwise, false. Returns: The System.Reflection.MethodInfo object that was called when the event was raised. """ pass def GetRemoveMethod(self, nonPublic=None): """ GetRemoveMethod(self: _EventInfo) -> MethodInfo Provides COM objects with version-independent access to the System.Reflection.EventInfo.GetRemoveMethod method. Returns: A System.Reflection.MethodInfo object representing the method used to remove an event-handler delegate from the event source. GetRemoveMethod(self: _EventInfo, nonPublic: bool) -> MethodInfo Provides COM objects with version-independent access to the System.Reflection.EventInfo.GetRemoveMethod(System.Boolean) method. nonPublic: true to return non-public methods; otherwise, false. Returns: A System.Reflection.MethodInfo object representing the method used to remove an event-handler delegate from the event source. """ pass def GetType(self): """ GetType(self: _EventInfo) -> Type Provides COM objects with version-independent access to the System.Object.GetType method. Returns: A System.Type object. """ pass def GetTypeInfo(self, iTInfo, lcid, ppTInfo): """ GetTypeInfo(self: _EventInfo, iTInfo: UInt32, lcid: UInt32, ppTInfo: IntPtr) Retrieves the type information for an object, which can be used to get the type information for an interface. iTInfo: The type information to return. lcid: The locale identifier for the type information. ppTInfo: A pointer to the requested type information object. 
""" pass def GetTypeInfoCount(self, pcTInfo): """ GetTypeInfoCount(self: _EventInfo) -> UInt32 Retrieves the number of type information interfaces that an object provides (either 0 or 1). """ pass def Invoke(self, dispIdMember, riid, lcid, wFlags, pDispParams, pVarResult, pExcepInfo, puArgErr): """ Invoke(self: _EventInfo, dispIdMember: UInt32, riid: Guid, lcid: UInt32, wFlags: Int16, pDispParams: IntPtr, pVarResult: IntPtr, pExcepInfo: IntPtr, puArgErr: IntPtr) -> Guid Provides access to properties and methods exposed by an object. dispIdMember: An identifier for the member. riid: Reserved for future use. Must be IID_NULL. lcid: The locale context in which to interpret arguments. wFlags: Flags describing the context of the call. pDispParams: A pointer to a structure containing an array of arguments, an array of argument DISPIDs for named arguments, and counts for the number of elements in the arrays. pVarResult: A pointer to the location where the result will be stored. pExcepInfo: A pointer to a structure that contains exception information. puArgErr: The index of the first argument that has an error. """ pass def IsDefined(self, attributeType, inherit): """ IsDefined(self: _EventInfo, attributeType: Type, inherit: bool) -> bool Provides COM objects with version-independent access to the System.Reflection.MemberInfo.IsDefined(System.Type,System.Boolean) method. attributeType: The Type object to which the custom attributes are applied. inherit: true to search this member's inheritance chain to find the attributes; otherwise, false. Returns: true if one or more instance of the attributeType parameter is applied to this member; otherwise, false. """ pass def RemoveEventHandler(self, target, handler): """ RemoveEventHandler(self: _EventInfo, target: object, handler: Delegate) Provides COM objects with version-independent access to the System.Reflection.EventInfo.RemoveEventHandler(System.Object,System.Delegate) method. target: The event source. 
handler: The delegate to be disassociated from the events raised by target. """ pass def ToString(self): """ ToString(self: _EventInfo) -> str Provides COM objects with version-independent access to the System.Object.ToString method. Returns: A string that represents the current System.Object. """ pass def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==y """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __str__(self, *args): #cannot find CLR method pass Attributes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Provides COM objects with version-independent access to the System.Reflection.EventInfo.Attributes property. Get: Attributes(self: _EventInfo) -> EventAttributes """ DeclaringType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Provides COM objects with version-independent access to the System.Reflection.MemberInfo.DeclaringType property. Get: DeclaringType(self: _EventInfo) -> Type """ EventHandlerType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Provides COM objects with version-independent access to the System.Reflection.EventInfo.EventHandlerType property. Get: EventHandlerType(self: _EventInfo) -> Type """ IsMulticast = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Provides COM objects with version-independent access to the System.Reflection.EventInfo.IsMulticast property. Get: IsMulticast(self: _EventInfo) -> bool """ IsSpecialName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Provides COM objects with version-independent access to the System.Reflection.EventInfo.IsSpecialName property. 
Get: IsSpecialName(self: _EventInfo) -> bool """ MemberType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
<filename>src/Python27Packages/run_mdao/__init__.py from __future__ import print_function from __future__ import absolute_import from __future__ import division import os import os.path import io import json import importlib import time import contextlib import itertools import numpy import six from collections import defaultdict from run_mdao.csv_recorder import MappingCsvRecorder, CsvRecorder from run_mdao.enum_mapper import EnumMapper from run_mdao.drivers import FullFactorialDriver, UniformDriver, LatinHypercubeDriver, OptimizedLatinHypercubeDriver, PredeterminedRunsDriver, CsvDriver from run_mdao.restart_recorder import RestartRecorder from openmdao.api import IndepVarComp, Problem, Group, ScipyOptimizer, FileRef, SubProblem, Component from openmdao.api import profile as openmdao_profile from openmdao.core.mpi_wrap import MPI from run_mdao.testbenchcomponent import TestBenchComponent, _get_param_name from run_mdao.parallel_execution import par_clone_and_config import testbenchexecutor.progress_service __all__ = ('run', 'run_one', 'with_problem') def CouchDBRecorder(*args, **kwargs): """Lazy load CouchDBRecorder.""" from couchdb_recorder.couchdb_recorder import CouchDBRecorder return CouchDBRecorder(*args, **kwargs) def FmuWrapper(*args, **kwargs): from fmu_wrapper import fmu_wrapper return fmu_wrapper.FmuWrapper(*args, **kwargs) if MPI: from openmdao.core.petsc_impl import PetscImpl as impl else: from openmdao.api import BasicImpl as impl # cache the output of a TestBenchComponent if the computation exceeds this many seconds. 
Otherwise, save memory by throwing it out CACHE_THRESHOLD_SECONDS = 5 def _memoize_solve(component, fn): from run_mdao.array_hashable import array_hashable memo = {} component.add_output('_runtime', val=0.0) # don't attempt to memoize components with FileRefs if [v for v in itertools.chain(component._init_unknowns_dict.values(), component._init_params_dict.values()) if isinstance(v['val'], FileRef)]: return fn def solve_nonlinear(tb_params, unknowns, resids): # FIXME: without dict(), this returns wrong values. why? tb_params = dict(tb_params) def unwrap_val(val): if isinstance(val, numpy.ndarray): return array_hashable(val) return val hashable = tuple((unwrap_val(param) for param in tb_params.values())) memoized_unknowns = memo.get(hashable, None) if memoized_unknowns: # print('cache hit') for name, value in six.iteritems(memoized_unknowns): unknowns[name] = value return start = time.time() fn(tb_params, unknowns, resids=resids) unknowns['_runtime'] = time.time() - start if time.time() - start >= CACHE_THRESHOLD_SECONDS: # memo[hashable] = {key: (value['val'].val if value.get('pass_by_obj', False) else value['val']) for key, value in six.iteritems(unknowns)} memo[hashable] = {key: unknowns[key] for key in unknowns} return solve_nonlinear def run_one(filename, input): """Run one iteration with specified inputs.""" original_dir = os.path.dirname(os.path.abspath(filename)) class OneInputDriver(PredeterminedRunsDriver): def __init__(self, *args, **kwargs): super(OneInputDriver, self).__init__(original_dir=original_dir, num_samples=1, *args, **kwargs) self.use_restart = False def _deserialize_or_create_runlist(self): return [input] return run(filename, override_driver=OneInputDriver()) def instantiate_component(component, component_name, mdao_config, root, subproblem_output_meta): component_type = component.get('type', 'TestBenchComponent') if component_type == 'IndepVarComp': def get_unknown_val(unknown): if unknown.get('type') is None: return unknown['value'] return 
{'double': float, 'int': int, 'string': six.text_type, 'array': numpy.array}[unknown['type']](unknown['value']) def get_unknown_meta(unknown): ret = {'pass_by_obj': True} units = unknown.get('units') if units: ret['units'] = str(units) return ret vars = ((name, get_unknown_val(unknown), get_unknown_meta(unknown)) for name, unknown in six.iteritems(component['unknowns'])) return IndepVarComp(vars) elif component_type == 'TestBenchComponent': tb = TestBenchComponent(component_name, mdao_config, root, subproblem_output_meta) # FIXME verify this works properly tb.solve_nonlinear = _memoize_solve(tb, tb.solve_nonlinear) return tb elif component_type == 'EnumMap': return EnumMapper(component['details']['config'], param_name=_get_param_name('input')) elif component_type == 'FMU': return FmuWrapper(component['details']['fmu']) else: if '.' in component_type: mod_name = '.'.join(component_type.split('.')[:-1]) class_name = component_type.split('.')[-1] component_instance = getattr(importlib.import_module(mod_name), class_name)(**component['details']) else: component_instance = locals()[component_type](**component['details']) return component_instance def run(filename, override_driver=None, additional_recorders=(), append_csv=False, profile=False): """Run OpenMDAO on an mdao_config.""" original_dir = os.path.dirname(os.path.abspath(filename)) if MPI: mdao_config = par_clone_and_config(filename) else: with open(filename, 'r') as mdao_config_json: mdao_config = json.loads(mdao_config_json.read()) with with_problem(mdao_config, original_dir, override_driver, additional_recorders=additional_recorders, append_csv=append_csv, profile=profile) as top: top.run() return top def get_desvar_path(designVariable): return 'designVariable.{}'.format(designVariable) @contextlib.contextmanager def with_problem(mdao_config, original_dir, override_driver=None, additional_recorders=(), is_subproblem=False, append_csv=False, profile=False): 
testbenchexecutor.progress_service.update_progress("Configuring PET...", -1, -1) # TODO: can we support more than one driver if len(mdao_config['drivers']) == 0: driver = None else: driver = next(iter(mdao_config['drivers'].values())) top = Problem(impl=impl) root = top.root = Group() recorder = None driver_params = {'original_dir': original_dir} if driver is not None: eval(compile(driver['details'].get('Code', ''), '<driver Code>', 'exec'), globals(), driver_params) subProblemInputMeta = {} subProblemOutputMeta = {} if driver is not None: if driver['type'] == 'optimizer': if driver.get('details', {}).get('OptimizationFunction') == 'Custom': class_path = driver['details']['OptimizationClass'].split('.') mod = __import__('.'.join(class_path[:-1]), fromlist=[class_path[-1]]) top.driver = getattr(mod, class_path[-1])() else: top.driver = ScipyOptimizer() top.driver.options['optimizer'] = str(driver.get('details', {}).get('OptimizationFunction', 'SLSQP')) for key, value in six.iteritems(driver_params): try: top.driver.options[key] = value except KeyError: pass # Ignore options that aren't valid for driver elif driver['type'] == 'parameterStudy': drivers = { "Uniform": UniformDriver, "Full Factorial": FullFactorialDriver, "Latin Hypercube": LatinHypercubeDriver, "Opt Latin Hypercube": OptimizedLatinHypercubeDriver, "CSV File": CsvDriver, } driver_type = drivers.get(driver['details']['DOEType']) if driver_type is None: raise Exception('DOEType "{}" is unsupported'.format(driver['details']['DOEType'])) if override_driver is None: top.driver = driver_type(**driver_params) else: top.driver = override_driver seed = getattr(top.driver, 'seed', None) if seed is not None: print('Using random seed {}'.format(seed)) elif driver['type'] == 'PCCDriver': import PCC.pcc_driver driver_params.update(driver['details']) driver_params["_run_mdao_subproblem_output_meta"] = subProblemOutputMeta top.driver = PCC.pcc_driver.PCCdriver(**driver_params) else: raise ValueError('Unsupported driver 
type %s' % driver['type']) driver_vars = [] for var_name, var in six.iteritems(driver['designVariables']): if var.get('type', 'double') == 'double': default = 0.0 range_min = var.get('RangeMin') range_max = var.get('RangeMax') if range_min is not None and range_max is not None: default = range_min + (range_max - range_min) / 2 driver_vars.append((var_name, default, {})) elif var['type'] == 'enum': driver_vars.append((var_name, var['items'][0], {"pass_by_obj": True})) elif var['type'] == 'int': driver_vars.append((var_name, 0, {})) else: raise ValueError('Unimplemented designVariable type "{}"'.format(var['type'])) units = var.get('units') if units: driver_vars[-1][2]['units'] = str(var['units']) root.add(get_desvar_path('').split('.')[0], IndepVarComp(driver_vars)) for var_name, var in six.iteritems(driver['designVariables']): if var.get('type', 'double') == 'double': top.driver.add_desvar(get_desvar_path(var_name), lower=var.get('RangeMin'), upper=var.get('RangeMax')) elif var['type'] == 'enum': driver_vars.append((var_name, var['items'][0], {"pass_by_obj": True})) formatted_name = get_desvar_path(var_name) top.driver.add_desvar(formatted_name) top.driver._desvars[formatted_name]['type'] = var['type'] top.driver._desvars[formatted_name]['items'] = var['items'] elif var['type'] == 'int': driver_vars.append((var_name, 0.0)) formatted_name = get_desvar_path(var_name) top.driver.add_desvar(formatted_name, lower=var.get('RangeMin'), upper=var.get('RangeMax')) top.driver._desvars[formatted_name]['type'] = var['type'] else: raise ValueError('Unimplemented designVariable type "{}"'.format(var['type'])) for subProblemName, subProblemConfig in six.iteritems(mdao_config.get('subProblems', {})): subProblemDir = os.path.join(original_dir, subProblemName) with with_problem(subProblemConfig, subProblemDir, is_subproblem=True) as (subProblem, inputMeta, outputMeta): root.add(subProblemName, subProblem) subProblemInputMeta[subProblemName] = inputMeta 
subProblemOutputMeta[subProblemName] = outputMeta if is_subproblem: subProblemInputs = [] inputMeta = {} for name, problemInput in six.iteritems(mdao_config['problemInputs']): if problemInput.get("innerSource"): if problemInput["innerSource"][0] in mdao_config['drivers']: path = get_desvar_path(problemInput["innerSource"][1]) else: path = '{}.{}'.format(problemInput["innerSource"][0], problemInput["innerSource"][1]) subProblemInputs.append(path) inputMeta[name] = path else: # TODO: How important is it to figure out the correct type here? # We might be able to infer the type from a component that connects to # this ProblemInput, but might have to refer to something outside the # subproblem (initial_value, pass_by_obj) = get_problem_input_value(problemInput) root.add(name, IndepVarComp(name, initial_value, pass_by_obj=pass_by_obj)) path = "{0}.{0}".format(name) subProblemInputs.append(path) inputMeta[name] = path # TODO: Handle direct connection between ProblemInput and ProblemOutput (single-element Source) # TODO: Pass-through ExecComps to allow direct ProblemInput->ProblemOutput connections to behave subProblemOutputs = [] outputMeta = {} for name, source in six.iteritems(mdao_config['problemOutputs']): if len(source) == 1: if source[0] in mdao_config['problemInputs'] and 'innerSource' in mdao_config['problemInputs'][source[0]]: # Assume inner source is a design variable desvar = driver['designVariables'] passByObj = False if desvar.get('type', 'double') == 'double': initialVal = 0.0 elif desvar['type'] == 'enum': initialVal = '' # TODO or maybe initialVal = 0.0 elif desvar['type'] == 'int': initialVal = 0 else: raise ValueError('Unimplemented designVariable type "{}"'.format(desvar['type'])) else: if source[0] in mdao_config['problemInputs']: (initialVal, passByObj) = get_problem_input_value(mdao_config['problemInputs'][source[0]]) else: raise ValueError('Missing ProblemOutput source: {}'.format(source[0])) comp_name = "pass_through_{}".format(name) comp = 
PassThroughComponent() comp.add_var(name, initialVal) root.add(comp_name, comp) inputPath = "{}.{}".format(comp_name, name) path = "{}.{}_out".format(comp_name, name) root.connect(inputMeta[source[0]], inputPath) else: if source[0] in mdao_config['drivers']: # TODO: If it's legal for this desvar to also point to a ProblemInput, # we need to create a PassThroughComponent just like above this_driver = mdao_config['drivers'][source[0]] if source[1] in this_driver.get('designVariables', {}): path = get_desvar_path(source[1]) else: # Source is an objective, ivar, or constraint; need to get the actual source if source[1] in this_driver.get('objectives', {}): driver_output_type = 'objectives' elif source[1] in this_driver.get('constraints', {}): driver_output_type = 'constraints' elif source[1] in this_driver.get('intermediateVariables', {}): driver_output_type = 'intermediateVariables' else: raise ValueError('Driver output "{}"" not found'.format(source[1])) real_source = this_driver[driver_output_type][source[1]]['source'] if real_source[0] in mdao_config['subProblems']: unknown_name = subProblemOutputMeta[real_source[0]][real_source[1]] path = '{}.{}'.format(real_source[0], unknown_name) else: path = '{}.{}'.format(real_source[0], real_source[1]) elif source[0] in mdao_config['subProblems']: unknown_name = subProblemOutputMeta[source[0]][source[1]] path = '{}.{}'.format(source[0], unknown_name) else: path = '{}.{}'.format(source[0], source[1]) subProblemOutputs.append(path) outputMeta[name] = path def get_sorted_components(): """Apply Tarjan's algorithm to the Components.""" visited = {} tbs_sorted = [] def get_ordinal(name): ordinal = visited.get(name, -1) if ordinal is None: raise ValueError('Loop involving component "{}"'.format(name)) if ordinal != -1: return ordinal component = mdao_config['components'][name] visited[name] = None ordinal = 0 for source in (param.get('source') for param in component.get('parameters', {}).values()): if not source: continue if 
source[0] in mdao_config['drivers']: continue if source[0] in mdao_config.get('problemInputs', {}): continue if source[0] in mdao_config.get('subProblems', {}): continue ordinal = max(ordinal, get_ordinal(source[0]) + 1) visited[name] = ordinal tbs_sorted.append(name) return ordinal for component_name in mdao_config['components']: get_ordinal(component_name) return tbs_sorted tbs_sorted = get_sorted_components() # TestBenchComponents look at params they're connected to, so create them last def is_testbenchcomponent(component_name): return mdao_config['components'][component_name].get('type', 'TestBenchComponent') == 'TestBenchComponent' tbs_sorted = sorted(tbs_sorted, key=is_testbenchcomponent) for component_name in tbs_sorted: component = mdao_config['components'][component_name] mdao_component = instantiate_component(component, component_name, mdao_config, root, subProblemOutputMeta) root.add(component_name, mdao_component) for component_name, component in six.iteritems(mdao_config['components']): for parameter_name, parameter in six.iteritems(component.get('parameters', {})): if parameter.get('source'):
0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, ar_ub, 10) ]: ar_simplex_l.append(ar) if sim_simplex_reqed: ET_sim_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=True, mixed_traff=mixed_traff) ) c = next(dark_color) label = 'Simplex' # if t != 1 else 'Simplex or MDS' print("ET_sim_l= {}".format(pprint.pformat(ET_sim_l) ) ) plot.plot(ar_simplex_l, ET_sim_l, label=label, color=c, marker=next(marker), mew=mew, ms=ms, linestyle=':') # stab_lim = ET_simplex_approx(t, ar, servdist_m, incremental=True, ar_ub=True) # plot.axvline(stab_lim, label="Simplex stability", color=c, linestyle='--') # Rep ar_rep_l, E_T_rep_n_1_l = [], [] for ar in numpy.linspace(0.05, ar_ub_rep-0.05, 20): ar_rep_l.append(ar) E_T_rep_n_1_l.append(E_T_rep_n_1(ar, mu_rep, n_rep) ) # E_T_rep_n_1_l = [e*n_rep for e in E_T_rep_n_1_l] c = next(dark_color) plot.plot(ar_rep_l, E_T_rep_n_1_l, label=r'Replication', color=c, marker=next(marker), mew=mew, ms=ms, linestyle=':') # plot.axvline(ar_ub_rep, label="Rep stability", color=c, linestyle='--') # # MDS # if ar_ub_mds is not None: # ar_mds_l = [] # for ar in [*numpy.linspace(0.05, 0.7*ar_ub_mds, 5, endpoint=False), *numpy.linspace(0.7*ar_ub_mds, ar_ub, 10, endpoint=False) ]: # # for ar in numpy.linspace(ar_ub_mds, ar_ub_mds, 1): # ar_mds_l.append(ar) # if sim_mds_reqed: # E_T_sim_mds_l.append(test_avq(nf, ar, t=1, r, k, serv, {'mu': mu_mds}, w_sys=True) ) # print("E_T_sim_mds_l= {}".format(pprint.pformat(E_T_sim_mds_l) ) ) # plot.plot(ar_mds_l, E_T_sim_mds_l, label=r'MDS', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':') def plot_selectone(): # Simplex ar_ub = arub_simplex_selectone(t, mu) + 0.1 log(WARNING, "ar_ub= {}".format(ar_ub) ) ar_l, ET_l = [], [] for ar in numpy.linspace(0.05, ar_ub, 20): ar_l.append(ar) ET_l.append(ET_selectone(t, ar, mu) ) label = 'Simplex' # if t != 1 else 'Simplex or MDS' plot.plot(ar_l, ET_l, label=label, color=next(dark_color), marker=next(marker), mew=mew, ms=ms, 
linestyle=':') # Rep ar_ub_rep = n_rep*mu_rep ar_l, E_T_rep_l = [], [] for ar in numpy.linspace(0.05, ar_ub_rep-0.2, 20): ar_l.append(ar) E_T_rep_l.append(E_T_rep_n_1_split_to_one(ar, mu_rep, n_rep) ) plot.plot(ar_l, E_T_rep_l, label=r'Replication', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':') plot_reptoall() scheduling = "Replicate-to-all" # plot_selectone() # scheduling = "Split-to-one" plot.legend(prop={'size':12}) plot.xlabel(r'Arrival rate $\lambda$ (Request/s)', fontsize=12) plot.ylabel(r'Average download time (s)', fontsize=12) # plot.title(r'$t={}, \mu={}$'.format(t, mu) ) plot.title(r'{} scheduling, $t= {}$'.format(scheduling, t) ) fig = plot.gcf() def_size = fig.get_size_inches() fig.set_size_inches(def_size[0]/1.4, def_size[1]/1.4) fig.tight_layout() plot.savefig("plot_simplex_vs_rep_t_{}_{}.pdf".format(t, scheduling) ) fig.clear() # Energy # ar_simplex_l, Energy_simplex_l = [], [] # for ar in numpy.linspace(0.1, ar_ub, 20): # ar_simplex_l.append(ar) # Energy_simplex_l.append(n/ar) # ar_rep_l, Energy_rep_l = [], [] # for ar in numpy.linspace(0.1, ar_ub_rep, 20): # ar_rep_l.append(ar) # Energy_rep_l.append(n_total_rep/ar) # plot.plot(ar_simplex_l, Energy_simplex_l, label='Simplex', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':') # plot.plot(ar_rep_l, Energy_rep_l, label='Rep', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':') # plot.legend() # plot.xlabel(r'Arrival rate $\lambda$', fontsize=12) # plot.ylabel(r'Unit of energy per request', fontsize=12) # plot.title(r'$t={}, \mu={}$'.format(t, mu) ) # fig = plot.gcf() # def_size = fig.get_size_inches() # fig.set_size_inches(def_size[0]/1., def_size[1]/1.) 
# fig.tight_layout() # plot.savefig("plot_simplex_vs_rep_t_{}_energy.pdf".format(t) ) # fig.clear() log(WARNING, "done; scheduling= {}, t= {}".format(scheduling, t) ) def plot_reptoall(): mixed_traff, w_sys = False, True t, r, k = 1, 2, 2 serv = "Exp" # "Bern" # "Bern*Pareto" # "Pareto" # "Dolly" mu = 1 # loc, a = 1, 2 # U, L, p, loc, a = 1, 8, 0.2, 0.1, 1.5 # 1, 8, 0.2, 1, 3 U, L, p, loc, a = 1, 10, 0.3, 0.1, 1.5 # 1, 8, 0.2, 1, 3 # For rep-to-all if serv == "Exp": servdist_m = {'dist': serv, 'mu': mu} if t == 1: ar_ub = 1.6 elif t == 3: ar_ub = 2.4 elif t == 7: ar_ub = float(1.1*reptoall_innerbound_on_ar(t, servdist_m) ) else: ar_ub = reptoall_innerbound_on_ar(t, servdist_m) elif serv == "Pareto": servdist_m = {'dist': serv, 'loc': loc, 'a': a} ar_ub = reptoall_innerbound_on_ar(t, servdist_m) elif serv == "TPareto": servdist_m = {'dist': serv, 'l': l, 'u': u, 'a': a} ar_ub = reptoall_innerbound_on_ar(t, servdist_m) elif serv == "Bern" or serv == "Bern*Pareto": servdist_m = {'dist': serv, 'U': U, 'L': L, 'p': p, 'loc': loc, 'a': a} ar_ub = reptoall_innerbound_on_ar(t, servdist_m) elif serv == "Dolly": servdist_m = None if t == 1: ar_ub = 0.28 elif t == 3: ar_ub = 0.4 log(WARNING, "w_sys= {}, t= {}, r= {}, k= {}, servdist_m= {}, ar_ub= {}, mixed_traff= {}".format(w_sys, t, r, k, servdist_m, ar_ub, mixed_traff) ) ET_sm_l, ET_sim_l, ET_l, ET_lb_l = [], [], [], [] ET_alt_l, ET_matrixanalytic_l = [], [] ET_bestapprox_l, ET_betterapprox_l, ET_naiveapprox_l, ET_varkigauri_lb_l = [], [], [], [] ET_simbasedapprox_l = [] ET_sim_mixedtraff_l = [] # All below w_sys=True nf = 3 sim_simplex = False if serv == "Exp": if t == 1: ET_sim_l= [ 0.6775872854372559, 0.7909557937247363, 0.9486987202221493, 1.166209238915134, 1.5685720588787688, 2.478342315521276, 2.6376081306859107, 2.906788473547391, 3.263700392764921, 3.5974807041868426, 4.289127887822366, 4.794525358984301, 5.896928018871929, 8.099664758903687, 12.74155958739236] elif t == 3: ET_sim_l= [ 0.4676519075931255, 
0.5247256264186801, 0.6230081386991332, 0.775814486873029, 1.0207917160021767, 1.6244613243247372, 1.7481208563178903, 1.9667165686859327, 2.163968348080258, 2.5923594863306776, 3.0700378671376627, 3.796384731111067, 4.841880170965622, 6.610367379250164, 13.559429107437742] else: sim_simplex = True elif serv == "Pareto": if loc == 1 and a == 2: if t == 1: ET_sim_l= [ 1.5299993522735693, 1.7233577876041122, 1.8952577131712123, 2.2418712080584897, 2.853623528849504, 4.2208097489868, 4.586420599121132, 5.191481636572133, 5.6340499086639815, 5.9712033727746, 7.94309766204549, 9.599736059102067, 13.280357368839619, 17.20104661693977, 25.449711725024084] elif t == 3: ET_sim_l= [ 1.3221090353539466, 1.4459274633541828, 1.6229349092564267, 1.9043964678064051, 2.4154300633936936, 3.6666730405584844, 3.9217550909479577, 4.256167164955279, 4.717366068731679, 5.891743883842969, 6.04468767433355, 8.073514650754076, 9.880581947509592, 15.816118977624845, 28.433468299774272] else: sim_simplex = True elif loc == 1 and a == 5: if t == 3: ET_sim_l= [ 1.1276007604818075, 1.240550592912947, 1.3862061325608057, 1.645653757532261, 2.0688083303883276, 3.2115831386711813, 3.2986018954384835, 3.8148027478966227, 4.033705086448495, 5.448028336643181, 5.697392211154507, 9.053323168666376, 10.17868048265699, 23.644561610837382, None] # 93.02644300031747 else: sim_simplex = True else: sim_simplex = True elif serv == "Bern": if U == 1 and L == 8 and p == 0.2: if t == 1: # nf = 3 ET_sim_l= [ 1.6376474738985423, 1.9851446427827089, 2.4840795375267626, 3.1829054073054217, 4.39332366216294, 7.063110373762194, 7.4445330550351665, 8.208129233744382, 9.309321611480481, 10.747520637423975, 12.460023568734707, 15.038255521201348, 18.778687793661728, 23.582209372296532, 36.21619587757658] elif t == 3: # nf = 1 ET_sim_l= [ 1.1072895175117927, 1.2582695204803385, 1.4572200912301614, 1.8340775367273732, 2.4430722742069184, 4.053853819806121, 4.4494192069988605, 5.061922101782603, 5.883304533639656, 
6.705043861319703, 8.307668993372534, 11.041651319984396, 17.564101468045756, 33.184482866801716, None] else: sim_simplex = True else: sim_simplex = True elif serv == "Bern*Pareto": if U == 1 and L == 8 and p == 0.2 and loc == 1 and a == 3: if t == 11: # nf = 3 ET_sim_l= [ 2.142631836594827, 2.5302711620514966, 2.941315337537391, 3.8773353598252345, 4.550420407107853, 6.649089020276313, 7.000687768519389, 7.681497353358071, 8.058275694322152, 9.541434770613856, 10.136837383356713, 11.027889242435874, 14.072462480848941, 18.721889173565945, 29.85022801496356] elif t == 33: pass else: sim_simplex = True else: sim_simplex = True else: sim_simplex = True # Mixed traff sim_simplex_mixed_traff = False if mixed_traff: if serv == "Exp": if t == 1: ET_sim_mixedtraff_l= [ 0.678978501641253, 0.7748022818617738, 0.9072886738372506, 1.0928902616368403, 1.43754904360929, 2.0810587767368154, 2.266461910378062, 2.5977047234601125, 3.2441553951140985, 3.585616438620215, 4.415600179701042, 6.099149242270735, 9.786138444920114, None, # 21.631079441147904 None] elif t == 3: ET_sim_mixedtraff_l= [ 0.46217641274184773, 0.5249541076176077, 0.6065798815902482, 0.7193352388312126, 0.9238674360581351, 1.363955390788439, 1.4654931553890183, 1.733811055160431, 2.0493965738680795, 2.479767271681704, 3.065826086322138, 4.300842192226751, 8.05986376865404, None, # 35.70730644518723, None] else: sim_simplex_mixed_traff = True ar_l = [] for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, ar_ub, 10) ]: # for ar in numpy.linspace(0.05, ar_ub, 2): ar_l.append(ar) p_i_l = [] if sim_simplex: ET_sim = test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, p_i_l=p_i_l) print("*** ET_sim= {}".format(ET_sim) ) ET_sim_l.append(ET_sim) # ET_sim_l.append(None) # ET_simbasedapprox_l.append(ET_simplex_approx(t, ar, servdist_m, p_i_l=p_i_l)[0] ) # if sim_simplex_mixed_traff: # ET_sim_mixedtraff_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, p_i_l=p_i_l, 
mixed_traff=True) ) ET_sm_l.append(ET_simplex_sm(t, ar, servdist_m) ) ET_lb_l.append(ET_simplex_lb(t, ar, servdist_m) ) if serv == "Exp": if t == 1: ET_l.append(ET_reptoall_t1(ar, mu) ) ET_matrixanalytic_l.append(ET_reptoall_t1_matrixanalytic(t, ar, mu) ) elif t == 2: if w_sys: ET_alt_l.append(simplex_w_two_repair__E_T(ar, mu, M=2) ) ET_l.append(simplex_w_two_repair__E_T(ar, mu, M=5) ) else: ET_l.append(simplex_wo_sys_w_two_repair__E_T(ar, mu) ) ET_naiveapprox_l.append(ET_simplex_approx(t, ar, servdist_m, naive=True)[0] ) ET_betterapprox_l.append(ET_simplex_approx(t, ar, servdist_m)[0] ) ET_bestapprox_l.append(ET_simplex_approx(t, ar, servdist_m, incremental=True)[0] ) # ET_varkigauri_lb_l.append(E_T_simplex_varki_gauri_lb(t, ar, gamma, mu)[0] ) ar_mixed_traff_l = [] # for ar in numpy.linspace(0.2, 0.2, 1): for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, 1.1*ar_ub, 10) ]: ar_mixed_traff_l.append(ar) if sim_simplex_mixed_traff: ET_sim_mixedtraff_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, mixed_traff=True) ) # mew, ms = 0.1, 10 mew, ms = 2, 5 def plot_poster(): # for better looking plot ar_approx_l = list(ar_l) ar = ar_ub + 0.03 ar_approx_l.append(ar) ET_bestapprox_l.append(ET_simplex_approx(t, ar, servdist_m, incremental=True) ) plot.plot(ar_l, ET_sim_l, label="FJ-FA, simulation", marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms) plot.plot(ar_approx_l, ET_bestapprox_l, label="FJ-FA, M/G/1 approximation", zorder=2, marker=next(marker), color='black', linestyle=':', mew=mew, ms=ms) def get_xs_l_ys_l(_x_l, _y_l): x_l, y_l = [], [] for i, y in enumerate(_y_l): if y is not None: x_l.append(_x_l[i]) y_l.append(y) s = UnivariateSpline(x_l, y_l, s=0.001) xs_l = np.linspace(min(x_l), max(x_l), 20) ys_l = s(xs_l) return xs_l, ys_l def plot_(): log(WARNING, "ET_sim_l= {}".format(pprint.pformat(ET_sim_l) ) ) # plot.plot(ar_l, ET_simbasedapprox_l, label=r'Sim-based 
approximation', marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms) label = 'Simulation, fixed-arrivals' if mixed_traff else 'Simulation'
import math, copy import numpy as np import gym from gym_minigrid import entities, render_text, encoding class Cell(object): def __init__(self): self.color = 'black' # at the moment, we restrict background color to black self.clear() def clear(self): self.entity = None def __str__(self): if self.entity is not None: r = f', has:\n {self.entity}' else: r = ', empty' return f'cell: {self.color}{r}' def copy(self): return copy.deepcopy(self) def encode(self): if self.entity is not None: rep = self.entity.encode() else: rep = {} return rep class Grid(object): """ Represent a grid and operations on it It is defined as an array of Cells. """ def __init__(self, height, width): self._grid = np.empty((height, width), dtype=np.object) for (i, j), _ in np.ndenumerate(self._grid): self._grid[i, j] = Cell() def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] elif f'_{attr}' in self.__dict__: return self.__dict__[f'_{attr}'] elif hasattr(self._grid, attr): return getattr(self._grid, attr) else: raise AttributeError(f'No attribute {attr}') def __getitem__(self, pos): try: cond = pos[0] < 0 or pos[0] >= self.shape[0] or pos[1] < 0 or pos[1] >= self.shape[1] if cond: return else: return self._grid[tuple(pos)] except: return self._grid[tuple(pos)] def __setitem__(self, pos, obj): self._grid[tuple(pos)] = obj def __contains__(self, key): if isinstance(key, entities.WorldObj): for cell in self._grid.ravel(): if cell is key: return True elif isinstance(key, tuple): for cell in self._grid.ravel(): if cell.entity is not None: if (cell.entity.color, cell.entity.type) == key: return True if key[0] is None and key[1] == cell.entity.type: return True return False def __ne__(self, other): return not self == other @property def shape(self): return self._grid.shape class MiniGridEnv(gym.Env): """ 2D grid world game environment """ metadata = { 'render.modes': ['human', 'rgb_array', 'pixmap', 'ascii', 'ansi', 'curses4bit', 'curses8bit'], 'video.frames_per_second': 10 
} def __init__( self, height, width, max_steps=100, see_through_walls=False, seed=1337, agent_view_size=7 ): self.agent = entities.Agent(view_size=agent_view_size) # Action enumeration for this environment self.actions = entities.Agent.ACTIONS # Actions are discrete integer values self.action_space = gym.spaces.Discrete(len(self.actions)) # Observations are dictionaries containing an # encoding of the grid and a textual 'mission' string self._encoder = encoding.Encoder(observation=True) n_channels = len(self._encoder) self.observation_space = gym.spaces.Box( low=0, high=1, shape=(n_channels, agent_view_size, agent_view_size), dtype=int ) # Range of possible rewards self._step_reward = -1 self._win_reward = 100 self._lose_reward = -200 self.reward_range = (self._lose_reward, self._win_reward) # Environment configuration self.height = height self.width = width self.max_steps = max_steps self.see_through_walls = see_through_walls # self._decoder = Enc() # Initialize the RNG self.initial_seed = seed self.seed(seed=seed) # Initialize the state self.reset() def __str__(self): return self.render(mode='ansi') def __eq__(self, other): env1 = self.encode() env2 = other.encode() return np.array_equal(env1, env2) def _gen_grid(self, height, width): raise NotImplementedError('_gen_grid needs to be implemented by each environment') @property def shape(self): return (self.height, self.width) @property def steps_remaining(self): return self.max_steps - self.step_count def reset(self): self.agent.reset() self._gen_grid(self.height, self.width) assert self.agent.pos is not None # Step count since episode start self.step_count = 0 # Return first observation obs = self.get_obs() return obs def seed(self, seed=1337): # Seed the random number generator self.rng, seed = gym.utils.seeding.np_random(seed) return [seed] def __getitem__(self, pos): if not isinstance(pos[0], slice) and not isinstance(pos[1], slice): return self.grid[pos] env = copy.copy(self) env.agent = copy.copy(self.agent) 
env.height = pos[0].stop - pos[0].start env.width = pos[1].stop - pos[1].start env.grid = Grid(*env.shape) from_, to_ = self._slices(pos) env.agent.pos = (self.agent.pos[0] - pos[0].start, self.agent.pos[1] - pos[1].start) env.grid[to_] = self.grid[from_] return env def __setitem__(self, pos, obj): if isinstance(obj, MiniGridEnv): from_, to_ = self._slices(pos) self.grid[from_] = obj.grid[to_] elif isinstance(obj, Cell): self.grid[pos] = obj else: self[pos].entity = obj obj.pos = pos def _get_from_to_slices(self, slice_, axis): top = slice_.start bottom = slice_.stop offset = -top if top < 0 else 0 top = max(0, top) bottom = min(self.shape[axis], bottom) size = bottom - top return slice(top, top + size), slice(offset, offset + size) def _slices(self, pos): if isinstance(pos[0], slice): from_i, to_i = self._get_from_to_slices(pos[0], axis=0) else: from_i = slice(pos[0], pos[0] + 1) if isinstance(pos[1], slice): from_j, to_j = self._get_from_to_slices(pos[1], axis=1) else: from_j = slice(pos[1], pos[1] + 1) return (from_i, from_j), (to_i, to_j) def encode(self, mask=None): """ Produce a compact numpy encoding of the grid """ if mask is None: mask = np.ones(self.shape, dtype=bool) n_channels = len(self._encoder.keys) array = np.zeros((n_channels,) + self.shape, dtype=bool) e = self._encoder for (i, j), cell in np.ndenumerate(self.grid): array[e.cell['visible'], i, j] = mask[i, j] array[e.cell['visited'], i, j] = (i, j) in self.agent.visited if cell.entity is None: array[e.cell['empty'], i, j] = True else: idx = e.object_type[cell.entity.type] array[idx, i, j] = True idx = e.object_color[cell.entity.color] array[idx, i, j] = True if cell.entity.state is not None: idx = e.object_state[cell.entity.state] array[idx, i, j] = True if self.agent.pos == (i, j): array[e.agent['is_here'], i, j] = True idx = e.agent_state[self.agent.state] array[idx, i, j] = True if self.agent.is_carrying: array[e.agent['is_carrying'], i, j] = True idx = e.carrying_type[self.agent.carrying.type] 
array[idx, i, j] = True idx = e.carrying_color[self.agent.carrying.color] array[idx, i, j] = True return array def encode_obs(self): mask = self.visible() array = self.encode(mask=mask) array *= mask return array[self._encoder.obs_inds] def decode(self, array, observation=False): """ Decode an encoded array back into a grid """ channels, height, width = array.shape env = copy.copy(self) env.agent = copy.copy(self.agent) env.height = height env.width = width env.grid = Grid(height, width) if observation: if height != width: raise ValueError('For observations, we expect height and' f'width to be equal but got {height} and {width}.') env.agent.pos = (height - 1, (width - 1) // 2) env.agent.state = 'up' for (i, j), _ in np.ndenumerate(env.grid): d = encoding.Decoder(array[:, i, j], observation=observation) if not d.cell['empty']: obj = entities.make(d.object_type, color=d.object_color) if hasattr(obj, 'STATES'): obj.state = d.object_state env[i, j].entity = obj if not observation: if d.agent['is_here']: env.agent.pos = (i, j) env.agent.state = d.agent_state if d.agent['is_carrying']: env.agent.carrying = entities.make( d.carrying_type, color=d.carrying_color) if d.cell['visited']: env.agent.visited.add((i, j)) else: if (i, j) == env.agent.pos: if d.agent['is_carrying']: env.agent.carrying = entities.make( d.carrying_type, color=d.carrying_color) return env def decode_obs(self, array): # zero out cells that are not visible mask = array[self._encoder.slices['cell.visible']] >= .5 return self.decode(array * mask, observation=True) def visible(self): """ Process occluders and visibility Note that this incurs some performance cost # TODO: verify if still true """ def _flood_fill(pos): visited.add(pos) if pos[0] < 0 or pos[1] < 0 or pos[0] >= self.shape[0] or pos[1] >= self.shape[1]: return if mask[pos]: # already visited return if view_box[pos] == 0: # outside agent's view_size return if self[pos].entity is not None: if not self[pos].entity.see_behind(): # this is a 
boundary mask[pos] = True # add it to the list of visibles and exit return mask[pos] = True # visit all neighbors in the surrounding square for i in [-1, 0, 1]: for j in [-1, 0, 1]: new_pos = (pos[0] + i, pos[1] + j) if new_pos not in visited: _flood_fill(new_pos) view_box = np.zeros(self.shape, dtype=bool) from_, to_ = self._slices(self.agent.view_box) view_box[from_[0], from_[1]] = True if self.see_through_walls: mask = view_box else: mask = np.zeros(self.shape, dtype=bool) visited = set() _flood_fill(self.agent.pos) return mask def visited(self): arr = np.zeros(self.shape, dtype=bool) for pos in self.agent.visited: arr[pos] = True return arr def place_obj(self, obj, top=(0,0), size=None, reject_fn=None, max_tries=math.inf ): """ Place an object at an empty position in the grid :param top: top-left position of the rectangle where to place :param size: size of the rectangle where to place :param reject_fn: function to filter out potential positions """ top = (max(top[0], 0), max(top[1], 0)) if size is None: size = (self.height, self.width) num_tries = 0 while True: # This is to handle with rare cases where rejection sampling # gets stuck in an infinite loop if num_tries > max_tries: raise RecursionError('rejection sampling failed in place_obj') num_tries += 1 pos = ( self.rng.randint(top[0], min(top[0] + size[0], self.height)), self.rng.randint(top[1], min(top[1] + size[1], self.width)) ) # Don't place the object on top of another object # TODO: may want to consider can_overlap and can_contain cases if self[pos].entity is not None: continue # Check if there is a filtering criterion if reject_fn is not None and reject_fn(self, pos): continue break if obj.type == 'agent': obj.pos = pos else: self[pos] = obj def place_agent(self, top=(0,0), size=None, rand_dir=True, max_tries=math.inf): """ Set the agent's starting point at an empty position in the grid """ self.place_obj(self.agent, top=top, size=size, max_tries=max_tries) if rand_dir: self.agent.state = 
self.rng.choice(self.agent.STATES) def horz_wall(self, i, j, width=None, obj=entities.Wall): if
r"""
For instance, ISGCI does not know at the moment that some chordal graphs are not trees:: sage: graph_classes.Chordal <= graph_classes.Tree Unknown Descriptions ^^^^^^^^^^^^ Given a graph class, one can obtain its associated information in the ISGCI database with the :meth:`~GraphClass.description` method:: sage: Chordal.description() Class of graphs : Chordal ------------------------- id : gc_32 name : chordal type : base <BLANKLINE> Problems : ----------- 3-Colourability : Linear Clique : Polynomial Clique cover : Polynomial Cliquewidth : Unbounded Cliquewidth expression : NP-complete Colourability : Linear Cutwidth : NP-complete Domination : NP-complete Feedback vertex set : Polynomial Hamiltonian cycle : NP-complete Hamiltonian path : NP-complete Independent set : Linear Maximum bisection : Unknown Maximum cut : NP-complete Minimum bisection : Unknown Recognition : Linear Treewidth : Polynomial Weighted clique : Polynomial Weighted feedback vertex set : Unknown Weighted independent set : Linear It is possible to obtain the complete list of the classes stored in ISGCI by calling the :meth:`~GraphClasses.show_all` method (beware -- long output):: sage: graph_classes.show_all() id | name | type | smallgraph ---------------------------------------------------------------------------------------------------------------------- gc_309 | $K_4$--minor--free | base | gc_541 | $N^*$ | base | gc_215 | $N^*$--perfect | base | gc_5 | $P_4$--bipartite | base | gc_3 | $P_4$--brittle | base | gc_6 | $P_4$--comparability | base | gc_7 | $P_4$--extendible | base | ... Until a proper search method is implemented, this lets one find classes which do not appear in :obj:`graph_classes.* <GraphClasses>`. 
To retrieve a class of graph from its ISGCI ID one may use the :meth:`~GraphClasses.get_class` method:: sage: GC = graph_classes.get_class("gc_5") sage: GC $P_4$--bipartite graphs Recognition of graphs ^^^^^^^^^^^^^^^^^^^^^ The graph classes represented by the ISGCI database can alternatively be used to access recognition algorithms. For instance, in order to check that a given graph is a tree one has the following the options :: sage: graphs.PathGraph(5) in graph_classes.Tree True or:: sage: graphs.PathGraph(5).is_tree() True Furthermore, all ISGCI graph classes which are defined by the exclusion of a finite sequence of induced subgraphs benefit from a generic recognition algorithm. For instance :: sage: g = graphs.PetersenGraph() sage: g in graph_classes.ClawFree False sage: g.line_graph() in graph_classes.ClawFree True Or directly from ISGCI :: sage: gc = graph_classes.get_class("gc_441") sage: gc diamond--free graphs sage: graphs.PetersenGraph() in gc True Predefined classes ------------------ :obj:`graph_classes <GraphClasses>` currently predefines the following graph classes .. 
list-table:: :widths: 20 30 :header-rows: 1 * - Class - Related methods * - Apex - :meth:`~Graph.is_apex()`, :meth:`~Graph.apex_vertices()` * - AT_free - :meth:`~Graph.is_asteroidal_triple_free` * - Biconnected - :meth:`~Graph.is_biconnected`, :meth:`~GenericGraph.blocks_and_cut_vertices`, :meth:`~GenericGraph.blocks_and_cuts_tree` * - BinaryTrees - :meth:`~sage.graphs.graph_generators.GraphGenerators.BalancedTree`, :meth:`~Graph.is_tree` * - Bipartite - :meth:`~sage.graphs.graph_generators.GraphGenerators.BalancedTree`, :meth:`~sage.graphs.graph.Graph.is_bipartite` * - Block - :meth:`~sage.graphs.graph.Graph.is_block_graph`, :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cut_vertices`, :meth:`~sage.graphs.graph_generators.GraphGenerators.RandomBlockGraph` * - Chordal - :meth:`~sage.graphs.generic_graph.GenericGraph.is_chordal` * - Claw-Free - :meth:`~sage.graphs.graph_generators.GraphGenerators.ClawGraph` * - Comparability - * - Gallai - :meth:`~sage.graphs.generic_graph.GenericGraph.is_gallai_tree` * - Grid - :meth:`~sage.graphs.graph_generators.GraphGenerators.Grid2dGraph`, :meth:`~sage.graphs.graph_generators.GraphGenerators.GridGraph` * - Interval - :meth:`~sage.graphs.graph_generators.GraphGenerators.RandomIntervalGraph`, :meth:`~sage.graphs.graph_generators.GraphGenerators.IntervalGraph`, :meth:`~sage.graphs.generic_graph.GenericGraph.is_interval` * - Line - :meth:`~sage.graphs.graph_generators.GraphGenerators.line_graph_forbidden_subgraphs`, :meth:`~sage.graphs.graph.Graph.is_line_graph` * - Modular - :meth:`~sage.graphs.graph.Graph.modular_decomposition` * - Outerplanar - :meth:`~sage.graphs.generic_graph.GenericGraph.is_circular_planar` * - Perfect - :meth:`~sage.graphs.graph.Graph.is_perfect` * - Planar - :meth:`~sage.graphs.generic_graph.GenericGraph.is_planar` * - Polyhedral - :meth:`~sage.graphs.generic_graph.Graph.is_polyhedral` * - Split - :meth:`~sage.graphs.graph.Graph.is_split` * - Tree - 
:meth:`~sage.graphs.graph_generators.GraphGenerators.trees`, :meth:`~Graph.is_tree` * - UnitDisk - :meth:`~sage.graphs.graph_generators.GraphGenerators.IntervalGraph` * - UnitInterval - :meth:`~sage.graphs.generic_graph.GenericGraph.is_interval` Sage's view of ISGCI -------------------- The database is stored by Sage in two ways. **The classes**: the list of all graph classes and their properties is stored in a huge dictionary (see :meth:`~sage.graphs.isgci.GraphClasses.classes`). Below is what Sage knows of ``gc_249``:: sage: graph_classes.classes()['gc_249'] # random {'problem': {'Independent set': 'Polynomial', 'Treewidth': 'Unknown', 'Weighted independent set': 'Polynomial', 'Cliquewidth expression': 'NP-complete', 'Weighted clique': 'Polynomial', 'Clique cover': 'Unknown', 'Domination': 'NP-complete', 'Clique': 'Polynomial', 'Colourability': 'NP-complete', 'Cliquewidth': 'Unbounded', '3-Colourability': 'NP-complete', 'Recognition': 'Linear'}, 'type': 'base', 'id': 'gc_249', 'name': 'line'} **The class inclusion digraph**: Sage remembers the class inclusions through the inclusion digraph (see :meth:`~sage.graphs.isgci.GraphClasses.inclusion_digraph`). Its nodes are ID of ISGCI classes:: sage: d = graph_classes.inclusion_digraph() sage: d.vertices()[-10:] ['gc_990', 'gc_991', 'gc_992', 'gc_993', 'gc_994', 'gc_995', 'gc_996', 'gc_997', 'gc_998', 'gc_999'] An arc from ``gc1`` to ``gc2`` means that ``gc1`` is a superclass of ``gc2``. This being said, not all edges are stored ! To ensure that a given class is included in another one, we have to check whether there is in the digraph a ``path`` from the first one to the other:: sage: bip_id = graph_classes.Bipartite._gc_id sage: perfect_id = graph_classes.Perfect._gc_id sage: d.has_edge(perfect_id, bip_id) False sage: d.distance(perfect_id, bip_id) 2 Hence bipartite graphs are perfect graphs. 
We can see how ISGCI obtains this result :: sage: p = d.shortest_path(perfect_id, bip_id) sage: len(p) - 1 2 sage: print(p) # random ['gc_56', 'gc_76', 'gc_69'] sage: for c in p: ....: print(graph_classes.get_class(c)) perfect graphs ... bipartite graphs What ISGCI knows is that perfect graphs contain unimodular graph which contain bipartite graphs. Therefore bipartite graphs are perfect ! .. note:: The inclusion digraph is **NOT ACYCLIC**. Indeed, several entries exist in the ISGCI database which represent the same graph class, for instance Perfect graphs and Berge graphs:: sage: graph_classes.inclusion_digraph().is_directed_acyclic() False sage: Berge = graph_classes.get_class("gc_274"); Berge Berge graphs sage: Perfect = graph_classes.get_class("gc_56"); Perfect perfect graphs sage: Berge <= Perfect True sage: Perfect <= Berge True sage: Perfect == Berge True Information for developpers ---------------------------- * The database is loaded not *so* large, but it is still preferable to only load it on demand. This is achieved through the cached methods :meth:`~sage.graphs.isgci.GraphClasses.classes` and :meth:`~sage.graphs.isgci.GraphClasses.inclusion_digraph`. * Upon the first access to the database, the information is extracted from the XML file and stored in the cache of three methods: * ``sage.graphs.isgci._classes`` (dictionary) * ``sage.graphs.isgci._inclusions`` (list of dictionaries) * ``sage.graphs.isgci._inclusion_digraph`` (DiGraph) Note that the digraph is only built if necessary (for instance if the user tries to compare two classes). .. TODO:: Technical things: * Query the database for non-inclusion results so that comparisons can return ``False``, and implement strict inclusions. * Implement a proper search method for the classes not listed in :obj:`graph_classes <GraphClasses>` .. SEEALSO:: :func:`sage.graphs.isgci.show_all`. 
* Some of the graph classes appearing in :obj:`graph_classes <GraphClasses>` already have a recognition algorithm implemented in Sage. It would be so nice to be able to write ``g in Trees``, ``g in Perfect``, ``g in Chordal``, ... :-) Long-term stuff: * Implement simple accessors for all the information in the ISGCI database (as can be done from the website) * Implement intersection of graph classes * Write generic recognition algorithms for specific classes (when a graph class is defined by the exclusion of subgraphs, one can write a generic algorithm checking the existence of each of the graphs, and this method already exists in Sage). * Improve the performance of Sage's graph library by letting it take advantage of the properties of graph classes. For example, :meth:`Graph.independent_set` could use the library to detect that a given graph is, say, a tree or a planar graph, and use a specialized algorithm for finding an independent set. AUTHORS: -------- * <NAME> et al. (ISGCI database) * <NAME> (Sage implementation) Methods ------- """ from __future__ import print_function from six import itervalues from sage.structure.sage_object import SageObject from sage.structure.unique_representation import CachedRepresentation, UniqueRepresentation from sage.misc.unknown import Unknown from sage.env import GRAPHS_DATA_DIR import six #***************************************************************************** # Copyright (C) 2011 <NAME> <<EMAIL>> # # Distributed under the terms of the GNU General Public License (GPL) # http://www.gnu.org/licenses/ #***************************************************************************** _XML_FILE = "isgci_sage.xml" _SMALLGRAPHS_FILE = "smallgraphs.txt" class GraphClass(SageObject, CachedRepresentation): r""" An instance of this class represents a Graph Class, matching some
"void": return _gskernel.GsFieldVector_set(self, n, val) # Register GsFieldVector in _gskernel: _gskernel.GsFieldVector_swigregister(GsFieldVector) class GsIntVector(object): thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self): _gskernel.GsIntVector_swiginit(self, _gskernel.new_GsIntVector()) __swig_destroy__ = _gskernel.delete_GsIntVector def add(self, obj: 'int const &') -> "void": return _gskernel.GsIntVector_add(self, obj) def size(self) -> "size_t": return _gskernel.GsIntVector_size(self) def clear(self) -> "void": return _gskernel.GsIntVector_clear(self) def empty(self) -> "bool": return _gskernel.GsIntVector_empty(self) def get(self, n: 'int') -> "int": return _gskernel.GsIntVector_get(self, n) def set(self, n: 'int', val: 'int const &') -> "void": return _gskernel.GsIntVector_set(self, n, val) # Register GsIntVector in _gskernel: _gskernel.GsIntVector_swigregister(GsIntVector) class GsUInt64Vector(object): thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self): _gskernel.GsUInt64Vector_swiginit(self, _gskernel.new_GsUInt64Vector()) __swig_destroy__ = _gskernel.delete_GsUInt64Vector def add(self, obj: 'unsigned long long const &') -> "void": return _gskernel.GsUInt64Vector_add(self, obj) def size(self) -> "size_t": return _gskernel.GsUInt64Vector_size(self) def clear(self) -> "void": return _gskernel.GsUInt64Vector_clear(self) def empty(self) -> "bool": return _gskernel.GsUInt64Vector_empty(self) def get(self, n: 'int') -> "unsigned long long": return _gskernel.GsUInt64Vector_get(self, n) def set(self, n: 'int', val: 'unsigned long long const &') -> "void": return _gskernel.GsUInt64Vector_set(self, n, val) # Register GsUInt64Vector in _gskernel: _gskernel.GsUInt64Vector_swigregister(GsUInt64Vector) eUnknownDataSource = _gskernel.eUnknownDataSource r""" 未知数据源类型""" eSqliteFile = 
_gskernel.eSqliteFile r""" Sqlite文件数据源""" eShapeFile = _gskernel.eShapeFile r""" ESRI Shape文件数据源""" eGeoPackage = _gskernel.eGeoPackage r""" OGC GeoPakcage数据源""" eOracleSpatial = _gskernel.eOracleSpatial r""" OracleSpatial数据源""" eOracleSpatial2 = _gskernel.eOracleSpatial2 r""" 标准OracleSpatial数据源""" eMySQL = _gskernel.eMySQL r""" MySQL数据源""" ePostgreSQL = _gskernel.ePostgreSQL r""" PostgreSQL数据源""" eFile = _gskernel.eFile r""" 所有文件类型的数据源""" eDameng = _gskernel.eDameng r""" 达梦数据源""" eWeb = _gskernel.eWeb r""" web数据源""" eSupportTransaction = _gskernel.eSupportTransaction r""" 数据库是否支持事务""" eSupportStoreFeatureClass = _gskernel.eSupportStoreFeatureClass r""" 数据库是否支持矢量地物类的存储""" eSupportStoreRowClass = _gskernel.eSupportStoreRowClass r""" 数据库是否支持普通二维表的存储""" eSupportStoreTileClass = _gskernel.eSupportStoreTileClass r""" 数据库是否支持瓦片类的存储""" eSupportDataRoomFolder = _gskernel.eSupportDataRoomFolder r""" 数据库是数据集目录""" eSupportRecursiveDataRoomFolder = _gskernel.eSupportRecursiveDataRoomFolder r""" 数据库是数据集目录多级递归 数据集目录下是否还可以支持数据集目录。""" eSupportStoreRasterClass = _gskernel.eSupportStoreRasterClass r""" 数据源是否支持栅格数据存储""" eSupportStoreMosaicRasterClass = _gskernel.eSupportStoreMosaicRasterClass r""" 数据源是否支持镶嵌栅格数据存储""" class GsConnectProperty(object): r"""数据库连接信息定义数据库连接的各类信息以及数据库类型等""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr Version = property(_gskernel.GsConnectProperty_Version_get, _gskernel.GsConnectProperty_Version_set, doc=r"""数据库连接信息版本。""") Server = property(_gskernel.GsConnectProperty_Server_get, _gskernel.GsConnectProperty_Server_set, doc=r"""数据服务地址或者文件路径""") Database = property(_gskernel.GsConnectProperty_Database_get, _gskernel.GsConnectProperty_Database_set, doc=r"""数据库名称""") User = property(_gskernel.GsConnectProperty_User_get, _gskernel.GsConnectProperty_User_set, doc=r"""用户名""") Password = property(_gskernel.GsConnectProperty_Password_get, _gskernel.GsConnectProperty_Password_set, 
doc=r"""密码""") Port = property(_gskernel.GsConnectProperty_Port_get, _gskernel.GsConnectProperty_Port_set, doc=r"""端口号""") DataSourceType = property(_gskernel.GsConnectProperty_DataSourceType_get, _gskernel.GsConnectProperty_DataSourceType_set, doc=r"""数据源类型""") def __init__(self, *args): r""" *Overload 1:* 缺省构造函数. | *Overload 2:* 拷贝构造函数:type other: :py:class:`GsConnectProperty` :param other: 需要拷贝的对象 | *Overload 3:* 根据服务地址、文件路径、url地址构造:type strServer: string :param strServer: 服务、文件、url地址 | *Overload 4:* 根据服务地址、文件路径、url地址和数据库名称构造:type strServer: string :param strServer: 服务、文件、url地址:type strDatabase: string :param strDatabase: 数据库名称 | *Overload 5:* 根据服务器地址、数据库和用户名密码构造:type strServer: string :param strServer: 服务、文件、url地址:type strDatabase: string :param strDatabase: 数据库名称:type strUser: string :param strUser: 用户名:type strPwd: string :param strPwd: 密码 | *Overload 6:* 根据服务器地址、数据库和用户名密码、端口构造:type strServer: string :param strServer: 服务、文件、url地址:type strDatabase: string :param strDatabase: 数据库名称:type strUser: string :param strUser: 用户名:type strPwd: string :param strPwd: 密码:type nPort: int :param nPort: 端口 | *Overload 7:* 根据服务器地址、数据库和用户名密码、端口、数据源类型构造:type strServer: string :param strServer: 服务、文件、url地址:type strDatabase: string :param strDatabase: 数据库名称:type strUser: string :param strUser: 用户名:type strPwd: string :param strPwd: 密码:type nPort: int :param nPort: 端口:type sourceType: int :param sourceType: 数据源类型 """ _gskernel.GsConnectProperty_swiginit(self, _gskernel.new_GsConnectProperty(*args)) __swig_destroy__ = _gskernel.delete_GsConnectProperty # Register GsConnectProperty in _gskernel: _gskernel.GsConnectProperty_swigregister(GsConnectProperty) eSimpleFeature = _gskernel.eSimpleFeature eAnnotationFeature = _gskernel.eAnnotationFeature eImageTileFeature = _gskernel.eImageTileFeature r""" 影像瓦片""" eTerrainTileFeature = _gskernel.eTerrainTileFeature r""" 地形瓦片""" eModelTileFeature = _gskernel.eModelTileFeature r""" 模型""" ePrevectorTileFeature = _gskernel.ePrevectorTileFeature 
r""" 静态矢量""" eDynvectorTileFeature = _gskernel.eDynvectorTileFeature r""" 动态矢量""" ePlaceNameTileFeature = _gskernel.ePlaceNameTileFeature r""" 地名""" eHypsographyTileFeature = _gskernel.eHypsographyTileFeature r""" 地势""" eDlgTileFeature = _gskernel.eDlgTileFeature r""" 矢量""" eTemporalImageTileFeature = _gskernel.eTemporalImageTileFeature r""" 实现多时相瓦片数据集将级扩充的""" eTemporalTerrainTileFeature = _gskernel.eTemporalTerrainTileFeature eTemporalModleDsTileFeature = _gskernel.eTemporalModleDsTileFeature eTemporalPreRaserVectorTileFeature = _gskernel.eTemporalPreRaserVectorTileFeature eTemporalDynRaserVectorTileFeature = _gskernel.eTemporalDynRaserVectorTileFeature eTemporalPlaceNameDsTileFeature = _gskernel.eTemporalPlaceNameDsTileFeature eTemporalColourHypsographyMapTileFeature = _gskernel.eTemporalColourHypsographyMapTileFeature eJpgType = _gskernel.eJpgType r""" jpg""" eZ7Type = _gskernel.eZ7Type r""" 地形压缩文件""" eX3dType = _gskernel.eX3dType r""" 自定义三维模型打包文件""" ePngType = _gskernel.ePngType r""" png""" ePlnType = _gskernel.ePlnType r""" 自定义地名打包文件""" eBmpType = _gskernel.eBmpType r""" bmp""" eDdsType = _gskernel.eDdsType r""" dds""" eGifType = _gskernel.eGifType r""" gif""" eTiffType = _gskernel.eTiffType r""" tiff""" eZLibType = _gskernel.eZLibType r""" zlib地形压缩文件""" eKmzType = _gskernel.eKmzType r""" Kmz格式""" eProtobuffType = _gskernel.eProtobuffType r""" protobuf格式""" eGZipProtobuffType = _gskernel.eGZipProtobuffType r""" 以GZip格式压缩后protobuf格式""" eQuantizedMeshType = _gskernel.eQuantizedMeshType r""" 量化Mesh的地形瓦片格式""" eModTexType = _gskernel.eModTexType r""" texture of model""" eUnKnownType = _gskernel.eUnKnownType r""" 未知类型""" eFeatureClass = _gskernel.eFeatureClass r""" 矢量地物类""" eTileClass = _gskernel.eTileClass r""" 瓦片类""" eRowClass = _gskernel.eRowClass r""" 二维表""" eDataRoomFolder = _gskernel.eDataRoomFolder r""" 数据集的集合""" eRasterClass = _gskernel.eRasterClass r""" 栅格类""" eSelectionSetUnion = _gskernel.eSelectionSetUnion r""" 选择集求并""" eSelectionSetIntersection = 
_gskernel.eSelectionSetIntersection r""" 选择集求交""" eSelectionSetDifference = _gskernel.eSelectionSetDifference r""" 选择集求差""" eSelectionSetSymDifference = _gskernel.eSelectionSetSymDifference r""" 选择集求对称差""" class GsTransaction(GsRefObject): r""" 事务对象""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract") __repr__ = _swig_repr __swig_destroy__ = _gskernel.delete_GsTransaction def IsTransaction(self) -> "bool": r""" 是否处于事务中:rtype: boolean :return: 返回是否处于事务中 """ return _gskernel.GsTransaction_IsTransaction(self) def StartTransaction(self) -> "bool": r""" 启动事务:rtype: boolean :return: 返回是否成功启动 """ return _gskernel.GsTransaction_StartTransaction(self) def CommitTransaction(self) -> "bool": r""" 提交事务:rtype: boolean :return: 返回是否提交成功 """ return _gskernel.GsTransaction_CommitTransaction(self) def RollbackTransaction(self) -> "bool": r""" 取消事务:rtype: boolean :return: 返回是否取消成功 """ return _gskernel.GsTransaction_RollbackTransaction(self) # Register GsTransaction in _gskernel: _gskernel.GsTransaction_swigregister(GsTransaction) class GsDataRoom(GsRefObject): r""" 数据集对象基类""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined") __repr__ = _swig_repr __swig_destroy__ = _gskernel.delete_GsDataRoom def Interrupt(self) -> "bool": r""" 打断正在运行中的操作 :rtype: boolean :return: 不支持打断操作的能力则返回false,成功打断或者无须打断都返回true """ return _gskernel.GsDataRoom_Interrupt(self) def Transaction(self) -> "GsTransaction *": r""" 获取事务对象,如果不支持事务则返回NULL""" return _gskernel.GsDataRoom_Transaction(self) def Folder(self) -> "GsDataRoomFolder *": r""" 数据集所在的目录:rtype: :py:class:`GsDataRoomFolder` :return: 返回数据集所在的目录,如果为空则标识数据集在GeoDatabase下 """ return _gskernel.GsDataRoom_Folder(self) def Name(self) -> "GsString": r""" 数据集名称:rtype: 
:py:class:`GsString` :return: 返回数据集对象的名称 """ return _gskernel.GsDataRoom_Name(self) def Type(self) -> "GsDataRoomType": r""" 数据集类型:rtype: int :return: 返回数据集对象的类型 """ return _gskernel.GsDataRoom_Type(self) def GeoDatabase(self) -> "GsGeoDatabase *": r""" 数据集所在的数据库对象:rtype: :py:class:`GsGeoDatabase` :return: 返回数据库对象指针 """ return _gskernel.GsDataRoom_GeoDatabase(self) def Delete(self) -> "bool": r""" 删除:rtype: boolean :return: 返回是否删除成功 """ return _gskernel.GsDataRoom_Delete(self) def MetadataDomain(self) -> "GsVector< GsString >": r""" 元数据的分类名称""" return _gskernel.GsDataRoom_MetadataDomain(self) def MetadataName(self, strDomainName: 'char const *') -> "GsVector< GsString >": r""" 获取元数据分类下的元数据名称""" return _gskernel.GsDataRoom_MetadataName(self, strDomainName) def MetadataItem(self, *args) -> "void": r""" *Overload 1:* 获取某个分类下的元数据值 | *Overload 2:* 设置某个分类下的元数据值 """ return _gskernel.GsDataRoom_MetadataItem(self, *args) def ReName(self, name: 'char const *') -> "bool": r""" 修改数据集名称 :type name: string :param name: 需要设置的名称字符串 :rtype: boolean :return: 返回是否修改成功 """ return _gskernel.GsDataRoom_ReName(self, name) def AliasName(self, *args) -> "bool": r""" *Overload 1:* 获取数据集别名 :rtype: :py:class:`GsString` :return: 返回数据集别名 | *Overload 2:* 修改数据集别名 :type name: string :param name: 需要设置的名称字符串 :rtype: boolean :return: 返回是否修改成功 """ return _gskernel.GsDataRoom_AliasName(self, *args) # Register GsDataRoom in _gskernel: _gskernel.GsDataRoom_swigregister(GsDataRoom) class GsGeoDataRoom(GsDataRoom): r""" 具备地理特性的数据集对象抽象基类""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined") __repr__ = _swig_repr __swig_destroy__ = _gskernel.delete_GsGeoDataRoom def Extent(self, bForce: 'bool'=False) -> "GsBox": r""" 获取最大外接矩形范围:type bForce: boolean :param bForce: 是否强制获取,强制获取将会从数据表中统计最大范围:rtype: :py:class:`GsBox` :return: 返回矩形对象 """ return _gskernel.GsGeoDataRoom_Extent(self, 
bForce) def SpatialReference(self) -> "GsSpatialReference *": r""" 获取数据集的空间参考:rtype: :py:class:`GsSpatialReference` :return: 返回空间参考对象的指针 """ return _gskernel.GsGeoDataRoom_SpatialReference(self) def ChangeSpatialReference(self, pSR: 'GsSpatialReference') -> "bool": r""" 修改数据集的空间参考:rtype: boolean :return: 返回修改是否成功 """ return _gskernel.GsGeoDataRoom_ChangeSpatialReference(self, pSR) @staticmethod def CanDowncast(b: 'GsDataRoom') -> "bool": return _gskernel.GsGeoDataRoom_CanDowncast(b) @staticmethod def DowncastTo(b: 'GsDataRoom') -> "GsSmarterPtr< GsGeoDataRoom >": return _gskernel.GsGeoDataRoom_DowncastTo(b) # Register GsGeoDataRoom in _gskernel: _gskernel.GsGeoDataRoom_swigregister(GsGeoDataRoom) def GsGeoDataRoom_CanDowncast(b: 'GsDataRoom') -> "bool": return _gskernel.GsGeoDataRoom_CanDowncast(b) def GsGeoDataRoom_DowncastTo(b: 'GsDataRoom') -> "GsSmarterPtr< GsGeoDataRoom >": return _gskernel.GsGeoDataRoom_DowncastTo(b) class GsQueryFilter(GsRefObject): r""" 属性条件查询""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, strWhere: 'char const *'=None): r""" 从where子句构造:type strWhere: string :param strWhere: SQL查询的where子句 """ _gskernel.GsQueryFilter_swiginit(self, _gskernel.new_GsQueryFilter(strWhere)) __swig_destroy__ = _gskernel.delete_GsQueryFilter def WhereClause(self, *args) -> "void": r""" *Overload 1:* 获取where子句:rtype: :py:class:`GsString` :return: 返回where子句字符串 | *Overload 2:* 设置where子句查询条件:type strWhere: string :param strWhere: SQL查询的where子句 """ return _gskernel.GsQueryFilter_WhereClause(self, *args) def And(self, pOther: 'GsQueryFilter') -> "bool": r""" 以And合并两个过滤条件:type pOther: :py:class:`GsQueryFilter` :param pOther: 另外一个合并的条件:rtype: boolean :return: 返回合并是否成功 """ return _gskernel.GsQueryFilter_And(self, pOther) def Or(self, pOther: 'GsQueryFilter') -> "bool": r""" 以Or合并两个过滤条件:type pOther: :py:class:`GsQueryFilter` :param pOther: 另外一个合并的条件:rtype: boolean :return: 
返回合并是否成功 """ return _gskernel.GsQueryFilter_Or(self, pOther) def Clone(self) -> "GsSmarterPtr< GsQueryFilter >": r""" 复制过滤条件对象""" return _gskernel.GsQueryFilter_Clone(self) # Register GsQueryFilter in _gskernel: _gskernel.GsQueryFilter_swigregister(GsQueryFilter) class GsSpatialQueryFilter(GsQueryFilter): r""" 空间属性查询条件""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): r""" *Overload 1:* 从几何对象构造:type pGeo: :py:class:`GsGeometry` :param pGeo: 查询的几何对象指针 | *Overload 2:* 从几何对象和where子句构造:type pGeo: :py:class:`GsGeometry` :param pGeo: 查询的几何对象指针:type strWhere: string :param strWhere: 查询SQL语句where子句 | *Overload 3:* 从几何对象,where子句和查询方式字符串构造:type pGeo: :py:class:`GsGeometry` :param pGeo: 查询的几何对象指针:type strWhere: string :param strWhere: 查询SQL语句where子句:type strFilterType: string :param strFilterType: 查询方式 """ _gskernel.GsSpatialQueryFilter_swiginit(self, _gskernel.new_GsSpatialQueryFilter(*args)) __swig_destroy__ = _gskernel.delete_GsSpatialQueryFilter def Geometry(self, *args) -> "void": r""" *Overload 1:* 获取空间查询几何对象:rtype: :py:class:`GsGeometry` :return: 返回查询的几何对象指针 | *Overload 2:* 设置空间查询几何对象:param pGeo: 查询的几何对象指针 """ return _gskernel.GsSpatialQueryFilter_Geometry(self, *args) def FilterType(self, *args) -> "void": r""" *Overload 1:* 获取查询方式:rtype: :py:class:`GsString` :return: 返回查询方式字符串 | *Overload 2:* 设置查询方式:type strFilterType: string :param strFilterType: 查询方式字符串表达式 """ return _gskernel.GsSpatialQueryFilter_FilterType(self, *args) def IsMatch(self, *args) -> "bool": r""" *Overload 1:* 判断一个传入的Geometry是否符合几何过滤条件。:type pGeo: :py:class:`GsGeometry` :param pGeo: 要判断的几何对象:rtype: boolean :return: 返回是否匹配几何过滤条件。 | *Overload 2:* 判断一个传入的Geometry是否符合几何过滤条件。:param pGeo: 要判断的几何对象:rtype: boolean :return: 返回是否匹配几何过滤条件。 """ return _gskernel.GsSpatialQueryFilter_IsMatch(self, *args) def And(self, pOther: 'GsQueryFilter') -> "bool": r""" 以And合并两个过滤条件:type pOther: :py:class:`GsQueryFilter` :param 
pOther: 另外一个合并的条件:rtype: boolean :return: 返回合并是否成功 """ return _gskernel.GsSpatialQueryFilter_And(self, pOther) def Or(self, pOther: 'GsQueryFilter') -> "bool": r""" 以Or合并两个过滤条件:type pOther: :py:class:`GsQueryFilter` :param pOther: 另外一个合并的条件:rtype: boolean :return: 返回合并是否成功 """ return _gskernel.GsSpatialQueryFilter_Or(self, pOther) def Clone(self) -> "GsSmarterPtr< GsQueryFilter >": r""" 复制过滤条件对象""" return _gskernel.GsSpatialQueryFilter_Clone(self) @staticmethod def CanDowncast(b: 'GsQueryFilter') -> "bool": return _gskernel.GsSpatialQueryFilter_CanDowncast(b) @staticmethod def DowncastTo(b: 'GsQueryFilter') -> "GsSmarterPtr< GsSpatialQueryFilter
user = uuid.uuid4().hex password = <PASSWORD> conn = self.mox.CreateMockAnything() conn = fakeldap.FakeLdap(CONF.ldap.url, 0, alias_dereferencing=None, tls_cacertdir=None, tls_cacertfile=None, tls_req_cert=2, use_tls=False, chase_referrals=True).AndReturn(conn) conn.simple_bind_s(user, password).AndReturn(None) self.mox.ReplayAll() user_api.get_connection(user=user, password=password) def test_wrong_ldap_scope(self): CONF.ldap.query_scope = uuid.uuid4().hex self.assertRaisesRegexp( ValueError, 'Invalid LDAP scope: %s. *' % CONF.ldap.query_scope, identity.backends.ldap.Identity) def test_wrong_alias_dereferencing(self): CONF.ldap.alias_dereferencing = uuid.uuid4().hex self.assertRaisesRegexp( ValueError, 'Invalid LDAP deref option: %s\.' % CONF.ldap.alias_dereferencing, identity.backends.ldap.Identity) def test_user_extra_attribute_mapping(self): CONF.ldap.user_additional_attribute_mapping = ['description:name'] self.load_backends() user = { 'id': 'extra_attributes', 'name': 'EXTRA_ATTRIBUTES', 'password': '<PASSWORD>', 'domain_id': CONF.identity.default_domain_id } self.identity_api.create_user(user['id'], user) dn, attrs = self.identity_api.driver.user._ldap_get(user['id']) self.assertTrue(user['name'] in attrs['description']) def test_user_extra_attribute_mapping_description_is_returned(self): # Given a mapping like description:description, the description is # returned. 
self.config_fixture.config( group='ldap', user_additional_attribute_mapping=['description:description']) self.load_backends() description = uuid.uuid4().hex user = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': description, 'password': <PASSWORD>, 'domain_id': CONF.identity.default_domain_id } self.identity_api.create_user(user['id'], user) res = self.identity_api.driver.user.get_all() new_user = [u for u in res if u['id'] == user['id']][0] self.assertThat(new_user['description'], matchers.Equals(description)) @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get') def test_user_mixed_case_attribute(self, mock_ldap_get): # Mock the search results to return attribute names # with unexpected case. mock_ldap_get.return_value = ( 'cn=junk,dc=example,dc=com', { 'sN': [uuid.uuid4().hex], 'eMaIl': [uuid.uuid4().hex] } ) user = self.identity_api.get_user('junk') self.assertEqual(mock_ldap_get.return_value[1]['sN'][0], user['name']) self.assertEqual(mock_ldap_get.return_value[1]['eMaIl'][0], user['email']) def test_binary_attribute_values(self): result = [( 'cn=junk,dc=example,dc=com', { 'cn': ['junk'], 'sn': [uuid.uuid4().hex], 'mail': [uuid.uuid4().hex], 'binary_attr': ['\x00\xFF\x00\xFF'] } ), ] py_result = common_ldap_core.convert_ldap_result(result) # The attribute containing the binary value should # not be present in the converted result. 
self.assertNotIn('binary_attr', py_result[0][1]) def test_parse_extra_attribute_mapping(self): option_list = ['description:name', 'gecos:password', 'fake:invalid', 'invalid1', 'invalid2:', 'description:name:something'] mapping = self.identity_api.driver.user._parse_extra_attrs(option_list) expected_dict = {'description': 'name', 'gecos': 'password', 'fake': 'invalid', 'invalid2': ''} self.assertDictEqual(expected_dict, mapping) # TODO(henry-nash): These need to be removed when the full LDAP implementation # is submitted - see Bugs 1092187, 1101287, 1101276, 1101289 def test_domain_crud(self): domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex} self.assertRaises(exception.Forbidden, self.assignment_api.create_domain, domain['id'], domain) self.assertRaises(exception.Conflict, self.assignment_api.create_domain, CONF.identity.default_domain_id, domain) self.assertRaises(exception.DomainNotFound, self.assignment_api.get_domain, domain['id']) domain['description'] = uuid.uuid4().hex self.assertRaises(exception.DomainNotFound, self.assignment_api.update_domain, domain['id'], domain) self.assertRaises(exception.Forbidden, self.assignment_api.update_domain, CONF.identity.default_domain_id, domain) self.assertRaises(exception.DomainNotFound, self.assignment_api.get_domain, domain['id']) self.assertRaises(exception.DomainNotFound, self.assignment_api.delete_domain, domain['id']) self.assertRaises(exception.Forbidden, self.assignment_api.delete_domain, CONF.identity.default_domain_id) self.assertRaises(exception.DomainNotFound, self.assignment_api.get_domain, domain['id']) def test_create_domain_case_sensitivity(self): # domains are read-only, so case sensitivity isn't an issue ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.assertRaises(exception.Forbidden, self.assignment_api.create_domain, ref['id'], ref) def test_cache_layer_domain_crud(self): # TODO(morganfainberg): This also needs to be removed 
when full LDAP # implementation is submitted. No need to duplicate the above test, # just skip this time. self.skipTest('Domains are read-only against LDAP') def test_domain_rename_invalidates_get_domain_by_name_cache(self): parent = super(LDAPIdentity, self) self.assertRaises( exception.Forbidden, parent.test_domain_rename_invalidates_get_domain_by_name_cache) def test_project_rename_invalidates_get_project_by_name_cache(self): parent = super(LDAPIdentity, self) self.assertRaises( exception.Forbidden, parent.test_project_rename_invalidates_get_project_by_name_cache) def test_project_crud(self): # NOTE(topol): LDAP implementation does not currently support the # updating of a project name so this method override # provides a different update test project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'description': uuid.uuid4().hex, 'enabled': True } self.assignment_api.create_project(project['id'], project) project_ref = self.assignment_api.get_project(project['id']) self.assertDictEqual(project_ref, project) project['description'] = uuid.uuid4().hex self.assignment_api.update_project(project['id'], project) project_ref = self.assignment_api.get_project(project['id']) self.assertDictEqual(project_ref, project) self.assignment_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.assignment_api.get_project, project['id']) @tests.skip_if_cache_disabled('assignment') def test_cache_layer_project_crud(self): # NOTE(morganfainberg): LDAP implementation does not currently support # updating project names. This method override provides a different # update test. 
project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'description': uuid.uuid4().hex} project_id = project['id'] # Create a project self.assignment_api.create_project(project_id, project) self.assignment_api.get_project(project_id) updated_project = copy.deepcopy(project) updated_project['description'] = uuid.uuid4().hex # Update project, bypassing assignment_api manager self.assignment_api.driver.update_project(project_id, updated_project) # Verify get_project still returns the original project_ref self.assertDictContainsSubset( project, self.assignment_api.get_project(project_id)) # Invalidate cache self.assignment_api.get_project.invalidate(self.assignment_api, project_id) # Verify get_project now returns the new project self.assertDictContainsSubset( updated_project, self.assignment_api.get_project(project_id)) # Update project using the assignment_api manager back to original self.assignment_api.update_project(project['id'], project) # Verify get_project returns the original project_ref self.assertDictContainsSubset( project, self.assignment_api.get_project(project_id)) # Delete project bypassing assignment_api self.assignment_api.driver.delete_project(project_id) # Verify get_project still returns the project_ref self.assertDictContainsSubset( project, self.assignment_api.get_project(project_id)) # Invalidate cache self.assignment_api.get_project.invalidate(self.assignment_api, project_id) # Verify ProjectNotFound now raised self.assertRaises(exception.ProjectNotFound, self.assignment_api.get_project, project_id) # recreate project self.assignment_api.create_project(project_id, project) self.assignment_api.get_project(project_id) # delete project self.assignment_api.delete_project(project_id) # Verify ProjectNotFound is raised self.assertRaises(exception.ProjectNotFound, self.assignment_api.get_project, project_id) def test_multi_role_grant_by_user_group_on_project_domain(self): # This is a partial 
implementation of the standard test that # is defined in test_backend.py. It omits both domain and # group grants. since neither of these are yet supported by # the ldap backend. role_list = [] for _ in range(2): role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.assignment_api.create_role(role['id'], role) role_list.append(role) user1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'password': uuid.uuid4().hex, 'enabled': True} self.identity_api.create_user(user1['id'], user1) project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id} self.assignment_api.create_project(project1['id'], project1) self.assignment_api.add_role_to_user_and_project( user_id=user1['id'], tenant_id=project1['id'], role_id=role_list[0]['id']) self.assignment_api.add_role_to_user_and_project( user_id=user1['id'], tenant_id=project1['id'], role_id=role_list[1]['id']) # Although list_grants are not yet supported, we can test the # alternate way of getting back lists of grants, where user # and group roles are combined. 
Only directly assigned user # roles are available, since group grants are not yet supported combined_list = self.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id']) self.assertEqual(2, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) self.assertIn(role_list[1]['id'], combined_list) # Finally, although domain roles are not implemented, check we can # issue the combined get roles call with benign results, since thus is # used in token generation combined_role_list = self.assignment_api.get_roles_for_user_and_domain( user1['id'], CONF.identity.default_domain_id) self.assertEqual(0, len(combined_role_list)) def test_list_projects_for_alternate_domain(self): self.skipTest( 'N/A: LDAP does not support multiple domains') def test_create_grant_no_user(self): self.skipTest('Blocked by bug 1101287') def test_create_grant_no_group(self): self.skipTest('Blocked by bug 1101287') def test_get_default_domain_by_name(self): domain = self._get_domain_fixture() domain_ref = self.assignment_api.get_domain_by_name(domain['name']) self.assertEqual(domain_ref, domain) class LDAPIdentityEnabledEmulation(LDAPIdentity): def setUp(self): super(LDAPIdentityEnabledEmulation, self).setUp() self.clear_database() self.load_backends() self.load_fixtures(default_fixtures) for obj in [self.tenant_bar, self.tenant_baz, self.user_foo, self.user_two, self.user_badguy]: obj.setdefault('enabled', True) def config_files(self): config_files = super(LDAPIdentityEnabledEmulation, self).config_files() config_files.append(tests.dirs.tests_conf('backend_ldap.conf')) return config_files def config_overrides(self): super(LDAPIdentityEnabledEmulation, self).config_overrides() self.config_fixture.config(group='ldap', user_enabled_emulation=True, tenant_enabled_emulation=True) def test_project_crud(self): # NOTE(topol): LDAPIdentityEnabledEmulation will create an # enabled key in the project dictionary so this # method override handles this side-effect project = { 'id': 
uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'description': uuid.uuid4().hex} self.assignment_api.create_project(project['id'], project) project_ref = self.assignment_api.get_project(project['id']) # self.assignment_api.create_project adds an enabled # key with a value of True when LDAPIdentityEnabledEmulation # is used so we now add this expected key to the project dictionary project['enabled'] = True self.assertDictEqual(project_ref, project) project['description'] = uuid.uuid4().hex self.assignment_api.update_project(project['id'], project) project_ref = self.assignment_api.get_project(project['id']) self.assertDictEqual(project_ref, project) self.assignment_api.delete_project(project['id']) self.assertRaises(exception.ProjectNotFound, self.assignment_api.get_project, project['id']) def test_user_crud(self): user = { 'id': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'name': uuid.uuid4().hex, 'password': <PASSWORD>} self.identity_api.create_user(user['id'], user) user['enabled'] = True user_ref = self.identity_api.get_user(user['id']) del user['password'] user_ref_dict = dict((x, user_ref[x]) for x in user_ref) self.assertDictEqual(user_ref_dict, user) user['password'] = <PASSWORD> self.identity_api.update_user(user['id'], user) user_ref = self.identity_api.get_user(user['id']) del user['password'] user_ref_dict = dict((x, user_ref[x]) for x in user_ref) self.assertDictEqual(user_ref_dict, user) self.identity_api.delete_user(user['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user['id']) def test_user_enable_attribute_mask(self): self.skipTest( "Enabled emulation conflicts with enabled mask") class LdapIdentitySqlAssignment(BaseLDAPIdentity, tests.SQLDriverOverrides, tests.TestCase): def config_files(self): config_files = super(LdapIdentitySqlAssignment, self).config_files() config_files.append(tests.dirs.tests_conf('backend_ldap_sql.conf')) return config_files def 
setUp(self): super(LdapIdentitySqlAssignment, self).setUp() self.clear_database() self.load_backends() cache.configure_cache_region(cache.REGION) self.engine = sql.get_engine() self.addCleanup(sql.cleanup) sql.ModelBase.metadata.create_all(bind=self.engine) self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine) self.load_fixtures(default_fixtures) #defaulted by the data load self.user_foo['enabled'] = True def config_overrides(self): super(LdapIdentitySqlAssignment, self).config_overrides() self.config_fixture.config( group='identity', driver='keystone.identity.backends.ldap.Identity') self.config_fixture.config( group='assignment', driver='keystone.assignment.backends.sql.Assignment') def test_domain_crud(self): pass def test_list_domains(self): domains = self.assignment_api.list_domains() self.assertEqual([assignment.calc_default_domain()], domains) def test_list_domains_non_default_domain_id(self): # If change the default_domain_id, the ID of the default domain # returned by list_domains doesn't change because the SQL identity # backend reads it from the database, which doesn't get updated by # config change. orig_default_domain_id = CONF.identity.default_domain_id new_domain_id = uuid.uuid4().hex self.config_fixture.config(group='identity', default_domain_id=new_domain_id) domains = self.assignment_api.list_domains() self.assertEqual(orig_default_domain_id, domains[0]['id']) def test_project_filter(self): self.skipTest( 'N/A: Not part of SQL backend') def test_role_filter(self): self.skipTest( 'N/A: Not part of SQL backend') def test_add_role_grant_to_user_and_project_404(self): self.skipTest('Blocked by bug 1101287') def test_get_role_grants_for_user_and_project_404(self): self.skipTest('Blocked by bug 1101287') def test_list_projects_for_user_with_grants(self): self.skipTest('Blocked by bug 1221805') class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides, tests.TestCase): """Class to test common SQL plus individual LDAP backends. 
We define a set of domains and domain-specific backends: - A separate LDAP backend for the default domain - A separate LDAP backend for domain1 - domain2 shares the same LDAP as domain1, but uses a different tree attach point - An SQL backend for all other domains (which will include domain3 and domain4) Normally one would expect that the default domain would be handled as part of the "other domains" - however the above provides better test coverage since most of the existing backend tests use the default domain. """ def setUp(self): super(MultiLDAPandSQLIdentity, self).setUp() self.load_backends() self.engine = sql.get_engine() self.addCleanup(sql.cleanup) sql.ModelBase.metadata.create_all(bind=self.engine) self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine) self._setup_domain_test_data() # All initial domain data setup complete, time to switch on support # for separate backends per domain. self.config_fixture.config(group='identity', domain_specific_drivers_enabled=True, domain_config_dir=tests.TESTSDIR) self._set_domain_configs() self.clear_database() self.load_fixtures(default_fixtures) def config_overrides(self): super(MultiLDAPandSQLIdentity, self).config_overrides() # Make sure identity and assignment are actually SQL drivers, #
self.white, (x, y), (self.SXSize - 10, y)) # To draw a separation line between commands and memory which is under return y+10 # Return the Y position 10px under our drawn line ############################################################################################################ class UnsignedBitsArray: # Class designed to be a list of cellsNbr values, which are cellLength bits unsigned integers. def __init__(self, cellLength, cellsNbr): self._cellLength = abs(cellLength) # Storing the absolute values of the length and number of bits, those can't be negative right? self._cellsNbr = abs(cellsNbr) self._arr = [0] * self._cellsNbr # Initialize the list of unsigned values def __str__(self): # Displays each cell with its index and value, in hexadecimal result = "" for idx, val in enumerate(self._arr): result += "{} : {}\n".format(hex(idx), hex(val)) return result def __getitem__(self, idx): # Getting a value is straightforward, we do not treat it return self._arr[idx] def __setitem__(self, idx, value): # When setting a value it stays withing the boudaries of an unsigned cellLength bits integer. If it overflows we just truncate the Most Significant Bits to take the length down to cellLength. The modulo does it well. self._arr[idx] = value % (2**self._cellLength) ############################################################################################################ class CHIP8: # Chip 8 core of the emulator, with reading ROMs, decoding opcodes and executing them def __init__(self, romPath, Speed = 1024): # Speed in Hz self.speed = Speed # Speed of CPU in Hz self.romPath = romPath # Path to the ROM file self.changeRom = False # Boolean to put to True when the user wants to change the ROM. 
It allows the program to not end when unloading the current ROM self.initVars() # Initialize all the emulator variables which are resetted when changing a ROM, so we don't have to destroy our CHIP8 object when rebooting the current ROM self.started = False # Boolean to check if the game is launched self.paused = False # Boolean to pause the emulator self.nextStep = False # Boolean to allow the emulator to jump to the next step self.DELAYSOUNDTIMER = USEREVENT + 1 # Pygame event for the 60Hz timers and the screen refreshing self.sound = True # Boolean to allow sound to be played self.screen = Screen() # Initialize the screen ######################################################################################################## # Those are properties used to manage the registers which have a maximum bit size and are unsigned # So no treatment is done when getting them # But when setting them we apply a modulo to keep the value in its boundaries @property def I(self): return self.__I @I.setter def I(self, value): self.__I = value % (2**16) @property def delay_timer(self): return self.__delay_timer @delay_timer.setter def delay_timer(self, value): self.__delay_timer = value % (2**8) @property def sound_timer(self): return self.__sound_timer @sound_timer.setter def sound_timer(self, value): self.__sound_timer = value % (2**8) @property def PC(self): return self.__PC @PC.setter def PC(self, value): self.__PC = value % (2**16) @property def SP(self): return self.__SP @SP.setter def SP(self, value): self.__SP = value % (2**8) ######################################################################################################## def initVars(self): # Initializes the variables used to run the emulator self.memory = UnsignedBitsArray(8, 4096) # Memory of 4096 * 8 bits self.V = UnsignedBitsArray(8, 16) # V[X] register. 16 * 8 bits self.I = 0 # I register self.delay_timer = 0 # Delay Timer. Should be decremented to 0 at a rate of 60Hz self.sound_timer = 0 # Sound Timer. 
Should be decremented to 0 at a rate of 60Hz self.PC = 512 # Program Counter. Starts at 0x200 self.SP = 0 # Stack Pointer self.stack = UnsignedBitsArray(16, 16) # Stack pile. 16 * 16 bits self.key = [False for i in range(16)] # List of pressed keys, max 16 keys self.loadFonts() # Load fonts in memory ######################################################################################################## def loadGame(self): # Loads the game from the ROM into memory i = 0x200 # Default adress to store the program with open(self.romPath, "rb") as f: # Opening the ROM while 1: # While the universe is working byte = f.read(1) # We get a byte if not byte: # If it is empty we reached the EOF break # Breaking out of the loop self.memory[i] = int.from_bytes(byte, "big", signed=False) # Storing into the memory at the i position the value of the byte we just read i += 1 # Increasing the counter ######################################################################################################## def startGame(self): # Starts the game self.screen.startScreen() # Start the screen pygame.time.set_timer(self.DELAYSOUNDTIMER, round((1/60)*1000)) # Sets up the 60Hz timer self.started = True # Sets the started boolean to True while self.started: # While the emulator is started self.listen() # Checks all Pygame events on queue (timers, window resizing, quitting the program or most of all pressing keys) self.executeOpcode(self.getCurrentOpcode()) # Retrieve and execute the current opcode while self.paused and self.started: # If the game is on pause self.nextStep = False # We reset the boolean value self.listen() # We listen to the events (to not block the program if we want to quit or anything) but we are mainly waiting for the next step key to be pressed if self.nextStep: # If the next step key was pressed break # We break out of the paused loop so we can execute the next opcode. 
But as the game is paused, if will immediatly go back into this loop right after the next opcode, to wait for the next step pygame.time.delay(50) # Delay of 50ms, to avoid using too much useless CPU time (I guess we don't really care but hey 20Hz to wait for a key event for the next step or quitting is nice enough) pygame.time.delay(round((1/self.speed)*1000)) # Delay allowing us to execute opcodes at the specified speed self.screen.destroyScreen() # When out of the loop, we are quitting so we destroy the screen return self.changeRom # Returning to the main program whether to quit or not to quit ######################################################################################################## def rebootGame(self): # Reboots the game self.initVars() # Initialize variables self.screen.clear() # Clear the screen self.loadGame() # Reload the game into memory ######################################################################################################## def listen(self): # Goes through all Pygame events on queue for event in pygame.event.get(): # For each event in the queue if event.type == QUIT: # QUIT self.started = False # We stop the game elif event.type == self.DELAYSOUNDTIMER: # 60Hz timer self.timeout60Hz() # Function refreshing screen and managing Chip 8 timers elif event.type == VIDEORESIZE: # Resizing the screen self.screen.resizeScreen(event.w, event.h) # Function to resize the screen and reset it elif event.type == KEYDOWN: # Pressing a key # GAME KEYS # When a key is pressed, we set the associated value to 1 if event.key == K_1: self.key[0x1] = 1 elif event.key == K_2: self.key[0x2] = 1 elif event.key == K_3: self.key[0x3] = 1 elif event.key == K_4: self.key[0xC] = 1 elif event.key == K_q: self.key[0x4] = 1 elif event.key == K_w: self.key[0x5] = 1 elif event.key == K_e: self.key[0x6] = 1 elif event.key == K_r: self.key[0xD] = 1 elif event.key == K_a: self.key[0x7] = 1 elif event.key == K_s: self.key[0x8] = 1 elif event.key == K_d: 
self.key[0x9] = 1 elif event.key == K_f: self.key[0xE] = 1 elif event.key == K_z: self.key[0xA] = 1 elif event.key == K_x: self.key[0x0] = 1 elif event.key == K_c: self.key[0xB] = 1 elif event.key == K_v: self.key[0xF] = 1 # MENU KEYS elif event.key == K_ESCAPE: self.started = False elif event.key == K_F1: self.started = False self.changeRom = True elif event.key == K_F2: self.rebootGame() elif event.key == K_F3: self.paused = not self.paused self.screen.togglePause(self.paused) elif event.key == K_F4: self.nextStep = True elif event.key == K_F5: self.sound = not self.sound elif event.type == KEYUP: # GAME KEYS # When a key is released, we set the associated value to 0 if event.key == K_1: self.key[0x1] = 0 elif event.key == K_2: self.key[0x2] = 0 elif event.key == K_3: self.key[0x3] = 0 elif event.key == K_4: self.key[0xC] = 0 elif event.key == K_q: self.key[0x4] = 0 elif event.key == K_w: self.key[0x5] = 0 elif event.key == K_e: self.key[0x6] = 0 elif event.key == K_r: self.key[0xD] = 0 elif event.key == K_a: self.key[0x7] = 0 elif event.key == K_s: self.key[0x8] = 0 elif event.key == K_d: self.key[0x9] = 0 elif event.key == K_f: self.key[0xE] = 0 elif event.key == K_z: self.key[0xA] = 0 elif event.key == K_x: self.key[0x0] = 0 elif event.key == K_c: self.key[0xB] = 0 elif event.key == K_v: self.key[0xF] = 0 ######################################################################################################## def timeout60Hz(self): # To be called at a frequency of 60Hz. 
Decrements the emulator timers and refreshes the screen if(self.delay_timer > 0): # We decrement the delay timer if it isn't 0 self.delay_timer -= 1 if(self.sound_timer > 0): # Same for the sound timer self.sound_timer -= 1 if self.sound: # And if it isn't 0 and we are allowing the sound to be played, we play a beep self.screen.beep.play() if self.started: # If the game is started self.screen.displayRegsAndVars(self.V, self.delay_timer, self.sound_timer, self.I, self.PC, self.SP) # We update the debugger values with our registers self.screen.displayMemory(self.memory) # Same with our memory self.screen.refreshScreen() # We refresh the screen ######################################################################################################## def dumpMemoryAndReg(self): # Dumps the whole memory and register values to a file named "memdump.txt" in the script folder result = "Opcode: {}\nPC: {}\nI: {}\nDT: {}\nST: {}\nSP: {}\nLast Stack: {}\n\n---------------------------------------------\n\n".format(hex(self.getCurrentOpcode()), hex(self.PC), hex(self.I), hex(self.delay_timer), hex(self.sound_timer), hex(self.SP), hex(self.stack[self.SP-1])) for idx, value in enumerate(self.V): result += "V[{}] = {} = {}\n".format(idx, hex(value), value) result
<filename>src/context.py from __future__ import annotations import sys from collections import defaultdict, OrderedDict from operator import attrgetter from typing import Dict, Any, ClassVar, List, Set, Optional, Tuple import src.settings as var # FIXME from src.messages.message import Message from src.logger import debuglog class _NotLoggedIn: def __copy__(self): return self def __deepcopy__(self, memo): return self def __bool__(self): return False def __repr__(self): return "NotLoggedIn" NotLoggedIn = _NotLoggedIn() def _who(cli, target, data=b""): """Handle WHO requests.""" if isinstance(data, str): data = data.encode(Features["CHARSET"]) elif isinstance(data, int): if data > 0xFFFFFF: data = b"" else: data = data.to_bytes(3, "little") if len(data) > 3: data = b"" if "WHOX" in Features: cli.send("WHO", target, b"%tcuihsnfdlar," + data) else: cli.send("WHO", target) return int.from_bytes(data, "little") def _send(data, first, sep, client, send_type, name, chan=None): full_address = "{cli.nickname}!{cli.ident}@{cli.hostmask}".format(cli=client) # Maximum length of sent data is 512 bytes. 
However, we have to # reduce the maximum length allowed to account for: # 1 (1) - The initial colon at the front of the data # 2 (1) - The space between the sender (us) and the command # 3 (1) - The space between the command and the target # 4 (1) - The space between the target and the data # 5 (1) - The colon at the front of the data to send # 6 (2) - The trailing \r\n length = 512 - 7 # Next, we need to reduce the length to account for our address length -= len(full_address) # Then we also need to account for the target's length length -= len(name) # Finally, we need to account for the send type's length length -= len(send_type) # The 'first' argument is sent along with every message, so deduce that too if length - len(first) > 0: # make sure it's not negative (or worse, 0) length -= len(first) else: first = "" if chan and send_type.lower() in ("cprivmsg", "cnotice"): chan = chan.strip() + " " # if sending CPRIVMSG or CNOTICE, we need a channel parameter as well length -= len(chan) else: chan = "" messages = [] count = 0 for line in data: if count and count + len(sep) + len(line) > length: count = len(line) cur_sep = "\n" elif not messages: count = len(line) cur_sep = "" else: count += len(sep) + len(line) cur_sep = sep messages.append(cur_sep) messages.append(line) for line in "".join(messages).split("\n"): while line: extra, line = line[:length], line[length:] client.send("{0} {1} {4}:{2}{3}".format(send_type, name, first, extra, chan)) def lower(nick, *, casemapping=None): if nick is None or nick is NotLoggedIn: return nick if isinstance(nick, IRCContext): return nick.lower() if casemapping is None: casemapping = Features["CASEMAPPING"] mapping = { "[": "{", "]": "}", "\\": "|", "^": "~", } if casemapping == "strict-rfc1459": mapping.pop("^") elif casemapping == "ascii": mapping.clear() return nick.lower().translate(str.maketrans(mapping)) def equals(nick1, nick2): return nick1 is not None and nick2 is not None and lower(nick1) == lower(nick2) def 
context_types(*types): def wrapper(cls): cls._getters = l = [] cls.is_fake = False for context_type in types: name = "is_" + context_type setattr(cls, name, False) l.append((context_type, attrgetter(name))) return cls return wrapper @context_types("channel", "user") class IRCContext: """Base class for channels and users.""" _messages = defaultdict(list) def __init__(self, name, client): self.name = name self.client = client self.ref = None def __format__(self, format_spec): if not format_spec: return self.name raise ValueError("Format specifier {0} has undefined semantics".format(format_spec)) def __eq__(self, other): return self._compare(other, __class__) # This will always return False def _compare(self, other, cls, *attributes): """Compare two instances and return a proper value.""" if self is other: return True if not isinstance(other, cls): return NotImplemented done = False for attr in attributes: if getattr(self, attr) is None or getattr(other, attr) is None: continue done = True if getattr(self, attr) != getattr(other, attr): return False return done def lower(self): temp = type(self)(lower(self.name), self.client) temp.ref = self.ref or self return temp def get_send_type(self, *, is_notice=False, is_privmsg=False): if is_notice and not is_privmsg: return "NOTICE" return "PRIVMSG" def queue_message(self, message): if self.is_fake: self.send(message) # Don't actually queue it return if isinstance(message, list): message = tuple(message) self._messages[message].append(self) @classmethod def send_messages(cls, *, notice=False, privmsg=False): messages = list(cls._messages.items()) cls._messages.clear() for message, targets in messages: if isinstance(message, Message): message = message.format() if isinstance(message, str): message = (message,) send_types = defaultdict(list) for target in targets: send_type = target.get_send_type(is_notice=notice, is_privmsg=privmsg) send_type, send_chan = target.use_cprivmsg(send_type) send_types[(send_type, 
send_chan)].append(target) for (send_type, send_chan), targets in send_types.items(): max_targets = Features["TARGMAX"][send_type] while targets: using, targets = targets[:max_targets], targets[max_targets:] _send(message, "", " ", using[0].client, send_type, ",".join([t.nick for t in using]), send_chan) @classmethod def get_context_type(cls, *, max_types=1): context_type = [] if cls.is_fake: context_type.append("fake") for name, getter in cls._getters: if getter(cls): context_type.append(name) final = " ".join(context_type) if len(context_type) > (cls.is_fake + max_types): raise RuntimeError("Invalid context type for {0}: {1!r}".format(cls.__name__, final)) return final def who(self, data=b""): """Send a WHO request with respect to the server's capabilities. To get the WHO replies, add an event listener for "who_result", and an event listener for "who_end" for the end of WHO replies. The return value of this function is an integer equal to the data given. If the server supports WHOX, the same integer will be in the event.params.data attribute. Otherwise, this attribute will be 0. 
""" return _who(self.client, self.name, data) def use_cprivmsg(self, send_type): if not self.is_user or var.DISABLE_CPRIVMSG: # FIXME: uses var return send_type, None # check if bot is opped in any channels shared with this user from src import users cprivmsg_eligible = None # type: Optional[IRCContext] op_modes = set() for status, mode in Features.PREFIX.items(): op_modes.add(mode) if status == "@": break for chan in self.channels: if users.Bot.channels[chan] & op_modes: cprivmsg_eligible = chan break if cprivmsg_eligible: if send_type == "PRIVMSG" and Features.CPRIVMSG: return "CPRIVMSG", cprivmsg_eligible.name elif send_type == "NOTICE" and Features.CNOTICE: return "CNOTICE", cprivmsg_eligible.name return send_type, None def send(self, *data, first=None, sep=None, notice=False, privmsg=False, prefix=None): new = [] for line in data: if isinstance(line, Message): line = line.format() new.append(line) if self.is_fake: # Leave out 'fake' from the message; get_context_type() takes care of that debuglog("Would message {0} {1}: {2!r}".format(self.get_context_type(), self.name, " ".join(new))) return send_type = self.get_send_type(is_notice=notice, is_privmsg=privmsg) send_type, send_chan = self.use_cprivmsg(send_type) name = self.name if prefix is not None: name = prefix + name if first is None: first = "" if sep is None: sep = " " _send(new, first, sep, self.client, send_type, name, send_chan) class IRCFeatures: """Class to store features that the ircd supports.""" # RPL_ISUPPORT and CAP can support more than what is listed here, we store all tokens into _features # even if we don't have a property that directly exposes it. A bot operator writing custom code can use # the generic get() and set() methods to retrieve and manipulate those values. 
# Note: we store whatever the ircd tells us, but normalize return values to what the bot expects _features = {} # type: Dict[str, Any] # RPL_ISUPPORT tokens @property def CASEMAPPING(self) -> str: value = self._features.get("CASEMAPPING", "rfc1459") if value not in ("rfc1459", "rfc1459-strict", "ascii"): value = "rfc1459" return value @CASEMAPPING.setter def CASEMAPPING(self, value: str): self._features["CASEMAPPING"] = value @property def CHANLIMIT(self) -> Dict[str, int]: limits = self._features.get("CHANLIMIT", {}) value = {} for t in self.CHANTYPES: value[t] = limits.get(t, sys.maxsize) if value[t] is None: value[t] = sys.maxsize return value @CHANLIMIT.setter def CHANLIMIT(self, value: str): self._features["CHANLIMIT"] = {} parts = value.split(",") for part in parts: prefixes, limit_str = part.split(":") if limit_str == "": limit: Optional[int] = None else: limit = int(limit_str) for prefix in prefixes: self._features["CHANLIMIT"][prefix] = limit @property def CHANMODES(self) -> Tuple[str, str, str, str]: modes = self._features.get("CHANMODES", []) while len(modes) < 4: modes.append("") return tuple(modes[:4]) @CHANMODES.setter def CHANMODES(self, value: str): self._features["CHANMODES"] = value.split(",") @property def CHANTYPES(self) -> Set[str]: return self._features.get("CHANTYPES", set()) @CHANTYPES.setter def CHANTYPES(self, value: str): self._features["CHANTYPES"] = set(value) @property def CHARSET(self) -> str: return self._features.get("CHARSET", "utf-8") @CHARSET.setter def CHARSET(self, value: str): self._features["CHARSET"] = value @property def CNOTICE(self) -> bool: return self._features.get("CNOTICE", False) @CNOTICE.setter def CNOTICE(self, value: str): self._features["CNOTICE"] = True @property def CPRIVMSG(self) -> bool: return self._features.get("CPRIVMSG", False) @CPRIVMSG.setter def CPRIVMSG(self, value: str): self._features["CPRIVMSG"] = True @property def EXCEPTS(self) -> Optional[str]: return self._features.get("EXCEPTS", None) 
@EXCEPTS.setter def
# -*- coding: utf-8 -*- """ Created on Wed Apr 7 14:57:55 2021 author: rringuet Satellite Flythrough code to call for any model. All desired model data for the desired model should be in a single directory. Example calls from the command line: Get list of models and function calls possible: python ./SatelliteFlythrough_new.py Get call syntax for one of the functions: python ./SatelliteFlythrough_new.py MyFlight Get information on what variables are available for a given model: python ./SatelliteFlythrough_new.py CTIPe For funtion calls, all of the parameters in the function call should be given after the name of the function in the example above, even if the default value is desired Help on coordinate systems: The resource information on SpacePy's coordinate conversion function is sparse at best, so the below information has been collected via other resources and our own testing of the function. The data concerning the spherical coordinate systems are collected into a table format for easier perusal. For cartesian coordinates, all of the input values should be in earth radii (R_E) in order (x, y, z) to work properly. For spherical coordinates, all of the input values should be in order (longitude, latitude, altitude or radius). The longitude and latitude values should be in degrees, altitude values in kilometers, and radius values in earth radii (R_E) from the Earth's center. All latitude values should fall between -90 and 90 degrees. The longitude range differs between the coordinate systems and is given for each in the table below. SpacePy Abbrev. Full Name Lon. 
range vertical variable -------------------------------------------------------------------------- GDZ Geodetic (WGS 84) (-180, 180) Altitude (km) GEO Geographic (-180, 180) Radius (R_E) GSM Geocentric Solar Magnetospheric (-180, 180) Radius (R_E) GSE Geocentric Solar Ecliptic (-180, 180) Radius (R_E) SM Solar Magnetic (-180, 180) Radius (R_E) GEI Geocentric Equatorial Inertial (-180, 180) Radius (R_E) (also ECI = Earth-Centered Inertial) MAG Geomagnetic (-180, 180) Radius (R_E) SPH Spherical (0, 360) Radius (R_E) RLL Radius, Latitude, Longitude (-180, 180) Radius (R_E) For descriptions of most of the coordinate systems, see https://sscweb.gsfc.nasa.gov/users_guide/Appendix_C.shtml and it's reference, "Geophysical Coordinate Transformations", <NAME>, Cosmic Electrodynamics, Vol. 2, pp. 184 - 196, 1971. """ import numpy as np from os.path import basename import kamodo_ccmc.flythrough.wrapper_output as WO import kamodo_ccmc.flythrough.SF_utilities as U def SatelliteTrajectory(dataset, start_ts, stop_ts, coord_type='GEO', verbose=False): '''Retrieve and return satellite trajectory from HAPI/CDAWeb Parameters: ---------- dataset: name of the satellite data set to pull trajectory from start_ts: utc timestamp for start of desired time interval stop_ts: utc timestamp for end of desired time interval coord_type: Pick from GEO, GSM, GSE, or SM verbose: Set to true to be overwhelmed with information. Coordinates are retrieved on a cartesian grid. ''' from kamodo_ccmc.readers.hapi import HAPI #convert from utc timestamps to isoformt start = U.ts_to_ISOstring(start_ts) stop = U.ts_to_ISOstring(stop_ts) #convert from integer input of coord_type to string coord_type, coord_grid = U.MW.convert_coordnames(coord_type, 'car') #check input coord_type if coord_type not in ['GEO','GSM','GSE','SM']: raise AttributeError(f'Coordinate type {coord_type} not available. 
'+ 'Pick from GEO, GSM, GSE, or SM.') else: parameters = 'X_'+coord_type+',Y_'+coord_type+',Z_'+coord_type #retrieve satellite trajectory server = 'http://hapi-server.org/servers/SSCWeb/hapi' #for coordinate data hapi = HAPI(server, dataset, parameters, start, stop, verbose=verbose) satellite_dict = {'sat_time': hapi.tsarray, #utc timestamps 'c1': hapi.variables[parameters.split(',')[0]]['data'], #x coord 'c2': hapi.variables[parameters.split(',')[1]]['data'], #y coord 'c3': hapi.variables[parameters.split(',')[2]]['data']} #z coord print(f'Attribute/Key names of return dictionary: {satellite_dict.keys()}') return satellite_dict, coord_type, 'car' def SampleTrajectory(start_time, stop_time, max_lat=65., min_lat=-65., lon_perorbit=363., max_height=450., min_height=400., p=0.01, n=2.): ''' Given start and stop times in timestamp form, return a test satellite trajectory. Parameters: ---------- start_time: utc timestamp in seconds for start stop_time: utc timestamp in seconds for stop max_lat: maximum latitude for sample trajectory, in degrees (default=65.) min_lat: minimum latitude for sample trajectory, in degrees (default=-65.) lon_perorbit: the degrees of longitude per about 90 minute orbit (set less than 360 for precession forward in longitude, set less than 360 for precession backwards) (default=363.) max_height: maximum starting height of orbit in km (default=450.) min_height: minimum starting height of orbit in km (default=400.) p: a rough precession variable, applied as an overall height decrease as a percentage of the min_height value: p = (default=0.01). n: the time cadence of the sample trajectory generated (default = 2 seconds) Returns a dictionary with keys: sat_time, c1, c2, and c3. sat_time is an array in seconds since 1970-01-01. (c1,c2,c3) = (lon, lat, alt) in (deg,deg,km) in the 'GDZ', 'sph' coordinate system in SpacePy. 
''' #determine basic parameters orbit_seconds = int(90.*60./float(n)) #determine number of samples per 90min orbit n_orbits = (stop_time-start_time)/float(orbit_seconds*n) #orbits of 90 min each h_scale, h_offset = (max_height-min_height)/2., np.mean([max_height,min_height]) lat_scale, lat_offset = (max_lat-min_lat)/2., np.mean([max_lat,min_lat]) time_left = (stop_time-start_time)/float(n)-int(n_orbits)*orbit_seconds #create orbital tracks pi_arr = np.linspace(0.,2.*np.pi,orbit_seconds) lat, height = np.tile(np.cos(pi_arr), int(n_orbits)), np.tile(np.sin(pi_arr), int(n_orbits)) if time_left>0: lat = np.append(lat, np.cos(pi_arr[0:int(time_left)])) #add partial last orbit height = np.append(height, np.sin(pi_arr[0:int(time_left)])) lon = np.linspace(0.,float(lon_perorbit)*n_orbits,int((stop_time-start_time)/float(n))) while max(lon)>360.: lon[np.where(lon>360.)[0]]-=360. while max(lon)<0.: lon[np.where(lon<0.)[0]]+=360. height = height*h_scale+h_offset-np.linspace(0.,p,int((stop_time-start_time)/float(n)))*min_height #store results in dictionary to return sample_dict={'sat_time': np.linspace(start_time,stop_time,int((stop_time-start_time)/float(n))), 'c1': lon-180., 'c2': lat*lat_scale+lat_offset, 'c3': height} print(f'Attribute/Key names of return dictionary: {sample_dict.keys()}') print('(c1,c2,c3) = (lon, lat, alt) in (deg,deg,km) in the GDZ, sph coordinate system.'+\ 'sat_time contains the utc timestamps.') return sample_dict, 'GDZ', 'sph' #want to enable call of this from C++ for flexibility, so return only one value #keep so users can call this if they have their own satellite trajectory data def ModelFlythrough(model, file_dir, variable_list, sat_time, c1, c2, c3, coord_type, coord_grid, high_res=20., verbose=False, csv_output='', plot_output=''): '''Call satellite flythrough wrapper specific to the model chosen. Parameters: ------------ model: 'CTIPe','IRI', ... 
file_dir: complete path to where model data files are stored variable_list: List of standardized variable names. Corresponding integers are allowed. See model variable output for details. sat_time: a numpy array of the utc timestamps c1, c2, c3: numpy arrays of the positions correlating to the utc timestamps (c1, c2, c3) should be (x,y,z) in R_E for cartesian coordinates, and (lon, lat, radius (R_E) or altitude (km)) for spherical coordinates. coord_type: one of 'GDZ', 'GEO', 'GSM', 'GSE', 'SM', 'GEI', 'MAG', 'SPH', 'RLL' integers also allowed with 'GDZ'=0 and so on coord_grid: either 'car' or 'sph' (0 or 1). Note that not all combinations make sense (e.g. 'SPH' and 'car') and are not allowed. high_res: the accuracy of the conversion from radius or altitude to pressure level. Ignore if no conversion is needed for the variable(s) selected. csv_output: complete path pluts filename (without the .csv) for the file to write the results to. plot_output: complete path pluts file naming convention (without the .html) for the file to write the plots to. verbose: Set to true to be overwhelmed with information. 
''' #if input types are lists, correct to be numpy arrays (important for calling from C++) if isinstance(sat_time, list): sat_time = np.array(sat_time) if isinstance(c1, list): c1 = np.array(c1) if isinstance(c2, list): c2 = np.array(c2) if isinstance(c3, list): c3 = np.array(c3) #if model is given as an integer, then convert to a string model = U.MW.convert_model_string(model) #if variable_list is a list of integers, convert to standard names for given model variable_list = U.MW.convert_variablenames(model, variable_list) #convert integer coordinate names or grids to strings coord_type, coord_grid = U.MW.convert_coordnames(coord_type, coord_grid) #prepare files for run U.Prepare_Files(model, file_dir) #get interpolated results #coord_type should be one of SpacePy's coordinates, coord_grid is either 'sph' or 'car' results = U.Model_SatelliteFlythrough(model, file_dir, variable_list, sat_time, c1, c2, c3, coord_type, coord_grid, high_res, verbose=verbose) #remove requested variables not found in the data var_list = [key for key in results.keys() if key not in ['utc_time','c1','c2','c3','net_idx']] #retrieve coordinate and results units coord_units = U.MW.coord_units(coord_type, coord_grid) results_units = U.MW.Var_units(model, var_list) for key in coord_units: results_units[key] = coord_units[key] print(results_units) if verbose: print(f'Units from the {model} model by variable name:\n{results_units}') print(f'Dictionary key names in results:\n{results.keys()}') print('The units of the trajectory variables are unchanged from the inputs.') if csv_output!='': #correct input filename if model not in
<reponame>bonitobonita24/Mayan-EDMS import logging from django.apps import apps from django.contrib.admin.utils import ( get_fields_from_path, reverse_field_path ) from django.db.models.signals import m2m_changed, post_save, pre_delete from django.utils.encoding import force_text from django.utils.functional import cached_property from django.utils.module_loading import import_string from django.utils.translation import ugettext as _ from mayan.apps.common.class_mixins import AppsModuleLoaderMixin from mayan.apps.common.utils import ( ResolverPipelineModelAttribute, flatten_list, get_class_full_name ) from mayan.apps.views.literals import LIST_MODE_CHOICE_LIST from .exceptions import DynamicSearchException from .literals import ( DEFAULT_SCOPE_ID, DELIMITER, MESSAGE_FEATURE_NO_STATUS, QUERY_PARAMETER_ANY_FIELD, SCOPE_MATCH_ALL, SCOPE_MARKER, SCOPE_OPERATOR_CHOICES, SCOPE_OPERATOR_MARKER, SCOPE_RESULT_MAKER ) from .settings import ( setting_backend, setting_backend_arguments, setting_results_limit ) logger = logging.getLogger(name=__name__) class SearchBackend: _initialized = False _search_field_transformations = {} @staticmethod def _disable(): for search_model in SearchModel.all(): post_save.disconnect( dispatch_uid='search_handler_index_instance', sender=search_model.model ) pre_delete.disconnect( dispatch_uid='search_handler_deindex_instance', sender=search_model.model ) for proxy in search_model.proxies: post_save.disconnect( dispatch_uid='search_handler_index_instance', sender=proxy ) pre_delete.disconnect( dispatch_uid='search_handler_deindex_instance', sender=proxy ) for related_model, path in search_model.get_related_models(): post_save.disconnect( dispatch_uid='search_handler_index_related_instance_{}_{}'.format( get_class_full_name(klass=search_model.model), get_class_full_name(klass=related_model) ), sender=related_model ) pre_delete.disconnect( dispatch_uid='search_handler_index_related_instance_delete_{}_{}'.format( 
get_class_full_name(klass=search_model.model), get_class_full_name(klass=related_model) ), sender=related_model ) for through_model, data in SearchModel.get_through_models().items(): m2m_changed.disconnect( dispatch_uid='search_handler_index_related_instance_m2m_{}'.format( get_class_full_name(klass=through_model), ), sender=through_model ) @staticmethod def _enable(): # Hidden import. from .handlers import ( handler_deindex_instance, handler_index_instance, handler_factory_index_related_instance_delete, handler_factory_index_related_instance_m2m, handler_factory_index_related_instance_save ) for search_model in SearchModel.all(): post_save.connect( dispatch_uid='search_handler_index_instance', receiver=handler_index_instance, sender=search_model.model ) pre_delete.connect( dispatch_uid='search_handler_deindex_instance', receiver=handler_deindex_instance, sender=search_model.model, weak=False ) for proxy in search_model.proxies: post_save.connect( dispatch_uid='search_handler_index_instance', receiver=handler_index_instance, sender=proxy ) pre_delete.connect( dispatch_uid='search_handler_deindex_instance', receiver=handler_deindex_instance, sender=proxy, weak=False ) for related_model, path in search_model.get_related_models(): post_save.connect( dispatch_uid='search_handler_index_related_instance_{}_{}'.format( get_class_full_name(klass=search_model.model), get_class_full_name(klass=related_model) ), receiver=handler_factory_index_related_instance_save( reverse_field_path=path ), sender=related_model, weak=False ) pre_delete.connect( dispatch_uid='search_handler_index_related_instance_delete_{}_{}'.format( get_class_full_name(klass=search_model.model), get_class_full_name(klass=related_model) ), receiver=handler_factory_index_related_instance_delete( reverse_field_path=path ), sender=related_model, weak=False ) for through_model, data in SearchModel.get_through_models().items(): m2m_changed.connect( 
dispatch_uid='search_handler_index_related_instance_m2m_{}'.format( get_class_full_name(klass=through_model) ), receiver=handler_factory_index_related_instance_m2m( data=data ), sender=through_model, weak=False ) @staticmethod def get_instance(extra_kwargs=None): kwargs = setting_backend_arguments.value.copy() if extra_kwargs: kwargs.update(extra_kwargs) return import_string(dotted_path=setting_backend.value)( **kwargs ) @staticmethod def limit_queryset(queryset): pk_list = queryset.values('pk')[:setting_results_limit.value] return queryset.filter(pk__in=pk_list) @staticmethod def index_related_instance_m2m( action, instance, model, pk_set, search_model_related_paths ): # Hidden import from .tasks import task_index_instance if action in ('post_add', 'pre_remove'): instance_paths = search_model_related_paths.get(instance._meta.model, ()) model_paths = search_model_related_paths.get(model, ()) if action == 'pre_remove': exclude_kwargs = { 'exclude_app_label': instance._meta.app_label, 'exclude_model_name': instance._meta.model_name, 'exclude_kwargs': {'id': instance.pk} } else: exclude_kwargs = {} for instance_path in instance_paths: result = ResolverPipelineModelAttribute.resolve( attribute=instance_path, obj=instance ) entries = flatten_list(value=result) for entry in entries: task_kwargs = { 'app_label': entry._meta.app_label, 'model_name': entry._meta.model_name, 'object_id': entry.pk } task_kwargs.update(exclude_kwargs) task_index_instance.apply_async( kwargs=task_kwargs ) if action == 'pre_remove': exclude_kwargs = { 'exclude_app_label': model._meta.app_label, 'exclude_model_name': model._meta.model_name, 'exclude_kwargs': {'id__in': pk_set} } else: exclude_kwargs = {} for model_instance in model._meta.default_manager.filter(pk__in=pk_set): for instance_path in model_paths: result = ResolverPipelineModelAttribute.resolve( attribute=instance_path, obj=model_instance ) entries = flatten_list(value=result) for entry in entries: task_kwargs = { 'app_label': 
entry._meta.app_label, 'model_name': entry._meta.model_name, 'object_id': entry.pk } task_kwargs.update(exclude_kwargs) task_index_instance.apply_async( kwargs=task_kwargs ) def __init__(self, **kwargs): self.kwargs = kwargs def _search(self, global_and_search, query, search_model, user, ignore_limit): raise NotImplementedError def cleanup_query(self, query, search_model): search_field_names = [ search_field.get_full_name() for search_field in search_model.search_fields ] clean_query = {} if QUERY_PARAMETER_ANY_FIELD in query: value = query[QUERY_PARAMETER_ANY_FIELD] if value: clean_query = {key: value for key in search_field_names} else: # Allow only valid search fields for the search model and scoping keys. clean_query = { key: value for key, value in query.items() if key in search_field_names and value } return clean_query def close(self): """ Optional method to terminate a backend instance, such as closing connections. """ def decode_query(self, query, global_and_search=False): # Clean up the query. query.pop('_match_all', None) # Turn scoped query dictionary into a series of unscoped queries. operators = {} result_scope = DEFAULT_SCOPE_ID scope_match_all = False scopes = {} for key, value in query.items(): scope_id = DEFAULT_SCOPE_ID # Check if the entry has a scope marker. if key.startswith(SCOPE_MARKER): # Remove the scope marker. key = key[len(SCOPE_MARKER):] if key.startswith(SCOPE_OPERATOR_MARKER): # Check for operator. # __operator_SCOPE_SCOPE=OPERATOR_SCOPE key = key[len(SCOPE_OPERATOR_MARKER):] operator_scopes = key[len(DELIMITER):].split(DELIMITER) operator_text, result = value.split(DELIMITER) operators[result] = { 'scopes': operator_scopes, 'function': SCOPE_OPERATOR_CHOICES[operator_text], } elif key.startswith(SCOPE_RESULT_MAKER): # Check for result. # __result=SCOPE result_scope = value else: # Check scope match all. 
# __SCOPE_match_all if key.endswith(SCOPE_MATCH_ALL): scope_id, key = key.split(DELIMITER, 1) scopes.setdefault(scope_id, {}) scope_match_all = value.lower() in ['on', 'true'] scopes[scope_id]['match_all'] = scope_match_all else: # Must be a scoped query. # __SCOPE_QUERY=VALUE scope_id, key = key.split(DELIMITER, 1) scopes.setdefault(scope_id, {}) scopes[scope_id].setdefault('match_all', False) scopes[scope_id].setdefault('query', {}) scopes[scope_id]['query'][key] = value else: scopes.setdefault(scope_id, {}) scopes[scope_id].setdefault('match_all', global_and_search) scopes[scope_id].setdefault('query', {}) scopes[scope_id]['query'][key] = value else: # If query if empty, create an empty scope 0. scopes.setdefault(DEFAULT_SCOPE_ID, {}) scopes[DEFAULT_SCOPE_ID].setdefault('match_all', scope_match_all) scopes[DEFAULT_SCOPE_ID].setdefault('query', {}) return { 'operators': operators, 'result_scope': result_scope, 'scopes': scopes } def deindex_instance(self, instance): """ Optional method to remove an model instance from the search index. 
""" def get_resolved_field_map(self, search_model): result = {} for search_field in self.get_search_model_fields(search_model=search_model): backend_field_type = self.field_map.get( search_field.field_type ) if backend_field_type: result[search_field.get_full_name()] = backend_field_type else: logger.warning( 'Unknown field type "%s" for model "%s"', search_field.get_full_name(), search_model.get_full_name() ) return result def get_search_field_transformation(self, search_field): if search_field not in self.__class__._search_field_transformations: field_map = self.get_resolved_field_map( search_model=search_field.search_model ) transformation = field_map[search_field.field].get( 'transformation', SearchModel.function_return_same ) self.__class__._search_field_transformations[ search_field ] = transformation return self.__class__._search_field_transformations[search_field] def get_search_model_fields(self, search_model): result = search_model.search_fields.copy() result.append( SearchField(search_model=search_model, field='id', label='ID') ) return result def get_status(self): """ Backend specific method to provide status and statistics information. """ if not hasattr(self, '_get_status'): return MESSAGE_FEATURE_NO_STATUS else: return self._get_status() def index_instance(self, instance, exclude_model=None, exclude_kwargs=None): """ Optional method to add or update an model instance to the search index. """ def index_search_model(self, search_model, range_string=None): """ Optional method to add or update all instance of a model. """ def initialize(self): if not self.__class__._initialized: self.__class__._initialized = True self._initialize() def _initialize(self): """ Optional method to setup the backend. Executed once on every boot up. """ def reset(self, search_model=None): """ Optional method to clear all search indices. 
""" def search( self, query, search_model, user, global_and_search=False ): AccessControlList = apps.get_model( app_label='acls', model_name='AccessControlList' ) result = self.decode_query( global_and_search=global_and_search, query=query ) # Recursive call to the backend's search using queries as unscoped # and then merge then using the corresponding operator. queryset = self.solve_scope( operators=result['operators'], result_scope=result['result_scope'], search_model=search_model, scopes=result['scopes'], user=user ) if search_model.permission: queryset = AccessControlList.objects.restrict_queryset( permission=search_model.permission, queryset=queryset, user=user ) return SearchBackend.limit_queryset(queryset=queryset) def solve_scope( self, search_model, user, result_scope, scopes, operators ): if len(scopes) > 1: ignore_limit = True else: ignore_limit = False try: # Try scopes. scope = scopes[result_scope] except KeyError: try: # Try operators. operator = operators[result_scope] except KeyError: raise DynamicSearchException( 'Scope `{}` not found.'.format(result_scope) ) else: result = None for scope in operator['scopes']: queryset = self.solve_scope( operators=operators, result_scope=scope, search_model=search_model, scopes=scopes, user=user ) if result is None: result = queryset else: result = operator['function'](result, queryset) return result else: try: query = self.cleanup_query( query=scope['query'], search_model=search_model ) except KeyError: raise DynamicSearchException( 'Scope `{}` does not specify a query.'.format(result_scope) ) else: if query: return self._search( global_and_search=scope['match_all'], ignore_limit=ignore_limit, search_model=search_model, query=query, user=user ) else: return search_model.get_queryset().none() def tear_down(self): """ Optional method to clean up and/or destroy search backend structures like indices. """ def upgrade(self): """ Optional method to upgrade the search backend persistent structures. 
""" class SearchField: """ Search for terms in fields that directly belong to the parent SearchModel. """ def __init__( self, search_model, field, label=None, transformation_function=None ): self.search_model = search_model self.field = field self._label = label self.transformation_function = transformation_function def __repr__(self): return '<{}: {}>'.format( self.__class__.__name__, self.field ) @cached_property def field_type(self): return self.get_model_field().__class__ def get_full_name(self): return self.field def get_model(self): return self.search_model.model def get_model_field(self): return get_fields_from_path(model=self.get_model(), path=self.field)[-1] @property def label(self): return self._label or self.get_model_field().verbose_name @cached_property def model(self): return self.search_model.model @cached_property def related_model(self): return self.get_model_field().model @cached_property def reverse_path(self): return reverse_field_path(model=self.model, path=self.field)[1] class SearchModel(AppsModuleLoaderMixin): _loader_module_name = 'search' _registry = {} @staticmethod def function_return_same(value): return value @classmethod def all(cls): return sorted( list(set(cls._registry.values())), key=lambda x: x.label ) @classmethod def as_choices(cls): return cls._registry @classmethod def get(cls, name): try: result = cls._registry[name] except KeyError: raise KeyError(_('Unknown search model `%s`.') % name) else: if getattr(result, 'serializer_path', None): result.serializer = import_string( dotted_path=result.serializer_path ) return result @classmethod def get_default(cls): for search_class in cls.all(): if search_class.default: return search_class @classmethod def get_for_model(cls, instance): # Works the same for model classes and model instances. 
        return cls.get(name=instance._meta.label.lower())

    @classmethod
    def get_through_models(cls):
        """
        Map each many-to-many through model to the related models (and the
        paths to them) reachable from any registered search model.
        """
        through_models = {}

        for search_model in cls.all():
            for related_model, related_path in search_model.get_related_models():
                # Check if each related model is connected to a many to many.
                for field in related_model._meta.get_fields():
                    if field.many_to_many:
                        # Forward M2M fields expose `.through`; reverse
                        # relations expose it via `.remote_field`.
                        try:
                            through_model = field.through
                        except AttributeError:
                            through_model = field.remote_field.through

                        through_models.setdefault(through_model, {})
                        through_models[through_model].setdefault(related_model, set())
                        through_models[through_model][related_model].add(related_path)

        return through_models

    def __init__(
        self, app_label, model_name, default=False, label=None,
        list_mode=None, manager_name=None, permission=None, queryset=None,
        serializer_path=None
    ):
        self.default = default
        self._label = label
        self.app_label = app_label
        # Fall back to the module-level default list mode.
        self.list_mode = list_mode or LIST_MODE_CHOICE_LIST
        self.model_name = model_name.lower()
genesets = { 'upregulated': signature[signature[sort_genes_by] > 0].index[:geneset_size], 'downregulated': signature[signature[sort_genes_by] < 0].index[-geneset_size:] } # Submit to Enrichr enrichr_ids = {geneset_label: submit_enrichr_geneset(geneset=geneset, label=signature_label+', '+geneset_label+', from Bulk RNA-seq Appyter') for geneset_label, geneset in genesets.items()} enrichr_ids['signature_label'] = signature_label return enrichr_ids def get_enrichr_results(user_list_id, gene_set_libraries, overlappingGenes=True, geneset=None): ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/enrich' query_string = '?userListId=%s&backgroundType=%s' results = [] for gene_set_library, label in gene_set_libraries.items(): response = requests.get( ENRICHR_URL + query_string % (user_list_id, gene_set_library) ) if not response.ok: raise Exception('Error fetching enrichment results') data = json.loads(response.text) resultDataframe = pd.DataFrame(data[gene_set_library], columns=[ 'rank', 'term_name', 'pvalue', 'zscore', 'combined_score', 'overlapping_genes', 'FDR', 'old_pvalue', 'old_FDR']) selectedColumns = ['term_name', 'zscore', 'combined_score', 'pvalue', 'FDR'] if not overlappingGenes else [ 'term_name', 'zscore', 'combined_score', 'FDR', 'pvalue', 'overlapping_genes'] resultDataframe = resultDataframe.loc[:, selectedColumns] resultDataframe['gene_set_library'] = label resultDataframe['log10P'] = -np.log10(resultDataframe['pvalue']) results.append(resultDataframe) concatenatedDataframe = pd.concat(results) if geneset: concatenatedDataframe['geneset'] = geneset return concatenatedDataframe def get_enrichr_results_by_library(enrichr_results, signature_label, plot_type='interactive', library_type='go', version='2018', sort_results_by='pvalue'): # Libraries if library_type == 'go': go_version = str(version) libraries = { 'GO_Biological_Process_'+go_version: 'Gene Ontology Biological Process ('+go_version+' version)', 'GO_Molecular_Function_'+go_version: 'Gene Ontology 
Molecular Function ('+go_version+' version)', 'GO_Cellular_Component_'+go_version: 'Gene Ontology Cellular Component ('+go_version+' version)' } elif library_type == "pathway": # Libraries libraries = { 'KEGG_2016': 'KEGG Pathways', 'WikiPathways_2016': 'WikiPathways', 'Reactome_2016': 'Reactome Pathways' } # Get Enrichment Results enrichment_results = {geneset: get_enrichr_results(enrichr_results[geneset]['userListId'], gene_set_libraries=libraries, geneset=geneset) for geneset in ['upregulated', 'downregulated']} enrichment_results['signature_label'] = signature_label enrichment_results['plot_type'] = plot_type enrichment_results['sort_results_by'] = sort_results_by # Return return enrichment_results def get_enrichr_result_tables_by_library(enrichr_results, signature_label, library_type='tf'): # Libraries if library_type == 'tf': # Libraries libraries = { 'ChEA_2016': 'A. ChEA (experimentally validated targets)', 'ENCODE_TF_ChIP-seq_2015': 'B. ENCODE (experimentally validated targets)', 'ARCHS4_TFs_Coexp': 'C. ARCHS4 (coexpressed genes)' } elif library_type == "ke": # Libraries libraries = { 'KEA_2015': 'A. KEA (experimentally validated targets)', 'ARCHS4_Kinases_Coexp': 'B. ARCHS4 (coexpressed genes)' } elif library_type == "mirna": libraries = { 'TargetScan_microRNA_2017': 'A. TargetScan (experimentally validated targets)', 'miRTarBase_2017': 'B. 
miRTarBase (experimentally validated targets)' } # Initialize results results = [] # Loop through genesets for geneset in ['upregulated', 'downregulated']: # Append ChEA results enrichment_dataframe = get_enrichr_results(enrichr_results[geneset]['userListId'], gene_set_libraries=libraries, geneset=geneset) results.append(enrichment_dataframe) # Concatenate results enrichment_dataframe = pd.concat(results) return {'enrichment_dataframe': enrichment_dataframe, 'signature_label': signature_label} def plot_library_barchart(enrichr_results, gene_set_library, signature_label, case_name, sort_results_by='pvalue', nr_genesets=15, height=400, plot_type='interactive'): sort_results_by = 'log10P' if sort_results_by == 'pvalue' else 'combined_score' fig = tools.make_subplots(rows=1, cols=2, print_grid=False) for i, geneset in enumerate(['upregulated', 'downregulated']): # Get dataframe enrichment_dataframe = enrichr_results[geneset] plot_dataframe = enrichment_dataframe[enrichment_dataframe['gene_set_library'] == gene_set_library].sort_values(sort_results_by, ascending=False).iloc[:nr_genesets].iloc[::-1] # Format n = 7 plot_dataframe['nr_genes'] = [len(genes) for genes in plot_dataframe['overlapping_genes']] plot_dataframe['overlapping_genes'] = ['<br>'.join([', '.join(genes[i:i+n]) for i in range(0, len(genes), n)]) for genes in plot_dataframe['overlapping_genes']] # Get Bar bar = go.Bar( x=plot_dataframe[sort_results_by], y=plot_dataframe['term_name'], orientation='h', name=geneset.title(), showlegend=False, hovertext=['<b>{term_name}</b><br><b>P-value</b>: <i>{pvalue:.2}</i><br><b>FDR</b>: <i>{FDR:.2}</i><br><b>Z-score</b>: <i>{zscore:.3}</i><br><b>Combined score</b>: <i>{combined_score:.3}</i><br><b>{nr_genes} Genes</b>: <i>{overlapping_genes}</i><br>'.format(**rowData) for index, rowData in plot_dataframe.iterrows()], hoverinfo='text', marker={'color': '#FA8072' if geneset == 'upregulated' else '#87CEFA'} ) fig.append_trace(bar, 1, i+1) # Get text text = go.Scatter( 
x=[max(bar['x'])/50 for x in range(len(bar['y']))], y=bar['y'], mode='text', hoverinfo='none', showlegend=False, text=['*<b>{}</b>'.format(rowData['term_name']) if rowData['FDR'] < 0.1 else '{}'.format( rowData['term_name']) for index, rowData in plot_dataframe.iterrows()], textposition="middle right", textfont={'color': 'black'} ) fig.append_trace(text, 1, i+1) # Get annotations annotations = [ {'x': 0.25, 'y': 1.06, 'text': '<span style="color: #FA8072; font-size: 10pt; font-weight: 600;">Up-regulated in ' + case_name+'</span>', 'showarrow': False, 'xref': 'paper', 'yref': 'paper', 'xanchor': 'center'}, {'x': 0.75, 'y': 1.06, 'text': '<span style="color: #87CEFA; font-size: 10pt; font-weight: 600;">Down-regulated in ' + case_name+'</span>', 'showarrow': False, 'xref': 'paper', 'yref': 'paper', 'xanchor': 'center'} ] if signature_label else [] # Get title title = signature_label + ' | ' + gene_set_library fig['layout'].update(height=height, title='<b>{}</b>'.format(title), hovermode='closest', annotations=annotations) fig['layout']['xaxis1'].update(domain=[0, 0.49], title='-log10P' if sort_results_by == 'log10P' else 'Enrichment score') fig['layout']['xaxis2'].update(domain=[0.51, 1], title='-log10P' if sort_results_by == 'log10P' else 'Enrichment score') fig['layout']['yaxis1'].update(showticklabels=False) fig['layout']['yaxis2'].update(showticklabels=False) fig['layout']['margin'].update(l=0, t=65, r=0, b=35) if plot_type=='interactive': plotly.offline.iplot(fig) else: py.image.ishow(fig) def results_table(enrichment_dataframe, source_label, target_label, table_counter): # Get libraries for gene_set_library in enrichment_dataframe['gene_set_library'].unique(): # Get subset enrichment_dataframe_subset = enrichment_dataframe[enrichment_dataframe['gene_set_library'] == gene_set_library].copy() # Get unique values from source column enrichment_dataframe_subset[source_label] = [x.split('_')[0] for x in enrichment_dataframe_subset['term_name']] 
enrichment_dataframe_subset = enrichment_dataframe_subset.sort_values(['FDR', 'pvalue']).rename(columns={'pvalue': 'P-value'}).drop_duplicates(source_label) # Add links and bold for significant results # if " " in enrichment_dataframe_subset[source_label][0]: enrichment_dataframe_subset[source_label] = ['<a href="http://www.mirbase.org/cgi-bin/query.pl?terms={}" target="_blank">{}</a>'.format(x.split(" ")[0], x) if '-miR-' in x else '<a href="http://amp.pharm.mssm.edu/Harmonizome/gene/{}" target="_blank">{}</a>'.format(x.split(" ")[0], x)for x in enrichment_dataframe_subset[source_label]] # else: # enrichment_dataframe_subset[source_label] = ['<a href="http://www.mirbase.org/cgi-bin/query.pl?terms={x}" target="_blank">{x}</a>'.format(**locals()) if '-miR-' in x else '<a href="http://amp.pharm.mssm.edu/Harmonizome/gene/{x}" target="_blank">{x}</a>'.format(**locals())for x in enrichment_dataframe_subset[source_label]] enrichment_dataframe_subset[source_label] = [rowData[source_label].replace('target="_blank">', 'target="_blank"><b>').replace('</a>', '*</b></a>') if rowData['FDR'] < 0.05 else rowData[source_label] for index, rowData in enrichment_dataframe_subset.iterrows()] # Add rank enrichment_dataframe_subset['Rank'] = ['<b>'+str(x+1)+'</b>' for x in range(len(enrichment_dataframe_subset.index))] # Add overlapping genes with tooltip enrichment_dataframe_subset['nr_overlapping_genes'] = [len(x) for x in enrichment_dataframe_subset['overlapping_genes']] enrichment_dataframe_subset['overlapping_genes'] = [', '.join(x) for x in enrichment_dataframe_subset['overlapping_genes']] enrichment_dataframe_subset[target_label.title()] = ['{nr_overlapping_genes} {geneset} '.format(**rowData)+target_label+'s' for index, rowData in enrichment_dataframe_subset.iterrows()] # enrichment_dataframe[target_label.title()] = ['<span class="gene-tooltip">{nr_overlapping_genes} {geneset} '.format(**rowData)+target_label+'s<div 
class="gene-tooltip-text">{overlapping_genes}</div></span>'.format(**rowData) for index, rowData in enrichment_dataframe.iterrows()] # Convert to HTML pd.set_option('max.colwidth', -1) html_table = enrichment_dataframe_subset.head(50)[['Rank', source_label, 'P-value', 'FDR', target_label.title()]].to_html(escape=False, index=False, classes='w-100') html_results = '<div style="max-height: 200px; overflow-y: scroll;">{}</div>'.format(html_table) # Add CSS display(HTML('<style>.w-100{width: 100%;} .text-left th{text-align: left !important;}</style>')) display(HTML('<style>.slick-cell{overflow: visible;}.gene-tooltip{text-decoration: underline; text-decoration-style: dotted;}.gene-tooltip .gene-tooltip-text{visibility: hidden; position: absolute; left: 60%; width: 250px; z-index: 1000; text-align: center; background-color: black; color: white; padding: 5px 10px; border-radius: 5px;} .gene-tooltip:hover .gene-tooltip-text{visibility: visible;} .gene-tooltip .gene-tooltip-text::after {content: " ";position: absolute;bottom: 100%;left: 50%;margin-left: -5px;border-width: 5px;border-style: solid;border-color: transparent transparent black transparent;}</style>')) # Display table display(HTML(html_results)) # Display gene set # display_object(table_counter, gene_set_library, istable=True) if source_label == "Transcription Factor": additional_description = ". The table contains scrollable tables displaying the results of the Transcription Factor (TF) enrichment analysis generated using Enrichr. Every row represents a TF; significant TFs are highlighted in bold." elif source_label == "Kinase": additional_description = ". The table contains browsable tables displaying the results of the Protein Kinase (PK) enrichment analysis generated using Enrichr. Every row represents a PK; significant PKs are highlighted in bold." elif source_label == "miRNA": additional_description = ". 
The figure contains browsable tables displaying the results of the miRNA enrichment analysis generated using Enrichr. Every row represents a miRNA; significant miRNAs are highlighted in bold." display_object(table_counter, gene_set_library+additional_description, istable=True) display(create_download_link(enrichment_dataframe_subset, filename="Enrichment_analysis_{}_{}.csv".format(source_label, gene_set_library))) table_counter += 1 return table_counter def display_table(analysis_results, source_label, table_counter): # Plot Table return results_table(analysis_results['enrichment_dataframe'].copy(), source_label=source_label, target_label='target', table_counter=table_counter) def CPM(data): with warnings.catch_warnings(): warnings.simplefilter("ignore") data = (data/data.sum())*10**6 data = data.fillna(0) return data def run_monocle(dataset, color_by='Pseudotime', ordering='de', plot_type='interactive'): robjects.r(''' suppressMessages(library(dplyr)) suppressMessages(library(monocle)) suppressMessages(library(tibble)) suppressMessages(require(Matrix)) suppressMessages(require(VGAM)) suppressMessages(require(igraph)) # Make a CellDataSet object # @expr_df: CPM expression data.frame (genes by samples) makeCellData <- function(expr_df) { genes <- rownames(expr_df) expr_mat = data.matrix(expr_df) num_cells_expressed <- (expr_mat > 0.1) + 0 num_cells_expressed <- Matrix::rowSums(num_cells_expressed) fd <- data.frame(num_cells_expressed=num_cells_expressed, row.names = genes) fd <- new("AnnotatedDataFrame", data = fd) pd <- new("AnnotatedDataFrame", data = data.frame(row.names=colnames(expr_mat))) newCellDataSet(expr_mat, phenoData = pd, featureData = fd, lowerDetectionLimit = 0.1, expressionFamily = VGAM::tobit(0.1)) } makeCellData3 <- function(expr_df) { genes <- rownames(expr_df) expr_mat = data.matrix(expr_df) num_cells_expressed <- (expr_mat > 0.1) + 0 num_cells_expressed <- Matrix::rowSums(num_cells_expressed) fd <- 
data.frame(num_cells_expressed=num_cells_expressed, row.names = genes) fd <- new("AnnotatedDataFrame", data = fd) pd <- data.frame(row.names=colnames(expr_mat)) # a hack to avoid error when running `partitionCells` pd['foo'] = 'bar' pd <- new("AnnotatedDataFrame", data = pd) newCellDataSet(expr_mat, phenoData = pd, featureData = fd, lowerDetectionLimit = 0.1) } getDEGsAsOrderingGenes <- function(cds){ # get DEGs among clusters cds_expressed_genes <- row.names(subset(fData(cds), num_cells_expressed >= 10)) clustering_DEG_genes <- differentialGeneTest(cds[cds_expressed_genes,], fullModelFormulaStr = '~Cluster', cores = 8) # order cells with top 1000 DEGs cds_ordering_genes <- row.names(clustering_DEG_genes)[order(clustering_DEG_genes$qval)][1:1000] cds_ordering_genes } getHighVarGenesAsOrderingGenes <- function(cds){ # Use genes with highest variance as ordering genes RowVar <- function(x, ...) { # from https://stackoverflow.com/questions/25099825/row-wise-variance-of-a-matrix-in-r rowSums((x - rowMeans(x, ...))^2, ...)/(dim(x)[2] - 1) } # use genes with high variances for ordering cell gene_variances <- RowVar(exprs(cds)) cds_ordering_genes <- names(gene_variances[order(gene_variances, decreasing = T)])[1:1000] cds_ordering_genes } # Run the entire Monocle-DDRTree pipeline to # 1) clustering # 2) identify DEGs across clusters # 3) ordering cells/psudotime estimation runMonocleDDRTree <- function(cds, ordering = "de") { # tSNE and clustering cells cds <- reduceDimension(cds, max_components = 2, norm_method = 'log', reduction_method = 'tSNE', perplexity = 5, verbose = T) n_cells <- as.numeric(dim(cds)[2]) k <- 50 # default k for louvain clustering if (n_cells < 52){ k <- n_cells - 2 } cds <- clusterCells(cds, method="louvain", k=k, verbose = T) n_clusters <- length(unique(cds$Cluster)) if (n_clusters > 1 && ordering
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
import heapq
from binascii import hexlify
from collections import namedtuple

from raiden.encoding.signing import recover_publickey
from raiden.transfer.architecture import TransitionResult
from raiden.transfer.balance_proof import signing_data
from raiden.transfer.events import (
    ContractSendChannelClose,
    ContractSendChannelSettle,
    ContractSendChannelUpdateTransfer,
    ContractSendChannelWithdraw,
    EventTransferReceivedInvalidDirectTransfer,
    EventTransferReceivedSuccess,
    EventTransferSentFailed,
    SendDirectTransfer,
)
from raiden.transfer.mediated_transfer.state import LockedTransferUnsignedState
from raiden.transfer.mediated_transfer.events import (
    refund_from_sendmediated,
    SendBalanceProof,
    SendMediatedTransfer,
)
from raiden.transfer.merkle_tree import (
    LEAVES,
    merkleroot,
    compute_layers,
    compute_merkleproof_for,
)
from raiden.transfer.state import (
    CHANNEL_STATE_CLOSED,
    CHANNEL_STATE_CLOSING,
    CHANNEL_STATE_OPENED,
    CHANNEL_STATE_SETTLED,
    CHANNEL_STATE_SETTLING,
    CHANNEL_STATES_PRIOR_TO_CLOSED,
    CHANNEL_STATE_UNUSABLE,
    EMPTY_MERKLE_ROOT,
    EMPTY_MERKLE_TREE,
    BalanceProofUnsignedState,
    HashTimeLockState,
    MerkleTreeState,
    TransactionExecutionStatus,
    UnlockPartialProofState,
    UnlockProofState,
)
from raiden.transfer.state_change import (
    ActionChannelClose,
    ActionTransferDirect,
    Block,
    ContractReceiveChannelClosed,
    ContractReceiveChannelNewBalance,
    ContractReceiveChannelSettled,
    ContractReceiveChannelWithdraw,
    ReceiveTransferDirect,
)
from raiden.utils import publickey_to_address, typing
from raiden.settings import DEFAULT_NUMBER_OF_CONFIRMATIONS_BLOCK

# Ordering is by block number so a heap of these yields the oldest
# transaction first.
TransactionOrder = namedtuple(
    'TransactionOrder',
    ('block_number', 'transaction')
)


def is_known(end_state, hashlock):
    """True if the `hashlock` corresponds to a known lock."""
    return (
        hashlock in end_state.hashlocks_to_pendinglocks or
        hashlock in end_state.hashlocks_to_unclaimedlocks
    )


def is_deposit_confirmed(channel_state, block_number):
    """True if the oldest queued deposit transaction has enough block
    confirmations at `block_number`."""
    if not channel_state.deposit_transaction_queue:
        return False

    return is_transaction_confirmed(
        channel_state.deposit_transaction_queue[0].block_number,
        block_number,
    )


def is_locked(end_state, hashlock):
    """True if the `hashlock` is known and the corresponding secret is not."""
    return hashlock in end_state.hashlocks_to_pendinglocks


def is_secret_known(end_state, hashlock):
    """True if the `hashlock` is for a lock with a known secret."""
    return hashlock in end_state.hashlocks_to_unclaimedlocks


def is_transaction_confirmed(transaction_block_number, blockchain_block_number):
    """True once the chain head is strictly past the transaction's
    confirmation block."""
    confirmation_block = transaction_block_number + DEFAULT_NUMBER_OF_CONFIRMATIONS_BLOCK
    return blockchain_block_number > confirmation_block


def is_valid_signature(balance_proof, sender_address):
    """
    Check that `balance_proof.signature` recovers to `sender_address`.

    Returns a `(is_valid, error_message)` pair; `error_message` is None on
    success.
    """
    data_that_was_signed = signing_data(
        balance_proof.nonce,
        balance_proof.transferred_amount,
        balance_proof.channel_address,
        balance_proof.locksroot,
        balance_proof.message_hash,
    )

    try:
        # ValueError is raised if the PublicKey instantiation failed, let it
        # propagate because it's a memory pressure problem
        publickey = recover_publickey(
            data_that_was_signed,
            balance_proof.signature,
        )
    except Exception:  # pylint: disable=broad-except
        # secp256k1 is using bare Exception classes
        # raised if the recovery failed
        msg = 'Signature invalid, could not be recovered.'
        return (False, msg)

    is_correct_sender = sender_address == publickey_to_address(publickey)
    if is_correct_sender:
        return (True, None)

    msg = 'Signature was valid but the expected address does not match.'
    return (False, msg)


def is_valid_directtransfer(direct_transfer, channel_state, sender_state, receiver_state):
    """
    Validate a received direct transfer against the sender's current
    balance proof. Returns `(is_valid, error_message)`.
    """
    received_balance_proof = direct_transfer.balance_proof
    current_balance_proof = get_current_balanceproof(sender_state)

    current_locksroot, _, current_transferred_amount = current_balance_proof
    distributable = get_distributable(sender_state, receiver_state)
    expected_nonce = get_next_nonce(sender_state)
    # Direct transfers carry a monotonically increasing cumulative total;
    # the delta is this transfer's amount.
    amount = received_balance_proof.transferred_amount - current_transferred_amount

    is_valid, signature_msg = is_valid_signature(
        received_balance_proof,
        sender_state.address,
    )

    if get_status(channel_state) != CHANNEL_STATE_OPENED:
        # NOTE(review): message says "direct message" while the other
        # branches say "DirectTransfer message" — likely a copy/paste
        # inconsistency; confirm before relying on the exact text.
        msg = 'Invalid direct message. The channel is already closed.'
        result = (False, msg)

    elif not is_valid:
        # The signature must be valid, otherwise the balance proof cannot be
        # used onchain.
        msg = 'Invalid DirectTransfer message. {}'.format(signature_msg)

        result = (False, msg)

    elif received_balance_proof.nonce != expected_nonce:
        # The nonces must increase sequentially, otherwise there is a
        # synchronization problem.
        msg = (
            'Invalid DirectTransfer message. '
            'Nonce did not change sequentially, expected: {} got: {}.'
        ).format(
            expected_nonce,
            received_balance_proof.nonce,
        )

        result = (False, msg)

    elif received_balance_proof.locksroot != current_locksroot:
        # Direct transfers do not use hash time lock, so it cannot change the
        # locksroot, otherwise a lock could be removed.
        msg = (
            "Invalid DirectTransfer message. "
            "Balance proof's locksroot changed, expected: {} got: {}."
        ).format(
            hexlify(current_locksroot).decode(),
            hexlify(received_balance_proof.locksroot).decode(),
        )

        result = (False, msg)

    elif received_balance_proof.transferred_amount <= current_transferred_amount:
        # Direct transfers must increase the transferred_amount, otherwise the
        # sender is trying to play the protocol and steal token.
        msg = (
            "Invalid DirectTransfer message. "
            "Balance proof's transferred_amount decreased, expected larger than: {} got: {}."
).format( current_transferred_amount, received_balance_proof.transferred_amount, ) result = (False, msg) elif received_balance_proof.channel_address != channel_state.identifier: # The balance proof must be tied to this channel, otherwise the # on-chain contract would be sucesstible to replay attacks across # channels. msg = ( 'Invalid DirectTransfer message. ' 'Balance proof is tied to the wrong channel, expected: {} got: {}' ).format( hexlify(channel_state.identifier).decode(), hexlify(received_balance_proof.channel_address).decode(), ) result = (False, msg) elif amount > distributable: # Direct transfer are limited to the current available balance, # otherwise the sender is doing a trying to play the protocol and do a # double spend. msg = ( 'Invalid DirectTransfer message. ' 'Transfer amount larger than the available distributable, ' 'transfer amount: {} maximum distributable: {}' ).format( amount, distributable, ) result = (False, msg) else: result = (True, None) return result def is_valid_mediatedtransfer(mediated_transfer, channel_state, sender_state, receiver_state): received_balance_proof = mediated_transfer.balance_proof current_balance_proof = get_current_balanceproof(sender_state) _, _, current_transferred_amount = current_balance_proof distributable = get_distributable(sender_state, receiver_state) expected_nonce = get_next_nonce(sender_state) lock = mediated_transfer.lock merkletree = compute_merkletree_with(sender_state.merkletree, lock.lockhash) if get_status(channel_state) != CHANNEL_STATE_OPENED: msg = 'Invalid direct message. The channel is already closed.' result = (False, msg, None) if merkletree is None: msg = 'Invalid MediatedTransfer message. Same lockhash handled twice.' 
result = (False, msg, None) else: locksroot_with_lock = merkleroot(merkletree) (is_valid, signature_msg) = is_valid_signature( received_balance_proof, sender_state.address, ) if not is_valid: # The signature must be valid, otherwise the balance proof cannot be # used onchain msg = 'Invalid MediatedTransfer message. {}'.format(signature_msg) result = (False, msg, None) elif received_balance_proof.nonce != expected_nonce: # The nonces must increase sequentially, otherwise there is a # synchronization problem msg = ( 'Invalid MediatedTransfer message. ' 'Nonce did not change sequentially, expected: {} got: {}.' ).format( expected_nonce, received_balance_proof.nonce, ) result = (False, msg, None) elif received_balance_proof.locksroot != locksroot_with_lock: # The locksroot must be updated to include the new lock msg = ( "Invalid MediatedTransfer message. " "Balance proof's locksroot didn't match, expected: {} got: {}." ).format( hexlify(locksroot_with_lock).decode(), hexlify(received_balance_proof.locksroot).decode(), ) result = (False, msg, None) elif received_balance_proof.transferred_amount != current_transferred_amount: # Mediated transfers must not change transferred_amount msg = ( "Invalid MediatedTransfer message. " "Balance proof's transferred_amount changed, expected: {} got: {}." ).format( current_transferred_amount, received_balance_proof.transferred_amount, ) result = (False, msg, None) elif received_balance_proof.channel_address != channel_state.identifier: # The balance proof must be tied to this channel, otherwise the # on-chain contract would be sucesstible to replay attacks across # channels. msg = ( 'Invalid MediatedTransfer message. 
' 'Balance proof is tied to the wrong channel, expected: {} got: {}' ).format( hexlify(channel_state.identifier).decode(), hexlify(received_balance_proof.channel_address).decode(), ) result = (False, msg, None) # the locked amount is limited to the current available balance, otherwise # the sender is doing a trying to play the protocol and do a double spend elif lock.amount > distributable: msg = ( 'Invalid MediatedTransfer message. ' 'Lock amount larger than the available distributable, ' 'lock amount: {} maximum distributable: {}' ).format( lock.amount, distributable, ) result = (False, msg, None) else: result = (True, None, merkletree) return result def is_valid_unlock(unlock, channel_state, sender_state): received_balance_proof = unlock.balance_proof current_balance_proof = get_current_balanceproof(sender_state) lock = get_lock(sender_state, unlock.hashlock) if lock is not None: new_merkletree = compute_merkletree_without(sender_state.merkletree, lock.lockhash) locksroot_without_lock = merkleroot(new_merkletree) _, _, current_transferred_amount = current_balance_proof expected_nonce = get_next_nonce(sender_state) expected_transferred_amount = current_transferred_amount + lock.amount is_valid, signature_msg = is_valid_signature( received_balance_proof, sender_state.address, ) # TODO: Accept unlock messages if the node has not yet sent a transaction # with the balance proof to the blockchain, this will save one call to # withdraw on-chain for the non-closing party. if get_status(channel_state) != CHANNEL_STATE_OPENED: msg = 'Invalid Unlock message for {}. The channel is already closed.'.format( hexlify(unlock.hashlock).decode(), ) result = (False, msg, None) elif lock is None: msg = 'Invalid Secret message. There is no correspoding lock for {}'.format( hexlify(unlock.hashlock).decode(), ) result = (False, msg, None) elif not is_valid: # The signature must be valid, otherwise the balance proof cannot be # used onchain. msg = 'Invalid Secret message. 
{}'.format(signature_msg) result = (False, msg, None) elif received_balance_proof.nonce != expected_nonce: # The nonces must increase sequentially, otherwise there is a # synchronization problem. msg = ( 'Invalid Secret message. ' 'Nonce did not change sequentially, expected: {} got: {}.' ).format( expected_nonce, received_balance_proof.nonce, ) result = (False, msg, None) elif received_balance_proof.locksroot != locksroot_without_lock: # Secret messages remove a known lock, the new locksroot must have only # that lock removed, otherwise the sender may be trying to remove # additional locks. msg = ( "Invalid Secret message. " "Balance proof's locksroot didn't match, expected: {} got: {}." ).format( hexlify(locksroot_without_lock).decode(), hexlify(received_balance_proof.locksroot).decode(), ) result = (False, msg, None) elif received_balance_proof.transferred_amount != expected_transferred_amount: # Secret messages must increase the transferred_amount by lock amount, # otherwise the sender is trying to play the protocol and steal token. msg = ( "Invalid Secret message. " "Balance proof's wrong transferred_amount, expected: {} got: {}." ).format( expected_transferred_amount, received_balance_proof.transferred_amount, ) result = (False, msg, None) elif received_balance_proof.channel_address != channel_state.identifier: # The balance proof must be tied to this channel, otherwise the # on-chain contract would be sucesstible
# NOTE(review): original newlines were lost in extraction — many statements and
# several `def`s are collapsed onto single physical lines below, so this text is
# not runnable as-is; bytes are preserved unchanged.  It is the interior of a
# Tkinter form class: item-63/64 laboratory fields, the "Conclusao" window
# (items 65-70), a confirmation dialog, user CRUD helpers backed by a `Usuarios`
# model, a login handler, a main menu, three histogram plots fed from sqlite via
# a CSV round-trip, and the start of `cadastraFicha`.
# NOTE(review): every `self.dados_laboratoriais.append(self.txt....get())` calls
# `.get()` at widget-construction time, so it records the (empty) initial Entry
# value, not what the user later types — presumably `cadastraFicha` is the real
# collection point; confirm against the full file.
self.container47['pady'] = 8 self.container47.pack() self.lblc63_1_1 = Label(self.container47) self.lblc63_1_1['text'] = ('63 - Agente Etiológico - RT-PCR/outro método por Biologia Molecular ' + 'Positivo para Influenza? (1 - Sim, 2 - Não, 9 - Ignorado): ') self.lblc63_1_1.pack(side=LEFT) self.txtc63_1_1 = Entry(self.container47) self.txtc63_1_1['width'] = 2 self.txtc63_1_1.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_1_1.get()) self.lblc63_1_2 = Label(self.container47) self.lblc63_1_2['text'] = 'Se sim, qual influenza? (1 - Influenza A, 2 - Influenza B):' self.lblc63_1_2.pack(side=LEFT) self.txtc63_1_2 = Entry(self.container47) self.txtc63_1_2['width'] = 2 self.txtc63_1_2.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_1_2.get()) self.container48 = Frame(janela_dados_laboratoriais) self.container48['padx'] = 10 self.container48['pady'] = 8 self.container48.pack() self.lblc63_2 = Label(self.container48) self.lblc63_2['text'] = ('Influenza A, qual subtipo? ' + '(1 - Influenza A(H1N1)pdm09, 2 - Influenza A/H3N2, ' + '3 - Influenza A não subtipado, 4 - Influenza A não subtipável, ' + '5 - Inconclusivo, 6 - Outro, especifique):') self.lblc63_2.pack(side=LEFT) self.txtc63_2 = Entry(self.container48) self.txtc63_2['width'] = 10 self.txtc63_2.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_2.get()) self.container50 = Frame(janela_dados_laboratoriais) self.container50['padx'] = 10 self.container50['pady'] = 8 self.container50.pack() self.lblc63_3 = Label(self.container50) self.lblc63_3['text'] = ('Influenza B, qual subtipo? ' + '(1 - Victoria, 2 - Yamagatha, 3 - Não realizado, 4 - Inconclusivo, 5 - Outro, especifique):' ) self.lblc63_3.pack(side=LEFT) self.txtc63_3 = Entry(self.container50) self.txtc63_3['width'] = 10 self.txtc63_3.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_3.get()) self.lblc63_4 = Label(self.container50) self.lblc63_4['text'] = 'Positivo para outro vírus? 
(1 - Sim, 2 - Não, 9 - Ignorado):' self.lblc63_4.pack(side=LEFT) self.txtc63_4 = Entry(self.container50) self.txtc63_4['width'] = 2 self.txtc63_4.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_4.get()) self.container51 = Frame(janela_dados_laboratoriais) self.container51['padx'] = 10 self.container51['pady'] = 8 self.container51.pack() self.lblc63_5_titulo = Label(self.container51) self.lblc63_5_titulo['text'] = ('Se outros vírus respiratórios, qual (is)? (<NAME>): ') self.lblc63_5_1 = Label(self.container51) self.lblc63_5_1['text'] = 'SARS-CoV-2:' self.lblc63_5_1.pack(side=LEFT) self.txtc63_5_1 = Entry(self.container51) self.txtc63_5_1['width'] = 2 self.txtc63_5_1.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_1.get()) self.lblc63_5_2 = Label(self.container51) self.lblc63_5_2['text'] = 'Vírus Sincicial Respiratório: ' self.lblc63_5_2.pack(side=LEFT) self.txtc63_5_2 = Entry(self.container51) self.txtc63_5_2['width'] = 2 self.txtc63_5_2.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_2.get()) self.lblc63_5_3 = Label(self.container51) self.lblc63_5_3['text'] = 'Parainfluenza 1: ' self.lblc63_5_3.pack(side=LEFT) self.txtc63_5_3 = Entry(self.container51) self.txtc63_5_3['width'] = 2 self.txtc63_5_3.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_3.get()) self.lblc63_5_4 = Label(self.container51) self.lblc63_5_4['text'] = 'Parainfluenza 2: ' self.lblc63_5_4.pack(side=LEFT) self.txtc63_5_4 = Entry(self.container51) self.txtc63_5_4['width'] = 2 self.txtc63_5_4.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_4.get()) self.lblc63_5_5 = Label(self.container51) self.lblc63_5_5['text'] = 'Parainfluenza 3: ' self.lblc63_5_5.pack(side=LEFT) self.txtc63_5_5 = Entry(self.container51) self.txtc63_5_5['width'] = 2 self.txtc63_5_5.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_5.get()) self.lblc63_5_6 = Label(self.container51) self.lblc63_5_6['text'] = 'Parainfluenza 4: ' 
# item 63.5 continued: one Label + width-2 Entry per respiratory virus
# (Parainfluenza 4, Adenovirus, Metapneumovirus, Bocavirus, Rhinovirus, other).
self.lblc63_5_6.pack(side=LEFT) self.txtc63_5_6 = Entry(self.container51) self.txtc63_5_6['width'] = 2 self.txtc63_5_6.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_6.get()) self.lblc63_5_7 = Label(self.container51) self.lblc63_5_7['text'] = 'Adenovírus: ' self.lblc63_5_7.pack(side=LEFT) self.txtc63_5_7 = Entry(self.container51) self.txtc63_5_7['width'] = 2 self.txtc63_5_7.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_7.get()) self.lblc63_5_8 = Label(self.container51) self.lblc63_5_8['text'] = 'Metapneumovírus: ' self.lblc63_5_8.pack(side=LEFT) self.txtc63_5_8 = Entry(self.container51) self.txtc63_5_8['width'] = 2 self.txtc63_5_8.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_8.get()) self.lblc63_5_9 = Label(self.container51) self.lblc63_5_9['text'] = 'Bocavírus: ' self.lblc63_5_9.pack(side=LEFT) self.txtc63_5_9 = Entry(self.container51) self.txtc63_5_9['width'] = 2 self.txtc63_5_9.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_9.get()) self.lblc63_5_10 = Label(self.container51) self.lblc63_5_10['text'] = 'Rinovírus: ' self.lblc63_5_10.pack(side=LEFT) self.txtc63_5_10 = Entry(self.container51) self.txtc63_5_10['width'] = 2 self.txtc63_5_10.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_10.get()) self.lblc63_5_11 = Label(self.container51) self.lblc63_5_11['text'] = 'Outro vírus respiratório, especifique: ' self.lblc63_5_11.pack(side=LEFT) self.txtc63_5_11 = Entry(self.container51) self.txtc63_5_11['width'] = 10 self.txtc63_5_11.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc63_5_11.get()) self.container52 = Frame(janela_dados_laboratoriais) self.container52['padx'] = 10 self.container52['pady'] = 8 self.container52.pack() self.lblc64_1 = Label(self.container52) self.lblc64_1['text'] = '64 - Laboratório que realizou RT-PCR/outro método por biologia molecular: ' self.lblc64_1.pack(side=LEFT) self.txtc64_1 = Entry(self.container52) self.txtc64_1['width'] = 25 
# item 64 continued (lab name + CNES code), then a navigation Button whose
# command opens the next window, `janela_Conclusao`, defined on this same line.
self.txtc64_1.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc64_1.get()) self.lblc64_2 = Label(self.container52) self.lblc64_2['text'] = 'Código (CNES): ' self.lblc64_2.pack(side=LEFT) self.txtc64_2 = Entry(self.container52) self.txtc64_2['width'] = 15 self.txtc64_2.pack(side=LEFT) self.dados_laboratoriais.append(self.txtc64_2.get()) self.container_extra7 = Frame(janela_dados_laboratoriais) self.container_extra7["padx"] = 20 self.container_extra7["pady"] = 8 self.container_extra7.pack() self.proxima_janela = Button(self.container_extra7) self.proxima_janela['text'] = 'Clique para ir para a próxima janela' self.proxima_janela['command'] = self.janela_Conclusao self.proxima_janela.pack() def janela_Conclusao(self): janela_dados_conclusao = Toplevel(root) janela_dados_conclusao.title("Sivep Ficha") self.dados_conclusao = [] self.container53 = Frame(janela_dados_conclusao) self.container53['padx'] = 10 self.container53['pady'] = 8 self.container53.pack() self.lbl_titulo = Label(self.container53) self.lbl_titulo['text'] = 'Conclusão' self.lbl_titulo['font'] = self.fonte_titulo self.lbl_titulo.pack() self.container54 = Frame(janela_dados_conclusao) self.container54['padx'] = 10 self.container54['pady'] = 8 self.container54.pack() self.lblc65 = Label(self.container54) self.lblc65['text'] = ('65 - Classificação final do caso) ' + '(1 - SRAG por influenza, 2 - SRAG por vírus respiratório, ' + '3 - SRAG por outro agente etiológico (especifique), ' + '4 - SRAG não especificado, 5 - COVID-19): ') self.lblc65.pack(side=LEFT) self.txtc65 = Entry(self.container54) self.txtc65['width'] = 15 self.txtc65.pack(side=LEFT) self.dados_conclusao.append(self.txtc65.get()) self.container55 = Frame(janela_dados_conclusao) self.container55['padx'] = 10 self.container55['pady'] = 8 self.container55.pack() self.lblc66 = Label(self.container55) self.lblc66['text'] = ('66 - Critério de Encerramento: ' + '(1 - Laboratorial, 2 - Vínculo-Epidemiológico, 3 - Clínico): ') 
# janela_Conclusao continued: items 66-70 (closure criterion, case evolution,
# discharge/death date, closure date, free-text observations) each appended to
# self.dados_conclusao at construction time (same caveat as above).
self.lblc66.pack(side=LEFT) self.txtc66 = Entry(self.container55) self.txtc66['width'] = 2 self.txtc66.pack(side=LEFT) self.dados_conclusao.append(self.txtc66.get()) self.lblc67 = Label(self.container55) self.lblc67['text'] = '67 - Evolução do caso: (1 - Cura, 2 - Óbito, 3 - Ignorado): ' self.lblc67.pack(side=LEFT) self.txtc67 = Entry(self.container55) self.txtc67['width'] = 2 self.txtc67.pack(side=LEFT) self.dados_conclusao.append(self.txtc67.get()) self.container56 = Frame(janela_dados_conclusao) self.container56['padx'] = 10 self.container56['pady'] = 8 self.container56.pack() self.lblc68 = Label(self.container56) self.lblc68['text'] = 'Data de alta ou óbito' self.lblc68.pack(side=LEFT) self.txtc68 = Entry(self.container56) self.txtc68['width'] = 8 self.txtc68.pack(side=LEFT) self.dados_conclusao.append(self.txtc68.get()) self.lblc69 = Label(self.container56) self.lblc69['text'] = 'Data do encerramento' self.lblc69.pack(side=LEFT) self.txtc69 = Entry(self.container56) self.txtc69['width'] = 8 self.txtc69.pack(side=LEFT) self.dados_conclusao.append(self.txtc69.get()) self.container57 = Frame(janela_dados_conclusao) self.container57['padx'] = 10 self.container57['pady'] = 8 self.container57.pack() self.lblc70 = Label(self.container57) self.lblc70['text'] = 'Observações' self.lblc70.pack(side=LEFT) self.txtc70 = Entry(self.container57) self.txtc70.pack(side=LEFT) self.dados_conclusao.append(self.txtc70.get()) self.container_extra8 = Frame(janela_dados_conclusao) self.container_extra8["padx"] = 20 self.container_extra8["pady"] = 8 self.container_extra8.pack() self.cadastrar = Button(self.container_extra8) self.cadastrar['text'] = 'Seguir para confirmação?' 
# Next line: confirmation dialog + user CRUD methods.
# NOTE(review): `self.btn_nao["command"] = print('QUER SIM')` runs print()
# immediately and assigns None as the command — almost certainly a bug; it
# should be a callable, e.g. a lambda or the window's destroy method.
# NOTE(review): `self.tx<PASSWORD>ha` in excluirUsuario is redaction residue in
# this extract; presumably `self.txtsenha` in the real file — confirm.
self.cadastrar['command'] = self.janela_confirma self.cadastrar.pack() def janela_confirma(self): janela_final_final = Toplevel(root) janela_final_final.title('Confirimação') self.cont_1_fim = Frame(janela_final_final) self.cont_1_fim.pack() self.lblfimdofim = Label(self.cont_1_fim, text="Deseja concluir o cadastro?") self.lblfimdofim.pack() self.cont_2_fim = Frame(janela_final_final) self.cont_2_fim.pack() self.cont_3_fim = Frame(janela_final_final) self.cont_3_fim.pack() self.btn_sim = Button(self.cont_3_fim) self.btn_sim['text'] = 'Sim' self.btn_sim["command"] = self.cadastraFicha self.btn_sim.pack(side=RIGHT) self.btn_nao = Button(self.cont_2_fim) self.btn_nao['text'] = 'Não' self.btn_nao["command"] = print('QUER SIM') self.btn_nao.pack(side=LEFT) self.container_4_fim = Frame(janela_final_final) self.container_4_fim["padx"] = 20 self.container_4_fim["pady"] = 8 self.container_4_fim.pack() self.lblmsg = Label(self.container_4_fim) self.lblmsg["text"] = " " self.lblmsg["font"] = ("Verdana", "10", "bold") self.lblmsg.pack() def cadastrarUsuario(self): user = Usuarios() user.usuario = self.txtusuario.get() user.senha = self.txtsenha.get() user.nome = self.txtnome.get() user.crm = self.txtcrm.get() self.lblmsg["text"] = user.insertUser() self.txtusuario.delete(0, END) self.txtsenha.delete(0, END) self.txtnome.delete(0, END) self.txtcrm.delete(0, END) def alterarUsuario(self): user = Usuarios() user.usuario = self.txtusuario.get() user.senha = self.txtsenha.get() user.nome = self.txtnome.get() user.crm = self.txtcrm.get() self.lblmsg["text"] = user.updateUser() self.txtusuario.delete(0, END) self.txtsenha.delete(0, END) self.txtnome.delete(0, END) self.txtcrm.delete(0, END) def excluirUsuario(self): user = Usuarios() user.usuario = self.txtusuario.get() user.senha = self.tx<PASSWORD>ha.get() user.nome = self.txtnome.get() user.crm = self.txtcrm.get() self.lblmsg["text"] = user.deleteUser() self.txtusuario.delete(0, END) self.txtsenha.delete(0, END) 
self.txtnome.delete(0, END) self.txtcrm.delete(0, END) def buscarUsuario(self): user = Usuarios() user.usuario = self.txtusuario.get() self.lblmsg["text"] = user.selectUser() self.txtusuario.delete(0, END) self.txtusuario.insert(INSERT, user.usuario) self.txtsenha.delete(0, END) # self.txtsenha.insert(INSERT, user.senha) self.txtnome.delete(0, END) self.txtnome.insert(INSERT, user.nome) self.txtcrm.delete(0, END) self.txtcrm.insert(INSERT, user.crm) def efetuaLogin(self): user = Usuarios() user.usuario = self.txtusuario.get() user.senha = self.txtsenha.get() self.lblmsg["text"] = user.verifyLogin() self.txtusuario.delete(0, END) self.txtsenha.delete(0, END) self.txtnome.delete(0, END) self.txtcrm.delete(0, END) if self.lblmsg["text"] == "Login bem sucedido!": self.janelaMenu() else: return def janelaMenu(self): janela_menu = Toplevel(root) janela_menu.title("Menu Principal") self.container_extra9 = Frame(janela_menu) self.container_extra9["pady"] = 10 self.container_extra9.pack() self.proxima_janela = Button(self.container_extra9) self.proxima_janela['text'] = 'Clique para realizar novo preenchimento de ficha' self.proxima_janela['command'] = self.janela_dadosPaciente self.proxima_janela.pack() self.container_extra10 = Frame(janela_menu) self.container_extra10["pady"] = 10 self.container_extra10.pack() self.proxima_janela = Button(self.container_extra10) self.proxima_janela['text'] = 'Clique para visualizar histograma por idade' self.proxima_janela['command'] = self.plot_histograma_idade self.proxima_janela.pack() self.container_extra11 = Frame(janela_menu) self.container_extra11["pady"] = 10 self.container_extra11.pack() self.proxima_janela = Button(self.container_extra11) self.proxima_janela['text'] = 'Clique para visualizar histograma por sexo' self.proxima_janela['command'] = self.plot_histograma_sexo self.proxima_janela.pack() self.container_extra12 = Frame(janela_menu) self.container_extra12["pady"] = 10 self.container_extra12.pack() self.proxima_janela = 
Button(self.container_extra12) self.proxima_janela['text'] = 'Clique para visualizar histograma por estado' self.proxima_janela['command'] = self.plot_histograma_estado self.proxima_janela.pack() def plot_histograma_idade(self): self.impsql3 = sqlite3.connect('ficha.db') self.sql3_cursor = self.impsql3.cursor() self.sql3_cursor.execute('SELECT * FROM ficha') with open('ficha.csv','w') as out_csv_file: csv_out = csv.writer(out_csv_file) csv_out.writerow([d[0] for d in self.sql3_cursor.description]) for result in self.sql3_cursor: csv_out.writerow(result) self.impsql3.close() self.df = pd.read_csv('ficha.csv') plt.show(self.df['c10_1'].hist()) def plot_histograma_sexo(self): self.impsql3 = sqlite3.connect('ficha.db') self.sql3_cursor = self.impsql3.cursor() self.sql3_cursor.execute('SELECT * FROM ficha') with open('ficha.csv','w') as out_csv_file: csv_out = csv.writer(out_csv_file) csv_out.writerow([d[0] for d in self.sql3_cursor.description]) for result in self.sql3_cursor: csv_out.writerow(result) self.impsql3.close() self.df = pd.read_csv('ficha.csv') plt.show(self.df['c8'].hist()) def plot_histograma_estado(self): self.impsql3 = sqlite3.connect('ficha.db') self.sql3_cursor = self.impsql3.cursor() self.sql3_cursor.execute('SELECT * FROM ficha') with open('ficha.csv','w') as out_csv_file: csv_out = csv.writer(out_csv_file) csv_out.writerow([d[0] for d in self.sql3_cursor.description]) for result in self.sql3_cursor: csv_out.writerow(result) self.impsql3.close() self.df = pd.read_csv('ficha.csv') plt.show(self.df['c3'].hist()) def cadastraFicha(self): ficha = Ficha() self.lista = [] self.lista.append(self.txtc1.get()) self.lista.append(self.txtc2.get()) self.lista.append(self.txtc3.get()) self.lista.append(self.txtc4_1.get()) self.lista.append(self.txtc4_2.get()) self.lista.append(self.txtc5_1.get()) self.lista.append(self.txtc5_2.get()) self.lista.append(self.txtc6.get()) self.lista.append(self.txtc7.get()) self.lista.append(self.txtc8.get()) 
# cadastraFicha continued: reads every form Entry (c9 .. c36_14_2) in order
# into self.lista; the three plot_* methods above each re-dump the whole
# `ficha` table to ficha.csv — NOTE(review): the identical dump logic is
# triplicated and the sqlite connection is not closed on error; a shared
# helper with try/finally (or `with`) would be safer.
self.lista.append(self.txtc9.get()) self.lista.append(self.txtc10_1.get()) self.lista.append(self.txtc10_2.get()) self.lista.append(self.txtc11.get()) self.lista.append(self.txtc12.get()) self.lista.append(self.txtc13.get()) self.lista.append(self.txtc14.get()) self.lista.append(self.txtc15.get()) self.lista.append(self.txtc16.get()) self.lista.append(self.txtc17.get()) self.lista.append(self.txtc18.get()) self.lista.append(self.txtc19_1.get()) self.lista.append(self.txtc19_2.get()) self.lista.append(self.txtc20.get()) self.lista.append(self.txtc21.get()) self.lista.append(self.txtc22.get()) self.lista.append(self.txtc23.get()) self.lista.append(self.txtc24.get()) self.lista.append(self.txtc25.get()) self.lista.append(self.txtc26.get()) self.lista.append(self.txtc27.get()) self.lista.append(self.txtc28.get()) self.lista.append(self.txtc29.get()) self.lista.append(self.txtc30.get()) self.lista.append(self.txtc31.get()) self.lista.append(self.txtc32.get()) self.lista.append(self.txtc33.get()) self.lista.append(self.txtc34.get()) self.lista.append(self.txtc35_1.get()) self.lista.append(self.txtc35_2.get()) self.lista.append(self.txtc35_3.get()) self.lista.append(self.txtc35_4.get()) self.lista.append(self.txtc35_5.get()) self.lista.append(self.txtc35_6.get()) self.lista.append(self.txtc35_7.get()) self.lista.append(self.txtc35_8.get()) self.lista.append(self.txtc35_9.get()) self.lista.append(self.txtc36_1.get()) self.lista.append(self.txtc36_2.get()) self.lista.append(self.txtc36_3.get()) self.lista.append(self.txtc36_4.get()) self.lista.append(self.txtc36_5.get()) self.lista.append(self.txtc36_6.get()) self.lista.append(self.txtc36_7.get()) self.lista.append(self.txtc36_8.get()) self.lista.append(self.txtc36_9.get()) self.lista.append(self.txtc36_10.get()) self.lista.append(self.txtc36_11.get()) self.lista.append(self.txtc36_12.get()) self.lista.append(self.txtc36_13.get()) self.lista.append(self.txtc36_14_1.get()) self.lista.append(self.txtc36_14_2.get()) 
# cadastraFicha continued: fields c37 .. c63_5_4.
self.lista.append(self.txtc37.get()) self.lista.append(self.txtc38_1.get()) self.lista.append(self.txtc38_2.get()) self.lista.append(self.txtc38_3.get()) self.lista.append(self.txtc38_4.get()) self.lista.append(self.txtc38_5.get()) self.lista.append(self.txtc38_6.get()) self.lista.append(self.txtc38_7.get()) self.lista.append(self.txtc39.get()) self.lista.append(self.txtc40.get()) self.lista.append(self.txtc41.get()) self.lista.append(self.txtc42.get()) self.lista.append(self.txtc43.get()) self.lista.append(self.txtc44.get()) self.lista.append(self.txtc45_1.get()) self.lista.append(self.txtc45_2.get()) self.lista.append(self.txtc46_1.get()) self.lista.append(self.txtc46_2.get()) self.lista.append(self.txtc47.get()) self.lista.append(self.txtc48.get()) self.lista.append(self.txtc49.get()) self.lista.append(self.txtc50.get()) self.lista.append(self.txtc51.get()) self.lista.append(self.txtc52.get()) self.lista.append(self.txtc53.get()) self.lista.append(self.txtc54.get()) self.lista.append(self.txtc55.get()) self.lista.append(self.txtc56.get()) self.lista.append(self.txtc57.get()) self.lista.append(self.txtc58.get()) self.lista.append(self.txtc59_1.get()) self.lista.append(self.txtc59_2.get()) self.lista.append(self.txtc59_3.get()) self.lista.append(self.txtc59_4.get()) self.lista.append(self.txtc59_5.get()) self.lista.append(self.txtc59_6.get()) self.lista.append(self.txtc59_7.get()) self.lista.append(self.txtc59_8.get()) self.lista.append(self.txtc59_9.get()) self.lista.append(self.txtc60_1.get()) self.lista.append(self.txtc60_2.get()) self.lista.append(self.txtc61.get()) self.lista.append(self.txtc62.get()) self.lista.append(self.txtc63_1_1.get()) self.lista.append(self.txtc63_1_2.get()) self.lista.append(self.txtc63_2.get()) self.lista.append(self.txtc63_3.get()) self.lista.append(self.txtc63_4.get()) self.lista.append(self.txtc63_5_1.get()) self.lista.append(self.txtc63_5_2.get()) self.lista.append(self.txtc63_5_3.get()) self.lista.append(self.txtc63_5_4.get()) 
# cadastraFicha is truncated here in this extract (continues past this view).
self.lista.append(self.txtc63_5_5.get())
# NOTE(review): unrelated chunk (a CIL -> MIPS code generator, COOL compiler
# backend) concatenated into this extract with its original newlines lost;
# bytes preserved unchanged.  These are `@visit.register` single-dispatch
# visitor methods that emit MIPS instruction objects via
# self.register_instruction().  String instances observed here are laid out as:
# offset 0 = type pointer, 4 = "String_start" dispatch pointer, 8 = type
# offset, 12 = char-buffer pointer, 16 = length (20 bytes total).
# The first two statements below are the tail of a visitor whose header is
# outside this view.
self.register_instruction(SW(reg, "0($sp)")) self.used_registers[reg] = False @visit.register def _(self, node: cil.ReturnNode): # Generar dos formas de codigo en dependencia de si se devuelve un valor o no self.add_source_line_comment(node) val = node.value if val is not None: if isinstance(val, LocalNode): src = self.get_location_address(val) # almacenar el resultado en $v0 self.register_instruction(LW(v0, src)) elif isinstance(val, int): # val es una constante self.register_instruction(LI(v0, val)) else: # val es un str que representa la direccion # de un hardcoded string en la seccion .DATA self.register_instruction(LW(v0, val)) # Liberar el marco de pila assert self.current_function is not None self.deallocate_stack_frame(self.current_function) # Liberar el espacio de los argumentos de la funcion self.deallocate_args(self.current_function) # salir del llamado de la funcion self.register_instruction(branchNodes.JR(ra)) @visit.register def _(self, node: cil.LoadNode): dest = self.visit(node.dest) assert dest is not None # message es un string, asi que solo hay que cargar el puntero a dicho string reg = self.get_available_register() assert reg is not None, "Out of registers" self.add_source_line_comment(node) self.register_instruction(LA(reg, node.message.name)) self.register_instruction(SW(reg, dest)) self.used_registers[reg] = False ## NODOS REFERENTES A OPERACIONES BUILT-IN DE COOL ## @visit.register def _(self, node: cil.SubstringNode): dest = self.visit(node.dest) assert dest is not None l = self.visit(node.l) r = self.visit(node.r) assert l is not None assert r is not None reg = self.get_available_register() reg2 = self.get_available_register() temp = self.get_available_register() size_reg = self.get_available_register() assert reg is not None assert reg2 is not None assert temp is not None assert size_reg is not None # Cargar el string sobre el que se llama substr self.register_instruction(LW(reg, "12($s1)")) # Hacer que reg apunte al inicio del substr if 
isinstance(l, int): self.register_instruction(ADDU(reg, reg, l, True)) else: self.register_instruction(LW(temp, l)) # Cargar el valor self.register_instruction(LW(temp, f"12(${REG_TO_STR[temp]})")) self.register_instruction(ADDU(reg, reg, temp)) if isinstance(r, int): self.register_instruction(LI(a0, r)) else: self.register_instruction(LW(a0, r)) # cargar el valor self.register_instruction(LW(a0, f"12($a0)")) self.register_instruction(MOVE(size_reg, a0)) self.register_instruction(MOVE(reg2, a0)) # Agregar un byte mas para el fin de cadena self.register_instruction(ADDU(a0, a0, 1, True)) # $v0 = 9 (syscall 9 = sbrk) self.register_instruction(LI(v0, 9)) self.register_instruction(SYSCALL()) self.register_instruction(MOVE(temp, v0)) # Mientras reg != reg2 : Copiar a v0 self.register_instruction(Label("substr_loop")) self.register_instruction(BEQZ(reg2, "substr_end")) # Copiar un byte self.register_instruction(LB(a0, f"0(${REG_TO_STR[reg]})")) self.register_instruction(SB(a0, f"0(${REG_TO_STR[temp]})")) # Mover el puntero temp y el puntero reg self.register_instruction(ADDU(reg, reg, 1, True)) self.register_instruction(ADDU(temp, temp, 1, True)) # Restar del contador self.register_instruction(SUBU(reg2, reg2, 1, True)) # Saltar al ciclo while self.register_instruction(J("substr_loop")) # Salir del ciclo self.register_instruction(Label("substr_end")) # Agregar el null al final de la cadena self.register_instruction(SB(zero, f"0(${REG_TO_STR[temp]})")) # v0 contiene el substr self.register_instruction(MOVE(reg2, v0)) # Crear la instancia de str size = 20 # Reservar memoria para el tipo self.allocate_memory(size) self.comment("Allocating string") # Inicializar la instancia self.register_instruction(LA(reg, "String")) self.register_instruction(SW(reg, "0($v0)")) self.register_instruction(LA(reg, "String_start")) self.register_instruction(SW(reg, "4($v0)")) # Cargar el offset del tipo self.comment("Load type offset") offset = next(i for i, t in enumerate(self.mips_types) if 
t == "String") * 4 self.register_instruction(LI(reg, offset)) self.register_instruction(SW(reg, "8($v0)")) # Copiar el str en v0 al atributo value de la instancia self.register_instruction(SW(reg2, "12($v0)")) self.register_instruction(SW(size_reg, "16($v0)")) # devolver la instancia self.register_instruction(SW(v0, dest)) self.used_registers[reg] = False self.used_registers[reg2] = False self.used_registers[temp] = False self.used_registers[size_reg] = False @visit.register def _(self, node: ConcatString): self.add_source_line_comment(node) # Obtener los strings a concatenar dest = self.visit(node.dest) s = self.visit(node.s) assert s is not None assert dest is not None reg = self.get_available_register() reg2 = self.get_available_register() temp = self.get_available_register() size_reg = self.get_available_register() byte = self.get_available_register() assert ( reg is not None and reg2 is not None and temp is not None and size_reg is not None and byte is not None ) # Get Strings length self.comment("Get first string length from self") self.register_instruction(LW(reg, f"16($s1)")) # Obtener el segundo string self.comment("Get second string length from param") self.register_instruction(LW(v0, s)) self.register_instruction(LW(reg2, "16($v0)")) self.comment("Save new string length in a0 for memory allocation") self.register_instruction(ADDU(a0, reg, reg2)) self.register_instruction(MOVE(size_reg, a0)) # Obtener el primer string desde self self.comment("Get first string from self") self.register_instruction(LW(reg, f"12($s1)")) # Obtener el segundo string self.comment("Get second string from param") self.register_instruction(LW(reg2, "12($v0)")) # Reservar memoria para el nuevo buffer # $v0 = 9 (syscall 9 = sbrk) self.register_instruction(ADDU(a0, a0, 4, True)) self.register_instruction(LI(v0, 9)) self.register_instruction(SYSCALL()) # mover v0 a un puntero temporal que podamos mover self.register_instruction(MOVE(temp, v0)) # Hacer 0 el registro byte 
# ConcatString continued: two byte-copy loops (concat_loop1/concat_loop2)
# copy self's buffer then the parameter's buffer into the freshly sbrk'd
# buffer, null-terminate, then build a 20-byte String instance around it.
self.register_instruction(MOVE(byte, zero)) # while [reg] != 0: copy to temp self.register_instruction(Label("concat_loop1")) self.comment(f"Compare {REG_TO_STR[reg]} with \\0") self.register_instruction(LB(byte, f"0(${REG_TO_STR[reg]})")) self.register_instruction(BEQZ(byte, "concat_loop1_end")) # Copiar el byte hacia temp y aumentar en 1 self.comment("Copy 1 byte") self.register_instruction(SB(byte, f"0(${REG_TO_STR[temp]})")) self.register_instruction(ADDU(temp, temp, 1, True)) self.register_instruction(ADDU(reg, reg, 1, True)) self.register_instruction(J("concat_loop1")) self.register_instruction(Label("concat_loop1_end")) # Copiar el segundo string self.comment("Copy second string") # while [reg2] != 0: copy to temp self.register_instruction(Label("concat_loop2")) self.comment(f"Compare {REG_TO_STR[reg2]} with \\0") self.register_instruction(LB(byte, f"0(${REG_TO_STR[reg2]})")) self.register_instruction(BEQZ(byte, "concat_loop2_end")) # Copiar el byte hacia temp y aumentar en 1 self.comment("Copy 1 byte") self.register_instruction(SB(byte, f"0(${REG_TO_STR[temp]})")) self.register_instruction(ADDU(temp, temp, 1, True)) self.register_instruction(ADDU(reg2, reg2, 1, True)) self.register_instruction(J("concat_loop2")) self.register_instruction(Label("concat_loop2_end")) # Agregar el caracter null al final self.register_instruction(SB(zero, f"0(${REG_TO_STR[temp]})")) # v0 contiene el string concatenado self.comment("v0 contains resulting string") self.register_instruction(MOVE(reg2, v0)) # Crear la instancia de str size = 20 # Reservar memoria para el tipo self.allocate_memory(size) self.comment("Allocating string") # Inicializar la instancia self.register_instruction(LA(reg, "String")) self.register_instruction(SW(reg, "0($v0)")) self.register_instruction(LA(reg, "String_start")) self.register_instruction(SW(reg, "4($v0)")) # Cargar el offset del tipo self.comment("Load type offset") offset = next(i for i, t in enumerate(self.mips_types) if t == "String") * 4 
# ConcatString epilogue, then AbortNode (syscall 4 prints, syscall 10 exits)
# and the start of ReadNode (syscall 8 reads into a 1024-byte buffer).
self.register_instruction(LI(reg, offset)) self.register_instruction(SW(reg, "8($v0)")) # Copiar el str en v0 al atributo value de la instancia self.register_instruction(SW(reg2, "12($v0)")) self.register_instruction(SW(size_reg, "16($v0)")) # devolver la instancia self.register_instruction(SW(v0, dest)) self.used_registers[reg] = False self.used_registers[reg2] = False self.used_registers[temp] = False self.used_registers[size_reg] = False self.used_registers[byte] = False @visit.register def _(self, node: AbortNode): # Cargar el puntero al tipo de self src = self.visit(node.src) self.register_instruction(LA(a0, node.abortion.name)) # Print abortion self.register_instruction(LI(v0, 4)) self.register_instruction(SYSCALL()) self.register_instruction(LW(a0, src)) self.register_instruction(LI(v0, 4)) self.register_instruction(SYSCALL()) self.register_instruction(LA(a0, node.nl.name)) self.register_instruction(LI(v0, 4)) self.register_instruction(SYSCALL()) self.register_instruction(LI(v0, 10)) self.register_instruction(SYSCALL()) @visit.register def _(self, node: cil.ReadNode): dest = self.visit(node.dest) assert dest is not None size = 1024 # Max string length in COOL reg = self.get_available_register() reg2 = self.get_available_register() length = self.get_available_register() temp = self.get_available_register() assert reg is not None assert length is not None assert temp is not None assert reg2 is not None # Declarar un buffer para el string self.allocate_memory(size) self.register_instruction(MOVE(reg, v0)) # Mover la direccion del buffer a0 self.register_instruction(MOVE(a0, v0)) # declarar el espacio disponible en el buffer self.register_instruction(LI(a1, size)) # syscall 8 = read_int self.register_instruction(LI(v0, 8)) self.register_instruction(SYSCALL()) # reg contiene el string. # Crear la instancia del tipo string. 
# ReadNode continued: count the string length, strip the trailing newline,
# then wrap the buffer in a String instance; TypeName begins at the end of
# this line (its header comment is split across the line break).
# Primero calcular el length del string self.register_instruction(MOVE(length, zero)) self.register_instruction(MOVE(temp, zero)) self.register_instruction(MOVE(reg2, reg)) self.register_instruction(LB(temp, f"0(${REG_TO_STR[reg2]})")) self.register_instruction(BEQZ(temp, "end_loop")) self.register_instruction(Label("read_length_loop")) # while [reg2] != 0: length ++ self.register_instruction(LB(temp, f"0(${REG_TO_STR[reg2]})")) self.register_instruction(BEQZ(temp, "end_read_length_loop")) self.register_instruction(ADDU(reg2, reg2, 1, True)) self.register_instruction(ADDU(length, length, 1, True)) self.register_instruction(J("read_length_loop")) self.register_instruction(Label("end_read_length_loop")) # Remove new line self.register_instruction(SUBU(reg2, reg2, 1, True)) self.register_instruction(SB(zero, f"0(${REG_TO_STR[reg2]})")) self.register_instruction(SUBU(length, length, 1, True)) self.register_instruction(Label("end_loop")) # length contiene el length del string # Crear la instancia size = 20 # Reservar memoria para el tipo self.allocate_memory(size) self.comment("Allocating string") # Inicializar la instancia self.register_instruction(LA(reg2, "String")) self.register_instruction(SW(reg2, "0($v0)")) self.register_instruction(LA(reg2, "String_start")) self.register_instruction(SW(reg2, "4($v0)")) # Cargar el offset del tipo self.comment("Load type offset") offset = next(i for i, t in enumerate(self.mips_types) if t == "String") * 4 self.register_instruction(LI(reg2, offset)) self.register_instruction(SW(reg2, "8($v0)")) self.register_instruction(SW(reg, "12($v0)")) self.register_instruction(SW(length, "16($v0)")) # devolver la instancia self.register_instruction(SW(v0, dest)) self.used_registers[reg] = False self.used_registers[reg2] = False self.used_registers[temp] = False self.used_registers[length] = False @visit.register def _(self, node: cil.TypeName): dest = self.visit(node.dest) assert dest is not None # Cargar el puntero al objeto que se esta 
llamando reg = self.get_available_register() assert reg is not None self.register_instruction(LW(reg, f"0($s1)")) self.register_instruction(SW(reg, dest)) self.used_registers[reg] = False @visit.register def _(self, node: cil.PrintIntNode): # El valor a imprimir se encuentra en la direccion # de memoria src self.add_source_line_comment(node) src = self.visit(node.src) assert src is not None # En mips, syscall 1 se usa para imprimir el entero # almacenado en $a0 # Cargar el entero en $a0 self.register_instruction(LW(v0, src)) # Cargar el valor self.register_instruction(LW(a0, f"12($v0)")) # syscall 1 = print_int self.register_instruction(LI(v0, 1)) self.register_instruction(SYSCALL()) @visit.register def _(self, node: cil.ReadIntNode): dest = self.visit(node.dest) assert dest is not None self.add_source_line_comment(node) # Cargar syscall read_int en $v0 self.register_instruction(LI(v0, 5)) self.register_instruction(SYSCALL()) # Crear la instancia a Int self.register_instruction(MOVE(a2, v0)) size = 20
# NOTE(review): third unrelated chunk — the top of a crawler/analysis module
# (coding header, imports, constants, Foursquare-style venue-category lists),
# also with original newlines collapsed; bytes preserved unchanged.  `SERVER`
# points at the Face++ API; the category lists appear unrelated to it —
# presumably leftovers from a venue-data script; confirm against the full file.
# The chunk (and `category_Food.extend`) is truncated mid-string at the end.
# -*- coding: utf-8 -*- import sys import time import httplib2 AUTO_RECONNECT_TIMES = 5 crawl_tips_json = {} SERVER = 'http://api.cn.faceplusplus.com/' category_Arts_Entertainment = ['Aquarium', 'Arcade', 'Art Gallery', 'Bowling Alley', 'Casino', 'Circus', 'Comedy Club', 'Concert Hall', 'Country Dance Club', 'Disc Golf', 'General Entertainment', 'Go Kart Track', 'Historic Site', 'Laser Tag', 'Mini Golf', 'Movie Theater', 'Indie Movie Theater', 'Multiplex', 'Museum', 'Art Museum', 'Erotic Museum', 'History Museum', 'Planetarium', 'Science Museum', 'Music Venue', 'Jazz Club', 'Piano Bar', 'Rock Club', 'Performing Arts Venue', 'Dance Studio', 'Indie Theater', 'Opera House', 'Theater', 'Pool Hall', 'Public Art', 'Outdoor Sculpture', 'Street Art', 'Racetrack', 'Roller Rink', 'Salsa Club', 'Stadium', 'Baseball Stadium', 'Basketball Stadium', 'Cricket Ground', 'Football Stadium', 'Hockey Arena', 'Soccer Stadium', 'Tennis Stadium', 'Track Stadium', 'Threet Art', 'Theme Park', 'Theme Park Ride / Attraction', 'Water Park', 'Zoo'] category_College_University = ['College Academic Building', 'College Arts Building', 'College Communications Building', 'College Engineering Building', 'College History Building', 'College Math Building', 'College Science Building', 'College Technology Building', 'College Administrative Building', 'College Auditorium', 'College Bookstore', 'College Cafeteria', 'College Classroom', 'College Gym', 'College Lab', 'College Library', 'College Quad', 'College Rec Center', 'College Residence Hall', 'College Stadium', 'College Baseball Diamond', 'College Basketball Court', 'College Cricket Pitch', 'College Football Field', 'College Hockey Rink', 'College Soccer Field', 'College Tennis Court', 'College Track', 'College Theater', 'Community College', 'Fraternity House', 'General College & University', 'Law School', 'Medical School', 'Sorority House', 'Student Center', 'Trade School', 'University'] category_Event = ['Conference', 'Convention', 'Festival', 
# category_Event continues, then aggregation lists for tipping-duration stats
# (empty at module load) and the large category_Food list.
'Music Festival', 'Other Event', 'Parade', 'Stoop Sale', 'Street Fair'] male_tipping_duration = [] female_tipping_duration = [] all_tip_timestamp = {} category_Food = ['Afghan Restaurant', 'African Restaurant', 'Ethiopian Restaurant', 'American Restaurant', 'New American Restaurant', 'Arepa Restaurant', 'Argentinian Restaurant', 'Asian Restaurant', 'Dim Sum Restaurant', 'Donburi Restaurant', 'Japanese Curry Restaurant', 'Kaiseki Restaurant', 'Kushikatsu Restaurant', 'Monjayaki Restaurant', 'Nabe Restaurant', 'Okonomiyaki Restaurant', 'Ramen Restaurant', 'Shabu-Shabu Restaurant', 'Soba Restaurant', 'Sukiyaki Restaurant', 'Takoyaki Place', 'Tempura Restaurant', 'Tonkatsu Restaurant', 'Udon Restaurant', 'Unagi Restaurant', 'Wagashi Place', 'Yakitori Restaurant', 'Yoshoku Restaurant', 'Korean Restaurant', 'Malaysian Restaurant', 'Mongolian Restaurant', 'Noodle House', 'Thai Restaurant', 'Tibetan Restaurant', 'Vietnamese Restaurant', 'Australian Restaurant', 'Austrian Restaurant', 'BBQ Joint', 'Bagel Shop', 'Bakery', 'Belarusian Restaurant', 'Belgian Restaurant', 'Bistro', 'Brazilian Restaurant', 'Acai House', 'Baiano Restaurant', 'Central Brazilian Restaurant', 'Churrascaria', 'Empada House', 'Goiano Restaurant', 'Mineiro Restaurant', 'Northeastern Brazilian Restaurant', 'Northern Brazilian Restaurant', 'Pastelaria', 'Southeastern Brazilian Restaurant', 'Southern Brazilian Restaurant', 'Tapiocaria', 'Breakfast Spot', 'Bubble Tea Shop', 'Buffet', 'Burger Joint', 'Burrito Place', 'Cafeteria', u'Café', 'Cajun / Creole Restaurant', 'Cambodian Restaurant', 'Caribbean Restaurant', 'Caucasian Restaurant', 'Chinese Restaurant', 'Anhui Restaurant', 'Beijing Restaurant', 'Cantonese Restaurant', 'Chinese Aristocrat Restaurant', 'Chinese Breakfast Place', 'Dongbei Restaurant', 'Fujian Restaurant', 'Guizhou Restaurant', 'Hainan Restaurant', 'Hakka Restaurant', 'Henan Restaurant', 'Hong Kong Restaurant', 'Huaiyang Restaurant', 'Hubei Restaurant', 'Hunan Restaurant', 'Imperial 
Restaurant', 'Jiangsu Restaurant', 'Jiangxi Restaurant', 'Macanese Restaurant', 'Manchu Restaurant', 'Peking Duck Restaurant', 'Shaanxi Restaurant', 'Shandong Restaurant', 'Shanghai Restaurant', 'Shanxi Restaurant', 'Szechuan Restaurant', 'Taiwanese Restaurant', 'Tianjin Restaurant', 'Xinjiang Restaurant', 'Yunnan Restaurant', 'Zhejiang Restaurant', 'Coffee Shop', 'Comfort Food Restaurant', 'Creperie', 'Cuban Restaurant', 'Cupcake Shop', 'Czech Restaurant', 'Deli / Bodega', 'Dessert Shop', 'Dim Sum Restaurant', 'Diner', 'Distillery', 'Donut Shop', 'Dumpling Restaurant', 'Eastern European Restaurant', 'English Restaurant', 'Ethiopian Restaurant', 'Falafel Restaurant', 'Fast Food Restaurant', 'Filipino Restaurant', 'Fish & Chips Shop', 'Fondue Restaurant', 'Food Truck', 'French Restaurant', 'Fried Chicken Joint', 'Gastropub', 'German Restaurant', 'Gluten-free Restaurant', 'Greek Restaurant', 'Bougatsa Shop', 'Cretan Restaurant', 'Fish Taverna', 'Grilled Meat Restaurant', 'Kafenio', 'Magirio', 'Meze Restaurant', 'Modern Greek Restaurant', 'Ouzeri', 'Patsa Restaurant', 'Taverna', 'Tsipouro Restaurant', 'Halal Restaurant', 'Hawaiian Restaurant', 'Himalayan Restaurant', 'Hot Dog Joint', 'Hotpot Restaurant', 'Hungarian Restaurant', 'Ice Cream Shop', 'Indian Restaurant', 'Indonesian Restaurant', 'Acehnese Restaurant', 'Balinese Restaurant', 'Betawinese Restaurant', 'Javanese Restaurant', 'Manadonese Restaurant', 'Meatball Place', 'Padangnese Restaurant', 'Sundanese Restaurant', 'Irish Pub', 'Italian Restaurant', 'Japanese Restaurant', 'Jewish Restaurant', 'Juice Bar', 'Korean Restaurant', 'Kosher Restaurant', 'Latin American Restaurant', 'Empanada Restaurant', 'Mac & Cheese Joint', 'Malaysian Restaurant', 'Mediterranean Restaurant', 'Mexican Restaurant'] category_Food.extend(['Middle Eastern Restaurant', 'Modern European Restaurant', 'Molecular Gastronomy Restaurant', 'Mongolian Restaurant', 'Moroccan Restaurant', 'New American Restaurant', 'Pakistani Restaurant', 'Persian 
Restaurant', 'Peruvian Restaurant', 'Pie Shop', 'Pizza Place', 'Polish Restaurant', 'Portuguese Restaurant', 'Ramen / Noodle House', 'Restaurant', 'Romanian Restaurant', 'Russian Restaurant', 'Blini House', 'Pelmeni House', 'Salad Place', 'Sandwich Place', 'Scandinavian Restaurant', 'Seafood Restaurant', 'Snack Place', 'Soup Place', 'South American Restaurant', 'Southern / Soul Food Restaurant', 'Souvlaki Shop', 'Spanish Restaurant', 'Paella Restaurant', 'Steakhouse', 'Sushi Restaurant', 'Swiss Restaurant', 'Taco Place', 'Tapas Restaurant', 'Tatar Restaurant', 'Tea Room', 'Thai Restaurant', 'Tibetan Restaurant', 'Turkish Restaurant', 'Borek Place', 'Cigkofte Place', 'Doner Restaurant', 'Gozleme Place', 'Home Cooking Restaurant', 'Kebab Restaurant', 'Kofte Place', u'Kokoreç Restaurant', 'Manti Place', 'Meyhane', 'Pide Place', 'Ukrainian Restaurant', 'Varenyky restaurant', 'West-Ukrainian Restaurant', 'Vegetarian / Vegan Restaurant', 'Vietnamese Restaurant', 'Winery', 'Wings Joint', 'Frozen Yogurt', 'Friterie', 'Andhra Restaurant', 'Awadhi Restaurant', 'Bengali Restaurant', 'Chaat Place', 'Chettinad Restaurant', 'Dhaba', 'Dosa Place', 'Goan Restaurant', 'Gujarati Restaurant', 'Indian Chinese Restaurant', 'Indian Sweet Shop', 'Irani Cafe', 'Jain Restaurant', 'Karnataka Restaurant', 'Kerala Restaurant', 'Maharashtrian Restaurant', 'Mughlai Restaurant', 'Multicuisine Indian Restaurant', 'North Indian Restaurant', 'Northeast Indian Restaurant', 'Parsi Restaurant', 'Punjabi Restaurant', 'Rajasthani Restaurant', 'South Indian Restaurant', 'Udupi Restaurant', 'Indonesian Meatball Place', 'Abruzzo', 'Turkish Home Cooking Restaurant', 'Sri Lankan Restaurant', 'Veneto Restaurant', 'Umbrian Restaurant', 'Tuscan Restaurant', 'Trentino Restaurant', 'Trattoria/Osteria', 'South Tyrolean Restaurant', 'Sicilian Restaurant', 'Sardinian Restaurant', 'Roman Restaurant', 'Romagna Restaurant', 'Rifugio di Montagna', 'Puglia Restaurant', 'Piedmontese Restaurant', 'Piadineria', 'Molise 
Restaurant', 'Marche Restaurant', 'Malga', 'Lombard Restaurant', 'Ligurian Restaurant', 'Friuli Restaurant', 'Emilia Restaurant', 'Campanian Restaurant', 'Calabria Restaurant', 'Basilicata Restaurant', 'Aosta Restaurant', 'Agriturismo', 'Abruzzo Restaurant', '']) category_Nightlife_Spot = ['Bar', 'Beach Bar', 'Beer Garden', 'Brewery', 'Champagne Bar', 'Cocktail Bar', 'Dive Bar', 'Gay Bar', 'Hookah Bar', 'Hotel Bar', 'Karaoke Bar', 'Lounge', 'Night Market', 'Nightclub', 'Other Nightlife', 'Pub', 'Sake Bar', 'Speakeasy', 'Sports Bar', 'Strip Club', 'Whisky Bar', 'Wine Bar', 'Speakeasy'] category_Outdoors_Recreation = ['Athletics & Sports', 'Badminton Court', 'Baseball Field', 'Basketball Court', 'Bowling Green', 'Golf Course', 'Hockey Field', 'Paintball Field', 'Rugby Pitch', 'Skate Park', 'Skating Rink', 'Soccer Field', 'Sports Club', 'Squash Court', 'Tennis Court', 'Volleyball Court', 'Bath House', 'Bathing Area', 'Beach', 'Nudist Beach', 'Surf Spot', 'Botanical Garden', 'Bridge', 'Campground', 'Castle', 'Cemetery', 'Dive Spot', 'Dog Run', 'Farm', 'Field', 'Fishing Spot', 'Forest', 'Garden', 'Gun Range', 'Harbor / Marina', 'Hot Spring', 'Island', 'Lake', 'Lighthouse', 'Mountain', 'National Park', 'Nature Preserve', 'Other Great Outdoors', 'Palace', 'Park', 'Pedestrian Plaza', 'Playground', 'Plaza', 'Pool', 'Rafting', 'Recreation Center', 'River', 'Rock Climbing Spot', 'Scenic Lookout', 'Sculpture Garden', 'Ski Area', 'Apres Ski Bar', 'Ski Chairlift', 'Ski Chalet', 'Ski Lodge', 'Ski Trail', 'Stables', 'States & Municipalities', 'City', 'County', 'Country', 'Neighborhood', 'State', 'Town', 'Village', 'Summer Camp', 'Trail', 'Tree', 'Vineyard', 'Volcano', 'Well'] category_Professional_Other_Places = ['Animal Shelter', 'Auditorium', 'Building', 'Club House', 'Community Center', 'Convention Center', 'Meeting Room', 'Cultural Center', 'Distribution Center', 'Event Space', 'Factory', 'Fair', 'Funeral Home', 'Government Building', 'Capitol Building', 'City Hall', 
'Courthouse', 'Embassy / Consulate', 'Fire Station', 'Monument / Landmark', 'Police Station', 'Town Hall', 'Library', 'Medical Center', 'Acupuncturist', 'Alternative Healer', 'Chiropractor', "Dentist's Office", "Doctor's Office", 'Emergency Room', 'Eye Doctor', 'Hospital', 'Laboratory', 'Mental Health Office', 'Veterinarian', 'Military Base', 'Non-Profit', 'Office', 'Advertising Agency', 'Campaign Office', 'Conference Room', 'Coworking Space', 'Tech Startup', 'Parking', 'Post Office', 'Prison', 'Radio Station', 'Recruiting Agency', 'School', 'Circus School', 'Driving School', 'Elementary School', 'Flight School', 'High School', 'Language School', 'Middle School', 'Music School', 'Nursery School', 'Preschool', 'Private School', 'Religious School', 'Swim School', 'Social Club', 'Spiritual Center', 'Buddhist Temple', 'Church', 'Hindu Temple', 'Monastery', 'Mosque', 'Prayer Room', 'Shrine', 'Synagogue', 'Temple', 'TV Station', 'Voting Booth', 'Warehouse'] category_Residence = ['Assisted Living', 'Home (private)', 'Housing Development', 'Residential Building (Apartment / Condo)', 'Trailer Park'] category_Shop_Service = ['Construction & Lanscape', 'Event Service', 'ATM', 'Adult Boutique', 'Antique Shop', 'Arts & Crafts Store', 'Astrologer', 'Auto Garage', 'Automotive Shop', 'Baby Store', 'Bank', 'Betting Shop', 'Big Box Store', 'Bike Shop', 'Board Shop', 'Bookstore', 'Bridal Shop', 'Camera Store', 'Candy Store', 'Car Dealership', 'Car Wash', 'Carpet Store', 'Check Cashing Service', 'Chocolate Shop', 'Christmas Market', 'Clothing Store', 'Accessories Store', 'Boutique', 'Kids Store', 'Lingerie Store', "Men's Store", 'Shoe Store', "Women's Store", 'Comic Shop', 'Convenience Store', 'Cosmetics Shop', 'Costume Shop', 'Credit Union', 'Daycare', 'Department Store', 'Design Studio', 'Discount Store', 'Dive Shop', 'Drugstore / Pharmacy', 'Dry Cleaner', 'EV Charging Station', 'Electronics Store', 'Fabric Shop', 'Financial or Legal Service', 'Fireworks Store', 'Fishing Store', 
'Flea Market', 'Flower Shop', 'Food & Drink Shop', 'Beer Store', 'Butcher', 'Cheese Shop', 'Farmers Market', 'Fish Market', 'Food Court', 'Gourmet Shop', 'Grocery Store', 'Health Food Store', 'Liquor
: dict The updated original dictionary """ # Iterate over key value pairs in the new dictionary to merge into the original for key, val in new_dict.items(): # If a mapping object (e.g. dictionary) call the function recursively if isinstance(val, Mapping): tmp = update_dict(orig_dict.get(key, {}), val) orig_dict[key] = tmp # If a list then merge it into the original dictionary elif isinstance(val, list): orig_dict[key] = orig_dict.get(key, []) + val # Do the same for any other type else: orig_dict[key] = new_dict[key] return orig_dict @checkargs def expand_dictionary(dictionary: dict, key_separator: str = ".") -> dict: """ Takes a flat dictionary (no nesting) with keys separated by a separator and converts it into a nested dictionary Parameters ---------- dictionary : dict The input dictionary with separated keys key_separator : str The seprator to use Returns ------- dict_expanded : dict The expanded nested dictionary """ dict_expanded = {} # Loop over each composite key and final value for key, value in dictionary.items(): # Split the key on the separator components = key.split(key_separator) # Get the expanded dictionary for this key and update the master dictionary update_dict( dict_expanded, expand_dictionary_single_recursive(0, components, value) ) return dict_expanded @checkargs def expand_dictionary_single_recursive(index: int, key_list: list, value) -> dict: """ Takes a list of keys and a value and turns it into a nested dictionary. This is a recursive function. 
Parameters ---------- index : int The current index of the key in the list of keys key_list : list[str] The list of keys to turn into a nested dictionary value : any The final value to match against the last (deepest) key Returns ------- dict The final value to match against the last (deepest) key """ # Gets the current key in the list key = key_list[index] # If it is the last key in the list return a dictionary with it keyed against the value if key == key_list[-1]: return {key: value} # Otherwise if it is not the last key, key it against calling this function recursively with the next key return {key: expand_dictionary_single_recursive(index + 1, key_list, value)} @checkargs def get_swagger_dict(api_url: str) -> dict: """ Gets the lusid.json swagger file Parameters ---------- api_url : str The base api url for the LUSID instance Returns ------- dict The swagger file as a dictionary """ swagger_path = "/swagger/v0/swagger.json" swagger_url = api_url + swagger_path swagger_file = requests.get(swagger_url) if swagger_file.status_code == 200: return json.loads(swagger_file.text) else: raise ValueError( f"""Received a {swagger_file.status_code} response from the provided url, please double check the base api url and try again""" ) def generate_required_attributes_list(): pass @checkargs def verify_all_required_attributes_mapped( mapping: dict, model_object_name: str, exempt_attributes: list = None, key_separator: str = ".", ) -> None: """ Verifies that all required attributes are included in the mapping, passes silently if they are and raises an exception otherwise Parameters ---------- mapping : dict The required mapping model_object_name : str The name of the lusid.models object that the mapping is for exempt_attributes : list[str] The attributes that are exempt from needing to be in the required mapping key_separator : str The separator to use to join the required attributes together Returns ------- key_separator : str The separator to use to join the required 
attributes together """ # Check that the provided model name actually exists model_object = getattr(lusid.models, model_object_name, None) if model_object is None: raise TypeError("The provided model_object is not a lusid.model object") # Convert a None to an empty list exempt_attributes = ( Validator(exempt_attributes, "exempt_attributes") .set_default_value_if_none([]) .value ) # Gets the required attributes for this model required_attributes = get_required_attributes_model_recursive( model_object=model_object, key_separator=key_separator ) # Removes the exempt attributes for attribute in required_attributes: # Removes all nested attributes for example if "identifiers" is exempt "identifiers.value" will be removed if attribute.split(key_separator)[0] in exempt_attributes: required_attributes.remove(attribute) missing_attributes = set(required_attributes) - set(list(mapping.keys())) if len(missing_attributes) > 0: raise ValueError( f"""The required attributes {str(missing_attributes)} are missing from the mapping. Please add them.""" ) @checkargs def get_required_attributes_model_recursive(model_object, key_separator: str = "."): """ This is a recursive function which gets all of the required attributes on a LUSID model. If the model is nested then it separates the attributes by a '.' until the bottom level where no more models are required and a primitive type is supplied e.g. string, int etc. 
Parameters ---------- model_object : lusid.model The model to get required attributes for key_separator : str The separator to use to join the required attributes together Returns ------- list[str] The required attributes of the model """ attributes = [] # Get the required attributes for the current model required_attributes = get_required_attributes_from_model(model_object) # Get the types of the attributes for the current model open_api_types = model_object.openapi_types for required_attribute in required_attributes: required_attribute_type = open_api_types[required_attribute] # Check to see if there is a LUSID model for this required attribute, if no further nesting then add this attribute if not check_nested_model(required_attribute_type): attributes.append(camel_case_to_pep_8(required_attribute)) # Otherwise call the function recursively else: # Ensure that that if there is a complex attribute type e.g. dict(str, InstrumentIdValue) it is extracted ( required_attribute_type, nested_type, ) = extract_lusid_model_from_attribute_type(required_attribute_type) nested_required_attributes = get_required_attributes_model_recursive( model_object=getattr(lusid.models, required_attribute_type), ) for nested_required_attribute in nested_required_attributes: attributes.append( key_separator.join( [ camel_case_to_pep_8(required_attribute), nested_required_attribute, ] ) ) return attributes def get_required_attributes_from_model(model_object) -> list: """ Gets the required attributes for a LUSID model using reflection Parameters ---------- model_object : lusid.models A LUSID model object Returns ------- list[str] The required attributes """ # Get the source code for the model model_details = inspect.getsource(model_object) # Get all the setter function definitions setters = re.findall(r"(?<=.setter).+?(?:@|to_dict)", model_details, re.DOTALL) # Set the status (required or optional) for each attribute based on whether "is None:" exists in the setter function ''' Here are two 
examples A) A None value is not allowed and hence this is required. Notice the "if identifiers is None:" condition. @identifiers.setter def identifiers(self, identifiers): """Sets the identifiers of this InstrumentDefinition. A set of identifiers that can be used to identify the instrument. At least one of these must be configured to be a unique identifier. # noqa: E501 :param identifiers: The identifiers of this InstrumentDefinition. # noqa: E501 :type: dict(str, InstrumentIdValue) """ if identifiers is None: raise ValueError("Invalid value for `identifiers`, must not be `None`") # noqa: E501 self._identifiers = identifiers B) A None value is allowed and hence this is optional @look_through_portfolio_id.setter def look_through_portfolio_id(self, look_through_portfolio_id): """Sets the look_through_portfolio_id of this InstrumentDefinition. :param look_through_portfolio_id: The look_through_portfolio_id of this InstrumentDefinition. # noqa: E501 :type: ResourceId """ self._look_through_portfolio_id = look_through_portfolio_id ''' attribute_status = { re.search(r"(?<=def ).+?(?=\(self)", setter).group(0): "Required" if "is None:" in setter else "Optional" for setter in setters } # If there are required attributes collect them as a list required_attributes = [ key for key, value in attribute_status.items() if value == "Required" ] # If there are no required attributes on a model, assume that all attributes are required # This is for cases such as lusid.models.TransactionRequest.transaction_price if len(required_attributes) == 0: required_attributes = list(attribute_status.keys()) return required_attributes def extract_lusid_model_from_attribute_type(attribute_type: str) -> str: """ Extracts a LUSID model from a complex attribute type e.g. dict(str, InstrumentIdValue) if it exists. 
If there is no LUSID model the attribute type is still returned Parameters ---------- attribute_type : str The attribute type to extract the model from Returns ------- attribute_type : str The returned attribute type with the LUSID model extracted if possible nested_type : str The type of nesting used e.g. List or Dict """ nested_type = None # If the attribute type is a dictionary e.g. dict(str, InstrumentIdValue), extract the type if "dict" in attribute_type: attribute_type = attribute_type.split(", ")[1].rstrip(")") nested_type = "dict" # If it is a list e.g. list[ModelProperty] extract the type if "list" in attribute_type: attribute_type = attribute_type.split("list[")[1].rstrip("]") nested_type = "list" return attribute_type, nested_type @checkargs def check_nested_model(required_attribute_type: str) -> bool: """ Takes the properties of a required attribute on a model and searches as to whether or not this attribute requires a model of its own Parameters ---------- required_attribute_type : str The type of the required attribute Returns
# coding=utf-8 """ Modelos de datos básicos para calcular los Tipos de Instalacion. """ class Linea(object): """ Objeto que reperesenta una linea Podemos obtener el Tipo de Instalcion creando un objeto linea, assignando los linea = Linea() # TODO: Explicar dev """ def __init__(self): self.tension = None """Tension en kV """ self.num_circuitos = None """Número de circuitos """ self.num_conductores = None """Número de conductores """ self.seccion = None """Sección del cable en mm² """ self.despliegue = None """Despliegue de la linea: - Tensada sobre postes: ``AP`` - Apoyada sobre fachada: ``AF`` - Subterránea: `S` """ @property def tipoinstalacion(self): """ Obtiene el tipo de instalacion de la linea :return: """ if self.despliegue is None: return None if self.despliegue[0] == 'A': u = self.tension s = self.seccion if u > 123: if self.num_circuitos == 1: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-1UX' elif 180 < s <= 300: return 'TI-1UY' elif 300 < s: return 'TI-1UZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-2UX' elif 180 < s <= 300: return 'TI-2UY' elif 300 < s: return 'TI-2UZ' elif self.num_circuitos == 2: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-3UX' elif 180 < s <= 300: return 'TI-3UY' elif 300 < s: return 'TI-3UZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-4UX' elif 180 < s <= 300: return 'TI-4UY' elif 300 < s: return 'TI-4UZ' elif self.num_circuitos >= 3: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-3AUX' elif 180 < s <= 300: return 'TI-3AUY' elif 300 < s: return 'TI-3AUZ' elif 123 >= u > 72.5: if self.num_circuitos == 1: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-1VX' elif 180 < s <= 300: return 'TI-1VY' elif 300 < s: return 'TI-1VZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-2VX' elif 180 < s <= 300: return 'TI-2VY' elif 300 < s: return 'TI-2VZ' elif self.num_circuitos == 2: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-3VX' elif 180 < s 
<= 300: return 'TI-3VY' elif 300 < s: return 'TI-3VZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-4VX' elif 180 < s <= 300: return 'TI-4VY' elif 300 < s: return 'TI-4VZ' elif self.num_circuitos >= 3: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-3AVX' elif 180 < s <= 300: return 'TI-3AVY' elif 300 < s: return 'TI-3AVZ' elif 72.5 >= u > 52: if self.num_circuitos == 1: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-5UX' elif 180 < s <= 300: return 'TI-5UY' elif 300 < s: return 'TI-5UZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-6UX' elif 180 < s <= 300: return 'TI-6UY' elif 300 < s: return 'TI-6UZ' elif self.num_circuitos == 2: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-7UX' elif 180 < s <= 300: return 'TI-7UY' elif 300 < s: return 'TI-7UZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-8UX' elif 180 < s <= 300: return 'TI-8UY' elif 300 < s: return 'TI-8UZ' elif self.num_circuitos == 3: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-7AUY' elif 180 < s <= 300: return 'TI-7AUX' elif 300 < s: return 'TI-7AUZ' elif self.num_circuitos >= 3: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-7AUY' elif 180 < s <= 300: return 'TI-7AUX' elif 300 < s: return 'TI-7AUZ' elif 52 >= u > 36: if self.num_circuitos == 1: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-5VX' elif 180 < s <= 300: return 'TI-5VY' elif 300 < s: return 'TI-5VZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-6VX' elif 180 < s <= 300: return 'TI-6VY' elif 300 < s: return 'TI-6VZ' elif self.num_circuitos == 2: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-7VX' elif 180 < s <= 300: return 'TI-7VY' elif 300 < s: return 'TI-7VZ' elif self.num_conductores == 2: if 0 < s <= 180: return 'TI-8VX' elif 180 < s <= 300: return 'TI-8VY' elif 300 < s: return 'TI-8VZ' elif self.num_circuitos >= 3: if self.num_conductores == 1: if 0 < s <= 180: return 'TI-7AVY' elif 180 < s <= 300: 
return 'TI-7AVX' elif 300 < s: return 'TI-7AVZ' elif 36 >= u > 24: if self.num_circuitos == 1: if 0 < s <= 56: return 'TI-9UX' elif 56 < s <= 110: return 'TI-9UY' elif 110 < s: return 'TI-9UZ' elif self.num_circuitos == 2: if 0 < s <= 56: return 'TI-10UX' elif 56 < s <= 110: return 'TI-10UY' elif 110 < s: return 'TI-10UZ' elif self.num_circuitos >= 3: if 0 < s <= 56: return 'TI-10AUX' elif 56 < s <= 110: return 'TI-10AUY' elif 110 < s: return 'TI-10AUZ' elif 24 >= u > 17.5: if self.num_circuitos == 1: if 0 < s <= 56: return 'TI-9VX' elif 56 < s <= 110: return 'TI-9VY' elif 110 < s: return 'TI-9VZ' elif self.num_circuitos == 2: if 0 < s <= 56: return 'TI-10VX' elif 56 < s <= 110: return 'TI-10VY' elif 110 < s: return 'TI-10VZ' elif self.num_circuitos >= 3: if 0 < s <= 56: return 'TI-10AVX' elif 56 < s <= 110: return 'TI-10AVY' elif 110 < s: return 'TI-10AVZ' elif 17.5 >= u > 12: if self.num_circuitos == 1: if 0 < s <= 56: return 'TI-9WX' elif 56 < s <= 110: return 'TI-9WY' elif 110 < s: return 'TI-9WZ' elif self.num_circuitos == 2: if 0 < s <= 56: return 'TI-10WX' elif 56 < s <= 110: return 'TI-10WY' elif 110 < s: return 'TI-10WZ' elif self.num_circuitos >= 3: if 0 < s <= 56: return 'TI-10AWX' elif 56 < s <= 110: return 'TI-10AWY' elif 110 < s: return 'TI-10AWZ' elif 12 >= u > 1: if self.num_circuitos == 1: if 0 < s <= 56: return 'TI-9BX' elif 56 < s <= 110: return 'TI-9BY' elif 110 < s: return 'TI-9BZ' elif self.num_circuitos == 2: if 0 < s <= 56: return 'TI-10BX' elif 56 < s <= 110: return 'TI-10BY' elif 110 < s: return 'TI-10BZ' elif self.num_circuitos >= 3: if 0 < s <= 56: return 'TI-10ABX' elif 56 < s <= 110: return 'TI-10ABY' elif 110 < s: return 'TI-10ABZ' elif 1 > u: if self.despliegue == 'AP': if self.num_circuitos == 1: if s < 75: return 'TI-11X' elif s >= 75: return 'TI-11Y' elif self.num_circuitos == 2: if s < 75: return 'TI-13X' elif s >= 75: return 'TI-13Y' if self.despliegue == 'AF': if self.num_circuitos == 1: if s < 75: return 'TI-12X' elif s >= 75: 
return 'TI-12Y' else: return None elif self.despliegue[0] == 'S': u = self.tension s = self.seccion if u > 123: if self.num_circuitos == 1: if 0 < s <= 630: return 'TI-14UX' elif 630 < s <= 1200: return 'TI-14UY' elif 1200 < s: return 'TI-14UZ' elif self.num_circuitos == 2: if 0 < s <= 630: return 'TI-15UX' elif 630 < s <= 1200: return 'TI-15UY' elif 1200 < s: return 'TI-15UZ' elif self.num_circuitos >= 3: if 0 < s <= 630: return 'TI-15AUX' elif 630 < s <= 1200: return 'TI-15AUY' elif 1200 < s: return 'TI-15AUZ' elif 123 >= u > 72.5: if self.num_circuitos == 1: if 0 < s <= 630: return 'TI-14VX' elif 630 < s <= 1200: return 'TI-14VY' elif 1200 < s: return 'TI-14VZ' elif self.num_circuitos == 2: if 0 < s <= 630: return 'TI-15VX' elif 630 < s
0.434294481903252*log(1 + 22.9527167938577*m.b217) + 0.434294481903252*log(1 + 6.55753492060242* m.b218) + 0.434294481903252*log(1 + 0.57750965326708*m.b219) + 0.434294481903252*log(1 + 1.17445395540411*m.b220) + 0.434294481903252*log(1 + 5.58082608780692*m.b221) + 0.434294481903252*log(1 + 8.54665307162323*m.b222) + 0.434294481903252*log(1 + 0.904131288343215 *m.b223) + 0.434294481903252*log(1 + 9.96092244928357*m.b224) + 0.434294481903252*log(1 + 55.1508965214644*m.b225) + 0.434294481903252*log(1 + 0.762735414453824*m.b226) + 0.434294481903252*log(1 + 7.92940747749633*m.b227) + 0.434294481903252*log(1 + 12.37433365888* m.b228) + 0.434294481903252*log(1 + 15.4075087582294*m.b229) + 0.434294481903252*log(1 + 45.4087023224054*m.b230) + 0.434294481903252*log(1 + 0.0569465408602056*m.b231) + 0.434294481903252*log(1 + 950.268343882408*m.b232) + 0.434294481903252*log(1 + 14.0146427448639* m.b233) + 0.434294481903252*log(1 + 48.2622840339531*m.b234) + 0.434294481903252*log(1 + 59.1154775719293*m.b235) + 0.434294481903252*log(1 + 106.791124312609*m.b236) + 0.434294481903252*log(1 + 18.2609404559526*m.b237) + 0.434294481903252*log(1 + 1.36243976696537* m.b238) + 0.434294481903252*log(1 + 0.232175441240096*m.b239) + 0.434294481903252*log(1 + 22.7465002276522*m.b240) + 0.434294481903252*log(1 + 83.6440577246705*m.b241) + 0.434294481903252*log(1 + 50.9139424902747*m.b242) + 0.434294481903252*log(1 + 23.5442813006769* m.b243) + 0.434294481903252*log(1 + 1.13092887066533*m.b244) + 0.434294481903252*log(1 + 23.0792683863803*m.b245) + 0.434294481903252*log(1 + 1.05259533482199*m.b246) + 0.434294481903252*log(1 + 33.4471609592659*m.b247) + 0.434294481903252*log(1 + 312.027249071196* m.b248) + 0.434294481903252*log(1 + 21.381864326462*m.b249) + 0.434294481903252*log(1 + 4.61776925549672*m.b250) + 0.434294481903252*log(1 + 0.850403119449719*m.b251) + 0.434294481903252*log(1 + 6.04549274129207*m.b252) + 0.434294481903252*log(1 + 2.31626375880436* m.b253) + 0.434294481903252*log(1 + 
6.52660928676575*m.b254) + 0.434294481903252*log(1 + 14.8597407839452*m.b255) + 0.434294481903252*log(1 + 2.06041038208614*m.b256) + 0.434294481903252*log(1 + 8.01305312437427*m.b257) + 0.434294481903252*log(1 + 27.0821161178967* m.b258) + 0.434294481903252*log(1 + 17.286050889689*m.b259) + 0.434294481903252*log(1 + 17.3831534593309*m.b260) + 0.434294481903252*log(1 + 162.279815690267*m.b261) + 0.434294481903252*log(1 + 39.2370070576682*m.b262) + 0.434294481903252*log(1 + 6.57948746685798* m.b263) + 0.434294481903252*log(1 + 0.725908386254844*m.b264) + 0.434294481903252*log(1 + 15.4474743324445*m.b265) + 0.434294481903252*log(1 + 12.3124413577695*m.b266) + 0.434294481903252*log(1 + 18.4387536778241*m.b267) + 0.434294481903252*log(1 + 1.23850039518286* m.b268) + 0.434294481903252*log(1 + 13.8120242692985*m.b269) + 0.434294481903252*log(1 + 10.1954634111504*m.b270) + 0.434294481903252*log(1 + 7.56234635429108*m.b271) + 0.434294481903252*log(1 + 34.8893314937733*m.b272) + 0.434294481903252*log(1 + 196.710087319055* m.b273) + 0.434294481903252*log(1 + 9.38601247234896*m.b274) + 0.434294481903252*log(1 + 41.5035857373984*m.b275) + 0.434294481903252*log(1 + 24.1805281587732*m.b276) + 0.434294481903252*log(1 + 26.6821060260029*m.b277) + 0.434294481903252*log(1 + 114.239042949201* m.b278) + 0.434294481903252*log(1 + 18.2075092669915*m.b279) + 0.434294481903252*log(1 + 41.5332876490466*m.b280) + 0.434294481903252*log(1 + 0.291813745783498*m.b281) + 0.434294481903252*log(1 + 0.634485242653686*m.b282) + 0.434294481903252*log(1 + 0.760462798273029*m.b283) + 0.434294481903252*log(1 + 3.73230574215145*m.b284) + 0.434294481903252*log(1 + 0.762622521522688*m.b285) + 0.434294481903252*log(1 + 5.8002705369568* m.b286) + 0.434294481903252*log(1 + 6.90616002336022*m.b287) + 0.434294481903252*log(1 + 6.50428287469064*m.b288) + 0.434294481903252*log(1 + 3.55966086154444*m.b289) + 0.434294481903252*log(1 + 1.37501346145747*m.b290) + 0.434294481903252*log(1 + 15.4938201473979* m.b291) + 
0.434294481903252*log(1 + 38.5780232568333*m.b292) + 0.434294481903252*log(1 + 28.2423330516121*m.b293) + 0.434294481903252*log(1 + 108.0536833147*m.b294) + 0.434294481903252* log(1 + 257.365063164696*m.b295) + 0.434294481903252*log(1 + 5.68315276161109*m.b296) + 0.434294481903252*log(1 + 7.11304478986188*m.b297) + 0.434294481903252*log(1 + 2.87393866273881* m.b298) + 0.434294481903252*log(1 + 23.9190034854088*m.b299) + 0.434294481903252*log(1 + 16.9664070831965*m.b300) >= 7.5257498916) m.c17 = Constraint(expr=0.434294481903252*log(1 + 6471.47630366129*m.b301) + 0.434294481903252*log(1 + 559.470088916934* m.b302) + 0.434294481903252*log(1 + 131.065353224642*m.b303) + 0.434294481903252*log(1 + 4527.58508947219*m.b304) + 0.434294481903252*log(1 + 725.556936560107*m.b305) + 0.434294481903252*log(1 + 6197.68478678289*m.b306) + 0.434294481903252*log(1 + 28948.2986841625* m.b307) + 0.434294481903252*log(1 + 2018.19943806769*m.b308) + 0.434294481903252*log(1 + 19641.6479462097*m.b309) + 0.434294481903252*log(1 + 26951.9823247315*m.b310) + 0.434294481903252*log(1 + 21654.0074744564*m.b311) + 0.434294481903252*log(1 + 187272.254701162* m.b312) + 0.434294481903252*log(1 + 347.080398901013*m.b313) + 0.434294481903252*log(1 + 7548.98180619965*m.b314) + 0.434294481903252*log(1 + 35619.0970308209*m.b315) + 0.434294481903252*log(1 + 565.828950547017*m.b316) + 0.434294481903252*log(1 + 691.980736513294* m.b317) + 0.434294481903252*log(1 + 90.2881193419812*m.b318) + 0.434294481903252*log(1 + 66.455792864316*m.b319) + 0.434294481903252*log(1 + 917.323181769107*m.b320) + 0.434294481903252 *log(1 + 317.548275464461*m.b321) + 0.434294481903252*log(1 + 2353.48851009575*m.b322) + 0.434294481903252*log(1 + 944.33833646317*m.b323) + 0.434294481903252*log(1 + 593.441522643409* m.b324) + 0.434294481903252*log(1 + 14364.0803160569*m.b325) + 0.434294481903252*log(1 + 569.56959569268*m.b326) + 0.434294481903252*log(1 + 468.065906499702*m.b327) + 0.434294481903252 *log(1 + 
283.323473459373*m.b328) + 0.434294481903252*log(1 + 2453.12359348963*m.b329) + 0.434294481903252*log(1 + 130.053864019821*m.b330) + 0.434294481903252*log(1 + 504.824501007987* m.b331) + 0.434294481903252*log(1 + 864.389638743978*m.b332) + 0.434294481903252*log(1 + 5358.57000009405*m.b333) + 0.434294481903252*log(1 + 536.813092655462*m.b334) + 0.434294481903252*log(1 + 2090.62622832526*m.b335) + 0.434294481903252*log(1 + 949.053088497376* m.b336) + 0.434294481903252*log(1 + 7662.54885913674*m.b337) + 0.434294481903252*log(1 + 1278.90348751647*m.b338) + 0.434294481903252*log(1 + 199.937538875581*m.b339) + 0.434294481903252*log(1 + 316.14891683603*m.b340) + 0.434294481903252*log(1 + 10157.7856529245* m.b341) + 0.434294481903252*log(1 + 2047.95141639884*m.b342) + 0.434294481903252*log(1 + 1162.63096049805*m.b343) + 0.434294481903252*log(1 + 11619.1092674715*m.b344) + 0.434294481903252*log(1 + 41350.5410959091*m.b345) + 0.434294481903252*log(1 + 3752.26117044855* m.b346) + 0.434294481903252*log(1 + 44.4989485679777*m.b347) + 0.434294481903252*log(1 + 686.691887671087*m.b348) + 0.434294481903252*log(1 + 1437.9826588788*m.b349) + 0.434294481903252 *log(1 + 401.711289569219*m.b350) + 0.434294481903252*log(1 + 2383.86262616102*m.b351) + 0.434294481903252*log(1 + 3888.85713004546*m.b352) + 0.434294481903252*log(1 + 17366.3523925122* m.b353) + 0.434294481903252*log(1 + 284.00124612878*m.b354) + 0.434294481903252*log(1 + 6148.55050638918*m.b355) + 0.434294481903252*log(1 + 3037.45047411846*m.b356) + 0.434294481903252*log(1 + 3275.00629600741*m.b357) + 0.434294481903252*log(1 + 3395.73015290174* m.b358) + 0.434294481903252*log(1 + 1429.96089356484*m.b359) + 0.434294481903252*log(1 + 4550.67406071516*m.b360) + 0.434294481903252*log(1 + 1881.71989283143*m.b361) + 0.434294481903252*log(1 + 4559.26128482543*m.b362) + 0.434294481903252*log(1 + 25130.9871453682* m.b363) + 0.434294481903252*log(1 + 8858.82947420268*m.b364) + 0.434294481903252*log(1 + 6891.89059858094*m.b365) + 
0.434294481903252*log(1 + 56.7437826313155*m.b366) + 0.434294481903252*log(1 + 1186.85848978579*m.b367) + 0.434294481903252*log(1 + 2647.00974945559* m.b368) + 0.434294481903252*log(1 + 18842.3402946927*m.b369) + 0.434294481903252*log(1 + 162.431769131442*m.b370) + 0.434294481903252*log(1 + 575.072593259644*m.b371) + 0.434294481903252*log(1 + 29.5055634426532*m.b372) + 0.434294481903252*log(1 + 208.879219752119* m.b373) + 0.434294481903252*log(1 + 3110.93584933623*m.b374) + 0.434294481903252*log(1 + 1133.21378729381*m.b375) + 0.434294481903252*log(1 + 2600.70123420095*m.b376) + 0.434294481903252*log(1 + 1573.17828412703*m.b377) + 0.434294481903252*log(1 + 2035.90040193901* m.b378) + 0.434294481903252*log(1 + 749.521533332783*m.b379) + 0.434294481903252*log(1 + 5216.34977943056*m.b380) + 0.434294481903252*log(1 + 24659.4611658913*m.b381) + 0.434294481903252*log(1 + 36602.5787183749*m.b382) + 0.434294481903252*log(1 + 905.973206005301* m.b383) + 0.434294481903252*log(1 + 1650.97239803738*m.b384) + 0.434294481903252*log(1 + 275.5808872219*m.b385) + 0.434294481903252*log(1 + 1571.62050681183*m.b386) + 0.434294481903252* log(1 + 1583.79797575396*m.b387) + 0.434294481903252*log(1 + 3204.44639454359*m.b388) + 0.434294481903252*log(1 + 1594.34140792519*m.b389) + 0.434294481903252*log(1 + 289.788660712723* m.b390) + 0.434294481903252*log(1 + 45.7993627086468*m.b391) + 0.434294481903252*log(1 + 909.054014986723*m.b392) + 0.434294481903252*log(1 + 4204.87554782629*m.b393) + 0.434294481903252*log(1 + 11341.0309779474*m.b394) + 0.434294481903252*log(1 + 2145.45719436087* m.b395) + 0.434294481903252*log(1 + 1990.01604145283*m.b396) + 0.434294481903252*log(1 + 2545.67909359917*m.b397) + 0.434294481903252*log(1 + 3662.68466372276*m.b398) + 0.434294481903252*log(1 + 179.970043430086*m.b399) + 0.434294481903252*log(1 + 295.443155265909* m.b400) + 0.434294481903252*log(1 + 2796.48917426792*m.b401) + 0.434294481903252*log(1 + 1883.28163581232*m.b402) + 0.434294481903252*log(1 + 
3416.8798679252*m.b403) + 0.434294481903252 *log(1 + 248.974373481593*m.b404) + 0.434294481903252*log(1 + 1068.6395326998*m.b405) + 0.434294481903252*log(1 + 488.791162654025*m.b406) + 0.434294481903252*log(1 + 131796.958497938* m.b407) + 0.434294481903252*log(1 + 1061.83843450452*m.b408) + 0.434294481903252*log(1 + 1120.26853817214*m.b409) + 0.434294481903252*log(1 + 20869.4710262626*m.b410) + 0.434294481903252*log(1 + 126.673438157006*m.b411) + 0.434294481903252*log(1 + 1712.08389524498* m.b412) + 0.434294481903252*log(1 + 904.413371097369*m.b413) + 0.434294481903252*log(1 + 245.033479216517*m.b414) + 0.434294481903252*log(1 + 1742.7961951974*m.b415) + 0.434294481903252 *log(1 + 14991.6266467876*m.b416) + 0.434294481903252*log(1 + 9336.22071586801*m.b417) + 0.434294481903252*log(1 + 8037.52130105346*m.b418) + 0.434294481903252*log(1 + 4961.05480983374* m.b419) + 0.434294481903252*log(1 + 1901.14186235812*m.b420) + 0.434294481903252*log(1 + 2000.16234669028*m.b421) + 0.434294481903252*log(1 + 1299.88706326494*m.b422) + 0.434294481903252*log(1 + 770.847812439209*m.b423) + 0.434294481903252*log(1 + 4494.08207586391* m.b424) + 0.434294481903252*log(1 + 1592.49217520873*m.b425) + 0.434294481903252*log(1 + 2552.69733849714*m.b426) + 0.434294481903252*log(1 + 35409.3134232912*m.b427) + 0.434294481903252*log(1 + 1229.4598009979*m.b428) + 0.434294481903252*log(1 + 1126.62799751325* m.b429) + 0.434294481903252*log(1 + 556.537239288819*m.b430) + 0.434294481903252*log(1 + 303.153844953105*m.b431) + 0.434294481903252*log(1 + 626.677652617085*m.b432) + 0.434294481903252*log(1 + 3916.42712700753*m.b433) + 0.434294481903252*log(1 + 1095.5386896181* m.b434) + 0.434294481903252*log(1 + 4366.65071010069*m.b435) + 0.434294481903252*log(1 + 6845.53458884457*m.b436) + 0.434294481903252*log(1 + 8828.54247525223*m.b437) + 0.434294481903252*log(1 + 576.759986132865*m.b438) + 0.434294481903252*log(1 + 511.298188957026* m.b439) + 0.434294481903252*log(1 + 6002.62735391228*m.b440) + 
0.434294481903252*log(1 + 29361.6961708015*m.b441) + 0.434294481903252*log(1 + 3038.30211605352*m.b442) + 0.434294481903252*log(1 + 1019.55989596854*m.b443) + 0.434294481903252*log(1 + 509.028970420411* m.b444) + 0.434294481903252*log(1 + 1570.42520784469*m.b445) + 0.434294481903252*log(1 + 5275.38770297345*m.b446) + 0.434294481903252*log(1 + 584.131489871035*m.b447) + 0.434294481903252*log(1 + 234.963573135633*m.b448) + 0.434294481903252*log(1 + 854.516325216566* m.b449) + 0.434294481903252*log(1 + 22278.8514420317*m.b450) + 0.434294481903252*log(1 + 213.671919540301*m.b451) + 0.434294481903252*log(1 + 11.1684948220821*m.b452) + 0.434294481903252*log(1 + 41.0731531189741*m.b453) + 0.434294481903252*log(1 + 430.559353760991* m.b454) + 0.434294481903252*log(1 + 5196.13561645755*m.b455) + 0.434294481903252*log(1 + 3589.97186789298*m.b456) + 0.434294481903252*log(1 + 2842.12999595488*m.b457) + 0.434294481903252*log(1 + 9343.97729133051*m.b458) + 0.434294481903252*log(1 + 308.836217905704* m.b459) + 0.434294481903252*log(1 + 14.8582390734279*m.b460) + 0.434294481903252*log(1 + 992.410778829705*m.b461) + 0.434294481903252*log(1 + 10387.5133145329*m.b462) + 0.434294481903252*log(1 + 2468.43131982649*m.b463) + 0.434294481903252*log(1 + 6712.22267223199* m.b464) + 0.434294481903252*log(1 + 2963.5530515031*m.b465) + 0.434294481903252*log(1 + 7074.91117520856*m.b466) + 0.434294481903252*log(1 + 1497.86622679251*m.b467) + 0.434294481903252*log(1 + 3946.60875538217*m.b468) + 0.434294481903252*log(1 + 5465.51859830608* m.b469) + 0.434294481903252*log(1 + 1812.98545622816*m.b470) + 0.434294481903252*log(1 + 311.397431465084*m.b471) + 0.434294481903252*log(1 + 73793.8020703349*m.b472) + 0.434294481903252*log(1 + 3359.06015301475*m.b473) + 0.434294481903252*log(1 + 5103.58224704446* m.b474) + 0.434294481903252*log(1 + 1077.31867258415*m.b475) + 0.434294481903252*log(1 + 1.93760239144411*m.b476) + 0.434294481903252*log(1 + 984.70285127129*m.b477) + 0.434294481903252 *log(1 + 
2337.28408918471*m.b478) + 0.434294481903252*log(1 + 1254.13243360348*m.b479) + 0.434294481903252*log(1 + 2872.06730434175*m.b480) + 0.434294481903252*log(1 + 68.1265222697127* m.b481) + 0.434294481903252*log(1 + 8755.33939619909*m.b482) + 0.434294481903252*log(1 + 1936.75042110481*m.b483) + 0.434294481903252*log(1 + 7148.66816201746*m.b484) + 0.434294481903252*log(1 + 16.9354408418557*m.b485) + 0.434294481903252*log(1 + 6492.38262333412* m.b486) + 0.434294481903252*log(1 + 5194.71243938615*m.b487) + 0.434294481903252*log(1 + 12.7105950312914*m.b488) + 0.434294481903252*log(1 + 2152.87386742329*m.b489) + 0.434294481903252*log(1 + 5286.81506305987*m.b490) + 0.434294481903252*log(1 + 9279.3457526658* m.b491) + 0.434294481903252*log(1 + 682.178004091211*m.b492) + 0.434294481903252*log(1 + 2004.84490453662*m.b493) + 0.434294481903252*log(1 + 7668.24541476436*m.b494) +
""" Abstract conv interface """ from __future__ import absolute_import, print_function, division import logging from six import reraise, integer_types import sys import theano from theano.tensor import as_tensor_variable, patternbroadcast from theano.tensor import get_scalar_constant_value, NotScalarConstantError from theano.gof import Apply, Op from six.moves import xrange import warnings import numpy import numpy as np try: from scipy.signal.signaltools import _valfrommode, _bvalfromboundary from scipy.signal.sigtools import _convolve2d imported_scipy_signal = True except ImportError: imported_scipy_signal = False __docformat__ = "restructuredtext en" _logger = logging.getLogger("theano.tensor.nnet.abstract_conv") def get_conv_output_shape(image_shape, kernel_shape, border_mode, subsample): """ This function compute the output shape of convolution operation. Parameters ---------- image_shape: tuple of int (symbolic or numeric) corresponding to the input image shape. Its four (or five) element must correspond respectively to: batch size, number of input channels, height and width (and possibly depth) of the image. None where undefined. kernel_shape: tuple of int (symbolic or numeric) corresponding to the kernel shape. Its four (or five) elements must correspond respectively to: number of output channels, number of input channels, height and width (and possibly depth) of the kernel. None where undefined. border_mode: string, int (symbolic or numeric) or tuple of int (symbolic or numeric). If it is a string, it must be 'valid', 'half' or 'full'. If it is a tuple, its two (or three) elements respectively correspond to the padding on height and width (and possibly depth) axis. subsample: tuple of int (symbolic or numeric). Its or three elements espectively correspond to the subsampling on height and width (and possibly depth) axis. Returns ------- output_shape: tuple of int corresponding to the output image shape. 
Its four element must correspond respectively to: batch size, number of output channels, height and width of the image. None where undefined. """ bsize, imshp = image_shape[0], image_shape[2:] nkern, kshp = kernel_shape[0], kernel_shape[2:] if isinstance(border_mode, tuple): out_shp = tuple(get_conv_shape_1axis( imshp[i], kshp[i], border_mode[i], subsample[i]) for i in range(len(subsample))) else: out_shp = tuple(get_conv_shape_1axis( imshp[i], kshp[i], border_mode, subsample[i]) for i in range(len(subsample))) return (bsize, nkern) + out_shp def get_conv_shape_1axis(image_shape, kernel_shape, border_mode, subsample): """ This function compute the output shape of convolution operation. Parameters ---------- image_shape: int or None. Corresponds to the input image shape on a given axis. None if undefined. kernel_shape: int or None. Corresponds to the kernel shape on a given axis. None if undefined. border_mode: string or int. If it is a string, it must be 'valid', 'half' or 'full'. If it is an integer, it must correspond to the padding on the considered axis. subsample: int. It must correspond to the subsampling on the considered axis. Returns ------- out_shp: int corresponding to the output image shape on the considered axis. None if undefined. """ if None in [image_shape, kernel_shape, border_mode, subsample]: return None if border_mode == "half": pad = kernel_shape // 2 elif border_mode == "full": pad = kernel_shape - 1 elif border_mode == "valid": pad = 0 else: pad = border_mode if pad < 0: raise ValueError("border_mode must be >= 0") out_shp = (image_shape + 2 * pad - kernel_shape) // subsample + 1 return out_shp def conv2d(input, filters, input_shape=None, filter_shape=None, border_mode='valid', subsample=(1, 1), filter_flip=True): """This function will build the symbolic graph for convolving a mini-batch of a stack of 2D inputs with a set of 2D filters. The implementation is modelled after Convolutional Neural Networks (CNN). 
Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for a more detailed documentation. """ input = as_tensor_variable(input) filters = as_tensor_variable(filters) conv_op = AbstractConv2d(imshp=input_shape, kshp=filter_shape, border_mode=border_mode, subsample=subsample, filter_flip=filter_flip) return conv_op(input, filters) def conv2d_grad_wrt_inputs(output_grad, filters, input_shape, filter_shape=None, border_mode='valid', subsample=(1, 1), filter_flip=True): """Compute conv output gradient w.r.t its inputs This function builds the symbolic graph for getting the gradient of the output of a convolution (namely output_grad) w.r.t the input of the convolution, given a set of 2D filters used by the convolution, such that the output_grad is upsampled to the input_shape. Parameters ---------- output_grad : symbolic 4D tensor mini-batch of feature map stacks, of shape (batch size, input channels, input rows, input columns). This is the tensor that will be upsampled or the output gradient of the convolution whose gradient will be taken with respect to the input of the convolution. filters : symbolic 4D tensor set of filters used in CNN layer of shape (output channels, input channels, filter rows, filter columns). See the optional parameter ``filter_shape``. input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2 The shape of the input (upsampled) parameter. A tuple/list of len 4, with the first two dimensions being None or int or Constant and the last two dimensions being Tensor or int or Constant. Not Optional, since given the output_grad shape and the subsample values, multiple input_shape may be plausible. filter_shape : None or [None/int/Constant] * 4 The shape of the filters parameter. None or a tuple/list of len 4. Optional, possibly used to choose an optimal implementation. You can give ``None`` for any element of the list to specify that this element is not known at compile time. 
border_mode : str, int or tuple of two int Either of the following: ``'valid'`` apply filter wherever it completely overlaps with the input. Generates output of shape: input shape - filter shape + 1 ``'full'`` apply filter wherever it partly overlaps with the input. Generates output of shape: input shape + filter shape - 1 ``'half'`` pad input with a symmetric border of ``filter rows // 2`` rows and ``filter columns // 2`` columns, then perform a valid convolution. For filters with an odd number of rows and columns, this leads to the output shape being equal to the input shape. It is known as 'same' elsewhere. ``int`` pad input with a symmetric border of zeros of the given width, then perform a valid convolution. ``(int1, int2)`` pad input with a symmetric border of ``int1`` rows and ``int2`` columns, then perform a valid convolution. subsample : tuple of len 2 The subsampling used in the forward pass. Also called strides elsewhere. filter_flip : bool If ``True``, will flip the filter rows and columns before sliding them over the input. This operation is normally referred to as a convolution, and this is the default. If ``False``, the filters are not flipped and the operation is referred to as a cross-correlation. Returns ------- symbolic 4D tensor set of feature maps generated by convolutional layer. Tensor is of shape (batch size, output channels, output rows, output columns) Notes ----- :note: If CuDNN is available, it will be used on the GPU. Otherwise, it is the *CorrMM* convolution that will be used "caffe style convolution". :note: This is only supported in Theano 0.8 or the development version until it is released. 
""" filters = as_tensor_variable(filters) output_grad = as_tensor_variable(output_grad) # checking the type of input_shape for dim in [0, 1]: assert isinstance(input_shape[dim], (theano.tensor.TensorConstant, integer_types, type(None))) for dim in [2, 3]: assert isinstance(input_shape[dim], (theano.tensor.TensorVariable, theano.tensor.TensorConstant, integer_types)) # checking the type of filter_shape if filter_shape is not None: for dim in [0, 1, 2, 3]: assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant, integer_types, type(None))) # setting the last two dimensions of input_shape to None, if # the type of these dimensions is TensorVariable. numerical_input_shape = list(input_shape) for dim in [2, 3]: if isinstance(input_shape[dim], theano.tensor.TensorVariable): numerical_input_shape[dim] = None grad_input_op = AbstractConv2d_gradInputs(imshp=numerical_input_shape, kshp=filter_shape, border_mode=border_mode, subsample=subsample, filter_flip=filter_flip) return grad_input_op(filters, output_grad, input_shape[-2:]) def conv2d_grad_wrt_weights(input, output_grad, filter_shape, input_shape=None, border_mode='valid', subsample=(1, 1), filter_flip=True): """Compute conv output gradient w.r.t its weights This function will build the symbolic graph for getting the gradient of the output of a convolution (output_grad) w.r.t its wights. Parameters ---------- input : symbolic 4D tensor mini-batch of feature map stacks, of shape (batch size, input channels, input rows, input columns). This is the input of the convolution in the forward pass. output_grad : symbolic 4D tensor mini-batch of feature map stacks, of shape (batch size, input channels, input rows, input columns). This is the gradient of the output of convolution. filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2 The shape of the filter parameter. A tuple/list of len 4, with the
a BGP backdoor route type: bool route_map: description: Route-map to modify the attributes type: str redistribute: description: Redistribute information from another routing protocol type: list elements: dict suboptions: application: description: Application type: dict suboptions: name: description: Application name type: str metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str bgp: description: Border Gateway Protocol (BGP) type: dict suboptions: as_number: description: Autonomous system number type: str metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str connected: description: Connected type: dict suboptions: metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str eigrp: description: Enhanced Interior Gateway Routing Protocol (EIGRP) type: dict suboptions: as_number: description: Autonomous system number type: str metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str isis: description: ISO IS-IS type: dict suboptions: area_tag: description: ISO routing area tag type: str clns: description: Redistribution of OSI dynamic routes type: bool ip: description: Redistribution of IP dynamic routes type: bool metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str iso_igrp: description: IGRP for OSI networks type: dict suboptions: area_tag: description: ISO routing area tag type: str route_map: description: Route map reference type: str lisp: description: Locator ID Separation Protocol (LISP) type: dict suboptions: metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str mobile: description: Mobile routes type: dict suboptions: metric: description: Metric for redistributed routes type: int route_map: 
description: Route map reference type: str odr: description: On Demand stub Routes type: dict suboptions: metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str ospf: description: Open Shortest Path First (OSPF) type: dict suboptions: process_id: description: Process ID type: int match: description: On Demand stub Routes type: dict suboptions: external: description: Redistribute OSPF external routes type: bool internal: description: Redistribute OSPF internal routes type: bool nssa_external: description: Redistribute OSPF NSSA external routes type: bool type_1: description: Redistribute NSSA external type 1 routes type: bool type_2: description: Redistribute NSSA external type 2 routes type: bool metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str vrf: description: VPN Routing/Forwarding Instance type: str ospfv3: description: OSPFv3 type: dict suboptions: process_id: description: Process ID type: int match: description: On Demand stub Routes type: dict suboptions: external: description: Redistribute OSPF external routes type: bool internal: description: Redistribute OSPF internal routes type: bool nssa_external: description: Redistribute OSPF NSSA external routes type: bool type_1: description: Redistribute NSSA external type 1 routes type: bool type_2: description: Redistribute NSSA external type 2 routes type: bool metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str rip: description: Routing Information Protocol (RIP) type: dict suboptions: metric: description: Metric for redistributed routes type: int route_map: description: Route map reference type: str static: description: Static routes type: dict suboptions: clns: description: Redistribution of OSI static routes type: bool ip: description: Redistribution of IP static routes type: bool metric: description: Metric for 
redistributed routes type: int route_map: description: Route map reference type: str vrf: description: Specify a source VRF type: dict suboptions: name: description: Source VRF name type: str global: description: global VRF type: bool snmp: description: Modify snmp parameters type: dict suboptions: context: description: - Configure a SNMP context - Context Name type: dict suboptions: name: description: Context Name type: str community: description: Configure a SNMP v2c Community string and access privs type: dict suboptions: snmp_community: description: SNMP community string type: str acl: description: - Standard IP accesslist allowing access with this community string - Expanded IP accesslist allowing access with this community string - Access-list name type: str ipv6: description: - Specify IPv6 Named Access-List - IPv6 Access-list name type: str ro: description: Read-only access with this community string type: bool rw: description: Read-write access with this community string type: bool user: description: Configure a SNMP v3 user type: dict suboptions: name: description: SNMP community string type: str access: description: specify an access-list associated with this group type: dict suboptions: acl: description: SNMP community string type: str ipv6: description: - Specify IPv6 Named Access-List - IPv6 Access-list name type: str auth: description: authentication parameters for the user type: dict suboptions: md5: description: - Use HMAC MD5 algorithm for authentication - authentication password for user type: str sha: description: - Use HMAC SHA algorithm for authentication - authentication password for user type: str priv: description: encryption parameters for the user type: dict suboptions: des: description: Use 56 bit DES algorithm for encryption type: str credential: description: If the user password is already configured and saved type: bool encrypted: description: specifying passwords as MD5 or SHA digests type: bool table_map: description: Map external 
entry attributes into routing table type: dict suboptions: name: description: route-map name type: str filter: description: Selective route download type: bool running_config: description: - This option is used only with state I(parsed). - The value of this option should be the output received from the IOS device by executing the command B(sh running-config | section ^router bgp). - The state I(parsed) reads the configuration from C(running_config) option and transforms it into Ansible structured data as per the resource module's argspec and the value is then returned in the I(parsed) key within the result. type: str state: choices: - merged - replaced - overridden - deleted - gathered - rendered - parsed default: merged description: - The state the configuration should be left in - The states I(rendered), I(gathered) and I(parsed) does not perform any change on the device. - The state I(rendered) will transform the configuration in C(config) option to platform specific CLI commands which will be returned in the I(rendered) key within the result. For state I(rendered) active connection to remote host is not required. - The state I(gathered) will fetch the running configuration from device and transform it into structured data in the format as per the resource module argspec and the value is returned in the I(gathered) key within the result. - The state I(parsed) reads the configuration from C(running_config) option and transforms it into JSON format as per the resource module parameters and the value is returned in the I(parsed) key within the result. The value of C(running_config) option should be the same format as the output of command I(show running-config | include ip route|ipv6 route) executed on device. For state I(parsed) active connection to remote host is not required. 
type: str """ EXAMPLES = """ # Using merged # Before state: # ------------- # # vios#sh running-config | section ^router bgp # router bgp 65000 # bgp log-neighbor-changes # bgp nopeerup-delay cold-boot 20 - name: Merge provided configuration with device configuration cisco.ios.ios_bgp_address_family: config: as_number: 65000 address_family: - afi: ipv4 safi: multicast vrf: blue aggregate_address: - address: 192.0.2.1 netmask: 255.255.255.255 as_confed_set: true bgp: aggregate_timer: 10 dampening: penalty_half_time: 1 reuse_route_val: 1 suppress_route_val: 1 max_suppress: 1 slow_peer: - detection: threshold: 150 neighbor: - address: 198.51.100.1 aigp: send: cost_community: id: 100 poi: igp_cost: true transitive: true slow_peer: - detection: threshold: 150 remote_as: 10 route_map: - name: test-route-out out: true - name: test-route-in in: true route_server_client: true network: - address: 192.168.3.11 mask: 255.255.255.255 backdoor: true snmp: context: name: snmp_con community: snmp_community: community ro: true acl: 10 - afi: ipv4 safi: mdt bgp: dmzlink_bw: true dampening: penalty_half_time: 1 reuse_route_val: 10 suppress_route_val: 100 max_suppress: 5 soft_reconfig_backup: true - afi: ipv4 safi: multicast aggregate_address: - address: 172.16.31.10 netmask: 255.255.255.255 as_confed_set: true default_metric: 12 distance: external: 10 internal: 10 local: 100 network: - address: 172.16.58.3 mask: 255.255.255.255 route_map: test table_map: name: test_tableMap filter: true state: merged # Commands fired: # --------------- # "commands": [ # "router bgp 65000", # "address-family ipv4 multicast vrf blue", # "bgp aggregate-timer 10", # "bgp slow-peer detection threshold 150", # "bgp dampening 1 1 1 1", # "neighbor 198.51.100.1
tile coordinates to Microsoft QuadTree" quadKey = "" ty = (2**zoom - 1) - ty for i in range(zoom, 0, -1): digit = 0 mask = 1 << (i-1) if (tx & mask) != 0: digit += 1 if (ty & mask) != 0: digit += 2 quadKey += str(digit) return quadKey class Point: def __init__(self): self.x = 0.0 self.y = 0.0 def __init__(self, x, y): self.x = x self.y = y class Cache: def __init__(self): self.path = None self.xmlTree = None self.wkt = None self.min_x = None self.min_y = None self.max_x = None self.max_y = None self.matrix_min_x = None self.matrix_min_y = None self.matrix_max_x = None self.matrix_max_y = None self.tileStart = None self.tileStop = None self.levels = [] self.mercator = GlobalMercator() self.verbose = False self.srs_id = 0 self.srs_org_id = 3857 self.srs_org_name = 'EPSG' self.level_info = collections.namedtuple('level_info', ['startX', 'startY', 'stopX', 'stopY', 'matrix_width', 'matrix_height', 'zoom_level', 'pixel_x_size', 'pixel_y_size', 'offset_x', 'offset_y']) self.level_infos = [] def deg2num(self, lat_deg, lon_deg, zoom): lat_rad = math.radians(lat_deg) n = 2.0 ** zoom xtile = int((lon_deg + 180.0) / 360.0 * n) ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n) return (xtile, ytile) def num2deg(self, xtile, ytile, zoom): n = 2.0 ** zoom lon_deg = xtile / n * 360.0 - 180.0 lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n))) lat_deg = math.degrees(lat_rad) return (lat_deg, lon_deg) def parseXML(self, path): self.xmlTree = ET.ElementTree(file=path) wktElement = self.xmlTree.iterfind('SpatialReference/WKT') if wktElement is not None: self.wkt = next(wktElement).text originElement = self.xmlTree.iterfind('XMin') if originElement is not None: self.min_x = float(next(originElement).text) originElement = self.xmlTree.iterfind('YMin') if originElement is not None: self.min_y = float(next(originElement).text) originElement = self.xmlTree.iterfind('XMax') if originElement is not None: self.max_x = 
float(next(originElement).text) originElement = self.xmlTree.iterfind('YMax') if originElement is not None: self.max_y = float(next(originElement).text) print("self.max_x = {0}".format(self.max_x)) print("self.max_y = {0}".format(self.max_y)) if self.max_y > self.min_y: tmp = self.min_y self.min_y = self.max_y self.max_y = tmp print("self.max_x = {0}".format(self.max_x)) print("self.max_y = {0}".format(self.max_y)) if self.wkt is None or self.min_x is None or self.min_y is None or self.max_x is None or self.max_y is None: return False latestWKIDElement = self.xmlTree.iterfind('SpatialReference/LatestWKID') if latestWKIDElement is not None: self.srs_id = int(next(latestWKIDElement).text) print("Meters: {0}, {1}, {2}, {3}".format(self.min_x, self.min_y, self.max_x, self.max_y)) ulLatLon = self.mercator.MetersToLatLon(self.min_x, self.min_y) lrLatLon = self.mercator.MetersToLatLon(self.max_x, self.max_y) print("Lat/Lon: {0}, {1}, {2}, {3}".format(ulLatLon[0], ulLatLon[1], lrLatLon[0], lrLatLon[1])) startX, startY, stopX, stopY = self.getTileStartStopLL(ulLatLon[0], ulLatLon[1], lrLatLon[0], lrLatLon[1], self.levels[0]) ulLatLon = self.num2deg(startX, startY, self.levels[0]) lrLatLon = self.num2deg(stopX, stopY, self.levels[0]) self.matrix_min_x, self.matrix_max_y = self.mercator.LatLonToMeters(ulLatLon[0], ulLatLon[1]) self.matrix_max_x, self.matrix_min_y = self.mercator.LatLonToMeters(lrLatLon[0], lrLatLon[1]) for index, level in enumerate(self.levels): if index == 0: startX, startY, stopX, stopY = self.getTileStartStop(self.levels[0]) else: prev = self.level_infos[index - 1] startX = prev.startX * 2 startY = prev.startY * 2 stopX = prev.stopX * 2 stopY = prev.stopY * 2 self.level_infos.append(self.level_info(startX=startX, startY=startY, stopX=stopX, stopY=stopY, matrix_width=self.mercator.MatrixDim(level), matrix_height=self.mercator.MatrixDim(level), zoom_level=level, pixel_x_size=self.mercator.Resolution(level), pixel_y_size=self.mercator.Resolution(level), 
offset_x=0, offset_y=0)) print("Tile(s)[{0}]: {1}, {2}, {3}, {4}".format(level, startX, startY, stopX-1, stopY-1)) return True def findZ(self, path): for entry in os.listdir(path): entry_path = os.path.join(path, entry) entry_lower = entry.lower() if os.path.isdir(entry_path) and fnmatch.fnmatch(entry_lower, 'l??'): level_str = entry_lower.split('l') if len(level_str) == 2: self.levels.append(int(level_str[1])) if not self.levels: return False self.levels = sorted(self.levels) print("Found level(s): {0}".format(str(self.levels).strip('[]'))) return True def getTileStartStopLL(self, min_x, min_y, max_x, max_y, level): if level not in self.levels: return (0, 0), (0, 0) startX, startY = self.deg2num(min_x, min_y, level) stopX, stopY = self.deg2num(max_x, max_y, level) return startX, startY, stopX+1, stopY+1 def getTileStartStopTMS(self, level): if level not in self.levels: return (0, 0), (0, 0) tileStartTMS = self.mercator.MetersToTile(self.min_x, self.min_y, level) tileStopTMS = self.mercator.MetersToTile(self.max_x, self.max_y, level) startX = tileStartTMS[0] startY = tileStartTMS[1] stopX = tileStopTMS[0] stopY = tileStopTMS[1] return startX, startY, stopX+1, stopY+1 def getTileStartStop(self, level): if level not in self.levels: return (0, 0), (0, 0) tileStartTMS = self.mercator.MetersToTile(self.min_x, self.min_y, level) tileStart = self.mercator.GoogleTile(tileStartTMS[0], tileStartTMS[1], level) tileStopTMS = self.mercator.MetersToTile(self.max_x, self.max_y, level) tileStop = self.mercator.GoogleTile(tileStopTMS[0], tileStopTMS[1], level) startX = tileStart[0] startY = tileStart[1] stopX = tileStop[0] stopY = tileStop[1] return startX, startY, stopX+1, stopY+1 def getTilePath(self, x, y, level): levelPath = "L{0:02d}".format(level) rowPath = "R{0:08x}".format(y) columnPath = "C{0:08x}".format(x) return os.path.join(self.path, "_alllayers", levelPath, rowPath, columnPath) def findTile(self, path): jpgPath = path + '.jpg' if os.path.exists(jpgPath): return 
jpgPath jpegPath = path + '.jpeg' if os.path.exists(jpegPath): return jpegPath pngPath = path + '.png' if os.path.exists(pngPath): return pngPath return None def checkTiles(self): for level in self.levels: startX, startY, stopX, stopY = self.getTileStartStop(level) for y in range(startY, stopY): for x in range(startX, stopX): tilePath = self.getTilePath(x, y, level) foundTilePath = self.findTile(tilePath) if foundTilePath is None: print("Missing tile: {0}.png/.jpg".format(tilePath)) elif self.verbose: print("Found tile: {0}".format(tilePath)) print("Required tiles found at expected locations.") return True def open(self, path): if not os.path.isdir(path): return False self.path = path levelsDir = os.path.join(self.path, '_alllayers') if not self.findZ(levelsDir): return False xmlFile = os.path.join(self.path, 'conf.cdi') if not self.parseXML(xmlFile): return False if self.verbose: self.checkTiles() return True class GeoPackage: """ Simple class to add tiles to an existing or new GeoPackage using GDAL. """ def __init__(self): self.connection = None self.filename = None self.tile_width = 256 self.tile_height = 256 self.sr_organization = "NONE" self.sr_organization_coordsys_id = 0 self.sr_description = None self.description = None self.cache = Cache() self.verbose = False def __del__(self): if self.connection is not None: self.connection.close() def write_srs(self, srs_name): """ Write SRS to gpkg_spatial_ref_sys table and return srs_id. @param wkt: WKT string. @param srs_name: Value for srs_name field. @return: srs_id for new entry or -1 (undefined cartesian) """ if self.cache.wkt is None: return -1 result = self.connection.execute("""SELECT * FROM gpkg_spatial_ref_sys WHERE srs_id=?;""", (self.cache.srs_id,)).fetchone() if result is None: self.connection.execute( """ INSERT INTO gpkg_spatial_ref_sys(srs_name, srs_id, organization, organization_coordsys_id, definition) VALUES(?, ?, ?, ?, ?) 
                """, (srs_name, self.cache.srs_id, self.cache.srs_org_name,
                      self.cache.srs_org_id, self.cache.wkt))
            self.connection.commit()
            return self.cache.srs_id
        else:
            # SRS already registered; reuse the stored id.
            return result['srs_id']

    def add_cache(self, path):
        # Import a compact cache at *path* as a new tile table (plus metadata
        # rows and integrity triggers) in the open GeoPackage.
        # Returns False when the cache cannot be opened or the identifier /
        # table name already exists in gpkg_contents.
        if not self.cache.open(path):
            return False
        identifier = os.path.basename(path)
        # Sanitize the table name: strip punctuation and ensure it starts
        # with a letter (SQLite identifiers should not start with a digit).
        table_name = re.sub('[.~,;-]', '', identifier + TILES_TABLE_SUFFIX)
        if not table_name[0].isalpha():
            table_name = TILES_TABLE_PREFIX + table_name
        #table_name = table_name.lower()
        # Normalize the bbox so matrix_min_y <= matrix_max_y.
        if self.cache.matrix_max_y < self.cache.matrix_min_y:
            tmp = self.cache.matrix_min_y
            self.cache.matrix_min_y = self.cache.matrix_max_y
            self.cache.matrix_max_y = tmp
        if self.connection.execute(
                """SELECT * FROM gpkg_contents WHERE identifier=? OR table_name=?""",
                (identifier, table_name)).fetchone() is not None:
            print("An entry with identifier {0} and/or table_name {1} already exists in gpkg_contents.".format(identifier, table_name))
            return False
        if self.cache.srs_id == 3857:
            # NOTE(review): srs_id is never read afterwards — presumably only
            # the side effect (registering the SRS row) matters; confirm.
            srs_id = self.write_srs('Web Mercator')
        if self.description is None:
            self.description = path
        try:
            self.connection.execute(
                """
                INSERT INTO gpkg_contents(table_name, data_type, identifier, description, min_x, min_y, max_x, max_y, srs_id)
                VALUES(?, 'tiles', ?, ?, ?, ?, ?, ?, ?);
                """,
                (table_name, identifier, self.description,
                 self.cache.matrix_min_x, self.cache.matrix_min_y,
                 self.cache.matrix_max_x, self.cache.matrix_max_y,
                 self.cache.srs_id)
            )
            # The tile matrix set covers the full Web Mercator extent
            # (± originShift), not just the cache bbox.
            self.connection.execute(
                """
                INSERT INTO gpkg_tile_matrix_set(table_name, srs_id, min_x, min_y, max_x, max_y)
                VALUES(?, ?, ?, ?, ?, ?);
                """,
                (table_name, self.cache.srs_id,
                 -self.cache.mercator.originShift, -self.cache.mercator.originShift,
                 self.cache.mercator.originShift, self.cache.mercator.originShift)
            )
            # Per-table tile pyramid table, one row per tile.
            sql_string = """
            CREATE TABLE """ + table_name + """ (
              id INTEGER PRIMARY KEY AUTOINCREMENT,
              zoom_level INTEGER NOT NULL,
              tile_column INTEGER NOT NULL,
              tile_row INTEGER NOT NULL,
              tile_data BLOB NOT NULL,
              UNIQUE (zoom_level, tile_column, tile_row)
            );
            """
            self.connection.execute(sql_string)
            # GeoPackage integrity triggers: reject tiles whose zoom level or
            # column/row fall outside what gpkg_tile_matrix declares.
            sql_string = """
            CREATE TRIGGER '""" + table_name + """_zoom_insert'
            BEFORE INSERT ON '""" + table_name + """'
            FOR EACH ROW BEGIN
            SELECT RAISE(ABORT, 'insert on table """ + table_name + """ violates constraint: zoom_level not specified for table in gpkg_tile_matrix')
            WHERE NOT (NEW.zoom_level IN (SELECT zoom_level FROM gpkg_tile_matrix WHERE table_name = '""" + table_name + """')) ;
            END
            """
            self.connection.execute(sql_string)
            sql_string = """
            CREATE TRIGGER '""" + table_name + """_zoom_update'
            BEFORE UPDATE OF zoom_level ON '""" + table_name + """'
            FOR EACH ROW BEGIN
            SELECT RAISE(ABORT, 'update on table """ + table_name + """ violates constraint: zoom_level not specified for table in gpkg_tile_matrix')
            WHERE NOT (NEW.zoom_level IN (SELECT zoom_level FROM gpkg_tile_matrix WHERE table_name = '""" + table_name + """')) ;
            END
            """
            self.connection.execute(sql_string)
            sql_string = """
            CREATE TRIGGER '""" + table_name + """_tile_column_insert'
            BEFORE INSERT ON '""" + table_name + """'
            FOR EACH ROW BEGIN
            SELECT RAISE(ABORT, 'insert on table """ + table_name + """ violates constraint: tile_column cannot be < 0')
            WHERE (NEW.tile_column < 0) ;
            SELECT RAISE(ABORT, 'insert on table """ + table_name + """ violates constraint: tile_column must by < matrix_width specified for table and zoom level in gpkg_tile_matrix')
            WHERE NOT (NEW.tile_column < (SELECT matrix_width FROM gpkg_tile_matrix WHERE table_name =
import aiohttp import asyncio import contextlib import functools import sys import zlib from collections.abc import Iterable, Mapping from datetime import date as Date from lxml import etree, objectify from types import MappingProxyType from typing import ( Any as _Any, AsyncGenerator as _AsyncGenerator, ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union, ) from urllib.parse import parse_qs, unquote_plus, urlencode, urlparse, urlunparse from .errors import BadRequest from .info import API_URL, __version__ from .objects import Threadsafe, NSResponse from .utils import get_running_loop from ._lock import ResetLock __all__ = ["Api", "Dumps", "Archives"] AGENT_FMT = f"{{}} Python/{sys.version_info[0]}.{sys.version_info[1]} aiohttp/{aiohttp.__version__} sans/{__version__}" API_VERSION = "10" PINS: dict = {} # Set of keys that should be added to rather than overwritten class _Adds: @staticmethod def q(x: str, y: str): return " ".join((x, y)) scale, mode, filter, tags = q, q, q, q @staticmethod def view(x, y): xs, ys = x.split("."), y.split(".") if len(xs) != 1: raise ValueError() if len(ys) != 1: raise ValueError() if xs[0] != ys[0] or len(xs) != 1 or len(ys) != 1: # overwrite return y return "{}.{},{}".format(xs[0], xs[1], ys[1]) def _normalize_dicts(*dicts: _Mapping[str, _Iterable[str]]): final: dict = {} for d in dicts: for k, v in d.items(): if not all((k, v)): continue if not isinstance(v, str) and isinstance(v, Iterable): v = " ".join(map(str, v)) v = unquote_plus(str(v)).strip().strip("_") if not v: continue if k in final: with contextlib.suppress(AttributeError, TypeError): final[k] = getattr(_Adds, k)(final[k], v) continue final[k] = v final.setdefault("v", API_VERSION) return final class ApiMeta(type): """ The :class:`Api`'s metaclass. Lazily loads and controls access and settings to various global states, accessible by attribute access from the :class:`Api` class. 
    """

    def __init__(cls, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Lazily-initialized globals shared by every request.
        cls._agent = None
        cls.__lock = None
        cls._loop = None
        cls._session = None

    @property
    def agent(cls) -> _Optional[str]:
        """The API wrapper's set user agent."""
        return cls._agent

    @agent.setter
    def agent(cls, value: str) -> None:
        # Falsy values are ignored; the raw value is embedded in AGENT_FMT
        # alongside Python/aiohttp/sans version info.
        if value:
            cls._agent = AGENT_FMT.format(value)

    @property
    def loop(cls) -> _Optional[asyncio.AbstractEventLoop]:
        """The API wrapper's event loop. Cannot be changed once it has been set or referenced."""
        if not cls._loop:
            cls._loop = get_running_loop()
        return cls._loop

    @loop.setter
    def loop(cls, loop: asyncio.AbstractEventLoop) -> None:
        if cls._loop and cls._loop != loop:
            raise asyncio.InvalidStateError("Cannot change loop when it's already set!")
        cls._loop = loop

    @property
    def session(cls) -> aiohttp.ClientSession:
        """The API wrapper's HTTP client session."""
        # NOTE(review): statement nesting reconstructed from collapsed source —
        # confirm against upstream; a closed session is transparently replaced.
        if not cls._session or cls._session.closed:
            loop = cls.loop
            if not loop:
                cls.loop = asyncio.get_event_loop()
            cls._session = aiohttp.ClientSession(loop=loop, response_class=NSResponse)
        return cls._session

    @property
    def _lock(cls) -> ResetLock:
        # Rate-limit lock, created on first use against the wrapper's loop.
        if not cls.__lock:
            loop = cls.loop
            if not loop:
                cls.loop = asyncio.get_event_loop()
            cls.__lock = ResetLock(loop=loop)
        return cls.__lock

    @property
    def locked(cls) -> _Optional[bool]:
        """
        Whether or not the API's lock is currently locked.

        As every request acquires the lock, use :attr:`xra` to check
        if the rate limit is saturated instead.
        """
        if cls.__lock:
            return cls.__lock.locked()
        return None

    @property
    def xra(cls) -> _Optional[float]:
        """
        If the rate limit is currently saturated, returns when another
        API call can be made in the form of a Unix timestamp.
        Otherwise, returns `None`.
        """
        if cls.__lock:
            return cls.__lock.xra
        return None


class Api(metaclass=ApiMeta):
    r"""
    Construct and send an NS API request.

    NS API docs can be found here: https://www.nationstates.net/pages/api.html

    This is a low-level API wrapper. Some attempts will be made to prevent bad requests,
    but it will not check shards against a verified list.

    Authentication may be provided for private nation shards.
    X-Pin headers are stored internally and globally for ease of use.

    Api objects may be awaited or asynchronously iterated.
    To perform operations from another thread, use the :attr:`threadsafe` property.

    The Api object itself supports all :class:`collections.abc.Mapping` methods.

    ================ ============================================================================
    Operation        Description
    ================ ============================================================================
    await x          Make a request to the NS API and returns the root XML element.
    async for y in x Make a request to the NS API and return each shard element as it is parsed.
                     Useful for larger requests.
    x + y            Combines two :class:`Api` objects together into a new one. Shard keywords
                     that can't be combined will be overwritten with y's data.
    bool(x)          Naïvely check if this :class:`Api` object would result in a 400 Bad Request.
                     Truthy :class:`Api` objects may still result in a 400 Bad Request.
                     Use `len(x)` to check for containment.
    str(x)           Return the URL this :class:`Api` object will make a request to.
    Other            All other :class:`collections.abc.Mapping` methods, except x == y,
                     are also supported.
    ================ ============================================================================

    Parameters
    ----------
    \*shards: str
        Shards to request from the API.
    password: str
        X-Password authentication for private nation shards.
    autologin: str
        X-Autologin authentication for private nation shards.
    \*\*parameters: str
        Query parameters to append to the request, e.g. nation, scale.
Examples -------- Usage:: darc = await Api(nation="darcania") async for shard in Api(nation="testlandia"): print(pretty_string(shard)) tnp = Api(region="the_north_pacific").threadsafe() for shard in Api(region="testregionia").threadsafe: print(pretty_string(shard)) """ __slots__ = ("__proxy", "_password", "_str", "_hash", "_last_response") def __new__( cls, *shards: _Union[str, _Iterable[str]], password: _Optional[str] = None, autologin: _Optional[str] = None, **parameters: str, ): if len(shards) == 1 and not parameters: if isinstance(shards[0], cls): return shards[0] with contextlib.suppress(Exception): return cls.from_url(shards[0]) return super().__new__(cls) def __init__( self, *shards: _Union[str, _Iterable[str]], password: _Optional[str] = None, autologin: _Optional[str] = None, **parameters: str, ): has_nation = "nation" in parameters dicts = [parameters] if parameters else [] for shard in filter(bool, shards): if isinstance(shard, Mapping): dicts.append(shard) if not has_nation and "nation" in shard: has_nation = True else: dicts.append({"q": shard}) if not has_nation and (password or autologin): raise ValueError("Authentication may only be used with the Nation API.") self.__proxy = MappingProxyType(_normalize_dicts(*dicts)) self._password = password self._last_response = None self._str = None self._hash = None async def __await(self): async for element in self.__aiter__(clear=False): pass return element def __await__(self): return self.__await().__await__() async def __aiter__( self, *, clear: bool = True ) -> _AsyncGenerator[objectify.ObjectifiedElement, None]: if not Api.agent: raise RuntimeError("The API's user agent is not yet set.") if "a" in self and self["a"].lower() == "sendtg": raise RuntimeError("This API wrapper does not support API telegrams.") if not self: # Preempt the request to conserve ratelimit raise BadRequest() url = str(self) headers = {"User-Agent": Api.agent} if self._password: headers["X-Password"] = <PASSWORD> autologin = 
self.autologin if autologin: headers["X-Autologin"] = autologin if self.get("nation") in PINS: headers["X-Pin"] = PINS[self["nation"]] async with Api.session.request("GET", url, headers=headers) as response: self._last_response = response if "X-Autologin" in response.headers: self._password = <PASSWORD> if "X-Pin" in response.headers: PINS[self["nation"]] = response.headers["X-Pin"] response.raise_for_status() encoding = ( response.headers["Content-Type"].split("charset=")[1].split(",")[0] ) with contextlib.suppress(etree.XMLSyntaxError), contextlib.closing( etree.XMLPullParser(["end"], base_url=url, remove_blank_text=True) ) as parser: parser.set_element_class_lookup(objectify.ObjectifyElementClassLookup()) events = parser.read_events() async for data, _ in response.content.iter_chunks(): parser.feed(data.decode(encoding)) for _, element in events: if clear and ( element.getparent() is None or element.getparent().getparent() is not None ): continue yield element if clear: element.clear(keep_tail=True) def __add__(self, other: _Any) -> "Api": if isinstance(other, str): with contextlib.suppress(Exception): other = type(self).from_url(other) with contextlib.suppress(Exception): return type(self)(self, other) return NotImplemented def __bool__(self): if any(a in self for a in ("nation", "region")): return True if "a" in self: if self["a"] == "verify" and all(a in self for a in ("nation", "checksum")): return True if self["a"] == "sendtg" and all( a in self for a in ("client", "tgid", "key", "to") ): return True return False return "q" in self def __contains__(self, key: str) -> bool: return key in self.__proxy def __dir__(self): return set(super().__dir__()).union( dir(self.__proxy), (a for a in dir(type(self)) if not hasattr(type, a)) ) __eq__ = None def __getattr__(self, name: str): with contextlib.suppress(AttributeError): return getattr(self.__proxy, name) raise AttributeError(f"{type(self).__name__!r} has no attribute {name!r}") def __getitem__(self, key): return 
self.__proxy[str(key).lower()]

    def __hash__(self):
        # Hash over the sorted (key, value) pairs so logically equal queries
        # hash alike; iterable values are sorted and space-joined first.
        # Cached after first computation.
        if self._hash is not None:
            return self._hash
        params = sorted(
            (k, v if isinstance(v, str) else " ".join(sorted(v)))
            for k, v in self.items()
        )
        self._hash = hash(tuple(params))
        return self._hash

    def __iter__(self):
        return iter(self.__proxy)

    def __len__(self):
        return len(self.__proxy)

    def __repr__(self) -> str:
        return "{}.{}({})".format(
            type(self).__module__,
            type(self).__name__,
            ", ".join("{}={!r}".format(*t) for t in self.__proxy.items()),
        )

    def __str__(self) -> str:
        # Build (and cache) the request URL; iterable values are joined with
        # "+", which urlencode is told to leave unescaped.
        if self._str is not None:
            return self._str
        params = [
            (k, v if isinstance(v, str) else "+".join(v)) for k, v in self.items()
        ]
        self._str = urlunparse((*API_URL, None, urlencode(params, safe="+"), None))
        return self._str

    @property
    def autologin(self) -> _Optional[str]:
        """
        If a private nation shard was properly requested and returned,
        this property may be used to get the "X-Autologin" token.
        """
the value of target. Args: target: the target to which the framebuffer is bound for glcheckframebufferstatus, and the target against which framebuffer completeness of framebuffer is checked for glchecknamedframebufferstatus. ''' @accepts(t.bitfield) @returns(t.void) @binds(dll) def clear(mask): ''' clear buffers to preset values. gl.clear sets the bitplane area of the window to values previously selected by gl.clear_color, gl.clear_depth, and gl.clear_stencil. Multiple color buffers can be cleared simultaneously by selecting more than one buffer at a time using gl.draw_buffer. Args: mask: bitwise or of masks that indicate the buffers to be cleared. ''' @accepts(t.float, t.float, t.float, t.float) @returns(t.void) @binds(dll) def clear_color(red, green, blue, alpha): ''' specify clear values for the color buffers. gl.clear_color specifies the red, green, blue, and alpha values used by gl.clear to clear the color buffers. Values specified by gl.clear_color are clamped to the range 0 1. Args: red: the red, green, blue, and alpha values used when the color buffers are cleared. green: the red, green, blue, and alpha values used when the color buffers are cleared. blue: the red, green, blue, and alpha values used when the color buffers are cleared. alpha: the red, green, blue, and alpha values used when the color buffers are cleared. ''' @accepts(t.float) @returns(t.void) @binds(dll) def clear_depthf(d): pass @accepts(t.int) @returns(t.void) @binds(dll) def clear_stencil(s): ''' specify the clear value for the stencil buffer. gl.clear_stencil specifies the index used by gl.clear to clear the stencil buffer. s is masked with 2 m - 1 , where. Args: s: the index used when the stencil buffer is cleared. ''' @accepts(t.boolean, t.boolean, t.boolean, t.boolean) @returns(t.void) @binds(dll) def color_mask(red, green, blue, alpha): ''' enable and disable writing of frame buffer color components. 
gl.color_mask and gl.color_maski specify whether the individual color components in the frame buffer can or cannot be written. gl.color_maski sets the mask for a specific draw buffer, whereas gl.color_mask sets the mask for all draw buffers. If red is gl.FALSE, for example, no change is made to the red component of any pixel in any of the color buffers, regardless of the drawing operation attempted. Args: red: whether red, green, blue, and alpha are to be written into the frame buffer. green: whether red, green, blue, and alpha are to be written into the frame buffer. blue: whether red, green, blue, and alpha are to be written into the frame buffer. alpha: whether red, green, blue, and alpha are to be written into the frame buffer. ''' @accepts(t.uint) @returns(t.void) @binds(dll) def compile_shader(shader): ''' Compiles a shader object. gl.compile_shader compiles the source code strings that have been stored in the shader object specified by shader. Args: shader: the shader object to be compiled. ''' @accepts(t.enum, t.int, t.enum, t.sizei, t.sizei, t.int, t.sizei, t.void) @returns(t.void) @binds(dll) def compressed_tex_image2_d(target, level, internalformat, width, height, border, imagesize, data): ''' specify a two-dimensional texture image in a compressed format. Args: target: the target texture. level: the level-of-detail number. internalformat: the format of the compressed image data stored at address data. width: the width of the texture image. height: the height of the texture image. border: this value must be 0. imagesize: the number of unsigned bytes of image data starting at the address specified by data. data: a pointer to the compressed image data in memory. ''' @accepts(t.enum, t.int, t.int, t.int, t.sizei, t.sizei, t.enum, t.sizei, t.void) @returns(t.void) @binds(dll) def compressed_tex_sub_image2_d(target, level, xoffset, yoffset, width, height, format, imagesize, data): ''' specify a two-dimensional texture subimage in a compressed format. 
Args: target: the target to which the texture is bound for glcompressedtexsubimage2d function. level: the level-of-detail number. xoffset: a texel offset in the x direction within the texture array. yoffset: a texel offset in the y direction within the texture array. width: the width of the texture subimage. height: the height of the texture subimage. format: the format of the compressed image data stored at address data. imagesize: the number of unsigned bytes of image data starting at the address specified by data. data: a pointer to the compressed image data in memory. ''' @accepts(t.enum, t.int, t.enum, t.int, t.int, t.sizei, t.sizei, t.int) @returns(t.void) @binds(dll) def copy_tex_image2_d(target, level, internalformat, x, y, width, height, border): ''' copy pixels into a 2D texture image. Args: target: the target texture. level: the level-of-detail number. internalformat: the internal format of the texture. x: the window coordinates of the lower left corner of the rectangular region of pixels to be copied. y: the window coordinates of the lower left corner of the rectangular region of pixels to be copied. width: the width of the texture image. height: the height of the texture image. border: must be 0. ''' @accepts(t.enum, t.int, t.int, t.int, t.int, t.int, t.sizei, t.sizei) @returns(t.void) @binds(dll) def copy_tex_sub_image2_d(target, level, xoffset, yoffset, x, y, width, height): ''' copy a two-dimensional texture subimage. Args: target: the target to which the texture object is bound for glcopytexsubimage2d function. level: the level-of-detail number. xoffset: a texel offset in the x direction within the texture array. yoffset: a texel offset in the y direction within the texture array. x: the window coordinates of the lower left corner of the rectangular region of pixels to be copied. y: the window coordinates of the lower left corner of the rectangular region of pixels to be copied. width: the width of the texture subimage. 
height: the height of the texture subimage. ''' @accepts() @returns(t.uint) @binds(dll) def create_program(): ''' Creates a program object. ''' @accepts(t.enum) @returns(t.uint) @binds(dll) def create_shader(type): ''' Creates a shader object. gl.create_shader creates an empty shader object and returns a non-zero value by which it can be referenced. A shader object is used to maintain the source code strings that define a shader. shaderType indicates the type of shader to be created. Five types of shader are supported. Args: type: the type of shader to be created. ''' @accepts(t.enum) @returns(t.void) @binds(dll) def cull_face(mode): ''' specify whether front- or back-facing facets can be culled. gl.cull_face specifies whether front- or back-facing facets are culled when facet culling is enabled. Facet culling is initially disabled. To enable and disable facet culling, call the gl.enable and gl.disable commands with the argument gl.CULL_FACE. Facets include triangles, quadrilaterals, polygons, and rectangles. Args: mode: whether front- or back-facing facets are candidates for culling. ''' @accepts(t.sizei, POINTER(t.uint)) @returns(t.void) @binds(dll) def delete_buffers(n, buffers): ''' delete named buffer objects. gl.delete_buffers deletes n buffer objects named by the elements of the array buffers. After a buffer object is deleted, it has no contents, and its name is free for reuse. If a buffer object that is currently bound is deleted, the binding reverts to 0. Args: n: the number of buffer objects to be deleted. buffers: an array of buffer objects to be deleted. ''' @accepts(t.sizei, POINTER(t.uint)) @returns(t.void) @binds(dll) def delete_framebuffers(n, framebuffers): ''' delete framebuffer objects. gl.delete_framebuffers deletes the n framebuffer objects whose names are stored in the array addressed by framebuffers. The name zero is reserved by the GL and is silently ignored, should it occur in framebuffers, as are other unused names. 
Once a framebuffer object is deleted, its name is again unused and it has no attachments. If a framebuffer that is currently bound to one or more of the targets gl.DRAW_FRAMEBUFFER or gl.READ_FRAMEBUFFER is deleted, it is as though gl.bind_framebuffer had been executed with the corresponding target and framebuffer zero. Args: n: the number of framebuffer objects to be deleted. framebuffers: a pointer to an array containing n framebuffer objects to be deleted. ''' @accepts(t.uint) @returns(t.void) @binds(dll) def delete_program(program): ''' Deletes a program object. gl.delete_program frees the memory and invalidates the name associated with the program object specified by program. This command effectively undoes the effects of a call to gl.create_program. Args: program: the program object to be deleted. ''' @accepts(t.sizei, POINTER(t.uint)) @returns(t.void) @binds(dll) def delete_renderbuffers(n, renderbuffers): ''' delete renderbuffer objects. gl.delete_renderbuffers deletes the n renderbuffer objects whose names are stored in the array addressed by renderbuffers. The name zero is reserved by the GL and is silently ignored, should it occur in renderbuffers, as are other unused names. Once a renderbuffer object is
!= self.object.author: context["auth_user"] = self.request.user # Add previous and next activities in the context context["next_course_activity"] = CourseActivity.objects.filter( course=self.object, rank=course_activity.rank + 1 ).first() context["previous_course_activity"] = CourseActivity.objects.filter( course=self.object, rank=course_activity.rank - 1 ).first() return context class CourseDetailActivityResourceView(CourseDetailView): """ View a specific resource on an activity. """ template_name = "learning/course/details/activity_resource.html" # noinspection PyAttributeOutsideInit,PyMissingOrEmptyDocstring def dispatch(self, request, *args, **kwargs): self.object = self.get_object() self.activity = get_object_or_404(Activity, slug=self.kwargs.get("activity_slug")) self.resource = get_object_or_404(Resource, slug=self.kwargs.get("resource_slug")) if self.object.course_activities.filter(activity=self.activity).exists() and \ self.activity.resources.filter(id=self.resource.id).exists(): return self.get(request, args, kwargs) return HttpResponseNotFound() # noinspection PyMissingOrEmptyDocstring def get_context_data(self, **kwargs): context = super().get_context_data() context["activity"] = self.activity context["resource"] = self.resource context["activity"] = self.activity context["resource"] = self.resource context["current_course_activity_resource_objective"] = PaginatorFactory.get_paginator_as_context( ResourceObjective.objects.filter(resource=self.resource).order_by("created").reverse(), self.request.GET, nb_per_page=6 ) return context class CourseDetailCollaboratorsListView(BasicModelDetailCollaboratorsListView, CourseDetailMixin): """ View collaborators on a course in a HTML page. """ template_name = "learning/course/details/collaborators.html" class CourseDetailCollaboratorsAddView(BasicModelDetailCollaboratorsAddView, CourseDetailMixin): """ Add a collaborator on a course in a HTML page. 
    """
    success_url = "learning:course/detail/collaborators"


class CourseDetailCollaboratorsChangeView(BasicModelDetailCollaboratorsChangeView, CourseDetailMixin):
    """
    Change a collaborator on a course in a HTML page.
    """
    success_url = "learning:course/detail/collaborators"


class CourseDetailCollaboratorsDeleteView(BasicModelDetailCollaboratorsDeleteView, CourseDetailMixin):
    """
    Delete a collaborator from a course in a HTML page.
    """
    success_url = "learning:course/detail/collaborators"


class CourseDetailStudentViewMixin(LoginRequiredMixin, CourseDetailView):
    """
    Mixin to view students registered on a course.

    .. note:: Viewing students registered on a course requires the **view_students** permission.
    """

    # noinspection PyMissingOrEmptyDocstring
    def has_permission(self):
        return self.object.user_can("view_students", self.request.user) and super().has_permission()

    # noinspection PyMissingOrEmptyDocstring
    def handle_no_permission(self):
        messages.error(self.request, _("You do not have the required permissions to view students of this course."))
        return super().handle_no_permission()


class CourseDetailStudentsView(CourseDetailStudentViewMixin, FormView):
    """
    View students registered on a course in a HTML page.
    """
    form_class = AddStudentOnCourseForm
    template_name = "learning/course/details/students.html"

    # noinspection PyMissingOrEmptyDocstring
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE(review): registrations are paginated ordered by the student's
        # last login — confirm this ordering is intentional.
        context.update(
            PaginatorFactory.get_paginator_as_context(
                self.object.registrations.order_by("student__last_login").all(),
                self.request.GET, nb_per_page=10)
        )
        context["number_student"] = self.object.registrations.count()
        return context


class CourseDetailStudentsAddViewMixin(LoginRequiredMixin, CourseDetailMixin):
    """
    Mixin to register a student on a course.

    .. note:: Adding a student on a course requires the **add_student** permission.
""" # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def has_permission(self): return self.object.user_can("add_student", self.request.user) and super().has_permission() # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def handle_no_permission(self): messages.error(self.request, _("You do not have the required permissions to add a student on this course.")) return super().handle_no_permission() class CourseDetailStudentsAddView(CourseDetailStudentsAddViewMixin, InvalidFormHandlerMixin, FormView): """ Register a student on a course in a HTML page. """ form_class = AddStudentOnCourseForm # noinspection PyAttributeOutsideInit,PyMissingOrEmptyDocstring def form_valid(self, form): self.object = self.get_object() username = form.cleaned_data.get("username", None) locked = form.cleaned_data.get("registration_locked", True) try: user = get_user_model().objects.filter(username=username).get() self.object.register_student(user, locked) messages.success(self.request, _("%(user)s is now registered on this course.") % {"user": user}) except ObjectDoesNotExist: messages.error(self.request, _("This user does not exists.")) except (UserIsAlreadyStudent, UserIsAlreadyCollaborator, UserIsAlreadyAuthor) as ex: messages.warning(self.request, ex) except LearningError as ex: messages.error(self.request, ex) return redirect("learning:course/detail/students", slug=self.object.slug) # noinspection PyMissingOrEmptyDocstring def form_invalid(self, form): super().form_invalid(form) return redirect("learning:course/detail/students", slug=self.object.slug) class CourseDetailStudentChangeViewMixin(LoginRequiredMixin, CourseDetailMixin): """ Mixin to change a student on a course .. caution:: Changing a student on a course requires the **change_student** permission. 
""" # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def has_permission(self): return self.object.user_can("change_student", self.request.user) and super().has_permission() # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def handle_no_permission(self): messages.error(self.request, _("You do not have the required permissions to change a student on this course.")) return super().handle_no_permission() class CourseDetailStudentChangeView(CourseDetailStudentChangeViewMixin, InvalidFormHandlerMixin, ProcessFormView): """ Change a student on a course in a HTML page. """ # noinspection PyMissingOrEmptyDocstring class RegistrationPKForm(Form): """ The registration form that is used to track the student id. """ registration_pk = forms.IntegerField(min_value=1, required=True) def post(self, request, *args, **kwargs): registration_pk_form = self.RegistrationPKForm(kwargs or None) if registration_pk_form.is_valid(): registration = get_object_or_404( RegistrationOnCourse, pk=registration_pk_form.cleaned_data.get("registration_pk") ) registration.registration_locked = not registration.registration_locked registration.save() if registration.registration_locked: messages.success( self.request, _("Registration is now locked for %(user)s. This user will not be able to unregister.") % {"user": registration.student} ) return redirect("learning:course/detail/students", slug=self.object.slug) messages.success( self.request, _("%(user)s now can unregister by itself from this course.") % {"user": registration.student} ) return redirect("learning:course/detail/students", slug=self.object.slug) return HttpResponseNotFound(_("The given registration primary key is invalid. It this intentional?")) class CourseDetailStudentsDeleteViewMixin(LoginRequiredMixin, CourseDetailMixin): """ Mixin to unregister a student on a course .. caution:: Unregistering a student on a course requires the **delete_student** permission. 
""" # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def has_permission(self): return self.object.user_can("delete_student", self.request.user) and super().has_permission() # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def handle_no_permission(self): messages.error( self.request, _("You do not have the required permissions to unregister a student from this course.") ) return super().handle_no_permission() class CourseDetailStudentsDeleteView(CourseDetailStudentsDeleteViewMixin, ProcessFormView): """ Unregister a student from a course. """ # noinspection PyMissingOrEmptyDocstring def post(self, request, *args, **kwargs): user_pk_form = UserPKForm(self.request.POST or None) if user_pk_form.is_valid(): student = get_object_or_404(get_user_model(), pk=user_pk_form.cleaned_data.get("user_pk")) self.object.unsubscribe_student(student) messages.success( self.request, _("The student “%(student)s” has been unregistered from the course “%(course)s”.") % {"course": self.object, "student": student} ) return redirect("learning:course/detail/students", slug=self.object.slug) return HttpResponseNotFound(user_pk_form.errors.get("user_pk")) # noinspection PyMissingOrEmptyDocstring def get(self, request, *args, **kwargs): return HttpResponseNotAllowed(["POST"]) class CourseDetailSimilarViewMixin(LoginRequiredMixin, CourseDetailView): """ Mixin to view courses that are similar to the current one. .. caution:: Viewing similar courses requires the **view_similar** permission. 
""" # noinspection PyMissingOrEmptyDocstring def has_permission(self): return self.object.user_can("view_similar", self.request.user) and super().has_permission() # noinspection PyMissingOrEmptyDocstring def handle_no_permission(self): # pragma: no cover messages.error(self.request, _("You do not have the required permissions to view similar courses.")) return super().handle_no_permission() class CourseDetailSimilarView(CourseDetailSimilarViewMixin): """ View courses that are similar to the current one. .. note:: This requires the permission to view similar courses and to view the course itself. """ template_name = "learning/course/details/similar.html" # noinspection PyMissingOrEmptyDocstring def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) # noinspection PyBroadException try: similar_list = [ similar for similar in self.object.tags.similar_objects() if isinstance(similar, Course) and similar.user_can_view(self.request.user) ] context.update(PaginatorFactory.get_paginator_as_context(similar_list, self.request.GET, nb_per_page=9)) # django-taggit similar tags can have weird behaviour sometimes: https://github.com/jazzband/django-taggit/issues/80 except Exception: messages.error( self.request, _("We’re having some problems finding similar courses… We try to fix this as soon as possible.") ) return context class CourseRegisterView(LoginRequiredMixin, CourseDetailView, ProcessFormView): """ Register the current user on the course. 
""" # noinspection PyUnusedLocal,PyMissingOrEmptyDocstring def post(self, request, *args, **kwargs): try: self.object.register(request.user) messages.success( request, _("You have been registered on the course “%(course)s”") % {"course": self.object} ) except LearningError as ex: messages.error(request, ex) return redirect("learning:course/detail", slug=self.object.slug) # noinspection PyMissingOrEmptyDocstring def get(self, request, *args, **kwargs): return HttpResponseNotAllowed(["POST"]) class CourseUnregisterView(LoginRequiredMixin, CourseDetailView, FormView): """ Unsubscribe the current user from the course. """ # noinspection PyUnusedLocal,PyMissingOrEmptyDocstring def post(self, request, *args, **kwargs): try: self.object.unsubscribe(request.user) messages.success( request, _("You have been unregistered from the course “%(course)s”") % {"course": self.object} ) except LearningError as ex: messages.error(request, ex) return redirect("learning:course/detail", slug=self.object.slug) def get(self, request, *args, **kwargs): return HttpResponseNotAllowed(["POST"]) class CourseSearchView(TemplateView): """ Search for a specific course. 
""" template_name = "learning/course/search.html" # noinspection PyMissingOrEmptyDocstring def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) # Show the user some recommended courses, based on its profile queryset = None if self.request.user.is_authenticated: queryset = Course.objects.recommendations_for(self.request.user).all() context.update(PaginatorFactory.get_paginator_as_context( queryset, self.request.GET, prefix="recommended", nb_per_page=9) ) # Show the user all public courses context.update(PaginatorFactory.get_paginator_as_context( Course.objects.public_without_followed_by_without_taught_by(self.request.user, self.request.user).all(), self.request.GET, prefix="public", nb_per_page=33) ) # The search results context.update(SearchQuery.search_query_as_context(Course, self.request.GET)) return context class ActivityCreateOnCourseViewMixin(LoginRequiredMixin, CourseDetailMixin): """ Mixin to create an activity on a course. .. caution:: Adding an activity on a course implies you have the **change** permission and it’s not read-only. """ # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def has_permission(self): return self.object.user_can_change(self.request.user) and not self.object.read_only # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def handle_no_permission(self): messages.error( self.request, _("You do not have the required permissions to change this course. " "It may be read-only because archived or you may not have the rights to edit the course.") ) return super().handle_no_permission() class ActivityAttachOnCourseViewMixin(ActivityCreateOnCourseViewMixin): """ Mixin to attach an activity on a course. .. caution:: Adding an activity on a course implies you have the **change** permission and it’s not read-only. 
""" # noinspection PyUnresolvedReferences,PyMissingOrEmptyDocstring def handle_no_permission(self): messages.warning( self.request, _("You do not have the required permission to add an activity on this course.") ) return super().handle_no_permission() class ActivityCreateOnCourseView(ActivityCreateOnCourseViewMixin, InvalidFormHandlerMixin, UpdateView): """ View to create a new activity and automatically link it with the current course. .. note:: This requires the right to change the course, and implies the course is not read-only (can be edited) .. note:: This implies that data from the CourseActivity and Activity models are presented in two separated forms. """ template_name = "learning/course/details/add_activity_on_course.html" # noinspection PyMissingOrEmptyDocstring # FIXME: is this really necessary? It should be handled by Django itself. def get_form(self, form_class=None) -> ModelForm: return ActivityCreateForm(self.request.POST or None) # noinspection PyMissingOrEmptyDocstring def form_valid(self, form): activity = form.instance # Extract form instances activity.author = self.request.user # Manually set the activity author to the current user try: self.object.add_activity(activity) messages.success( self.request, _("The activity “%(activity)s” has been added to
%d, '%s', '%s')" % (target_item.stack_id, target_item.branch_id, source_row['aadt'], source_row['aadt_year'], source_row['aadt_type'], source_row['last_modified'], ))
         # NOTE(review): the head of this method (the opening of the string
         # literal above) was lost in extraction; only the tail is visible.
         self.insert_aadts.append(insert_aadt)
         self.stats['items_clone_aadt'] += 1

   #
   def merge_byway_ratings(self, shpfeat, target_item, source_item):
      # Copy per-user byway ratings from the source item to the target item.
      # Skipping: We show but don't allow editing of ratings via Shpfile,
      #           and, anyway, we're just copying user ratings here, and
      #           we're not touching any of the calculated ratings (though
      #           you should re-run the ratings generator after running
      #           the import script).
      # Check the byway_rating table for user ratings.
      #
      rating_user_sql = target_item.rating_user_sql()
      target_rows = self.qb.db.sql(rating_user_sql)
      #
      rating_user_sql = source_item.rating_user_sql()
      source_rows = self.qb.db.sql(rating_user_sql)
      #
      # Only copy ratings when the target has none of its own.
      if (not target_rows) and source_rows:
         for source_row in source_rows:
            # See we might try merging to the same item from more than one
            # other item, we have to check for duplicates.
            try:
               self.brats_dbldict[source_row['username']][target_item.stack_id]
            except KeyError:
               # First time this (user, target) pair is seen: remember it and
               # queue a VALUES tuple for the bulk byway_rating insert.
               misc.dict_dict_update(self.brats_dbldict,
                                     source_row['username'],
                                     target_item.stack_id,
                                     True)
               brat_insert_expr = (
                  "(%d, %d, %s, %s)"
                  % (target_item.stack_id,
                     target_item.branch_id,
                     self.qb.db.quoted(source_row['username']),
                     source_row['value'],
                     # Skipping (has trigger): last_modified
                     ))
               self.insert_brats.append(brat_insert_expr)
               # MAYBE: Also update byway_rating_event?
               #        Or maybe we don't have to worry...
               self.stats['items_clone_brat'] += 1

   #
   def merge_user_lvals(self, shpfeat, target_item, source_item):
      # Clone the user-attached link_values (item alerts) from the source
      # item onto the target item as brand-new version-1 link_values.
      # MAYBE: We don't copy watchers:
      #           item_findability
      #           item_event_alert
      #           item_event_read
      #        Maybe we should copy item watchers...
      # Copy watchers and reminders.
      #
      # /item/reminder_email is deleted...
      # for multi_attr_name in ('/item/alert_email', '/item/reminder_email',):
      for multi_attr_name in ('/item/alert_email',):
         link_many = link_attribute.Many(multi_attr_name)
         all_attribute_lvals_sql = link_many.link_multiple_allowed_sql(
            self.qb, source_item.stack_id)
         rows = self.qb.db.sql(all_attribute_lvals_sql)
         for row in rows:
            #
            new_lval = link_value.One()
            #
            # Item Base
            # Skipping: dirty, fresh, valid, req, attrs, tagged,
            #           link_values, lvals_wired_, lvals_inclusive_
            #
            # Item Versioned
            new_lval.system_id = self.qb.db.sequence_get_next(
               'item_versioned_system_id_seq')
            new_lval.branch_id = self.qb.branch_hier[0][0]
            new_lval.stack_id = self.qb.db.sequence_get_next(
               'item_stack_stack_id_seq')
            new_lval.version = 1
            new_lval.deleted = False
            new_lval.reverted = False
            new_lval.name = row['name']
            new_lval.valid_start_rid = self.qb.item_mgr.rid_new
            new_lval.valid_until_rid = conf.rid_inf
            #
            # Item Revisionless
            new_lval.acl_grouping = 1
            # Skipping: new_lval.edited_date
            # Skipping: new_lval.edited_user
            # Skipping: new_lval.edited_addr
            # Skipping: new_lval.edited_host
            # Skipping: new_lval.edited_note
            # Skipping: new_lval.edited_what
            #
            # Item User Access / These are inferred from GIA records
            new_lval.access_level_id = row['access_level_id']
            # Skipping: diff_group, style_change, real_item_type_id
            # NOTE(review): self-assignment below is a no-op — presumably the
            # rhs was meant to come from row or the link class; confirm.
            new_lval.item_type_id = new_lval.item_type_id
            #
            # Item Stack
            # Skipping: stealth_secret
            new_lval.cloned_from_id = row['cloned_from_id']
            new_lval.access_style_id = row['access_style_id']
            new_lval.access_infer_id = row['access_infer_id']
            #
            # Link_Value
            new_lval.lhs_stack_id = link_many.attr_stack_id
            new_lval.rhs_stack_id = target_item.stack_id
            new_lval.link_lhs_type_id = link_many.one_class.item_type_table
            new_lval.link_rhs_type_id = target_item.item_type_id
            new_lval.value_boolean = row['value_boolean']
            new_lval.value_integer = row['value_integer']
            new_lval.value_real = row['value_real']
            new_lval.value_text = row['value_text']
            new_lval.value_binary = row['value_binary']
            new_lval.value_date = row['value_date']
            new_lval.lhs_name = multi_attr_name
            new_lval.rhs_name = target_item.name
            #new_lval.split_from_stack_id = source_item.stack_id
            # FIXME: Only if really split, not if CCP_FROMS_, but if dupl sids
            # FIXME: Only if the other ID > ours?
            new_lval.split_from_stack_id = row['stack_id']
            g.assurt(new_lval.split_from_stack_id > 0)
            # Skipping/Not implemented:
            #  direction_id, line_evt_mval_a, line_evt_mval_b, line_evt_dir_id
            #
            self.add_new_item_gia(new_lval)
            #
            new_lval.fresh = True
            new_lval.validize(
               self.qb, is_new_item=True,
               dirty_reason=item_base.One.dirty_reason_item_auto,
               ref_item=None)
            g.assurt(new_lval.stack_id not in self.create_lvals)
            self.create_lvals[new_lval.stack_id] = new_lval
            target_item.wire_lval(self.qb, new_lval, heavywt=True)
            self.stats['items_clone_lval_multiattr'] += 1

   # **** Process edited items: bulk-save database changes

   #
   def save_db_changes(self):
      # Flush all accumulated feature and link_value edits to the database,
      # then the byway-only side tables (AADT counts and node endpoints).
      self.edited_items_finalize_versions(self.update_feats.values())
      self.edited_feats_bulk_insert_rows(self.create_feats.values()
                                         + self.update_feats.values())
      self.edited_items_finalize_versions(self.update_lvals.values())
      self.edited_lvals_bulk_insert_rows(self.create_lvals.values()
                                         + self.update_lvals.values())
      if self.cli_opts.item_type == 'byway':
         self.save_db_aadt()
         self.save_db_nodes()

   #
   def save_db_aadt(self):
      # AADT.
      # Bulk-insert the queued AADT (traffic count) VALUES tuples, if any.
      if self.insert_aadts:
         insert_sql = (
            """
            INSERT INTO %s.aadt
               (byway_stack_id
               , branch_id
               , aadt
               , aadt_year
               , aadt_type
               , last_modified
               )
            VALUES
               %s
            """ % (conf.instance_name,
                   ','.join(self.insert_aadts),))
         self.qb.db.sql(insert_sql)

   #
   def save_db_nodes(self):
      # We're about to commit the transaction, so mucking with the branch_hier
      # is okay. And we can't clone the database to save node stuff, since we
      # haven't save byway changes; we'd have to commit first, anyway.
      node_qb = self.qb
      node_qb.revision = revision.Current()
      node_qb.branch_hier[0] = (node_qb.branch_hier[0][0],
                                node_qb.revision,
                                node_qb.branch_hier[0][2],)
      # Call node_qb.revision.setup_gids(node_qb.db, node_qb.username) or ?:
      #  node_qb.branch_hier_set(node_qb.branch_hier)
      node_qb.db.transaction_begin_rw('node_endpoint')
      if self.delete_nodes_for:
         log.debug('save_db_chngs: deleting %d from node_byway...'
                   % (len(self.delete_nodes_for),))
         cleanup_sql = (
            """
            DELETE FROM node_byway
            WHERE branch_id = %d
              AND byway_stack_id IN (%s)
            """ % (node_qb.branch_hier[0][0],
                   ','.join([str(x) for x in self.delete_nodes_for]),))
         node_qb.db.sql(cleanup_sql)
         self.delete_nodes_for = set()
      for feat in (self.create_feats.values() + self.update_feats.values()):
         try:
            # We're saving separately in parallel so we'll get node-endpoint
            # complainst about counts since we saved at revision minus one
            # but now we're at the current revision.
            feat.save_or_update_node_endpoints(node_qb)
         except AttributeError:
            raise

   #
   def edited_items_finalize_versions(self, updated_items):
      # Close out the previous version of each updated item by stamping a
      # real revision id over the open-ended conf.rid_inf sentinel.
      if updated_items:
         item_sids = [str(item.stack_id) for item in updated_items]
         # Finalize item rows (change valid_until_rid of the last version from
         # conf.rid_inf to a historic rid) in the two tables, item_versioned
         # and group_item_access.
         for table_name in (group_item_access.One.item_type_table,
                            item_versioned.One.item_type_table,):
            update_sql = (
               """
               UPDATE %s.%s SET valid_until_rid = %s
               WHERE (stack_id IN (%s))
                 AND (branch_id = %d)
                 AND (valid_until_rid = %d)
               """ % (conf.instance_name,
                      table_name,
                      self.qb.item_mgr.rid_new,
                      ','.join(item_sids),
                      self.qb.branch_hier[0][0],
                      conf.rid_inf,))
            self.qb.db.sql(update_sql)

   #
   def edited_feats_bulk_insert_rows(self, edited_feats):
      # Convert every edited feature into per-table VALUES expressions and
      # bulk-insert them, then refresh the generic byway ratings.
      item_module = item_factory.get_item_module(self.cli_opts.item_type)
      g.assurt(item_module is not None)
      is_rows = []
      iv_rows = []
      ir_rows = []
      #gf_rows = []
      by_rows = []
      gia_rows = []
      #at_rows = []
      #tg_rows = []
      rat_sids = []
      for feat in edited_feats:
         g.assurt(feat.valid or feat.deleted)
         # Only version-1 items get a brand-new item_stack row.
         if feat.version == 1:
            g.assurt(feat.fresh)
            expr = item_stack.One.as_insert_expression(self.qb, feat)
            is_rows.append(expr)
         else:
            g.assurt(not feat.fresh)
         expr = item_versioned.One.as_insert_expression(self.qb, feat)
         iv_rows.append(expr)
         expr = item_revisionless.One.as_insert_expression(self.qb, feat)
         ir_rows.append(expr)
         # Skipping: geofeature; use byway class instead.
         expr = item_module.One.as_insert_expression(self.qb, feat)
         by_rows.append(expr)
         self.edit_items_bulk_insert_gia(feat, gia_rows)
         # Also, byway ratings.
         if (item_module == byway) and (not feat.deleted):
            byway.One.add_insert_expressions_ratings_generic(
               self.qb, feat, self.insert_brats, rat_sids)
         # else, don't update ratings for deleted byways, right?
      log.debug('edited_feats_bulk_insert_rows: updating feats...')
      time_0 = time.time()
      item_stack.Many.bulk_insert_rows(self.qb, is_rows)
      item_versioned.Many.bulk_insert_rows(self.qb, iv_rows)
      item_revisionless.Many.bulk_insert_rows(self.qb, ir_rows)
      item_module.Many.bulk_insert_rows(self.qb, by_rows)
      # IntegrityError('insert or update on table
      #  "group_item_access" violates foreign key constraint
      #  "group_item_access_branch_id_stack_id_version_fkey"\nDETAIL: Key
      #  (branch_id, stack_id, version)=(2500677, 4067168, 0) is not present
      #  in table "item_versioned".\n',)
      group_item_access.Many.bulk_insert_rows(self.qb, gia_rows)
      log.debug('edited_feats_bulk_insert_rows: updated feats in %s'
                % (misc.time_format_elapsed(time_0),))
      time_0 = time.time()
      if rat_sids:
         try:
            byway.Many.bulk_delete_ratings_generic(self.qb, rat_sids)
            byway.Many.bulk_insert_ratings(self.qb, self.insert_brats)
         except Exception, e:
            conf.break_here() # To help a DEV.
            raise
      log.debug('edited_feats_bulk_insert_rows: updated ratings in %s'
                % (misc.time_format_elapsed(time_0),))

   #
   def edited_lvals_bulk_insert_rows(self, edited_lvals):
      # Same bulk-insert dance as for features, but for link_values.
      is_rows = []
      iv_rows = []
      ir_rows = []
      lv_rows = []
      gia_rows = []
      for lval in edited_lvals:
         if lval.version == 1:
            expr = item_stack.One.as_insert_expression(self.qb, lval)
            is_rows.append(expr)
         expr = item_versioned.One.as_insert_expression(self.qb, lval)
         iv_rows.append(expr)
         expr = item_revisionless.One.as_insert_expression(self.qb, lval)
         ir_rows.append(expr)
         expr = link_value.One.as_insert_expression(self.qb, lval)
         lv_rows.append(expr)
         self.edit_items_bulk_insert_gia(lval, gia_rows)
      log.debug('edited_lvals_bulk_insert_rows: updating lvals...')
      time_0 = time.time()
      item_stack.Many.bulk_insert_rows(self.qb, is_rows)
      item_versioned.Many.bulk_insert_rows(self.qb, iv_rows)
      item_revisionless.Many.bulk_insert_rows(self.qb, ir_rows)
      link_value.Many.bulk_insert_rows(self.qb, lv_rows)
      group_item_access.Many.bulk_insert_rows(self.qb, gia_rows)
      log.debug('edited_lvals_bulk_insert_rows: 
updated lvals in %s' % (misc.time_format_elapsed(time_0),)) # def edit_items_bulk_insert_gia(self, item, gia_rows): for grp_acc in item.groups_access.itervalues(): # If we made a new GIA record, we didn't set its rids, # because the owning item complains in its validize. g.assurt(grp_acc.group_id > 0) # Skipping: session_id # Skipping: access_level_id grp_acc.branch_id = item.branch_id grp_acc.item_id == item.system_id grp_acc.stack_id = item.stack_id grp_acc.version = item.version grp_acc.acl_grouping = 1 grp_acc.deleted = item.deleted grp_acc.reverted = item.reverted grp_acc.valid_start_rid = item.valid_start_rid grp_acc.valid_until_rid = item.valid_until_rid grp_acc.name = item.name # Skipping: tsvect_name try: g.assurt((item.link_lhs_type_id > 0) and (item.link_rhs_type_id > 0)) grp_acc.link_lhs_type_id = item.link_lhs_type_id grp_acc.link_rhs_type_id = item.link_rhs_type_id except AttributeError: pass # item not a link_value. expr = group_item_access.One.as_insert_expression(self.qb, grp_acc, item.item_type_id) gia_rows.append(expr) # *** Non-byway export (see work item export job for exporting byways) # def do_export_non_byway(self): self.stats_init_export() item_module = item_factory.get_item_module(self.cli_opts.item_type) g.assurt(item_module is not None) if not os.path.exists(self.cli_opts.source_dir): try: os.mkdir(self.cli_opts.source_dir, 02775) except OSError, e: log.error('Unexpected: Could not make export directory: %s' % (str(e),)) raise target_schema = Hausdorff_Import.intermediate_schema_non_byway try: self.prepare_target_shapefiles(target_schema) except Exception, e: log.error('Unable to prepare targets: %s' % (str(e),)) raise try: self.load_and_export_items(item_module) finally: for shpfile in self.slayers.values(): try: shpfile.close() except: pass self.slayers = None try: self.everylayer.close() except: pass self.everylayer = None self.symlink_target_shapefiles(link_name='Exported') self.stats_show_export() # # C.f. 
services/merge/export_cyclop.py def load_and_export_items(self, feat_class): log.info('load_and_export_items: working on type: %s' % (Item_Type.id_to_str(feat_class.One.item_type_id),)) time_0 = time.time() prog_log = Debug_Progress_Logger() prog_log.log_freq = 100 self.qb.filters.rating_special = True self.qb.filters.make_geometry_ewkt = True # The merge_job setup the item_mgr, which we use now to load the byways # and their attrs and tags. feat_search_fcn = 'search_for_items' # E.g. byway.Many().search_for_items processing_fcn = self.feat_export self.qb.item_mgr.load_feats_and_attcs( self.qb, feat_class, feat_search_fcn, processing_fcn, prog_log, heavyweight=False) log.info('... exported %d features in %s' % (prog_log.progress, misc.time_format_elapsed(time_0),)) # *** # def feat_export(self, qb, gf, prog_log): log.debug('feat_export: gf: %s' % (str(gf),)) new_geom = {} if isinstance(gf, terrain.One): #new_geom['type'] = 'Polygon' new_geom['type'] = 'MultiPolygon' new_geom['coordinates'] = geometry.wkt_polygon_to_xy(gf.geometry_wkt) elif isinstance(gf, region.One): #new_geom['type'] = 'Polygon' new_geom['type'] = 'MultiPolygon' new_geom['coordinates'] = geometry.wkt_polygon_to_xy(gf.geometry_wkt) elif isinstance(gf, waypoint.One): new_geom['type'] = 'Point' new_geom['coordinates'] = geometry.wkt_point_to_xy(gf.geometry_wkt) else: g.assurt(False) gf_lyr_id, gf_lyr_name = ( self.qb.item_mgr.geofeature_layer_resolve( self.qb.db, gf.geofeature_layer_id)) for tag_name
<reponame>calearning/testGithubClone<gh_stars>1-10 from __future__ import print_function, division import imgaug as ia from imgaug import augmenters as iaa from imgaug import parameters as iap import numpy as np from scipy import ndimage, misc #from skimage import data #import matplotlib.pyplot as plt #from matplotlib import gridspec #import six #import six.moves as sm import os import PIL.Image import math from skimage import data try: from cStringIO import StringIO as BytesIO except ImportError: from io import BytesIO DOCS_IMAGES_BASE_PATH = os.path.join( os.path.dirname(os.path.abspath(__file__)), "docs", "images" ) def main(): chapter_examples_basics() chapter_examples_keypoints() chapter_augmenters() def save(chapter_dir, filename, image, quality=None): dir_fp = os.path.join(DOCS_IMAGES_BASE_PATH, chapter_dir) if not os.path.exists(dir_fp): os.makedirs(dir_fp) file_fp = os.path.join(dir_fp, filename) image_jpg = compress_to_jpg(image, quality=quality) image_jpg_decompressed = decompress_jpg(image_jpg) # If the image file already exists and is (practically) identical, # then don't save it again to avoid polluting the repository with tons # of image updates. # Not that we have to compare here the results AFTER jpg compression # and then decompression. Otherwise we compare two images of which # image (1) has never been compressed while image (2) was compressed and # then decompressed. 
if os.path.isfile(file_fp): image_saved = ndimage.imread(file_fp, mode="RGB") #print("arrdiff", arrdiff(image_jpg_decompressed, image_saved)) same_shape = (image_jpg_decompressed.shape == image_saved.shape) d_avg = arrdiff(image_jpg_decompressed, image_saved) if same_shape else -1 if same_shape and d_avg <= 1.0: print("[INFO] Did not save image '%s/%s', because the already saved image is basically identical (d_avg=%.4f)" % (chapter_dir, filename, d_avg,)) return with open(file_fp, "w") as f: f.write(image_jpg) def arrdiff(arr1, arr2): nb_cells = np.prod(arr2.shape) d_avg = np.sum(np.power(np.abs(arr1 - arr2), 2)) / nb_cells return d_avg def compress_to_jpg(image, quality=75): quality = quality if quality is not None else 75 im = PIL.Image.fromarray(image) out = BytesIO() im.save(out, format="JPEG", quality=quality) jpg_string = out.getvalue() out.close() return jpg_string def decompress_jpg(image_compressed): img_compressed_buffer = BytesIO() img_compressed_buffer.write(image_compressed) img = ndimage.imread(img_compressed_buffer, mode="RGB") img_compressed_buffer.close() return img def grid(images, rows, cols, border=1, border_color=255): nb_images = len(images) cell_height = max([image.shape[0] for image in images]) cell_width = max([image.shape[1] for image in images]) channels = set([image.shape[2] for image in images]) assert len(channels) == 1 nb_channels = list(channels)[0] if rows is None and cols is None: rows = cols = int(math.ceil(math.sqrt(nb_images))) elif rows is not None: cols = int(math.ceil(nb_images / rows)) elif cols is not None: rows = int(math.ceil(nb_images / cols)) assert rows * cols >= nb_images cell_height = cell_height + 1 * border cell_width = cell_width + 1 * border width = cell_width * cols height = cell_height * rows grid = np.zeros((height, width, nb_channels), dtype=np.uint8) cell_idx = 0 for row_idx in range(rows): for col_idx in range(cols): if cell_idx < nb_images: image = images[cell_idx] border_top = border_right = border_bottom 
= border_left = border #if row_idx > 1: border_top = 0 #if col_idx > 1: border_left = 0 image = np.pad(image, ((border_top, border_bottom), (border_left, border_right), (0, 0)), mode="constant", constant_values=border_color) cell_y1 = cell_height * row_idx cell_y2 = cell_y1 + image.shape[0] cell_x1 = cell_width * col_idx cell_x2 = cell_x1 + image.shape[1] grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image cell_idx += 1 grid = np.pad(grid, ((border, 0), (border, 0), (0, 0)), mode="constant", constant_values=border_color) return grid def checkerboard(size): img = data.checkerboard() img3d = np.tile(img[..., np.newaxis], (1, 1, 3)) return misc.imresize(img3d, size) ############################### # Examples: Basics ############################### def chapter_examples_basics(): """Generate all example images for the chapter `Examples: Basics` in the documentation.""" chapter_examples_basics_simple() chapter_examples_basics_heavy() def chapter_examples_basics_simple(): import imgaug as ia from imgaug import augmenters as iaa # Example batch of images. # The array has shape (32, 64, 64, 3) and dtype uint8. images = np.array( [ia.quokka(size=(64, 64)) for _ in range(32)], dtype=np.uint8 ) seq = iaa.Sequential([ iaa.Fliplr(0.5), # horizontal flips iaa.Crop(percent=(0, 0.1)), # random crops # Small gaussian blur with random sigma between 0 and 0.5. # But we only blur about 50% of all images. iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5)) ), # Strengthen or weaken the contrast in each image. iaa.ContrastNormalization((0.75, 1.5)), # Add gaussian noise. # For 50% of all images, we sample the noise once per pixel. # For the other 50% of all images, we sample the noise per pixel AND # channel. This can change the color (not only brightness) of the # pixels. iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # Make some images brighter and some darker. 
# In 20% of all cases, we sample the multiplier once per channel, # which can end up changing the color of the images. iaa.Multiply((0.8, 1.2), per_channel=0.2), # Apply affine transformations to each image. # Scale/zoom them, translate/move them, rotate them and shear them. iaa.Affine( scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, rotate=(-25, 25), shear=(-8, 8) ) ], random_order=True) # apply augmenters in random order ia.seed(1) images_aug = seq.augment_images(images) # ------------ save( "examples_basics", "simple.jpg", grid(images_aug, cols=8, rows=4) ) def chapter_examples_basics_heavy(): import imgaug as ia from imgaug import augmenters as iaa import numpy as np # Example batch of images. # The array has shape (32, 64, 64, 3) and dtype uint8. images = np.array( [ia.quokka(size=(64, 64)) for _ in range(32)], dtype=np.uint8 ) # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases, # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second # image. sometimes = lambda aug: iaa.Sometimes(0.5, aug) # Define our sequence of augmentation steps that will be applied to every image. seq = iaa.Sequential( [ # # Apply the following augmenters to most images. # iaa.Fliplr(0.5), # horizontally flip 50% of all images iaa.Flipud(0.2), # vertically flip 20% of all images # crop some of the images by 0-10% of their height/width sometimes(iaa.Crop(percent=(0, 0.1))), # Apply affine transformations to some of the images # - scale to 80-120% of image height/width (each axis independently) # - translate by -20 to +20 relative to height/width (per axis) # - rotate by -45 to +45 degrees # - shear by -16 to +16 degrees # - order: use nearest neighbour or bilinear interpolation (fast) # - mode: use any available mode to fill newly created pixels # see API or scikit-image for which modes are available # - cval: if the mode is constant, then use a random brightness # for the newly created pixels (e.g. 
sometimes black, # sometimes white) sometimes(iaa.Affine( scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, rotate=(-45, 45), shear=(-16, 16), order=[0, 1], cval=(0, 255), mode=ia.ALL )), # # Execute 0 to 5 of the following (less important) augmenters per # image. Don't execute all of them, as that would often be way too # strong. # iaa.SomeOf((0, 5), [ # Convert some images into their superpixel representation, # sample between 20 and 200 superpixels per image, but do # not replace all superpixels with their average, only # some of them (p_replace). sometimes( iaa.Superpixels( p_replace=(0, 1.0), n_segments=(20, 200) ) ), # Blur each image with varying strength using # gaussian blur (sigma between 0 and 3.0), # average/uniform blur (kernel size between 2x2 and 7x7) # median blur (kernel size between 3x3 and 11x11). iaa.OneOf([ iaa.GaussianBlur((0, 3.0)), iaa.AverageBlur(k=(2, 7)), iaa.MedianBlur(k=(3, 11)), ]), # Sharpen each image, overlay the result with the original # image using an alpha between 0 (no sharpening) and 1 # (full sharpening effect). iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # Same as sharpen, but for an embossing effect. iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # Search in some images either for all edges or for # directed edges. These edges are then marked in a black # and white image and overlayed with the original image # using an alpha of 0 to 0.7. sometimes(iaa.OneOf([ iaa.EdgeDetect(alpha=(0, 0.7)), iaa.DirectedEdgeDetect( alpha=(0, 0.7), direction=(0.0, 1.0) ), ])), # Add gaussian noise to some images. # In 50% of these cases, the noise is randomly sampled per # channel and pixel. # In the other 50% of all cases it is sampled once per # pixel (i.e. brightness change). iaa.AdditiveGaussianNoise( loc=0, scale=(0.0, 0.05*255), per_channel=0.5 ), # Either drop randomly 1 to 10% of all pixels (i.e. 
set # them to black) or drop them on an image with 2-5% percent # of the original size, leading to large dropped # rectangles. iaa.OneOf([ iaa.Dropout((0.01, 0.1), per_channel=0.5), iaa.CoarseDropout( (0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2 ), ]),
7967 2.443706759489844288902124937428528E-4797 1.221853379744922144451062468714264E-4797 7968 6.10926689872461072225531234357132E-4798 3.05463344936230536112765617178566E-4798 7969 1.52731672468115268056382808589283E-4798 7.63658362340576340281914042946415E-4799 7970 3.818291811702881701409570214732075E-4799 1.909145905851440850704785107366038E-4799 7971 9.54572952925720425352392553683019E-4800 4.772864764628602126761962768415095E-4800 7972 2.386432382314301063380981384207548E-4800 1.193216191157150531690490692103774E-4800 7973 5.96608095578575265845245346051887E-4801 2.983040477892876329226226730259435E-4801 7974 1.491520238946438164613113365129718E-4801 7.457601194732190823065566825648588E-4802 7975 3.728800597366095411532783412824294E-4802 1.864400298683047705766391706412147E-4802 7976 9.322001493415238528831958532060735E-4803 4.661000746707619264415979266030368E-4803 7977 2.330500373353809632207989633015184E-4803 1.165250186676904816103994816507592E-4803 7978 5.82625093338452408051997408253796E-4804 2.91312546669226204025998704126898E-4804 7979 1.45656273334613102012999352063449E-4804 7.28281366673065510064996760317245E-4805 7980 3.641406833365327550324983801586225E-4805 1.820703416682663775162491900793113E-4805 7981 9.103517083413318875812459503965565E-4806 4.551758541706659437906229751982783E-4806 7982 2.275879270853329718953114875991392E-4806 1.137939635426664859476557437995696E-4806 7983 5.68969817713332429738278718997848E-4807 2.84484908856666214869139359498924E-4807 7984 1.42242454428333107434569679749462E-4807 7.1121227214166553717284839874731E-4808 7985 3.55606136070832768586424199373655E-4808 1.778030680354163842932120996868275E-4808 7986 8.890153401770819214660604984341375E-4809 4.445076700885409607330302492170688E-4809 7987 2.222538350442704803665151246085344E-4809 1.111269175221352401832575623042672E-4809 7988 5.55634587610676200916287811521336E-4810 2.77817293805338100458143905760668E-4810 7989 1.38908646902669050229071952880334E-4810 
6.9454323451334525114535976440167E-4811 7990 3.47271617256672625572679882200835E-4811 1.736358086283363127863399411004175E-4811 7991 8.681790431416815639316997055020875E-4812 4.340895215708407819658498527510438E-4812 7992 2.170447607854203909829249263755219E-4812 1.085223803927101954914624631877610E-4812 7993 5.42611901963550977457312315938805E-4813 2.713059509817754887286561579694025E-4813 7994 1.356529754908877443643280789847013E-4813 6.782648774544387218216403949235063E-4814 7995 3.391324387272193609108201974617532E-4814 1.695662193636096804554100987308766E-4814 7996 8.47831096818048402277050493654383E-4815 4.239155484090242011385252468271915E-4815 7997 2.119577742045121005692626234135958E-4815 1.059788871022560502846313117067979E-4815 7998 5.298944355112802514231565585339895E-4816 2.649472177556401257115782792669948E-4816 7999 1.324736088778200628557891396334974E-4816 6.62368044389100314278945698167487E-4817 8000 3.311840221945501571394728490837435E-4817 1.655920110972750785697364245418718E-4817 8001 8.27960055486375392848682122709359E-4818 4.139800277431876964243410613546795E-4818 8002 2.069900138715938482121705306773398E-4818 1.034950069357969241060852653386699E-4818 8003 5.174750346789846205304263266933495E-4819 2.587375173394923102652131633466748E-4819 8004 1.293687586697461551326065816733374E-4819 6.46843793348730775663032908366687E-4820 8005 3.234218966743653878315164541833435E-4820 1.617109483371826939157582270916718E-4820 8006 8.08554741685913469578791135458359E-4821 4.042773708429567347893955677291795E-4821 8007 2.021386854214783673946977838645898E-4821 1.010693427107391836973488919322949E-4821 8008 5.053467135536959184867444596614745E-4822 2.526733567768479592433722298307373E-4822 8009 1.263366783884239796216861149153687E-4822 6.316833919421198981084305745768433E-4823 8010 3.158416959710599490542152872884217E-4823 1.579208479855299745271076436442108E-4823 8011 7.89604239927649872635538218221054E-4824 3.94802119963824936317769109110527E-4824 8012 
1.974010599819124681588845545552635E-4824 9.870052999095623407944227727763175E-4825 8013 4.935026499547811703972113863881588E-4825 2.467513249773905851986056931940794E-4825 8014 1.233756624886952925993028465970397E-4825 6.168783124434764629965142329851985E-4826 8015 3.084391562217382314982571164925993E-4826 1.542195781108691157491285582462996E-4826 8016 7.71097890554345578745642791231498E-4827 3.85548945277172789372821395615749E-4827 8017 1.927744726385863946864106978078745E-4827 9.638723631929319734320534890393725E-4828 8018 4.819361815964659867160267445196863E-4828 2.409680907982329933580133722598431E-4828 8019 1.204840453991164966790066861299216E-4828 6.024202269955824833950334306496078E-4829 8020 3.012101134977912416975167153248039E-4829 1.506050567488956208487583576624020E-4829 8021 7.53025283744478104243791788312010E-4830 3.76512641872239052121895894156005E-4830 8022 1.882563209361195260609479470780025E-4830 9.412816046805976303047397353900125E-4831 8023 4.706408023402988151523698676950063E-4831 2.353204011701494075761849338475031E-4831 8024 1.176602005850747037880924669237516E-4831 5.883010029253735189404623346187578E-4832 8025 2.941505014626867594702311673093789E-4832 1.470752507313433797351155836546895E-4832 8026 7.353762536567168986755779182734475E-4833 3.676881268283584493377889591367238E-4833 8027 1.838440634141792246688944795683619E-4833 9.192203170708961233444723978418095E-4834 8028 4.596101585354480616722361989209048E-4834 2.298050792677240308361180994604524E-4834 8029 1.149025396338620154180590497302262E-4834 5.74512698169310077090295248651131E-4835 8030 2.872563490846550385451476243255655E-4835 1.436281745423275192725738121627828E-4835 8031 7.18140872711637596362869060813914E-4836 3.59070436355818798181434530406957E-4836 8032 1.795352181779093990907172652034785E-4836 8.976760908895469954535863260173925E-4837 8033 4.488380454447734977267931630086963E-4837 2.244190227223867488633965815043481E-4837 8034 1.122095113611933744316982907521741E-4837 
5.610475568059668721584914537608703E-4838 8035 2.805237784029834360792457268804352E-4838 1.402618892014917180396228634402176E-4838 8036 7.01309446007458590198114317201088E-4839 3.50654723003729295099057158600544E-4839 8037 1.75327361501864647549528579300272E-4839 8.7663680750932323774764289650136E-4840 8038 4.3831840375466161887382144825068E-4840 2.1915920187733080943691072412534E-4840 8039 1.0957960093866540471845536206267E-4840 5.4789800469332702359227681031335E-4841 8040 2.73949002346663511796138405156675E-4841 1.369745011733317558980692025783375E-4841 8041 6.848725058666587794903460128916875E-4842 3.424362529333293897451730064458438E-4842 8042 1.712181264666646948725865032229219E-4842 8.560906323333234743629325161146095E-4843 8043 4.280453161666617371814662580573048E-4843 2.140226580833308685907331290286524E-4843 8044 1.070113290416654342953665645143262E-4843 5.35056645208327171476832822571631E-4844 8045 2.675283226041635857384164112858155E-4844 1.337641613020817928692082056429078E-4844 8046 6.68820806510408964346041028214539E-4845 3.344104032552044821730205141072695E-4845 8047 1.672052016276022410865102570536348E-4845 8.360260081380112054325512852681738E-4846 8048 4.180130040690056027162756426340869E-4846 2.090065020345028013581378213170435E-4846 8049 1.045032510172514006790689106585218E-4846 5.225162550862570033953445532926088E-4847 8050 2.612581275431285016976722766463044E-4847 1.306290637715642508488361383231522E-4847 8051 6.53145318857821254244180691615761E-4848 3.265726594289106271220903458078805E-4848 8052 1.632863297144553135610451729039403E-4848 8.164316485722765678052258645197013E-4849 8053 4.082158242861382839026129322598507E-4849 2.041079121430691419513064661299253E-4849 8054 1.020539560715345709756532330649627E-4849 5.102697803576728548782661653248133E-4850 8055 2.551348901788364274391330826624067E-4850 1.275674450894182137195665413312033E-4850 8056 6.378372254470910685978327066560165E-4851 3.189186127235455342989163533280083E-4851 8057 
1.594593063617727671494581766640042E-4851 7.972965318088638357472908833200208E-4852 8058 3.986482659044319178736454416600104E-4852 1.993241329522159589368227208300052E-4852 8059 9.96620664761079794684113604150026E-4853 4.98310332380539897342056802075013E-4853 8060 2.491551661902699486710284010375065E-4853 1.245775830951349743355142005187533E-4853 8061 6.228879154756748716775710025937665E-4854 3.114439577378374358387855012968833E-4854 8062 1.557219788689187179193927506484417E-4854 7.786098943445935895969637532422083E-4855 8063 3.893049471722967947984818766211042E-4855 1.946524735861483973992409383105521E-4855 8064 9.732623679307419869962046915527605E-4856 4.866311839653709934981023457763803E-4856 8065 2.433155919826854967490511728881902E-4856 1.216577959913427483745255864440951E-4856 8066 6.082889799567137418726279322204755E-4857 3.041444899783568709363139661102378E-4857 8067 1.520722449891784354681569830551189E-4857 7.603612249458921773407849152755945E-4858 8068 3.801806124729460886703924576377973E-4858 1.900903062364730443351962288188986E-4858 8069 9.50451531182365221675981144094493E-4859 4.752257655911826108379905720472465E-4859 8070 2.376128827955913054189952860236233E-4859 1.188064413977956527094976430118116E-4859 8071 5.94032206988978263547488215059058E-4860 2.97016103494489131773744107529529E-4860 8072 1.485080517472445658868720537647645E-4860 7.425402587362228294343602688238225E-4861 8073 3.712701293681114147171801344119113E-4861 1.856350646840557073585900672059556E-4861 8074 9.28175323420278536792950336029778E-4862 4.64087661710139268396475168014889E-4862 8075 2.320438308550696341982375840074445E-4862 1.160219154275348170991187920037223E-4862 8076 5.801095771376740854955939600186115E-4863 2.900547885688370427477969800093058E-4863 8077 1.450273942844185213738984900046529E-4863 7.251369714220926068694924500232645E-4864 8078 3.625684857110463034347462250116323E-4864 1.812842428555231517173731125058161E-4864 8079 9.064212142776157585868655625290805E-4865 
4.532106071388078792934327812645403E-4865 8080 2.266053035694039396467163906322702E-4865 1.133026517847019698233581953161351E-4865 8081 5.665132589235098491167909765806755E-4866 2.832566294617549245583954882903378E-4866 8082 1.416283147308774622791977441451689E-4866 7.081415736543873113959887207258445E-4867 8083 3.540707868271936556979943603629223E-4867 1.770353934135968278489971801814611E-4867 8084 8.851769670679841392449859009073055E-4868 4.425884835339920696224929504536528E-4868 8085 2.212942417669960348112464752268264E-4868 1.106471208834980174056232376134132E-4868 8086 5.53235604417490087028116188067066E-4869 2.76617802208745043514058094033533E-4869 8087 1.383089011043725217570290470167665E-4869 6.915445055218626087851452350838325E-4870 8088 3.457722527609313043925726175419163E-4870 1.728861263804656521962863087709581E-4870 8089 8.644306319023282609814315438547905E-4871 4.322153159511641304907157719273953E-4871 8090 2.161076579755820652453578859636977E-4871 1.080538289877910326226789429818488E-4871 8091 5.40269144938955163113394714909244E-4872 2.70134572469477581556697357454622E-4872 8092 1.35067286234738790778348678727311E-4872 6.75336431173693953891743393636555E-4873 8093 3.376682155868469769458716968182775E-4873 1.688341077934234884729358484091388E-4873 8094 8.44170538967117442364679242045694E-4874 4.22085269483558721182339621022847E-4874 8095 2.110426347417793605911698105114235E-4874 1.055213173708896802955849052557118E-4874 8096 5.27606586854448401477924526278559E-4875 2.638032934272242007389622631392795E-4875 8097 1.319016467136121003694811315696398E-4875 6.595082335680605018474056578481988E-4876 8098 3.297541167840302509237028289240994E-4876 1.648770583920151254618514144620497E-4876 8099 8.243852919600756273092570723102485E-4877 4.121926459800378136546285361551243E-4877 8100 2.060963229900189068273142680775622E-4877 1.030481614950094534136571340387811E-4877 8101 5.152408074750472670682856701939055E-4878 2.576204037375236335341428350969528E-4878 8102 
1.288102018687618167670714175484764E-4878 6.44051009343809083835357087742382E-4879 8103 3.22025504671904541917678543871191E-4879 1.610127523359522709588392719355955E-4879 8104 8.050637616797613547941963596779775E-4880 4.025318808398806773970981798389888E-4880 8105 2.012659404199403386985490899194944E-4880 1.006329702099701693492745449597472E-4880 8106 5.03164851049850846746372724798736E-4881 2.51582425524925423373186362399368E-4881 8107 1.25791212762462711686593181199684E-4881 6.2895606381231355843296590599842E-4882 8108 3.1447803190615677921648295299921E-4882 1.57239015953078389608241476499605E-4882 8109 7.86195079765391948041207382498025E-4883 3.930975398826959740206036912490125E-4883 8110 1.965487699413479870103018456245063E-4883 9.827438497067399350515092281225313E-4884 8111 4.913719248533699675257546140612657E-4884 2.456859624266849837628773070306328E-4884 8112 1.228429812133424918814386535153164E-4884 6.14214906066712459407193267576582E-4885 8113 3.07107453033356229703596633788291E-4885 1.535537265166781148517983168941455E-4885 8114 7.677686325833905742589915844707275E-4886 3.838843162916952871294957922353638E-4886 8115 1.919421581458476435647478961176819E-4886 9.597107907292382178237394805884095E-4887 8116 4.798553953646191089118697402942048E-4887 2.399276976823095544559348701471024E-4887 8117 1.199638488411547772279674350735512E-4887 5.99819244205773886139837175367756E-4888 8118 2.99909622102886943069918587683878E-4888 1.49954811051443471534959293841939E-4888 8119 7.49774055257217357674796469209695E-4889 3.748870276286086788373982346048475E-4889 8120 1.874435138143043394186991173024238E-4889 9.372175690715216970934955865121188E-4890 8121 4.686087845357608485467477932560594E-4890 2.343043922678804242733738966280297E-4890 8122 1.171521961339402121366869483140149E-4890 5.857609806697010606834347415700743E-4891 8123 2.928804903348505303417173707850372E-4891 1.464402451674252651708586853925186E-4891 8124 7.32201225837126325854293426962593E-4892 
3.661006129185631629271467134812965E-4892 8125 1.830503064592815814635733567406483E-4892 9.152515322964079073178667837032413E-4893 8126 4.576257661482039536589333918516207E-4893 2.288128830741019768294666959258103E-4893 8127 1.144064415370509884147333479629052E-4893 5.720322076852549420736667398145258E-4894 8128 2.860161038426274710368333699072629E-4894 1.430080519213137355184166849536315E-4894 8129 7.150402596065686775920834247681575E-4895 3.575201298032843387960417123840788E-4895 8130 1.787600649016421693980208561920394E-4895 8.93800324508210846990104280960197E-4896 8131 4.469001622541054234950521404800985E-4896 2.234500811270527117475260702400493E-4896 8132 1.117250405635263558737630351200247E-4896 5.586252028176317793688151756001233E-4897 8133 2.793126014088158896844075878000617E-4897 1.396563007044079448422037939000308E-4897 8134 6.98281503522039724211018969500154E-4898 3.49140751761019862105509484750077E-4898 8135 1.745703758805099310527547423750385E-4898 8.728518794025496552637737118751925E-4899 8136 4.364259397012748276318868559375963E-4899 2.182129698506374138159434279687981E-4899 8137 1.091064849253187069079717139843991E-4899 5.455324246265935345398585699219953E-4900 8138 2.727662123132967672699292849609977E-4900 1.363831061566483836349646424804988E-4900 8139 6.81915530783241918174823212402494E-4901 3.40957765391620959087411606201247E-4901 8140 1.704788826958104795437058031006235E-4901 8.523944134790523977185290155031175E-4902 8141 4.261972067395261988592645077515588E-4902 2.130986033697630994296322538757794E-4902 8142 1.065493016848815497148161269378897E-4902 5.327465084244077485740806346894485E-4903 8143 2.663732542122038742870403173447243E-4903 1.331866271061019371435201586723621E-4903 8144 6.659331355305096857176007933618105E-4904 3.329665677652548428588003966809053E-4904 8145 1.664832838826274214294001983404527E-4904 8.324164194131371071470009917022633E-4905 8146 4.162082097065685535735004958511317E-4905 2.081041048532842767867502479255658E-4905 8147 
1.040520524266421383933751239627829E-4905 5.202602621332106919668756198139145E-4906 8148 2.601301310666053459834378099069573E-4906 1.300650655333026729917189049534786E-4906 8149 6.50325327666513364958594524767393E-4907 3.251626638332566824792972623836965E-4907 8150 1.625813319166283412396486311918483E-4907 8.129066595831417061982431559592413E-4908 8151 4.064533297915708530991215779796207E-4908 2.032266648957854265495607889898103E-4908 8152 1.016133324478927132747803944949052E-4908 5.080666622394635663739019724745258E-4909 8153 2.540333311197317831869509862372629E-4909 1.270166655598658915934754931186315E-4909 8154 6.350833277993294579673774655931575E-4910 3.175416638996647289836887327965788E-4910 8155 1.587708319498323644918443663982894E-4910 7.93854159749161822459221831991447E-4911 8156 3.969270798745809112296109159957235E-4911 1.984635399372904556148054579978618E-4911 8157 9.92317699686452278074027289989309E-4912 4.961588498432261390370136449946545E-4912 8158 2.480794249216130695185068224973273E-4912 1.240397124608065347592534112486636E-4912 8159 6.20198562304032673796267056243318E-4913 3.10099281152016336898133528121659E-4913 8160 1.550496405760081684490667640608295E-4913 7.752482028800408422453338203041475E-4914 8161 3.876241014400204211226669101520738E-4914 1.938120507200102105613334550760369E-4914 8162 9.690602536000510528066672753801845E-4915 4.845301268000255264033336376900923E-4915 8163 2.422650634000127632016668188450462E-4915 1.211325317000063816008334094225231E-4915 8164 6.056626585000319080041670471126155E-4916 3.028313292500159540020835235563078E-4916 8165 1.514156646250079770010417617781539E-4916 7.570783231250398850052088088907695E-4917 8166 3.785391615625199425026044044453848E-4917 1.892695807812599712513022022226924E-4917 8167 9.46347903906299856256511011113462E-4918 4.73173951953149928128255505556731E-4918 8168 2.365869759765749640641277527783655E-4918 1.182934879882874820320638763891828E-4918 8169 5.91467439941437410160319381945914E-4919 
2.95733719970718705080159690972957E-4919 8170 1.478668599853593525400798454864785E-4919 7.393342999267967627003992274323925E-4920 8171 3.696671499633983813501996137161963E-4920 1.848335749816991906750998068580981E-4920 8172 9.241678749084959533754990342904905E-4921 4.620839374542479766877495171452453E-4921 8173 2.310419687271239883438747585726227E-4921 1.155209843635619941719373792863113E-4921 8174 5.776049218178099708596868964315565E-4922 2.888024609089049854298434482157783E-4922 8175 1.444012304544524927149217241078892E-4922 7.220061522722624635746086205394458E-4923 8176 3.610030761361312317873043102697229E-4923 1.805015380680656158936521551348615E-4923 8177 9.025076903403280794682607756743075E-4924 4.512538451701640397341303878371538E-4924 8178 2.256269225850820198670651939185769E-4924 1.128134612925410099335325969592885E-4924 8179 5.640673064627050496676629847964425E-4925 2.820336532313525248338314923982213E-4925 8180 1.410168266156762624169157461991107E-4925 7.050841330783813120845787309955533E-4926 8181 3.525420665391906560422893654977767E-4926 1.762710332695953280211446827488883E-4926 8182 8.813551663479766401057234137444415E-4927 4.406775831739883200528617068722208E-4927 8183 2.203387915869941600264308534361104E-4927 1.101693957934970800132154267180552E-4927 8184 5.50846978967485400066077133590276E-4928 2.75423489483742700033038566795138E-4928 8185 1.37711744741871350016519283397569E-4928 6.88558723709356750082596416987845E-4929 8186 3.442793618546783750412982084939225E-4929 1.721396809273391875206491042469613E-4929 8187 8.606984046366959376032455212348065E-4930 4.303492023183479688016227606174033E-4930 8188 2.151746011591739844008113803087017E-4930 1.075873005795869922004056901543508E-4930 8189 5.37936502897934961002028450771754E-4931 2.68968251448967480501014225385877E-4931 8190 1.344841257244837402505071126929385E-4931 6.724206286224187012525355634646925E-4932 8191 3.362103143112093506262677817323463E-4932 1.681051571556046753131338908661731E-4932 8192 
8.405257857780233765656694543308655E-4933 4.202628928890116882828347271654328E-4933 8193 2.101314464445058441414173635827164E-4933 1.050657232222529220707086817913582E-4933 8194 5.25328616111264610353543408956791E-4934 2.626643080556323051767717044783955E-4934 8195 1.313321540278161525883858522391978E-4934 6.566607701390807629419292611959888E-4935 8196 3.283303850695403814709646305979944E-4935 1.641651925347701907354823152989972E-4935 8197 8.20825962673850953677411576494986E-4936 4.10412981336925476838705788247493E-4936 8198 2.052064906684627384193528941237465E-4936 1.026032453342313692096764470618733E-4936 8199 5.130162266711568460483822353093665E-4937 2.565081133355784230241911176546833E-4937 8200 1.282540566677892115120955588273417E-4937 6.412702833389460575604777941367083E-4938 8201 3.206351416694730287802388970683542E-4938 1.603175708347365143901194485341771E-4938 8202 8.015878541736825719505972426708855E-4939 4.007939270868412859752986213354428E-4939 8203 2.003969635434206429876493106677214E-4939 1.001984817717103214938246553338607E-4939 8204 5.009924088585516074691232766693035E-4940 2.504962044292758037345616383346518E-4940 8205 1.252481022146379018672808191673259E-4940 6.262405110731895093364040958366295E-4941 8206 3.131202555365947546682020479183148E-4941 1.565601277682973773341010239591574E-4941 8207 7.82800638841486886670505119795787E-4942 3.914003194207434433352525598978935E-4942 8208 1.957001597103717216676262799489468E-4942 9.785007985518586083381313997447338E-4943 8209 4.892503992759293041690656998723669E-4943 2.446251996379646520845328499361835E-4943 8210 1.223125998189823260422664249680918E-4943 6.115629990949116302113321248404588E-4944 8211 3.057814995474558151056660624202294E-4944 1.528907497737279075528330312101147E-4944 8212 7.644537488686395377641651560505735E-4945 3.822268744343197688820825780252868E-4945 8213 1.911134372171598844410412890126434E-4945 9.55567186085799422205206445063217E-4946 8214 4.777835930428997111026032225316085E-4946 
2.388917965214498555513016112658043E-4946 8215 1.194458982607249277756508056329022E-4946 5.972294913036246388782540281645108E-4947 8216 2.986147456518123194391270140822554E-4947 1.493073728259061597195635070411277E-4947 8217 7.465368641295307985978175352056385E-4948 3.732684320647653992989087676028193E-4948 8218 1.866342160323826996494543838014097E-4948 9.331710801619134982472719190070483E-4949 8219 4.665855400809567491236359595035242E-4949 2.332927700404783745618179797517621E-4949 8220 1.166463850202391872809089898758811E-4949 5.832319251011959364045449493794053E-4950 8221 2.916159625505979682022724746897027E-4950 1.458079812752989841011362373448513E-4950 8222 7.290399063764949205056811867242565E-4951 3.645199531882474602528405933621283E-4951 8223 1.822599765941237301264202966810642E-4951 9.112998829706186506321014834053208E-4952 8224 4.556499414853093253160507417026604E-4952 2.278249707426546626580253708513302E-4952 8225 1.139124853713273313290126854256651E-4952 5.695624268566366566450634271283255E-4953 8226 2.847812134283183283225317135641628E-4953 1.423906067141591641612658567820814E-4953 8227 7.11953033570795820806329283910407E-4954 3.559765167853979104031646419552035E-4954 8228 1.779882583926989552015823209776018E-4954 8.899412919634947760079116048880088E-4955 8229 4.449706459817473880039558024440044E-4955 2.224853229908736940019779012220022E-4955 8230 1.112426614954368470009889506110011E-4955 5.562133074771842350049447530550055E-4956 8231 2.781066537385921175024723765275028E-4956 1.390533268692960587512361882637514E-4956 8232 6.95266634346480293756180941318757E-4957 3.476333171732401468780904706593785E-4957 8233 1.738166585866200734390452353296893E-4957 8.690832929331003671952261766484463E-4958 8234 4.345416464665501835976130883242232E-4958 2.172708232332750917988065441621116E-4958 8235 1.086354116166375458994032720810558E-4958 5.43177058083187729497016360405279E-4959 8236 2.715885290415938647485081802026395E-4959 1.357942645207969323742540901013198E-4959 
8237 6.78971322603984661871270450506599E-4960 3.394856613019923309356352252532995E-4960 8238 1.697428306509961654678176126266498E-4960 8.487141532549808273390880631332488E-4961 8239 4.243570766274904136695440315666244E-4961 2.121785383137452068347720157833122E-4961 8240 1.060892691568726034173860078916561E-4961 5.304463457843630170869300394582805E-4962 8241 2.652231728921815085434650197291403E-4962 1.326115864460907542717325098645701E-4962 8242 6.630579322304537713586625493228505E-4963 3.315289661152268856793312746614253E-4963 8243 1.657644830576134428396656373307127E-4963 8.288224152880672141983281866535633E-4964 8244 4.144112076440336070991640933267817E-4964 2.072056038220168035495820466633908E-4964 8245 1.036028019110084017747910233316954E-4964 5.18014009555042008873955116658477E-4965 8246 2.590070047775210044369775583292385E-4965 1.295035023887605022184887791646193E-4965 8247 6.475175119438025110924438958230965E-4966 3.237587559719012555462219479115483E-4966 8248 1.618793779859506277731109739557742E-4966 8.093968899297531388655548697788708E-4967 8249 4.046984449648765694327774348894354E-4967 2.023492224824382847163887174447177E-4967 8250 1.011746112412191423581943587223589E-4967 5.058730562060957117909717936117943E-4968 8251 2.529365281030478558954858968058972E-4968 1.264682640515239279477429484029486E-4968 8252 6.32341320257619639738714742014743E-4969 3.161706601288098198693573710073715E-4969 8253 1.580853300644049099346786855036858E-4969 7.904266503220245496733934275184288E-4970 8254 3.952133251610122748366967137592144E-4970 1.976066625805061374183483568796072E-4970 8255 9.88033312902530687091741784398036E-4971 4.94016656451265343545870892199018E-4971 8256 2.47008328225632671772935446099509E-4971 1.235041641128163358864677230497545E-4971 8257 6.175208205640816794323386152487725E-4972 3.087604102820408397161693076243863E-4972 8258 1.543802051410204198580846538121932E-4972 7.719010257051020992904232690609658E-4973 8259 3.859505128525510496452116345304829E-4973 
1.929752564262755248226058172652415E-4973 8260 9.648762821313776241130290863262075E-4974 4.824381410656888120565145431631038E-4974 8261 2.412190705328444060282572715815519E-4974 1.206095352664222030141286357907760E-4974 8262 6.03047676332111015070643178953880E-4975 3.01523838166055507535321589476940E-4975 8263 1.50761919083027753767660794738470E-4975 7.5380959541513876883830397369235E-4976 8264 3.76904797707569384419151986846175E-4976 1.884523988537846922095759934230875E-4976 8265 9.422619942689234610478799671154375E-4977 4.711309971344617305239399835577188E-4977 8266 2.355654985672308652619699917788594E-4977 1.177827492836154326309849958894297E-4977 8267 5.889137464180771631549249794471485E-4978 2.944568732090385815774624897235743E-4978 8268 1.472284366045192907887312448617872E-4978 7.361421830225964539436562243089358E-4979 8269 3.680710915112982269718281121544679E-4979 1.840355457556491134859140560772340E-4979 8270 9.20177728778245567429570280386170E-4980 4.60088864389122783714785140193085E-4980 8271 2.300444321945613918573925700965425E-4980 1.150222160972806959286962850482713E-4980 8272 5.751110804864034796434814252413565E-4981 2.875555402432017398217407126206783E-4981 8273 1.437777701216008699108703563103392E-4981 7.188888506080043495543517815516958E-4982 8274 3.594444253040021747771758907758479E-4982 1.797222126520010873885879453879240E-4982 8275 8.98611063260005436942939726939620E-4983 4.49305531630002718471469863469810E-4983 8276 2.24652765815001359235734931734905E-4983 1.123263829075006796178674658674525E-4983 8277 5.616319145375033980893373293372625E-4984 2.808159572687516990446686646686313E-4984 8278 1.404079786343758495223343323343157E-4984 7.020398931718792476116716616715783E-4985 8279 3.510199465859396238058358308357892E-4985 1.755099732929698119029179154178946E-4985 8280 8.77549866464849059514589577089473E-4986 4.387749332324245297572947885447365E-4986 8281 2.193874666162122648786473942723683E-4986 1.096937333081061324393236971361841E-4986 8282 
5.484686665405306621966184856809205E-4987 2.742343332702653310983092428404603E-4987 8283 1.371171666351326655491546214202302E-4987 6.855858331756633277457731071011508E-4988 8284 3.427929165878316638728865535505754E-4988 1.713964582939158319364432767752877E-4988 8285 8.569822914695791596822163838764385E-4989 4.284911457347895798411081919382193E-4989 8286 2.142455728673947899205540959691097E-4989 1.071227864336973949602770479845548E-4989
<reponame>hat-open/hat-aio import asyncio import collections import signal import subprocess import sys import threading import time import unittest.mock import pytest from hat import aio pytestmark = pytest.mark.asyncio async def test_first(): queue = aio.Queue() queue.put_nowait(1) queue.put_nowait(2) queue.put_nowait(3) queue.close() result = await aio.first(queue, lambda i: i == 2) assert result == 2 assert not queue.empty() assert queue.get_nowait() == 3 assert queue.empty() queue = aio.Queue() queue.put_nowait(1) queue.put_nowait(2) queue.put_nowait(3) queue.close() result = await aio.first(queue, lambda i: i == 4) assert result is None assert queue.empty() async def test_first_example(): async def async_range(x): for i in range(x): await asyncio.sleep(0) yield i assert await aio.first(async_range(3)) == 0 assert await aio.first(async_range(3), lambda x: x > 1) == 2 assert await aio.first(async_range(3), lambda x: x > 2) is None assert await aio.first(async_range(3), lambda x: x > 2, 123) == 123 async def test_first_example_docs(): queue = aio.Queue() queue.put_nowait(1) queue.put_nowait(2) queue.put_nowait(3) queue.close() assert 1 == await aio.first(queue) assert 3 == await aio.first(queue, lambda x: x > 2) assert 123 == await aio.first(queue, default=123) async def test_uncancellable(): f1 = asyncio.Future() async def f(): return await f1 f2 = aio.uncancellable(f(), raise_cancel=False) f3 = asyncio.ensure_future(f2) asyncio.get_event_loop().call_soon(f3.cancel) f1.set_result(123) result = await f3 assert result == 123 async def test_uncancellable_exception(): f1 = asyncio.Future() e = Exception() async def f(): await asyncio.sleep(0) return await f1 f2 = aio.uncancellable(f(), raise_cancel=False) f3 = asyncio.ensure_future(f2) asyncio.get_event_loop().call_soon(f3.cancel) f1.set_exception(e) try: await f3 except Exception as ex: exc = ex assert exc is e async def test_uncancellable_vs_shield(): async def set_future(f, value): await asyncio.sleep(0.001) 
f.set_result(value) future = asyncio.Future() t1 = asyncio.shield(set_future(future, 1)) t2 = asyncio.ensure_future(t1) asyncio.get_event_loop().call_soon(t2.cancel) with pytest.raises(asyncio.CancelledError): await t2 assert not future.done() await future assert future.result() == 1 future = asyncio.Future() t1 = aio.uncancellable(set_future(future, 1), raise_cancel=True) t2 = asyncio.ensure_future(t1) asyncio.get_event_loop().call_soon(t2.cancel) with pytest.raises(asyncio.CancelledError): await t2 assert future.done() assert future.result() == 1 future = asyncio.Future() t1 = aio.uncancellable(set_future(future, 1), raise_cancel=False) t2 = asyncio.ensure_future(t1) asyncio.get_event_loop().call_soon(t2.cancel) await t2 assert future.done() assert future.result() == 1 async def test_call(): def f1(x): return x async def f2(x): await asyncio.sleep(0) return x result = await aio.call(f1, 123) assert result == 123 result = await aio.call(f2, 123) assert result == 123 async def test_call_on_cancel(): exceptions = aio.Queue() called = asyncio.Future() group = aio.Group(exceptions.put_nowait) async def closing(called): called.set_result(True) assert group.is_closing assert not group.is_closed group.spawn(aio.call_on_cancel, closing, called) assert not called.done() await group.async_close() assert called.done() assert exceptions.empty() async def test_call_on_cancel_example(): f = asyncio.Future() group = aio.Group() group.spawn(aio.call_on_cancel, f.set_result, 123) assert not f.done() await group.async_close() assert f.result() == 123 async def test_call_on_done(): f1 = asyncio.Future() f2 = asyncio.Future() f3 = asyncio.ensure_future(aio.call_on_done(f1, f2.set_result, 123)) await asyncio.sleep(0) assert not f1.done() assert not f2.done() assert not f3.done() f1.set_result(None) await asyncio.wait_for(f3, 0.001) assert f2.result() == 123 assert f3.result() is None async def test_call_on_done_example(): f = asyncio.Future() group = aio.Group() 
# NOTE(review): this chunk arrived whitespace-mangled — the whole pytest suite
# for `hat.aio` was collapsed onto a handful of physical lines. The code below
# is the same token stream restored to conventional formatting. Two fragments
# are truncated by the chunk boundaries and are preserved verbatim as comments
# (see the markers below) rather than reconstructed by guesswork.

# NOTE(review): chunk begins mid-test — the statements below are the tail of a
# `call_on_done` test whose `def` line lies before this chunk. Reattach to the
# original function when merging:
#     group.spawn(aio.call_on_done, f, group.close)
#     assert group.is_open
#     f.set_result(None)
#     await group.wait_closed()
#     assert group.is_closed


async def test_wait_for(event_loop):
    # Helper coroutines: one resolves to a value, one raises, after `delay`.
    async def return_result(delay):
        await asyncio.sleep(delay)
        return 123

    async def raise_exception(delay):
        await asyncio.sleep(delay)
        raise Exception()

    # Completes before the timeout: result passes through.
    result = await aio.wait_for(return_result(0), 0.001)
    assert result == 123

    # Exception raised before the timeout propagates.
    with pytest.raises(Exception):
        await aio.wait_for(raise_exception(0), 0.001)

    # Timeout fires first.
    with pytest.raises(asyncio.TimeoutError):
        await aio.wait_for(return_result(0.001), 0)

    # External cancellation before the wrapped future finishes.
    f = asyncio.ensure_future(aio.wait_for(return_result(0.001), 0))
    event_loop.call_soon(f.cancel)
    with pytest.raises(asyncio.CancelledError):
        await f

    # Cancellation racing a completed future: the result is delivered via
    # CancelledWithResultError instead of being lost.
    async def f1():
        try:
            await aio.wait_for(return_result(0), 0.001)
        except aio.CancelledWithResultError as e:
            assert e.result == 123
            assert e.exception is None
        else:
            assert False

    f = asyncio.ensure_future(f1())
    event_loop.call_soon(f.cancel)
    await f

    # Same race, but the wrapped future failed: the exception is carried.
    async def f2():
        try:
            await aio.wait_for(raise_exception(0), 0.001)
        except aio.CancelledWithResultError as e:
            assert e.result is None
            assert e.exception is not None
        else:
            assert False

    f = asyncio.ensure_future(f2())
    event_loop.call_soon(f.cancel)
    await f


async def test_create_executor():
    # Work submitted to the executor runs on a thread other than the caller's.
    executor = aio.create_executor()
    result = await executor(lambda: threading.current_thread().name)
    assert threading.current_thread().name != result


async def test_create_executor_example():
    # Distinct executors run on distinct worker threads.
    executor1 = aio.create_executor()
    executor2 = aio.create_executor()
    tid1 = await executor1(threading.get_ident)
    tid2 = await executor2(threading.get_ident)
    assert tid1 != tid2


@pytest.mark.skipif(sys.platform == 'win32',
                    reason="pthread_kill not supported")
def test_run_asyncio():
    # SIGINT delivered from another thread cancels the running coroutine.
    ident = threading.get_ident()

    def thread():
        time.sleep(0.01)
        signal.pthread_kill(ident, signal.SIGINT)

    t = threading.Thread(target=thread)
    t.start()

    async def f():
        await asyncio.Future()

    with pytest.raises(asyncio.CancelledError):
        aio.run_asyncio(f())

    t.join()

    # TODO check implementation for possible deadlock


def test_run_asyncio_with_subprocess(tmp_path):
    # End-to-end: a child process running run_asyncio must exit cleanly (code
    # 0) when interrupted by SIGINT / CTRL_BREAK.
    py_path = tmp_path / 'temp.py'
    run_path = tmp_path / 'temp.run'

    # NOTE(review): the single-space indents inside these literals may
    # originally have been wider (the whitespace collapse destroyed them);
    # one-space indents still form a valid generated script, so they are
    # preserved as received.
    with open(py_path, 'w', encoding='utf-8') as f:
        f.write(f"from hat import aio\n"
                f"import asyncio\n"
                f"import sys\n"
                f"async def f():\n"
                f" open(r'{run_path}', 'w').close()\n"
                f" await asyncio.Future()\n"
                f"try:\n"
                f" aio.run_asyncio(f())\n"
                f"except asyncio.CancelledError:\n"
                f" sys.exit(0)\n"
                f"except Exception:\n"
                f" sys.exit(10)\n"
                f"sys.exit(5)\n")

    p = subprocess.Popen([sys.executable, str(py_path)],
                         creationflags=(subprocess.CREATE_NEW_PROCESS_GROUP
                                        if sys.platform == 'win32' else 0))

    # Wait until the child signals (by touching run_path) that it is inside
    # run_asyncio and ready to be interrupted.
    while not run_path.exists():
        assert p.poll() is None
        time.sleep(0.01)

    p.send_signal(signal.CTRL_BREAK_EVENT if sys.platform == 'win32'
                  else signal.SIGINT)
    assert p.wait() == 0


@pytest.mark.skipif(sys.platform == 'win32',
                    reason="pthread_kill not supported")
def test_run_asyncio_with_multiple_signals():
    # Repeated SIGINTs must not break an uncancellable section.
    ident = threading.get_ident()

    def thread():
        time.sleep(0.01)
        for _ in range(5):
            signal.pthread_kill(ident, signal.SIGINT)
            time.sleep(0.001)

    t = threading.Thread(target=thread)
    t.start()

    async def f():
        await aio.uncancellable(asyncio.sleep(0.02), raise_cancel=False)
        return 1

    assert aio.run_asyncio(f()) == 1
    t.join()


# TODO: test run_asyncio with `handle_signals` and `loop`


def test_run_async_example():
    async def run():
        await asyncio.sleep(0)
        return 123

    result = aio.run_asyncio(run())
    assert result == 123


async def test_queue():
    queue = aio.Queue()
    assert not queue.is_closed

    # A pending get resolves once an item arrives.
    f = asyncio.ensure_future(queue.get())
    assert not f.done()

    queue.put_nowait(1)
    assert 1 == await f

    # close() is idempotent.
    for _ in range(5):
        queue.close()
    assert queue.is_closed

    with pytest.raises(aio.QueueClosedError):
        await queue.get()


async def test_queue_str():
    queue = aio.Queue()
    result = str(queue)
    assert isinstance(result, str)
    assert result


async def test_queue_len():
    # len() and qsize() track puts and gets in lockstep.
    count = 10
    queue = aio.Queue()
    assert len(queue) == 0
    for i in range(count):
        queue.put_nowait(None)
        assert len(queue) == i + 1
        assert queue.qsize() == i + 1
    for i in range(count):
        queue.get_nowait()
        assert queue.qsize() == count - i - 1
    assert len(queue) == 0


async def test_queue_get_nowait():
    queue = aio.Queue()
    with pytest.raises(aio.QueueEmptyError):
        queue.get_nowait()


async def test_queue_get_until_empty():
    # get_until_empty drains the queue and returns the last item.
    queue = aio.Queue()
    queue.put_nowait(1)
    queue.put_nowait(2)
    queue.put_nowait(3)
    result = await queue.get_until_empty()
    assert result == 3


async def test_queue_get_nowait_until_empty():
    queue = aio.Queue()
    queue.put_nowait(1)
    queue.put_nowait(2)
    queue.put_nowait(3)
    result = queue.get_nowait_until_empty()
    assert result == 3


async def test_queue_with_size():
    queue_size = 5
    queue = aio.Queue(queue_size)
    assert queue.maxsize == queue_size

    for _ in range(queue_size):
        queue.put_nowait(None)

    with pytest.raises(aio.QueueFullError):
        queue.put_nowait(None)


async def test_queue_put():
    # A put blocked on a full queue fails with QueueClosedError on close.
    queue = aio.Queue(1)
    await queue.put(1)
    put_future = asyncio.ensure_future(queue.put(1))
    asyncio.get_event_loop().call_soon(queue.close)
    with pytest.raises(aio.QueueClosedError):
        await put_future


async def test_queue_put_cancel():
    # Cancelling one blocked put must not lose the item of the other.
    queue = aio.Queue(1)
    await queue.put(0)
    f1 = asyncio.ensure_future(queue.put(1))
    f2 = asyncio.ensure_future(queue.put(2))
    await asyncio.sleep(0)
    assert 0 == queue.get_nowait()
    f1.cancel()
    assert 2 == await queue.get()
    with pytest.raises(asyncio.CancelledError):
        await f1
    await f2


@pytest.mark.parametrize('item_count', [0, 1, 2, 10])
async def test_queue_async_iterable(item_count):
    queue = aio.Queue()
    data = collections.deque()

    # FIX(review): the original body looped `range(10)`, leaving the
    # parametrized `item_count` unused — every parameter value ran the same
    # 10-item case. Loop over `item_count` so each parametrization is real.
    for i in range(item_count):
        queue.put_nowait(i)
        data.append(i)
    asyncio.get_running_loop().call_later(0.001, queue.close)

    # Async iteration yields items in FIFO order until the queue closes.
    async for i in queue:
        assert i == data.popleft()

    assert queue.empty()
    assert len(data) == 0


async def test_queue_get_canceled():
    # Cancelling the first waiter hands the item to the next one.
    queue = aio.Queue()
    f1 = asyncio.ensure_future(queue.get())
    f2 = asyncio.ensure_future(queue.get())
    await asyncio.sleep(0)
    queue.put_nowait(123)
    f1.cancel()
    with pytest.raises(asyncio.CancelledError):
        await f1
    assert 123 == await f2


async def test_queue_example():
    queue = aio.Queue(maxsize=1)

    async def producer():
        for i in range(4):
            await queue.put(i)
        queue.close()

    async def consumer():
        result = 0
        async for i in queue:
            result += i
        return result

    asyncio.ensure_future(producer())
    result = await consumer()
    assert result == 6


async def test_group():
    # Closing a group completes every wrapped future.
    group = aio.Group()
    futures = [group.wrap(asyncio.Future()) for _ in range(100)]
    assert not any(future.done() for future in futures)
    await group.async_close()
    assert all(future.done() for future in futures)
    assert not group.is_open
    assert group.is_closing
    assert group.is_closed
    await group.wait_closing()
    await group.wait_closed()


async def test_group_spawn_async_close():
    # A task may initiate its own group's close without deadlocking.
    async def task():
        group.spawn(group.async_close)
        await asyncio.Future()

    group = aio.Group()
    group.spawn(task)
    await group.wait_closed()


async def test_group_subgroup():
    # Closing a parent cascades the closing state to subgroups.
    g1 = aio.Group()
    g2 = g1.create_subgroup()
    f = g2.wrap(asyncio.Future())

    g1.close()
    assert g1.is_closing
    assert g2.is_closing
    assert not f.done()
    assert not g1.is_closed
    assert not g2.is_closed
    with pytest.raises(Exception):
        g1.create_subgroup()

    await g1.async_close()
    with pytest.raises(Exception):
        g1.create_subgroup()
    assert f.done()
    assert g1.is_closed
    assert g2.is_closed


async def test_group_async_close_subgroup_without_tasks():
    g1 = aio.Group()
    g2 = g1.create_subgroup()
    await g1.async_close()
    assert g1.is_closed
    assert g2.is_closed


async def test_group_spawn_subgroup_in_closing_subgroup():
    # create_subgroup must fail even from within a cancellation handler.
    exceptions = aio.Queue()
    g1 = aio.Group(lambda e: exceptions.put_nowait(e))
    g2 = g1.create_subgroup()

    async def task():
        try:
            await asyncio.Future()
        except asyncio.CancelledError:
            with pytest.raises(Exception):
                g1.create_subgroup()

    g2.spawn(task)
    await g1.async_close()
    assert g1.is_closed
    assert g2.is_closed
    assert exceptions.empty()


async def test_group_spawn_when_not_open():
    # spawn/wrap are rejected once the group is closing or closed.
    g = aio.Group()
    g.spawn(asyncio.Future)
    g.close()
    with pytest.raises(Exception):
        g.spawn(asyncio.Future)
    await g.async_close()
    with pytest.raises(Exception):
        g.spawn(asyncio.Future)
    with pytest.raises(Exception):
        g.wrap(asyncio.Future())


async def test_group_close_empty_group():
    # A group with no tasks closes synchronously.
    g = aio.Group()
    assert not g.is_closing
    assert not g.is_closed
    g.close()
    assert g.is_closing
    assert g.is_closed


async def test_group_context():
    # Leaving the async-with block closes the group and its tasks.
    async with aio.Group() as g:
        f = g.spawn(asyncio.Future)
        assert not f.done()
    assert f.done()


async def test_group_custom_exception_handler():
    def exception_cb(e):
        nonlocal e2
        e2 = e

    async def f():
        raise e1

    e1 = Exception()
    e2 = None
    g = aio.Group(exception_cb)
    with pytest.raises(Exception):
        await g.spawn(f)
    await g.async_close()

    # The custom handler received exactly the raised exception.
    assert e1 is e2


async def test_group_default_exception_handler():
    async def f():
        raise e

    e = Exception()
    g = aio.Group()
    # Patch the module logger so the default handler's warning is observable.
    # NOTE(review): indentation here is reconstructed from mangled source;
    # async_close is kept inside the patch so any close-time logging is also
    # captured — confirm against upstream.
    with unittest.mock.patch('hat.aio.mlog.warning') as mock:
        with pytest.raises(Exception):
            await g.spawn(f)
        await g.async_close()

    _, kwargs = mock.call_args
    assert kwargs['exc_info'] is e


# NOTE(review): chunk ends mid-test — the next definition runs past the end of
# this chunk and is preserved verbatim as a fragment; restore it from the
# continuation when merging:
#     async def test_group_example_docs_spawn():
#         async def f1(x):
#             try:
#                 await