code
stringlengths
2k
1.04M
repo_path
stringlengths
5
517
parsed_code
stringlengths
0
1.04M
quality_prob
float64
0.02
0.95
learning_prob
float64
0.02
0.93
import math from spydrnet_physical.util import ConnectPointList from svgwrite.container import Group DEFAULT_COLOR = " black" class ConnectionPattern: ''' This creates a connection patterns (`ConnectPointList`) based on pre-defined rule ''' def __init__(self, sizex, sizey): ''' Initialise FPGA parameters args: sizex (int): Width of FPGA grid sizey (int): Size of FPGA grid ''' self.sizex = sizex self.sizey = sizey self.xbias = 0 self.ybias = 0 self.dwg_main = None self._connect = ConnectPointList(sizex=sizex, sizey=sizey) @property def svg_main(self): """ Returns the svgwrite drawing object, call after running ``render_pattern`` """ return self.dwg_main @ property def connections(self): """ Returns the ConnectPointList in current pattern """ return self._connect @ connections.setter def connections(self, value): self._connect = value return self._connect @ staticmethod def _get_prime_factors(number): prime_factors = [] while number % 2 == 0: prime_factors.append(2) number = number / 2 for i in range(3, int(math.sqrt(number)) + 1, 2): while number % i == 0: prime_factors.append(int(i)) number = number / i if number > 2: prime_factors.append(int(number)) return prime_factors @staticmethod def get_htree(size, root=0, side=0, repeat=1): ''' Returns H-Tree of specific size Args: root(int): Extension in the root connection (Default=0) side(int): Extension in the side connection (Default=0) repeat(int): NUmber of sides on each direction (Default=1) .. 
rst-class:: ascii :: ^ ^ ^ ^ | | | | | | root | | +---------+----------+ | | | | | ^ | | | | | | SIDE v v | v v v | ^ ^ | +-+--+ | + | REPEAT + ''' points = ConnectPointList(sizex=size, sizey=size) size = size if size % 2 else (size-1) mid = (size+1)/2 points.cursor = (mid, mid) for _ in range(repeat): points.release_cursor() points.move_x(value=1, steps=int(mid/2)+root) points.hold_cursor() points.move_y(value=1, steps=int(mid/2)+side) points.move_y(value=-1, steps=int(mid/2)+side) points.cursor = (mid, mid) for _ in range(repeat): points.release_cursor() points.move_x(value=-1, steps=int(mid/2)+root) points.hold_cursor() points.move_y(value=1, steps=int(mid/2)+side) points.move_y(value=-1, steps=int(mid/2)+side) return points def auto_select(self): ''' Auto implements the global tree with crop and scale operations TODO: NotImplemented ''' NotImplementedError def add_htree(self, n=3): ''' Returns HTree pattern fo the given grid size This method auto creates multiple levels of HTree from the given grid size. 
Minimum size H-Tree is 5x5 args: n (int): 2^n, Number representng size of the grid ''' assert (math.log2(n-1) % 1) == 0, "Support only (2^n)+1 width" self._connect.merge(self.get_htree(n)) return self._connect dev_size = min(self.sizex, self.sizey) while n < dev_size: print(n) n = n*2 self.get_fishbone() return self._connect # points = self._connect # x_center = ((self.sizex+1)*0.5) # y_center = ((self.sizey+1)*0.5) # print(x_center, y_center) def reset(self): """ Removes all the ConnectionPoints from the pattern """ self._connect = ConnectPointList(sizex=self.sizex, sizey=self.sizey) def get_fishbone(self, width=None, height=None, steps=1, x_margin=(0, 0), y_margin=(0, 0)): ''' Returns fishbone pattern for the given grid size Spine is created at the center of the grid, to change bias when grid is symetric change ``xbias`` and ``ybias`` parameter x_margin(tuple(int, int)): Skips the repective grid connectivity y_margin(tuple(int, int)): Skips the repective grid connectivity ''' width = width or self.sizex height = height or self.sizey points = self._connect x_center = ((width+1)*0.5) x_pt = math.ceil(x_center) if self.xbias else math.floor(x_center) y_pt = (1+y_margin[0]) points.add_connection(x_pt, 0, x_pt, y_pt) points.cursor = (x_pt, y_pt) for indx in range(0, height-y_margin[1], steps): if not indx == 0: points.move_y(steps=steps) center = points.cursor while points.get_x < (width-x_margin[1]): points.move_x() points.cursor = center while points.get_x > (1 + x_margin[0]): points.move_x(-1) points.cursor = center return points def render_pattern(self, scale=20, title=None, add_module_labels=False): """ Renders the connection points """ dwg = self._connect.render_pattern(scale) self.dwg_main = [e for e in dwg.elements if e.get_id() == "main"][0] dwgText = self.dwg_main.add(Group(id="text")) dwgMarker = [e for e in dwg.elements if e.get_id() == "markers"] if dwgMarker: dwgMarker = dwgMarker[0] for i in range(0, self.sizex+1): 
dwgMarker.add(dwg.line(start=((i+0.5)*scale, 0.5*scale), end=((i+0.5)*scale, (self.sizey+0.5)*scale), class_="gridmarker")) for i in range(0, self.sizey+1): dwgMarker.add(dwg.line(start=(0.5*scale, (i+0.5)*scale), end=((self.sizex+0.5) * scale, (i+0.5)*scale), class_="gridmarker")) # Add labels to the grid if add_module_labels: for x in range(1, 1+self.sizex): for y in range(1, 1+self.sizey): txt = self._connect.get_top_instance(x, y).name label = dwg.text("", font_size=self.sizey*scale*0.03, alignment_baseline="middle", class_="gridLabels", text_anchor="middle", transform="scale(1,-1)", insert=(x*scale, (-1*y*scale) + 0.25*scale)) label.add(dwg.tspan(txt, x=[x*scale])) label.add(dwg.tspan( "["+self._connect.get_reference(x, y)+"]", font_size=self.sizey*scale*0.02, x=[x*scale], dy=["2%", ])) dwgText.add(label) # Add title to generated SVG image title = title or f" %d x %d FPGA " % (self.sizex, self.sizey) dwgText.add(dwg.text(title, insert=((self.sizex+1)*scale*0.5, -1*-0.5*scale), transform="scale(1,-1)", class_="moduleLabel", fill="black", font_size=self.sizey*scale*0.1, alignment_baseline="middle", text_anchor="middle")) width = self.sizex*scale + (scale) height = self.sizey*scale + (3*scale) x_offset = 0 y_offset = -1*height + (1.5*scale) dwg["width"] = width dwg["height"] = height dwg.viewbox(x_offset, y_offset, width, height) return dwg if __name__ == "__main__": # conn_list = ConnectPointList(5, 5) # conn_list.add_connection(1, 1, 1, 2) # conn_list.add_connection(1, 2, 2, 2) # print(conn_list) # conn_list.render_pattern().save(pretty=True, indent=4) # fpga = ConnectionPattern(5, 5) # conn_list = fpga.get_fishbone() # print(conn_list) # conn_list.rotate(90) # fpga.render_pattern().save(pretty=True, indent=4) fpga = ConnectionPattern(5, 5) left_tree = fpga.connections left_tree = fpga.get_fishbone(x_margin=(0, 0)) left_tree.scale(2, anchor=(1, 1)) fpga = ConnectionPattern(10, 10) conn_list = fpga.connections conn_list.merge(left_tree) conn_list.crop_edges() 
conn_list.sample_connections() fpga.render_pattern().save(pretty=True, indent=4)
spydrnet_physical/util/ConnectionPattern.py
import math from spydrnet_physical.util import ConnectPointList from svgwrite.container import Group DEFAULT_COLOR = " black" class ConnectionPattern: ''' This creates a connection patterns (`ConnectPointList`) based on pre-defined rule ''' def __init__(self, sizex, sizey): ''' Initialise FPGA parameters args: sizex (int): Width of FPGA grid sizey (int): Size of FPGA grid ''' self.sizex = sizex self.sizey = sizey self.xbias = 0 self.ybias = 0 self.dwg_main = None self._connect = ConnectPointList(sizex=sizex, sizey=sizey) @property def svg_main(self): """ Returns the svgwrite drawing object, call after running ``render_pattern`` """ return self.dwg_main @ property def connections(self): """ Returns the ConnectPointList in current pattern """ return self._connect @ connections.setter def connections(self, value): self._connect = value return self._connect @ staticmethod def _get_prime_factors(number): prime_factors = [] while number % 2 == 0: prime_factors.append(2) number = number / 2 for i in range(3, int(math.sqrt(number)) + 1, 2): while number % i == 0: prime_factors.append(int(i)) number = number / i if number > 2: prime_factors.append(int(number)) return prime_factors @staticmethod def get_htree(size, root=0, side=0, repeat=1): ''' Returns H-Tree of specific size Args: root(int): Extension in the root connection (Default=0) side(int): Extension in the side connection (Default=0) repeat(int): NUmber of sides on each direction (Default=1) .. 
rst-class:: ascii :: ^ ^ ^ ^ | | | | | | root | | +---------+----------+ | | | | | ^ | | | | | | SIDE v v | v v v | ^ ^ | +-+--+ | + | REPEAT + ''' points = ConnectPointList(sizex=size, sizey=size) size = size if size % 2 else (size-1) mid = (size+1)/2 points.cursor = (mid, mid) for _ in range(repeat): points.release_cursor() points.move_x(value=1, steps=int(mid/2)+root) points.hold_cursor() points.move_y(value=1, steps=int(mid/2)+side) points.move_y(value=-1, steps=int(mid/2)+side) points.cursor = (mid, mid) for _ in range(repeat): points.release_cursor() points.move_x(value=-1, steps=int(mid/2)+root) points.hold_cursor() points.move_y(value=1, steps=int(mid/2)+side) points.move_y(value=-1, steps=int(mid/2)+side) return points def auto_select(self): ''' Auto implements the global tree with crop and scale operations TODO: NotImplemented ''' NotImplementedError def add_htree(self, n=3): ''' Returns HTree pattern fo the given grid size This method auto creates multiple levels of HTree from the given grid size. 
Minimum size H-Tree is 5x5 args: n (int): 2^n, Number representng size of the grid ''' assert (math.log2(n-1) % 1) == 0, "Support only (2^n)+1 width" self._connect.merge(self.get_htree(n)) return self._connect dev_size = min(self.sizex, self.sizey) while n < dev_size: print(n) n = n*2 self.get_fishbone() return self._connect # points = self._connect # x_center = ((self.sizex+1)*0.5) # y_center = ((self.sizey+1)*0.5) # print(x_center, y_center) def reset(self): """ Removes all the ConnectionPoints from the pattern """ self._connect = ConnectPointList(sizex=self.sizex, sizey=self.sizey) def get_fishbone(self, width=None, height=None, steps=1, x_margin=(0, 0), y_margin=(0, 0)): ''' Returns fishbone pattern for the given grid size Spine is created at the center of the grid, to change bias when grid is symetric change ``xbias`` and ``ybias`` parameter x_margin(tuple(int, int)): Skips the repective grid connectivity y_margin(tuple(int, int)): Skips the repective grid connectivity ''' width = width or self.sizex height = height or self.sizey points = self._connect x_center = ((width+1)*0.5) x_pt = math.ceil(x_center) if self.xbias else math.floor(x_center) y_pt = (1+y_margin[0]) points.add_connection(x_pt, 0, x_pt, y_pt) points.cursor = (x_pt, y_pt) for indx in range(0, height-y_margin[1], steps): if not indx == 0: points.move_y(steps=steps) center = points.cursor while points.get_x < (width-x_margin[1]): points.move_x() points.cursor = center while points.get_x > (1 + x_margin[0]): points.move_x(-1) points.cursor = center return points def render_pattern(self, scale=20, title=None, add_module_labels=False): """ Renders the connection points """ dwg = self._connect.render_pattern(scale) self.dwg_main = [e for e in dwg.elements if e.get_id() == "main"][0] dwgText = self.dwg_main.add(Group(id="text")) dwgMarker = [e for e in dwg.elements if e.get_id() == "markers"] if dwgMarker: dwgMarker = dwgMarker[0] for i in range(0, self.sizex+1): 
dwgMarker.add(dwg.line(start=((i+0.5)*scale, 0.5*scale), end=((i+0.5)*scale, (self.sizey+0.5)*scale), class_="gridmarker")) for i in range(0, self.sizey+1): dwgMarker.add(dwg.line(start=(0.5*scale, (i+0.5)*scale), end=((self.sizex+0.5) * scale, (i+0.5)*scale), class_="gridmarker")) # Add labels to the grid if add_module_labels: for x in range(1, 1+self.sizex): for y in range(1, 1+self.sizey): txt = self._connect.get_top_instance(x, y).name label = dwg.text("", font_size=self.sizey*scale*0.03, alignment_baseline="middle", class_="gridLabels", text_anchor="middle", transform="scale(1,-1)", insert=(x*scale, (-1*y*scale) + 0.25*scale)) label.add(dwg.tspan(txt, x=[x*scale])) label.add(dwg.tspan( "["+self._connect.get_reference(x, y)+"]", font_size=self.sizey*scale*0.02, x=[x*scale], dy=["2%", ])) dwgText.add(label) # Add title to generated SVG image title = title or f" %d x %d FPGA " % (self.sizex, self.sizey) dwgText.add(dwg.text(title, insert=((self.sizex+1)*scale*0.5, -1*-0.5*scale), transform="scale(1,-1)", class_="moduleLabel", fill="black", font_size=self.sizey*scale*0.1, alignment_baseline="middle", text_anchor="middle")) width = self.sizex*scale + (scale) height = self.sizey*scale + (3*scale) x_offset = 0 y_offset = -1*height + (1.5*scale) dwg["width"] = width dwg["height"] = height dwg.viewbox(x_offset, y_offset, width, height) return dwg if __name__ == "__main__": # conn_list = ConnectPointList(5, 5) # conn_list.add_connection(1, 1, 1, 2) # conn_list.add_connection(1, 2, 2, 2) # print(conn_list) # conn_list.render_pattern().save(pretty=True, indent=4) # fpga = ConnectionPattern(5, 5) # conn_list = fpga.get_fishbone() # print(conn_list) # conn_list.rotate(90) # fpga.render_pattern().save(pretty=True, indent=4) fpga = ConnectionPattern(5, 5) left_tree = fpga.connections left_tree = fpga.get_fishbone(x_margin=(0, 0)) left_tree.scale(2, anchor=(1, 1)) fpga = ConnectionPattern(10, 10) conn_list = fpga.connections conn_list.merge(left_tree) conn_list.crop_edges() 
conn_list.sample_connections() fpga.render_pattern().save(pretty=True, indent=4)
0.758242
0.473414
from typing import List import sys import warnings import time import pickle import numpy as np import pandas as pd from tqdm import tqdm from load_data import read_data from model import Model warnings.filterwarnings("ignore") def arrays_to_str_list(arr: np.array) -> List[str]: """ Transform predictions arrays into a list of strings to match the submission format. """ result = [] for index in range(arr.shape[0]): tmp = list(arr[index, :]) tmp = "[" + " ".join([str(_tmp) for _tmp in tmp]) + "]" result.append(str(tmp)) return result def lists_to_str_list(array: List[List[str]]) -> List[str]: """ Transform most important features arrays into a list of strings to match submission format. """ result = [] for tmp in array: tmp = "[" + " ".join(["'" + str(s) + "'" for s in tmp]) + "]" result.append(str(tmp)) return result def make_predictions_for_test_suite( data: pd.DataFrame, test_suite: pd.Series, model: Model ) -> pd.DataFrame: """ Make predictions for single `test_suite` Args: data (pd.DataFrame): test_suite input data test_suite (pd.Series): test_suite value model (Model): model Returns: pd.DataFrame: predictions """ ( y_hat_1, y_hat_3, y_hat_1_label, y_hat_3_label, most_important_features, ) = model.predict(data) # combine results result = pd.DataFrame({}) result["timestamp"] = data.reset_index()["timestamp"] result["test_suite"] = test_suite result["predicted_induced_state"] = y_hat_1_label result["three_sec_predicted_induced_state"] = y_hat_3_label result["predicted_induced_state_confidence"] = arrays_to_str_list(y_hat_1) result["three_sec_predicted_induced_state_confidence"] = arrays_to_str_list(y_hat_3) result["top_three_features"] = lists_to_str_list(most_important_features) result_cols = [ "timestamp", "test_suite", "predicted_induced_state", "predicted_induced_state_confidence", "three_sec_predicted_induced_state", "three_sec_predicted_induced_state_confidence", "top_three_features", ] result = result[result_cols] return result def make_predictions( data: 
pd.DataFrame, dummies: pd.DataFrame, model: Model ) -> pd.DataFrame: """ Make predictions for raw input data Args: data (pd.DataFrame): input data dummies (pd.DataFrame): dummies dataframe to match sample submission format model (Model): model to make predictions Returns: pd.DataFrame: predictions """ t_start = time.time() # get unique temp = [index[0] for index in data.index] test_suites = [] for _temp in temp: if _temp not in test_suites: test_suites.append(_temp) # make predictions result = [] for test_suite in tqdm(test_suites): tmp = make_predictions_for_test_suite(data.loc[test_suite], test_suite, model) result.append(tmp) result = pd.concat(result, axis=0).reset_index(drop=True) # combine with dummies df to keep order result = pd.merge(dummies, result, how="left", on=["timestamp", "test_suite"]) # process ts result["timestamp"] = ( pd.to_datetime(result["timestamp"]).apply(lambda x: x.value) / 10**3 ) result["timestamp"] = result["timestamp"].astype("int") t_end = time.time() print(f"Predicions are made. Time: {(t_end-t_start)/60:.2f} minutes") return result def main(): """ main entry Returns: """ if len(sys.argv) < 2 or len(sys.argv[1]) == 0: print("Testing input file is missing.") return 1 if len(sys.argv) < 3 or len(sys.argv[2]) == 0: print("Testing output file is missing.") return 1 print("Testing started.") input_file = sys.argv[1] output_file = sys.argv[2] model_file = sys.argv[3] with open(model_file, "rb") as file: model = pickle.load(file) # load data data, dummies = read_data(input_file) result = make_predictions(data, dummies, model) result.to_csv(output_file, index=False) return 0 if __name__ == "__main__": main()
code/topcoder_cognitive_state/test.py
from typing import List import sys import warnings import time import pickle import numpy as np import pandas as pd from tqdm import tqdm from load_data import read_data from model import Model warnings.filterwarnings("ignore") def arrays_to_str_list(arr: np.array) -> List[str]: """ Transform predictions arrays into a list of strings to match the submission format. """ result = [] for index in range(arr.shape[0]): tmp = list(arr[index, :]) tmp = "[" + " ".join([str(_tmp) for _tmp in tmp]) + "]" result.append(str(tmp)) return result def lists_to_str_list(array: List[List[str]]) -> List[str]: """ Transform most important features arrays into a list of strings to match submission format. """ result = [] for tmp in array: tmp = "[" + " ".join(["'" + str(s) + "'" for s in tmp]) + "]" result.append(str(tmp)) return result def make_predictions_for_test_suite( data: pd.DataFrame, test_suite: pd.Series, model: Model ) -> pd.DataFrame: """ Make predictions for single `test_suite` Args: data (pd.DataFrame): test_suite input data test_suite (pd.Series): test_suite value model (Model): model Returns: pd.DataFrame: predictions """ ( y_hat_1, y_hat_3, y_hat_1_label, y_hat_3_label, most_important_features, ) = model.predict(data) # combine results result = pd.DataFrame({}) result["timestamp"] = data.reset_index()["timestamp"] result["test_suite"] = test_suite result["predicted_induced_state"] = y_hat_1_label result["three_sec_predicted_induced_state"] = y_hat_3_label result["predicted_induced_state_confidence"] = arrays_to_str_list(y_hat_1) result["three_sec_predicted_induced_state_confidence"] = arrays_to_str_list(y_hat_3) result["top_three_features"] = lists_to_str_list(most_important_features) result_cols = [ "timestamp", "test_suite", "predicted_induced_state", "predicted_induced_state_confidence", "three_sec_predicted_induced_state", "three_sec_predicted_induced_state_confidence", "top_three_features", ] result = result[result_cols] return result def make_predictions( data: 
pd.DataFrame, dummies: pd.DataFrame, model: Model ) -> pd.DataFrame: """ Make predictions for raw input data Args: data (pd.DataFrame): input data dummies (pd.DataFrame): dummies dataframe to match sample submission format model (Model): model to make predictions Returns: pd.DataFrame: predictions """ t_start = time.time() # get unique temp = [index[0] for index in data.index] test_suites = [] for _temp in temp: if _temp not in test_suites: test_suites.append(_temp) # make predictions result = [] for test_suite in tqdm(test_suites): tmp = make_predictions_for_test_suite(data.loc[test_suite], test_suite, model) result.append(tmp) result = pd.concat(result, axis=0).reset_index(drop=True) # combine with dummies df to keep order result = pd.merge(dummies, result, how="left", on=["timestamp", "test_suite"]) # process ts result["timestamp"] = ( pd.to_datetime(result["timestamp"]).apply(lambda x: x.value) / 10**3 ) result["timestamp"] = result["timestamp"].astype("int") t_end = time.time() print(f"Predicions are made. Time: {(t_end-t_start)/60:.2f} minutes") return result def main(): """ main entry Returns: """ if len(sys.argv) < 2 or len(sys.argv[1]) == 0: print("Testing input file is missing.") return 1 if len(sys.argv) < 3 or len(sys.argv[2]) == 0: print("Testing output file is missing.") return 1 print("Testing started.") input_file = sys.argv[1] output_file = sys.argv[2] model_file = sys.argv[3] with open(model_file, "rb") as file: model = pickle.load(file) # load data data, dummies = read_data(input_file) result = make_predictions(data, dummies, model) result.to_csv(output_file, index=False) return 0 if __name__ == "__main__": main()
0.515376
0.414306
from ai_flow.util.json_utils import Jsonable from typing import Dict, List from datetime import datetime, timedelta from pytz import timezone class PeriodicConfig(Jsonable): """ Define the periodic running configuration of the running unit (ai_flow.workflow.job.Job and ai_flow.workflow.workflow.Workflow). """ def __init__(self, trigger_config: Dict) -> None: """ :param trigger_config: Support two types of configuration: 1. cron config: {'start_date': 'start_date_expression', 'cron': 'cron_expression', 'timezone': 'timezone'} start_date_expression: year:int,month:int,day:int,hour:int,minute:int,second:int,Option[tzinfo: str] cron_expression: seconds minutes hours days months weeks years timezone: utc 2. interval config {'start_date': 'start_date_expression', 'interval': 'interval_expression'} start_date_expression: year:int,month:int,day:int,hour:int,minute:int,second:int,Option[tzinfo: str] interval_expression: days:int,hours:int,minutes:int,seconds:int """ super().__init__() self.trigger_config: Dict = trigger_config @classmethod def to_dict(cls, config: 'PeriodicConfig') -> Dict: if 'cron' in config.trigger_config: periodic_dict = {'start_date': config.trigger_config.get('start_date'), 'cron': config.trigger_config.get('cron')} return {**periodic_dict, **{'timezone': config.trigger_config.get( 'timezone')}} if 'timezone' in config.trigger_config else periodic_dict elif 'interval' in config.trigger_config: return {'start_date': config.trigger_config.get('start_date'), 'interval': config.trigger_config.get('interval')} else: raise Exception('Periodic config must be one of:\n' """1. cron config: {'start_date': 'start_date_expression', 'cron': 'cron_expression', 'timezone': 'timezone'}\n""" """2. 
interval config {'start_date': 'start_date_expression', 'interval': 'interval_expression'}""") @classmethod def from_dict(cls, data: Dict) -> 'PeriodicConfig': return PeriodicConfig(trigger_config=data) def get_cron_items(self): cron_list = self.trigger_config.get('cron').split(' ') if len(cron_list) != 7: raise Exception('cron expression {} is not validated! ' 'Usage: seconds minutes hours days months weeks years') result = [] for i in cron_list: result.append(i.strip()) return result def get_start_date_items(self): start_date_list = self.trigger_config.get('start_date').split(',') if len(start_date_list) == 7: result = [] for i in range(len(start_date_list)): if i < 6: if len(start_date_list[i].strip()) == 0: if i < 3: raise Exception('year month, day mast set!') else: result.append(0) else: result.append(int(start_date_list[i].strip())) else: if len(start_date_list[i].strip()) == 0: result.append(None) else: result.append(start_date_list[i].strip()) return result elif len(start_date_list) == 6: result = [] for i in start_date_list: result.append(int(i.strip())) return result else: raise Exception('start expression {} is not validated! ' 'Usage: year:int,month:int,day:int,hour:int,minute:int,second:int,Option[tzinfo: str]') def get_interval_items(self) -> List[float]: interval_list = self.trigger_config.get('interval').split(',') if len(interval_list) != 4: raise Exception('interval expression {} is not validated! 
' 'Usage: days:float,hours:float,minutes:float,seconds:float') result = [] for i in interval_list: if len(i.strip()) == 0: result.append(0) else: result.append(int(i.strip())) return result def get_start_date(self) -> datetime: tmp = self.get_start_date_items() if tmp[6] is None: return datetime(year=tmp[0], month=tmp[1], day=tmp[2], hour=tmp[3], minute=tmp[4], second=tmp[5]) else: return datetime(year=tmp[0], month=tmp[1], day=tmp[2], hour=tmp[3], minute=tmp[4], second=tmp[5], tzinfo=timezone(tmp[6])) def get_interval(self) -> timedelta: tmp = self.get_interval_items() return timedelta(days=tmp[0], hours=tmp[1], minutes=tmp[2], seconds=tmp[3]) def get_timezone(self): return self.trigger_config.get('timezone')
ai_flow/workflow/periodic_config.py
from ai_flow.util.json_utils import Jsonable from typing import Dict, List from datetime import datetime, timedelta from pytz import timezone class PeriodicConfig(Jsonable): """ Define the periodic running configuration of the running unit (ai_flow.workflow.job.Job and ai_flow.workflow.workflow.Workflow). """ def __init__(self, trigger_config: Dict) -> None: """ :param trigger_config: Support two types of configuration: 1. cron config: {'start_date': 'start_date_expression', 'cron': 'cron_expression', 'timezone': 'timezone'} start_date_expression: year:int,month:int,day:int,hour:int,minute:int,second:int,Option[tzinfo: str] cron_expression: seconds minutes hours days months weeks years timezone: utc 2. interval config {'start_date': 'start_date_expression', 'interval': 'interval_expression'} start_date_expression: year:int,month:int,day:int,hour:int,minute:int,second:int,Option[tzinfo: str] interval_expression: days:int,hours:int,minutes:int,seconds:int """ super().__init__() self.trigger_config: Dict = trigger_config @classmethod def to_dict(cls, config: 'PeriodicConfig') -> Dict: if 'cron' in config.trigger_config: periodic_dict = {'start_date': config.trigger_config.get('start_date'), 'cron': config.trigger_config.get('cron')} return {**periodic_dict, **{'timezone': config.trigger_config.get( 'timezone')}} if 'timezone' in config.trigger_config else periodic_dict elif 'interval' in config.trigger_config: return {'start_date': config.trigger_config.get('start_date'), 'interval': config.trigger_config.get('interval')} else: raise Exception('Periodic config must be one of:\n' """1. cron config: {'start_date': 'start_date_expression', 'cron': 'cron_expression', 'timezone': 'timezone'}\n""" """2. 
interval config {'start_date': 'start_date_expression', 'interval': 'interval_expression'}""") @classmethod def from_dict(cls, data: Dict) -> 'PeriodicConfig': return PeriodicConfig(trigger_config=data) def get_cron_items(self): cron_list = self.trigger_config.get('cron').split(' ') if len(cron_list) != 7: raise Exception('cron expression {} is not validated! ' 'Usage: seconds minutes hours days months weeks years') result = [] for i in cron_list: result.append(i.strip()) return result def get_start_date_items(self): start_date_list = self.trigger_config.get('start_date').split(',') if len(start_date_list) == 7: result = [] for i in range(len(start_date_list)): if i < 6: if len(start_date_list[i].strip()) == 0: if i < 3: raise Exception('year month, day mast set!') else: result.append(0) else: result.append(int(start_date_list[i].strip())) else: if len(start_date_list[i].strip()) == 0: result.append(None) else: result.append(start_date_list[i].strip()) return result elif len(start_date_list) == 6: result = [] for i in start_date_list: result.append(int(i.strip())) return result else: raise Exception('start expression {} is not validated! ' 'Usage: year:int,month:int,day:int,hour:int,minute:int,second:int,Option[tzinfo: str]') def get_interval_items(self) -> List[float]: interval_list = self.trigger_config.get('interval').split(',') if len(interval_list) != 4: raise Exception('interval expression {} is not validated! 
' 'Usage: days:float,hours:float,minutes:float,seconds:float') result = [] for i in interval_list: if len(i.strip()) == 0: result.append(0) else: result.append(int(i.strip())) return result def get_start_date(self) -> datetime: tmp = self.get_start_date_items() if tmp[6] is None: return datetime(year=tmp[0], month=tmp[1], day=tmp[2], hour=tmp[3], minute=tmp[4], second=tmp[5]) else: return datetime(year=tmp[0], month=tmp[1], day=tmp[2], hour=tmp[3], minute=tmp[4], second=tmp[5], tzinfo=timezone(tmp[6])) def get_interval(self) -> timedelta: tmp = self.get_interval_items() return timedelta(days=tmp[0], hours=tmp[1], minutes=tmp[2], seconds=tmp[3]) def get_timezone(self): return self.trigger_config.get('timezone')
0.775647
0.329419
from math import radians from robotling_lib.misc.helpers import timed_function from robotling_lib.sensors.sensor_base import SensorBase from robotling_lib.driver.bno055 import BNO055, ADDRESS_BNO055 import robotling_lib.misc.ansi_color as ansi import robotling_lib.robotling_board as rb __version__ = "0.1.0.0" CHIP_NAME = "BNO055" # ---------------------------------------------------------------------------- class Compass(SensorBase): """Compass class that uses the 9-DoF MNU BNO055 breakout.""" def __init__(self, i2c): """ Requires already initialized I2C bus instance. """ self._i2c = i2c self._BNO055 = None self._isReady = False super().__init__(None, 0) addrList = self._i2c.deviceAddrList if (ADDRESS_BNO055 in addrList): # Initialize try: self._BNO055 = BNO055(i2c) self._version = 1 self._type = "Compass w/ tilt-compensation" self._isReady = True except RuntimeError: pass c = ansi.GREEN if self._isReady else ansi.RED cn = "{0}_v{1}".format(CHIP_NAME, self._version) print(c +"[{0:>12}] {1:35} ({2}): {3}" .format(cn, self._type, __version__, "ok" if self._isReady else "FAILED") +ansi.BLACK) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def get_heading(self, tilt=False, calib=False, hires=True): """ Returns heading with or w/o tilt compensation and/or calibration, if available. NOTE: The BNO055 has built-in tilt compensation and is pre-calibra- ted, therefore the parameters `tilt` and `calib` are only for compatibility reasons and have no effect; `hires` is ignored. """ if not self._isReady: return rb.RBL_ERR_DEVICE_NOT_READY return self._BNO055.euler[0] #@timed_function def get_heading_3d(self, calib=False): """ Returns heading, pitch and roll in [°] with or w/o calibration, if available. NOTE: The BNO055 has built-in tilt compensation and is pre-calibra- ted, therefore the parameter `calib` exists only for compatibility reasons and has no effect. 
""" if not self._isReady: return (rb.RBL_ERR_DEVICE_NOT_READY, 0, 0, 0) hd, pit, rol = self._BNO055.euler return (rb.RBL_OK, hd, pit, rol) def get_pitch_roll(self, radians=False): """ Returns error code, pitch and roll in [°] as a tuple """ if not self._isReady: return (rb.RBL_ERR_DEVICE_NOT_READY, 0, 0) hd, pit, rol = self._BNO055.euler if radians: return (rb.RBL_OK, -1, radians(pit), radians(rol)) else: return (rb.RBL_OK, -1, pit, rol) @property def is_ready(self): return self._isReady @property def channel_count(self): return CHAN_COUNT # ----------------------------------------------------------------------------
sensors/compass_bno055.py
from math import radians from robotling_lib.misc.helpers import timed_function from robotling_lib.sensors.sensor_base import SensorBase from robotling_lib.driver.bno055 import BNO055, ADDRESS_BNO055 import robotling_lib.misc.ansi_color as ansi import robotling_lib.robotling_board as rb __version__ = "0.1.0.0" CHIP_NAME = "BNO055" # ---------------------------------------------------------------------------- class Compass(SensorBase): """Compass class that uses the 9-DoF MNU BNO055 breakout.""" def __init__(self, i2c): """ Requires already initialized I2C bus instance. """ self._i2c = i2c self._BNO055 = None self._isReady = False super().__init__(None, 0) addrList = self._i2c.deviceAddrList if (ADDRESS_BNO055 in addrList): # Initialize try: self._BNO055 = BNO055(i2c) self._version = 1 self._type = "Compass w/ tilt-compensation" self._isReady = True except RuntimeError: pass c = ansi.GREEN if self._isReady else ansi.RED cn = "{0}_v{1}".format(CHIP_NAME, self._version) print(c +"[{0:>12}] {1:35} ({2}): {3}" .format(cn, self._type, __version__, "ok" if self._isReady else "FAILED") +ansi.BLACK) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def get_heading(self, tilt=False, calib=False, hires=True): """ Returns heading with or w/o tilt compensation and/or calibration, if available. NOTE: The BNO055 has built-in tilt compensation and is pre-calibra- ted, therefore the parameters `tilt` and `calib` are only for compatibility reasons and have no effect; `hires` is ignored. """ if not self._isReady: return rb.RBL_ERR_DEVICE_NOT_READY return self._BNO055.euler[0] #@timed_function def get_heading_3d(self, calib=False): """ Returns heading, pitch and roll in [°] with or w/o calibration, if available. NOTE: The BNO055 has built-in tilt compensation and is pre-calibra- ted, therefore the parameter `calib` exists only for compatibility reasons and has no effect. 
""" if not self._isReady: return (rb.RBL_ERR_DEVICE_NOT_READY, 0, 0, 0) hd, pit, rol = self._BNO055.euler return (rb.RBL_OK, hd, pit, rol) def get_pitch_roll(self, radians=False): """ Returns error code, pitch and roll in [°] as a tuple """ if not self._isReady: return (rb.RBL_ERR_DEVICE_NOT_READY, 0, 0) hd, pit, rol = self._BNO055.euler if radians: return (rb.RBL_OK, -1, radians(pit), radians(rol)) else: return (rb.RBL_OK, -1, pit, rol) @property def is_ready(self): return self._isReady @property def channel_count(self): return CHAN_COUNT # ----------------------------------------------------------------------------
0.78469
0.28189
import sys import time import os import logging import tqdm import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np import configparser from model.models import * from model.models import Word2Score from utils.data_helper import Dataset from utils.loader import Testdataset from scipy import stats from gensim.models import Word2Vec from sklearn.metrics import average_precision_score,precision_recall_curve SIEGE_EVALUATIONS = [ ("bless", "data/bless.tsv"), ("eval", "data/eval.tsv"), ("leds", "data/leds.tsv"), ("shwartz", "data/shwartz.tsv"), ("weeds", "data/wbless.tsv"), ] CORRELATION_EVAL_DATASETS = [("hyperlex", "data/hyperlex_rnd.tsv"), ("hyperlex_noun", "data/hyperlex_noun.tsv")] def predict_many(data, model, hypos, hypers, embedding, device, reverse=False): num = 0 result = [] result_svd = [] count_oop = 0 count_pair = 0 for hypon, hyper in zip(hypos, hypers): count_pair += 1 if hypon in data.vocab and hyper in data.vocab: l = data.word2id[hypon] r = data.word2id[hyper] if reverse: pred = data.U[r].dot(data.V[l]) else: pred = data.U[l].dot(data.V[r]) result_svd.append(pred) else: # out of pattern mode result_svd.append(0.0) count_oop += 1 if hypon in embedding and hyper in embedding: hypon_tensor = torch.from_numpy(embedding[hypon]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[hyper]).view(1,300).to(device) if reverse: # pred = inference(saved_model,hyper_tensor, hypon_tensor) pred = model.inference(hyper_tensor, hypon_tensor).detach().cpu().numpy()[0] else: # pred = inference(saved_model,hypon_tensor, hyper_tensor) pred = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] else: num +=1 pred = 0.0 result.append(pred) # num = 0 -> all the word in the embedding oop_rate = count_oop * 1.0 / count_pair return np.array(result, dtype=np.float32), np.array(result_svd, dtype=np.float32), oop_rate def make_hparam_string(config): hparam = "{}/s{}_h{}-{}_n{}_w{}".format( 
config.get("hyperparameters", "model"), config.get("hyperparameters", "svd_dimension"), config.get("hyperparameters", "number_hidden_layers"), config.get("hyperparameters", "hidden_layer_size"), config.get("hyperparameters", "negative_num"), # config.get("hyperparameters", "batch_size"), config.get("hyperparameters", "weight_decay"), # config.get("hyperparameters", "context_num"), # config.get("hyperparameters", "context_len") ) return hparam def init_model(config): hidden_layer_size = int(config.getfloat("hyperparameters", "hidden_layer_size")) number_hidden_layers = int(config.getfloat("hyperparameters", "number_hidden_layers")) model = Word2Score(hidden_layer_size, number_hidden_layers) return model def load_gensim_word2vec(): print("Loading pretrained word embedding ... ") wv_model = Word2Vec.load("/home/shared/embedding/ukwac.model") embedding = wv_model.wv return embedding def detection_setup(file_name, model, matrix_data, embedding,device): ds = Testdataset(file_name, matrix_data.vocab) logger.info("-" * 80) logger.info("processing dataset :{}".format(file_name)) m_val = ds.val_mask m_test = ds.test_mask h = np.zeros(len(ds)) h_ip = np.zeros(len(ds)) predict_mask = np.full(len(ds), True) inpattern_mask = np.full(len(ds), True) true_prediction = [] in_pattern_prediction = [] count_w2v = 0 mask_idx = 0 for x,y in zip(ds.hypos, ds.hypers): if x in matrix_data.vocab and y in matrix_data.vocab: l = matrix_data.word2id[x] r = matrix_data.word2id[y] score = matrix_data.U[l].dot(matrix_data.V[r]) true_prediction.append(score) in_pattern_prediction.append(score) else: # out of pattern inpattern_mask[mask_idx] = False if x in embedding and y in embedding: hypon_tensor = torch.from_numpy(embedding[x]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[y]).view(1,300).to(device) score = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] true_prediction.append(score) count_w2v +=1 else: predict_mask[mask_idx] = False mask_idx +=1 
h[predict_mask] = np.array(true_prediction, dtype=np.float32) h[~predict_mask] = h[predict_mask].min() h_ip[inpattern_mask] = np.array(in_pattern_prediction, dtype=np.float32) h_ip[~inpattern_mask] = h_ip[inpattern_mask].min() y = ds.y result= { "ap_val": average_precision_score(y[m_val],h[m_val]), "ap_test": average_precision_score(y[m_test],h[m_test]), } result['oov_rate'] = np.mean(ds.oov_mask) result['predict_num'] = int(np.sum(predict_mask)) result['oov_num'] = int(np.sum(ds.oov_mask)) logger.info("there are {:2d}/{:2d} pairs appeared in the trained embedding".format(count_w2v, result['oov_num'])) logger.info("Word2Vec : AP for validation is :{} || for test is :{}".format(result['ap_val'], result['ap_test'])) logger.info("Svdppmi : AP for validation is :{} || for test is :{}".format(average_precision_score(y[m_val],h_ip[m_val]), average_precision_score(y[m_test],h_ip[m_test]) )) return result def hyperlex_setup(file_name, model, matrix_data, embedding,device): logger.info("-" * 80) logger.info("processing dataset :{}".format(file_name)) ds = Testdataset(file_name, matrix_data.vocab, ycolumn='score') h = np.zeros(len(ds)) predict_mask = np.full(len(ds), True) true_prediction = [] mask_idx = 0 for x,y in zip(ds.hypos, ds.hypers): if x in matrix_data.vocab and y in matrix_data.vocab: l = matrix_data.word2id[x] r = matrix_data.word2id[y] score = matrix_data.U[l].dot(matrix_data.V[r]) true_prediction.append(score) else: # out of pattern if x in embedding and y in embedding: hypon_tensor = torch.from_numpy(embedding[x]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[y]).view(1,300).to(device) score = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] true_prediction.append(score) else: predict_mask[mask_idx] = False mask_idx +=1 h[predict_mask] = np.array(true_prediction, dtype=np.float32) h[~predict_mask] = np.median(h[predict_mask]) y = ds.labels m_train = ds.train_mask m_val = ds.val_mask m_test = ds.test_mask result = { 
"spearman_train": stats.spearmanr(y[m_train], h[m_train])[0], "spearman_val": stats.spearmanr(y[m_val], h[m_val])[0], "spearman_test": stats.spearmanr(y[m_test], h[m_test])[0], } result['oov_rate'] = np.mean(ds.oov_mask) result['predict_num'] = int(np.sum(predict_mask)) result['oov_num'] = int(np.sum(ds.oov_mask)) logger.info("Word2Vec: train cor: {} | test cor:{}".format(result['spearman_train'],result['spearman_test'])) return result def dir_bless_setup(model, matrix_data, embedding, device): logger.info("-" * 80) logger.info("processing dataset : dir_bless") ds = Testdataset("data/bless.tsv", matrix_data.vocab) hypos = ds.hypos[ds.y] hypers = ds.hypers[ds.y] m_val = ds.val_mask[ds.y] m_test = ds.test_mask[ds.y] h = np.zeros(len(ds)) pred_score_list = [] svd_pred_list = [] count_oop = 0 count_pair = 0 for hypon, hyper in zip(hypos, hypers): if hypon in matrix_data.vocab and hyper in matrix_data.vocab: l = matrix_data.word2id[hypon] r = matrix_data.word2id[hyper] forward_pred = matrix_data.U[l].dot(matrix_data.V[r]) reverse_pred = matrix_data.U[r].dot(matrix_data.V[l]) if forward_pred > reverse_pred: pred_score_list.append(1) svd_pred_list.append(1) else: pred_score_list.append(0) svd_pred_list.append(0) else: # out of pattern mode svd_pred_list.append(0) count_oop += 1 if hypon in embedding and hyper in embedding: hypon_tensor = torch.from_numpy(embedding[hypon]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[hyper]).view(1,300).to(device) forward_pred = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] reverse_pred = model.inference(hyper_tensor, hypon_tensor).detach().cpu().numpy()[0] if forward_pred > reverse_pred: pred_score_list.append(1) else: pred_score_list.append(0) else: pred_score_list.append(0) acc = np.mean(np.asarray(pred_score_list)) acc_val = np.mean(np.asarray(pred_score_list)[m_val]) acc_test = np.mean(np.asarray(pred_score_list)[m_test]) s_acc = np.mean(np.asarray(svd_pred_list)) logger.info("Val Acc : {} 
|| Test Acc: {} ".format(acc_val, acc_test)) logger.info("Sppmi Acc: {} ".format(s_acc)) def dir_wbless_setup(model, data, embedding,device): logger.info("-" * 80) logger.info("processing dataset : dir_wbless") data_path = "data/wbless.tsv" ds = Testdataset(data_path, data.vocab) rng = np.random.RandomState(42) VAL_PROB = .02 NUM_TRIALS = 1000 # We have no way of handling oov h, h_svd, _ = predict_many(data, model, ds.hypos, ds.hypers, embedding, device) y = ds.y val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val _, _, t = precision_recall_curve(y[m_val], h[m_val]) # pick the highest accuracy on the validation set thr_accs = np.mean((h[m_val, np.newaxis] >= t) == y[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] preds_val = h[m_val] >= best_t preds_test = h[m_test] >= best_t # Evaluate val_scores.append(np.mean(preds_val == y[m_val])) test_scores.append(np.mean(preds_test == y[m_test])) # sanity check assert np.allclose(val_scores[-1], thr_accs.max()) # report average across many folds logger.info("w2v: acc_val_inv: {} acc_test_inv: {}".format(np.mean(val_scores), np.mean(test_scores))) val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val _, _, t = precision_recall_curve(y[m_val], h_svd[m_val]) # pick the highest accuracy on the validation set thr_accs = np.mean((h_svd[m_val, np.newaxis] >= t) == y[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] preds_val = h_svd[m_val] >= best_t preds_test = h_svd[m_test] >= best_t # Evaluate val_scores.append(np.mean(preds_val == y[m_val])) test_scores.append(np.mean(preds_test == y[m_test])) # sanity check assert np.allclose(val_scores[-1], thr_accs.max()) # report average across many folds logger.info("sppmi: acc_val_inv: {} acc_test_inv: 
{}".format(np.mean(val_scores), np.mean(test_scores))) def dir_bibless_setup(model, data, embedding, device): logger.info("-" * 80) logger.info("processing dataset : dir_bibless") data_path = "data/bibless.tsv" ds = Testdataset(data_path, data.vocab) rng = np.random.RandomState(42) VAL_PROB = .02 NUM_TRIALS = 1000 #y = ds.y[ds.invocab_mask] y = ds.y # hypernymy could be either direction yh = y != 0 # get forward and backward predictions hf, hf_svd, oop_rate = predict_many(data, model, ds.hypos, ds.hypers, embedding, device, reverse=False) hr, hr_svd, _ = predict_many(data, model, ds.hypos, ds.hypers, embedding, device, reverse=True) logger.info('OOP Rate: {}'.format(oop_rate)) h = np.max([hf, hr], axis=0) h_svd = np.max([hf_svd, hr_svd], axis=0) dir_pred = 2 * np.float32(hf >= hr) - 1 dir_pred_svd = 2 * np.float32(hf_svd >= hr_svd) - 1 val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val # set the threshold based on the maximum score _, _, t = precision_recall_curve(yh[m_val], h[m_val]) thr_accs = np.mean((h[m_val, np.newaxis] >= t) == yh[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] det_preds_val = h[m_val] >= best_t det_preds_test = h[m_test] >= best_t fin_preds_val = det_preds_val * dir_pred[m_val] fin_preds_test = det_preds_test * dir_pred[m_test] val_scores.append(np.mean(fin_preds_val == y[m_val])) test_scores.append(np.mean(fin_preds_test == y[m_test])) # report average across many folds logger.info("w2v: acc_val_all: {}, acc_test_all: {}".format(np.mean(val_scores),np.mean(test_scores))) val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val # set the threshold based on the maximum score _, _, t = precision_recall_curve(yh[m_val], h_svd[m_val]) thr_accs = np.mean((h_svd[m_val, np.newaxis] >= t) == 
yh[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] det_preds_val = h_svd[m_val] >= best_t det_preds_test = h_svd[m_test] >= best_t fin_preds_val = det_preds_val * dir_pred_svd[m_val] fin_preds_test = det_preds_test * dir_pred_svd[m_test] val_scores.append(np.mean(fin_preds_val == y[m_val])) test_scores.append(np.mean(fin_preds_test == y[m_test])) # report average across many folds logger.info("sppmi: acc_val_all: {}, acc_test_all: {}".format(np.mean(val_scores),np.mean(test_scores))) def evaluation_all(model_config): embedding = load_gensim_word2vec() config = configparser.RawConfigParser() config.read(model_config) gpu_device = config.get("hyperparameters", "gpu_device") device = torch.device('cuda:{}'.format(gpu_device) if torch.cuda.is_available() else 'cpu') matrix_data = Dataset(config) model = init_model(config) model.to(device) #pretrain = torch.load("/home/shared/acl-data/hype_detection/checkpoints/mlp_unisample_svd/s50_h2-300_n400_w0/best.ckpt") pretrain = torch.load("/home/cyuaq/comHyper/checkpoints/mlp_unisample_svd/s50_h2-300_n400_b128/best.ckpt") pretrain.pop("embs.weight") model.load_state_dict(pretrain) model.eval() results = {} for taskname, filename in SIEGE_EVALUATIONS: result = detection_setup(filename, model, matrix_data, embedding,device) results["detec_{}".format(taskname)] = result for taskname, filename in CORRELATION_EVAL_DATASETS: result = hyperlex_setup(filename, model, matrix_data, embedding, device) results["corr_{}".format(taskname)] = result dir_bless_setup(model, matrix_data, embedding, device) dir_wbless_setup(model, matrix_data, embedding, device) dir_bibless_setup(model, matrix_data, embedding, device) return results if __name__ == "__main__": config_file = sys.argv[1] log_path = "/home/cyuaq/comHyper/checkpoints/mlp_unisample_svd/s50_h2-300_n400_b128/word2score.log" logger = logging.getLogger() logger.setLevel(logging.INFO) handler = logging.FileHandler(log_path, 'w') handler.setLevel(logging.INFO) formatter = 
logging.Formatter('%(asctime)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S') handler.setFormatter(formatter) logger.addHandler(handler) results = evaluation_all(config_file) print(results)
evaluation/evaluation_all_word.py
import sys import time import os import logging import tqdm import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np import configparser from model.models import * from model.models import Word2Score from utils.data_helper import Dataset from utils.loader import Testdataset from scipy import stats from gensim.models import Word2Vec from sklearn.metrics import average_precision_score,precision_recall_curve SIEGE_EVALUATIONS = [ ("bless", "data/bless.tsv"), ("eval", "data/eval.tsv"), ("leds", "data/leds.tsv"), ("shwartz", "data/shwartz.tsv"), ("weeds", "data/wbless.tsv"), ] CORRELATION_EVAL_DATASETS = [("hyperlex", "data/hyperlex_rnd.tsv"), ("hyperlex_noun", "data/hyperlex_noun.tsv")] def predict_many(data, model, hypos, hypers, embedding, device, reverse=False): num = 0 result = [] result_svd = [] count_oop = 0 count_pair = 0 for hypon, hyper in zip(hypos, hypers): count_pair += 1 if hypon in data.vocab and hyper in data.vocab: l = data.word2id[hypon] r = data.word2id[hyper] if reverse: pred = data.U[r].dot(data.V[l]) else: pred = data.U[l].dot(data.V[r]) result_svd.append(pred) else: # out of pattern mode result_svd.append(0.0) count_oop += 1 if hypon in embedding and hyper in embedding: hypon_tensor = torch.from_numpy(embedding[hypon]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[hyper]).view(1,300).to(device) if reverse: # pred = inference(saved_model,hyper_tensor, hypon_tensor) pred = model.inference(hyper_tensor, hypon_tensor).detach().cpu().numpy()[0] else: # pred = inference(saved_model,hypon_tensor, hyper_tensor) pred = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] else: num +=1 pred = 0.0 result.append(pred) # num = 0 -> all the word in the embedding oop_rate = count_oop * 1.0 / count_pair return np.array(result, dtype=np.float32), np.array(result_svd, dtype=np.float32), oop_rate def make_hparam_string(config): hparam = "{}/s{}_h{}-{}_n{}_w{}".format( 
config.get("hyperparameters", "model"), config.get("hyperparameters", "svd_dimension"), config.get("hyperparameters", "number_hidden_layers"), config.get("hyperparameters", "hidden_layer_size"), config.get("hyperparameters", "negative_num"), # config.get("hyperparameters", "batch_size"), config.get("hyperparameters", "weight_decay"), # config.get("hyperparameters", "context_num"), # config.get("hyperparameters", "context_len") ) return hparam def init_model(config): hidden_layer_size = int(config.getfloat("hyperparameters", "hidden_layer_size")) number_hidden_layers = int(config.getfloat("hyperparameters", "number_hidden_layers")) model = Word2Score(hidden_layer_size, number_hidden_layers) return model def load_gensim_word2vec(): print("Loading pretrained word embedding ... ") wv_model = Word2Vec.load("/home/shared/embedding/ukwac.model") embedding = wv_model.wv return embedding def detection_setup(file_name, model, matrix_data, embedding,device): ds = Testdataset(file_name, matrix_data.vocab) logger.info("-" * 80) logger.info("processing dataset :{}".format(file_name)) m_val = ds.val_mask m_test = ds.test_mask h = np.zeros(len(ds)) h_ip = np.zeros(len(ds)) predict_mask = np.full(len(ds), True) inpattern_mask = np.full(len(ds), True) true_prediction = [] in_pattern_prediction = [] count_w2v = 0 mask_idx = 0 for x,y in zip(ds.hypos, ds.hypers): if x in matrix_data.vocab and y in matrix_data.vocab: l = matrix_data.word2id[x] r = matrix_data.word2id[y] score = matrix_data.U[l].dot(matrix_data.V[r]) true_prediction.append(score) in_pattern_prediction.append(score) else: # out of pattern inpattern_mask[mask_idx] = False if x in embedding and y in embedding: hypon_tensor = torch.from_numpy(embedding[x]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[y]).view(1,300).to(device) score = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] true_prediction.append(score) count_w2v +=1 else: predict_mask[mask_idx] = False mask_idx +=1 
h[predict_mask] = np.array(true_prediction, dtype=np.float32) h[~predict_mask] = h[predict_mask].min() h_ip[inpattern_mask] = np.array(in_pattern_prediction, dtype=np.float32) h_ip[~inpattern_mask] = h_ip[inpattern_mask].min() y = ds.y result= { "ap_val": average_precision_score(y[m_val],h[m_val]), "ap_test": average_precision_score(y[m_test],h[m_test]), } result['oov_rate'] = np.mean(ds.oov_mask) result['predict_num'] = int(np.sum(predict_mask)) result['oov_num'] = int(np.sum(ds.oov_mask)) logger.info("there are {:2d}/{:2d} pairs appeared in the trained embedding".format(count_w2v, result['oov_num'])) logger.info("Word2Vec : AP for validation is :{} || for test is :{}".format(result['ap_val'], result['ap_test'])) logger.info("Svdppmi : AP for validation is :{} || for test is :{}".format(average_precision_score(y[m_val],h_ip[m_val]), average_precision_score(y[m_test],h_ip[m_test]) )) return result def hyperlex_setup(file_name, model, matrix_data, embedding,device): logger.info("-" * 80) logger.info("processing dataset :{}".format(file_name)) ds = Testdataset(file_name, matrix_data.vocab, ycolumn='score') h = np.zeros(len(ds)) predict_mask = np.full(len(ds), True) true_prediction = [] mask_idx = 0 for x,y in zip(ds.hypos, ds.hypers): if x in matrix_data.vocab and y in matrix_data.vocab: l = matrix_data.word2id[x] r = matrix_data.word2id[y] score = matrix_data.U[l].dot(matrix_data.V[r]) true_prediction.append(score) else: # out of pattern if x in embedding and y in embedding: hypon_tensor = torch.from_numpy(embedding[x]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[y]).view(1,300).to(device) score = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] true_prediction.append(score) else: predict_mask[mask_idx] = False mask_idx +=1 h[predict_mask] = np.array(true_prediction, dtype=np.float32) h[~predict_mask] = np.median(h[predict_mask]) y = ds.labels m_train = ds.train_mask m_val = ds.val_mask m_test = ds.test_mask result = { 
"spearman_train": stats.spearmanr(y[m_train], h[m_train])[0], "spearman_val": stats.spearmanr(y[m_val], h[m_val])[0], "spearman_test": stats.spearmanr(y[m_test], h[m_test])[0], } result['oov_rate'] = np.mean(ds.oov_mask) result['predict_num'] = int(np.sum(predict_mask)) result['oov_num'] = int(np.sum(ds.oov_mask)) logger.info("Word2Vec: train cor: {} | test cor:{}".format(result['spearman_train'],result['spearman_test'])) return result def dir_bless_setup(model, matrix_data, embedding, device): logger.info("-" * 80) logger.info("processing dataset : dir_bless") ds = Testdataset("data/bless.tsv", matrix_data.vocab) hypos = ds.hypos[ds.y] hypers = ds.hypers[ds.y] m_val = ds.val_mask[ds.y] m_test = ds.test_mask[ds.y] h = np.zeros(len(ds)) pred_score_list = [] svd_pred_list = [] count_oop = 0 count_pair = 0 for hypon, hyper in zip(hypos, hypers): if hypon in matrix_data.vocab and hyper in matrix_data.vocab: l = matrix_data.word2id[hypon] r = matrix_data.word2id[hyper] forward_pred = matrix_data.U[l].dot(matrix_data.V[r]) reverse_pred = matrix_data.U[r].dot(matrix_data.V[l]) if forward_pred > reverse_pred: pred_score_list.append(1) svd_pred_list.append(1) else: pred_score_list.append(0) svd_pred_list.append(0) else: # out of pattern mode svd_pred_list.append(0) count_oop += 1 if hypon in embedding and hyper in embedding: hypon_tensor = torch.from_numpy(embedding[hypon]).view(1,300).to(device) hyper_tensor = torch.from_numpy(embedding[hyper]).view(1,300).to(device) forward_pred = model.inference(hypon_tensor, hyper_tensor).detach().cpu().numpy()[0] reverse_pred = model.inference(hyper_tensor, hypon_tensor).detach().cpu().numpy()[0] if forward_pred > reverse_pred: pred_score_list.append(1) else: pred_score_list.append(0) else: pred_score_list.append(0) acc = np.mean(np.asarray(pred_score_list)) acc_val = np.mean(np.asarray(pred_score_list)[m_val]) acc_test = np.mean(np.asarray(pred_score_list)[m_test]) s_acc = np.mean(np.asarray(svd_pred_list)) logger.info("Val Acc : {} 
|| Test Acc: {} ".format(acc_val, acc_test)) logger.info("Sppmi Acc: {} ".format(s_acc)) def dir_wbless_setup(model, data, embedding,device): logger.info("-" * 80) logger.info("processing dataset : dir_wbless") data_path = "data/wbless.tsv" ds = Testdataset(data_path, data.vocab) rng = np.random.RandomState(42) VAL_PROB = .02 NUM_TRIALS = 1000 # We have no way of handling oov h, h_svd, _ = predict_many(data, model, ds.hypos, ds.hypers, embedding, device) y = ds.y val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val _, _, t = precision_recall_curve(y[m_val], h[m_val]) # pick the highest accuracy on the validation set thr_accs = np.mean((h[m_val, np.newaxis] >= t) == y[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] preds_val = h[m_val] >= best_t preds_test = h[m_test] >= best_t # Evaluate val_scores.append(np.mean(preds_val == y[m_val])) test_scores.append(np.mean(preds_test == y[m_test])) # sanity check assert np.allclose(val_scores[-1], thr_accs.max()) # report average across many folds logger.info("w2v: acc_val_inv: {} acc_test_inv: {}".format(np.mean(val_scores), np.mean(test_scores))) val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val _, _, t = precision_recall_curve(y[m_val], h_svd[m_val]) # pick the highest accuracy on the validation set thr_accs = np.mean((h_svd[m_val, np.newaxis] >= t) == y[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] preds_val = h_svd[m_val] >= best_t preds_test = h_svd[m_test] >= best_t # Evaluate val_scores.append(np.mean(preds_val == y[m_val])) test_scores.append(np.mean(preds_test == y[m_test])) # sanity check assert np.allclose(val_scores[-1], thr_accs.max()) # report average across many folds logger.info("sppmi: acc_val_inv: {} acc_test_inv: 
{}".format(np.mean(val_scores), np.mean(test_scores))) def dir_bibless_setup(model, data, embedding, device): logger.info("-" * 80) logger.info("processing dataset : dir_bibless") data_path = "data/bibless.tsv" ds = Testdataset(data_path, data.vocab) rng = np.random.RandomState(42) VAL_PROB = .02 NUM_TRIALS = 1000 #y = ds.y[ds.invocab_mask] y = ds.y # hypernymy could be either direction yh = y != 0 # get forward and backward predictions hf, hf_svd, oop_rate = predict_many(data, model, ds.hypos, ds.hypers, embedding, device, reverse=False) hr, hr_svd, _ = predict_many(data, model, ds.hypos, ds.hypers, embedding, device, reverse=True) logger.info('OOP Rate: {}'.format(oop_rate)) h = np.max([hf, hr], axis=0) h_svd = np.max([hf_svd, hr_svd], axis=0) dir_pred = 2 * np.float32(hf >= hr) - 1 dir_pred_svd = 2 * np.float32(hf_svd >= hr_svd) - 1 val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val # set the threshold based on the maximum score _, _, t = precision_recall_curve(yh[m_val], h[m_val]) thr_accs = np.mean((h[m_val, np.newaxis] >= t) == yh[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] det_preds_val = h[m_val] >= best_t det_preds_test = h[m_test] >= best_t fin_preds_val = det_preds_val * dir_pred[m_val] fin_preds_test = det_preds_test * dir_pred[m_test] val_scores.append(np.mean(fin_preds_val == y[m_val])) test_scores.append(np.mean(fin_preds_test == y[m_test])) # report average across many folds logger.info("w2v: acc_val_all: {}, acc_test_all: {}".format(np.mean(val_scores),np.mean(test_scores))) val_scores = [] test_scores = [] for _ in range(NUM_TRIALS): # Generate a new mask every time m_val = rng.rand(len(y)) < VAL_PROB # Test is everything except val m_test = ~m_val # set the threshold based on the maximum score _, _, t = precision_recall_curve(yh[m_val], h_svd[m_val]) thr_accs = np.mean((h_svd[m_val, np.newaxis] >= t) == 
yh[m_val, np.newaxis], axis=0) best_t = t[thr_accs.argmax()] det_preds_val = h_svd[m_val] >= best_t det_preds_test = h_svd[m_test] >= best_t fin_preds_val = det_preds_val * dir_pred_svd[m_val] fin_preds_test = det_preds_test * dir_pred_svd[m_test] val_scores.append(np.mean(fin_preds_val == y[m_val])) test_scores.append(np.mean(fin_preds_test == y[m_test])) # report average across many folds logger.info("sppmi: acc_val_all: {}, acc_test_all: {}".format(np.mean(val_scores),np.mean(test_scores))) def evaluation_all(model_config): embedding = load_gensim_word2vec() config = configparser.RawConfigParser() config.read(model_config) gpu_device = config.get("hyperparameters", "gpu_device") device = torch.device('cuda:{}'.format(gpu_device) if torch.cuda.is_available() else 'cpu') matrix_data = Dataset(config) model = init_model(config) model.to(device) #pretrain = torch.load("/home/shared/acl-data/hype_detection/checkpoints/mlp_unisample_svd/s50_h2-300_n400_w0/best.ckpt") pretrain = torch.load("/home/cyuaq/comHyper/checkpoints/mlp_unisample_svd/s50_h2-300_n400_b128/best.ckpt") pretrain.pop("embs.weight") model.load_state_dict(pretrain) model.eval() results = {} for taskname, filename in SIEGE_EVALUATIONS: result = detection_setup(filename, model, matrix_data, embedding,device) results["detec_{}".format(taskname)] = result for taskname, filename in CORRELATION_EVAL_DATASETS: result = hyperlex_setup(filename, model, matrix_data, embedding, device) results["corr_{}".format(taskname)] = result dir_bless_setup(model, matrix_data, embedding, device) dir_wbless_setup(model, matrix_data, embedding, device) dir_bibless_setup(model, matrix_data, embedding, device) return results if __name__ == "__main__": config_file = sys.argv[1] log_path = "/home/cyuaq/comHyper/checkpoints/mlp_unisample_svd/s50_h2-300_n400_b128/word2score.log" logger = logging.getLogger() logger.setLevel(logging.INFO) handler = logging.FileHandler(log_path, 'w') handler.setLevel(logging.INFO) formatter = 
logging.Formatter('%(asctime)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S') handler.setFormatter(formatter) logger.addHandler(handler) results = evaluation_all(config_file) print(results)
0.492432
0.280693
from os.path import ( basename as _basename, dirname as _dirname, realpath as _realpath, join as _join, ) DEPLOYMENT_DIR = _realpath(_dirname(__file__)) PROJECT_DIR = _dirname(DEPLOYMENT_DIR) def setvariable(name, value, is_output=True): """ Set a Azure pipeline variable. Args: name (str): Variable name. value (str): Variable value. is_output (bool): Make variable available to future jobs. """ print( f"##vso[task.setvariable variable={name}" f'{";isOutput=true" if is_output else ""}]{value}' ) def render_template(src, dst, show=True, **kwargs): """ Render a file from a template using Jinja2. Args: src (str): Source template. dst (str): Destination file. show (bool): If true, print result. kwargs: Template arguments. """ from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader(_dirname(src))) template = env.get_template(_basename(src)) rendered = template.render(**kwargs) if show: print( "\033[34m== START RENDERED ==\033[30m\n" f"{rendered}" "\n\033[34m== END RENDERED ==\033[30m" ) with open(dst, "wt") as file: file.write(rendered) def get_drm_version(): """ Return DRM library version. Returns: str: DRM library version. """ path = _join(PROJECT_DIR, "CMakeLists.txt") with open(path, "rt") as cmakelists: for line in cmakelists: if line.startswith("set(ACCELIZEDRM_VERSION "): version = f"v{line.split(' ')[1].strip().strip(')')}" print(f"Detected DRM library version: {version}") return version raise ValueError(f'ACCELIZEDRM_VERSION not found in "{path}"') def get_next_package_release(versions_json): """ Return next release number for current DRM library version. Args: versions_json (str): Path to "versions.json" published release manifest. Returns: int: Next package release number. 
""" from json import load version = get_drm_version() with open(versions_json) as file: versions = load(file) try: release = versions["accelize_drm"][version] except KeyError: print( f"No published release for DRM library version {version}, " 'setting next release number to "1"' ) return 1 next_release = release + 1 print( f"DRM library version {version} was already published with release " f'number "{release}", setting next release number to ' f'"{next_release}"' ) return next_release def publish_packages( pkg_source, versions_json, deb_repo, rpm_repo, deb_s3, gpg_private_key, gpg_key_id ): """ Publish Accelize DRM library packages. Args: pkg_source (str): Path to packages source directory. versions_json (str): Path to versions.json file that store last release number for all versions. deb_repo (str): Path to local DEB repository. rpm_repo (str): Path to local RPM repository. deb_s3 (str): S3 DEB repository. gpg_private_key (str): Path to GPG key to use to sign packages. gpg_key_id (str): ID of the GPG key to use to sign packages. 
""" from json import dump, load from os import makedirs, listdir, walk, symlink from os.path import splitext, join, isfile, realpath from subprocess import run, STDOUT from tempfile import TemporaryDirectory run_kwargs = dict(stderr=STDOUT, check=True) # Define repositories information pkg_version = None tag = get_drm_version() prerelease = "-" in tag component = "prerelease" if prerelease else "stable" repo_base_url = "https://tech.accelize.com" if prerelease: assert deb_s3.endswith("deb_prerelease/") deb_conf = { "Origin": "Accelize", "Label": "Accelize", "Codename": None, "Architectures": "amd64", "Components": component, "Description": "Accelize DEB repository", "SignWith": gpg_key_id, } rpm_conf = { "name": "Accelize RPM repository", "baseurl": f"{repo_base_url}/rpm/{component}/$releasever/$basearch/", "enabled": "1", "gpgkey": f"{repo_base_url}/gpg", "gpgcheck": "1", } # Import GPG key print("GETTING GPG KEY...") run(["gpg", "--batch", "--no-tty", "--import", gpg_private_key], **run_kwargs) # Retrieve packages packages = dict(rpm=dict(), deb=dict()) print("GETTING PACKAGES...") for root, dirs, files in walk(pkg_source): for file_name in files: pkg_name, ext = splitext(file_name) # DEB with format: # <name>_<version>-<release>+<os_codename>_<cpu_arch>.deb if ext == ".deb": # Get: # - OS codename, example: stretch, xenial # - Package version parts = pkg_name.split("_") pkg_version, codename = parts[1].split("+", 1) # Add package to list packages["deb"].setdefault(codename, list()) pkg_path = join(root, file_name) packages["deb"][codename].append(pkg_path) print(f'Found DEB (codename="{codename}", path="{pkg_path}")') # RPM with format: # <name>-<version>-<release>.<os_release>.<cpu_arch>.rpm elif ext == ".rpm": parts = pkg_name.rsplit(".", 2) # Get CPU architecture, example: x86_64, noarch basearch = parts[-1] # Get OS release, example: el7, fc30 releasever = "".join(c for c in parts[-2] if c.isdigit()) # Add package to list pkg_path = join(root, file_name) 
packages["rpm"].setdefault(releasever, dict()) packages["rpm"][releasever].setdefault(basearch, list()) packages["rpm"][releasever][basearch].append((pkg_path, file_name)) print( f'found RPM (releasever="{releasever}", ' f'basearch="{basearch}", path="{pkg_path}")' ) if not packages["deb"] and not packages["rpm"]: raise FileNotFoundError(f'No packages in "{realpath(pkg_source)}"') # Update DEB repository print("UPDATING DEB REPOSITORY...") with TemporaryDirectory() as base_dir: reprepro = ["reprepro", "--outdir", deb_repo, "--basedir", base_dir] # Create configuration for each codename conf_dir = join(base_dir, "conf") makedirs(conf_dir, exist_ok=True) conf_file_path = join(conf_dir, "distributions") for codename in packages["deb"]: deb_conf["Codename"] = codename with open(conf_file_path, "at") as conf_file: for key, value in deb_conf.items(): conf_file.write(f"{key}: {value}\n") conf_file.write("\n") print(f'Created configuration file: "{conf_file_path}"') # Add package to repository and update it for codename, pkg_set in packages["deb"].items(): for pkg_path in pkg_set: run( reprepro + ["--component", component, "includedeb", codename, pkg_path], **run_kwargs, ) print(f'Included package: "{pkg_path}"') # Check repository if isfile(conf_file_path): run(reprepro + ["check"] + list(packages["deb"]), **run_kwargs) run(reprepro + ["checkpool"], **run_kwargs) # Create RPM repository configuration file print("UPDATING RPM REPOSITORY...") conf_file_path = join(rpm_repo, f"accelize_{component}.repo") with open(conf_file_path, "wt") as conf_file: conf_file.write(f"[accelize_{component}]\n") for key, value in rpm_conf.items(): conf_file.write(f"{key}={value}\n") print(f'Created configuration file: "{conf_file_path}"') # Update .rpm repository for releasever, basearchs in packages["rpm"].items(): # Move "noarch" in other architectures. 
try: pkg_set = basearchs.pop("noarch") except KeyError: continue for basearch in tuple(basearchs): basearchs[basearch] += pkg_set # Update repository for basearch, pkg_set in basearchs.items(): repo_path = join(rpm_repo, component, releasever, basearch) makedirs(repo_path, exist_ok=True) for pkg in pkg_set: pkg_src = pkg[0] pkg_dst = join(repo_path, pkg[1]) symlink(pkg_src, pkg_dst) print(f'Moved package "{pkg_src}" to "{pkg_dst}"') with TemporaryDirectory() as cache_dir: run( [ "createrepo", "--update", "--cachedir", cache_dir, "--deltas", repo_path, ], **run_kwargs, ) # Sign metadata XML for name in listdir(repo_path): path = join(repo_path, name, "repodata/repomd.xml") if isfile(path): run( [ "gpg", "--detach-sign", "--batch", "--yes", "--no-tty", "--armor", path, ], **run_kwargs, ) print(f'Signed "{path}"') # Get release number from packages and check it match with next # excepted release in published release manifest pkg_release = int(pkg_version.rsplit("-", 1)[1].split("+", 1)[0]) next_release = get_next_package_release(versions_json) if next_release != pkg_release: raise RuntimeError( f'Package release number "{pkg_release}" does not match with ' f'current repository next package number "{next_release}".' ) # Update published release manifest with open(versions_json, "rt") as file: versions = load(file) versions.setdefault("accelize_drm", dict()) versions["accelize_drm"][tag] = pkg_release with open(versions_json, "wt") as file: dump(versions, file) print(f'Saved release "{pkg_release}" for version "{tag}".')
deployment/azure_pipeline_utils.py
from os.path import ( basename as _basename, dirname as _dirname, realpath as _realpath, join as _join, ) DEPLOYMENT_DIR = _realpath(_dirname(__file__)) PROJECT_DIR = _dirname(DEPLOYMENT_DIR) def setvariable(name, value, is_output=True): """ Set a Azure pipeline variable. Args: name (str): Variable name. value (str): Variable value. is_output (bool): Make variable available to future jobs. """ print( f"##vso[task.setvariable variable={name}" f'{";isOutput=true" if is_output else ""}]{value}' ) def render_template(src, dst, show=True, **kwargs): """ Render a file from a template using Jinja2. Args: src (str): Source template. dst (str): Destination file. show (bool): If true, print result. kwargs: Template arguments. """ from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader(_dirname(src))) template = env.get_template(_basename(src)) rendered = template.render(**kwargs) if show: print( "\033[34m== START RENDERED ==\033[30m\n" f"{rendered}" "\n\033[34m== END RENDERED ==\033[30m" ) with open(dst, "wt") as file: file.write(rendered) def get_drm_version(): """ Return DRM library version. Returns: str: DRM library version. """ path = _join(PROJECT_DIR, "CMakeLists.txt") with open(path, "rt") as cmakelists: for line in cmakelists: if line.startswith("set(ACCELIZEDRM_VERSION "): version = f"v{line.split(' ')[1].strip().strip(')')}" print(f"Detected DRM library version: {version}") return version raise ValueError(f'ACCELIZEDRM_VERSION not found in "{path}"') def get_next_package_release(versions_json): """ Return next release number for current DRM library version. Args: versions_json (str): Path to "versions.json" published release manifest. Returns: int: Next package release number. 
""" from json import load version = get_drm_version() with open(versions_json) as file: versions = load(file) try: release = versions["accelize_drm"][version] except KeyError: print( f"No published release for DRM library version {version}, " 'setting next release number to "1"' ) return 1 next_release = release + 1 print( f"DRM library version {version} was already published with release " f'number "{release}", setting next release number to ' f'"{next_release}"' ) return next_release def publish_packages( pkg_source, versions_json, deb_repo, rpm_repo, deb_s3, gpg_private_key, gpg_key_id ): """ Publish Accelize DRM library packages. Args: pkg_source (str): Path to packages source directory. versions_json (str): Path to versions.json file that store last release number for all versions. deb_repo (str): Path to local DEB repository. rpm_repo (str): Path to local RPM repository. deb_s3 (str): S3 DEB repository. gpg_private_key (str): Path to GPG key to use to sign packages. gpg_key_id (str): ID of the GPG key to use to sign packages. 
""" from json import dump, load from os import makedirs, listdir, walk, symlink from os.path import splitext, join, isfile, realpath from subprocess import run, STDOUT from tempfile import TemporaryDirectory run_kwargs = dict(stderr=STDOUT, check=True) # Define repositories information pkg_version = None tag = get_drm_version() prerelease = "-" in tag component = "prerelease" if prerelease else "stable" repo_base_url = "https://tech.accelize.com" if prerelease: assert deb_s3.endswith("deb_prerelease/") deb_conf = { "Origin": "Accelize", "Label": "Accelize", "Codename": None, "Architectures": "amd64", "Components": component, "Description": "Accelize DEB repository", "SignWith": gpg_key_id, } rpm_conf = { "name": "Accelize RPM repository", "baseurl": f"{repo_base_url}/rpm/{component}/$releasever/$basearch/", "enabled": "1", "gpgkey": f"{repo_base_url}/gpg", "gpgcheck": "1", } # Import GPG key print("GETTING GPG KEY...") run(["gpg", "--batch", "--no-tty", "--import", gpg_private_key], **run_kwargs) # Retrieve packages packages = dict(rpm=dict(), deb=dict()) print("GETTING PACKAGES...") for root, dirs, files in walk(pkg_source): for file_name in files: pkg_name, ext = splitext(file_name) # DEB with format: # <name>_<version>-<release>+<os_codename>_<cpu_arch>.deb if ext == ".deb": # Get: # - OS codename, example: stretch, xenial # - Package version parts = pkg_name.split("_") pkg_version, codename = parts[1].split("+", 1) # Add package to list packages["deb"].setdefault(codename, list()) pkg_path = join(root, file_name) packages["deb"][codename].append(pkg_path) print(f'Found DEB (codename="{codename}", path="{pkg_path}")') # RPM with format: # <name>-<version>-<release>.<os_release>.<cpu_arch>.rpm elif ext == ".rpm": parts = pkg_name.rsplit(".", 2) # Get CPU architecture, example: x86_64, noarch basearch = parts[-1] # Get OS release, example: el7, fc30 releasever = "".join(c for c in parts[-2] if c.isdigit()) # Add package to list pkg_path = join(root, file_name) 
packages["rpm"].setdefault(releasever, dict()) packages["rpm"][releasever].setdefault(basearch, list()) packages["rpm"][releasever][basearch].append((pkg_path, file_name)) print( f'found RPM (releasever="{releasever}", ' f'basearch="{basearch}", path="{pkg_path}")' ) if not packages["deb"] and not packages["rpm"]: raise FileNotFoundError(f'No packages in "{realpath(pkg_source)}"') # Update DEB repository print("UPDATING DEB REPOSITORY...") with TemporaryDirectory() as base_dir: reprepro = ["reprepro", "--outdir", deb_repo, "--basedir", base_dir] # Create configuration for each codename conf_dir = join(base_dir, "conf") makedirs(conf_dir, exist_ok=True) conf_file_path = join(conf_dir, "distributions") for codename in packages["deb"]: deb_conf["Codename"] = codename with open(conf_file_path, "at") as conf_file: for key, value in deb_conf.items(): conf_file.write(f"{key}: {value}\n") conf_file.write("\n") print(f'Created configuration file: "{conf_file_path}"') # Add package to repository and update it for codename, pkg_set in packages["deb"].items(): for pkg_path in pkg_set: run( reprepro + ["--component", component, "includedeb", codename, pkg_path], **run_kwargs, ) print(f'Included package: "{pkg_path}"') # Check repository if isfile(conf_file_path): run(reprepro + ["check"] + list(packages["deb"]), **run_kwargs) run(reprepro + ["checkpool"], **run_kwargs) # Create RPM repository configuration file print("UPDATING RPM REPOSITORY...") conf_file_path = join(rpm_repo, f"accelize_{component}.repo") with open(conf_file_path, "wt") as conf_file: conf_file.write(f"[accelize_{component}]\n") for key, value in rpm_conf.items(): conf_file.write(f"{key}={value}\n") print(f'Created configuration file: "{conf_file_path}"') # Update .rpm repository for releasever, basearchs in packages["rpm"].items(): # Move "noarch" in other architectures. 
try: pkg_set = basearchs.pop("noarch") except KeyError: continue for basearch in tuple(basearchs): basearchs[basearch] += pkg_set # Update repository for basearch, pkg_set in basearchs.items(): repo_path = join(rpm_repo, component, releasever, basearch) makedirs(repo_path, exist_ok=True) for pkg in pkg_set: pkg_src = pkg[0] pkg_dst = join(repo_path, pkg[1]) symlink(pkg_src, pkg_dst) print(f'Moved package "{pkg_src}" to "{pkg_dst}"') with TemporaryDirectory() as cache_dir: run( [ "createrepo", "--update", "--cachedir", cache_dir, "--deltas", repo_path, ], **run_kwargs, ) # Sign metadata XML for name in listdir(repo_path): path = join(repo_path, name, "repodata/repomd.xml") if isfile(path): run( [ "gpg", "--detach-sign", "--batch", "--yes", "--no-tty", "--armor", path, ], **run_kwargs, ) print(f'Signed "{path}"') # Get release number from packages and check it match with next # excepted release in published release manifest pkg_release = int(pkg_version.rsplit("-", 1)[1].split("+", 1)[0]) next_release = get_next_package_release(versions_json) if next_release != pkg_release: raise RuntimeError( f'Package release number "{pkg_release}" does not match with ' f'current repository next package number "{next_release}".' ) # Update published release manifest with open(versions_json, "rt") as file: versions = load(file) versions.setdefault("accelize_drm", dict()) versions["accelize_drm"][tag] = pkg_release with open(versions_json, "wt") as file: dump(versions, file) print(f'Saved release "{pkg_release}" for version "{tag}".')
0.564699
0.181934
import warnings warnings.filterwarnings('ignore', category=FutureWarning) from flask import abort, render_template, Flask import logging import db APP = Flask(__name__) # Start page @APP.route('/') def index(): stats = {} x = db.execute('SELECT COUNT(*) AS movies FROM MOVIE').fetchone() stats.update(x) x = db.execute('SELECT COUNT(*) AS actors FROM ACTOR').fetchone() stats.update(x) x = db.execute('SELECT COUNT(*) AS streams FROM STREAM').fetchone() stats.update(x) logging.info(stats) return render_template('index.html',stats=stats) # Initialize db # It assumes a script called db.sql is stored in the sql folder @APP.route('/init/') def init(): return render_template('init.html', init=db.init()) # Movies @APP.route('/movies/') def list_movies(): movies = db.execute( ''' SELECT MovieId, Title, Year, Duration FROM MOVIE ORDER BY Title ''').fetchall() return render_template('movie-list.html', movies=movies) @APP.route('/movies/<int:id>/') def get_movie(id): movie = db.execute( ''' SELECT MovieId, Title, Year, Duration FROM MOVIE WHERE movieId = %s ''', id).fetchone() if movie is None: abort(404, 'Movie id {} does not exist.'.format(id)) genres = db.execute( ''' SELECT GenreId, Label FROM MOVIE_GENRE NATURAL JOIN GENRE WHERE movieId = %s ORDER BY Label ''', id).fetchall() actors = db.execute( ''' SELECT ActorId, Name FROM MOVIE_ACTOR NATURAL JOIN ACTOR WHERE MovieId = %s ORDER BY Name ''', id).fetchall() streams = db.execute( ''' SELECT StreamId, StreamDate FROM STREAM WHERE MovieId = %s ORDER BY StreamDate Desc ''', id).fetchall(); return render_template('movie.html', movie=movie, genres=genres, actors=actors, streams=streams) @APP.route('/movies/search/<expr>/') def search_movie(expr): search = { 'expr': expr } expr = '%' + expr + '%' movies = db.execute( ''' SELECT MovieId, Title FROM MOVIE WHERE Title LIKE %s ''', expr).fetchall() return render_template('movie-search.html', search=search,movies=movies) # Actors @APP.route('/actors/') def list_actors(): actors = 
db.execute(''' SELECT ActorId, Name FROM Actor ORDER BY Name ''').fetchall() return render_template('actor-list.html', actors=actors) @APP.route('/actors/<int:id>/') def view_movies_by_actor(id): actor = db.execute( ''' SELECT ActorId, Name FROM ACTOR WHERE actorId = %s ''', id).fetchone() if actor is None: abort(404, 'Actor id {} does not exist.'.format(id)) movies = db.execute( ''' SELECT MovieId, Title FROM MOVIE NATURAL JOIN MOVIE_ACTOR WHERE actorId = %s ORDER BY Title ''', id).fetchall() return render_template('actor.html', actor=actor, movies=movies) @APP.route('/actors/search/<expr>/') def search_actor(expr): search = { 'expr': expr } # SQL INJECTION POSSIBLE! - avoid this! actors = db.execute( ' SELECT ActorId, Name' ' FROM ACTOR ' ' WHERE NAME LIKE \'%' + expr + '%\'' ).fetchall() return render_template('actor-search.html', search=search,actors=actors) # Genres @APP.route('/genres/') def list_genres(): genres = db.execute(''' SELECT GenreId, Label FROM GENRE ORDER BY Label ''').fetchall() return render_template('genre-list.html', genres=genres) @APP.route('/genres/<int:id>/') def view_movies_by_genre(id): genre = db.execute( ''' SELECT GenreId, Label FROM GENRE WHERE GenreId = %s ''', id).fetchone() if genre is None: abort(404, 'Genre id {} does not exist.'.format(id)) movies = db.execute( ''' SELECT MovieId, Title FROM MOVIE NATURAL JOIN MOVIE_GENRE WHERE GenreId = %s ORDER BY Title ''', id).fetchall() return render_template('genre.html', genre=genre, movies=movies) # Streams @APP.route('/streams/<int:id>/') def get_stream(id): stream = db.execute( ''' SELECT StreamId, StreamDate, Charge, MovieId, Title, CustomerId, Name FROM STREAM NATURAL JOIN MOVIE NATURAL JOIN CUSTOMER WHERE StreamId = %s ''', id).fetchone() if stream is None: abort(404, 'Stream id {} does not exist.'.format(id)) return render_template('stream.html', stream=stream) # Staff @APP.route('/staff/') def list_staff(): staff = db.execute(''' SELECT S1.StaffId AS StaffId, S1.Name AS Name, 
S1.Job AS Job, S1.Supervisor AS Supervisor, S2.Name AS SupervisorName FROM STAFF S1 LEFT JOIN STAFF S2 ON(S1.Supervisor = S2.StaffId) ORDER BY S1.Name ''').fetchall() return render_template('staff-list.html', staff=staff) @APP.route('/staff/<int:id>/') def show_staff(id): staff = db.execute( ''' SELECT StaffId, Name, Supervisor, Job FROM STAFF WHERE staffId = %s ''', id).fetchone() if staff is None: abort(404, 'Staff id {} does not exist.'.format(id)) superv={} if not (staff['Supervisor'] is None): superv = db.execute( ''' SELECT Name FROM staff WHERE staffId = %s ''', staff['Supervisor']).fetchone() supervisees = [] supervisees = db.execute( ''' SELECT StaffId, Name from staff where supervisor = %s ORDER BY Name ''',id).fetchall() return render_template('staff.html', staff=staff, superv=superv, supervisees=supervisees)
app.py
import warnings warnings.filterwarnings('ignore', category=FutureWarning) from flask import abort, render_template, Flask import logging import db APP = Flask(__name__) # Start page @APP.route('/') def index(): stats = {} x = db.execute('SELECT COUNT(*) AS movies FROM MOVIE').fetchone() stats.update(x) x = db.execute('SELECT COUNT(*) AS actors FROM ACTOR').fetchone() stats.update(x) x = db.execute('SELECT COUNT(*) AS streams FROM STREAM').fetchone() stats.update(x) logging.info(stats) return render_template('index.html',stats=stats) # Initialize db # It assumes a script called db.sql is stored in the sql folder @APP.route('/init/') def init(): return render_template('init.html', init=db.init()) # Movies @APP.route('/movies/') def list_movies(): movies = db.execute( ''' SELECT MovieId, Title, Year, Duration FROM MOVIE ORDER BY Title ''').fetchall() return render_template('movie-list.html', movies=movies) @APP.route('/movies/<int:id>/') def get_movie(id): movie = db.execute( ''' SELECT MovieId, Title, Year, Duration FROM MOVIE WHERE movieId = %s ''', id).fetchone() if movie is None: abort(404, 'Movie id {} does not exist.'.format(id)) genres = db.execute( ''' SELECT GenreId, Label FROM MOVIE_GENRE NATURAL JOIN GENRE WHERE movieId = %s ORDER BY Label ''', id).fetchall() actors = db.execute( ''' SELECT ActorId, Name FROM MOVIE_ACTOR NATURAL JOIN ACTOR WHERE MovieId = %s ORDER BY Name ''', id).fetchall() streams = db.execute( ''' SELECT StreamId, StreamDate FROM STREAM WHERE MovieId = %s ORDER BY StreamDate Desc ''', id).fetchall(); return render_template('movie.html', movie=movie, genres=genres, actors=actors, streams=streams) @APP.route('/movies/search/<expr>/') def search_movie(expr): search = { 'expr': expr } expr = '%' + expr + '%' movies = db.execute( ''' SELECT MovieId, Title FROM MOVIE WHERE Title LIKE %s ''', expr).fetchall() return render_template('movie-search.html', search=search,movies=movies) # Actors @APP.route('/actors/') def list_actors(): actors = 
db.execute(''' SELECT ActorId, Name FROM Actor ORDER BY Name ''').fetchall() return render_template('actor-list.html', actors=actors) @APP.route('/actors/<int:id>/') def view_movies_by_actor(id): actor = db.execute( ''' SELECT ActorId, Name FROM ACTOR WHERE actorId = %s ''', id).fetchone() if actor is None: abort(404, 'Actor id {} does not exist.'.format(id)) movies = db.execute( ''' SELECT MovieId, Title FROM MOVIE NATURAL JOIN MOVIE_ACTOR WHERE actorId = %s ORDER BY Title ''', id).fetchall() return render_template('actor.html', actor=actor, movies=movies) @APP.route('/actors/search/<expr>/') def search_actor(expr): search = { 'expr': expr } # SQL INJECTION POSSIBLE! - avoid this! actors = db.execute( ' SELECT ActorId, Name' ' FROM ACTOR ' ' WHERE NAME LIKE \'%' + expr + '%\'' ).fetchall() return render_template('actor-search.html', search=search,actors=actors) # Genres @APP.route('/genres/') def list_genres(): genres = db.execute(''' SELECT GenreId, Label FROM GENRE ORDER BY Label ''').fetchall() return render_template('genre-list.html', genres=genres) @APP.route('/genres/<int:id>/') def view_movies_by_genre(id): genre = db.execute( ''' SELECT GenreId, Label FROM GENRE WHERE GenreId = %s ''', id).fetchone() if genre is None: abort(404, 'Genre id {} does not exist.'.format(id)) movies = db.execute( ''' SELECT MovieId, Title FROM MOVIE NATURAL JOIN MOVIE_GENRE WHERE GenreId = %s ORDER BY Title ''', id).fetchall() return render_template('genre.html', genre=genre, movies=movies) # Streams @APP.route('/streams/<int:id>/') def get_stream(id): stream = db.execute( ''' SELECT StreamId, StreamDate, Charge, MovieId, Title, CustomerId, Name FROM STREAM NATURAL JOIN MOVIE NATURAL JOIN CUSTOMER WHERE StreamId = %s ''', id).fetchone() if stream is None: abort(404, 'Stream id {} does not exist.'.format(id)) return render_template('stream.html', stream=stream) # Staff @APP.route('/staff/') def list_staff(): staff = db.execute(''' SELECT S1.StaffId AS StaffId, S1.Name AS Name, 
S1.Job AS Job, S1.Supervisor AS Supervisor, S2.Name AS SupervisorName FROM STAFF S1 LEFT JOIN STAFF S2 ON(S1.Supervisor = S2.StaffId) ORDER BY S1.Name ''').fetchall() return render_template('staff-list.html', staff=staff) @APP.route('/staff/<int:id>/') def show_staff(id): staff = db.execute( ''' SELECT StaffId, Name, Supervisor, Job FROM STAFF WHERE staffId = %s ''', id).fetchone() if staff is None: abort(404, 'Staff id {} does not exist.'.format(id)) superv={} if not (staff['Supervisor'] is None): superv = db.execute( ''' SELECT Name FROM staff WHERE staffId = %s ''', staff['Supervisor']).fetchone() supervisees = [] supervisees = db.execute( ''' SELECT StaffId, Name from staff where supervisor = %s ORDER BY Name ''',id).fetchall() return render_template('staff.html', staff=staff, superv=superv, supervisees=supervisees)
0.308294
0.054651
import csv import logging import os from collections import defaultdict from typing import ( Iterator, Dict, List, Tuple, TextIO, Any, DefaultDict, Optional, ) import rdflib from smart_open import open from forte.common import Resources from forte.common.configuration import Config from forte.common.exception import ResourceError from forte.data.base_reader import PackReader from forte.data.data_pack import DataPack from forte.datasets.wikipedia.dbpedia.db_utils import ( NIFParser, get_resource_attribute, get_resource_name, get_resource_fragment, print_progress, ContextGroupedNIFReader, state_type, ) from forte.processors.base import PackWriter from ft.onto.wikipedia import ( WikiPage, WikiSection, WikiParagraph, WikiTitle, WikiAnchor, WikiInfoBoxProperty, WikiInfoBoxMapped, WikiCategory, ) __all__ = [ "DBpediaWikiReader", "WikiPackReader", "WikiArticleWriter", "WikiAnchorReader", "WikiStructReader", "WikiPropertyReader", "WikiInfoBoxReader", ] class DBpediaWikiReader(PackReader): """ This reader reads in the Wikipedia full text articles from a DBpedia full text dump, which is the `NIF Context` dataset from here: https://wiki.dbpedia.org/downloads-2016-10#p10608-2 . 
""" def __init__( self, ): super().__init__() self.__redirects: Dict[str, str] = {} def initialize(self, resources: Resources, configs: Config): super().initialize(resources, configs) if self.resources.contains("redirects"): self.__redirects = self.resources.get("redirects") logging.info("%d redirects loaded.", len(self.__redirects)) else: raise ResourceError("Redirects not provided from resources.") def _collect( # type: ignore self, nif_context: str ) -> Iterator[Dict[str, str]]: str_data: Dict[str, str] = {} for context_statements in NIFParser(nif_context): for s, v, o, c in context_statements: nif_type = get_resource_attribute(s, "nif") print_progress(f"Collecting DBpedia resource: [{c.identifier}]") fragment = get_resource_fragment(v) if ( nif_type and nif_type == "context" and fragment is not None and fragment == "isString" ): str_data["text"] = o.toPython() doc_name: Optional[str] = get_resource_name(s) old_id: Optional[str] = get_resource_attribute( c.identifier, "oldid" ) if doc_name is not None and old_id is not None: str_data["doc_name"] = doc_name str_data["oldid"] = old_id yield str_data def _parse_pack(self, doc_data: Dict[str, str]) -> Iterator[DataPack]: pack = DataPack() doc_name: str = doc_data["doc_name"] if doc_name in self.__redirects: doc_name = self.__redirects[doc_name] full_text: str = doc_data["text"] pack.set_text(full_text) page = WikiPage(pack, 0, len(full_text)) page.page_id = doc_data["oldid"] page.page_name = doc_name pack.pack_name = doc_name yield pack def read_index(pack_index_path: str) -> Dict[str, str]: """ Reads an index from the page name to the path of the stored pack. Args: pack_index_path: The path of this index file. The file should be a tab separated file. Returns: A dictionary that maps from the page name to the full path. 
""" page_idx: Dict[str, str] = {} logging.info("Reading pack index from %s", pack_index_path) with open(pack_index_path) as idx: for page_name, page_path in csv.reader(idx, delimiter="\t"): page_idx[page_name] = page_path return page_idx class WikiPackReader(PackReader): """ This reader reads information from an NIF graph, and find out the corresponding data pack stored on disk. The output from this reader are these data packs plus the additional NIF information. The function `add_wiki_info` is to be implemented to handle how the NIF statements are added to the data pack. """ def __init__(self): super().__init__() self._pack_index: Dict[str, str] = {} self._pack_dir: str = "" self._redirects: Dict[str, str] = {} self._resume_index: Dict[str, str] = {} def initialize(self, resources: Resources, configs: Config): super().initialize(resources, configs) # A mapping from the name of the page to the path on th disk. self._pack_index = read_index(configs.pack_index) self._pack_dir = configs.pack_dir if self.configs.resume_index: self._resume_index = read_index(configs.resume_index) print_progress( f"Loaded {len(self._resume_index)} existing " f"files.", "\n" ) if self.resources.contains("redirects"): self._redirects = self.resources.get("redirects") print_progress(f"{len(self._redirects)} redirects loaded.", "\n") else: raise ResourceError("Redirects not provided from resources.") def add_wiki_info(self, pack: DataPack, statements: List[state_type]): raise NotImplementedError def _collect( # type: ignore self, nif_path: str ) -> Iterator[Tuple[str, Dict[str, List[state_type]]]]: skipped = 0 for _, statements in ContextGroupedNIFReader(nif_path): name = get_resource_name(statements[0][0]) if name is not None: if name not in self._resume_index: yield name, statements else: skipped += 1 print_progress( f"Skipped {skipped} documents", terminal_only=True ) def _parse_pack( self, collection: Tuple[str, List[state_type]] ) -> Iterator[DataPack]: resource_name, statements = 
collection if resource_name in self._redirects: resource_name = self._redirects[resource_name] if resource_name in self._pack_index: print_progress( f"Handling resource [{resource_name}] in {self.component_name}" ) pack_path = os.path.join( self._pack_dir, self._pack_index[resource_name] ) if os.path.exists(pack_path): pack: DataPack = DataPack.deserialize( pack_path, self.configs.serialize_method, self.configs.zip_pack, ) self.add_wiki_info(pack, statements) yield pack else: logging.info("Resource %s pack not found.", resource_name) @classmethod def default_configs(cls): """ This defines a basic config structure for the reader. Here: - pack_dir: the directory that contains all the serialized packs. - pack_index: the file name under the pack directory that points to the index from the name to the actual pack path. """ return { "pack_index": "article.idx", "pack_dir": ".", "resume_index": None, } class WikiArticleWriter(PackWriter): """ This is a pack writer that writes out the Wikipedia articles to disk. It has two special behaviors: 1. If the `input_index_file` file is provided via the configuration and it exists, the file will be used to determine the path of writing the data packs. This will also activate the overwrite mode. 2. If the `input_index_file` file is not provided, 2a. The packs are organized into directories. Each directory contains at most 2000 documents. 2b. the overwrite mode will not be activated 3. If the `output_index_file` is provided, an index file with the provided name/path will be created, its content will be a mapping from the article name to the article path. There are two general use cases: 1. If the writer is used to write a new directory of data, simply provide the `output_index_file` 2. 
If the writer is used to add content/overwriting to an existing directory, it is suggested to use the index file of the original directory as the `input_index_file`, and the `output_index_file` can be used to store the information for this new writing process if desired. """ _input_index_file: TextIO _output_index_file: TextIO # It is difficult to get the type of the csv writer # https://stackoverflow.com/questions # /51264355/how-to-type-annotate-object-returned-by-csv-writer _csv_writer: Any def __init__(self): super().__init__() self.article_count: int = 0 self.__use_existing_index: bool = False self._article_index = {} def initialize(self, resources: Resources, configs: Config): super().initialize(resources, configs) self.article_count = 0 if self.configs.use_input_index and self.configs.input_index_file: # Load input index. input_index_path = self.configs.input_index_file self._article_index = {} if os.path.exists(input_index_path): self._input_index_file = open(input_index_path) with open(input_index_path) as f: for line in f: article_name, sub_path = line.strip().split() self._article_index[article_name] = sub_path self.__use_existing_index = True self.configs.overwrite = True logging.info( "Wikipedia writer is setup with existing index " "file. The output will be written following the input " "index path and overwritten is enabled." 
) else: raise FileNotFoundError( f"Cannot find provided index file {input_index_path}" ) else: self.__use_existing_index = False output_index_path = os.path.join( self.configs.output_dir, self.configs.output_index_file ) self._output_index_file = ( open(output_index_path, "a") if self.configs.append_to_index else open(output_index_path, "w") ) self._csv_writer = csv.writer(self._output_index_file, delimiter="\t") def sub_output_path(self, pack: DataPack) -> Optional[str]: if self.__use_existing_index: if pack.pack_name in self._article_index: # Since datasets are built separated, there might be cases # where the article referred later is not in the original # parsed dataset, so we need to check if they exist. # We could replace the suffix based on writing config. return ( self._article_index[pack.pack_name].split(".")[0] + self._suffix ) else: return None else: # Organize the data by IO ordering instead. sub_dir = str(int(self.article_count / 2000)).zfill(5) pid = pack.get_single(WikiPage).page_id # type: ignore doc_name = f"doc_{self.article_count}" if pid is None else pid return os.path.join(sub_dir, doc_name) + self._suffix def _process(self, input_pack: DataPack): """ In additional writing the data pack, we also write the index under the condition, to store the document id to the relative storage of this DataPack. This can be used as a simple index to retrieve the relevant file, which can enable faster lookup. Args: input_pack: The DataPack that contains the Wikipedia information. Returns: """ super()._process(input_pack) # Write the output index files. 
out_path = self.sub_output_path(input_pack) self._csv_writer.writerow([input_pack.pack_name, out_path]) self.article_count += 1 if self.article_count % 1000 == 0: logging.info( "Written %s to %s", self.article_count, self.configs.output_dir ) def finish(self, _: Resources): if self.configs.use_input_index and self.configs.input_index_file: self._input_index_file.close() self._output_index_file.close() @classmethod def default_configs(cls): """ This defines a basic config structure for the reader. The additional parameters to provide: - use_input_index (bool): whether to use the input index file to find data. - input_index_file (str): the path providing the index from the wikipedia article name to the relative paths that stores these files. This file will only be used if the `use_input_index` and `overwrite` are both set to true, and the data path will be used to write the results (which means the existing files will be overwritten). - output_index_file (str): if provided, will write out the index from file name to the packs. This path and the relative paths are all relative names are relative to the `output_dir`. - append_to_index (bool): if provided, will append to the `output_index_file` instead of creating a new one. Returns: The default configuration of this writer. 
""" return { "use_input_index": False, "input_index_file": None, "output_index_file": "article.idx", "append_to_index": False, } class WikiStructReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia Structure information from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, statements: List): for nif_range, rel, struct_type in statements: r = get_resource_fragment(rel) if r is not None and r == "type": range_ = get_resource_attribute(nif_range, "char") if range_ is None: continue begin, end = [int(d) for d in range_.split(",")] if end > len(pack.text): # Some nif dataset are off by a bit, mostly when there # are new line characters, we cannot correct them. # but we need to make sure they don't go longer than # the text. logging.info( "NIF Structure end is %d by %s, " "clipped to fit with the text.", end, nif_range, ) end = len(pack.text) if end <= begin: logging.info( "Provided struct [%d:%d] is invalid.", begin, end ) continue struct_ = get_resource_fragment(struct_type) if struct_ is not None: if struct_ == "Section": WikiSection(pack, begin, end) elif struct_ == "Paragraph": WikiParagraph(pack, begin, end) elif struct_ == "Title": WikiTitle(pack, begin, end) else: logging.warning("Unknown struct type: %s", struct_type) class WikiAnchorReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia anchors from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, statements: List): link_grouped: DefaultDict[ str, Dict[str, rdflib.term.Node] ] = defaultdict(dict) for nif_range, rel, info in statements: range_ = get_resource_attribute(nif_range, "char") r = get_resource_fragment(rel) if range_ is not None and r is not None: link_grouped[range_][r] = info for range_, link_infos in link_grouped.items(): begin, end = [int(d) for d in range_.split(",")] if end > len(pack.text): # Some nif dataset are off by a bit, mostly 
when there are # new line characters, we cannot correct them. # but we need to make sure they don't go longer than the # text. logging.info( "Provided anchor end is %d, " "clipped to fit with the text.", end, ) end = len(pack.text) if end <= begin: logging.info("Provided anchor [%d:%d is invalid.]", begin, end) continue for info_key, info_value in link_infos.items(): info_value = str(info_value) if info_key == "type": anchor_type = get_resource_fragment(info_value) if ( not anchor_type == "Phrase" and not anchor_type == "Word" ): logging.warning("Unknown anchor type: %s", info_value) if info_key == "taIdentRef": target_page_name = get_resource_name(info_value) if ( target_page_name is not None and target_page_name in self._redirects ): target_page_name = self._redirects[target_page_name] if target_page_name is not None: # Only create anchor with proper link. anchor = WikiAnchor(pack, begin, end) anchor.target_page_name = target_page_name # If it is an DBpedia resource, the domain will be # truncated, otherwise it will stay the same, meaning # it is an external link. 
anchor.is_external = target_page_name == str(info_value) class WikiPropertyReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia raw info boxes (also known as properties) from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, statements: List): for _, v, o in statements: slot_name = v.toPython() slot_value = get_resource_name(o) if slot_value is not None: info_box = WikiInfoBoxProperty(pack) info_box.key = slot_name info_box.value = slot_value class WikiInfoBoxReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia cleaned info boxes from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, info_box_statements: List): for _, v, o in info_box_statements: name = get_resource_name(o) if name is not None: info_box = WikiInfoBoxMapped(pack) info_box.key = v.toPython() info_box.value = name class WikiCategoryReader(WikiPackReader): """ Read the dbpedia category file to add category information. """ def add_wiki_info(self, pack: DataPack, statements: List[state_type]): for _, _, o in statements: resource_name = get_resource_name(o) if resource_name is not None: wc = WikiCategory(pack) wc.values.append(resource_name) pack.add_entry(wc)
forte/datasets/wikipedia/dbpedia/dbpedia_datasets.py
import csv import logging import os from collections import defaultdict from typing import ( Iterator, Dict, List, Tuple, TextIO, Any, DefaultDict, Optional, ) import rdflib from smart_open import open from forte.common import Resources from forte.common.configuration import Config from forte.common.exception import ResourceError from forte.data.base_reader import PackReader from forte.data.data_pack import DataPack from forte.datasets.wikipedia.dbpedia.db_utils import ( NIFParser, get_resource_attribute, get_resource_name, get_resource_fragment, print_progress, ContextGroupedNIFReader, state_type, ) from forte.processors.base import PackWriter from ft.onto.wikipedia import ( WikiPage, WikiSection, WikiParagraph, WikiTitle, WikiAnchor, WikiInfoBoxProperty, WikiInfoBoxMapped, WikiCategory, ) __all__ = [ "DBpediaWikiReader", "WikiPackReader", "WikiArticleWriter", "WikiAnchorReader", "WikiStructReader", "WikiPropertyReader", "WikiInfoBoxReader", ] class DBpediaWikiReader(PackReader): """ This reader reads in the Wikipedia full text articles from a DBpedia full text dump, which is the `NIF Context` dataset from here: https://wiki.dbpedia.org/downloads-2016-10#p10608-2 . 
""" def __init__( self, ): super().__init__() self.__redirects: Dict[str, str] = {} def initialize(self, resources: Resources, configs: Config): super().initialize(resources, configs) if self.resources.contains("redirects"): self.__redirects = self.resources.get("redirects") logging.info("%d redirects loaded.", len(self.__redirects)) else: raise ResourceError("Redirects not provided from resources.") def _collect( # type: ignore self, nif_context: str ) -> Iterator[Dict[str, str]]: str_data: Dict[str, str] = {} for context_statements in NIFParser(nif_context): for s, v, o, c in context_statements: nif_type = get_resource_attribute(s, "nif") print_progress(f"Collecting DBpedia resource: [{c.identifier}]") fragment = get_resource_fragment(v) if ( nif_type and nif_type == "context" and fragment is not None and fragment == "isString" ): str_data["text"] = o.toPython() doc_name: Optional[str] = get_resource_name(s) old_id: Optional[str] = get_resource_attribute( c.identifier, "oldid" ) if doc_name is not None and old_id is not None: str_data["doc_name"] = doc_name str_data["oldid"] = old_id yield str_data def _parse_pack(self, doc_data: Dict[str, str]) -> Iterator[DataPack]: pack = DataPack() doc_name: str = doc_data["doc_name"] if doc_name in self.__redirects: doc_name = self.__redirects[doc_name] full_text: str = doc_data["text"] pack.set_text(full_text) page = WikiPage(pack, 0, len(full_text)) page.page_id = doc_data["oldid"] page.page_name = doc_name pack.pack_name = doc_name yield pack def read_index(pack_index_path: str) -> Dict[str, str]: """ Reads an index from the page name to the path of the stored pack. Args: pack_index_path: The path of this index file. The file should be a tab separated file. Returns: A dictionary that maps from the page name to the full path. 
""" page_idx: Dict[str, str] = {} logging.info("Reading pack index from %s", pack_index_path) with open(pack_index_path) as idx: for page_name, page_path in csv.reader(idx, delimiter="\t"): page_idx[page_name] = page_path return page_idx class WikiPackReader(PackReader): """ This reader reads information from an NIF graph, and find out the corresponding data pack stored on disk. The output from this reader are these data packs plus the additional NIF information. The function `add_wiki_info` is to be implemented to handle how the NIF statements are added to the data pack. """ def __init__(self): super().__init__() self._pack_index: Dict[str, str] = {} self._pack_dir: str = "" self._redirects: Dict[str, str] = {} self._resume_index: Dict[str, str] = {} def initialize(self, resources: Resources, configs: Config): super().initialize(resources, configs) # A mapping from the name of the page to the path on th disk. self._pack_index = read_index(configs.pack_index) self._pack_dir = configs.pack_dir if self.configs.resume_index: self._resume_index = read_index(configs.resume_index) print_progress( f"Loaded {len(self._resume_index)} existing " f"files.", "\n" ) if self.resources.contains("redirects"): self._redirects = self.resources.get("redirects") print_progress(f"{len(self._redirects)} redirects loaded.", "\n") else: raise ResourceError("Redirects not provided from resources.") def add_wiki_info(self, pack: DataPack, statements: List[state_type]): raise NotImplementedError def _collect( # type: ignore self, nif_path: str ) -> Iterator[Tuple[str, Dict[str, List[state_type]]]]: skipped = 0 for _, statements in ContextGroupedNIFReader(nif_path): name = get_resource_name(statements[0][0]) if name is not None: if name not in self._resume_index: yield name, statements else: skipped += 1 print_progress( f"Skipped {skipped} documents", terminal_only=True ) def _parse_pack( self, collection: Tuple[str, List[state_type]] ) -> Iterator[DataPack]: resource_name, statements = 
collection if resource_name in self._redirects: resource_name = self._redirects[resource_name] if resource_name in self._pack_index: print_progress( f"Handling resource [{resource_name}] in {self.component_name}" ) pack_path = os.path.join( self._pack_dir, self._pack_index[resource_name] ) if os.path.exists(pack_path): pack: DataPack = DataPack.deserialize( pack_path, self.configs.serialize_method, self.configs.zip_pack, ) self.add_wiki_info(pack, statements) yield pack else: logging.info("Resource %s pack not found.", resource_name) @classmethod def default_configs(cls): """ This defines a basic config structure for the reader. Here: - pack_dir: the directory that contains all the serialized packs. - pack_index: the file name under the pack directory that points to the index from the name to the actual pack path. """ return { "pack_index": "article.idx", "pack_dir": ".", "resume_index": None, } class WikiArticleWriter(PackWriter): """ This is a pack writer that writes out the Wikipedia articles to disk. It has two special behaviors: 1. If the `input_index_file` file is provided via the configuration and it exists, the file will be used to determine the path of writing the data packs. This will also activate the overwrite mode. 2. If the `input_index_file` file is not provided, 2a. The packs are organized into directories. Each directory contains at most 2000 documents. 2b. the overwrite mode will not be activated 3. If the `output_index_file` is provided, an index file with the provided name/path will be created, its content will be a mapping from the article name to the article path. There are two general use cases: 1. If the writer is used to write a new directory of data, simply provide the `output_index_file` 2. 
If the writer is used to add content/overwriting to an existing directory, it is suggested to use the index file of the original directory as the `input_index_file`, and the `output_index_file` can be used to store the information for this new writing process if desired. """ _input_index_file: TextIO _output_index_file: TextIO # It is difficult to get the type of the csv writer # https://stackoverflow.com/questions # /51264355/how-to-type-annotate-object-returned-by-csv-writer _csv_writer: Any def __init__(self): super().__init__() self.article_count: int = 0 self.__use_existing_index: bool = False self._article_index = {} def initialize(self, resources: Resources, configs: Config): super().initialize(resources, configs) self.article_count = 0 if self.configs.use_input_index and self.configs.input_index_file: # Load input index. input_index_path = self.configs.input_index_file self._article_index = {} if os.path.exists(input_index_path): self._input_index_file = open(input_index_path) with open(input_index_path) as f: for line in f: article_name, sub_path = line.strip().split() self._article_index[article_name] = sub_path self.__use_existing_index = True self.configs.overwrite = True logging.info( "Wikipedia writer is setup with existing index " "file. The output will be written following the input " "index path and overwritten is enabled." 
) else: raise FileNotFoundError( f"Cannot find provided index file {input_index_path}" ) else: self.__use_existing_index = False output_index_path = os.path.join( self.configs.output_dir, self.configs.output_index_file ) self._output_index_file = ( open(output_index_path, "a") if self.configs.append_to_index else open(output_index_path, "w") ) self._csv_writer = csv.writer(self._output_index_file, delimiter="\t") def sub_output_path(self, pack: DataPack) -> Optional[str]: if self.__use_existing_index: if pack.pack_name in self._article_index: # Since datasets are built separated, there might be cases # where the article referred later is not in the original # parsed dataset, so we need to check if they exist. # We could replace the suffix based on writing config. return ( self._article_index[pack.pack_name].split(".")[0] + self._suffix ) else: return None else: # Organize the data by IO ordering instead. sub_dir = str(int(self.article_count / 2000)).zfill(5) pid = pack.get_single(WikiPage).page_id # type: ignore doc_name = f"doc_{self.article_count}" if pid is None else pid return os.path.join(sub_dir, doc_name) + self._suffix def _process(self, input_pack: DataPack): """ In additional writing the data pack, we also write the index under the condition, to store the document id to the relative storage of this DataPack. This can be used as a simple index to retrieve the relevant file, which can enable faster lookup. Args: input_pack: The DataPack that contains the Wikipedia information. Returns: """ super()._process(input_pack) # Write the output index files. 
out_path = self.sub_output_path(input_pack) self._csv_writer.writerow([input_pack.pack_name, out_path]) self.article_count += 1 if self.article_count % 1000 == 0: logging.info( "Written %s to %s", self.article_count, self.configs.output_dir ) def finish(self, _: Resources): if self.configs.use_input_index and self.configs.input_index_file: self._input_index_file.close() self._output_index_file.close() @classmethod def default_configs(cls): """ This defines a basic config structure for the reader. The additional parameters to provide: - use_input_index (bool): whether to use the input index file to find data. - input_index_file (str): the path providing the index from the wikipedia article name to the relative paths that stores these files. This file will only be used if the `use_input_index` and `overwrite` are both set to true, and the data path will be used to write the results (which means the existing files will be overwritten). - output_index_file (str): if provided, will write out the index from file name to the packs. This path and the relative paths are all relative names are relative to the `output_dir`. - append_to_index (bool): if provided, will append to the `output_index_file` instead of creating a new one. Returns: The default configuration of this writer. 
""" return { "use_input_index": False, "input_index_file": None, "output_index_file": "article.idx", "append_to_index": False, } class WikiStructReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia Structure information from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, statements: List): for nif_range, rel, struct_type in statements: r = get_resource_fragment(rel) if r is not None and r == "type": range_ = get_resource_attribute(nif_range, "char") if range_ is None: continue begin, end = [int(d) for d in range_.split(",")] if end > len(pack.text): # Some nif dataset are off by a bit, mostly when there # are new line characters, we cannot correct them. # but we need to make sure they don't go longer than # the text. logging.info( "NIF Structure end is %d by %s, " "clipped to fit with the text.", end, nif_range, ) end = len(pack.text) if end <= begin: logging.info( "Provided struct [%d:%d] is invalid.", begin, end ) continue struct_ = get_resource_fragment(struct_type) if struct_ is not None: if struct_ == "Section": WikiSection(pack, begin, end) elif struct_ == "Paragraph": WikiParagraph(pack, begin, end) elif struct_ == "Title": WikiTitle(pack, begin, end) else: logging.warning("Unknown struct type: %s", struct_type) class WikiAnchorReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia anchors from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, statements: List): link_grouped: DefaultDict[ str, Dict[str, rdflib.term.Node] ] = defaultdict(dict) for nif_range, rel, info in statements: range_ = get_resource_attribute(nif_range, "char") r = get_resource_fragment(rel) if range_ is not None and r is not None: link_grouped[range_][r] = info for range_, link_infos in link_grouped.items(): begin, end = [int(d) for d in range_.split(",")] if end > len(pack.text): # Some nif dataset are off by a bit, mostly 
when there are # new line characters, we cannot correct them. # but we need to make sure they don't go longer than the # text. logging.info( "Provided anchor end is %d, " "clipped to fit with the text.", end, ) end = len(pack.text) if end <= begin: logging.info("Provided anchor [%d:%d is invalid.]", begin, end) continue for info_key, info_value in link_infos.items(): info_value = str(info_value) if info_key == "type": anchor_type = get_resource_fragment(info_value) if ( not anchor_type == "Phrase" and not anchor_type == "Word" ): logging.warning("Unknown anchor type: %s", info_value) if info_key == "taIdentRef": target_page_name = get_resource_name(info_value) if ( target_page_name is not None and target_page_name in self._redirects ): target_page_name = self._redirects[target_page_name] if target_page_name is not None: # Only create anchor with proper link. anchor = WikiAnchor(pack, begin, end) anchor.target_page_name = target_page_name # If it is an DBpedia resource, the domain will be # truncated, otherwise it will stay the same, meaning # it is an external link. 
anchor.is_external = target_page_name == str(info_value) class WikiPropertyReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia raw info boxes (also known as properties) from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, statements: List): for _, v, o in statements: slot_name = v.toPython() slot_value = get_resource_name(o) if slot_value is not None: info_box = WikiInfoBoxProperty(pack) info_box.key = slot_name info_box.value = slot_value class WikiInfoBoxReader(WikiPackReader): """ This reader extends the WikiPackReader and add the Wikipedia cleaned info boxes from https://wiki.dbpedia.org/downloads-2016-10#p10608-2 """ def add_wiki_info(self, pack: DataPack, info_box_statements: List): for _, v, o in info_box_statements: name = get_resource_name(o) if name is not None: info_box = WikiInfoBoxMapped(pack) info_box.key = v.toPython() info_box.value = name class WikiCategoryReader(WikiPackReader): """ Read the dbpedia category file to add category information. """ def add_wiki_info(self, pack: DataPack, statements: List[state_type]): for _, _, o in statements: resource_name = get_resource_name(o) if resource_name is not None: wc = WikiCategory(pack) wc.values.append(resource_name) pack.add_entry(wc)
0.644561
0.087525
import os from collections import deque import numpy as np import pandas as pd __all__ = ['TFSummary', 'find_tf_summary_dirs', 'read_tf_summary_dir'] class TFSummary(object): """Data parsed from TensorFlow summary files. Parameters ---------- training_loss : pd.Series Training loss series, with the step as index. validation_loss : pd.Series Validation loss series, with the step as index. """ def __init__(self, training_loss=None, validation_loss=None): self.training_loss = training_loss self.validation_loss = validation_loss @classmethod def from_accumulator(cls, acc): """Extract values from TensorFlow event accumulator. Parameters ---------- acc : tensorflow.python.summary.event_accumulator.EventAccumulator TensorFlow event accumulator Returns ------- TFSummary """ tags = acc.Tags() kwargs = {} # extract scalar summaries def extract_scalar(t): events = acc.Scalars(t) steps = np.asarray([e.step for e in events], dtype=np.int) values = np.asarray([e.value for e in events], dtype=np.float64) return pd.Series(index=steps, data=values) for tag in tags['scalars']: for loss_tag in ('/training_loss', '/validation_loss'): if tag.endswith(loss_tag): kwargs[loss_tag[1:]] = extract_scalar(tag) # compose the summary object return TFSummary(**kwargs) def find_tf_summary_dirs(root): """Find all summary directories from the specified root. Directory which contains files of pattern "*.tfevents.*" will be considered as a summary directory. Parameters ---------- root : str Path of the root directory. Yields ------ (str, tuple[str]) A tuple containing the path of summary directory, as well as the file names matching "*tfevents*". """ filenames = [] queue = deque() queue.append(root) while queue: path = queue.popleft() for f in os.listdir(path): fpath = os.path.join(path, f) if os.path.isdir(fpath): queue.append(fpath) elif 'tfevents' in f: filenames.append(f) if filenames: yield path, filenames filenames = [] def read_tf_summary_dir(path): """Read summaries from specified directory. 
Parameters ---------- path : str Path of the summary directory. Returns ------- TFSummary """ from tensorflow.python.summary.event_accumulator import EventAccumulator acc = EventAccumulator(path) acc.Reload() return TFSummary.from_accumulator(acc)
madoka/utils/tfsummary.py
import os from collections import deque import numpy as np import pandas as pd __all__ = ['TFSummary', 'find_tf_summary_dirs', 'read_tf_summary_dir'] class TFSummary(object): """Data parsed from TensorFlow summary files. Parameters ---------- training_loss : pd.Series Training loss series, with the step as index. validation_loss : pd.Series Validation loss series, with the step as index. """ def __init__(self, training_loss=None, validation_loss=None): self.training_loss = training_loss self.validation_loss = validation_loss @classmethod def from_accumulator(cls, acc): """Extract values from TensorFlow event accumulator. Parameters ---------- acc : tensorflow.python.summary.event_accumulator.EventAccumulator TensorFlow event accumulator Returns ------- TFSummary """ tags = acc.Tags() kwargs = {} # extract scalar summaries def extract_scalar(t): events = acc.Scalars(t) steps = np.asarray([e.step for e in events], dtype=np.int) values = np.asarray([e.value for e in events], dtype=np.float64) return pd.Series(index=steps, data=values) for tag in tags['scalars']: for loss_tag in ('/training_loss', '/validation_loss'): if tag.endswith(loss_tag): kwargs[loss_tag[1:]] = extract_scalar(tag) # compose the summary object return TFSummary(**kwargs) def find_tf_summary_dirs(root): """Find all summary directories from the specified root. Directory which contains files of pattern "*.tfevents.*" will be considered as a summary directory. Parameters ---------- root : str Path of the root directory. Yields ------ (str, tuple[str]) A tuple containing the path of summary directory, as well as the file names matching "*tfevents*". """ filenames = [] queue = deque() queue.append(root) while queue: path = queue.popleft() for f in os.listdir(path): fpath = os.path.join(path, f) if os.path.isdir(fpath): queue.append(fpath) elif 'tfevents' in f: filenames.append(f) if filenames: yield path, filenames filenames = [] def read_tf_summary_dir(path): """Read summaries from specified directory. 
Parameters ---------- path : str Path of the summary directory. Returns ------- TFSummary """ from tensorflow.python.summary.event_accumulator import EventAccumulator acc = EventAccumulator(path) acc.Reload() return TFSummary.from_accumulator(acc)
0.889102
0.458227
import os import sys import logging import tensorflow as tf from invoke import run, exceptions log = logging.getLogger('biomedbert') log.setLevel(logging.INFO) def fine_tune_ner(ner_dataset: str, model_dir: str, model_type: str, bucket_name: str, tpu_name: str, tpu_zone: str, gcp_project: str, tpu_cores: str): """fine tune ner""" use_tpu = True config = 'large_bert_config.json' num_tpu_cores = 8 if tpu_cores is not None: num_tpu_cores = int(tpu_cores) if tpu_name is None: tpu_name = 'false' use_tpu = False if model_type == 'base': # bert base config = 'base_bert_config.json' elif model_type == 'large': # bert large config = 'large_bert_config.json' else: log.info('No config file') sys.exit(1) init_checkpoint = tf.train.latest_checkpoint('gs://{}/{}'.format(bucket_name, model_dir)) vocab_file = 'gs://{}/{}/vocab.txt'.format(bucket_name, model_dir) bert_config_file = 'gs://{}/{}/{}'.format(bucket_name, model_dir, config) output_dir = 'gs://{}/{}/NER_outputs/{}'.format(bucket_name, model_dir, ner_dataset) # output_dir = './NER_outputs/{}'.format(ner_dataset) data_dir = 'gs://{}/datasets/NER/{}'.format(bucket_name, ner_dataset) # print() # print("init_checkpoint:", init_checkpoint) # print("vocab_file:", vocab_file) # print("bert_config_file:", bert_config_file) # print("output_dir:", output_dir) # print("data_dir:", data_dir) # print("tpu_name:", tpu_name) try: run('python3 biobert/run_ner.py --vocab_file={} --bert_config_file={} --init_checkpoint={}' '--do_train=true --do_eval=true --num_train_epochs=10.0 --data_dir={} --output_dir={}' '--num_tpu_cores=128 --use_tpu={} --tpu_name={} --tpu_zone={} --gcp_project={}' '--num_tpu_cores={}'.format( vocab_file, bert_config_file, init_checkpoint, data_dir, output_dir, use_tpu, tpu_name, tpu_zone, gcp_project, num_tpu_cores)) except exceptions.UnexpectedExit: print('Cannot fine tune NER - {}'.format(ner_dataset)) def token_level_evaluation(ner_dataset: str, model_dir: str, model_type: str, bucket_name: str, tpu_name: str, 
tpu_zone: str, gcp_project: str, tpu_cores: str): """token-level evaluation ner""" use_tpu = True config = 'large_bert_config.json' num_tpu_cores = int(tpu_cores) if tpu_name is None: tpu_name = 'false' use_tpu = False if model_type == 'base': # bert base config = 'base_bert_config.json' elif model_type == 'large': # bert large config = 'large_bert_config.json' else: log.info('No config file') sys.exit(1) init_checkpoint = tf.train.latest_checkpoint('gs://{}/{}'.format(bucket_name, model_dir)) vocab_file = 'gs://{}/{}/vocab.txt'.format(bucket_name, model_dir) bert_config_file = 'gs://{}/{}/{}'.format(bucket_name, model_dir, config) # output_dir = 'gs://{}/{}/NER_outputs/{}'.format(bucket_name, model_dir, ner_dataset) output_dir = './NER_outputs/{}'.format(ner_dataset) data_dir = 'gs://{}/datasets/NER/{}'.format(bucket_name, ner_dataset) try: run('python3 biobert/run_ner.py --vocab_file={} ' '--bert_config_file={} --init_checkpoint={} --do_train=false --do_predict=true ' '--num_train_epochs=10.0 --data_dir={} ' '--output_dir={} --num_tpu_cores=128 --use_tpu={} ' '--tpu_name={} --tpu_zone={} --gcp_project={}, --num_tpu_cores={}'.format( vocab_file, bert_config_file, init_checkpoint, data_dir, output_dir, use_tpu, tpu_name, tpu_zone, gcp_project, num_tpu_cores)) except exceptions.UnexpectedExit: print('Cannot evaluate NER - {}'.format(ner_dataset)) def word_level_prediction(model_dir: str, ner_training_output_dir: str, ner_data_dir: str): """word level evaluation ner""" output_dir = 'gs://ekaba-assets/{}/{}/{}'.format(model_dir, ner_training_output_dir, ner_data_dir) ner_data_dir_path = 'gs://ekaba-assets/datasets/NER/{}'.format(ner_data_dir) try: run('python biobert/biocodes/ner_detoknize.py --token_test_path={}/token_test.txt ' \ '--label_test_path={}/label_test.txt --answer_path={}/test.tsv --output_dir={} '.format( output_dir, output_dir, ner_data_dir_path, output_dir )) except exceptions.UnexpectedExit: print('Cannot do NER word level prediction') try: if not 
os.path.exists('{}'.format(ner_training_output_dir)): os.makedirs('{}'.format(ner_training_output_dir)) run('gsutil cp gs://ekaba-assets/{}/{}/{}/NER_result_conll.txt {}'.format( model_dir, ner_training_output_dir, ner_data_dir, ner_training_output_dir)) run('perl biobert/biocodes/conlleval.pl < {}/NER_result_conll.txt'.format(ner_training_output_dir)) except exceptions.UnexpectedExit: print('Cannot do NER word level prediction - perl biocodes')
biomedbert_impl/ner_modules.py
import os import sys import logging import tensorflow as tf from invoke import run, exceptions log = logging.getLogger('biomedbert') log.setLevel(logging.INFO) def fine_tune_ner(ner_dataset: str, model_dir: str, model_type: str, bucket_name: str, tpu_name: str, tpu_zone: str, gcp_project: str, tpu_cores: str): """fine tune ner""" use_tpu = True config = 'large_bert_config.json' num_tpu_cores = 8 if tpu_cores is not None: num_tpu_cores = int(tpu_cores) if tpu_name is None: tpu_name = 'false' use_tpu = False if model_type == 'base': # bert base config = 'base_bert_config.json' elif model_type == 'large': # bert large config = 'large_bert_config.json' else: log.info('No config file') sys.exit(1) init_checkpoint = tf.train.latest_checkpoint('gs://{}/{}'.format(bucket_name, model_dir)) vocab_file = 'gs://{}/{}/vocab.txt'.format(bucket_name, model_dir) bert_config_file = 'gs://{}/{}/{}'.format(bucket_name, model_dir, config) output_dir = 'gs://{}/{}/NER_outputs/{}'.format(bucket_name, model_dir, ner_dataset) # output_dir = './NER_outputs/{}'.format(ner_dataset) data_dir = 'gs://{}/datasets/NER/{}'.format(bucket_name, ner_dataset) # print() # print("init_checkpoint:", init_checkpoint) # print("vocab_file:", vocab_file) # print("bert_config_file:", bert_config_file) # print("output_dir:", output_dir) # print("data_dir:", data_dir) # print("tpu_name:", tpu_name) try: run('python3 biobert/run_ner.py --vocab_file={} --bert_config_file={} --init_checkpoint={}' '--do_train=true --do_eval=true --num_train_epochs=10.0 --data_dir={} --output_dir={}' '--num_tpu_cores=128 --use_tpu={} --tpu_name={} --tpu_zone={} --gcp_project={}' '--num_tpu_cores={}'.format( vocab_file, bert_config_file, init_checkpoint, data_dir, output_dir, use_tpu, tpu_name, tpu_zone, gcp_project, num_tpu_cores)) except exceptions.UnexpectedExit: print('Cannot fine tune NER - {}'.format(ner_dataset)) def token_level_evaluation(ner_dataset: str, model_dir: str, model_type: str, bucket_name: str, tpu_name: str, 
tpu_zone: str, gcp_project: str, tpu_cores: str): """token-level evaluation ner""" use_tpu = True config = 'large_bert_config.json' num_tpu_cores = int(tpu_cores) if tpu_name is None: tpu_name = 'false' use_tpu = False if model_type == 'base': # bert base config = 'base_bert_config.json' elif model_type == 'large': # bert large config = 'large_bert_config.json' else: log.info('No config file') sys.exit(1) init_checkpoint = tf.train.latest_checkpoint('gs://{}/{}'.format(bucket_name, model_dir)) vocab_file = 'gs://{}/{}/vocab.txt'.format(bucket_name, model_dir) bert_config_file = 'gs://{}/{}/{}'.format(bucket_name, model_dir, config) # output_dir = 'gs://{}/{}/NER_outputs/{}'.format(bucket_name, model_dir, ner_dataset) output_dir = './NER_outputs/{}'.format(ner_dataset) data_dir = 'gs://{}/datasets/NER/{}'.format(bucket_name, ner_dataset) try: run('python3 biobert/run_ner.py --vocab_file={} ' '--bert_config_file={} --init_checkpoint={} --do_train=false --do_predict=true ' '--num_train_epochs=10.0 --data_dir={} ' '--output_dir={} --num_tpu_cores=128 --use_tpu={} ' '--tpu_name={} --tpu_zone={} --gcp_project={}, --num_tpu_cores={}'.format( vocab_file, bert_config_file, init_checkpoint, data_dir, output_dir, use_tpu, tpu_name, tpu_zone, gcp_project, num_tpu_cores)) except exceptions.UnexpectedExit: print('Cannot evaluate NER - {}'.format(ner_dataset)) def word_level_prediction(model_dir: str, ner_training_output_dir: str, ner_data_dir: str): """word level evaluation ner""" output_dir = 'gs://ekaba-assets/{}/{}/{}'.format(model_dir, ner_training_output_dir, ner_data_dir) ner_data_dir_path = 'gs://ekaba-assets/datasets/NER/{}'.format(ner_data_dir) try: run('python biobert/biocodes/ner_detoknize.py --token_test_path={}/token_test.txt ' \ '--label_test_path={}/label_test.txt --answer_path={}/test.tsv --output_dir={} '.format( output_dir, output_dir, ner_data_dir_path, output_dir )) except exceptions.UnexpectedExit: print('Cannot do NER word level prediction') try: if not 
os.path.exists('{}'.format(ner_training_output_dir)): os.makedirs('{}'.format(ner_training_output_dir)) run('gsutil cp gs://ekaba-assets/{}/{}/{}/NER_result_conll.txt {}'.format( model_dir, ner_training_output_dir, ner_data_dir, ner_training_output_dir)) run('perl biobert/biocodes/conlleval.pl < {}/NER_result_conll.txt'.format(ner_training_output_dir)) except exceptions.UnexpectedExit: print('Cannot do NER word level prediction - perl biocodes')
0.201735
0.096153
import multiprocessing import logging import numpy as np from eqsn.qubit_thread import SINGLE_GATE, MERGE_SEND, MERGE_ACCEPT, MEASURE, \ MEASURE_NON_DESTRUCTIVE, GIVE_STATEVECTOR, DOUBLE_GATE, \ CONTROLLED_GATE, NEW_QUBIT, ADD_MERGED_QUBITS_TO_DICT, CONTROLLED_TWO_GATE from eqsn.shared_dict import SharedDict from eqsn.worker_process import WorkerProcess from eqsn.process_picker import ProcessPicker class EQSN(object): """ Main object of EQSN, with this object, all of the Qubits can be controlled. All functions are threadsafe, but at the moment, only one instance should be used. """ __instance = None @staticmethod def get_instance(): if EQSN.__instance is None: return EQSN() return EQSN.__instance def __init__(self): if EQSN.__instance is not None: raise ValueError("Use get instance to get this class") EQSN.__instance = self self.manager = multiprocessing.Manager() cpu_count = multiprocessing.cpu_count() self.process_queue_list = [] for _ in range(cpu_count): q = multiprocessing.Queue() new_worker = WorkerProcess(q) p = multiprocessing.Process(target=new_worker.run, args=()) p.start() self.process_queue_list.append((p, q)) self.process_picker = ProcessPicker.get_instance( cpu_count, self.process_queue_list) # create the shared dict after all the processes have been created. self.shared_dict = SharedDict.get_instance() def new_qubit(self, q_id): """ Creates a new qubit with an id. Args: q_id (String): Id of the new qubit. """ p, q = self.process_picker.get_next_process_queue() q.put([NEW_QUBIT, q_id]) self.shared_dict.set_thread_with_id(q_id, p, q) logging.debug("Created new qubit with id %s.", q_id) def stop_all(self): """ Stops the simulator from running. """ for p, q in self.process_queue_list: q.put(None) p.join() self.shared_dict.stop_shared_dict() self.process_picker.stop_process_picker() EQSN.__instance = None def X_gate(self, q_id): """ Applies the Pauli X gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. 
""" x = np.array([[0, 1], [1, 0]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def Y_gate(self, q_id): """ Applies the Pauli Y gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array([[0, 0 - 1j], [0 + 1j, 0]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def Z_gate(self, q_id): """ Applies the Pauli Z gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array([[1, 0], [0, -1]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def H_gate(self, q_id): """ Applies the Hadamard gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = (1 / 2.0) ** 0.5 * np.array([[1, 1], [1, -1]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def T_gate(self, q_id): """ Applies the T gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array( [[1, 0], [0, (0.7071067811865476 + 0.7071067811865475j)]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def S_gate(self, q_id): """ Applies the S gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array([[1, 0], [0, 1j]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def K_gate(self, q_id): """ Applies the K gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = 0.5 * np.array([[1 + 1j, 1 - 1j], [-1 + 1j, -1 - 1j]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def RX_gate(self, q_id, rad): """ Applies a rotational X gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. 
rad(int): Rotational degrees in rad. """ mid = np.cos(rad / 2) other = -1j * np.sin(rad / 2) x = np.array([[mid, other], [other, mid]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def RY_gate(self, q_id, rad): """ Applies a rotational Y gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. rad(int): Rotational degrees in rad. """ mid = np.cos(rad / 2) other = np.sin(rad / 2) x = np.array([[mid, -1.0 * other], [other, mid]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def RZ_gate(self, q_id, rad): """ Applies a rotational Z gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. rad(int): Rotational degrees in rad. """ top = np.exp(-1j * (rad / 2)) bot = np.exp(1j * (rad / 2)) x = np.array([[top, 0], [0, bot]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def custom_gate(self, q_id, gate): """ Applies a custom gate to the qubit with q_id. Args: q_id(String): Id of the Qubit to apply the gate on. gate(np.ndarray): unitary 2x2 matrix, of the gate. """ q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, gate, q_id]) def merge_qubits(self, q_id1, q_id2): """ Merges two qubits to one process, if they are not already running in the same process. Args: q_id1 (String): Id of the Qubit merged into q_id2. q_id2 (String): Id of the Qubit merged with q_id1. 
""" queues = self.shared_dict.get_queues_for_ids([q_id1, q_id2]) if len(queues) == 1: return # Already merged else: # Block the dictionary, that noone can send commands to the qubits, logging.debug("Merge Qubits %s and %s.", q_id1, q_id2) self.shared_dict.block_shared_dict() q1 = queues[0] q2 = queues[1] merge_q = self.manager.Queue() qubits_q = self.manager.Queue() q1.put([MERGE_SEND, q_id1, merge_q, qubits_q]) q2.put([MERGE_ACCEPT, q_id2, merge_q]) qubits = qubits_q.get() q2.put([ADD_MERGED_QUBITS_TO_DICT, q_id2, qubits]) self.shared_dict.change_thread_and_queue_of_ids_nonblocking( qubits, q_id2) self.shared_dict.release_shared_dict() def cnot_gate(self, applied_to_id, controlled_by_id): """ Applies a controlled X gate, where the gate is applied to q_id1 and controlled by q_id2. Args: applied_to_id (String): Id of the Qubit on which the X gate is applied. controlled_by_id (String): Id of the Qubit which controls the gate. """ x = np.array([[0, 1], [1, 0]], dtype=np.csingle) self.merge_qubits(applied_to_id, controlled_by_id) q = self.shared_dict.get_queues_for_ids([applied_to_id])[0] q.put([CONTROLLED_GATE, x, applied_to_id, controlled_by_id]) def cphase_gate(self, applied_to_id, controlled_by_id): """ Applies a controlled Z gate, where the gate is applied to q_id1 and controlled by q_id2. Args: applied_to_id (String): Id of the Qubit on which the X gate is applied. controlled_by_id (String): Id of the Qubit which controls the gate. """ x = np.array([[0, 1], [0, -1]], dtype=np.csingle) self.merge_qubits(applied_to_id, controlled_by_id) q = self.shared_dict.get_queues_for_ids([applied_to_id])[0] q.put([CONTROLLED_GATE, x, applied_to_id, controlled_by_id]) def give_statevector_for(self, q_id): """ Gives the statevector and Qubits of a Qubit and all other Qubits with which the qubit is entangled. Args: q_id(String): Qubit id of the Qubit to get the statevector from. Returns: Tuple. 
Tuple of a lists and vector, where the first list are the qubits of the statevector and the second list is the statevector. """ ret = self.manager.Queue() q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([GIVE_STATEVECTOR, q_id, ret]) qubits, vector = ret.get() return qubits, vector def custom_two_qubit_gate(self, q_id1, q_id2, gate): """ Applies a two Qubit gate to two Qubits. Args: q_id1(String): ID of the first Qubit of the gate. q_id2(String): ID of the second Qubit of the gate. gate(np.ndarray): 4x4 unitary matrix gate. """ self.merge_qubits(q_id1, q_id2) q = self.shared_dict.get_queues_for_ids([q_id1])[0] q.put([DOUBLE_GATE, gate, q_id1, q_id2]) def custom_two_qubit_control_gate(self, q_id1, q_id2, q_id3, gate): """ Applies a two Qubit gate to two Qubits. Args: q_id1 (String): ID of the first Qubit of the gate. q_id2 (String): ID of the second Qubit of the gate. q_id3 (String): ID of the third Qubit of the gate. gate(np.ndarray): 4x4 unitary matrix gate. """ self.merge_qubits(q_id1, q_id2) self.merge_qubits(q_id1, q_id3) q = self.shared_dict.get_queues_for_ids([q_id1])[0] q.put([CONTROLLED_TWO_GATE, gate, q_id1, q_id2, q_id3]) def custom_controlled_gate(self, applied_to_id, controlled_by_id, gate): """ Applies a custom controlled gate to a Qubit. Args: applied_to_id(String): ID of the qubit to apply the gate to. controlled_by_id(String): ID of the qubit which controls the gate. gate(np.ndarray): Unitary 2x2 matrix which should be applied. """ self.merge_qubits(applied_to_id, controlled_by_id) q = self.shared_dict.get_queues_for_ids([applied_to_id])[0] q.put([CONTROLLED_GATE, gate, applied_to_id, controlled_by_id]) def measure(self, q_id, non_destructive=False): """ Measures a qubit with an id. If non_destructive is False, the qubit is removed from the system, otherwise, the qubit stays in the system after measurement, but its wavefunction collapses. Args: id (String): Id of the Qubit which should be measured. 
non_destructive(bool): If a qubit should not be removed from the system after measurement. """ ret = self.manager.Queue() q = self.shared_dict.get_queues_for_ids([q_id])[0] if non_destructive: q.put([MEASURE_NON_DESTRUCTIVE, q_id, ret]) else: q.put([MEASURE, q_id, ret]) res = ret.get() if not non_destructive: self.shared_dict.delete_id_and_check_to_join_thread(q_id) logging.debug( "Qubit with id %s has been measured with outcome %d.", q_id, res) return res
eqsn/gates.py
import multiprocessing import logging import numpy as np from eqsn.qubit_thread import SINGLE_GATE, MERGE_SEND, MERGE_ACCEPT, MEASURE, \ MEASURE_NON_DESTRUCTIVE, GIVE_STATEVECTOR, DOUBLE_GATE, \ CONTROLLED_GATE, NEW_QUBIT, ADD_MERGED_QUBITS_TO_DICT, CONTROLLED_TWO_GATE from eqsn.shared_dict import SharedDict from eqsn.worker_process import WorkerProcess from eqsn.process_picker import ProcessPicker class EQSN(object): """ Main object of EQSN, with this object, all of the Qubits can be controlled. All functions are threadsafe, but at the moment, only one instance should be used. """ __instance = None @staticmethod def get_instance(): if EQSN.__instance is None: return EQSN() return EQSN.__instance def __init__(self): if EQSN.__instance is not None: raise ValueError("Use get instance to get this class") EQSN.__instance = self self.manager = multiprocessing.Manager() cpu_count = multiprocessing.cpu_count() self.process_queue_list = [] for _ in range(cpu_count): q = multiprocessing.Queue() new_worker = WorkerProcess(q) p = multiprocessing.Process(target=new_worker.run, args=()) p.start() self.process_queue_list.append((p, q)) self.process_picker = ProcessPicker.get_instance( cpu_count, self.process_queue_list) # create the shared dict after all the processes have been created. self.shared_dict = SharedDict.get_instance() def new_qubit(self, q_id): """ Creates a new qubit with an id. Args: q_id (String): Id of the new qubit. """ p, q = self.process_picker.get_next_process_queue() q.put([NEW_QUBIT, q_id]) self.shared_dict.set_thread_with_id(q_id, p, q) logging.debug("Created new qubit with id %s.", q_id) def stop_all(self): """ Stops the simulator from running. """ for p, q in self.process_queue_list: q.put(None) p.join() self.shared_dict.stop_shared_dict() self.process_picker.stop_process_picker() EQSN.__instance = None def X_gate(self, q_id): """ Applies the Pauli X gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. 
""" x = np.array([[0, 1], [1, 0]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def Y_gate(self, q_id): """ Applies the Pauli Y gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array([[0, 0 - 1j], [0 + 1j, 0]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def Z_gate(self, q_id): """ Applies the Pauli Z gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array([[1, 0], [0, -1]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def H_gate(self, q_id): """ Applies the Hadamard gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = (1 / 2.0) ** 0.5 * np.array([[1, 1], [1, -1]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def T_gate(self, q_id): """ Applies the T gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array( [[1, 0], [0, (0.7071067811865476 + 0.7071067811865475j)]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def S_gate(self, q_id): """ Applies the S gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = np.array([[1, 0], [0, 1j]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def K_gate(self, q_id): """ Applies the K gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. """ x = 0.5 * np.array([[1 + 1j, 1 - 1j], [-1 + 1j, -1 - 1j]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def RX_gate(self, q_id, rad): """ Applies a rotational X gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. 
rad(int): Rotational degrees in rad. """ mid = np.cos(rad / 2) other = -1j * np.sin(rad / 2) x = np.array([[mid, other], [other, mid]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def RY_gate(self, q_id, rad): """ Applies a rotational Y gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. rad(int): Rotational degrees in rad. """ mid = np.cos(rad / 2) other = np.sin(rad / 2) x = np.array([[mid, -1.0 * other], [other, mid]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def RZ_gate(self, q_id, rad): """ Applies a rotational Z gate to the Qubit with q_id. Args: q_id(String): ID of the Qubit to apply the gate to. rad(int): Rotational degrees in rad. """ top = np.exp(-1j * (rad / 2)) bot = np.exp(1j * (rad / 2)) x = np.array([[top, 0], [0, bot]], dtype=np.csingle) q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, x, q_id]) def custom_gate(self, q_id, gate): """ Applies a custom gate to the qubit with q_id. Args: q_id(String): Id of the Qubit to apply the gate on. gate(np.ndarray): unitary 2x2 matrix, of the gate. """ q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([SINGLE_GATE, gate, q_id]) def merge_qubits(self, q_id1, q_id2): """ Merges two qubits to one process, if they are not already running in the same process. Args: q_id1 (String): Id of the Qubit merged into q_id2. q_id2 (String): Id of the Qubit merged with q_id1. 
""" queues = self.shared_dict.get_queues_for_ids([q_id1, q_id2]) if len(queues) == 1: return # Already merged else: # Block the dictionary, that noone can send commands to the qubits, logging.debug("Merge Qubits %s and %s.", q_id1, q_id2) self.shared_dict.block_shared_dict() q1 = queues[0] q2 = queues[1] merge_q = self.manager.Queue() qubits_q = self.manager.Queue() q1.put([MERGE_SEND, q_id1, merge_q, qubits_q]) q2.put([MERGE_ACCEPT, q_id2, merge_q]) qubits = qubits_q.get() q2.put([ADD_MERGED_QUBITS_TO_DICT, q_id2, qubits]) self.shared_dict.change_thread_and_queue_of_ids_nonblocking( qubits, q_id2) self.shared_dict.release_shared_dict() def cnot_gate(self, applied_to_id, controlled_by_id): """ Applies a controlled X gate, where the gate is applied to q_id1 and controlled by q_id2. Args: applied_to_id (String): Id of the Qubit on which the X gate is applied. controlled_by_id (String): Id of the Qubit which controls the gate. """ x = np.array([[0, 1], [1, 0]], dtype=np.csingle) self.merge_qubits(applied_to_id, controlled_by_id) q = self.shared_dict.get_queues_for_ids([applied_to_id])[0] q.put([CONTROLLED_GATE, x, applied_to_id, controlled_by_id]) def cphase_gate(self, applied_to_id, controlled_by_id): """ Applies a controlled Z gate, where the gate is applied to q_id1 and controlled by q_id2. Args: applied_to_id (String): Id of the Qubit on which the X gate is applied. controlled_by_id (String): Id of the Qubit which controls the gate. """ x = np.array([[0, 1], [0, -1]], dtype=np.csingle) self.merge_qubits(applied_to_id, controlled_by_id) q = self.shared_dict.get_queues_for_ids([applied_to_id])[0] q.put([CONTROLLED_GATE, x, applied_to_id, controlled_by_id]) def give_statevector_for(self, q_id): """ Gives the statevector and Qubits of a Qubit and all other Qubits with which the qubit is entangled. Args: q_id(String): Qubit id of the Qubit to get the statevector from. Returns: Tuple. 
Tuple of a lists and vector, where the first list are the qubits of the statevector and the second list is the statevector. """ ret = self.manager.Queue() q = self.shared_dict.get_queues_for_ids([q_id])[0] q.put([GIVE_STATEVECTOR, q_id, ret]) qubits, vector = ret.get() return qubits, vector def custom_two_qubit_gate(self, q_id1, q_id2, gate): """ Applies a two Qubit gate to two Qubits. Args: q_id1(String): ID of the first Qubit of the gate. q_id2(String): ID of the second Qubit of the gate. gate(np.ndarray): 4x4 unitary matrix gate. """ self.merge_qubits(q_id1, q_id2) q = self.shared_dict.get_queues_for_ids([q_id1])[0] q.put([DOUBLE_GATE, gate, q_id1, q_id2]) def custom_two_qubit_control_gate(self, q_id1, q_id2, q_id3, gate): """ Applies a two Qubit gate to two Qubits. Args: q_id1 (String): ID of the first Qubit of the gate. q_id2 (String): ID of the second Qubit of the gate. q_id3 (String): ID of the third Qubit of the gate. gate(np.ndarray): 4x4 unitary matrix gate. """ self.merge_qubits(q_id1, q_id2) self.merge_qubits(q_id1, q_id3) q = self.shared_dict.get_queues_for_ids([q_id1])[0] q.put([CONTROLLED_TWO_GATE, gate, q_id1, q_id2, q_id3]) def custom_controlled_gate(self, applied_to_id, controlled_by_id, gate): """ Applies a custom controlled gate to a Qubit. Args: applied_to_id(String): ID of the qubit to apply the gate to. controlled_by_id(String): ID of the qubit which controls the gate. gate(np.ndarray): Unitary 2x2 matrix which should be applied. """ self.merge_qubits(applied_to_id, controlled_by_id) q = self.shared_dict.get_queues_for_ids([applied_to_id])[0] q.put([CONTROLLED_GATE, gate, applied_to_id, controlled_by_id]) def measure(self, q_id, non_destructive=False): """ Measures a qubit with an id. If non_destructive is False, the qubit is removed from the system, otherwise, the qubit stays in the system after measurement, but its wavefunction collapses. Args: id (String): Id of the Qubit which should be measured. 
non_destructive(bool): If a qubit should not be removed from the system after measurement. """ ret = self.manager.Queue() q = self.shared_dict.get_queues_for_ids([q_id])[0] if non_destructive: q.put([MEASURE_NON_DESTRUCTIVE, q_id, ret]) else: q.put([MEASURE, q_id, ret]) res = ret.get() if not non_destructive: self.shared_dict.delete_id_and_check_to_join_thread(q_id) logging.debug( "Qubit with id %s has been measured with outcome %d.", q_id, res) return res
0.778565
0.232495
# Helpers for protobuf "wrapper" messages plus the cryptographic primitives
# used by BLE LE Secure Connections pairing (f4/f5/f6, ECDH on P-256).
from google.protobuf.message import Message
from cryptography.hazmat.primitives.asymmetric import ec
import cryptography.hazmat.backends
from Crypto.Hash import CMAC
from Crypto.Cipher import AES
import os
import binascii


def wrapper_contains_type(wrapper: Message, message_type):
    """Return True if *wrapper* carries a sub-message of *message_type*.

    The wrapper field name is the message type's full protobuf name with
    dots replaced by underscores, prefixed with "message_".
    """
    if wrapper is None:
        return False
    field_name = "message_" + message_type.DESCRIPTOR.full_name.replace(".", "_")
    return wrapper.HasField(field_name)


def wrapper_get_contents(wrapper: Message, message_type=None):
    """Extract the contained sub-message from *wrapper*.

    If *message_type* is given, the field name is derived from it;
    otherwise the currently-set field of the "message" oneof is used.
    """
    if message_type is not None:
        field_name = "message_" + message_type.DESCRIPTOR.full_name.replace(".", "_")
    else:
        field_name = wrapper.WhichOneof("message")
    return getattr(wrapper, field_name)


def crypto_generate_keypair():
    """Generate a P-256 key pair; return (private_bytes, public_bytes)."""
    private: ec.EllipticCurvePrivateKeyWithSerialization = ec.generate_private_key(
        ec.SECP256R1(), cryptography.hazmat.backends.default_backend())
    public: ec.EllipticCurvePublicKey = private.public_key()
    ser_private = _crypto_private_to_bytes(private)
    ser_public = _crypto_public_to_bytes(public)
    return ser_private, ser_public


def crypto_get_nonce():
    """Return 16 cryptographically secure random bytes."""
    return os.urandom(16)


def crypto_aes_cmac(k: bytes, m: bytes):
    """Return AES-CMAC_k(m) as bytes."""
    cobj = CMAC.new(k, ciphermod=AES)
    cobj.update(m)
    return cobj.digest()


def crypto_ble_f4(u, v, x, z):
    # f4(U, V, X, Z) = AES-CMAC_X (U || V || Z)
    m = u + v + z
    k = x
    return crypto_aes_cmac(k, m)


def crypto_ble_f5(w, n1, n2, a1, a2):
    """BLE key-derivation function f5: return (MacKey, LTK).

    T = AES-CMAC_SALT(W); each output block is
    AES-CMAC_T(counter || keyID || N1 || N2 || A1 || A2 || Length).
    """
    # Fixed SALT and keyID ("btle") values -- presumably the constants from
    # the Bluetooth Core Spec's f5 definition; TODO confirm against the spec.
    salt = binascii.unhexlify("6C88 8391 AAF5 A538 6037 0BDB 5A60 83BE".replace(" ", ""))
    keyid = binascii.unhexlify("62 74 6c 65".replace(" ", ""))
    t = crypto_aes_cmac(salt, w)

    def get_f5_counter(counter: int):
        m = counter.to_bytes(length=1, byteorder='big') + keyid + n1 + n2 + a1 + a2
        # f5's Length parameter: the size in bits of the derived key material
        # (256), appended as a 2-byte big-endian value.
        length = 256
        m = m + length.to_bytes(length=2, byteorder='big')
        return crypto_aes_cmac(t, m)

    # Counter 0 yields the MacKey, counter 1 the LTK.
    mackey = get_f5_counter(0)
    ltk = get_f5_counter(1)
    return mackey, ltk


def crypto_ble_f6(w, *args):
    """BLE check-value function f6: AES-CMAC_W over the concatenated args."""
    return crypto_aes_cmac(w, b''.join(args))


def _crypto_private_from_bytes(data: bytes) -> ec.EllipticCurvePrivateKey:
    """Rebuild a P-256 private key from its 32-byte big-endian scalar."""
    return ec.derive_private_key(
        private_value=int.from_bytes(bytes=data, byteorder='big'),
        curve=ec.SECP256R1(),
        backend=cryptography.hazmat.backends.default_backend()
    )


def _crypto_public_from_bytes(data: bytes) -> ec.EllipticCurvePublicKey:
    """Rebuild a P-256 public key from an encoded curve point."""
    return ec.EllipticCurvePublicNumbers.from_encoded_point(
        curve=ec.SECP256R1(),
        data=data
    ).public_key(backend=cryptography.hazmat.backends.default_backend())


def _crypto_private_to_bytes(private: ec.EllipticCurvePrivateKeyWithSerialization) -> bytes:
    """Serialize a private key as its 32-byte big-endian scalar."""
    numbers: ec.EllipticCurvePrivateNumbers = private.private_numbers()
    v: int = numbers.private_value
    return v.to_bytes(length=32, byteorder='big')


def _crypto_public_to_bytes(public: ec.EllipticCurvePublicKey) -> bytes:
    """Serialize a public key as an encoded curve point.

    NOTE(review): EllipticCurvePublicNumbers.encode_point() was deprecated
    and later removed from `cryptography` (public_bytes() replaces it) --
    confirm the pinned library version still provides it.
    """
    numbers: ec.EllipticCurvePublicNumbers = public.public_numbers()
    return numbers.encode_point()


def crypto_derive_dhkey(private_bytes: bytes, public_bytes: bytes):
    """Return the ECDH shared secret for our private key and a peer public key."""
    private = _crypto_private_from_bytes(private_bytes)
    public = _crypto_public_from_bytes(public_bytes)
    shared_key = private.exchange(ec.ECDH(), public)
    return shared_key


if __name__ == "__main__":
    # Smoke test: derive the DH key between two fixed private keys.
    private_a_raw = binascii.unhexlify(
        "3f49f6d4 a3c55f38 74c9b3e3 d2103f50 4aff607b eb40b799 5899b8a6 cd3c1abd".replace(" ", ""))
    private_b_raw = binascii.unhexlify(
        "55188b3d 32f6bb9a 900afcfb eed4e72a 59cb9ac2 f19d7cfb 6b4fdd49 f47fc5fd".replace(" ", ""))
    private_b = _crypto_private_from_bytes(private_b_raw)
    public_b_raw = _crypto_public_to_bytes(private_b.public_key())
    print(crypto_derive_dhkey(private_a_raw, public_b_raw))
common/protobuf_test_python/tsp_utils.py
# Helpers for protobuf "wrapper" messages plus the cryptographic primitives
# used by BLE LE Secure Connections pairing (f4/f5/f6, ECDH on P-256).
from google.protobuf.message import Message
from cryptography.hazmat.primitives.asymmetric import ec
import cryptography.hazmat.backends
from Crypto.Hash import CMAC
from Crypto.Cipher import AES
import os
import binascii


def wrapper_contains_type(wrapper: Message, message_type):
    """Return True if *wrapper* carries a sub-message of *message_type*.

    The wrapper field name is the message type's full protobuf name with
    dots replaced by underscores, prefixed with "message_".
    """
    if wrapper is None:
        return False
    field_name = "message_" + message_type.DESCRIPTOR.full_name.replace(".", "_")
    return wrapper.HasField(field_name)


def wrapper_get_contents(wrapper: Message, message_type=None):
    """Extract the contained sub-message from *wrapper*.

    If *message_type* is given, the field name is derived from it;
    otherwise the currently-set field of the "message" oneof is used.
    """
    if message_type is not None:
        field_name = "message_" + message_type.DESCRIPTOR.full_name.replace(".", "_")
    else:
        field_name = wrapper.WhichOneof("message")
    return getattr(wrapper, field_name)


def crypto_generate_keypair():
    """Generate a P-256 key pair; return (private_bytes, public_bytes)."""
    private: ec.EllipticCurvePrivateKeyWithSerialization = ec.generate_private_key(
        ec.SECP256R1(), cryptography.hazmat.backends.default_backend())
    public: ec.EllipticCurvePublicKey = private.public_key()
    ser_private = _crypto_private_to_bytes(private)
    ser_public = _crypto_public_to_bytes(public)
    return ser_private, ser_public


def crypto_get_nonce():
    """Return 16 cryptographically secure random bytes."""
    return os.urandom(16)


def crypto_aes_cmac(k: bytes, m: bytes):
    """Return AES-CMAC_k(m) as bytes."""
    cobj = CMAC.new(k, ciphermod=AES)
    cobj.update(m)
    return cobj.digest()


def crypto_ble_f4(u, v, x, z):
    # f4(U, V, X, Z) = AES-CMAC_X (U || V || Z)
    m = u + v + z
    k = x
    return crypto_aes_cmac(k, m)


def crypto_ble_f5(w, n1, n2, a1, a2):
    """BLE key-derivation function f5: return (MacKey, LTK).

    T = AES-CMAC_SALT(W); each output block is
    AES-CMAC_T(counter || keyID || N1 || N2 || A1 || A2 || Length).
    """
    # Fixed SALT and keyID ("btle") values -- presumably the constants from
    # the Bluetooth Core Spec's f5 definition; TODO confirm against the spec.
    salt = binascii.unhexlify("6C88 8391 AAF5 A538 6037 0BDB 5A60 83BE".replace(" ", ""))
    keyid = binascii.unhexlify("62 74 6c 65".replace(" ", ""))
    t = crypto_aes_cmac(salt, w)

    def get_f5_counter(counter: int):
        m = counter.to_bytes(length=1, byteorder='big') + keyid + n1 + n2 + a1 + a2
        # f5's Length parameter: the size in bits of the derived key material
        # (256), appended as a 2-byte big-endian value.
        length = 256
        m = m + length.to_bytes(length=2, byteorder='big')
        return crypto_aes_cmac(t, m)

    # Counter 0 yields the MacKey, counter 1 the LTK.
    mackey = get_f5_counter(0)
    ltk = get_f5_counter(1)
    return mackey, ltk


def crypto_ble_f6(w, *args):
    """BLE check-value function f6: AES-CMAC_W over the concatenated args."""
    return crypto_aes_cmac(w, b''.join(args))


def _crypto_private_from_bytes(data: bytes) -> ec.EllipticCurvePrivateKey:
    """Rebuild a P-256 private key from its 32-byte big-endian scalar."""
    return ec.derive_private_key(
        private_value=int.from_bytes(bytes=data, byteorder='big'),
        curve=ec.SECP256R1(),
        backend=cryptography.hazmat.backends.default_backend()
    )


def _crypto_public_from_bytes(data: bytes) -> ec.EllipticCurvePublicKey:
    """Rebuild a P-256 public key from an encoded curve point."""
    return ec.EllipticCurvePublicNumbers.from_encoded_point(
        curve=ec.SECP256R1(),
        data=data
    ).public_key(backend=cryptography.hazmat.backends.default_backend())


def _crypto_private_to_bytes(private: ec.EllipticCurvePrivateKeyWithSerialization) -> bytes:
    """Serialize a private key as its 32-byte big-endian scalar."""
    numbers: ec.EllipticCurvePrivateNumbers = private.private_numbers()
    v: int = numbers.private_value
    return v.to_bytes(length=32, byteorder='big')


def _crypto_public_to_bytes(public: ec.EllipticCurvePublicKey) -> bytes:
    """Serialize a public key as an encoded curve point.

    NOTE(review): EllipticCurvePublicNumbers.encode_point() was deprecated
    and later removed from `cryptography` (public_bytes() replaces it) --
    confirm the pinned library version still provides it.
    """
    numbers: ec.EllipticCurvePublicNumbers = public.public_numbers()
    return numbers.encode_point()


def crypto_derive_dhkey(private_bytes: bytes, public_bytes: bytes):
    """Return the ECDH shared secret for our private key and a peer public key."""
    private = _crypto_private_from_bytes(private_bytes)
    public = _crypto_public_from_bytes(public_bytes)
    shared_key = private.exchange(ec.ECDH(), public)
    return shared_key


if __name__ == "__main__":
    # Smoke test: derive the DH key between two fixed private keys.
    private_a_raw = binascii.unhexlify(
        "3f49f6d4 a3c55f38 74c9b3e3 d2103f50 4aff607b eb40b799 5899b8a6 cd3c1abd".replace(" ", ""))
    private_b_raw = binascii.unhexlify(
        "55188b3d 32f6bb9a 900afcfb eed4e72a 59cb9ac2 f19d7cfb 6b4fdd49 f47fc5fd".replace(" ", ""))
    private_b = _crypto_private_from_bytes(private_b_raw)
    public_b_raw = _crypto_public_to_bytes(private_b.public_key())
    print(crypto_derive_dhkey(private_a_raw, public_b_raw))
0.54698
0.124639
import threading
import contextlib
import uuid
from functools import partial

from tornado.stack_context import run_with_stack_context, StackContext
from tornado.concurrent import wrap as tornado_wrap

from barbante.utils.logging import get_logger

log = get_logger(__name__)


class RequestContext:
    """ An object responsible for keeping data that must be globally accessible
        relative to a certain request.
    """
    def __init__(self, tracer_id: str='UNAVAILABLE', endpoint: str='UNAVAILABLE',
                 environment: str='UNAVAILABLE'):
        self.tracer_id = tracer_id      # hyphen-less UUID string tagging the request
        self.endpoint = endpoint        # name of the endpoint that was requested
        self.environment = environment  # the customer identifier


class GlobalContextManager(threading.local):
    """ Keeps a stack context for each thread in Barbante.
        This is a singleton and shouldn't be instanced elsewhere but inside this
        module.
    """
    def __init__(self):
        super().__init__()
        self.stack = []

    def reset(self):
        """ Empties this thread's context stack. """
        self.stack.clear()

    def get_context(self) -> RequestContext:
        """ Returns the innermost context for this thread, or a default
            RequestContext when no context has been pushed.
        """
        if len(self.stack) > 0:
            return self.stack[-1]
        else:
            return RequestContext()

    @contextlib.contextmanager
    def push_context(self, context: RequestContext):
        """ Stacks a new context for this thread for the duration of the
            governed block.
        """
        self.stack.append(context)
        try:
            yield
        finally:
            # Fix: pop inside a finally block so an exception raised in the
            # governed block cannot leave a stale context on the stack
            # (previously the pop was skipped on error).
            self.stack.pop()

    @staticmethod
    def prepare_context(tracer_id: str, endpoint: str, environment: str):
        """
        :param tracer_id: request tracer id, if there is one
        :param endpoint: name of the endpoint that was requested
        :param environment: the customer identifier
        :return: a new RequestContext object
        """
        tracer_id = GlobalContextManager.parse_guid(tracer_id)
        return RequestContext(tracer_id, endpoint, environment)

    @staticmethod
    def run_with_new_context(func, tracer_id: str=None, endpoint: str=None,
                             environment: str=None):
        """ Stacks a new context in the thread's current stack and then run the
            method ``work``. Does the same as ``new_session`` but in an uglier
            fashion. Use ``new_session`` if possible.

            :param func: a function (usually a partial function) with the work
                to be done.
            :param tracer_id: an optional tracer id used when logging
            :param endpoint: name of the endpoint that was requested
            :param environment: the customer identifier
        """
        context = GlobalContextManager.prepare_context(tracer_id, endpoint, environment)
        stack_context = StackContext(partial(global_context_manager.push_context, context))
        return run_with_stack_context(stack_context, func)

    @staticmethod
    def generate_guid():
        """ Returns a brand-new random uuid.UUID. """
        return uuid.uuid4()

    @staticmethod
    def parse_guid(guid: str, create_if_invalid: bool=True) -> str:
        """ Tries to parse a given string containing a GUID. If the GUID is not
            valid or the string is None, generates a new GUID and returns it.

            :param guid: string with some GUID to be parsed
            :param create_if_invalid: if True and `guid` is None or invalid, the
                method generates and returns a new UUID
            :return: the parsed GUID, or a new GUID if the one given is invalid.
                None if `guid` is invalid and `create_if_invalid` is False.
        """
        if guid is None:
            # No tracer id was passed. Generate one now.
            tid = GlobalContextManager.generate_guid() if create_if_invalid else None
        else:
            try:
                tid = uuid.UUID(guid)
            except ValueError:
                # An invalid UUID was passed. Ignore it and generate a new one.
                # Fix: warning() instead of the deprecated warn() alias.
                log.warning('An invalid UUID was given: "{}"'.format(guid))
                tid = GlobalContextManager.generate_guid() if create_if_invalid else None
        if tid is None:
            # Fix: previously str(None) produced the literal string "None"
            # instead of the None promised by the docstring.
            return None
        return str(tid).replace('-', '')  # our convention uses UUID without hyphens


global_context_manager = GlobalContextManager()
""" The global context stack. Every thread will have a unique stack. Each entry
    in the stack is an object containing data relevant to a certain request
    being handled. Tornado is responsible for maintaining the stack and
    automatically switches context every time a new asynchronous operation
    starts executing or is preempted in behalf of another operation.
"""


@contextlib.contextmanager
def new_context(tracer_id: str=None, endpoint: str=None, environment: str=None):
    """ Opens a new context.

        :param tracer_id: string with tracer log information. If not given, one
            will be generated.
        :param endpoint: name of the endpoint that was requested
        :param environment: the customer identifier

        Usage:
            with new_context(my_tracer_id):
                do_some_task_using_the_new_context()
    """
    context = GlobalContextManager.prepare_context(tracer_id, endpoint, environment)
    stack_context = StackContext(partial(global_context_manager.push_context, context))
    with stack_context:
        yield


def get_context() -> RequestContext:
    """ See GlobalContextManager.get_context() """
    return global_context_manager.get_context()


def wrap(func):
    """ See tornado.concurrent.wrap """
    return tornado_wrap(func)
barbante/context/context_manager.py
import threading
import contextlib
import uuid
from functools import partial

from tornado.stack_context import run_with_stack_context, StackContext
from tornado.concurrent import wrap as tornado_wrap

from barbante.utils.logging import get_logger

log = get_logger(__name__)


class RequestContext:
    """ An object responsible for keeping data that must be globally accessible
        relative to a certain request.
    """
    def __init__(self, tracer_id: str='UNAVAILABLE', endpoint: str='UNAVAILABLE',
                 environment: str='UNAVAILABLE'):
        self.tracer_id = tracer_id      # hyphen-less UUID string tagging the request
        self.endpoint = endpoint        # name of the endpoint that was requested
        self.environment = environment  # the customer identifier


class GlobalContextManager(threading.local):
    """ Keeps a stack context for each thread in Barbante.
        This is a singleton and shouldn't be instanced elsewhere but inside this
        module.
    """
    def __init__(self):
        super().__init__()
        self.stack = []

    def reset(self):
        """ Empties this thread's context stack. """
        self.stack.clear()

    def get_context(self) -> RequestContext:
        """ Returns the innermost context for this thread, or a default
            RequestContext when no context has been pushed.
        """
        if len(self.stack) > 0:
            return self.stack[-1]
        else:
            return RequestContext()

    @contextlib.contextmanager
    def push_context(self, context: RequestContext):
        """ Stacks a new context for this thread for the duration of the
            governed block.
        """
        self.stack.append(context)
        try:
            yield
        finally:
            # Fix: pop inside a finally block so an exception raised in the
            # governed block cannot leave a stale context on the stack
            # (previously the pop was skipped on error).
            self.stack.pop()

    @staticmethod
    def prepare_context(tracer_id: str, endpoint: str, environment: str):
        """
        :param tracer_id: request tracer id, if there is one
        :param endpoint: name of the endpoint that was requested
        :param environment: the customer identifier
        :return: a new RequestContext object
        """
        tracer_id = GlobalContextManager.parse_guid(tracer_id)
        return RequestContext(tracer_id, endpoint, environment)

    @staticmethod
    def run_with_new_context(func, tracer_id: str=None, endpoint: str=None,
                             environment: str=None):
        """ Stacks a new context in the thread's current stack and then run the
            method ``work``. Does the same as ``new_session`` but in an uglier
            fashion. Use ``new_session`` if possible.

            :param func: a function (usually a partial function) with the work
                to be done.
            :param tracer_id: an optional tracer id used when logging
            :param endpoint: name of the endpoint that was requested
            :param environment: the customer identifier
        """
        context = GlobalContextManager.prepare_context(tracer_id, endpoint, environment)
        stack_context = StackContext(partial(global_context_manager.push_context, context))
        return run_with_stack_context(stack_context, func)

    @staticmethod
    def generate_guid():
        """ Returns a brand-new random uuid.UUID. """
        return uuid.uuid4()

    @staticmethod
    def parse_guid(guid: str, create_if_invalid: bool=True) -> str:
        """ Tries to parse a given string containing a GUID. If the GUID is not
            valid or the string is None, generates a new GUID and returns it.

            :param guid: string with some GUID to be parsed
            :param create_if_invalid: if True and `guid` is None or invalid, the
                method generates and returns a new UUID
            :return: the parsed GUID, or a new GUID if the one given is invalid.
                None if `guid` is invalid and `create_if_invalid` is False.
        """
        if guid is None:
            # No tracer id was passed. Generate one now.
            tid = GlobalContextManager.generate_guid() if create_if_invalid else None
        else:
            try:
                tid = uuid.UUID(guid)
            except ValueError:
                # An invalid UUID was passed. Ignore it and generate a new one.
                # Fix: warning() instead of the deprecated warn() alias.
                log.warning('An invalid UUID was given: "{}"'.format(guid))
                tid = GlobalContextManager.generate_guid() if create_if_invalid else None
        if tid is None:
            # Fix: previously str(None) produced the literal string "None"
            # instead of the None promised by the docstring.
            return None
        return str(tid).replace('-', '')  # our convention uses UUID without hyphens


global_context_manager = GlobalContextManager()
""" The global context stack. Every thread will have a unique stack. Each entry
    in the stack is an object containing data relevant to a certain request
    being handled. Tornado is responsible for maintaining the stack and
    automatically switches context every time a new asynchronous operation
    starts executing or is preempted in behalf of another operation.
"""


@contextlib.contextmanager
def new_context(tracer_id: str=None, endpoint: str=None, environment: str=None):
    """ Opens a new context.

        :param tracer_id: string with tracer log information. If not given, one
            will be generated.
        :param endpoint: name of the endpoint that was requested
        :param environment: the customer identifier

        Usage:
            with new_context(my_tracer_id):
                do_some_task_using_the_new_context()
    """
    context = GlobalContextManager.prepare_context(tracer_id, endpoint, environment)
    stack_context = StackContext(partial(global_context_manager.push_context, context))
    with stack_context:
        yield


def get_context() -> RequestContext:
    """ See GlobalContextManager.get_context() """
    return global_context_manager.get_context()


def wrap(func):
    """ See tornado.concurrent.wrap """
    return tornado_wrap(func)
0.692018
0.189577
import simpy, scipy, numpy, random
from src.core.linear_congruential_generator import LinearCongruentialGenerator

# Simulation parameters: each test simulates one week, measured in hours.
RANDOM_SEED = 42
NUM_COMPONENTES_INDEPENDENTES = 2
QUANTIDADE_TESTES = 5
TEMPO_SIMULACAO = 7 * 24


def tef_uniform():
    """return a random value from uniform distribuition"""
    # NOTE(review): scipy.random is a deprecated alias of numpy.random and is
    # removed in recent SciPy releases -- confirm the pinned SciPy version.
    return scipy.random.uniform(0.0, 8.0)  # hours


def tef_expo():
    """return a random value from exponential distribuition"""
    # NOTE(review): returns an ARRAY of 10 samples rather than a scalar, and
    # is never called -- tef() below uses random.expovariate for component B
    # despite the comments in break_machine(). Verify intent.
    return scipy.random.standard_exponential(10)  # hours


def tempo_do_equipamento_funcionando():
    """Return actual processing time for a concrete part."""
    return random.normalvariate(10.0, 2.0)


def tef(componente_id):
    """Return time until next failure for a machine."""
    if componente_id == 1:
        return tef_uniform()  # TEF COMPONENTE A: uniform(0, 8) hours
    elif componente_id == 2:
        return random.expovariate(10)  # TEF COMPONENTE B: exponential, rate 10


def calcular_z(r1, r2):
    # NOTE(review): resembles a Box-Muller transform, but Box-Muller uses
    # sin(2*pi*r2); here 2*180*r2 looks like degrees while numpy.sin expects
    # radians -- confirm this is intentional.
    return numpy.sqrt(-2 * numpy.log(r1)) * numpy.sin(2 * 180 * r2) + numpy.random.randint(8, 10)


class EquipamentoDoisComponentesIndependentes(object):
    """A piece of equipment with two independently failing components."""

    def __init__(self, env, name, equipamento_funcionando):
        self.env = env
        # NOTE(review): callers pass the literal 'Equipamento %d'; the %d
        # placeholder is never substituted (see the recorded output below).
        self.name = name
        self.tempo_entre_falhas_total = 0  # accumulated failure time, in hours
        self.broken = False
        self.componente_A = 1
        self.componente_B = 2
        # Start "working" and "break_machine" processes for this machine.
        self.process = env.process(self.working(equipamento_funcionando))
        env.process(self.break_machine(self.componente_A))
        env.process(self.break_machine(self.componente_B))

    def working(self, reparador_de_componente):
        lcg = LinearCongruentialGenerator()
        while True:
            r1 = lcg.generate_random_numbers(1).pop()
            r2 = lcg.generate_random_numbers(1, initial_seed=r1).pop() + 10
            # Generate a random x using the Linear Congruential Generator.
            done_in = abs(calcular_z(r1, r2))
            # Wait out the working period; a component failure interrupts it.
            while done_in:
                try:
                    start = self.env.now
                    yield self.env.timeout(done_in)
                    done_in = 0
                except simpy.Interrupt:
                    self.broken = True
                    done_in -= self.env.now - start
                    # Record the time the component spent failing.
                    # NOTE(review): this adds the *remaining* work time at the
                    # moment of the interrupt -- verify this is the intended
                    # downtime metric.
                    self.tempo_entre_falhas_total += done_in
                    # Request a repairer so the component goes back to working.
                    with reparador_de_componente.request(priority=1) as req:
                        yield req
                        yield self.env.timeout(tempo_do_equipamento_funcionando())
                    self.broken = False

    def break_machine(self, componente_id):
        # Component-failure process. tef() below picks the distribution by
        # component type: component A uses a uniform distribution (hours);
        # component B uses an exponential distribution.
        while True:
            yield self.env.timeout(tef(componente_id))
            if not self.broken:
                # Only break the machine if it is currently working.
                self.process.interrupt()


# Analyis/results
print('Equipamento - 2 Componentes Independentes\n')
print('Resultados depois de %s testes, cada teste de 1 semana (em horas).\n' % QUANTIDADE_TESTES)
media_tempo_falhas = 0
for teste_semanal in range(QUANTIDADE_TESTES):
    # Setup and start the simulation
    random.seed(RANDOM_SEED)  # This helps reproducing the results

    # Create an environment and start the setup process
    env = simpy.Environment()
    equipamento_funcionando = simpy.PreemptiveResource(env, capacity=1)
    equipamento = EquipamentoDoisComponentesIndependentes(env, 'Equipamento %d', equipamento_funcionando)

    # Execute!
    env.run(until=TEMPO_SIMULACAO)

    print('%s no teste nro %d executou 7 [dias] * 24 [horas] {= %d horas}, falhou %d [horas].\n'
          % (equipamento.name, teste_semanal, TEMPO_SIMULACAO, equipamento.tempo_entre_falhas_total))
    media_tempo_falhas += equipamento.tempo_entre_falhas_total

print('Em media o equipamento falha por %d / semana' % (media_tempo_falhas / QUANTIDADE_TESTES))

# Recorded sample output (kept verbatim):
#Equipamento - 2 Componentes Independentes

#Resultados depois de 5 testes, cada teste de 1 semana (em horas).

#Equipamento %d no teste nro 0 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 68 [horas].

#Equipamento %d no teste nro 1 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 85 [horas].

#Equipamento %d no teste nro 2 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 64 [horas].

#Equipamento %d no teste nro 3 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 84 [horas].

#Equipamento %d no teste nro 4 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 84 [horas].

#Em media o equipamento falha por 77 / semana

#Process finished with exit code 0
src/core/problema4.py
import simpy, scipy, numpy, random
from src.core.linear_congruential_generator import LinearCongruentialGenerator

# Simulation parameters: each test simulates one week, measured in hours.
RANDOM_SEED = 42
NUM_COMPONENTES_INDEPENDENTES = 2
QUANTIDADE_TESTES = 5
TEMPO_SIMULACAO = 7 * 24


def tef_uniform():
    """return a random value from uniform distribuition"""
    # NOTE(review): scipy.random is a deprecated alias of numpy.random and is
    # removed in recent SciPy releases -- confirm the pinned SciPy version.
    return scipy.random.uniform(0.0, 8.0)  # hours


def tef_expo():
    """return a random value from exponential distribuition"""
    # NOTE(review): returns an ARRAY of 10 samples rather than a scalar, and
    # is never called -- tef() below uses random.expovariate for component B
    # despite the comments in break_machine(). Verify intent.
    return scipy.random.standard_exponential(10)  # hours


def tempo_do_equipamento_funcionando():
    """Return actual processing time for a concrete part."""
    return random.normalvariate(10.0, 2.0)


def tef(componente_id):
    """Return time until next failure for a machine."""
    if componente_id == 1:
        return tef_uniform()  # TEF COMPONENTE A: uniform(0, 8) hours
    elif componente_id == 2:
        return random.expovariate(10)  # TEF COMPONENTE B: exponential, rate 10


def calcular_z(r1, r2):
    # NOTE(review): resembles a Box-Muller transform, but Box-Muller uses
    # sin(2*pi*r2); here 2*180*r2 looks like degrees while numpy.sin expects
    # radians -- confirm this is intentional.
    return numpy.sqrt(-2 * numpy.log(r1)) * numpy.sin(2 * 180 * r2) + numpy.random.randint(8, 10)


class EquipamentoDoisComponentesIndependentes(object):
    """A piece of equipment with two independently failing components."""

    def __init__(self, env, name, equipamento_funcionando):
        self.env = env
        # NOTE(review): callers pass the literal 'Equipamento %d'; the %d
        # placeholder is never substituted (see the recorded output below).
        self.name = name
        self.tempo_entre_falhas_total = 0  # accumulated failure time, in hours
        self.broken = False
        self.componente_A = 1
        self.componente_B = 2
        # Start "working" and "break_machine" processes for this machine.
        self.process = env.process(self.working(equipamento_funcionando))
        env.process(self.break_machine(self.componente_A))
        env.process(self.break_machine(self.componente_B))

    def working(self, reparador_de_componente):
        lcg = LinearCongruentialGenerator()
        while True:
            r1 = lcg.generate_random_numbers(1).pop()
            r2 = lcg.generate_random_numbers(1, initial_seed=r1).pop() + 10
            # Generate a random x using the Linear Congruential Generator.
            done_in = abs(calcular_z(r1, r2))
            # Wait out the working period; a component failure interrupts it.
            while done_in:
                try:
                    start = self.env.now
                    yield self.env.timeout(done_in)
                    done_in = 0
                except simpy.Interrupt:
                    self.broken = True
                    done_in -= self.env.now - start
                    # Record the time the component spent failing.
                    # NOTE(review): this adds the *remaining* work time at the
                    # moment of the interrupt -- verify this is the intended
                    # downtime metric.
                    self.tempo_entre_falhas_total += done_in
                    # Request a repairer so the component goes back to working.
                    with reparador_de_componente.request(priority=1) as req:
                        yield req
                        yield self.env.timeout(tempo_do_equipamento_funcionando())
                    self.broken = False

    def break_machine(self, componente_id):
        # Component-failure process. tef() below picks the distribution by
        # component type: component A uses a uniform distribution (hours);
        # component B uses an exponential distribution.
        while True:
            yield self.env.timeout(tef(componente_id))
            if not self.broken:
                # Only break the machine if it is currently working.
                self.process.interrupt()


# Analyis/results
print('Equipamento - 2 Componentes Independentes\n')
print('Resultados depois de %s testes, cada teste de 1 semana (em horas).\n' % QUANTIDADE_TESTES)
media_tempo_falhas = 0
for teste_semanal in range(QUANTIDADE_TESTES):
    # Setup and start the simulation
    random.seed(RANDOM_SEED)  # This helps reproducing the results

    # Create an environment and start the setup process
    env = simpy.Environment()
    equipamento_funcionando = simpy.PreemptiveResource(env, capacity=1)
    equipamento = EquipamentoDoisComponentesIndependentes(env, 'Equipamento %d', equipamento_funcionando)

    # Execute!
    env.run(until=TEMPO_SIMULACAO)

    print('%s no teste nro %d executou 7 [dias] * 24 [horas] {= %d horas}, falhou %d [horas].\n'
          % (equipamento.name, teste_semanal, TEMPO_SIMULACAO, equipamento.tempo_entre_falhas_total))
    media_tempo_falhas += equipamento.tempo_entre_falhas_total

print('Em media o equipamento falha por %d / semana' % (media_tempo_falhas / QUANTIDADE_TESTES))

# Recorded sample output (kept verbatim):
#Equipamento - 2 Componentes Independentes

#Resultados depois de 5 testes, cada teste de 1 semana (em horas).

#Equipamento %d no teste nro 0 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 68 [horas].

#Equipamento %d no teste nro 1 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 85 [horas].

#Equipamento %d no teste nro 2 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 64 [horas].

#Equipamento %d no teste nro 3 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 84 [horas].

#Equipamento %d no teste nro 4 executou 7 [dias] * 24 [horas] {= 168 horas}, falhou 84 [horas].

#Em media o equipamento falha por 77 / semana

#Process finished with exit code 0
0.696268
0.374162
import numpy as np import dash from dash.dependencies import Input, Output import dash_core_components as dcc import dash_html_components as html import plotly.graph_objs as go import data_loader import data_analysis from data_analysis import Regressor def create_geodict(): map_dict = dict( type="scattergl", # locationmode="country names", # lon=[], # lat=[], x=[], y=[], text=[], mode="markers", marker=dict( size=0, opacity=0.4, color=[] ) ) # geodict = dict( # scope="europe", # projection=dict(type="natural earth"), # showland=True, # landcolor="rgb(250, 250, 250)", # subunitcolor="rgb(217, 217, 217)", # countrycolor="rgb(217, 217, 217)" # #bgcolor="#191A1A", # #countrywidth=1.0, # #subunitwidth=1.0, # #resolution=50 # ) layout = dict( autosize=True, height=750, hovermode="closest", # plot_bgcolor="#191A1A", # paper_bgcolor="#020202", # geo=geodict, xaxis=dict( range=[-5.56, 9.67]), yaxis=dict( range=[41.30, 51.13], scaleanchor='x', scaleratio=1.0) ) return map_dict, layout external_stylesheets = [ 'https://codepen.io/chriddyp/pen/bWLwgP.css' ] app = dash.Dash(external_stylesheets=external_stylesheets) df = data_loader.df df_referendum = data_loader.df_referendum df_latlon = data_loader.df_latlon df_pop_str_edu = data_loader.df_pop_str_edu df_popfiscal = data_loader.df_popfiscal dfcorr = data_loader.dfcorr regressors, rmse, rmse_scores, x_keys, y_keys, data = ( data_analysis.load_regresults()) big_regressors, big_rmse, big_rmse_scores, big_x_keys, big_y_keys, big_data = ( data_analysis.load_regresults( "results/regressors_bigcities_impfeatures.p")) def plot_overall_results(): mainland, overseasterr, abroad = data_loader.get_referendum_by_regions() referendum_regions = ["Mainland", "Overseas Territories", "Abroad"] name_fields = ["Libellé de la commune", "Libellé du département"] top_ca_ix = np.argsort(df["Choix A (%)"].values)[-10:] top_cb_ix = np.argsort(df["Choix B (%)"].values)[-10:] choix_a_frac = df["Choix A"].sum()/(df["Choix A"] + df["Choix B"]).sum() 
choix_b_frac = df["Choix B"].sum()/(df["Choix A"] + df["Choix B"]).sum() layout = html.Div( [ html.Div( [ html.H3("Overall result"), dcc.Markdown( "We plot the overall result by aggregating valid " "votes from all regions. Referendum results ignore " "abstentions and blanc votes."), html.P("Choix A: " + str(np.round(choix_a_frac*100, 3)) + " %"), html.P("Choix B: " + str(np.round(choix_b_frac*100, 3)) + " %"), html.P("Referendum result: " + ("Choix A" if choix_a_frac > choix_b_frac else "Choix B")), html.H5("Top 10 cities voting for Choix A by percent " "[Commune, Department %]"), html.P(', '.join([ '[' + ', '.join(df.iloc[ix_][name_fields].values) + ', ' + str(np.round(df.iloc[ix_]["Choix A (%)"], 1)) + ']' for ix_ in top_ca_ix])), html.H5("Top 10 cities voting for Choix B by percent " "[Commune, Department %]"), html.P(', '.join([ '[' + ', '.join(df.iloc[ix_][name_fields].values) + ', ' + str(np.round(df.iloc[ix_]["Choix B (%)"], 1)) + ']' for ix_ in top_cb_ix])), html.H5("Results according to Mainland France, French " "Overseas Territories and France Abroad"), dcc.Graph(figure=dict( data=[ dict( x=referendum_regions, y=[mainland[cat], overseasterr[cat], abroad[cat]], name=cat, type="bar" ) for cat in mainland.index], layout={"barmode": 'stack'} ) ) ], className="ten columns" ) ], className="row" ) return layout def plot_histograms(): layout = html.Div( [ html.H3("Histogram of Votes by Category") ] + [ html.Div( [ html.Div( [ dcc.Graph( id='hist_'+key_, figure={ 'data': [ { 'x': df[key_], 'name': key_, 'type': 'histogram', 'hoverinfo': 'x+y', 'title': key_ } ], 'layout': { 'title': key_} } ) ], className="five columns" ) for key_ in options ], className="row" ) for options in (['Choix A (%)', 'Choix B (%)'], ['Abstentions (%)', 'Blancs (%)']) ] ) return layout def plot_correlation(): map_data, layout = create_geodict() layout.pop("xaxis") layout.pop("yaxis") layout = html.Div( [ html.H3("Correlations between the variables"), html.P("We plot the correlation 
between the various input and " "output variables. All population fields are converted " "to fraction of the population (of the commune) by " "normalising by the total population for that commune."), html.Div( [ dcc.Graph( id='correlations', figure={ 'data': [ { 'x': dfcorr.columns.tolist(), 'y': dfcorr.index.tolist(), 'z': dfcorr.values, 'type': 'heatmap', # 'hoverinfo': 'x+y', 'title': "Correlations" } ], 'layout': { "height": 950, "margin": {'l': 600}} } ) ] ), html.P("We can plot the a scatter of the variables to visually " "examine any potential relationships."), html.Div( [ dcc.Dropdown( id='xcorr_key', options=[ {'label': key_, 'value': key_} for key_ in dfcorr.index], value=dfcorr.index[0] ), dcc.Dropdown( id='ycorr_key', options=[ {'label': key_, 'value': key_} for key_ in dfcorr.columns], value=dfcorr.columns[0] ), dcc.Graph( id="corr_plot", figure=go.Figure(data=[map_data], layout=go.Layout(**layout)) ) ], className="ten columns" ) ], className="row" ) return layout def map_layout(): plot_options = [key_ for key_ in df.keys()[4:] if key_ not in ("Latitude", "Longitude")] map_data, layout = create_geodict() return html.Div( [ html.H3("Visualising the data"), html.P("Data is plotted according to the geographic location of " "the communes. The region is restricted to mainland " "France. The data also contains results from regions " "outside mainland France, this data is not shown in the " "figure."), html.P("Colour values are scaled according to data from the " "entire data set. Some ranges may not correspond to any " "data shown on the map."), html.P("Colous is used to indicate the field of interest." 
"<br>Marker size is indicative of the 'Inscrits' field."), html.Div( [ dcc.Dropdown( id='map_key', options=[ {'label': key_, 'value': key_} for key_ in plot_options], value='Inscrits' ), dcc.Graph( id="map_view", figure=go.Figure(data=[map_data], layout=go.Layout(**layout)) ) ], className="ten columns" ) ], className="row" ) def feature_importance_graph(): layout = html.Div( [ html.H3("Predictors of Referendum Outcome"), html.P("The feature importances (or coefficients) indicate which " "features in the input influence the predicted values.<br>" "These can be plotted below for the various regressors."), html.Div( [ dcc.Dropdown( id="featimp_regressor", options=[ {"label": key_, "value": key_} for key_ in regressors], value="RandomForest" ), dcc.Dropdown( id="featimp_category", options=[ {"label": key_, "value": key_} for key_ in y_keys], value=y_keys[0] ), dcc.Graph( id="featimp_graph", figure=go.Figure(data=[], layout=go.Layout()) ) ], className="ten columns" ) ], className="row" ) return layout def performance_graph(): layout = html.Div( [ html.H3("Performance of the Regressors"), html.P("We show the predicted vs true values of the corresponding " "outcome variable for the training and testing data sets."), html.Div( [ dcc.Dropdown( id="perf_datasel", options=[ {"label": "All cities/All features", "value": "all"}, {"label": "Big cities (population >10e3) / Important features", "value": "big"}], value="all"), dcc.Dropdown( id="perf_regressor", options=[ {"label": key_, "value": key_} for key_ in regressors], value="RandomForest"), dcc.Dropdown( id="perf_category", options=[ {"label": key_, "value": key_} for key_ in y_keys], value=y_keys[0] ), dcc.Graph( id="perf_graph", figure=go.Figure(data=[], layout=go.Layout()) ) ], className="ten columns" ) ], className="row" ) return layout def serve_layout(): layout = html.Div( [ html.H1("Referendum Analysis"), html.P("We assume the referendum categories (A/B/Abst/Blanc) to " "be the output variables in our analysis. 
We aim to study " "the main drivers behind a particular category in terms of " "some input variables describing each commune " "(population age, gender, education, etc.)."), html.P("Referendum data contains results from regions outside " "mainland France. Supporting data corresponding to " "population, education, etc. does not list values for " "these regions. Consequently, we constrain our analysis " "of the data to mainland France"), plot_overall_results(), plot_histograms(), plot_correlation(), map_layout(), feature_importance_graph(), performance_graph() ] ) return layout app.layout = serve_layout @app.callback(Output("map_view", "figure"), [Input("map_key", "value")]) def draw_map(map_key): map_data, layout = create_geodict() x = df["Longitude"].values y = df["Latitude"].values hovertext = np.asarray([ name_[0] + ', ' + name_[1] + '<br>' + map_key + ' = ' + str(name_[2]) for name_ in zip(df["Libellé de la commune"].values, df["Libellé du département"].values, df[map_key].values)]) marker_size = df["Inscrits"].values color = df[map_key].values valid_ix = np.isfinite(color) map_data["x"] = x[valid_ix] map_data["y"] = y[valid_ix] map_data["hovertext"] = hovertext[valid_ix] map_data["hoverinfo"] = "text" max_size_raw = df["Inscrits"].max() max_size_marker = 80 map_data["marker"] = dict( opacity=0.5, size=marker_size[valid_ix], sizemode="area", sizeref=2. 
* max_size_raw / (max_size_marker ** 2), sizemin=2, color=color[valid_ix], colorscale="Jet", showscale=True ) figure = {"data": [map_data], "layout": layout} return figure @app.callback(Output("corr_plot", "figure"), [Input("xcorr_key", "value"), Input("ycorr_key", "value")]) def draw_correlation(xcorr_key, ycorr_key): map_data, layout = create_geodict() layout.pop("xaxis") layout.pop("yaxis") color_key = "Population en 2013 (princ)" map_data["x"] = df[xcorr_key].values map_data["y"] = df[ycorr_key].values map_data["hovertext"] = [ name_[0] + ', ' + name_[1] for name_ in zip(df["Libellé de la commune"].values, df["Libellé du département"].values)] map_data["hoverinfo"] = "x+y+text" max_size_raw = df["Inscrits"].max() max_size_marker = 80 map_data["marker"] = dict( # opacity=0.5, size=df[color_key].values, sizemode="area", sizeref=2. * max_size_raw / (max_size_marker ** 2), sizemin=2, color=df[color_key].values, colorscale="Jet", showscale=True, colorbar={ "title": color_key} ) layout["xaxis"] = { "title": xcorr_key} layout["yaxis"] = { "title": ycorr_key} figure = {"data": [map_data], "layout": layout} return figure @app.callback(Output("featimp_graph", "figure"), [Input("featimp_regressor", "value"), Input("featimp_category", "value")]) def draw_featimp(regr_name, cat_name): cat_idx = np.where(np.asarray(y_keys) == cat_name)[0][0] regr = regressors[regr_name].regressor[cat_idx] feature_importances = getattr( regr, "feature_importances_", None) if feature_importances is None: feature_importances = regr.coef_ trace = dict( type="bar", x=x_keys, y=feature_importances, hoverinfo="x+y", ) layout = { "height": 800, "margin": {'b': 500}} figure = {"data": [trace], "layout": layout} return figure @app.callback(Output("perf_graph", "figure"), [Input("perf_regressor", "value"), Input("perf_category", "value"), Input("perf_datasel", "value")]) def draw_perf(regr_name, cat_name, datasel): cat_idx = np.where(np.asarray(y_keys) == cat_name)[0][0] if datasel == "all": regr = 
regressors[regr_name].regressor[cat_idx] plot_data = { "train": { 'x': data["default"]["train_x"], 'y': data["default"]["train_y"].values[:, cat_idx]}, "test": { 'x': data["default"]["test_x"], 'y': data["default"]["test_y"].values[:, cat_idx]} } else: regr = big_regressors[regr_name].regressor[cat_idx] plot_data = { "train": { 'x': big_data["default"]["train_x"], 'y': big_data["default"]["train_y"].values[:, cat_idx]}, "test": { 'x': big_data["default"]["test_x"], 'y': big_data["default"]["test_y"].values[:, cat_idx]} } figdata = [] pred_acc = {} for tt_ in plot_data.keys(): x_ = plot_data[tt_]["x"] ytrue_ = plot_data[tt_]["y"] y_ = regr.predict(x_) pred_acc[tt_] = ( "RMSE = " + str(round(data_analysis.mean_squared_error(ytrue_, y_)**0.5, 2)) + ", MAE = " + str(round(data_analysis.mean_absolute_error(ytrue_, y_), 2)) + ", R2 = " + str(round(data_analysis.r2_score(ytrue_, y_), 2))) trace, layout = create_geodict() trace["x"] = ytrue_ trace["y"] = y_ trace["hovertext"] = [ name_[0] + ', ' + name_[1] for name_ in zip(df["Libellé de la commune"].values, df["Libellé du département"].values)] trace["hoverinfo"] = "x+y+text" trace["marker"] = dict( size=4) trace["name"] = tt_.title() + " (" + pred_acc[tt_] + ")" figdata.append(trace) figdata.append( { "type": "scatter", "mode": "lines", "line": { "opacity": 0.5, "color": "black"}, "x": [0, 100], "y": [0, 100], "hovertext": "none", "text": "none", "showlegend": False } ) test_plot, layout = create_geodict() layout.pop("xaxis") layout.pop("yaxis") layout["height"] = 750 layout["width"] = 750 layout["xaxis"] = { "title": "True", "range": [0, 100]} layout["yaxis"] = { "title": "Predicted", "range": [0, 100], "scaleanchor": 'x', "scaleratio": 1.0} layout["title"] = cat_name figure = {"data": figdata, "layout": layout} return figure if __name__ == "__main__": app.run_server(host="0.0.0.0", debug=True, threaded=True)
src/app.py
import numpy as np import dash from dash.dependencies import Input, Output import dash_core_components as dcc import dash_html_components as html import plotly.graph_objs as go import data_loader import data_analysis from data_analysis import Regressor def create_geodict(): map_dict = dict( type="scattergl", # locationmode="country names", # lon=[], # lat=[], x=[], y=[], text=[], mode="markers", marker=dict( size=0, opacity=0.4, color=[] ) ) # geodict = dict( # scope="europe", # projection=dict(type="natural earth"), # showland=True, # landcolor="rgb(250, 250, 250)", # subunitcolor="rgb(217, 217, 217)", # countrycolor="rgb(217, 217, 217)" # #bgcolor="#191A1A", # #countrywidth=1.0, # #subunitwidth=1.0, # #resolution=50 # ) layout = dict( autosize=True, height=750, hovermode="closest", # plot_bgcolor="#191A1A", # paper_bgcolor="#020202", # geo=geodict, xaxis=dict( range=[-5.56, 9.67]), yaxis=dict( range=[41.30, 51.13], scaleanchor='x', scaleratio=1.0) ) return map_dict, layout external_stylesheets = [ 'https://codepen.io/chriddyp/pen/bWLwgP.css' ] app = dash.Dash(external_stylesheets=external_stylesheets) df = data_loader.df df_referendum = data_loader.df_referendum df_latlon = data_loader.df_latlon df_pop_str_edu = data_loader.df_pop_str_edu df_popfiscal = data_loader.df_popfiscal dfcorr = data_loader.dfcorr regressors, rmse, rmse_scores, x_keys, y_keys, data = ( data_analysis.load_regresults()) big_regressors, big_rmse, big_rmse_scores, big_x_keys, big_y_keys, big_data = ( data_analysis.load_regresults( "results/regressors_bigcities_impfeatures.p")) def plot_overall_results(): mainland, overseasterr, abroad = data_loader.get_referendum_by_regions() referendum_regions = ["Mainland", "Overseas Territories", "Abroad"] name_fields = ["Libellé de la commune", "Libellé du département"] top_ca_ix = np.argsort(df["Choix A (%)"].values)[-10:] top_cb_ix = np.argsort(df["Choix B (%)"].values)[-10:] choix_a_frac = df["Choix A"].sum()/(df["Choix A"] + df["Choix B"]).sum() 
choix_b_frac = df["Choix B"].sum()/(df["Choix A"] + df["Choix B"]).sum() layout = html.Div( [ html.Div( [ html.H3("Overall result"), dcc.Markdown( "We plot the overall result by aggregating valid " "votes from all regions. Referendum results ignore " "abstentions and blanc votes."), html.P("Choix A: " + str(np.round(choix_a_frac*100, 3)) + " %"), html.P("Choix B: " + str(np.round(choix_b_frac*100, 3)) + " %"), html.P("Referendum result: " + ("Choix A" if choix_a_frac > choix_b_frac else "Choix B")), html.H5("Top 10 cities voting for Choix A by percent " "[Commune, Department %]"), html.P(', '.join([ '[' + ', '.join(df.iloc[ix_][name_fields].values) + ', ' + str(np.round(df.iloc[ix_]["Choix A (%)"], 1)) + ']' for ix_ in top_ca_ix])), html.H5("Top 10 cities voting for Choix B by percent " "[Commune, Department %]"), html.P(', '.join([ '[' + ', '.join(df.iloc[ix_][name_fields].values) + ', ' + str(np.round(df.iloc[ix_]["Choix B (%)"], 1)) + ']' for ix_ in top_cb_ix])), html.H5("Results according to Mainland France, French " "Overseas Territories and France Abroad"), dcc.Graph(figure=dict( data=[ dict( x=referendum_regions, y=[mainland[cat], overseasterr[cat], abroad[cat]], name=cat, type="bar" ) for cat in mainland.index], layout={"barmode": 'stack'} ) ) ], className="ten columns" ) ], className="row" ) return layout def plot_histograms(): layout = html.Div( [ html.H3("Histogram of Votes by Category") ] + [ html.Div( [ html.Div( [ dcc.Graph( id='hist_'+key_, figure={ 'data': [ { 'x': df[key_], 'name': key_, 'type': 'histogram', 'hoverinfo': 'x+y', 'title': key_ } ], 'layout': { 'title': key_} } ) ], className="five columns" ) for key_ in options ], className="row" ) for options in (['Choix A (%)', 'Choix B (%)'], ['Abstentions (%)', 'Blancs (%)']) ] ) return layout def plot_correlation(): map_data, layout = create_geodict() layout.pop("xaxis") layout.pop("yaxis") layout = html.Div( [ html.H3("Correlations between the variables"), html.P("We plot the correlation 
between the various input and " "output variables. All population fields are converted " "to fraction of the population (of the commune) by " "normalising by the total population for that commune."), html.Div( [ dcc.Graph( id='correlations', figure={ 'data': [ { 'x': dfcorr.columns.tolist(), 'y': dfcorr.index.tolist(), 'z': dfcorr.values, 'type': 'heatmap', # 'hoverinfo': 'x+y', 'title': "Correlations" } ], 'layout': { "height": 950, "margin": {'l': 600}} } ) ] ), html.P("We can plot the a scatter of the variables to visually " "examine any potential relationships."), html.Div( [ dcc.Dropdown( id='xcorr_key', options=[ {'label': key_, 'value': key_} for key_ in dfcorr.index], value=dfcorr.index[0] ), dcc.Dropdown( id='ycorr_key', options=[ {'label': key_, 'value': key_} for key_ in dfcorr.columns], value=dfcorr.columns[0] ), dcc.Graph( id="corr_plot", figure=go.Figure(data=[map_data], layout=go.Layout(**layout)) ) ], className="ten columns" ) ], className="row" ) return layout def map_layout(): plot_options = [key_ for key_ in df.keys()[4:] if key_ not in ("Latitude", "Longitude")] map_data, layout = create_geodict() return html.Div( [ html.H3("Visualising the data"), html.P("Data is plotted according to the geographic location of " "the communes. The region is restricted to mainland " "France. The data also contains results from regions " "outside mainland France, this data is not shown in the " "figure."), html.P("Colour values are scaled according to data from the " "entire data set. Some ranges may not correspond to any " "data shown on the map."), html.P("Colous is used to indicate the field of interest." 
"<br>Marker size is indicative of the 'Inscrits' field."), html.Div( [ dcc.Dropdown( id='map_key', options=[ {'label': key_, 'value': key_} for key_ in plot_options], value='Inscrits' ), dcc.Graph( id="map_view", figure=go.Figure(data=[map_data], layout=go.Layout(**layout)) ) ], className="ten columns" ) ], className="row" ) def feature_importance_graph(): layout = html.Div( [ html.H3("Predictors of Referendum Outcome"), html.P("The feature importances (or coefficients) indicate which " "features in the input influence the predicted values.<br>" "These can be plotted below for the various regressors."), html.Div( [ dcc.Dropdown( id="featimp_regressor", options=[ {"label": key_, "value": key_} for key_ in regressors], value="RandomForest" ), dcc.Dropdown( id="featimp_category", options=[ {"label": key_, "value": key_} for key_ in y_keys], value=y_keys[0] ), dcc.Graph( id="featimp_graph", figure=go.Figure(data=[], layout=go.Layout()) ) ], className="ten columns" ) ], className="row" ) return layout def performance_graph(): layout = html.Div( [ html.H3("Performance of the Regressors"), html.P("We show the predicted vs true values of the corresponding " "outcome variable for the training and testing data sets."), html.Div( [ dcc.Dropdown( id="perf_datasel", options=[ {"label": "All cities/All features", "value": "all"}, {"label": "Big cities (population >10e3) / Important features", "value": "big"}], value="all"), dcc.Dropdown( id="perf_regressor", options=[ {"label": key_, "value": key_} for key_ in regressors], value="RandomForest"), dcc.Dropdown( id="perf_category", options=[ {"label": key_, "value": key_} for key_ in y_keys], value=y_keys[0] ), dcc.Graph( id="perf_graph", figure=go.Figure(data=[], layout=go.Layout()) ) ], className="ten columns" ) ], className="row" ) return layout def serve_layout(): layout = html.Div( [ html.H1("Referendum Analysis"), html.P("We assume the referendum categories (A/B/Abst/Blanc) to " "be the output variables in our analysis. 
We aim to study " "the main drivers behind a particular category in terms of " "some input variables describing each commune " "(population age, gender, education, etc.)."), html.P("Referendum data contains results from regions outside " "mainland France. Supporting data corresponding to " "population, education, etc. does not list values for " "these regions. Consequently, we constrain our analysis " "of the data to mainland France"), plot_overall_results(), plot_histograms(), plot_correlation(), map_layout(), feature_importance_graph(), performance_graph() ] ) return layout app.layout = serve_layout @app.callback(Output("map_view", "figure"), [Input("map_key", "value")]) def draw_map(map_key): map_data, layout = create_geodict() x = df["Longitude"].values y = df["Latitude"].values hovertext = np.asarray([ name_[0] + ', ' + name_[1] + '<br>' + map_key + ' = ' + str(name_[2]) for name_ in zip(df["Libellé de la commune"].values, df["Libellé du département"].values, df[map_key].values)]) marker_size = df["Inscrits"].values color = df[map_key].values valid_ix = np.isfinite(color) map_data["x"] = x[valid_ix] map_data["y"] = y[valid_ix] map_data["hovertext"] = hovertext[valid_ix] map_data["hoverinfo"] = "text" max_size_raw = df["Inscrits"].max() max_size_marker = 80 map_data["marker"] = dict( opacity=0.5, size=marker_size[valid_ix], sizemode="area", sizeref=2. 
* max_size_raw / (max_size_marker ** 2), sizemin=2, color=color[valid_ix], colorscale="Jet", showscale=True ) figure = {"data": [map_data], "layout": layout} return figure @app.callback(Output("corr_plot", "figure"), [Input("xcorr_key", "value"), Input("ycorr_key", "value")]) def draw_correlation(xcorr_key, ycorr_key): map_data, layout = create_geodict() layout.pop("xaxis") layout.pop("yaxis") color_key = "Population en 2013 (princ)" map_data["x"] = df[xcorr_key].values map_data["y"] = df[ycorr_key].values map_data["hovertext"] = [ name_[0] + ', ' + name_[1] for name_ in zip(df["Libellé de la commune"].values, df["Libellé du département"].values)] map_data["hoverinfo"] = "x+y+text" max_size_raw = df["Inscrits"].max() max_size_marker = 80 map_data["marker"] = dict( # opacity=0.5, size=df[color_key].values, sizemode="area", sizeref=2. * max_size_raw / (max_size_marker ** 2), sizemin=2, color=df[color_key].values, colorscale="Jet", showscale=True, colorbar={ "title": color_key} ) layout["xaxis"] = { "title": xcorr_key} layout["yaxis"] = { "title": ycorr_key} figure = {"data": [map_data], "layout": layout} return figure @app.callback(Output("featimp_graph", "figure"), [Input("featimp_regressor", "value"), Input("featimp_category", "value")]) def draw_featimp(regr_name, cat_name): cat_idx = np.where(np.asarray(y_keys) == cat_name)[0][0] regr = regressors[regr_name].regressor[cat_idx] feature_importances = getattr( regr, "feature_importances_", None) if feature_importances is None: feature_importances = regr.coef_ trace = dict( type="bar", x=x_keys, y=feature_importances, hoverinfo="x+y", ) layout = { "height": 800, "margin": {'b': 500}} figure = {"data": [trace], "layout": layout} return figure @app.callback(Output("perf_graph", "figure"), [Input("perf_regressor", "value"), Input("perf_category", "value"), Input("perf_datasel", "value")]) def draw_perf(regr_name, cat_name, datasel): cat_idx = np.where(np.asarray(y_keys) == cat_name)[0][0] if datasel == "all": regr = 
regressors[regr_name].regressor[cat_idx] plot_data = { "train": { 'x': data["default"]["train_x"], 'y': data["default"]["train_y"].values[:, cat_idx]}, "test": { 'x': data["default"]["test_x"], 'y': data["default"]["test_y"].values[:, cat_idx]} } else: regr = big_regressors[regr_name].regressor[cat_idx] plot_data = { "train": { 'x': big_data["default"]["train_x"], 'y': big_data["default"]["train_y"].values[:, cat_idx]}, "test": { 'x': big_data["default"]["test_x"], 'y': big_data["default"]["test_y"].values[:, cat_idx]} } figdata = [] pred_acc = {} for tt_ in plot_data.keys(): x_ = plot_data[tt_]["x"] ytrue_ = plot_data[tt_]["y"] y_ = regr.predict(x_) pred_acc[tt_] = ( "RMSE = " + str(round(data_analysis.mean_squared_error(ytrue_, y_)**0.5, 2)) + ", MAE = " + str(round(data_analysis.mean_absolute_error(ytrue_, y_), 2)) + ", R2 = " + str(round(data_analysis.r2_score(ytrue_, y_), 2))) trace, layout = create_geodict() trace["x"] = ytrue_ trace["y"] = y_ trace["hovertext"] = [ name_[0] + ', ' + name_[1] for name_ in zip(df["Libellé de la commune"].values, df["Libellé du département"].values)] trace["hoverinfo"] = "x+y+text" trace["marker"] = dict( size=4) trace["name"] = tt_.title() + " (" + pred_acc[tt_] + ")" figdata.append(trace) figdata.append( { "type": "scatter", "mode": "lines", "line": { "opacity": 0.5, "color": "black"}, "x": [0, 100], "y": [0, 100], "hovertext": "none", "text": "none", "showlegend": False } ) test_plot, layout = create_geodict() layout.pop("xaxis") layout.pop("yaxis") layout["height"] = 750 layout["width"] = 750 layout["xaxis"] = { "title": "True", "range": [0, 100]} layout["yaxis"] = { "title": "Predicted", "range": [0, 100], "scaleanchor": 'x', "scaleratio": 1.0} layout["title"] = cat_name figure = {"data": figdata, "layout": layout} return figure if __name__ == "__main__": app.run_server(host="0.0.0.0", debug=True, threaded=True)
0.571049
0.252986
import unittest from unittest import mock from pacsign.common_util import (print_new_line, print_info, print_warning, print_error, print_prompt, exception_handler, run_command, assert_in_error, change_folder_seperator, get_filename, is_windows_os, check_extension, check_extensions, get_unit_size, get_byte_size, get_password, get_standard_hex_string, get_reversed_hex_string, BYTE_ARRAY, CHAR_POINTER) '''test_print_new_line''' def test_print_new_line(): print_new_line() '''test_print_info''' def test_print_info(): print_info_string = mock.MagicMock() print_info_space = mock.MagicMock() print_info_file = mock.MagicMock() print_info_alternate_color = mock.MagicMock() print_info(print_info_string, print_info_space, print_info_file, print_info_alternate_color) '''test_print_warning''' def test_print_warning(): print_warning_string = mock.MagicMock() print_warning_space = mock.MagicMock() print_warning_file = mock.MagicMock() print_warning(print_warning_string, print_warning_space, print_warning_file) '''test_print_error''' def test_print_error(): print_error_string = mock.MagicMock() print_error_space = mock.MagicMock() print_error_file = mock.MagicMock() print_error(print_error_string, print_error_space, print_error_file) '''test_print_prompt''' def test_print_prompt(): print_prompt_string = mock.MagicMock() print_prompt_space = mock.MagicMock() print_prompt_file = mock.MagicMock() print_prompt(print_prompt_string, print_prompt_space, print_prompt_file) '''test_exception_handler''' def test_exception_handler(): exception_handler_etype = mock.MagicMock() exception_handler_value = mock.MagicMock() exception_handler_tb = mock.MagicMock() exception_handler(exception_handler_etype, exception_handler_value, exception_handler_tb) '''test_run_command''' def test_run_command(): run_command_command = mock.MagicMock() run_command_printed_cmd = mock.MagicMock() run_command_return_code = mock.MagicMock() run_command_allow_error = mock.MagicMock() run_command(run_command_command, 
run_command_printed_cmd, run_command_return_code, run_command_allow_error) '''test_assert_in_error''' def test_assert_in_error(): assert_in_error_boolean = mock.MagicMock() assert_in_error_string = mock.MagicMock() assert_in_error(assert_in_error_boolean, assert_in_error_string) '''test_change_folder_seperator''' def test_change_folder_seperator(): change_folder_seperator_fullpath = mock.MagicMock() change_folder_seperator(change_folder_seperator_fullpath) '''test_get_filename''' def test_get_filename(): get_filename_fullpath = mock.MagicMock() get_filename_space = mock.MagicMock() get_filename(str(get_filename_fullpath), int(get_filename_space)) '''test_is_windows_os''' def test_is_windows_os(): is_windows_os() '''test_check_extension''' def test_check_extension(): check_extension_file = mock.MagicMock() check_extension_extension = mock.MagicMock() check_extension(check_extension_file, check_extension_extension) '''test_check_extensions''' def test_check_extensions(): check_extensions_file = mock.MagicMock() check_extensions_extensions = mock.MagicMock() check_extensions(check_extensions_file, check_extensions_extensions) '''test_get_unit_size''' def test_get_unit_size(): get_unit_size_size = mock.MagicMock() get_unit_size_unit_size = mock.MagicMock() get_unit_size(int(get_unit_size_size), int(get_unit_size_unit_size)) '''test_get_byte_size''' def test_get_byte_size(): get_byte_size_bit = mock.MagicMock() get_byte_size(get_byte_size_bit) '''test_get_password''' @unittest.skip("skipping password test") def test_get_password(): get_password_messages = mock.MagicMock(return_value=["this","test"]) get_password_MIN = mock.MagicMock() get_password_MAX = mock.MagicMock() get_password_comment = mock.MagicMock() get_password(get_password_messages.return_value, int(get_password_MIN), int(get_password_MAX), str(get_password_comment)) '''test_get_standard_hex_string''' def test_get_standard_hex_string(): init_size = mock.MagicMock() CHAR_POINTER_test = 
CHAR_POINTER(int(init_size)) get_standard_hex_string_offset = mock.MagicMock(return_value=0) get_standard_hex_string_size = mock.MagicMock() CHAR_POINTER_test.get_standard_hex_string(get_standard_hex_string_offset.return_value, int(get_standard_hex_string_size)) '''test_get_reversed_hex_string''' def test_get_reversed_hex_string(): get_reversed_hex_string_data = mock.MagicMock() get_reversed_hex_string(get_reversed_hex_string_data) '''test_clean''' def test_clean(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) CHAR_POINTER_test.clean() '''test_size''' def test_size(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) CHAR_POINTER_test.size() '''test_append_byte''' def test_append_byte(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_byte_data = mock.MagicMock() BYTE_ARRAY_test.append_byte(append_byte_data) '''test_tofile''' @unittest.skip("Skipping test to learn how to mock a file") def test_tofile(): init_type = mock.MagicMock(return_value="FILE") init_arg = mock.MagicMock(spec=file, wraps=StringIO('test')) BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, init_arg) tofile_file = mock.MagicMock() BYTE_ARRAY_test.tofile(tofile_file) '''test_append_word''' def test_append_word(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_word_data = mock.MagicMock() BYTE_ARRAY_test.append_word(append_word_data) '''test_append_dword''' def test_append_dword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_dword_data = mock.MagicMock() BYTE_ARRAY_test.append_dword(append_dword_data) '''test_append_qword''' def test_append_qword(): init_type = mock.MagicMock(return_value="STRING") init_arg = 
mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_qword_data = mock.MagicMock() BYTE_ARRAY_test.append_qword(append_qword_data) '''test_append_data''' def test_append_data(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_data_chars = mock.MagicMock() BYTE_ARRAY_test.append_data(append_data_chars) '''test_append_data_swizzled''' def test_append_data_swizzled(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_data_swizzled_chars = mock.MagicMock() BYTE_ARRAY_test.append_data_swizzled(append_data_swizzled_chars) '''test_assign_word''' def test_assign_word(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) assign_word_offset = mock.MagicMock() assign_word_word = mock.MagicMock() BYTE_ARRAY_test.assign_word(int(assign_word_offset), assign_word_word) '''test_assign_dword''' def test_assign_dword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) assign_dword_offset = mock.MagicMock() assign_dword_dword = mock.MagicMock() BYTE_ARRAY_test.assign_dword(int(assign_dword_offset), assign_dword_dword) '''test_assign_qword''' def test_assign_qword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) assign_qword_offset = mock.MagicMock() assign_qword_qword = mock.MagicMock() BYTE_ARRAY_test.assign_qword(int(assign_qword_offset), assign_qword_qword) '''test_assign_data''' def test_assign_data(): init_size = mock.MagicMock(return_value=8) CHAR_POINTER_test = CHAR_POINTER(init_size.return_value) assign_data_chars = mock.MagicMock(return_value= 
b"01234567") CHAR_POINTER_test.assign_data(assign_data_chars.return_value) '''test_null_data''' def test_null_data(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) CHAR_POINTER_test.null_data() '''test_clear_data''' def test_clear_data(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) BYTE_ARRAY_test.clear_data() '''test_get_word''' def test_get_word(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) get_word_offset = mock.MagicMock() BYTE_ARRAY_test.get_word(int(get_word_offset)) '''test_get_dword''' def test_get_dword(): init_size = mock.MagicMock(return_value=4) CHAR_POINTER_test = CHAR_POINTER(init_size.return_value) get_dword_offset = mock.MagicMock(return_value=0) CHAR_POINTER_test.get_dword(get_dword_offset.return_value) '''test_get_qword''' def test_get_qword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) get_qword_offset = mock.MagicMock() BYTE_ARRAY_test.get_qword(int(get_qword_offset)) '''test_get_string''' def test_get_string(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) get_string_offset = mock.MagicMock() get_string_size = mock.MagicMock() BYTE_ARRAY_test.get_string(int(get_string_offset), int(get_string_size)) '''test_resize''' def test_resize(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) resize_size = mock.MagicMock() BYTE_ARRAY_test.resize(int(resize_size)) '''test_assign_partial_data''' def test_assign_partial_data(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) assign_partial_data_chars 
= mock.MagicMock(return_value= b"1234") assign_partial_data_source_offset = mock.MagicMock() assign_partial_data_dest_offset = mock.MagicMock(return_value=0) assign_partial_data_size = mock.MagicMock() CHAR_POINTER_test.assign_partial_data(assign_partial_data_chars.return_value, int(assign_partial_data_source_offset), assign_partial_data_dest_offset.return_value, int(assign_partial_data_size)) '''test_compare_data''' @unittest.skip("need to evaluate assert statement") def test_compare_data(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) compare_data_chars = mock.MagicMock(return_value= "") compare_data_error = mock.MagicMock() CHAR_POINTER_test.compare_data(compare_data_chars.return_value, compare_data_error)
python/pacsign/tests/test_common_util.py
import unittest from unittest import mock from pacsign.common_util import (print_new_line, print_info, print_warning, print_error, print_prompt, exception_handler, run_command, assert_in_error, change_folder_seperator, get_filename, is_windows_os, check_extension, check_extensions, get_unit_size, get_byte_size, get_password, get_standard_hex_string, get_reversed_hex_string, BYTE_ARRAY, CHAR_POINTER) '''test_print_new_line''' def test_print_new_line(): print_new_line() '''test_print_info''' def test_print_info(): print_info_string = mock.MagicMock() print_info_space = mock.MagicMock() print_info_file = mock.MagicMock() print_info_alternate_color = mock.MagicMock() print_info(print_info_string, print_info_space, print_info_file, print_info_alternate_color) '''test_print_warning''' def test_print_warning(): print_warning_string = mock.MagicMock() print_warning_space = mock.MagicMock() print_warning_file = mock.MagicMock() print_warning(print_warning_string, print_warning_space, print_warning_file) '''test_print_error''' def test_print_error(): print_error_string = mock.MagicMock() print_error_space = mock.MagicMock() print_error_file = mock.MagicMock() print_error(print_error_string, print_error_space, print_error_file) '''test_print_prompt''' def test_print_prompt(): print_prompt_string = mock.MagicMock() print_prompt_space = mock.MagicMock() print_prompt_file = mock.MagicMock() print_prompt(print_prompt_string, print_prompt_space, print_prompt_file) '''test_exception_handler''' def test_exception_handler(): exception_handler_etype = mock.MagicMock() exception_handler_value = mock.MagicMock() exception_handler_tb = mock.MagicMock() exception_handler(exception_handler_etype, exception_handler_value, exception_handler_tb) '''test_run_command''' def test_run_command(): run_command_command = mock.MagicMock() run_command_printed_cmd = mock.MagicMock() run_command_return_code = mock.MagicMock() run_command_allow_error = mock.MagicMock() run_command(run_command_command, 
run_command_printed_cmd, run_command_return_code, run_command_allow_error) '''test_assert_in_error''' def test_assert_in_error(): assert_in_error_boolean = mock.MagicMock() assert_in_error_string = mock.MagicMock() assert_in_error(assert_in_error_boolean, assert_in_error_string) '''test_change_folder_seperator''' def test_change_folder_seperator(): change_folder_seperator_fullpath = mock.MagicMock() change_folder_seperator(change_folder_seperator_fullpath) '''test_get_filename''' def test_get_filename(): get_filename_fullpath = mock.MagicMock() get_filename_space = mock.MagicMock() get_filename(str(get_filename_fullpath), int(get_filename_space)) '''test_is_windows_os''' def test_is_windows_os(): is_windows_os() '''test_check_extension''' def test_check_extension(): check_extension_file = mock.MagicMock() check_extension_extension = mock.MagicMock() check_extension(check_extension_file, check_extension_extension) '''test_check_extensions''' def test_check_extensions(): check_extensions_file = mock.MagicMock() check_extensions_extensions = mock.MagicMock() check_extensions(check_extensions_file, check_extensions_extensions) '''test_get_unit_size''' def test_get_unit_size(): get_unit_size_size = mock.MagicMock() get_unit_size_unit_size = mock.MagicMock() get_unit_size(int(get_unit_size_size), int(get_unit_size_unit_size)) '''test_get_byte_size''' def test_get_byte_size(): get_byte_size_bit = mock.MagicMock() get_byte_size(get_byte_size_bit) '''test_get_password''' @unittest.skip("skipping password test") def test_get_password(): get_password_messages = mock.MagicMock(return_value=["this","test"]) get_password_MIN = mock.MagicMock() get_password_MAX = mock.MagicMock() get_password_comment = mock.MagicMock() get_password(get_password_messages.return_value, int(get_password_MIN), int(get_password_MAX), str(get_password_comment)) '''test_get_standard_hex_string''' def test_get_standard_hex_string(): init_size = mock.MagicMock() CHAR_POINTER_test = 
CHAR_POINTER(int(init_size)) get_standard_hex_string_offset = mock.MagicMock(return_value=0) get_standard_hex_string_size = mock.MagicMock() CHAR_POINTER_test.get_standard_hex_string(get_standard_hex_string_offset.return_value, int(get_standard_hex_string_size)) '''test_get_reversed_hex_string''' def test_get_reversed_hex_string(): get_reversed_hex_string_data = mock.MagicMock() get_reversed_hex_string(get_reversed_hex_string_data) '''test_clean''' def test_clean(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) CHAR_POINTER_test.clean() '''test_size''' def test_size(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) CHAR_POINTER_test.size() '''test_append_byte''' def test_append_byte(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_byte_data = mock.MagicMock() BYTE_ARRAY_test.append_byte(append_byte_data) '''test_tofile''' @unittest.skip("Skipping test to learn how to mock a file") def test_tofile(): init_type = mock.MagicMock(return_value="FILE") init_arg = mock.MagicMock(spec=file, wraps=StringIO('test')) BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, init_arg) tofile_file = mock.MagicMock() BYTE_ARRAY_test.tofile(tofile_file) '''test_append_word''' def test_append_word(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_word_data = mock.MagicMock() BYTE_ARRAY_test.append_word(append_word_data) '''test_append_dword''' def test_append_dword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_dword_data = mock.MagicMock() BYTE_ARRAY_test.append_dword(append_dword_data) '''test_append_qword''' def test_append_qword(): init_type = mock.MagicMock(return_value="STRING") init_arg = 
mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_qword_data = mock.MagicMock() BYTE_ARRAY_test.append_qword(append_qword_data) '''test_append_data''' def test_append_data(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_data_chars = mock.MagicMock() BYTE_ARRAY_test.append_data(append_data_chars) '''test_append_data_swizzled''' def test_append_data_swizzled(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) append_data_swizzled_chars = mock.MagicMock() BYTE_ARRAY_test.append_data_swizzled(append_data_swizzled_chars) '''test_assign_word''' def test_assign_word(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) assign_word_offset = mock.MagicMock() assign_word_word = mock.MagicMock() BYTE_ARRAY_test.assign_word(int(assign_word_offset), assign_word_word) '''test_assign_dword''' def test_assign_dword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) assign_dword_offset = mock.MagicMock() assign_dword_dword = mock.MagicMock() BYTE_ARRAY_test.assign_dword(int(assign_dword_offset), assign_dword_dword) '''test_assign_qword''' def test_assign_qword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) assign_qword_offset = mock.MagicMock() assign_qword_qword = mock.MagicMock() BYTE_ARRAY_test.assign_qword(int(assign_qword_offset), assign_qword_qword) '''test_assign_data''' def test_assign_data(): init_size = mock.MagicMock(return_value=8) CHAR_POINTER_test = CHAR_POINTER(init_size.return_value) assign_data_chars = mock.MagicMock(return_value= 
b"01234567") CHAR_POINTER_test.assign_data(assign_data_chars.return_value) '''test_null_data''' def test_null_data(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) CHAR_POINTER_test.null_data() '''test_clear_data''' def test_clear_data(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) BYTE_ARRAY_test.clear_data() '''test_get_word''' def test_get_word(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) get_word_offset = mock.MagicMock() BYTE_ARRAY_test.get_word(int(get_word_offset)) '''test_get_dword''' def test_get_dword(): init_size = mock.MagicMock(return_value=4) CHAR_POINTER_test = CHAR_POINTER(init_size.return_value) get_dword_offset = mock.MagicMock(return_value=0) CHAR_POINTER_test.get_dword(get_dword_offset.return_value) '''test_get_qword''' def test_get_qword(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) get_qword_offset = mock.MagicMock() BYTE_ARRAY_test.get_qword(int(get_qword_offset)) '''test_get_string''' def test_get_string(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) get_string_offset = mock.MagicMock() get_string_size = mock.MagicMock() BYTE_ARRAY_test.get_string(int(get_string_offset), int(get_string_size)) '''test_resize''' def test_resize(): init_type = mock.MagicMock(return_value="STRING") init_arg = mock.MagicMock() BYTE_ARRAY_test = BYTE_ARRAY(init_type.return_value, str(init_arg)) resize_size = mock.MagicMock() BYTE_ARRAY_test.resize(int(resize_size)) '''test_assign_partial_data''' def test_assign_partial_data(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) assign_partial_data_chars 
= mock.MagicMock(return_value= b"1234") assign_partial_data_source_offset = mock.MagicMock() assign_partial_data_dest_offset = mock.MagicMock(return_value=0) assign_partial_data_size = mock.MagicMock() CHAR_POINTER_test.assign_partial_data(assign_partial_data_chars.return_value, int(assign_partial_data_source_offset), assign_partial_data_dest_offset.return_value, int(assign_partial_data_size)) '''test_compare_data''' @unittest.skip("need to evaluate assert statement") def test_compare_data(): init_size = mock.MagicMock() CHAR_POINTER_test = CHAR_POINTER(int(init_size)) compare_data_chars = mock.MagicMock(return_value= "") compare_data_error = mock.MagicMock() CHAR_POINTER_test.compare_data(compare_data_chars.return_value, compare_data_error)
0.388502
0.176175
import sys, os, requests, datetime, re, logging, logging.handlers import paho.mqtt.publish as publish import paho.mqtt.client as mqtt import pytz import configparser from time import sleep # Debug mode. DEBUG = 0 class PowerstationMqtt(): def init(self): self.powerstation_hostname = os.environ.get('powerstation_hostname','') self.powerstation_instance = os.environ.get('powerstation_instance','') self.mqtt_client_id = os.environ.get('mqtt_client_id','') self.mqtt_host = os.environ.get('mqtt_client_host','') self.mqtt_port = int(os.environ.get('mqtt_client_port','')) self.mqtt_topic = os.environ.get('mqtt_client_root_topic','') self.mqtt_qos = int(os.environ.get('mqtt_qos','')) self.mqtt_retain = eval(os.environ.get('mqtt_retain','')) if eval(os.environ.get('mqtt_auth','')): self.mqtt_username = os.environ.get('mqtt_username','') self.mqtt_password = os.environ.get('mqtt_password','') self.mqtt_auth = { "username": os.environ.get('mqtt_username',''), "password": os.environ.get('mqtt_password','') } else: self.mqtt_auth = None logging.basicConfig(stream=sys.stdout, format='%(asctime)s: %(name)s %(levelname)s: %(message)s') logger = logging.getLogger(__name__) logger.level = logging.INFO formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") handler = logging.handlers.RotatingFileHandler("/log/powerstation-mqtt-" + self.powerstation_instance + ".log", maxBytes=10000000, backupCount=4) handler.setFormatter(formatter) logger.addHandler(handler) statuslogger = logging.getLogger("status") statuslogger.level = logging.INFO statushandler = logging.handlers.RotatingFileHandler("/log/powerstation-mqtt-status-" + self.powerstation_instance + ".log", maxBytes=1000000, backupCount=2) statushandler.setFormatter(formatter) statuslogger.addHandler(statushandler) self.logger = logger self.statuslogger = statuslogger logger.info("initialized") statuslogger.info("initialized") def on_message(self, client, userdata, message): try: if 
(message.topic.startswith("$SYS/")): self.watchdog = 0 else: self.watchdog = 0 port = message.topic.split('/')[-1] value = str(message.payload.decode("utf-8")) state = "0" if value == "ON" or value == "1": state = "1" url = 'http://' + self.powerstation_hostname + '?cmd=200&json={"port":' + port +',"state":' + state + '}' self.logger.debug(url) requests.get(url) except Exception as e: self.logger.warning(e) pass def run(self): self.logger.info("running") self.statuslogger.info("running") lastRun = datetime.datetime.utcnow() lastEval = datetime.datetime.utcnow() watt = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 0.0] kwh = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 0.0] kwhSent = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 0.0] switch = [0,0,0,0,0,0] lastSentTime = datetime.datetime.fromordinal(1) while True: self.watchdog = 0 sendClientId = self.mqtt_client_id + "Send" client = mqtt.Client(client_id=sendClientId) self.client = client client.enable_logger(logger=self.logger) if self.mqtt_username: self.logger.info("apply credentials") client.username_pw_set(self.mqtt_username, self.mqtt_password) client.reconnect_delay_set(min_delay=1, max_delay=120) client.on_message = self.on_message client.connect(self.mqtt_host, port=self.mqtt_port, keepalive=60, bind_address="") client.loop_start() client.subscribe(self.mqtt_topic + "send/#", qos=1) client.subscribe("$SYS/broker/uptime", qos=1) while self.watchdog < 60: self.watchdog = self.watchdog + 1 try: while True: now = datetime.datetime.utcnow() delta = (now - lastRun).total_seconds() if delta >= 1: lastRun = now break sleep(0.1) self.statuslogger.info("looping") self.logger.debug("Requesting Data") response = requests.get("http://" + self.powerstation_hostname + "?cmd=511", timeout=10.0) now = datetime.datetime.utcnow() deltaEval = (now - lastEval).total_seconds() lastEval = now deltaSent = (now - lastSentTime).total_seconds() jsonData = response.json() if 'data' not in jsonData: self.logger.warning("Data not found") break data = jsonData["data"] 
self.logger.debug("Processing Data") sWatt = data["watt"] rWatt = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 2.0] rSwitch = data["switch"] for x in range(6): rWatt[x] = float(sWatt[x]) kwh[x] = kwh[x] + rWatt[x] * deltaEval / 3600000.0 rWatt[6] = rWatt[6] + rWatt[x] kwh[6] = kwh[6] + rWatt[x] * deltaEval / 3600000.0 if deltaSent > 600: for x in range(7): topic = self.mqtt_topic + "Port" + str(x) if (x != 6): switch[x] = rSwitch[x] state = "ON" if int(switch[x]) == 0: state = "OFF" self.client.publish(topic + "/switch", payload=state, qos=self.mqtt_qos, retain=self.mqtt_retain) else: topic = self.mqtt_topic + "Total" watt[x] = rWatt[x] self.client.publish(topic + "/watt", payload=watt[x], qos=self.mqtt_qos, retain=self.mqtt_retain) kwhSent[x] = kwh[x] self.client.publish(topic + "/kWh", payload=kwhSent[x], qos=self.mqtt_qos, retain=self.mqtt_retain) lastSentTime = now else: for x in range(7): topic = self.mqtt_topic + "Port" + str(x) if (x != 6): if switch[x] != rSwitch[x]: switch[x] = rSwitch[x] state = "ON" if int(switch[x]) == 0: state = "OFF" self.client.publish(topic + "/switch", payload=state, qos=self.mqtt_qos, retain=self.mqtt_retain) else: topic = self.mqtt_topic + "Total" if abs(watt[x] - rWatt[x]) >= 0.5: watt[x] = rWatt[x] self.client.publish(topic + "/watt", payload=watt[x], qos=self.mqtt_qos, retain=self.mqtt_retain) if abs(kwh[x] - kwhSent[x]) >= 0.01: kwhSent[x] = kwh[x] self.client.publish(topic + "/kWh", payload=kwhSent[x], qos=self.mqtt_qos, retain=self.mqtt_retain) except Exception as e: self.logger.warning(e) pass self.logger.warning("watchdog triggered, restarting mqtt") client.disconnect() d = PowerstationMqtt() d.init() d.run()
app/main.py
import sys, os, requests, datetime, re, logging, logging.handlers import paho.mqtt.publish as publish import paho.mqtt.client as mqtt import pytz import configparser from time import sleep # Debug mode. DEBUG = 0 class PowerstationMqtt(): def init(self): self.powerstation_hostname = os.environ.get('powerstation_hostname','') self.powerstation_instance = os.environ.get('powerstation_instance','') self.mqtt_client_id = os.environ.get('mqtt_client_id','') self.mqtt_host = os.environ.get('mqtt_client_host','') self.mqtt_port = int(os.environ.get('mqtt_client_port','')) self.mqtt_topic = os.environ.get('mqtt_client_root_topic','') self.mqtt_qos = int(os.environ.get('mqtt_qos','')) self.mqtt_retain = eval(os.environ.get('mqtt_retain','')) if eval(os.environ.get('mqtt_auth','')): self.mqtt_username = os.environ.get('mqtt_username','') self.mqtt_password = os.environ.get('mqtt_password','') self.mqtt_auth = { "username": os.environ.get('mqtt_username',''), "password": os.environ.get('mqtt_password','') } else: self.mqtt_auth = None logging.basicConfig(stream=sys.stdout, format='%(asctime)s: %(name)s %(levelname)s: %(message)s') logger = logging.getLogger(__name__) logger.level = logging.INFO formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") handler = logging.handlers.RotatingFileHandler("/log/powerstation-mqtt-" + self.powerstation_instance + ".log", maxBytes=10000000, backupCount=4) handler.setFormatter(formatter) logger.addHandler(handler) statuslogger = logging.getLogger("status") statuslogger.level = logging.INFO statushandler = logging.handlers.RotatingFileHandler("/log/powerstation-mqtt-status-" + self.powerstation_instance + ".log", maxBytes=1000000, backupCount=2) statushandler.setFormatter(formatter) statuslogger.addHandler(statushandler) self.logger = logger self.statuslogger = statuslogger logger.info("initialized") statuslogger.info("initialized") def on_message(self, client, userdata, message): try: if 
(message.topic.startswith("$SYS/")): self.watchdog = 0 else: self.watchdog = 0 port = message.topic.split('/')[-1] value = str(message.payload.decode("utf-8")) state = "0" if value == "ON" or value == "1": state = "1" url = 'http://' + self.powerstation_hostname + '?cmd=200&json={"port":' + port +',"state":' + state + '}' self.logger.debug(url) requests.get(url) except Exception as e: self.logger.warning(e) pass def run(self): self.logger.info("running") self.statuslogger.info("running") lastRun = datetime.datetime.utcnow() lastEval = datetime.datetime.utcnow() watt = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 0.0] kwh = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 0.0] kwhSent = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 0.0] switch = [0,0,0,0,0,0] lastSentTime = datetime.datetime.fromordinal(1) while True: self.watchdog = 0 sendClientId = self.mqtt_client_id + "Send" client = mqtt.Client(client_id=sendClientId) self.client = client client.enable_logger(logger=self.logger) if self.mqtt_username: self.logger.info("apply credentials") client.username_pw_set(self.mqtt_username, self.mqtt_password) client.reconnect_delay_set(min_delay=1, max_delay=120) client.on_message = self.on_message client.connect(self.mqtt_host, port=self.mqtt_port, keepalive=60, bind_address="") client.loop_start() client.subscribe(self.mqtt_topic + "send/#", qos=1) client.subscribe("$SYS/broker/uptime", qos=1) while self.watchdog < 60: self.watchdog = self.watchdog + 1 try: while True: now = datetime.datetime.utcnow() delta = (now - lastRun).total_seconds() if delta >= 1: lastRun = now break sleep(0.1) self.statuslogger.info("looping") self.logger.debug("Requesting Data") response = requests.get("http://" + self.powerstation_hostname + "?cmd=511", timeout=10.0) now = datetime.datetime.utcnow() deltaEval = (now - lastEval).total_seconds() lastEval = now deltaSent = (now - lastSentTime).total_seconds() jsonData = response.json() if 'data' not in jsonData: self.logger.warning("Data not found") break data = jsonData["data"] 
self.logger.debug("Processing Data") sWatt = data["watt"] rWatt = [0.0, 0.0, 0.0, 0.0, 0.0 ,0.0, 2.0] rSwitch = data["switch"] for x in range(6): rWatt[x] = float(sWatt[x]) kwh[x] = kwh[x] + rWatt[x] * deltaEval / 3600000.0 rWatt[6] = rWatt[6] + rWatt[x] kwh[6] = kwh[6] + rWatt[x] * deltaEval / 3600000.0 if deltaSent > 600: for x in range(7): topic = self.mqtt_topic + "Port" + str(x) if (x != 6): switch[x] = rSwitch[x] state = "ON" if int(switch[x]) == 0: state = "OFF" self.client.publish(topic + "/switch", payload=state, qos=self.mqtt_qos, retain=self.mqtt_retain) else: topic = self.mqtt_topic + "Total" watt[x] = rWatt[x] self.client.publish(topic + "/watt", payload=watt[x], qos=self.mqtt_qos, retain=self.mqtt_retain) kwhSent[x] = kwh[x] self.client.publish(topic + "/kWh", payload=kwhSent[x], qos=self.mqtt_qos, retain=self.mqtt_retain) lastSentTime = now else: for x in range(7): topic = self.mqtt_topic + "Port" + str(x) if (x != 6): if switch[x] != rSwitch[x]: switch[x] = rSwitch[x] state = "ON" if int(switch[x]) == 0: state = "OFF" self.client.publish(topic + "/switch", payload=state, qos=self.mqtt_qos, retain=self.mqtt_retain) else: topic = self.mqtt_topic + "Total" if abs(watt[x] - rWatt[x]) >= 0.5: watt[x] = rWatt[x] self.client.publish(topic + "/watt", payload=watt[x], qos=self.mqtt_qos, retain=self.mqtt_retain) if abs(kwh[x] - kwhSent[x]) >= 0.01: kwhSent[x] = kwh[x] self.client.publish(topic + "/kWh", payload=kwhSent[x], qos=self.mqtt_qos, retain=self.mqtt_retain) except Exception as e: self.logger.warning(e) pass self.logger.warning("watchdog triggered, restarting mqtt") client.disconnect() d = PowerstationMqtt() d.init() d.run()
0.064234
0.045671
import enum import io import ipaddress import re import struct import socket as sk from typing import Optional, Tuple class UTP: """ uTorrent Transport Protocol BEP29: https://www.bittorrent.org/beps/bep_0029.html """ def __init__(self) -> None: pass PROTOCOL_NAME_v10 = b'BitTorrent protocol' # BitTorrent v1.0 PROTOCOL_V10 = 1 PROTO_VER_MAP = { PROTOCOL_V10: PROTOCOL_NAME_v10, } PROTOCOL_VERS = { len(PROTOCOL_NAME_v10): PROTOCOL_V10 } class WrongMessageError(ValueError): pass class HandshakeError(ConnectionAbortedError): pass class _MessageBytes(bytes): @property def len(self) -> int: return len(self) class Message: MESSAGE_ID = None BASE_LENGTH = 0 PAYLOAD_OFFSET = 5 PAYLOAD_FMT = '' _RE = re.compile(r'(.*)({\d+\.len})(.*)') _SUB = r'\1{}\3' def __init__(self, length_prefix: int = 0, *payload) -> None: self.length_prefix = length_prefix self.payload = payload def to_bytes(self) -> bytes: fmt = ['!I'] args = [self.length_prefix] if self.MESSAGE_ID is not None: fmt.append('B') args.append(self.MESSAGE_ID) fmt.append(self.PAYLOAD_FMT.format(*self.payload)) args.extend(self.payload) return struct.pack(''.join(fmt), *args) @classmethod def from_buf(cls, buf: io.BytesIO, handshaked=False) -> Optional['Message']: cur_pos = buf.tell() len_prefix = buf.read(4) buf.seek(-len(len_prefix), io.SEEK_CUR) if len(len_prefix) < 4: return try: result = cls.from_bytes(buf.read(), handshaked) except WrongMessageError: buf.seek(cur_pos + 4 + int.from_bytes(len_prefix, 'big'), io.SEEK_SET) raise if isinstance(result, Handshake): total_len = 1 + result.length_prefix + 8 + 20 + 20 else: total_len = 4 + result.length_prefix buf.seek(cur_pos + total_len, io.SEEK_SET) return result @classmethod def from_bytes(cls, buf: bytes, handshaked=False) -> 'Message': if not buf: raise WrongMessageError('No data') msg_ids = { 0: Choke, 1: UnChoke, 2: Interested, 3: NotInterested, 4: Have, 5: Bitfield, 6: Request, 7: Piece, 8: Cancel, 9: Port, } try: length, msg_id = struct.unpack_from('!IB', buf) 
except struct.error: msg = KeepAlive else: msg = msg_ids.get(msg_id) if msg is None: if handshaked: raise WrongMessageError(f'Unrecognized Message: ' f'{buf[:16]}{"..." if len(buf) > 16 else ""}') msg = Handshake return msg._from_bytes(buf) @classmethod def _from_bytes(cls, buf: bytes) -> 'Message': length = struct.unpack_from('!I', buf)[0] fmt = cls._RE.sub(cls._SUB, cls.PAYLOAD_FMT).format(length - cls.BASE_LENGTH) return cls(*struct.unpack_from(f'!{fmt}', buf, cls.PAYLOAD_OFFSET)) class KeepAlive(Message): """ <len=0000> """ BASE_LENGTH = 0 PAYLOAD_OFFSET = 0 def __init__(self): super().__init__(self.BASE_LENGTH) class Choke(Message): """ <len=0001><msg_id=0> """ MESSAGE_ID = 0 BASE_LENGTH = 1 def __init__(self): super().__init__(self.BASE_LENGTH) class UnChoke(Message): """ <len=0001><msg_id=1> """ MESSAGE_ID = 1 BASE_LENGTH = 1 def __init__(self): super().__init__(self.BASE_LENGTH) class Interested(Message): """ <len=0001><msg_id=2> """ MESSAGE_ID = 2 BASE_LENGTH = 1 def __init__(self): super().__init__(self.BASE_LENGTH) class NotInterested(Message): """ <len=0001><msg_id=3> """ MESSAGE_ID = 3 BASE_LENGTH = 1 def __init__(self): super().__init__(self.BASE_LENGTH) class Have(Message): """ <len=0005><msg_id=4><piece_index=I> """ MESSAGE_ID = 4 BASE_LENGTH = 5 PAYLOAD_FMT = 'I' def __init__(self, piece_index: int): super().__init__(self.BASE_LENGTH, piece_index) @property def piece_index(self) -> int: return self.payload[0] class Bitfield(Message): """ <len=0001 + X><msg_id=5><bitfield=?s> """ MESSAGE_ID = 5 BASE_LENGTH = 1 PAYLOAD_FMT = '{0.len}s' def __init__(self, bitfield: bytes): bitfield = _MessageBytes(bitfield) super().__init__(self.BASE_LENGTH + bitfield.len, bitfield) @property def bitfield(self) -> bytes: return self.payload[0] class Request(Message): """ <len=0013><msg_id=6><index=I><begin=I><length=I> """ MESSAGE_ID = 6 BASE_LENGTH = 13 PAYLOAD_FMT = '3I' def __init__(self, index: int, begin: int, length: int): super().__init__(self.BASE_LENGTH, 
index, begin, length) @property def index(self) -> int: return self.payload[0] @property def begin(self) -> int: return self.payload[1] @property def length(self) -> int: return self.payload[2] class Piece(Message): """ <len=0009 + X><msg_id=7><index=I><begin=I><block=?s> """ MESSAGE_ID = 7 BASE_LENGTH = 9 PAYLOAD_FMT = 'II{2.len}s' def __init__(self, index: int, begin: int, block: bytes): block = _MessageBytes(block) super().__init__(self.BASE_LENGTH + block.len, index, begin, block) @property def index(self) -> int: return self.payload[0] @property def begin(self) -> int: return self.payload[1] @property def block(self) -> bytes: return self.payload[2] class Cancel(Message): """ <len=0013><msg_id=8><index=I><begin=I><length=I> """ MESSAGE_ID = 8 BASE_LENGTH = 13 PAYLOAD_FMT = '3I' def __init__(self, index: int, begin: int, length: int): super().__init__(self.BASE_LENGTH, index, begin, length) @property def index(self) -> int: return self.payload[0] @property def begin(self) -> int: return self.payload[1] @property def length(self) -> int: return self.payload[2] class Port(Message): """ <len=0003><msg_id=9><listen_port=H> """ MESSAGE_ID = 9 BASE_LENGTH = 3 PAYLOAD_FMT = 'H' def __init__(self, listen_port): super().__init__(self.BASE_LENGTH, listen_port) @property def listen_port(self) -> int: return self.payload[0] class Handshake(Message): """ <pstr_len=B><pstr=?s><reserved=Q><info_hash=20s><peer_id=20s> """ PAYLOAD_FMT = '!B{0.len}sQ20s20s' def __init__(self, reserved: int, info_hash: bytes, peer_id: bytes, version=1): pstr = _MessageBytes(PROTO_VER_MAP[version]) super().__init__(pstr.len, pstr, reserved, info_hash, peer_id) @classmethod def _from_bytes(cls, buf: bytes) -> 'Handshake': name_len = buf[0] proto_ver = PROTOCOL_VERS.get(name_len) if not proto_ver: raise HandshakeError(f'Unsupported protocol: ' f'{buf[:16]}{"..." 
if len(buf) > 16 else ""}') return cls(*struct.unpack_from('!Q20s20s', buf, 1 + name_len), proto_ver) def to_bytes(self) -> bytes: return struct.pack(self.PAYLOAD_FMT.format(*self.payload), self.length_prefix, *self.payload) @property def reserved(self): return self.payload[1] @property def info_hash(self): return self.payload[2] @property def peer_id(self): return self.payload[3] class PeerWireProtocol: KEEP_ALIVE_TIMEOUT = 120 KEEP_ALIVE_BIAS = 5 def __init__(self, peer_id: bytes, ip: str, port: int): self.peer_id = peer_id self.peer_addr = ip, port self.sock: Optional[sk.socket] = None def connect(self): print(f'connect to peer {self.peer_addr}') ipa = ipaddress.ip_address(self.peer_addr[0]) family = sk.AF_INET if ipa.version == 4 else sk.AF_INET6 self.sock = sk.socket(family, sk.SOCK_STREAM) self.sock.settimeout(5) try: self.sock.connect(self.peer_addr) except sk.error: self.disconnect() raise def disconnect(self): if self.sock: self.sock.close() self.sock = None def _send_message(self, msg: Message) -> Message: if not self.sock: self.connect() print(f'send {msg.__class__.__name__}') self.sock.send(msg.to_bytes()) try: resp = self.sock.recv(65536) print(f'received {len(resp)} bytes') except sk.error: self.disconnect() raise try: return Message.from_bytes(resp) # TODO: extended by <reserved & (1 << 20)> # resp[<msg_len>:] except WrongMessageError as e: self.disconnect() raise ConnectionAbortedError(e) def handshake(self, info_hash: bytes, client_peer_id: bytes, reserved: int = 0) -> Tuple[bytes, bytes]: resp = self._send_message(Handshake(reserved, info_hash, client_peer_id)) if self.peer_id and resp.peer_id and self.peer_id == resp.peer_id: self.disconnect() raise HandshakeError('Unexpected peer_id') return resp.peer_id, resp.reserved def bitfield(self, bitfield: bytes): return self._send_message(Bitfield(bitfield))
btorrent/transport/peer.py
import enum
import io
import ipaddress
import re
import struct
import socket as sk
from typing import Optional, Tuple


class UTP:
    """
    uTorrent Transport Protocol

    BEP29: https://www.bittorrent.org/beps/bep_0029.html
    """

    def __init__(self) -> None:
        pass


# Protocol identifier string sent in the handshake (BEP 3).
PROTOCOL_NAME_v10 = b'BitTorrent protocol'  # BitTorrent v1.0
PROTOCOL_V10 = 1

# version number -> protocol name (pstr)
PROTO_VER_MAP = {
    PROTOCOL_V10: PROTOCOL_NAME_v10,
}
# pstr length -> version number, used to recognize a handshake's protocol
PROTOCOL_VERS = {
    len(PROTOCOL_NAME_v10): PROTOCOL_V10
}


class WrongMessageError(ValueError):
    """Raised when a buffer cannot be parsed as a peer-wire message."""
    pass


class HandshakeError(ConnectionAbortedError):
    """Raised when the handshake is malformed or fails validation."""
    pass


class _MessageBytes(bytes):
    # bytes subclass exposing its length as an attribute so struct format
    # templates like '{0.len}s' can be filled in via str.format(*payload).
    @property
    def len(self) -> int:
        return len(self)


class Message:
    """Base class for BitTorrent peer-wire messages (BEP 3).

    Wire layout: <length_prefix=I><msg_id=B><payload>; KeepAlive omits the
    msg_id, Handshake has its own layout entirely.
    """
    MESSAGE_ID = None     # one byte after the length prefix; None for KeepAlive
    BASE_LENGTH = 0       # fixed part of length_prefix (excl. variable payload)
    PAYLOAD_OFFSET = 5    # payload starts after <I><B> for ordinary messages
    PAYLOAD_FMT = ''      # struct format template for the payload

    # Matches a '{N.len}s' placeholder so it can be replaced with a concrete
    # byte count when parsing (length is only known from the length prefix).
    _RE = re.compile(r'(.*)({\d+\.len})(.*)')
    _SUB = r'\1{}\3'

    def __init__(self, length_prefix: int = 0, *payload) -> None:
        self.length_prefix = length_prefix
        self.payload = payload

    def to_bytes(self) -> bytes:
        """Serialize the message to its wire representation."""
        fmt = ['!I']
        args = [self.length_prefix]
        if self.MESSAGE_ID is not None:
            fmt.append('B')
            args.append(self.MESSAGE_ID)
        # PAYLOAD_FMT may reference payload items (e.g. '{0.len}s').
        fmt.append(self.PAYLOAD_FMT.format(*self.payload))
        args.extend(self.payload)
        return struct.pack(''.join(fmt), *args)

    @classmethod
    def from_buf(cls, buf: io.BytesIO, handshaked=False) -> Optional['Message']:
        """Parse one message from a stream buffer, advancing its position.

        Returns None if fewer than 4 bytes remain (incomplete length prefix).
        On a parse failure the buffer is advanced past the broken message
        before WrongMessageError propagates.
        """
        cur_pos = buf.tell()
        len_prefix = buf.read(4)
        buf.seek(-len(len_prefix), io.SEEK_CUR)
        if len(len_prefix) < 4:
            return
        try:
            result = cls.from_bytes(buf.read(), handshaked)
        except WrongMessageError:
            # Skip over the unparseable message so the caller can resync.
            buf.seek(cur_pos + 4 + int.from_bytes(len_prefix, 'big'),
                     io.SEEK_SET)
            raise
        if isinstance(result, Handshake):
            # <pstr_len=B><pstr><reserved=Q><info_hash=20s><peer_id=20s>
            total_len = 1 + result.length_prefix + 8 + 20 + 20
        else:
            total_len = 4 + result.length_prefix
        buf.seek(cur_pos + total_len, io.SEEK_SET)
        return result

    @classmethod
    def from_bytes(cls, buf: bytes, handshaked=False) -> 'Message':
        """Dispatch a raw buffer to the right Message subclass and parse it.

        A buffer too short for <I><B> is a KeepAlive; an unknown msg_id is
        treated as a Handshake unless the handshake already happened.
        """
        if not buf:
            raise WrongMessageError('No data')
        msg_ids = {
            0: Choke,
            1: UnChoke,
            2: Interested,
            3: NotInterested,
            4: Have,
            5: Bitfield,
            6: Request,
            7: Piece,
            8: Cancel,
            9: Port,
        }
        try:
            length, msg_id = struct.unpack_from('!IB', buf)
        except struct.error:
            msg = KeepAlive
        else:
            msg = msg_ids.get(msg_id)
            if msg is None:
                if handshaked:
                    raise WrongMessageError(f'Unrecognized Message: '
                                            f'{buf[:16]}{"..." if len(buf) > 16 else ""}')
                msg = Handshake
        return msg._from_bytes(buf)

    @classmethod
    def _from_bytes(cls, buf: bytes) -> 'Message':
        """Generic payload parser driven by PAYLOAD_FMT.

        Any '{N.len}s' placeholder is resolved to the variable payload size
        derived from the length prefix.
        """
        length = struct.unpack_from('!I', buf)[0]
        fmt = cls._RE.sub(cls._SUB, cls.PAYLOAD_FMT).format(
            length - cls.BASE_LENGTH)
        return cls(*struct.unpack_from(f'!{fmt}', buf, cls.PAYLOAD_OFFSET))


class KeepAlive(Message):
    """ <len=0000> """
    BASE_LENGTH = 0
    PAYLOAD_OFFSET = 0

    def __init__(self):
        super().__init__(self.BASE_LENGTH)


class Choke(Message):
    """ <len=0001><msg_id=0> """
    MESSAGE_ID = 0
    BASE_LENGTH = 1

    def __init__(self):
        super().__init__(self.BASE_LENGTH)


class UnChoke(Message):
    """ <len=0001><msg_id=1> """
    MESSAGE_ID = 1
    BASE_LENGTH = 1

    def __init__(self):
        super().__init__(self.BASE_LENGTH)


class Interested(Message):
    """ <len=0001><msg_id=2> """
    MESSAGE_ID = 2
    BASE_LENGTH = 1

    def __init__(self):
        super().__init__(self.BASE_LENGTH)


class NotInterested(Message):
    """ <len=0001><msg_id=3> """
    MESSAGE_ID = 3
    BASE_LENGTH = 1

    def __init__(self):
        super().__init__(self.BASE_LENGTH)


class Have(Message):
    """ <len=0005><msg_id=4><piece_index=I> """
    MESSAGE_ID = 4
    BASE_LENGTH = 5
    PAYLOAD_FMT = 'I'

    def __init__(self, piece_index: int):
        super().__init__(self.BASE_LENGTH, piece_index)

    @property
    def piece_index(self) -> int:
        return self.payload[0]


class Bitfield(Message):
    """ <len=0001 + X><msg_id=5><bitfield=?s> """
    MESSAGE_ID = 5
    BASE_LENGTH = 1
    PAYLOAD_FMT = '{0.len}s'

    def __init__(self, bitfield: bytes):
        bitfield = _MessageBytes(bitfield)
        super().__init__(self.BASE_LENGTH + bitfield.len, bitfield)

    @property
    def bitfield(self) -> bytes:
        return self.payload[0]


class Request(Message):
    """ <len=0013><msg_id=6><index=I><begin=I><length=I> """
    MESSAGE_ID = 6
    BASE_LENGTH = 13
    PAYLOAD_FMT = '3I'

    def __init__(self, index: int, begin: int, length: int):
        super().__init__(self.BASE_LENGTH, index, begin, length)

    @property
    def index(self) -> int:
        return self.payload[0]

    @property
    def begin(self) -> int:
        return self.payload[1]

    @property
    def length(self) -> int:
        return self.payload[2]


class Piece(Message):
    """ <len=0009 + X><msg_id=7><index=I><begin=I><block=?s> """
    MESSAGE_ID = 7
    BASE_LENGTH = 9
    PAYLOAD_FMT = 'II{2.len}s'

    def __init__(self, index: int, begin: int, block: bytes):
        block = _MessageBytes(block)
        super().__init__(self.BASE_LENGTH + block.len, index, begin, block)

    @property
    def index(self) -> int:
        return self.payload[0]

    @property
    def begin(self) -> int:
        return self.payload[1]

    @property
    def block(self) -> bytes:
        return self.payload[2]


class Cancel(Message):
    """ <len=0013><msg_id=8><index=I><begin=I><length=I> """
    MESSAGE_ID = 8
    BASE_LENGTH = 13
    PAYLOAD_FMT = '3I'

    def __init__(self, index: int, begin: int, length: int):
        super().__init__(self.BASE_LENGTH, index, begin, length)

    @property
    def index(self) -> int:
        return self.payload[0]

    @property
    def begin(self) -> int:
        return self.payload[1]

    @property
    def length(self) -> int:
        return self.payload[2]


class Port(Message):
    """ <len=0003><msg_id=9><listen_port=H> """
    MESSAGE_ID = 9
    BASE_LENGTH = 3
    PAYLOAD_FMT = 'H'

    def __init__(self, listen_port):
        super().__init__(self.BASE_LENGTH, listen_port)

    @property
    def listen_port(self) -> int:
        return self.payload[0]


class Handshake(Message):
    """ <pstr_len=B><pstr=?s><reserved=Q><info_hash=20s><peer_id=20s> """
    PAYLOAD_FMT = '!B{0.len}sQ20s20s'

    def __init__(self, reserved: int, info_hash: bytes, peer_id: bytes,
                 version=1):
        pstr = _MessageBytes(PROTO_VER_MAP[version])
        # For handshakes length_prefix doubles as the pstr length byte.
        super().__init__(pstr.len, pstr, reserved, info_hash, peer_id)

    @classmethod
    def _from_bytes(cls, buf: bytes) -> 'Handshake':
        name_len = buf[0]
        proto_ver = PROTOCOL_VERS.get(name_len)
        if not proto_ver:
            raise HandshakeError(f'Unsupported protocol: '
                                 f'{buf[:16]}{"..." if len(buf) > 16 else ""}')
        return cls(*struct.unpack_from('!Q20s20s', buf, 1 + name_len),
                   proto_ver)

    def to_bytes(self) -> bytes:
        # No 4-byte length prefix here: PAYLOAD_FMT already starts with '!B'
        # for pstr_len, which is self.length_prefix.
        return struct.pack(self.PAYLOAD_FMT.format(*self.payload),
                           self.length_prefix, *self.payload)

    @property
    def reserved(self):
        return self.payload[1]

    @property
    def info_hash(self):
        return self.payload[2]

    @property
    def peer_id(self):
        return self.payload[3]


class PeerWireProtocol:
    """Blocking, synchronous client side of the peer-wire protocol."""
    KEEP_ALIVE_TIMEOUT = 120
    KEEP_ALIVE_BIAS = 5

    def __init__(self, peer_id: bytes, ip: str, port: int):
        self.peer_id = peer_id
        self.peer_addr = ip, port
        self.sock: Optional[sk.socket] = None

    def connect(self):
        """Open a TCP connection to the peer (IPv4 or IPv6, 5 s timeout)."""
        print(f'connect to peer {self.peer_addr}')
        ipa = ipaddress.ip_address(self.peer_addr[0])
        family = sk.AF_INET if ipa.version == 4 else sk.AF_INET6
        self.sock = sk.socket(family, sk.SOCK_STREAM)
        self.sock.settimeout(5)
        try:
            self.sock.connect(self.peer_addr)
        except sk.error:
            self.disconnect()
            raise

    def disconnect(self):
        """Close and forget the socket; safe to call when not connected."""
        if self.sock:
            self.sock.close()
            self.sock = None

    def _send_message(self, msg: Message) -> Message:
        """Send one message and parse the peer's reply.

        Disconnects (and re-raises) on socket errors or unparseable replies.
        """
        if not self.sock:
            self.connect()
        print(f'send {msg.__class__.__name__}')
        # BUGFIX: send() may perform a partial write; sendall() guarantees the
        # entire serialized message is transmitted.
        self.sock.sendall(msg.to_bytes())
        try:
            resp = self.sock.recv(65536)
            print(f'received {len(resp)} bytes')
        except sk.error:
            self.disconnect()
            raise
        try:
            return Message.from_bytes(resp)
            # TODO: extended by <reserved & (1 << 20)>
            # resp[<msg_len>:]
        except WrongMessageError as e:
            self.disconnect()
            raise ConnectionAbortedError(e)

    def handshake(self, info_hash: bytes, client_peer_id: bytes,
                  reserved: int = 0) -> Tuple[bytes, bytes]:
        """Perform the BEP 3 handshake; returns (peer_id, reserved) from it.

        Raises HandshakeError if the peer's id does not match the expected one.
        """
        resp = self._send_message(Handshake(reserved, info_hash,
                                            client_peer_id))
        # BUGFIX: BEP 3 says to sever the connection when the received peer_id
        # DIFFERS from the expected one; the original used `==`, i.e. raised
        # exactly when the handshake was correct.
        if self.peer_id and resp.peer_id and self.peer_id != resp.peer_id:
            self.disconnect()
            raise HandshakeError('Unexpected peer_id')
        return resp.peer_id, resp.reserved

    def bitfield(self, bitfield: bytes):
        """Send our Bitfield and return the peer's reply."""
        return self._send_message(Bitfield(bitfield))
0.70791
0.143158
import json
import os
from datetime import timedelta
from gettext import gettext, ngettext
from logging.config import dictConfig
from pathlib import Path

import arrow
import click
from flask import Flask
from werkzeug.middleware.shared_data import SharedDataMiddleware

from lemonade_soapbox import csrf, db, login_manager, migrate
from lemonade_soapbox.logging_config import logging_config
from lemonade_soapbox.helpers import JSONEncoder, truncate_html, weight
from lemonade_soapbox.models import (
    Article,
    List,
    ListItem,
    Review,
    Searchable,
    Tag,
    User,
)


def create_app(config_name=None):
    """Factory for the application.

    ``config_name`` selects the instance subdirectory (e.g. "development",
    "production"); it falls back to the FLASK_ENV environment variable, then
    "production". Returns the fully configured Flask app.
    """
    config_name = config_name or os.getenv("FLASK_ENV", "production")

    # Configure logging before creating the app
    logging_path = Path("instance", config_name, "logs")
    # NOTE(review): no parents=True here — assumes instance/<config_name>
    # already exists on disk; confirm deployment creates it beforehand.
    logging_path.mkdir(exist_ok=True)
    # Point every file-based handler at the per-environment log directory.
    for k in logging_config.get("handlers", {}):
        h = logging_config["handlers"][k]
        if "filename" in h:
            h["filename"] = logging_path / h["filename"]
    dictConfig(logging_config)

    app = Flask(
        "lemonade_soapbox",
        static_folder="assets",
        static_host=os.getenv("MAIN_HOST"),
        host_matching=True,
    )

    # Load instance-specific config, create search index dir
    app.instance_path = Path(app.instance_path, config_name)
    app.config["INDEX_PATH"] = app.instance_path / "index"
    app.config["INDEX_PATH"].mkdir(exist_ok=True)
    app.config.from_file(app.instance_path / "config.json", json.load)

    # Nginx handles proxying the media dir in production
    # This emulates it when developing with Flask's built-in server
    if app.config["ENV"] == "development" or app.testing:
        app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0
        app.wsgi_app = SharedDataMiddleware(
            app.wsgi_app, {"/media": str(app.instance_path / "media")}
        )

    # Initialize Flask extensions.
    csrf.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    login_manager.login_view = "admin.signin"
    login_manager.login_message_category = "error"
    migrate.init_app(app, db)

    # Names pre-loaded into the `flask shell` session.
    app.shell_context_processor(
        lambda: {
            'db': db,
            'Article': Article,
            'List': List,
            'ListItem': ListItem,
            'Review': Review,
            'Tag': Tag,
            'User': User,
        }
    )

    @app.cli.command()
    @click.argument("model")
    @click.option("--per-pass", default=100)
    def reindex(model, per_pass):
        """Rebuild the search index for a given model."""
        # Resolve the CLI string to one of the model classes imported above.
        model = globals().get(model)
        if not model:
            click.echo("Invalid model name.")
        elif not issubclass(model, Searchable):
            click.echo("Model is not Searchable.")
        else:
            model.build_index(per_pass=per_pass)
            click.echo("Indexing complete.")

    # Imported inside the factory — presumably to avoid circular imports at
    # module load time; TODO confirm.
    from lemonade_soapbox.views import admin, api, blog, frontend, lists, reviews

    # Lists live under both the main domain and the review domain.
    frontend.bp.register_blueprint(
        lists.bp, host=os.getenv('MAIN_HOST'), url_prefix='/lists'
    )
    reviews.bp.register_blueprint(
        lists.bp, host=os.getenv('REVIEW_HOST'), url_prefix='/lists'
    )
    # Register admin and API blueprints on both domains so we can log in to both
    # NOTE(review): despite the comment above, admin/api are registered on
    # MAIN_HOST only — confirm whether REVIEW_HOST registration is missing.
    app.register_blueprint(admin.bp, host=os.getenv('MAIN_HOST'), url_prefix='/meta')
    app.register_blueprint(api.bp, host=os.getenv('MAIN_HOST'), url_prefix='/api')
    app.register_blueprint(blog.bp, host=os.getenv('MAIN_HOST'), url_prefix='/blog')
    app.register_blueprint(frontend.bp, host=os.getenv('MAIN_HOST'), url_prefix='/')
    app.register_blueprint(reviews.bp, host=os.getenv('REVIEW_HOST'), url_prefix='/')

    # Configure Jinja env
    app.jinja_env.add_extension('jinja2.ext.i18n')
    app.jinja_env.filters.update({'truncate_html': truncate_html, 'weight': weight})
    app.jinja_env.globals.update({'arrow': arrow, 'timedelta': timedelta})
    app.jinja_env.install_gettext_callables(gettext, ngettext, newstyle=True)
    app.jinja_env.lstrip_blocks = True
    app.jinja_env.trim_blocks = True

    # Override Flask JSON encoder with our own
    app.json_encoder = JSONEncoder

    return app
lemonade_soapbox/create_app.py
import json import os from datetime import timedelta from gettext import gettext, ngettext from logging.config import dictConfig from pathlib import Path import arrow import click from flask import Flask from werkzeug.middleware.shared_data import SharedDataMiddleware from lemonade_soapbox import csrf, db, login_manager, migrate from lemonade_soapbox.logging_config import logging_config from lemonade_soapbox.helpers import JSONEncoder, truncate_html, weight from lemonade_soapbox.models import ( Article, List, ListItem, Review, Searchable, Tag, User, ) def create_app(config_name=None): """Factory for the application.""" config_name = config_name or os.getenv("FLASK_ENV", "production") # Configure logging before creating the app logging_path = Path("instance", config_name, "logs") logging_path.mkdir(exist_ok=True) for k in logging_config.get("handlers", {}): h = logging_config["handlers"][k] if "filename" in h: h["filename"] = logging_path / h["filename"] dictConfig(logging_config) app = Flask( "lemonade_soapbox", static_folder="assets", static_host=os.getenv("MAIN_HOST"), host_matching=True, ) # Load instance-specific config, create search index dir app.instance_path = Path(app.instance_path, config_name) app.config["INDEX_PATH"] = app.instance_path / "index" app.config["INDEX_PATH"].mkdir(exist_ok=True) app.config.from_file(app.instance_path / "config.json", json.load) # Nginx handles proxying the media dir in production # This emulates it when developing with Flask's built-in server if app.config["ENV"] == "development" or app.testing: app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0 app.wsgi_app = SharedDataMiddleware( app.wsgi_app, {"/media": str(app.instance_path / "media")} ) csrf.init_app(app) db.init_app(app) login_manager.init_app(app) login_manager.login_view = "admin.signin" login_manager.login_message_category = "error" migrate.init_app(app, db) app.shell_context_processor( lambda: { 'db': db, 'Article': Article, 'List': List, 'ListItem': ListItem, 'Review': 
Review, 'Tag': Tag, 'User': User, } ) @app.cli.command() @click.argument("model") @click.option("--per-pass", default=100) def reindex(model, per_pass): """Rebuild the search index for a given model.""" model = globals().get(model) if not model: click.echo("Invalid model name.") elif not issubclass(model, Searchable): click.echo("Model is not Searchable.") else: model.build_index(per_pass=per_pass) click.echo("Indexing complete.") from lemonade_soapbox.views import admin, api, blog, frontend, lists, reviews frontend.bp.register_blueprint( lists.bp, host=os.getenv('MAIN_HOST'), url_prefix='/lists' ) reviews.bp.register_blueprint( lists.bp, host=os.getenv('REVIEW_HOST'), url_prefix='/lists' ) # Register admin and API blueprints on both domains so we can log in to both app.register_blueprint(admin.bp, host=os.getenv('MAIN_HOST'), url_prefix='/meta') app.register_blueprint(api.bp, host=os.getenv('MAIN_HOST'), url_prefix='/api') app.register_blueprint(blog.bp, host=os.getenv('MAIN_HOST'), url_prefix='/blog') app.register_blueprint(frontend.bp, host=os.getenv('MAIN_HOST'), url_prefix='/') app.register_blueprint(reviews.bp, host=os.getenv('REVIEW_HOST'), url_prefix='/') # Configure Jinja env app.jinja_env.add_extension('jinja2.ext.i18n') app.jinja_env.filters.update({'truncate_html': truncate_html, 'weight': weight}) app.jinja_env.globals.update({'arrow': arrow, 'timedelta': timedelta}) app.jinja_env.install_gettext_callables(gettext, ngettext, newstyle=True) app.jinja_env.lstrip_blocks = True app.jinja_env.trim_blocks = True # Override Flask JSON encoder with our own app.json_encoder = JSONEncoder return app
0.497315
0.043063
import logging
from django.conf import settings
from api.ingest.source import Source
from api.rdf.namespace import RDFLIB_FORMAT_DIC, find_valueset
from api.rdf.association_model import *
from api.rdf.rdf_source import RDFSource
import api.lookup.lookup_elasticsearch as lookup_es
import pandas as pd

logger = logging.getLogger(__name__)

# Root URL of the HPO annotation ("hpoa") pipeline downloads.
HPO_PIPELINE_BASE_URL = 'http://purl.obolibrary.org/obo/hp/hpoa/'


class HPODiseasePhenoDS(RDFSource):
    """Transforms the HPO ``phenotype.hpoa`` disease-phenotype annotation
    file into RDF phenotypic-association triples.

    Workflow: ``fetch()`` loads the TSV into a DataFrame, ``map()`` rewrites
    identifier columns into full IRIs and builds one association per
    (phenotype, disease) pair, ``write()`` serializes the triple store.
    """

    def __init__(self, target_dir):
        super().__init__('hpo_disease_phenotypes', target_dir)
        # Source annotation file from the HPO pipeline.
        self.url = f'{HPO_PIPELINE_BASE_URL}phenotype.hpoa'
        self.df = None  # populated by fetch()
        self.rdf_filename = "hpo_diseasephenotype"
        # "<HPO_ID>:<disease IRI>" -> association resource; merges duplicate
        # rows describing the same phenotype/disease pair.
        self.pheno_disease_dict = {}

    def fetch(self):
        """Download phenotype.hpoa into self.df (4 header lines skipped)."""
        logger.info("Started reading dataset: %s", self.name)
        self.df = pd.read_csv(self.url, sep='\t', skiprows=4)
        logger.info("Finished reading dataset: assoications=%d", self.df.size)

    def map(self):
        """Rewrite identifier columns into IRIs and emit one association per row."""
        # "HP:0000123" -> "HP_0000123" so it can be appended to the OBO namespace.
        self.df['HPO_ID'] = self.df['HPO_ID'].replace(regex=[':'], value='_')
        # Expand disease CURIE prefixes into full namespace IRIs.
        self.df['#DatabaseID'] = self.df['#DatabaseID'].replace(regex=['DECIPHER:'], value=DECIPHER.uri)
        self.df['#DatabaseID'] = self.df['#DatabaseID'].replace(regex=['OMIM:'], value=OMIM.uri)
        self.df['#DatabaseID'] = self.df['#DatabaseID'].replace(regex=['ORPHA:'], value=ORPHA.uri)
        # Same expansion for the Reference column (citations).
        self.df.Reference = self.df.Reference.replace(regex=['DECIPHER:'], value=DECIPHER.uri)
        self.df.Reference = self.df.Reference.replace(regex=['OMIM:'], value=OMIM.uri)
        self.df.Reference = self.df.Reference.replace(regex=['ORPHA:'], value=ORPHA.uri)
        self.df.Reference = self.df.Reference.replace(regex=['PMID:'], value=PMID.uri)
        self.df.Reference = self.df.Reference.replace(regex=['ISBN-13:'], value=ISBN.uri)
        self.df.Reference = self.df.Reference.replace(regex=['ISBN-10:'], value=ISBN.uri)
        # Normalize missing references to empty strings.
        self.df.Reference = self.df.Reference.astype(str).replace(regex=['nan'], value='')
        self.df.Biocuration = self.df.Biocuration.astype(str)
        logger.info('head: %s', self.df.head())
        for index, row in self.df.iterrows():
            self.map_association(row)
        self.resolve_display()
        logger.info("Finished mapping data: assoications=%d", self.df.size)

    def write(self):
        """Serialize the store to <target_dir>/<rdf_filename>.<ext> and clear it."""
        # NOTE(review): "fo dataset" typo left as-is (runtime log string).
        logger.info("Writing rdf fo dataset%s", self.name)
        self.store.serialize(f'{self.target_dir}/{self.rdf_filename}.{self.rdf_ext}',
                             format=settings.EXPORT_FORMAT, max_depth=3)
        self.store.remove((None, None, None))
        del self.df
        # NOTE(review): len(self.store) is taken AFTER remove(...), so the
        # logged size is always 0 — confirm whether this should log the
        # pre-removal size.
        logger.info("Finished rdf writting for %s with size:%d", self.name, len(self.store))

    def map_association(self, row):
        """Create (or extend) the phenotypic association for one TSV row."""
        phenotype = self.store.resource(str(OBO.uri) + row['HPO_ID'].strip())
        phenotype.add(RDF.type, PHENO.Phenotype)
        diseaseRes = self.store.resource(row['#DatabaseID'].strip())
        diseaseRes.add(RDF.type, PHENO.Disease)
        dict_key = row['HPO_ID'] + ":" + row['#DatabaseID']
        association = None
        if dict_key not in self.pheno_disease_dict:
            association = create_phenotypic_association(self.store, diseaseRes, phenotype)
            # Map the HPOA evidence code onto an ECO class.
            evidence = None
            if 'IEA' in row['Evidence']:
                evidence = OBO.ECO_0000501
            elif 'PCS' in row['Evidence']:
                evidence = OBO.ECO_0006016
            elif 'ICS' in row['Evidence']:
                evidence = OBO.ECO_0006018
            elif 'TAS' in row['Evidence']:
                evidence = OBO.ECO_0000033
            # NOTE(review): evidence stays None for any other code and
            # association.add(OBO.RO_0002558, None) would then fail — confirm
            # the input only ever contains IEA/PCS/ICS/TAS.
            association.add(OBO.RO_0002558, evidence)
            self.pheno_disease_dict[dict_key] = association
        else:
            association = self.pheno_disease_dict[dict_key]
        # Biocuration entries look like "HPO:curator[YYYY-MM-DD]" separated by ';'.
        row.Biocuration = row.Biocuration.split(';')
        creator = []
        created_on = None
        # NOTE(review): creator/created_on are overwritten on every pass, so
        # only the LAST biocuration entry is retained — TODO confirm intended.
        for creator_field in row.Biocuration:
            creator = (creator_field if creator_field.find('[') == -1
                       else creator_field[:creator_field.find('[')])
            created_on = (creator_field[creator_field.find('[') + 1: len(creator_field) - 1]
                          if creator_field.find('[') > -1 else None)
        # Always cite the HPO flagship publication as a source.
        sources = ['https://pubmed.ncbi.nlm.nih.gov/30476213']
        for ref in row.Reference.split(";"):
            # Disease-database references already serve as the subject IRI.
            if OMIM.uri in ref or DECIPHER.uri in ref or ORPHA.uri in ref:
                continue
            sources.append(ref)
        add_association_provenance(self.store, association, creator=creator,
                                   created_on=created_on, source=sources)
        self.add_association(association)

    def resolve_display(self):
        """Attach human-readable labels to disease IRIs via Elasticsearch lookups."""
        diseases = list(set(self.store.subjects(RDF.type, PHENO.Disease)))
        diseases_iris = list(map(lambda i: str(i), diseases))
        # OMIM diseases
        mim_disease = list(set(filter(lambda x: find_valueset(x) == 'OMIM', diseases_iris)))
        mim_indices = lookup_es.find_entity_by_iris(mim_disease, 'OMIM')
        logger.info("Resolving iris; total:%d|found:%d", len(mim_disease), len(mim_indices))
        self.add_label(mim_disease, mim_indices)
        # DECIPHER diseases
        decipher_disease = list(set(filter(lambda x: find_valueset(x) == 'DECIPHER', diseases_iris)))
        decipher_indices = lookup_es.find_entity_by_iris(decipher_disease, 'DECIPHER')
        logger.info("Resolving iris; total:%d|found:%d", len(decipher_disease), len(decipher_indices))
        self.add_label(decipher_disease, decipher_indices)
        # Orphanet (ORDO) diseases
        ordo_disease = list(set(filter(lambda x: find_valueset(x) == 'ordo', diseases_iris)))
        ordo_indices = lookup_es.find_entity_by_iris(ordo_disease, 'ordo')
        logger.info("Resolving iris; total:%d|found:%d", len(ordo_disease), len(ordo_indices))
        self.add_label(ordo_disease, ordo_indices)
        self.add_phenotype_label()
        self.add_evidence_label()
api/transformers/hpo_disease_pheno_ds.py
import logging from django.conf import settings from api.ingest.source import Source from api.rdf.namespace import RDFLIB_FORMAT_DIC, find_valueset from api.rdf.association_model import * from api.rdf.rdf_source import RDFSource import api.lookup.lookup_elasticsearch as lookup_es import pandas as pd logger = logging.getLogger(__name__) HPO_PIPELINE_BASE_URL = 'http://purl.obolibrary.org/obo/hp/hpoa/' class HPODiseasePhenoDS(RDFSource): def __init__(self, target_dir): super().__init__('hpo_disease_phenotypes', target_dir) self.url = f'{HPO_PIPELINE_BASE_URL}phenotype.hpoa' self.df = None self.rdf_filename = "hpo_diseasephenotype" self.pheno_disease_dict = {} def fetch(self): logger.info("Started reading dataset: %s", self.name) self.df = pd.read_csv(self.url, sep='\t', skiprows=4) logger.info("Finished reading dataset: assoications=%d", self.df.size) def map(self): self.df['HPO_ID'] = self.df['HPO_ID'].replace(regex=[':'], value='_') self.df['#DatabaseID'] = self.df['#DatabaseID'].replace(regex=['DECIPHER:'], value=DECIPHER.uri) self.df['#DatabaseID'] = self.df['#DatabaseID'].replace(regex=['OMIM:'], value=OMIM.uri) self.df['#DatabaseID'] = self.df['#DatabaseID'].replace(regex=['ORPHA:'], value=ORPHA.uri) self.df.Reference = self.df.Reference.replace(regex=['DECIPHER:'], value=DECIPHER.uri) self.df.Reference = self.df.Reference.replace(regex=['OMIM:'], value=OMIM.uri) self.df.Reference = self.df.Reference.replace(regex=['ORPHA:'], value=ORPHA.uri) self.df.Reference = self.df.Reference.replace(regex=['PMID:'], value=PMID.uri) self.df.Reference = self.df.Reference.replace(regex=['ISBN-13:'], value=ISBN.uri) self.df.Reference = self.df.Reference.replace(regex=['ISBN-10:'], value=ISBN.uri) self.df.Reference = self.df.Reference.astype(str).replace(regex=['nan'], value='') self.df.Biocuration = self.df.Biocuration.astype(str) logger.info('head: %s', self.df.head()) for index, row in self.df.iterrows(): self.map_association(row) self.resolve_display() logger.info("Finished 
mapping data: assoications=%d", self.df.size) def write(self): logger.info("Writing rdf fo dataset%s", self.name) self.store.serialize(f'{self.target_dir}/{self.rdf_filename}.{self.rdf_ext}', format=settings.EXPORT_FORMAT, max_depth=3) self.store.remove((None, None, None)) del self.df logger.info("Finished rdf writting for %s with size:%d", self.name, len(self.store)) def map_association(self, row): phenotype = self.store.resource(str(OBO.uri) + row['HPO_ID'].strip()) phenotype.add(RDF.type, PHENO.Phenotype) diseaseRes = self.store.resource(row['#DatabaseID'].strip()) diseaseRes.add(RDF.type, PHENO.Disease) dict_key = row['HPO_ID'] + ":" + row['#DatabaseID'] association = None if dict_key not in self.pheno_disease_dict: association = create_phenotypic_association(self.store, diseaseRes, phenotype) evidence = None if 'IEA' in row['Evidence']: evidence = OBO.ECO_0000501 elif 'PCS' in row['Evidence']: evidence = OBO.ECO_0006016 elif 'ICS' in row['Evidence']: evidence = OBO.ECO_0006018 elif 'TAS' in row['Evidence']: evidence = OBO.ECO_0000033 association.add(OBO.RO_0002558, evidence) self.pheno_disease_dict[dict_key] = association else: association = self.pheno_disease_dict[dict_key] row.Biocuration = row.Biocuration.split(';') creator = [] created_on = None for creator_field in row.Biocuration: creator = (creator_field if creator_field.find('[') == -1 else creator_field[:creator_field.find('[')]) created_on = (creator_field[creator_field.find('[') + 1: len(creator_field) - 1] if creator_field.find('[') > -1 else None) sources = ['https://pubmed.ncbi.nlm.nih.gov/30476213'] for ref in row.Reference.split(";"): if OMIM.uri in ref or DECIPHER.uri in ref or ORPHA.uri in ref: continue sources.append(ref) add_association_provenance(self.store, association, creator=creator, created_on=created_on, source=sources) self.add_association(association) def resolve_display(self): diseases = list(set(self.store.subjects(RDF.type, PHENO.Disease))) diseases_iris = list(map(lambda 
i:str(i), diseases)) mim_disease=list(set(filter(lambda x: find_valueset(x) == 'OMIM', diseases_iris))) mim_indices = lookup_es.find_entity_by_iris(mim_disease, 'OMIM') logger.info("Resolving iris; total:%d|found:%d", len(mim_disease), len(mim_indices)) self.add_label(mim_disease, mim_indices) decipher_disease=list(set(filter(lambda x: find_valueset(x) == 'DECIPHER', diseases_iris))) decipher_indices = lookup_es.find_entity_by_iris(decipher_disease, 'DECIPHER') logger.info("Resolving iris; total:%d|found:%d", len(decipher_disease), len(decipher_indices)) self.add_label(decipher_disease, decipher_indices) ordo_disease=list(set(filter(lambda x: find_valueset(x) == 'ordo', diseases_iris))) ordo_indices = lookup_es.find_entity_by_iris(ordo_disease, 'ordo') logger.info("Resolving iris; total:%d|found:%d", len(ordo_disease), len(ordo_indices)) self.add_label(ordo_disease, ordo_indices) self.add_phenotype_label() self.add_evidence_label()
0.369543
0.183265
from modules import (
    constants as c,
    data_controller as dc,
    data_format as df,
    data_handler as dh,
    modal_window as mw,
    swapi,
)


# API key names.
class _Key:
    HEADER = 'api_wars'
    INJECTION_CODE = 'injection code'
    MODAL_WINDOW = 'modal window'
    REQUEST = 'request'
    SUBJECT = 'subject'
    SWAPI = 'swapi'


KEY = _Key()


# -------------------------------------------------- api controller ---------------------------------------------------

def data_get(request_data: dict) -> dict:
    """Prepares data at the client's request."""
    if not request_data:
        return {'valueError': 'None request data.'}
    if KEY.HEADER not in request_data:
        return {'valueError': 'Wrong request data.'}

    header = request_data[KEY.HEADER]
    subject = _subject_get_proper(header[KEY.MODAL_WINDOW][KEY.SUBJECT])
    records = _data_prepare(subject, _data_get(header[KEY.SWAPI][KEY.REQUEST]))
    columns = _column_names_prepare(records[0])  # only the first record
    return {
        KEY.HEADER: {
            KEY.MODAL_WINDOW: {
                KEY.INJECTION_CODE: mw.html_table_prepare(records, columns)
            }
        }
    }


def _data_prepare(subject: str, row_data: list) -> tuple:
    """Prepares the data for create html code."""
    wanted_columns = dc.column_names_get_necessary(subject, modal_window=True)
    rows = dh.data_prepare(row_data, wanted_columns)
    return tuple(df.data_format(subject, rows))


def _subject_get_proper(subject: str) -> str:
    """Returns proper subject."""
    if subject in c.COLUMN_NAMES_WITH_PLANETS:
        return c.SUBJECT.PLANETS
    if subject in c.COLUMN_NAMES_WITH_PEOPLE:
        return c.SUBJECT.PEOPLE
    return subject


# --------------------------------------------------- api handlers ----------------------------------------------------

def _data_get(request_data: list) -> list:
    """Prepares data at the client's request."""
    return [swapi.get_data(url, full_url=True) for url in request_data]


def _column_names_prepare(data_record: dict) -> tuple:
    """Prepares column names."""
    return tuple(dh.column_names_prepare(list(data_record)))
modules/api.py
from modules import\ constants as c,\ data_controller as dc,\ data_format as df,\ data_handler as dh,\ modal_window as mw,\ swapi # API key names. class _Key: HEADER = 'api_wars' INJECTION_CODE = 'injection code' MODAL_WINDOW = 'modal window' REQUEST = 'request' SUBJECT = 'subject' SWAPI = 'swapi' KEY = _Key() # -------------------------------------------------- api controller --------------------------------------------------- def data_get(request_data: dict) -> dict: """ Prepares data at the client's request. """ if not request_data: return {'valueError': 'None request data.'} if KEY.HEADER not in request_data: return {'valueError': 'Wrong request data.'} subject = _subject_get_proper(request_data[KEY.HEADER][KEY.MODAL_WINDOW][KEY.SUBJECT]) swapi_response = _data_get(request_data[KEY.HEADER][KEY.SWAPI][KEY.REQUEST]) data_prepared = _data_prepare(subject, swapi_response) column_names_prepared = _column_names_prepare(data_prepared[0]) # only the first record return { KEY.HEADER: { KEY.MODAL_WINDOW: { KEY.INJECTION_CODE: mw.html_table_prepare(data_prepared, column_names_prepared) } } } def _data_prepare(subject: str, row_data: list) -> tuple: """ Prepares the data for create html code. """ headers_list = dc.column_names_get_necessary(subject, modal_window=True) prepared_data = dh.data_prepare(row_data, headers_list) prepared_data = df.data_format(subject, prepared_data) return tuple(prepared_data) def _subject_get_proper(subject: str) -> str: """ Returns proper subject. """ if subject in c.COLUMN_NAMES_WITH_PLANETS: subject = c.SUBJECT.PLANETS elif subject in c.COLUMN_NAMES_WITH_PEOPLE: subject = c.SUBJECT.PEOPLE return subject # --------------------------------------------------- api handlers ---------------------------------------------------- def _data_get(request_data: list) -> list: """ Prepares data at the client's request. 
""" response_data = [] for url in request_data: response_data.append(swapi.get_data(url, full_url=True)) return response_data def _column_names_prepare(data_record: dict) -> tuple: """ Prepares column names. """ column_names = [] for key in data_record: column_names.append(key) return tuple(dh.column_names_prepare(column_names))
0.62498
0.188473
# This library supports functionality needed for global transcription.
# The global transcription method works by considering transcribed words that
# co-occur from different sources, near in time (less than 0.1 seconds)
# within a conversation, and only keeps the one with the max confidence score,
# thus identifiying the speaker for that word.

# This library supports:
# * Automatic generation of subtitles
# * Finding consecutive values in a list
# * Identifying duplicate words in a pd.DataFrame within a time window threshold
# * Identify duplicates to remove in a pd.DataFrame, unveiling the speaker

from __future__ import (
    print_function,
    absolute_import,
    division,
    unicode_literals,
    with_statement,
)  # Py2 compatibility

import os
import pickle
import pandas as pd
from datetime import datetime, timedelta
from itertools import groupby
import numpy as np


def async_srt_format_timestamp(seconds):
    """Convert a float number of seconds into an SRT timestamp string,
    e.g. 3661.5 -> '01:01:01,500'."""
    seconds = float(seconds)
    stamp = str(timedelta(seconds=seconds))
    first, second, third = stamp.split(":")
    third = "{:.3f}".format(float(third))
    sec, milisec = third.split(".")
    third = ",".join([sec.zfill(2), milisec])
    return ":".join([first.zfill(2), second.zfill(2), third])


def write_subtitles(
    words,
    start_times,
    end_times,
    speakers=None,
    max_words=15,
    wfn=None,
    max_sub_duration=6,
    max_words_per_line=5,
):
    """Write an SRT subtitle stream for a transcribed word sequence.

    Args:
        words: transcribed words, in time order.
        start_times / end_times: per-word start/end offsets in seconds.
        speakers: optional per-word speaker ids; when given, each group of
            consecutive same-speaker words is prefixed with its label
            ("Curtis" for speaker 1, "Speaker N" otherwise).
        max_words: maximum number of words in one subtitle.
        wfn: output filename; when None, records go to stdout.
        max_sub_duration: maximum seconds a single subtitle may span.
        max_words_per_line: wrap width (in words) for speaker-labelled text.
    """
    # BUGFIX: the original opened `wfn` twice (truncate then append) and never
    # closed the append handle, leaking the file descriptor. A single 'w'
    # handle closed in `finally` has the same observable effect.
    wf = open(wfn, 'w') if wfn is not None else None
    try:
        cnt = 0
        i = 0
        # NOTE(review): the final word is never emitted (loop stops at
        # len(words) - 1 and slices words[i:j]); preserved as-is — confirm.
        while i < len(words) - 1:
            cnt += 1
            # Advance j as long as not too much time passes
            for j in range(i, i + min(max_words, len(words) - i)):
                if start_times[j] - start_times[i] >= max_sub_duration:
                    break
            start_time = start_times[i]
            end_time = end_times[j - 1]  # end of last word in this subtitle
            w = words[i:j]
            if speakers is None:
                # No speaker info: split the words over two plain lines.
                word_str = " ".join(w[:len(w) // 2]) + "\n" + " ".join(
                    w[len(w) // 2:])
            else:
                # Group consecutive words by speaker, label and wrap each group.
                s = speakers[i:j]
                who, word_idx = compute_running_consecutive_idx(s)
                word_str = "\n".join(
                    [("Speaker " + str(who[z]) if who[z] > 1 else "Curtis")
                     + ": "
                     + "\n\t".join(
                         [" ".join(w[word_idx[z]:word_idx[z + 1]][
                             k:min(k + max_words_per_line, j)])
                          for k in range(0, word_idx[z + 1] - word_idx[z],
                                         max_words_per_line)])
                     for z in range(len(who))])
            # One SRT record: counter, timing line, text, blank separator.
            print(cnt, file=wf)
            print(async_srt_format_timestamp(start_time), "-->",
                  async_srt_format_timestamp(end_time), file=wf)
            print(word_str, file=wf)
            print(file=wf)
            # Increment start (i) to end (j) of current subtitle
            i = j
    finally:
        if wf is not None:
            wf.close()


def compute_running_consecutive_idx(lst):
    '''Returns two lists, the first is a list of the consecutive values, and
    the second list is their associated starting indices. Feel free to zip
    the two lists together as a single list of tuples if you desire.
    '''
    consec = [(k, sum(1 for i in g)) for k, g in groupby(lst)]
    val, idx = list(zip(*consec))
    # Run lengths -> cumulative start offsets (with trailing total length).
    idx = np.cumsum([0] + list(idx))
    return list(val), list(idx)


def compute_duplicates_mask(df, threshold=0.1):
    '''This function returns a list of True/False boolean values, true
    whenever a pair of identical words occurs within the threshold
    (in seconds) of eachother, by different speakers.

    This is a helper function for find_which_duplicates_to_remove()
    You may find it helpful to read that docstring as well.'''
    combined_filter = None
    # Run the adjacency test twice so BOTH members of a duplicate pair get
    # flagged: offset 0 pads the front (marks the later word), offset -1 pads
    # near the end (marks the earlier word).
    for offset in [0, -1]:
        # Create a filter to determine if two adjacent words started within
        # threshold seconds.
        intertimes = np.ediff1d(df['startTime'])
        close_start = (
            abs(np.insert(intertimes, offset, 1)) <= threshold + 1e-6)
        # Create a filter to determine if two adjacent words ended within 0.1
        # seconds.
        intertimes = np.ediff1d(df['endTime'])
        close_end = (abs(np.insert(intertimes, offset, 1)) <= threshold + 1e-6)
        # Combine filters
        near_filter = close_start | close_end
        # Create a filter that checks if the speaker is different
        intertimes = np.ediff1d(df['speaker'])
        diff_speaker = (np.insert(intertimes, offset, 1) != 0)
        # Create a filter that checks if the word is the same
        intertimes = np.ediff1d(df['word'].apply(lambda x: hash(x)))
        same_word = (np.insert(intertimes, offset, 1) == 0)
        # Combine filters
        same_word_diff_speaker = same_word & diff_speaker
        both = near_filter & same_word_diff_speaker
        combined_filter = \
            both if combined_filter is None else combined_filter | both
    return combined_filter


def find_which_duplicates_to_remove(df, threshold_seconds=0.1):
    '''This identifies when the same word is picked up and transcribed by
    multiple speaker's microphones, even though it was only spoken once, by a
    single speaker. The additional duplicate words are removed from the
    pandas.DataFrame containing the transcriptions. The duplicate with the
    highest confidence is the word we keep.

    It will not remove duplicates spoken by the same speaker - for example
    "Okay, okay let's go" or "trains go choo choo" -- those examples will be
    kept in the transcriptions because the duplicated words "okay" and "choo"
    belong to the same speaker.

    An issue can occur if two speakers both legitimately say, for example,
    "Okay" within the time threshold (default is 0.1 seconds) of eachother.
    In this rare case, a legitimate spoken word may be removed.

    Returns a list of True/False boolean values with true whenever a word
    should be removed.
    '''
    if "duplicates" not in df.columns:
        df["duplicates"] = compute_duplicates_mask(df,
                                                   threshold=threshold_seconds)
    remove_mask = []
    prev_word = ""
    c_lst = []  # confidences of the current duplicate group
    for c, d, w in [tuple(z) for z in
                    df[["confidence", "duplicates", "word"]].itertuples(
                        index=False)]:
        same_word_as_prev = prev_word == w
        if d:
            if same_word_as_prev:
                # Same word group
                c_lst.append(c)
            else:
                # New duplicate group: flush the previous one, keeping only
                # the highest-confidence member.
                remove_mask = remove_mask + [z != max(c_lst) for z in c_lst]
                c_lst = [c]
        else:
            # Non-duplicate word: flush any open group, then keep this word.
            remove_mask = remove_mask + [z != max(c_lst) for z in c_lst] + [
                False]
            c_lst = []
        prev_word = w
    # Added the last group which gets missed since we don't add until we
    # detect a new group, and there's no new group after the last group.
    remove_mask = remove_mask + [z != max(c_lst) for z in c_lst]
    return remove_mask
egocom/transcription.py
# This library supports functionality needed for global transcription. # The global transcription method works by considering transcribed words that # co-occur from different sources, near in time (less than 0.1 seconds) # within a conversation, and only keeps the one with the max confidence score, # thus identifiying the speaker for that word. # This library supports: # * Automatic generation of subtitles # * Finding consecutive values in a list # * Identifying duplicate words in a pd.DataFrame within a time window threshold # * Identify duplicates to remove in a pd.DataFrame, unveiling the speaker from __future__ import ( print_function, absolute_import, division, unicode_literals, with_statement, ) # Py2 compatibility import os import pickle import pandas as pd from datetime import datetime, timedelta from itertools import groupby import numpy as np # In[2]: def async_srt_format_timestamp(seconds): seconds = float(seconds) stamp = str(timedelta(seconds=seconds)) first, second, third = stamp.split(":") third = "{:.3f}".format(float(third)) sec, milisec = third.split(".") third = ",".join([sec.zfill(2), milisec]) return ":".join([first.zfill(2), second.zfill(2), third]) def write_subtitles( words, start_times, end_times, speakers=None, max_words=15, wfn=None, max_sub_duration=6, max_words_per_line=5, ): if wfn is not None: with open(wfn, 'w') as f: f.write('') wf = open(wfn, 'a') else: wf = None cnt = 0 i = 0 while i < len(words) - 1: cnt += 1 # Advance i as long as not too much time passes for j in range(i, i + min(max_words, len(words) - i)): if start_times[j] - start_times[i] >= max_sub_duration: break start_time = start_times[i] end_time = end_times[:j][-1] w = words[i:j] if speakers is None: word_str = " ".join(w[:len(w) // 2]) + "\n" + " ".join( w[len(w) // 2:]) else: s = speakers[i:j] who, word_idx = compute_running_consecutive_idx(s) word_str = "\n".join([("Speaker " + str(who[z]) if who[z] > 1 else "Curtis") + ": " + "\n\t".join( [" 
".join(w[word_idx[z]:word_idx[z + 1]][ i:min(i + max_words_per_line, j)]) for i in range(0, word_idx[z + 1] - word_idx[z], max_words_per_line)]) for z in range(len(who))]) print(cnt, file=wf) print(async_srt_format_timestamp(start_time), "-->", async_srt_format_timestamp(end_time), file=wf) print(word_str, file=wf) print(file=wf) # Increment start (i) to end (j) of current subtitle i = j # In[3]: def compute_running_consecutive_idx(lst): '''Returns two lists, the first is a list of the consecutive values, and the second list is their associated starting indices. Feel free to zip the two lists together as a single list of tuples if you desire. ''' consec = [(k, sum(1 for i in g)) for k, g in groupby(lst)] val, idx = list(zip(*consec)) idx = np.cumsum([0] + list(idx)) return list(val), list(idx) def compute_duplicates_mask(df, threshold=0.1): '''This function returns a list of True/False boolean values, true whenever a pair of identical words occurs within the threshold (in seconds) of eachother, by different speakers. This is a helper function for find_which_duplicates_to_remove() You may find it helpful to read that docstring as well.''' combined_filter = None for offset in [0, -1]: # Create a filter to determine if two adjacent words started within # threshold seconds. intertimes = np.ediff1d(df['startTime']) close_start = ( abs(np.insert(intertimes, offset, 1)) <= threshold + 1e-6) # Create a filter to determine if two adjacent words ended within 0.1 # seconds. 
intertimes = np.ediff1d(df['endTime']) close_end = (abs(np.insert(intertimes, offset, 1)) <= threshold + 1e-6) # Combine filters near_filter = close_start | close_end # Create a filter that checks if the speaker is different intertimes = np.ediff1d(df['speaker']) diff_speaker = (np.insert(intertimes, offset, 1) != 0) # Create a filter that checks if the word is the same intertimes = np.ediff1d(df['word'].apply(lambda x: hash(x))) same_word = (np.insert(intertimes, offset, 1) == 0) # Combine filters same_word_diff_speaker = same_word & diff_speaker both = near_filter & same_word_diff_speaker combined_filter = \ both if combined_filter is None else combined_filter | both return combined_filter def find_which_duplicates_to_remove(df, threshold_seconds=0.1): '''This identifies when the same word is picked up and transcribed by multiple speaker's microphones, even though it was only spoken once, by a single speaker. The additional duplicate words are removed from the pandas.DataFrame containing the transcriptions. The duplicate with the highest confidence is the word we keep. It will not remove duplicates spoken by the same speaker - for example "Okay, okay let's go" or "trains go choo choo" -- those examples will be kept in the transcriptions because the duplicated words "okay" and "choo" belong to the same speaker. An issue can occur if two speakers both legitimately say, for example, "Okay" within the time threshold (default is 0.1 seconds) of eachother. In this rare case, a legitimate spoken word may be removed. Returns a list of True/False boolean values with true whenever a word should be removed. 
''' if "duplicates" not in df.columns: df["duplicates"] = compute_duplicates_mask(df, threshold=threshold_seconds) remove_mask = [] prev_word = "" c_lst = [] for c, d, w in [tuple(z) for z in df[["confidence", "duplicates", "word"]].itertuples( index=False)]: same_word_as_prev = prev_word == w if d: if same_word_as_prev: # Same word group c_lst.append(c) else: remove_mask = remove_mask + [z != max(c_lst) for z in c_lst] c_lst = [c] else: remove_mask = remove_mask + [z != max(c_lst) for z in c_lst] + [ False] c_lst = [] prev_word = w # Added the last group which gets missed since we don't add until we # detect a new group, and there's no new group after the last group. remove_mask = remove_mask + [z != max(c_lst) for z in c_lst] return remove_mask
0.687735
0.376996
from abc import ABC, abstractmethod from copy import deepcopy, copy from typing import Tuple, Optional, Callable, Sequence import numpy as np from scipy.interpolate import interpn from scipy.stats import multivariate_normal, beta from pararealml.core.constrained_problem import ConstrainedProblem from pararealml.core.constraint import apply_constraints_along_last_axis from pararealml.core.mesh import to_cartesian_coordinates VectorizedInitialConditionFunction = \ Callable[[Optional[np.ndarray]], np.ndarray] class InitialCondition(ABC): """ A base class for initial conditions. """ @abstractmethod def y_0(self, x: Optional[np.ndarray]) -> np.ndarray: """ Returns the initial value of y at the points in the spatial domain defined by x. :param x: a 2D array (n, x_dimension) of the spatial coordinates for PDEs and None for ODEs :return: a 2D array (n, y_dimension) of the initial value of y at the coordinates for PDEs and a 1D array (y_dimension) for ODEs """ @abstractmethod def discrete_y_0( self, vertex_oriented: Optional[bool] = None) -> np.ndarray: """ Returns the discretized initial values of y evaluated at the vertices or cell centers of the spatial mesh. :param vertex_oriented: whether the initial conditions are to be evaluated at the vertices or cell centers of the spatial mesh :return: the discretized initial values """ class DiscreteInitialCondition(InitialCondition): """ An initial condition defined by a fixed array of values. 
""" def __init__( self, cp: ConstrainedProblem, y_0: np.ndarray, vertex_oriented: Optional[bool] = None, interpolation_method: str = 'linear'): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param y_0: the array containing the initial values of y over a spatial mesh (which may be 0 dimensional in case of an ODE) :param vertex_oriented: whether the initial conditions are evaluated at the vertices or cell centers of the spatial mesh; it the constrained problem is an ODE, it can be None :param interpolation_method: the interpolation method to use to calculate values that do not exactly fall on points of the y_0 grid; if the constrained problem is based on an ODE, it can be None """ if cp.differential_equation.x_dimension and vertex_oriented is None: raise ValueError('vertex orientation must be defined for PDEs') if y_0.shape != cp.y_shape(vertex_oriented): raise ValueError( f'discrete initial value shape {y_0.shape} must match ' 'constrained problem solution shape ' f'{cp.y_shape(vertex_oriented)}') self._cp = cp self._y_0 = np.copy(y_0) self._vertex_oriented = vertex_oriented self._interpolation_method = interpolation_method if vertex_oriented: apply_constraints_along_last_axis( cp.static_y_vertex_constraints, self._y_0) def y_0(self, x: Optional[np.ndarray]) -> np.ndarray: if not self._cp.differential_equation.x_dimension: return np.copy(self._y_0) return interpn( self._cp.mesh.axis_coordinates(self._vertex_oriented), self._y_0, x, method=self._interpolation_method, bounds_error=False, fill_value=None) def discrete_y_0( self, vertex_oriented: Optional[bool] = None) -> np.ndarray: if vertex_oriented is None: vertex_oriented = self._vertex_oriented if not self._cp.differential_equation.x_dimension \ or vertex_oriented == self._vertex_oriented: return np.copy(self._y_0) y_0 = self.y_0(self._cp.mesh.all_index_coordinates(vertex_oriented)) if vertex_oriented: apply_constraints_along_last_axis( 
self._cp.static_y_vertex_constraints, y_0) return y_0 class ContinuousInitialCondition(InitialCondition): """ An initial condition defined by a function. """ def __init__( self, cp: ConstrainedProblem, y_0_func: VectorizedInitialConditionFunction): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param y_0_func: the initial value function that returns an array containing the values of y at the spatial coordinates defined by its input """ self._cp = cp self._y_0_func = y_0_func self._discrete_y_0_vertices = self._create_discrete_y_0(True) self._discrete_y_0_cells = self._create_discrete_y_0(False) def y_0(self, x: Optional[np.ndarray]) -> np.ndarray: return self._y_0_func(x) def discrete_y_0( self, vertex_oriented: Optional[bool] = None) -> np.ndarray: return np.copy( self._discrete_y_0_vertices if vertex_oriented else self._discrete_y_0_cells) def _create_discrete_y_0(self, vertex_oriented: bool) -> np.ndarray: """ Creates the discretized initial values of y evaluated at the vertices or cell centers of the spatial mesh. 
:param vertex_oriented: whether the initial conditions are to be evaluated at the vertices or cell centers of the spatial mesh :return: the discretized initial values """ diff_eq = self._cp.differential_equation if not diff_eq.x_dimension: y_0 = np.array(self._y_0_func(None)) if y_0.shape != self._cp.y_shape(): raise ValueError( 'expected initial condition function output shape to be ' f'{self._cp.y_shape()} but got {y_0.shape}') return y_0 x = self._cp.mesh.all_index_coordinates(vertex_oriented, flatten=True) y_0 = self._y_0_func(x) if y_0.shape != (len(x), diff_eq.y_dimension): raise ValueError( 'expected initial condition function output shape to be ' f'{(len(x), diff_eq.y_dimension)} but got {y_0.shape}') y_0 = y_0.reshape(self._cp.y_shape(vertex_oriented)) if vertex_oriented: apply_constraints_along_last_axis( self._cp.static_y_vertex_constraints, y_0) return y_0 def _convert_coordinates_to_cartesian(self, x: np.ndarray) -> np.ndarray: """ Converts the provided coordinates to Cartesian coordinates. :param x: the coordinates to convert :return: the converted Cartesian coordinates """ cartesian_x = to_cartesian_coordinates( [x[:, i] for i in range(x.shape[1])], self._cp.mesh.coordinate_system_type) return np.stack(cartesian_x, axis=-1) class GaussianInitialCondition(ContinuousInitialCondition): """ An initial condition defined explicitly by Gaussian probability density functions. 
""" def __init__( self, cp: ConstrainedProblem, means_and_covs: Sequence[Tuple[np.ndarray, np.ndarray]], multipliers: Optional[Sequence[float]] = None): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param means_and_covs: a sequence of tuples of mean vectors and covariance matrices defining the multivariate Gaussian PDFs corresponding to each element of y_0 :param multipliers: an array of multipliers for each element of the initial y values """ diff_eq = cp.differential_equation if not diff_eq.x_dimension: raise ValueError('constrained problem must be a PDE') if len(means_and_covs) != diff_eq.y_dimension: raise ValueError( f'number of means and covariances ({len(means_and_covs)}) ' f'must match number of y dimensions ({diff_eq.y_dimension})') for mean, cov in means_and_covs: if mean.shape != (diff_eq.x_dimension,): raise ValueError( f'expected mean shape to be {(diff_eq.x_dimension,)} but ' f'got {mean.shape}') if cov.shape != (diff_eq.x_dimension, diff_eq.x_dimension): raise ValueError( 'expected covariance shape to be ' f'{(diff_eq.x_dimension, diff_eq.x_dimension)} but got ' f'{cov.shape}') self._means_and_covs = deepcopy(means_and_covs) if multipliers is not None: if len(multipliers) != diff_eq.y_dimension: raise ValueError( f'length of multipliers ({len(multipliers)}) must match ' f'number of y dimensions ({diff_eq.y_dimension})') self._multipliers = copy(multipliers) else: self._multipliers = [1.] * diff_eq.y_dimension super(GaussianInitialCondition, self).__init__(cp, self._y_0) def _y_0(self, x: Optional[np.ndarray]) -> np.ndarray: """ Calculates and returns the values of the multivariate Gaussian PDFs corresponding to each element of y_0 at x. 
:param x: the spatial coordinates :return: the initial value of y at the coordinates """ cartesian_x = self._convert_coordinates_to_cartesian(x) y_0 = np.empty((len(x), self._cp.differential_equation.y_dimension)) for i in range(self._cp.differential_equation.y_dimension): mean, cov = self._means_and_covs[i] multiplier = self._multipliers[i] y_0_i = multivariate_normal.pdf(cartesian_x, mean=mean, cov=cov) y_0[:, i] = multiplier * y_0_i return y_0 class BetaInitialCondition(ContinuousInitialCondition): """ An initial condition defined explicitly by Beta probability density functions. """ def __init__( self, cp: ConstrainedProblem, alpha_and_betas: Sequence[Tuple[float, float]]): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param alpha_and_betas: a sequence of tuples containing the two parameters defining the beta distributions corresponding to each element of y_0 """ diff_eq = cp.differential_equation if diff_eq.x_dimension != 1: raise ValueError('constrained problem must be a 1D PDE') if len(alpha_and_betas) != diff_eq.y_dimension: raise ValueError( f'number of alphas and betas ({len(alpha_and_betas)}) must ' f'match number of y dimensions ({diff_eq.y_dimension})') self._alpha_and_betas = copy(alpha_and_betas) super(BetaInitialCondition, self).__init__(cp, self._y_0) def _y_0(self, x: Optional[np.ndarray]) -> np.ndarray: """ Calculates and returns the values of the beta PDFs corresponding to each element of y_0 at x. :param x: the spatial coordinates :return: the initial value of y at the coordinates """ return np.concatenate([ beta.pdf(x, a, b) for a, b in self._alpha_and_betas ], axis=-1) def vectorize_ic_function( ic_function: Callable[[Optional[Sequence[float]]], Sequence[float]] ) -> VectorizedInitialConditionFunction: """ Vectorizes an initial condition function that operates on a single coordinate sequence so that it can operate on an array of coordinate sequences. 
The implementation of the vectorized function is nothing more than a for loop over the rows of coordinate sequences in the x argument. :param ic_function: the non-vectorized initial condition function :return: the vectorized initial condition function """ def vectorized_ic_function(x: Optional[np.ndarray]) -> np.ndarray: if x is None: return np.array(ic_function(None)) values = [] for i in range(len(x)): values.append(ic_function(x[i])) return np.array(values) return vectorized_ic_function
pararealml/core/initial_condition.py
from abc import ABC, abstractmethod from copy import deepcopy, copy from typing import Tuple, Optional, Callable, Sequence import numpy as np from scipy.interpolate import interpn from scipy.stats import multivariate_normal, beta from pararealml.core.constrained_problem import ConstrainedProblem from pararealml.core.constraint import apply_constraints_along_last_axis from pararealml.core.mesh import to_cartesian_coordinates VectorizedInitialConditionFunction = \ Callable[[Optional[np.ndarray]], np.ndarray] class InitialCondition(ABC): """ A base class for initial conditions. """ @abstractmethod def y_0(self, x: Optional[np.ndarray]) -> np.ndarray: """ Returns the initial value of y at the points in the spatial domain defined by x. :param x: a 2D array (n, x_dimension) of the spatial coordinates for PDEs and None for ODEs :return: a 2D array (n, y_dimension) of the initial value of y at the coordinates for PDEs and a 1D array (y_dimension) for ODEs """ @abstractmethod def discrete_y_0( self, vertex_oriented: Optional[bool] = None) -> np.ndarray: """ Returns the discretized initial values of y evaluated at the vertices or cell centers of the spatial mesh. :param vertex_oriented: whether the initial conditions are to be evaluated at the vertices or cell centers of the spatial mesh :return: the discretized initial values """ class DiscreteInitialCondition(InitialCondition): """ An initial condition defined by a fixed array of values. 
""" def __init__( self, cp: ConstrainedProblem, y_0: np.ndarray, vertex_oriented: Optional[bool] = None, interpolation_method: str = 'linear'): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param y_0: the array containing the initial values of y over a spatial mesh (which may be 0 dimensional in case of an ODE) :param vertex_oriented: whether the initial conditions are evaluated at the vertices or cell centers of the spatial mesh; it the constrained problem is an ODE, it can be None :param interpolation_method: the interpolation method to use to calculate values that do not exactly fall on points of the y_0 grid; if the constrained problem is based on an ODE, it can be None """ if cp.differential_equation.x_dimension and vertex_oriented is None: raise ValueError('vertex orientation must be defined for PDEs') if y_0.shape != cp.y_shape(vertex_oriented): raise ValueError( f'discrete initial value shape {y_0.shape} must match ' 'constrained problem solution shape ' f'{cp.y_shape(vertex_oriented)}') self._cp = cp self._y_0 = np.copy(y_0) self._vertex_oriented = vertex_oriented self._interpolation_method = interpolation_method if vertex_oriented: apply_constraints_along_last_axis( cp.static_y_vertex_constraints, self._y_0) def y_0(self, x: Optional[np.ndarray]) -> np.ndarray: if not self._cp.differential_equation.x_dimension: return np.copy(self._y_0) return interpn( self._cp.mesh.axis_coordinates(self._vertex_oriented), self._y_0, x, method=self._interpolation_method, bounds_error=False, fill_value=None) def discrete_y_0( self, vertex_oriented: Optional[bool] = None) -> np.ndarray: if vertex_oriented is None: vertex_oriented = self._vertex_oriented if not self._cp.differential_equation.x_dimension \ or vertex_oriented == self._vertex_oriented: return np.copy(self._y_0) y_0 = self.y_0(self._cp.mesh.all_index_coordinates(vertex_oriented)) if vertex_oriented: apply_constraints_along_last_axis( 
self._cp.static_y_vertex_constraints, y_0) return y_0 class ContinuousInitialCondition(InitialCondition): """ An initial condition defined by a function. """ def __init__( self, cp: ConstrainedProblem, y_0_func: VectorizedInitialConditionFunction): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param y_0_func: the initial value function that returns an array containing the values of y at the spatial coordinates defined by its input """ self._cp = cp self._y_0_func = y_0_func self._discrete_y_0_vertices = self._create_discrete_y_0(True) self._discrete_y_0_cells = self._create_discrete_y_0(False) def y_0(self, x: Optional[np.ndarray]) -> np.ndarray: return self._y_0_func(x) def discrete_y_0( self, vertex_oriented: Optional[bool] = None) -> np.ndarray: return np.copy( self._discrete_y_0_vertices if vertex_oriented else self._discrete_y_0_cells) def _create_discrete_y_0(self, vertex_oriented: bool) -> np.ndarray: """ Creates the discretized initial values of y evaluated at the vertices or cell centers of the spatial mesh. 
:param vertex_oriented: whether the initial conditions are to be evaluated at the vertices or cell centers of the spatial mesh :return: the discretized initial values """ diff_eq = self._cp.differential_equation if not diff_eq.x_dimension: y_0 = np.array(self._y_0_func(None)) if y_0.shape != self._cp.y_shape(): raise ValueError( 'expected initial condition function output shape to be ' f'{self._cp.y_shape()} but got {y_0.shape}') return y_0 x = self._cp.mesh.all_index_coordinates(vertex_oriented, flatten=True) y_0 = self._y_0_func(x) if y_0.shape != (len(x), diff_eq.y_dimension): raise ValueError( 'expected initial condition function output shape to be ' f'{(len(x), diff_eq.y_dimension)} but got {y_0.shape}') y_0 = y_0.reshape(self._cp.y_shape(vertex_oriented)) if vertex_oriented: apply_constraints_along_last_axis( self._cp.static_y_vertex_constraints, y_0) return y_0 def _convert_coordinates_to_cartesian(self, x: np.ndarray) -> np.ndarray: """ Converts the provided coordinates to Cartesian coordinates. :param x: the coordinates to convert :return: the converted Cartesian coordinates """ cartesian_x = to_cartesian_coordinates( [x[:, i] for i in range(x.shape[1])], self._cp.mesh.coordinate_system_type) return np.stack(cartesian_x, axis=-1) class GaussianInitialCondition(ContinuousInitialCondition): """ An initial condition defined explicitly by Gaussian probability density functions. 
""" def __init__( self, cp: ConstrainedProblem, means_and_covs: Sequence[Tuple[np.ndarray, np.ndarray]], multipliers: Optional[Sequence[float]] = None): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param means_and_covs: a sequence of tuples of mean vectors and covariance matrices defining the multivariate Gaussian PDFs corresponding to each element of y_0 :param multipliers: an array of multipliers for each element of the initial y values """ diff_eq = cp.differential_equation if not diff_eq.x_dimension: raise ValueError('constrained problem must be a PDE') if len(means_and_covs) != diff_eq.y_dimension: raise ValueError( f'number of means and covariances ({len(means_and_covs)}) ' f'must match number of y dimensions ({diff_eq.y_dimension})') for mean, cov in means_and_covs: if mean.shape != (diff_eq.x_dimension,): raise ValueError( f'expected mean shape to be {(diff_eq.x_dimension,)} but ' f'got {mean.shape}') if cov.shape != (diff_eq.x_dimension, diff_eq.x_dimension): raise ValueError( 'expected covariance shape to be ' f'{(diff_eq.x_dimension, diff_eq.x_dimension)} but got ' f'{cov.shape}') self._means_and_covs = deepcopy(means_and_covs) if multipliers is not None: if len(multipliers) != diff_eq.y_dimension: raise ValueError( f'length of multipliers ({len(multipliers)}) must match ' f'number of y dimensions ({diff_eq.y_dimension})') self._multipliers = copy(multipliers) else: self._multipliers = [1.] * diff_eq.y_dimension super(GaussianInitialCondition, self).__init__(cp, self._y_0) def _y_0(self, x: Optional[np.ndarray]) -> np.ndarray: """ Calculates and returns the values of the multivariate Gaussian PDFs corresponding to each element of y_0 at x. 
:param x: the spatial coordinates :return: the initial value of y at the coordinates """ cartesian_x = self._convert_coordinates_to_cartesian(x) y_0 = np.empty((len(x), self._cp.differential_equation.y_dimension)) for i in range(self._cp.differential_equation.y_dimension): mean, cov = self._means_and_covs[i] multiplier = self._multipliers[i] y_0_i = multivariate_normal.pdf(cartesian_x, mean=mean, cov=cov) y_0[:, i] = multiplier * y_0_i return y_0 class BetaInitialCondition(ContinuousInitialCondition): """ An initial condition defined explicitly by Beta probability density functions. """ def __init__( self, cp: ConstrainedProblem, alpha_and_betas: Sequence[Tuple[float, float]]): """ :param cp: the constrained problem to turn into an initial value problem by providing the initial conditions for it :param alpha_and_betas: a sequence of tuples containing the two parameters defining the beta distributions corresponding to each element of y_0 """ diff_eq = cp.differential_equation if diff_eq.x_dimension != 1: raise ValueError('constrained problem must be a 1D PDE') if len(alpha_and_betas) != diff_eq.y_dimension: raise ValueError( f'number of alphas and betas ({len(alpha_and_betas)}) must ' f'match number of y dimensions ({diff_eq.y_dimension})') self._alpha_and_betas = copy(alpha_and_betas) super(BetaInitialCondition, self).__init__(cp, self._y_0) def _y_0(self, x: Optional[np.ndarray]) -> np.ndarray: """ Calculates and returns the values of the beta PDFs corresponding to each element of y_0 at x. :param x: the spatial coordinates :return: the initial value of y at the coordinates """ return np.concatenate([ beta.pdf(x, a, b) for a, b in self._alpha_and_betas ], axis=-1) def vectorize_ic_function( ic_function: Callable[[Optional[Sequence[float]]], Sequence[float]] ) -> VectorizedInitialConditionFunction: """ Vectorizes an initial condition function that operates on a single coordinate sequence so that it can operate on an array of coordinate sequences. 
The implementation of the vectorized function is nothing more than a for loop over the rows of coordinate sequences in the x argument. :param ic_function: the non-vectorized initial condition function :return: the vectorized initial condition function """ def vectorized_ic_function(x: Optional[np.ndarray]) -> np.ndarray: if x is None: return np.array(ic_function(None)) values = [] for i in range(len(x)): values.append(ic_function(x[i])) return np.array(values) return vectorized_ic_function
0.964018
0.745699
from contextlib import contextmanager from dataclasses import dataclass, field from typing import List from .sqla.models import User, APIKey @dataclass class SQLAlchemyLoginManagerOptions: basepath: str = "/" apipath: str = "/api/v1/" wspath: str = "ws:0.0.0.0:8080/api/v1/ws/" login_path: str = "/api/v1/login" logout_path: str = "/api/v1/logout" register_path: str = "/api/v1/register" login_html_path: str = "/login" logout_html_path: str = "/logout" register_html_path: str = "/register" port: str = "8080" UserClass: object = User APIKeyClass: object = APIKey user_cookie_name: str = "user" user_id_field: str = "id" user_username_field: str = "username" user_password_field: str = "password" user_extra_kwargs: List[str] = field(default_factory=lambda: ["email"]) apikey_id_field: str = "id" user_apikeys_field: str = "apikeys" apikey_user_field: str = "user" user_admin_field: str = "admin" user_admin_value: bool = True class _MockSession(object): def commit(self, *args, **kwargs): pass def add(self, *args, **kwargs): pass def refresh(self, *args, **kwargs): pass def rollback(self, *args, **kwargs): pass class LoginManager(object): def __init__(self, options): pass def is_admin(self, handler): pass def login(self, user, handler): pass def logout(self, handler): pass def register(self, handler): pass def apikeys(self, handler): pass @contextmanager def session(self): yield def get_user(self, id): return None def get_user_from_username_password(self, username, password): return None def get_user_from_key(self, key, secret): return None def new_apikey(self, handler): pass def delete_apikey(self, handler, key_id): pass class SQLAlchemyLoginManager(LoginManager): def __init__(self, sessionmaker, options): if not isinstance(options, SQLAlchemyLoginManagerOptions): raise Exception( "options argument must be an instance of SQLAlchemyLoginManagerOptions. 
Got: {}".format( type(options) ) ) self._sessionmaker = sessionmaker self._options = options @contextmanager def session(self): session = self._sessionmaker() try: yield session session.commit() except: # noqa: E722 session.rollback() raise finally: session.close() def is_admin(self, handler): with self.session() as session: user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: handler.current_user}) .first() ) if ( user and getattr(user, self._options.user_admin_field) == self._options.user_admin_value ): return True return False def get_user(self, id): with self.session() as session: return ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: id}) .first() ) def get_user_from_username_password(self, username, password): if not username or not password: return None with self.session() as session: user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_username_field: username}) .first() ) if ( user and (user or not password) and (getattr(user, self._options.user_password_field) == password) ): return user return None def get_user_from_key(self, key, secret): if not key or not secret: return None with self.session() as session: apikey = session.query(self._options.APIKeyClass).filter_by(key=key).first() if apikey.secret != secret: return None user = getattr(apikey, self._options.apikey_user_field) return user def login(self, handler, user): if user and getattr(user, self._options.user_id_field): handler.set_secure_cookie( self._options.user_cookie_name, str(getattr(user, self._options.user_id_field)), ) return { self._options.user_id_field: str( getattr(user, self._options.user_id_field) ), self._options.user_username_field: getattr( user, self._options.user_username_field ), } return {} def logout(self, handler): handler.clear_cookie(self._options.user_cookie_name) return {} def apikeys(self, handler): with self.session() as session: user = ( 
session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: int(handler.current_user)}) .first() ) if not user: return {} return { str(getattr(a, self._options.apikey_id_field)): a.to_dict() for a in getattr(user, self._options.user_apikeys_field) } def new_apikey(self, handler): with self.session() as session: user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: handler.current_user}) .first() ) # new key apikey = self._options.APIKeyClass( **{self._options.apikey_user_field: user} ) session.add(apikey) return apikey.to_dict() def delete_apikey(self, handler, key_id): with self.session() as session: # delete key user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: handler.current_user}) .first() ) key = ( session.query(self._options.APIKeyClass) .filter_by( **{self._options.apikey_id_field: int(handler.get_argument("id"))} ) .first() ) if getattr(key, self._options.apikey_user_field) == user: session.delete(key) return key.to_dict() return {} def register(self, handler, user_kwargs): with self.session() as session: new_user = self._options.UserClass(**user_kwargs) session.add(new_user) session.commit() session.refresh(new_user) return self.login(handler, new_user) def login_required(): def _wrapper(meth): def _wrapper2(self): pass return _wrapper2 return _wrapper
tornado_sqlalchemy_login/application.py
from contextlib import contextmanager from dataclasses import dataclass, field from typing import List from .sqla.models import User, APIKey @dataclass class SQLAlchemyLoginManagerOptions: basepath: str = "/" apipath: str = "/api/v1/" wspath: str = "ws:0.0.0.0:8080/api/v1/ws/" login_path: str = "/api/v1/login" logout_path: str = "/api/v1/logout" register_path: str = "/api/v1/register" login_html_path: str = "/login" logout_html_path: str = "/logout" register_html_path: str = "/register" port: str = "8080" UserClass: object = User APIKeyClass: object = APIKey user_cookie_name: str = "user" user_id_field: str = "id" user_username_field: str = "username" user_password_field: str = "password" user_extra_kwargs: List[str] = field(default_factory=lambda: ["email"]) apikey_id_field: str = "id" user_apikeys_field: str = "apikeys" apikey_user_field: str = "user" user_admin_field: str = "admin" user_admin_value: bool = True class _MockSession(object): def commit(self, *args, **kwargs): pass def add(self, *args, **kwargs): pass def refresh(self, *args, **kwargs): pass def rollback(self, *args, **kwargs): pass class LoginManager(object): def __init__(self, options): pass def is_admin(self, handler): pass def login(self, user, handler): pass def logout(self, handler): pass def register(self, handler): pass def apikeys(self, handler): pass @contextmanager def session(self): yield def get_user(self, id): return None def get_user_from_username_password(self, username, password): return None def get_user_from_key(self, key, secret): return None def new_apikey(self, handler): pass def delete_apikey(self, handler, key_id): pass class SQLAlchemyLoginManager(LoginManager): def __init__(self, sessionmaker, options): if not isinstance(options, SQLAlchemyLoginManagerOptions): raise Exception( "options argument must be an instance of SQLAlchemyLoginManagerOptions. 
Got: {}".format( type(options) ) ) self._sessionmaker = sessionmaker self._options = options @contextmanager def session(self): session = self._sessionmaker() try: yield session session.commit() except: # noqa: E722 session.rollback() raise finally: session.close() def is_admin(self, handler): with self.session() as session: user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: handler.current_user}) .first() ) if ( user and getattr(user, self._options.user_admin_field) == self._options.user_admin_value ): return True return False def get_user(self, id): with self.session() as session: return ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: id}) .first() ) def get_user_from_username_password(self, username, password): if not username or not password: return None with self.session() as session: user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_username_field: username}) .first() ) if ( user and (user or not password) and (getattr(user, self._options.user_password_field) == password) ): return user return None def get_user_from_key(self, key, secret): if not key or not secret: return None with self.session() as session: apikey = session.query(self._options.APIKeyClass).filter_by(key=key).first() if apikey.secret != secret: return None user = getattr(apikey, self._options.apikey_user_field) return user def login(self, handler, user): if user and getattr(user, self._options.user_id_field): handler.set_secure_cookie( self._options.user_cookie_name, str(getattr(user, self._options.user_id_field)), ) return { self._options.user_id_field: str( getattr(user, self._options.user_id_field) ), self._options.user_username_field: getattr( user, self._options.user_username_field ), } return {} def logout(self, handler): handler.clear_cookie(self._options.user_cookie_name) return {} def apikeys(self, handler): with self.session() as session: user = ( 
session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: int(handler.current_user)}) .first() ) if not user: return {} return { str(getattr(a, self._options.apikey_id_field)): a.to_dict() for a in getattr(user, self._options.user_apikeys_field) } def new_apikey(self, handler): with self.session() as session: user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: handler.current_user}) .first() ) # new key apikey = self._options.APIKeyClass( **{self._options.apikey_user_field: user} ) session.add(apikey) return apikey.to_dict() def delete_apikey(self, handler, key_id): with self.session() as session: # delete key user = ( session.query(self._options.UserClass) .filter_by(**{self._options.user_id_field: handler.current_user}) .first() ) key = ( session.query(self._options.APIKeyClass) .filter_by( **{self._options.apikey_id_field: int(handler.get_argument("id"))} ) .first() ) if getattr(key, self._options.apikey_user_field) == user: session.delete(key) return key.to_dict() return {} def register(self, handler, user_kwargs): with self.session() as session: new_user = self._options.UserClass(**user_kwargs) session.add(new_user) session.commit() session.refresh(new_user) return self.login(handler, new_user) def login_required(): def _wrapper(meth): def _wrapper2(self): pass return _wrapper2 return _wrapper
0.596668
0.062588
ted_iso2tag = { 'arb': 'ar', 'azj': 'az', #aze 'aze': 'az', #aze 'bel': 'be', 'bul': 'bg', 'ben': 'bn', 'bos': 'bs', 'ces': 'cs', 'dan': 'da', 'deu': 'de', 'ell': 'el', 'epo': 'eo', 'spa': 'es', 'est': 'et', 'eus': 'eu', 'pes': 'fa', #fas 'fin': 'fi', 'fra': 'fr', 'glg': 'gl', 'heb': 'he', 'hin': 'hi', 'hrv': 'hr', 'hun': 'hu', 'hye': 'hy', 'ind': 'id', 'ita': 'it', 'jpn': 'ja', 'kat': 'ka', 'kaz': 'kk', 'kor': 'ko', 'kur': 'ku', 'lit': 'lt', 'mkd': 'mk', 'khk': 'mn', #mon 'mon': 'mn', #mon 'mar': 'mr', 'zlm': 'ms', 'mya': 'my', 'nob': 'nb', 'nld': 'nl', 'pol': 'pl', 'por': 'pt', 'ron': 'ro', 'rus': 'ru', 'slk': 'sk', 'slv': 'sl', 'alb': 'sq', #sqi 'sqi': 'sq', #sqi 'srp': 'sr', 'swe': 'sv', 'tam': 'ta', 'tha': 'th', 'tur': 'tr', 'ukr': 'uk', 'urd': 'ur', 'vie': 'vi', 'cmn': 'zh' } ted_tag2iso = { 'ar': 'arb', 'az': 'azj', #aze 'be': 'bel', 'bg': 'bul', 'bn': 'ben', 'bs': 'bos', 'cs': 'ces', 'da': 'dan', 'de': 'deu', 'el': 'ell', #'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'pes', #fas 'fi': 'fin', 'fr': 'fra', 'gl': 'glg', 'he': 'heb', 'hi': 'hin', 'hr': 'hrv', 'hu': 'hun', 'hy': 'hye', 'id': 'ind', 'it': 'ita', 'ja': 'jpn', 'ka': 'kat', 'kk': 'kaz', 'ko': 'kor', 'ku': 'kur', 'lt': 'lit', 'mk': 'mkd', 'mn': 'mon', 'mr': 'mar', 'ms': 'zlm', 'my': 'mya', 'nb': 'nob', 'nl': 'nld', 'pl': 'pol', 'pt': 'por', 'ro': 'ron', 'ru': 'rus', 'sk': 'slk', 'sl': 'slv', 'sq': 'alb', #sqi 'sr': 'srp', 'sv': 'swe', 'ta': 'tam', 'th': 'tha', 'tr': 'tur', 'uk': 'ukr', 'ur': 'urd', 'vi': 'vie', 'zh': 'cmn' } ted_iso2name = { 'arb': 'Arabic', 'aze': 'Azerbaijani', 'azj': 'NorthAzerbaijani', 'bel': 'Belarusian', 'bul': 'Bulgarian', 'ben': 'Bengali', 'bos': 'Bosnian', 'ces': 'Czech', 'dan': 'Danish', 'deu': 'German', 'ell': 'Greek', #'epo': 'Esperanto', 'spa': 'Spanish', 'est': 'Estonian', 'eus': 'Basque', #'fas': 'Persian', 'pes': 'Persian', 'fin': 'Finnish', 'fra': 'French', 'glg': 'Galician', 'heb': 'Hebrew', 'hin': 'Hindi', 'hrv': 'Croatian', 'hun': 'Hungarian', 'hye': 
'Armenian', 'ind': 'Indonesian', 'ita': 'Italian', 'jpn': 'Japanese', 'kat': 'Georgian', 'kaz': 'Kazakh', 'kor': 'Korean', 'kur': 'Kurdish', 'lit': 'Lithuanian', 'mkd': 'Macedonian', 'mon': 'Mongolian', 'khk': 'HalhMongolian', 'mar': 'Marathi', 'zlm': 'Malay', 'mya': 'Burmese', 'nob': 'Nor. Bokmål', # 'NorwegianBokmål', 'nld': 'Dutch', 'pol': 'Polish', 'por': 'Portuguese', 'ron': 'Romanian', 'rus': 'Russian', 'slk': 'Slovak', 'slv': 'Slovenian', 'alb': 'Albanian', #sqi 'sqi': 'Albanian', 'srp': 'Serbian', 'swe': 'Swedish', 'tam': 'Tamil', 'tha': 'Thai', 'tur': 'Turkish', 'ukr': 'Ukrainian', 'urd': 'Urdu', 'vie': 'Vietnamese', 'cmn': 'Chinese' } #print(", ".join("\"%s\""%l for l in ted_iso2tag.keys())) # No Esperando! "epo" #ted_iso_langs = ["arb", "azj", "bel", "bul", "ben", "bos", "ces", "dan", "deu", "ell", "spa", "est", "eus", # "pes", "fin", "fra", "glg", "heb", "hin", "hrv", "hun", "hye", "ind", "ita", "jpn", "kat", "kaz", # "kor", "kur", "lit", "mkd", "khk", "mar", "zlm", "mya", "nob", "nld", "pol", "por", "ron", "rus", # "slk", "slv", "alb", "srp", "swe", "tam", "tha", "tur", "ukr", "urd", "vie", "cmn"] ted_iso_langs = ["arb", "aze", "bel", "bul", "ben", "bos", "ces", "dan", "deu", "ell", "spa", "est", "eus", "pes", "fin", "fra", "glg", "heb", "hin", "hrv", "hun", "hye", "ind", "ita", "jpn", "kat", "kaz", "kor", "kur", "lit", "mkd", "mon", "mar", "zlm", "mya", "nob", "nld", "pol", "por", "ron", "rus", "slk", "slv", "sqi", "srp", "swe", "tam", "tha", "tur", "ukr", "urd", "vie", "cmn"] ''' ara -> arb (Standard Arabic) zho -> cmn epo -> NO!!! (Esperanto) fas -> pes mon -> Mongolian is a macrolanguage, then... HalhMongolian (khk) or PeripheralMongolian (mvf)? aze -> azj or azb ? https://en.wikipedia.org/wiki/Azerbaijani_language#North_vs._South_Azerbaijani NorthAzerbaijani (azj) is spoken in Azerbajan... so? 
''' ted_tag2family = { 'ar': 'Afroasiatic', 'az': 'Turkic', 'be': 'Indo-European/Balto-Slavic', 'bg': 'Indo-European/Balto-Slavic', 'bn': 'Indo-European/Indo-Iranian', 'bs': 'Indo-European/Balto-Slavic', 'cs': 'Indo-European/Balto-Slavic', 'da': 'Indo-European/Germanic', 'de': 'Indo-European/Germanic', 'el': 'Indo-European/Hellenic', 'eo': 'N.A.', 'es': 'Indo-European/Italic/Romance', 'et': 'Uralic', 'eu': 'Vasconic?', 'fa': 'Indo-European/Indo-Iranian', 'fi': 'Uralic', 'fr': 'Indo-European/Italic/Romance', 'gl': 'Indo-European/Italic/Romance', 'he': 'Afroasiatic', 'hi': 'Indo-European/Indo-Iranian', 'hr': 'Indo-European/Balto-Slavic', 'hu': 'Uralic', 'hy': 'Indo-European/Armenian', 'id': 'Austronesian', 'it': 'Indo-European/Italic/Romance', 'ja': 'Japonic', 'ka': 'Kartvelian', 'kk': 'Turkic', 'ko': 'Koreanic', 'ku': 'Indo-European/Indo-Iranian', 'lt': 'Indo-European/Balto-Slavic', 'mk': 'Indo-European/Balto-Slavic', 'mn': 'Mongolic', 'mr': 'Indo-European/Indo-Iranian', 'ms': 'Austronesian', 'my': 'Sino-Tibetan', 'nb': 'Indo-European/Germanic', 'nl': 'Indo-European/Germanic', 'pl': 'Indo-European/Balto-Slavic', 'pt': 'Indo-European/Italic/Romance', 'ro': 'Indo-European/Italic/Romance', 'ru': 'Indo-European/Balto-Slavic', 'sk': 'Indo-European/Balto-Slavic', 'sl': 'Indo-European/Balto-Slavic', 'sq': 'Indo-European/Albanian', 'sr': 'Indo-European/Balto-Slavic', 'sv': 'Indo-European/Germanic', 'ta': 'Dravidian', 'th': 'Kra-Dai', 'tr': 'Turkic', 'uk': 'Indo-European/Balto-Slavic', 'ur': 'Indo-European/Indo-Iranian', 'vi': 'Austroasiatic', 'zh': 'Sino-Tibetan' } ted_family2tag = { 'Afroasiatic': ['ar', 'he'], 'Austroasiatic': ['vi'], 'Austronesian': ['id', 'ms'], 'Dravidian': ['ta'], 'Indo-European/Albanian': ['sq'], 'Indo-European/Armenian': ['hy'], 'Indo-European/Balto-Slavic': ['be', 'bg', 'bs', 'cs', 'hr', 'lt', 'mk', 'pl', 'ru', 'sk', 'sl', 'sr', 'uk'], 'Indo-European/Germanic': ['da', 'de', 'nb', 'nl', 'sv'], 'Indo-European/Hellenic': ['el'], 
'Indo-European/Indo-Iranian': ['bn', 'fa', 'hi', 'ku', 'mr', 'ur'], 'Indo-European/Italic/Romance': ['es', 'fr', 'gl', 'it', 'pt', 'ro'], 'Japonic': ['ja'], 'Kartvelian': ['ka'], 'Koreanic': ['ko'], 'Kra-Dai': ['th'], 'Mongolic': ['mn'], #'N.A.': ['eo'], 'Sino-Tibetan': ['my', 'zh'], 'Turkic': ['az', 'kk', 'tr'], 'Uralic': ['et', 'fi', 'hu'], 'Vasconic?': ['eu'] } ''' families = sorted(list(set(ted_tag2family.values()))) ted_family2tag = {} for fam in families: ted_family2tag[fam] = [] for k,v in ted_tag2family.items(): ted_family2tag[v].append(k) for fam in families: print("'%s': [%s]," % (fam, ", ".join(sorted(["'%s'"%l for l in ted_family2tag[fam]])))) ''' Baseline_Langs = { # Germanic 'eng': 'English', 'swe': 'Swedish', #no WIT 'dan': 'Danish', #no WIT 'deu': 'German', 'nld': 'Dutch', # Romanic 'ron': 'Romanian', 'fra': 'French', 'ita': 'Italian', 'spa': 'Spanish', 'por': 'Portuguese', # Balco-Slavic 'lav': 'Latvian', #no WIT 'lit': 'Lithuanian',#no WIT 'pol': 'Polish', 'slk': 'Slovak', 'ces': 'Czech', 'slv': 'Slovenian', 'bul': 'Bulgarian' } all_iso2name = { 'arb': 'Arabic', 'heb': 'Hebrew', 'kab': 'Kabyle', 'vie': 'Vietnamese', 'ind': 'Indonesian', 'zlm': 'Malay', 'tgl': 'Tagalog', 'kan': 'Kannada', 'mal': 'Malay', 'tam': 'Tamil', 'tel': 'Telugu', 'sqi': 'Albanian', 'hye': 'Armenian', 'bel': 'Belarusian', 'bos': 'Bosnian', 'bul': 'Bulgarian', 'hrv': 'Croatian', 'ces': 'Czech', 'lit': 'Lithuanian', 'mkd': 'Macedonian', 'pol': 'Polish', 'rus': 'Russian', 'srp': 'Serbian', 'slk': 'Slovak', 'slv': 'Slovenian', 'ukr': 'Ukrainian', 'dan': 'Danish', 'nld': 'Dutch', 'eng': 'English', 'deu': 'German', 'isl': 'Icelandic', 'nds': 'Low Saxon', 'nob': 'Norwegian Bokmål', 'swe': 'Swedish', 'ell': 'Greek', 'asm': 'Assamese', 'ben': 'Bengali', 'guj': 'Gujarati', 'hin': 'Hindi', 'kur': 'Kurdish', 'mar': 'Marathi', 'ori': 'Odia', 'pes': 'Persian', 'pan': 'Punjabi', 'urd': 'Urdu', 'fra': 'French', 'glg': 'Galician', 'ita': 'Italian', 'por': 'Portuguese', 'ron': 'Romanian', 
'spa': 'Spanish', 'jpn': 'Japanese', 'kat': 'Georgian', 'kor': 'Korean', 'tha': 'Thai', 'mon': 'Mongolian', 'shp': 'Shipibo-Konibo', 'mya': 'Burmese', 'cmn': 'Mandarin Chinese', 'mni': 'Manipuri', 'yue': 'Yue Chinese', 'aze': 'Azerbaijani', 'kaz': 'Kazakh', 'tur': 'Turkish', 'tuk': 'Turkmen', 'est': 'Estonian', 'fin': 'Finnish', 'hun': 'Hungarian', 'eus': 'Basque' } ted_tag2size = { 'kk': 3259, 'be': 4410, 'bn': 4552, 'eu': 5107, 'ms': 5120, 'bs': 5586, 'ur': 5858, 'az': 5863, 'ta': 6135, 'mn': 7461, 'mr': 9706, 'gl': 9870, 'ku': 10294, 'et': 10587, 'ka': 13010, 'nb': 15591, 'hi': 18517, 'sl': 19481, 'my': 20998, 'hy': 21060, 'fi': 23897, 'mk': 24993, 'lt': 41209, 'sq': 43823, 'da': 44269, 'pt': 50834, 'sv': 55696, 'sk': 60528, 'id': 85985, 'th': 96578, 'cs': 101614, 'uk': 106780, 'hr': 120148, 'el': 132254, 'sr': 134631, 'hu': 145039, 'fa': 148885, 'de': 165531, 'vi': 169731, 'bg': 172093, 'pl': 173813, 'ro': 178059, 'tr': 179930, 'nl': 181100, 'fr': 189444, 'es': 193075, 'zh': 197389, 'ja': 201421, 'it': 201575, 'ko': 202897, 'ru': 205458, 'he': 208693, 'ar': 211363 } all_iso_langs = list(all_iso2name.keys()) #print(" ".join(["<%s>|<%s>" % (ted_iso2tag[l], ted_iso2tag[l]) for l in ted_iso_langs]))
src/tedlangs.py
ted_iso2tag = { 'arb': 'ar', 'azj': 'az', #aze 'aze': 'az', #aze 'bel': 'be', 'bul': 'bg', 'ben': 'bn', 'bos': 'bs', 'ces': 'cs', 'dan': 'da', 'deu': 'de', 'ell': 'el', 'epo': 'eo', 'spa': 'es', 'est': 'et', 'eus': 'eu', 'pes': 'fa', #fas 'fin': 'fi', 'fra': 'fr', 'glg': 'gl', 'heb': 'he', 'hin': 'hi', 'hrv': 'hr', 'hun': 'hu', 'hye': 'hy', 'ind': 'id', 'ita': 'it', 'jpn': 'ja', 'kat': 'ka', 'kaz': 'kk', 'kor': 'ko', 'kur': 'ku', 'lit': 'lt', 'mkd': 'mk', 'khk': 'mn', #mon 'mon': 'mn', #mon 'mar': 'mr', 'zlm': 'ms', 'mya': 'my', 'nob': 'nb', 'nld': 'nl', 'pol': 'pl', 'por': 'pt', 'ron': 'ro', 'rus': 'ru', 'slk': 'sk', 'slv': 'sl', 'alb': 'sq', #sqi 'sqi': 'sq', #sqi 'srp': 'sr', 'swe': 'sv', 'tam': 'ta', 'tha': 'th', 'tur': 'tr', 'ukr': 'uk', 'urd': 'ur', 'vie': 'vi', 'cmn': 'zh' } ted_tag2iso = { 'ar': 'arb', 'az': 'azj', #aze 'be': 'bel', 'bg': 'bul', 'bn': 'ben', 'bs': 'bos', 'cs': 'ces', 'da': 'dan', 'de': 'deu', 'el': 'ell', #'eo': 'epo', 'es': 'spa', 'et': 'est', 'eu': 'eus', 'fa': 'pes', #fas 'fi': 'fin', 'fr': 'fra', 'gl': 'glg', 'he': 'heb', 'hi': 'hin', 'hr': 'hrv', 'hu': 'hun', 'hy': 'hye', 'id': 'ind', 'it': 'ita', 'ja': 'jpn', 'ka': 'kat', 'kk': 'kaz', 'ko': 'kor', 'ku': 'kur', 'lt': 'lit', 'mk': 'mkd', 'mn': 'mon', 'mr': 'mar', 'ms': 'zlm', 'my': 'mya', 'nb': 'nob', 'nl': 'nld', 'pl': 'pol', 'pt': 'por', 'ro': 'ron', 'ru': 'rus', 'sk': 'slk', 'sl': 'slv', 'sq': 'alb', #sqi 'sr': 'srp', 'sv': 'swe', 'ta': 'tam', 'th': 'tha', 'tr': 'tur', 'uk': 'ukr', 'ur': 'urd', 'vi': 'vie', 'zh': 'cmn' } ted_iso2name = { 'arb': 'Arabic', 'aze': 'Azerbaijani', 'azj': 'NorthAzerbaijani', 'bel': 'Belarusian', 'bul': 'Bulgarian', 'ben': 'Bengali', 'bos': 'Bosnian', 'ces': 'Czech', 'dan': 'Danish', 'deu': 'German', 'ell': 'Greek', #'epo': 'Esperanto', 'spa': 'Spanish', 'est': 'Estonian', 'eus': 'Basque', #'fas': 'Persian', 'pes': 'Persian', 'fin': 'Finnish', 'fra': 'French', 'glg': 'Galician', 'heb': 'Hebrew', 'hin': 'Hindi', 'hrv': 'Croatian', 'hun': 'Hungarian', 'hye': 
'Armenian', 'ind': 'Indonesian', 'ita': 'Italian', 'jpn': 'Japanese', 'kat': 'Georgian', 'kaz': 'Kazakh', 'kor': 'Korean', 'kur': 'Kurdish', 'lit': 'Lithuanian', 'mkd': 'Macedonian', 'mon': 'Mongolian', 'khk': 'HalhMongolian', 'mar': 'Marathi', 'zlm': 'Malay', 'mya': 'Burmese', 'nob': 'Nor. Bokmål', # 'NorwegianBokmål', 'nld': 'Dutch', 'pol': 'Polish', 'por': 'Portuguese', 'ron': 'Romanian', 'rus': 'Russian', 'slk': 'Slovak', 'slv': 'Slovenian', 'alb': 'Albanian', #sqi 'sqi': 'Albanian', 'srp': 'Serbian', 'swe': 'Swedish', 'tam': 'Tamil', 'tha': 'Thai', 'tur': 'Turkish', 'ukr': 'Ukrainian', 'urd': 'Urdu', 'vie': 'Vietnamese', 'cmn': 'Chinese' } #print(", ".join("\"%s\""%l for l in ted_iso2tag.keys())) # No Esperando! "epo" #ted_iso_langs = ["arb", "azj", "bel", "bul", "ben", "bos", "ces", "dan", "deu", "ell", "spa", "est", "eus", # "pes", "fin", "fra", "glg", "heb", "hin", "hrv", "hun", "hye", "ind", "ita", "jpn", "kat", "kaz", # "kor", "kur", "lit", "mkd", "khk", "mar", "zlm", "mya", "nob", "nld", "pol", "por", "ron", "rus", # "slk", "slv", "alb", "srp", "swe", "tam", "tha", "tur", "ukr", "urd", "vie", "cmn"] ted_iso_langs = ["arb", "aze", "bel", "bul", "ben", "bos", "ces", "dan", "deu", "ell", "spa", "est", "eus", "pes", "fin", "fra", "glg", "heb", "hin", "hrv", "hun", "hye", "ind", "ita", "jpn", "kat", "kaz", "kor", "kur", "lit", "mkd", "mon", "mar", "zlm", "mya", "nob", "nld", "pol", "por", "ron", "rus", "slk", "slv", "sqi", "srp", "swe", "tam", "tha", "tur", "ukr", "urd", "vie", "cmn"] ''' ara -> arb (Standard Arabic) zho -> cmn epo -> NO!!! (Esperanto) fas -> pes mon -> Mongolian is a macrolanguage, then... HalhMongolian (khk) or PeripheralMongolian (mvf)? aze -> azj or azb ? https://en.wikipedia.org/wiki/Azerbaijani_language#North_vs._South_Azerbaijani NorthAzerbaijani (azj) is spoken in Azerbajan... so? 
''' ted_tag2family = { 'ar': 'Afroasiatic', 'az': 'Turkic', 'be': 'Indo-European/Balto-Slavic', 'bg': 'Indo-European/Balto-Slavic', 'bn': 'Indo-European/Indo-Iranian', 'bs': 'Indo-European/Balto-Slavic', 'cs': 'Indo-European/Balto-Slavic', 'da': 'Indo-European/Germanic', 'de': 'Indo-European/Germanic', 'el': 'Indo-European/Hellenic', 'eo': 'N.A.', 'es': 'Indo-European/Italic/Romance', 'et': 'Uralic', 'eu': 'Vasconic?', 'fa': 'Indo-European/Indo-Iranian', 'fi': 'Uralic', 'fr': 'Indo-European/Italic/Romance', 'gl': 'Indo-European/Italic/Romance', 'he': 'Afroasiatic', 'hi': 'Indo-European/Indo-Iranian', 'hr': 'Indo-European/Balto-Slavic', 'hu': 'Uralic', 'hy': 'Indo-European/Armenian', 'id': 'Austronesian', 'it': 'Indo-European/Italic/Romance', 'ja': 'Japonic', 'ka': 'Kartvelian', 'kk': 'Turkic', 'ko': 'Koreanic', 'ku': 'Indo-European/Indo-Iranian', 'lt': 'Indo-European/Balto-Slavic', 'mk': 'Indo-European/Balto-Slavic', 'mn': 'Mongolic', 'mr': 'Indo-European/Indo-Iranian', 'ms': 'Austronesian', 'my': 'Sino-Tibetan', 'nb': 'Indo-European/Germanic', 'nl': 'Indo-European/Germanic', 'pl': 'Indo-European/Balto-Slavic', 'pt': 'Indo-European/Italic/Romance', 'ro': 'Indo-European/Italic/Romance', 'ru': 'Indo-European/Balto-Slavic', 'sk': 'Indo-European/Balto-Slavic', 'sl': 'Indo-European/Balto-Slavic', 'sq': 'Indo-European/Albanian', 'sr': 'Indo-European/Balto-Slavic', 'sv': 'Indo-European/Germanic', 'ta': 'Dravidian', 'th': 'Kra-Dai', 'tr': 'Turkic', 'uk': 'Indo-European/Balto-Slavic', 'ur': 'Indo-European/Indo-Iranian', 'vi': 'Austroasiatic', 'zh': 'Sino-Tibetan' } ted_family2tag = { 'Afroasiatic': ['ar', 'he'], 'Austroasiatic': ['vi'], 'Austronesian': ['id', 'ms'], 'Dravidian': ['ta'], 'Indo-European/Albanian': ['sq'], 'Indo-European/Armenian': ['hy'], 'Indo-European/Balto-Slavic': ['be', 'bg', 'bs', 'cs', 'hr', 'lt', 'mk', 'pl', 'ru', 'sk', 'sl', 'sr', 'uk'], 'Indo-European/Germanic': ['da', 'de', 'nb', 'nl', 'sv'], 'Indo-European/Hellenic': ['el'], 
'Indo-European/Indo-Iranian': ['bn', 'fa', 'hi', 'ku', 'mr', 'ur'], 'Indo-European/Italic/Romance': ['es', 'fr', 'gl', 'it', 'pt', 'ro'], 'Japonic': ['ja'], 'Kartvelian': ['ka'], 'Koreanic': ['ko'], 'Kra-Dai': ['th'], 'Mongolic': ['mn'], #'N.A.': ['eo'], 'Sino-Tibetan': ['my', 'zh'], 'Turkic': ['az', 'kk', 'tr'], 'Uralic': ['et', 'fi', 'hu'], 'Vasconic?': ['eu'] } ''' families = sorted(list(set(ted_tag2family.values()))) ted_family2tag = {} for fam in families: ted_family2tag[fam] = [] for k,v in ted_tag2family.items(): ted_family2tag[v].append(k) for fam in families: print("'%s': [%s]," % (fam, ", ".join(sorted(["'%s'"%l for l in ted_family2tag[fam]])))) ''' Baseline_Langs = { # Germanic 'eng': 'English', 'swe': 'Swedish', #no WIT 'dan': 'Danish', #no WIT 'deu': 'German', 'nld': 'Dutch', # Romanic 'ron': 'Romanian', 'fra': 'French', 'ita': 'Italian', 'spa': 'Spanish', 'por': 'Portuguese', # Balco-Slavic 'lav': 'Latvian', #no WIT 'lit': 'Lithuanian',#no WIT 'pol': 'Polish', 'slk': 'Slovak', 'ces': 'Czech', 'slv': 'Slovenian', 'bul': 'Bulgarian' } all_iso2name = { 'arb': 'Arabic', 'heb': 'Hebrew', 'kab': 'Kabyle', 'vie': 'Vietnamese', 'ind': 'Indonesian', 'zlm': 'Malay', 'tgl': 'Tagalog', 'kan': 'Kannada', 'mal': 'Malay', 'tam': 'Tamil', 'tel': 'Telugu', 'sqi': 'Albanian', 'hye': 'Armenian', 'bel': 'Belarusian', 'bos': 'Bosnian', 'bul': 'Bulgarian', 'hrv': 'Croatian', 'ces': 'Czech', 'lit': 'Lithuanian', 'mkd': 'Macedonian', 'pol': 'Polish', 'rus': 'Russian', 'srp': 'Serbian', 'slk': 'Slovak', 'slv': 'Slovenian', 'ukr': 'Ukrainian', 'dan': 'Danish', 'nld': 'Dutch', 'eng': 'English', 'deu': 'German', 'isl': 'Icelandic', 'nds': 'Low Saxon', 'nob': 'Norwegian Bokmål', 'swe': 'Swedish', 'ell': 'Greek', 'asm': 'Assamese', 'ben': 'Bengali', 'guj': 'Gujarati', 'hin': 'Hindi', 'kur': 'Kurdish', 'mar': 'Marathi', 'ori': 'Odia', 'pes': 'Persian', 'pan': 'Punjabi', 'urd': 'Urdu', 'fra': 'French', 'glg': 'Galician', 'ita': 'Italian', 'por': 'Portuguese', 'ron': 'Romanian', 
'spa': 'Spanish', 'jpn': 'Japanese', 'kat': 'Georgian', 'kor': 'Korean', 'tha': 'Thai', 'mon': 'Mongolian', 'shp': 'Shipibo-Konibo', 'mya': 'Burmese', 'cmn': 'Mandarin Chinese', 'mni': 'Manipuri', 'yue': 'Yue Chinese', 'aze': 'Azerbaijani', 'kaz': 'Kazakh', 'tur': 'Turkish', 'tuk': 'Turkmen', 'est': 'Estonian', 'fin': 'Finnish', 'hun': 'Hungarian', 'eus': 'Basque' } ted_tag2size = { 'kk': 3259, 'be': 4410, 'bn': 4552, 'eu': 5107, 'ms': 5120, 'bs': 5586, 'ur': 5858, 'az': 5863, 'ta': 6135, 'mn': 7461, 'mr': 9706, 'gl': 9870, 'ku': 10294, 'et': 10587, 'ka': 13010, 'nb': 15591, 'hi': 18517, 'sl': 19481, 'my': 20998, 'hy': 21060, 'fi': 23897, 'mk': 24993, 'lt': 41209, 'sq': 43823, 'da': 44269, 'pt': 50834, 'sv': 55696, 'sk': 60528, 'id': 85985, 'th': 96578, 'cs': 101614, 'uk': 106780, 'hr': 120148, 'el': 132254, 'sr': 134631, 'hu': 145039, 'fa': 148885, 'de': 165531, 'vi': 169731, 'bg': 172093, 'pl': 173813, 'ro': 178059, 'tr': 179930, 'nl': 181100, 'fr': 189444, 'es': 193075, 'zh': 197389, 'ja': 201421, 'it': 201575, 'ko': 202897, 'ru': 205458, 'he': 208693, 'ar': 211363 } all_iso_langs = list(all_iso2name.keys()) #print(" ".join(["<%s>|<%s>" % (ted_iso2tag[l], ted_iso2tag[l]) for l in ted_iso_langs]))
0.216632
0.291214
from __future__ import print_function ''' =============================================================================== Contains tools to make the sub-models for the Husky application =============================================================================== ''' import keras.backend as K import keras.losses as losses import keras.optimizers as optimizers import numpy as np import tensorflow as tf from keras.constraints import maxnorm from keras.layers.advanced_activations import LeakyReLU from keras.layers import Input, RepeatVector, Reshape from keras.layers import UpSampling2D, Conv2DTranspose from keras.layers import BatchNormalization, Dropout from keras.layers import Dense, Conv2D, Activation, Flatten from keras.layers import Lambda from keras.layers.merge import Add, Multiply from keras.layers.merge import Concatenate from keras.losses import binary_crossentropy from keras.models import Model, Sequential from keras.optimizers import Adam from keras.constraints import max_norm from .planner import * from .data_utils import * def HuskyNumOptions(): return 5 def HuskyNullOption(): return 4 def GetHuskyActorModel(x, num_options, pose_size, dropout_rate=0.5, batchnorm=True): ''' Make an "actor" network that takes in an encoded image and an "option" label and produces the next command to execute. 
''' xin = Input([int(d) for d in x.shape[1:]], name="actor_h_in") x0in = Input([int(d) for d in x.shape[1:]], name="actor_h0_in") pose_in = Input((pose_size,), name="actor_pose_in") option_in = Input((num_options,), name="actor_o_in") x = xin x0 = x0in dr, bn = dropout_rate, False use_lrelu = False x = Concatenate(axis=-1)([x, x0]) x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn) # Add arm, gripper y = pose_in y = AddDense(y, 32, "relu", 0., output=True, constraint=3) x = TileOnto(x, y, 32, (8,8), add=False) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) # Add arm, gripper y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3) x = TileOnto(x, y2, 64, (6,6), add=False) x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = Flatten()(x) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) # Same setup as the state decoders pose = AddDense(x, pose_size, "linear", 0., output=True) actor = Model([x0in, xin, option_in, pose_in], [pose], name="actor") return actor def GetHuskyPoseModel(x, num_options, pose_size, dropout_rate=0.5, batchnorm=True): ''' Make an "actor" network that takes in an encoded image and an "option" label and produces the next command to execute. 
''' xin = Input([int(d) for d in x.shape[1:]], name="pose_h_in") x0in = Input([int(d) for d in x.shape[1:]], name="pose_h0_in") pose_in = Input((pose_size,), name="pose_pose_in") option_in = Input((num_options,), name="pose_o_in") x = xin x0 = x0in dr, bn = dropout_rate, False use_lrelu = False x = Concatenate(axis=-1)([x, x0]) x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn) # Add arm, gripper y = pose_in y = AddDense(y, 32, "relu", 0., output=True, constraint=3) x = TileOnto(x, y, 32, (8,8), add=False) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) # Add arm, gripper y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3) x = TileOnto(x, y2, 64, (6,6), add=False) x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = Flatten()(x) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) # Same setup as the state decoders pose = AddDense(x, pose_size, "linear", 0., output=True) pose = Model([x0in, xin, option_in, pose_in], [pose], name="pose") return pose def GetPolicyHuskyData(num_options, option, image, pose, action, label, *args, **kwargs): I = np.array(image) / 255. p = np.array(pose) a = np.array(action) idx = label == option if np.count_nonzero(idx) > 0: I = I[idx] p = p[idx] a = a[idx] I0 = I[0,:,:,:] length = I.shape[0] I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1]) return [I0, I, p], [a] else: return [], [] def GetConditionalHuskyData(validate, no_disc, num_options, image, pose, action, label, prev_label, goal_image, goal_pose, value, *args, **kwargs): I = np.array(image) / 255. p = np.array(pose) a = np.array(action) I_target = np.array(goal_image) / 255. 
q_target = np.array(goal_pose) oin = np.array(prev_label) o1 = np.array(label) v = np.array(np.array(value) > 1.,dtype=float) I_target2, o2 = GetNextGoal(I_target, o1) I0 = I[0,:,:,:] length = I.shape[0] I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1]) oin_1h = np.squeeze(ToOneHot2D(oin, num_options)) o2_1h = np.squeeze(ToOneHot2D(o2, num_options)) if validate: o1_1h = np.squeeze(ToOneHot2D(o1, num_options)) return ([I0, I, o1, o2, oin], [I_target, I_target2, o1_1h, v, a, o2_1h]) elif no_disc: return [I0, I, o1, o2, oin], [I_target, I_target2,] else: return [I0, I, o1, o2, oin], [I_target, I_target2, o2_1h] def MakeHuskyPolicy(model, encoder, image, pose, action, option, verbose=True): ''' Create a single policy corresponding to option Parameters: ----------- model: definition of model/training configuration encoder: converts to hidden representation image: example of image data pose: example of pose data action: example of action data option: index of the policy to create verbose: should we print model info? 
''' img_shape = image.shape[1:] pose_size = pose.shape[-1] action_size = action.shape[-1] if verbose: print("pose_size =", pose_size, "action_size =", action_size) img_in = Input(img_shape,name="policy_img_in") img0_in = Input(img_shape,name="policy_img0_in") pose_in = Input((pose_size,), name="pose_in") ins = [img0_in, img_in, pose_in] dr, bn = model.dropout_rate, False use_lrelu = False x = encoder(img_in) x0 = encoder(img0_in) y = pose_in x = Concatenate(axis=-1)([x, x0]) x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=True, bn=bn) y = AddDense(y, 32, "relu", 0., output=True, constraint=3) x = TileOnto(x, y, 32, (8,8), add=False) x = AddConv2D(x, 32, [3,3], 1, dr, "valid", lrelu=True, bn=bn) x = Flatten()(x) x = AddDense(x, 512, "lrelu", dr, output=True, bn=bn) x = AddDense(x, 512, "lrelu", dr, output=True, bn=bn) action_out = Dense(action_size, name="action_out")(x) policy = Model(ins, [action_out]) policy.compile(loss=model.loss, optimizer=model.getOptimizer()) if verbose: policy.summary() return policy
costar_models/python/costar_models/husky.py
from __future__ import print_function ''' =============================================================================== Contains tools to make the sub-models for the Husky application =============================================================================== ''' import keras.backend as K import keras.losses as losses import keras.optimizers as optimizers import numpy as np import tensorflow as tf from keras.constraints import maxnorm from keras.layers.advanced_activations import LeakyReLU from keras.layers import Input, RepeatVector, Reshape from keras.layers import UpSampling2D, Conv2DTranspose from keras.layers import BatchNormalization, Dropout from keras.layers import Dense, Conv2D, Activation, Flatten from keras.layers import Lambda from keras.layers.merge import Add, Multiply from keras.layers.merge import Concatenate from keras.losses import binary_crossentropy from keras.models import Model, Sequential from keras.optimizers import Adam from keras.constraints import max_norm from .planner import * from .data_utils import * def HuskyNumOptions(): return 5 def HuskyNullOption(): return 4 def GetHuskyActorModel(x, num_options, pose_size, dropout_rate=0.5, batchnorm=True): ''' Make an "actor" network that takes in an encoded image and an "option" label and produces the next command to execute. 
''' xin = Input([int(d) for d in x.shape[1:]], name="actor_h_in") x0in = Input([int(d) for d in x.shape[1:]], name="actor_h0_in") pose_in = Input((pose_size,), name="actor_pose_in") option_in = Input((num_options,), name="actor_o_in") x = xin x0 = x0in dr, bn = dropout_rate, False use_lrelu = False x = Concatenate(axis=-1)([x, x0]) x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn) # Add arm, gripper y = pose_in y = AddDense(y, 32, "relu", 0., output=True, constraint=3) x = TileOnto(x, y, 32, (8,8), add=False) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) # Add arm, gripper y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3) x = TileOnto(x, y2, 64, (6,6), add=False) x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = Flatten()(x) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) # Same setup as the state decoders pose = AddDense(x, pose_size, "linear", 0., output=True) actor = Model([x0in, xin, option_in, pose_in], [pose], name="actor") return actor def GetHuskyPoseModel(x, num_options, pose_size, dropout_rate=0.5, batchnorm=True): ''' Make an "actor" network that takes in an encoded image and an "option" label and produces the next command to execute. 
''' xin = Input([int(d) for d in x.shape[1:]], name="pose_h_in") x0in = Input([int(d) for d in x.shape[1:]], name="pose_h0_in") pose_in = Input((pose_size,), name="pose_pose_in") option_in = Input((num_options,), name="pose_o_in") x = xin x0 = x0in dr, bn = dropout_rate, False use_lrelu = False x = Concatenate(axis=-1)([x, x0]) x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn) # Add arm, gripper y = pose_in y = AddDense(y, 32, "relu", 0., output=True, constraint=3) x = TileOnto(x, y, 32, (8,8), add=False) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) # Add arm, gripper y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3) x = TileOnto(x, y2, 64, (6,6), add=False) x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn) x = Flatten()(x) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) x = AddDense(x, 512, "relu", dr, output=True, bn=bn) # Same setup as the state decoders pose = AddDense(x, pose_size, "linear", 0., output=True) pose = Model([x0in, xin, option_in, pose_in], [pose], name="pose") return pose def GetPolicyHuskyData(num_options, option, image, pose, action, label, *args, **kwargs): I = np.array(image) / 255. p = np.array(pose) a = np.array(action) idx = label == option if np.count_nonzero(idx) > 0: I = I[idx] p = p[idx] a = a[idx] I0 = I[0,:,:,:] length = I.shape[0] I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1]) return [I0, I, p], [a] else: return [], [] def GetConditionalHuskyData(validate, no_disc, num_options, image, pose, action, label, prev_label, goal_image, goal_pose, value, *args, **kwargs): I = np.array(image) / 255. p = np.array(pose) a = np.array(action) I_target = np.array(goal_image) / 255. 
q_target = np.array(goal_pose) oin = np.array(prev_label) o1 = np.array(label) v = np.array(np.array(value) > 1.,dtype=float) I_target2, o2 = GetNextGoal(I_target, o1) I0 = I[0,:,:,:] length = I.shape[0] I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1]) oin_1h = np.squeeze(ToOneHot2D(oin, num_options)) o2_1h = np.squeeze(ToOneHot2D(o2, num_options)) if validate: o1_1h = np.squeeze(ToOneHot2D(o1, num_options)) return ([I0, I, o1, o2, oin], [I_target, I_target2, o1_1h, v, a, o2_1h]) elif no_disc: return [I0, I, o1, o2, oin], [I_target, I_target2,] else: return [I0, I, o1, o2, oin], [I_target, I_target2, o2_1h] def MakeHuskyPolicy(model, encoder, image, pose, action, option, verbose=True): ''' Create a single policy corresponding to option Parameters: ----------- model: definition of model/training configuration encoder: converts to hidden representation image: example of image data pose: example of pose data action: example of action data option: index of the policy to create verbose: should we print model info? 
''' img_shape = image.shape[1:] pose_size = pose.shape[-1] action_size = action.shape[-1] if verbose: print("pose_size =", pose_size, "action_size =", action_size) img_in = Input(img_shape,name="policy_img_in") img0_in = Input(img_shape,name="policy_img0_in") pose_in = Input((pose_size,), name="pose_in") ins = [img0_in, img_in, pose_in] dr, bn = model.dropout_rate, False use_lrelu = False x = encoder(img_in) x0 = encoder(img0_in) y = pose_in x = Concatenate(axis=-1)([x, x0]) x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=True, bn=bn) y = AddDense(y, 32, "relu", 0., output=True, constraint=3) x = TileOnto(x, y, 32, (8,8), add=False) x = AddConv2D(x, 32, [3,3], 1, dr, "valid", lrelu=True, bn=bn) x = Flatten()(x) x = AddDense(x, 512, "lrelu", dr, output=True, bn=bn) x = AddDense(x, 512, "lrelu", dr, output=True, bn=bn) action_out = Dense(action_size, name="action_out")(x) policy = Model(ins, [action_out]) policy.compile(loss=model.loss, optimizer=model.getOptimizer()) if verbose: policy.summary() return policy
0.800263
0.430746
import numpy as np import pandas as pd def edgelist_from_synapse_df(syn_df, pre_column='pre_pt_root_id', post_column='post_pt_root_id', weight_column='size', agg='count'): """Compute a list of pre to post edges from a synapse-query-style dataframe. Defaults to counting synapses. Parameters ---------- syn_df : pandas.DataFrame DataFrame with columns for pre id, post id, and at least one additional column to use as weight. pre_column : str, optional Column name of the presynaptic ids, by default 'pre_pt_root_id' post_column : str, optional Column name of the postsynaptic ids, by default 'pre_pt_root_id' weight_column : str, optional Column name for values to be aggregated, by default 'size'' agg : str, optional Argument for the pandas groupby aggregation function, by default 'count'. Set to `sum` for using net synapse size instead. Returns ------- pandas.DataFrame DataFrame with pre, post, and weight columns and a row for each edge in graph. """ syn_gb = syn_df.groupby([pre_column, post_column]) edge_list_wide = syn_gb.agg(agg).reset_index() edge_list = edge_list_wide[[pre_column, post_column, weight_column]].rename(columns={weight_column: 'weight'}) return edge_list def adjacency_matrix_from_edgelist(edgelist, pre_column='pre_pt_root_id', post_column='post_pt_root_id', weight_column='weight', id_list=None): """Build an adjacency matrix dataframe from an edgelist dataframe. Parameters ---------- edgelist : pandas.DataFrame Dataframe with pre id, post id, and weight columns and a row for each edge. pre_column : str, optional Name of the presynaptic column, by default 'pre_pt_root_id' post_column : str, optional Name of the postsynaptic column, by default 'post_pt_root_id' weight_column : str, optional Name of the weight column, by default 'weight' id_list : Collection, optional Collection of ids to use for the adjacency matrix indices, preserving order. If id_list is None, it uses exactly the ids in the edgelist. 
If id_list includes ids not in the edgelist, they become rows/columns with zeros. If id_list does not include ids that are in the edgelist, those edges are ignored. By default None Returns ------- pandas.DataFrame Square dataframe with postsynaptic ids as index, presynaptic ids as columns, and values correspond to the weight column with 0s filled for unshown data. """ el = edgelist.copy() if id_list is None: all_ids = np.unique(np.concatenate([edgelist[pre_column], edgelist[post_column]])) else: all_ids = id_list in_all_ids_pre = np.isin(edgelist[pre_column], all_ids) in_all_ids_post = np.isin(edgelist[post_column], all_ids) el = el[(in_all_ids_post) & (in_all_ids_pre)] el = edgelist.copy() pre_to_add = all_ids[~np.isin(all_ids, edgelist[pre_column])] concat_tuple = [el] if len(pre_to_add)>0: pre_add_df = pd.DataFrame(data={pre_column:pre_to_add, post_column:edgelist[post_column].iloc[0], weight_column:0}) concat_tuple.append(pre_add_df) post_to_add = all_ids[~np.isin(all_ids, edgelist[post_column])] if len(post_to_add)>0: post_add_df = pd.DataFrame(data={pre_column:edgelist[pre_column].iloc[0], post_column:post_to_add, weight_column:0}) concat_tuple.append(post_add_df) el = pd.concat(concat_tuple) Adf = el.pivot_table(index=post_column, columns=pre_column, values=weight_column, fill_value=0) return Adf[all_ids].loc[all_ids] def adjacency_matrix_from_synapse_df(syn_df, pre_column='pre_pt_root_id', post_column='post_pt_root_id', weight_column='size', agg='count', id_list=None): """Convenience function making an adjacency matrix directly from a synapse dataframe. Parameters ---------- syn_df : pandas.DataFrame DataFrame with columns for pre id, post id, and at least one additional column to use as weight. 
pre_column : str, optional Name of the presynaptic column, by default 'pre_pt_root_id' post_column : str, optional Name of the postsynaptic column, by default 'post_pt_root_id' weight_column : str, optional Name of the weight column, by default 'size' agg : str, optional Argument for the pandas groupby aggregation function, by default 'count'. Set to `sum` for using net synapse size instead. id_list : Collection, optional Collection of ids to use for the adjacency matrix indices, preserving order. If id_list is None, it uses exactly the ids in the edgelist. If id_list includes ids not in the edgelist, they become rows/columns with zeros. If id_list does not include ids that are in the edgelist, those edges are ignored. By default None Returns ------- pandas.DataFrame Square dataframe with postsynaptic ids as index, presynaptic ids as columns, and values correspond to the weight column with 0s filled for unshown data. """ el = edgelist_from_synapse_df(syn_df, pre_column, post_column, weight_column, agg) Adf = adjacency_matrix_from_edgelist(el, pre_column, post_column, weight_column, id_list) return Adf
analysisdatalink/connectivity.py
import numpy as np import pandas as pd def edgelist_from_synapse_df(syn_df, pre_column='pre_pt_root_id', post_column='post_pt_root_id', weight_column='size', agg='count'): """Compute a list of pre to post edges from a synapse-query-style dataframe. Defaults to counting synapses. Parameters ---------- syn_df : pandas.DataFrame DataFrame with columns for pre id, post id, and at least one additional column to use as weight. pre_column : str, optional Column name of the presynaptic ids, by default 'pre_pt_root_id' post_column : str, optional Column name of the postsynaptic ids, by default 'pre_pt_root_id' weight_column : str, optional Column name for values to be aggregated, by default 'size'' agg : str, optional Argument for the pandas groupby aggregation function, by default 'count'. Set to `sum` for using net synapse size instead. Returns ------- pandas.DataFrame DataFrame with pre, post, and weight columns and a row for each edge in graph. """ syn_gb = syn_df.groupby([pre_column, post_column]) edge_list_wide = syn_gb.agg(agg).reset_index() edge_list = edge_list_wide[[pre_column, post_column, weight_column]].rename(columns={weight_column: 'weight'}) return edge_list def adjacency_matrix_from_edgelist(edgelist, pre_column='pre_pt_root_id', post_column='post_pt_root_id', weight_column='weight', id_list=None): """Build an adjacency matrix dataframe from an edgelist dataframe. Parameters ---------- edgelist : pandas.DataFrame Dataframe with pre id, post id, and weight columns and a row for each edge. pre_column : str, optional Name of the presynaptic column, by default 'pre_pt_root_id' post_column : str, optional Name of the postsynaptic column, by default 'post_pt_root_id' weight_column : str, optional Name of the weight column, by default 'weight' id_list : Collection, optional Collection of ids to use for the adjacency matrix indices, preserving order. If id_list is None, it uses exactly the ids in the edgelist. 
If id_list includes ids not in the edgelist, they become rows/columns with zeros. If id_list does not include ids that are in the edgelist, those edges are ignored. By default None Returns ------- pandas.DataFrame Square dataframe with postsynaptic ids as index, presynaptic ids as columns, and values correspond to the weight column with 0s filled for unshown data. """ el = edgelist.copy() if id_list is None: all_ids = np.unique(np.concatenate([edgelist[pre_column], edgelist[post_column]])) else: all_ids = id_list in_all_ids_pre = np.isin(edgelist[pre_column], all_ids) in_all_ids_post = np.isin(edgelist[post_column], all_ids) el = el[(in_all_ids_post) & (in_all_ids_pre)] el = edgelist.copy() pre_to_add = all_ids[~np.isin(all_ids, edgelist[pre_column])] concat_tuple = [el] if len(pre_to_add)>0: pre_add_df = pd.DataFrame(data={pre_column:pre_to_add, post_column:edgelist[post_column].iloc[0], weight_column:0}) concat_tuple.append(pre_add_df) post_to_add = all_ids[~np.isin(all_ids, edgelist[post_column])] if len(post_to_add)>0: post_add_df = pd.DataFrame(data={pre_column:edgelist[pre_column].iloc[0], post_column:post_to_add, weight_column:0}) concat_tuple.append(post_add_df) el = pd.concat(concat_tuple) Adf = el.pivot_table(index=post_column, columns=pre_column, values=weight_column, fill_value=0) return Adf[all_ids].loc[all_ids] def adjacency_matrix_from_synapse_df(syn_df, pre_column='pre_pt_root_id', post_column='post_pt_root_id', weight_column='size', agg='count', id_list=None): """Convenience function making an adjacency matrix directly from a synapse dataframe. Parameters ---------- syn_df : pandas.DataFrame DataFrame with columns for pre id, post id, and at least one additional column to use as weight. 
pre_column : str, optional Name of the presynaptic column, by default 'pre_pt_root_id' post_column : str, optional Name of the postsynaptic column, by default 'post_pt_root_id' weight_column : str, optional Name of the weight column, by default 'size' agg : str, optional Argument for the pandas groupby aggregation function, by default 'count'. Set to `sum` for using net synapse size instead. id_list : Collection, optional Collection of ids to use for the adjacency matrix indices, preserving order. If id_list is None, it uses exactly the ids in the edgelist. If id_list includes ids not in the edgelist, they become rows/columns with zeros. If id_list does not include ids that are in the edgelist, those edges are ignored. By default None Returns ------- pandas.DataFrame Square dataframe with postsynaptic ids as index, presynaptic ids as columns, and values correspond to the weight column with 0s filled for unshown data. """ el = edgelist_from_synapse_df(syn_df, pre_column, post_column, weight_column, agg) Adf = adjacency_matrix_from_edgelist(el, pre_column, post_column, weight_column, id_list) return Adf
0.871966
0.572245
from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' module: github_org_teams short_description: Manage GitHub Organization Teams extends_documentation_fragment: opentelekomcloud.gitcontrol.github version_added: "0.0.2" author: "<NAME> (@gtema)" description: - Manages organization teams. options: organization: description: Name of the GitHub organization type: str required: True teams: description: Dictionary of organization teams type: list required: True elements: dict suboptions: slug: description: Team slug type: str required: true description: description: Team description type: str required: false privacy: description: Team privacy option type: str choices: [secret, closed] default: secret parent: description: Slug of the parent team type: str required: false maintainers: description: List of team maintainers type: list required: false members: description: List of team members type: list required: false exclusive: description: | Whether exclusive mode should be enabled. This enforces that not configured, but existing teams as well as team maintainers and members will be deleted. 
type: bool default: false ''' RETURN = ''' opentelekomcloud.gitcontrol.github_org_teams: description: List of organization teams statuses returned: always type: list list_item: string ''' EXAMPLES = ''' - name: Apply org members opentelekomcloud.gitcontrol.github_org_teams: token: "{{ secret }}" organization: "test_org" teams: team1: description: description of the team maintainer: - userA member: - userB parent: visibility ''' from ansible_collections.opentelekomcloud.gitcontrol.plugins.module_utils.github import GitHubBase class GHOrgTeamsModule(GitHubBase): argument_spec = dict( organization=dict(type='str', required=True), teams=dict( type='list', required=True, elements='dict', options=dict( slug=dict(type='str', required=True), name=dict(type='str', required=False), description=dict(type='str', required=False), privacy=dict(type='str', choices=['secret', 'closed'], default='secret'), parent=dict(type='str', required=False), maintainers=dict( type='list', elements='str', aliases=['maintainer'] ), members=dict( type='list', elements='str', aliases=['member'] ) ) ), exclusive=dict(type='bool', default=False), ) module_kwargs = dict( supports_check_mode=True ) def run(self): status = dict() changed = False (changed, status) = self._manage_org_teams( self.params['organization'], self.params['teams'], self.params['exclusive'], self.ansible.check_mode ) if len(self.errors) == 0: self.exit_json( changed=changed, teams=status ) else: self.fail_json( msg='Failures occured', errors=self.errors, teams=status ) def main(): module = GHOrgTeamsModule() module() if __name__ == "__main__": main()
plugins/modules/github_org_teams.py
from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' module: github_org_teams short_description: Manage GitHub Organization Teams extends_documentation_fragment: opentelekomcloud.gitcontrol.github version_added: "0.0.2" author: "<NAME> (@gtema)" description: - Manages organization teams. options: organization: description: Name of the GitHub organization type: str required: True teams: description: Dictionary of organization teams type: list required: True elements: dict suboptions: slug: description: Team slug type: str required: true description: description: Team description type: str required: false privacy: description: Team privacy option type: str choices: [secret, closed] default: secret parent: description: Slug of the parent team type: str required: false maintainers: description: List of team maintainers type: list required: false members: description: List of team members type: list required: false exclusive: description: | Whether exclusive mode should be enabled. This enforces that not configured, but existing teams as well as team maintainers and members will be deleted. 
type: bool default: false ''' RETURN = ''' opentelekomcloud.gitcontrol.github_org_teams: description: List of organization teams statuses returned: always type: list list_item: string ''' EXAMPLES = ''' - name: Apply org members opentelekomcloud.gitcontrol.github_org_teams: token: "{{ secret }}" organization: "test_org" teams: team1: description: description of the team maintainer: - userA member: - userB parent: visibility ''' from ansible_collections.opentelekomcloud.gitcontrol.plugins.module_utils.github import GitHubBase class GHOrgTeamsModule(GitHubBase): argument_spec = dict( organization=dict(type='str', required=True), teams=dict( type='list', required=True, elements='dict', options=dict( slug=dict(type='str', required=True), name=dict(type='str', required=False), description=dict(type='str', required=False), privacy=dict(type='str', choices=['secret', 'closed'], default='secret'), parent=dict(type='str', required=False), maintainers=dict( type='list', elements='str', aliases=['maintainer'] ), members=dict( type='list', elements='str', aliases=['member'] ) ) ), exclusive=dict(type='bool', default=False), ) module_kwargs = dict( supports_check_mode=True ) def run(self): status = dict() changed = False (changed, status) = self._manage_org_teams( self.params['organization'], self.params['teams'], self.params['exclusive'], self.ansible.check_mode ) if len(self.errors) == 0: self.exit_json( changed=changed, teams=status ) else: self.fail_json( msg='Failures occured', errors=self.errors, teams=status ) def main(): module = GHOrgTeamsModule() module() if __name__ == "__main__": main()
0.69181
0.188567
from collections import defaultdict
from threading import RLock
from typing import Any, Callable, Dict, Generator, Hashable, List, Optional, Tuple, TypeVar

import numpy as np
from pyquaternion import Quaternion

from paralleldomain.decoding.common import create_cache_key
from paralleldomain.model.class_mapping import ClassDetail
from paralleldomain.model.type_aliases import FrameId, SceneName, SensorName
from paralleldomain.utilities.any_path import AnyPath
from paralleldomain.utilities.coordinate_system import INTERNAL_COORDINATE_SYSTEM, CoordinateSystem
from paralleldomain.utilities.fsio import read_json

# NuImages ego poses are expressed in an FLU (front-left-up) IMU frame.
NUIMAGES_IMU_TO_INTERNAL_CS = CoordinateSystem("FLU") > INTERNAL_COORDINATE_SYSTEM
# NUIMAGES_TO_INTERNAL_CS = CoordinateSystem("RFU") > INTERNAL_COORDINATE_SYSTEM


def load_table(dataset_root: AnyPath, split_name: str, table_name: str) -> List[Dict[str, Any]]:
    """
    Load a NuImages JSON table and return it.

    :param dataset_root: Root folder of the NuImages dataset.
    :param split_name: Name of the dataset split folder (e.g. v1.0-train).
    :param table_name: The name of the table to load.
    :return: The parsed table content.
    :raises ValueError: If the table file does not exist.
    """
    table_path = dataset_root / split_name / f"{table_name}.json"
    if table_path.exists():
        return read_json(table_path)
    raise ValueError(f"Error: Table {table_name} does not exist!")


ItemType = TypeVar("ItemType")


class _FixedStorage:
    """Thread-safe lazy cache mapping hashable keys to loaded values."""

    def __init__(self):
        self.stored_tables = dict()
        # One lock per key so loads of different tables don't block each other.
        self.table_load_locks = defaultdict(RLock)

    def get_item(self, key: Hashable, loader: Callable[[], ItemType]) -> ItemType:
        """Return the cached value for ``key``, invoking ``loader`` at most once."""
        if key not in self.stored_tables:
            with self.table_load_locks[key]:
                # Double-checked: another thread may have loaded it meanwhile.
                if key not in self.stored_tables:
                    self.stored_tables[key] = loader()
        return self.stored_tables[key]


class NuImagesDataAccessMixin:
    """Shared, cached access to the raw NuImages JSON tables."""

    # Process-wide cache shared by all instances of this mixin.
    _storage = _FixedStorage()

    def __init__(self, dataset_path: AnyPath, dataset_name: str, split_name: str):
        """Decodes a NuImages dataset

        Args:
            dataset_path: AnyPath to the root folder of a NuImages dataset.
            dataset_name: Unique name of the dataset, used to namespace cache keys.
            split_name: Split to use within this dataset. Defaults to v1.0-train.
                Options are [v1.0-mini, v1.0-test, v1.0-train, v1.0-val].
        """
        self.dataset_name = dataset_name
        self._dataset_path = dataset_path
        self.split_name = split_name

    @property
    def nu_table_storage(self) -> _FixedStorage:
        return NuImagesDataAccessMixin._storage

    def get_unique_id(
        self,
        scene_name: Optional[SceneName] = None,
        sensor_name: Optional[SensorName] = None,
        frame_id: Optional[FrameId] = None,
        extra: Optional[str] = None,
    ) -> str:
        """Build a cache key unique to this dataset (and optional scene/sensor/frame)."""
        return create_cache_key(
            dataset_name=self.dataset_name,
            scene_name=scene_name,
            sensor_name=sensor_name,
            frame_id=frame_id,
            extra=extra,
        )

    @property
    def nu_logs(self) -> List[Dict[str, Any]]:
        _unique_cache_key = self.get_unique_id(extra="nu_logs")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: load_table(dataset_root=self._dataset_path, table_name="log", split_name=self.split_name),
        )

    @property
    def nu_logs_by_log_token(self) -> Dict[str, Dict[str, Any]]:
        return {log["token"]: log for log in self.nu_logs}

    @property
    def nu_samples(self) -> Dict[str, List[Dict[str, Any]]]:
        """Sample records grouped by log token."""
        _unique_cache_key = self.get_unique_id(extra="nu_samples")

        def get_nu_samples() -> Dict[str, List[Dict[str, Any]]]:
            samples = load_table(dataset_root=self._dataset_path, table_name="sample", split_name=self.split_name)
            log_wise_samples = dict()
            for s in samples:
                log_wise_samples.setdefault(s["log_token"], list()).append(s)
            return log_wise_samples

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_samples)

    @property
    def nu_sensors(self) -> List[Dict[str, Any]]:
        _unique_cache_key = self.get_unique_id(extra="nu_sensors")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: load_table(dataset_root=self._dataset_path, table_name="sensor", split_name=self.split_name),
        )

    def get_nu_sensor(self, sensor_token: str) -> Dict[str, Any]:
        """Return the sensor record with the given token, or an empty dict if absent."""
        return next((sensor for sensor in self.nu_sensors if sensor["token"] == sensor_token), dict())

    @property
    def nu_samples_data(self) -> Dict[str, Dict[str, Any]]:
        """sample_data records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_samples_data")

        def get_nu_samples_data() -> Dict[str, Dict[str, Any]]:
            data = load_table(dataset_root=self._dataset_path, table_name="sample_data", split_name=self.split_name)
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_samples_data)

    @property
    def nu_surface_ann(self) -> Dict[str, List[Dict[str, Any]]]:
        """Surface annotations grouped by sample_data token."""
        _unique_cache_key = self.get_unique_id(extra="nu_surface_ann")

        def get_nu_surface_ann() -> Dict[str, List[Dict[str, Any]]]:
            data = load_table(dataset_root=self._dataset_path, table_name="surface_ann", split_name=self.split_name)
            surface_ann = dict()
            for d in data:
                surface_ann.setdefault(d["sample_data_token"], list()).append(d)
            return surface_ann

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_surface_ann)

    @property
    def nu_object_ann(self) -> Dict[str, List[Dict[str, Any]]]:
        """Object annotations grouped by sample_data token."""
        _unique_cache_key = self.get_unique_id(extra="nu_object_ann")

        def get_nu_object_ann() -> Dict[str, List[Dict[str, Any]]]:
            data = load_table(dataset_root=self._dataset_path, table_name="object_ann", split_name=self.split_name)
            object_ann = dict()
            for d in data:
                object_ann.setdefault(d["sample_data_token"], list()).append(d)
            return object_ann

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_object_ann)

    @property
    def nu_sample_data_tokens_to_available_anno_types(self) -> Dict[str, Tuple[bool, bool]]:
        """Map sample_data token -> (has surface annotations, has object annotations)."""
        _unique_cache_key = self.get_unique_id(extra="nu_sample_data_tokens_to_available_anno_types")

        def get_nu_sample_data_tokens_to_available_anno_types() -> Dict[str, Tuple[bool, bool]]:
            surface_anns = self.nu_surface_ann
            obj_anns = self.nu_object_ann
            mapping = dict()
            for k in surface_anns.keys():
                mapping.setdefault(k, [False, False])[0] = True
            for k in obj_anns.keys():
                mapping.setdefault(k, [False, False])[1] = True
            return mapping

        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=get_nu_sample_data_tokens_to_available_anno_types,
        )

    @property
    def nu_calibrated_sensors(self) -> Dict[str, Dict[str, Any]]:
        """calibrated_sensor records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_calibrated_sensors")

        def get_nu_calibrated_sensors() -> Dict[str, Dict[str, Any]]:
            data = load_table(
                dataset_root=self._dataset_path, table_name="calibrated_sensor", split_name=self.split_name
            )
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_calibrated_sensors)

    @property
    def nu_ego_pose(self) -> List[Dict[str, Any]]:
        _unique_cache_key = self.get_unique_id(extra="nu_ego_pose")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: load_table(
                dataset_root=self._dataset_path, table_name="ego_pose", split_name=self.split_name
            ),
        )

    def get_nu_ego_pose(self, ego_pose_token: str) -> Dict[str, Any]:
        """Return the ego pose record with the given token, or an empty dict if absent."""
        return next((pose for pose in self.nu_ego_pose if pose["token"] == ego_pose_token), dict())

    @property
    def nu_category(self) -> Dict[str, Dict[str, Any]]:
        """category records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_category")

        def get_nu_category() -> Dict[str, Dict[str, Any]]:
            data = load_table(dataset_root=self._dataset_path, table_name="category", split_name=self.split_name)
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_category)

    @property
    def nu_attribute(self) -> Dict[str, Dict[str, Any]]:
        """attribute records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_attribute")

        def get_nu_attribute() -> Dict[str, Dict[str, Any]]:
            data = load_table(dataset_root=self._dataset_path, table_name="attribute", split_name=self.split_name)
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_attribute)

    @property
    def nu_name_to_index(self) -> Dict[str, int]:
        """Mapping from category name to class index."""
        _unique_cache_key = self.get_unique_id(extra="nu_name_to_index")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: name_to_index_mapping(category=list(self.nu_category.values())),
        )

    def _get_prev_data_ids(self, key_camera_token: str) -> List[str]:
        """Return the given token plus all sample_data tokens reachable via 'prev' links."""
        sample_data = self.nu_samples_data[key_camera_token]
        tokens = [key_camera_token]
        if sample_data["prev"]:
            tokens += self._get_prev_data_ids(key_camera_token=sample_data["prev"])
        return tokens

    def _get_next_data_ids(self, key_camera_token: str) -> List[str]:
        """Return the given token plus all sample_data tokens reachable via 'next' links."""
        sample_data = self.nu_samples_data[key_camera_token]
        tokens = [key_camera_token]
        if sample_data["next"]:
            # Bug fix: this previously recursed into _get_prev_data_ids, which walked
            # backwards from the 'next' token and therefore never collected frames
            # more than one step ahead of the starting token.
            tokens += self._get_next_data_ids(key_camera_token=sample_data["next"])
        return tokens

    def get_connected_sample_data_ids(self, key_camera_token: str):
        """Return all sample_data tokens connected to the given one via prev/next links."""
        prev_ids = self._get_prev_data_ids(key_camera_token=key_camera_token)
        next_ids = self._get_next_data_ids(key_camera_token=key_camera_token)
        return list(set(next_ids + prev_ids))

    def get_sample_data_with_frame_id(self, log_token: str, frame_id: FrameId) -> Generator[Dict[str, Any], None, None]:
        """Yield the key-camera sample_data records of a log whose timestamp equals ``frame_id``."""
        samples = self.nu_samples[log_token]
        key_camera_tokens = [sample["key_camera_token"] for sample in samples]
        data_dict = self.nu_samples_data
        for key_camera_token in key_camera_tokens:
            data = data_dict[key_camera_token]
            # frame ids are stringified timestamps
            if str(data["timestamp"]) == frame_id:
                yield data

    def get_sample_data_id_frame_id_and_sensor_name(
        self, log_token: str, frame_id: FrameId, sensor_name: SensorName
    ) -> Optional[str]:
        return self.nu_sample_data_ids_by_frame_and_sensor(log_token=log_token)[(frame_id, sensor_name)]

    def nu_sample_data_ids_by_frame_and_sensor(self, log_token: str) -> Dict[Tuple[FrameId, SensorName], str]:
        """Map (frame id, sensor name) -> key camera token for the given log."""
        _unique_cache_key = self.get_unique_id(extra="nu_sample_data_ids_by_frame_and_sensor", scene_name=log_token)

        def get_nu_sample_data_ids_by_frame_and_sensor() -> Dict[Tuple[FrameId, SensorName], str]:
            samples = self.nu_samples[log_token]
            key_camera_tokens = [sample["key_camera_token"] for sample in samples]
            mapping = dict()
            nu_samples_data = self.nu_samples_data
            for key_camera_token in key_camera_tokens:
                data = nu_samples_data[key_camera_token]
                frame_id = str(data["timestamp"])
                calib_sensor_token = data["calibrated_sensor_token"]
                calib_sensor = self.nu_calibrated_sensors[calib_sensor_token]
                sensor = self.get_nu_sensor(sensor_token=calib_sensor["sensor_token"])
                sensor_name = sensor["channel"]
                mapping[(frame_id, sensor_name)] = key_camera_token
            return mapping

        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=get_nu_sample_data_ids_by_frame_and_sensor,
        )

    @property
    def nu_class_infos(self) -> List[ClassDetail]:
        """Class details (name, index, description) for all categories plus background."""
        _unique_cache_key = self.get_unique_id(extra="nu_class_infos")

        def get_nu_class_infos() -> List[ClassDetail]:
            name_to_index = name_to_index_mapping(category=list(self.nu_category.values()))
            details = list()
            for _, cat in self.nu_category.items():
                name = cat["name"]
                index = name_to_index[name]
                details.append(ClassDetail(name=name, id=index, meta=dict(description=cat["description"])))
            details.append(ClassDetail(name="background", id=name_to_index["background"], meta=dict()))
            return details

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_class_infos)

    def get_ego_pose(self, log_token: str, frame_id: FrameId) -> np.ndarray:
        """Return the 4x4 ego pose matrix (internal coordinate system) for a frame.

        :raises ValueError: If no ego pose exists for the given frame id.
        """
        for data in self.get_sample_data_with_frame_id(log_token=log_token, frame_id=frame_id):
            ego_pose_token = data["ego_pose_token"]
            ego_pose = self.get_nu_ego_pose(ego_pose_token=ego_pose_token)
            trans = np.eye(4)
            trans[:3, :3] = Quaternion(ego_pose["rotation"]).rotation_matrix
            trans[:3, 3] = np.array(ego_pose["translation"])
            # Convert from the NuImages IMU frame to the internal coordinate system.
            trans = NUIMAGES_IMU_TO_INTERNAL_CS @ trans
            return trans
        raise ValueError(f"No ego pose for frame id {frame_id}")


NUIMAGES_CLASSES = list()


def name_to_index_mapping(category: List[Dict[str, Any]]) -> Dict[str, int]:
    """
    Build a mapping from name to index to look up index in O(1) time.

    :param category: The nuImages category table.
    :return: The mapping from category name to category index.
    """
    # The 0 index is reserved for non-labelled background; thus, the categories should start from index 1.
    # Also, sort the categories before looping so that the order is always the same (alphabetical).
    name_to_index = dict()
    i = 1
    sorted_category: List = sorted(category.copy(), key=lambda k: k["name"])
    for c in sorted_category:
        # Ignore the vehicle.ego and flat.driveable_surface classes first; they will be mapped later.
        if c["name"] != "vehicle.ego" and c["name"] != "flat.driveable_surface":
            name_to_index[c["name"]] = i
            i += 1

    assert max(name_to_index.values()) < 24, (
        "Error: There are {} classes (excluding vehicle.ego and flat.driveable_surface), "
        "but there should be 23. Please check your category.json".format(max(name_to_index.values()))
    )

    # Now map the vehicle.ego and flat.driveable_surface classes.
    name_to_index["flat.driveable_surface"] = 24
    name_to_index["vehicle.ego"] = 31
    name_to_index["background"] = 0

    # Ensure that each class name is uniquely paired with a class index, and vice versa.
    assert len(name_to_index) == len(
        set(name_to_index.values())
    ), "Error: There are {} class names but {} class indices".format(
        len(name_to_index), len(set(name_to_index.values()))
    )

    return name_to_index
paralleldomain/decoding/nuimages/common.py
from collections import defaultdict
from threading import RLock
from typing import Any, Callable, Dict, Generator, Hashable, List, Optional, Tuple, TypeVar

import numpy as np
from pyquaternion import Quaternion

from paralleldomain.decoding.common import create_cache_key
from paralleldomain.model.class_mapping import ClassDetail
from paralleldomain.model.type_aliases import FrameId, SceneName, SensorName
from paralleldomain.utilities.any_path import AnyPath
from paralleldomain.utilities.coordinate_system import INTERNAL_COORDINATE_SYSTEM, CoordinateSystem
from paralleldomain.utilities.fsio import read_json

# NuImages ego poses are expressed in an FLU (front-left-up) IMU frame.
NUIMAGES_IMU_TO_INTERNAL_CS = CoordinateSystem("FLU") > INTERNAL_COORDINATE_SYSTEM
# NUIMAGES_TO_INTERNAL_CS = CoordinateSystem("RFU") > INTERNAL_COORDINATE_SYSTEM


def load_table(dataset_root: AnyPath, split_name: str, table_name: str) -> List[Dict[str, Any]]:
    """
    Load a NuImages JSON table and return it.

    :param dataset_root: Root folder of the NuImages dataset.
    :param split_name: Name of the dataset split folder (e.g. v1.0-train).
    :param table_name: The name of the table to load.
    :return: The parsed table content.
    :raises ValueError: If the table file does not exist.
    """
    table_path = dataset_root / split_name / f"{table_name}.json"
    if table_path.exists():
        return read_json(table_path)
    raise ValueError(f"Error: Table {table_name} does not exist!")


ItemType = TypeVar("ItemType")


class _FixedStorage:
    """Thread-safe lazy cache mapping hashable keys to loaded values."""

    def __init__(self):
        self.stored_tables = dict()
        # One lock per key so loads of different tables don't block each other.
        self.table_load_locks = defaultdict(RLock)

    def get_item(self, key: Hashable, loader: Callable[[], ItemType]) -> ItemType:
        """Return the cached value for ``key``, invoking ``loader`` at most once."""
        if key not in self.stored_tables:
            with self.table_load_locks[key]:
                # Double-checked: another thread may have loaded it meanwhile.
                if key not in self.stored_tables:
                    self.stored_tables[key] = loader()
        return self.stored_tables[key]


class NuImagesDataAccessMixin:
    """Shared, cached access to the raw NuImages JSON tables."""

    # Process-wide cache shared by all instances of this mixin.
    _storage = _FixedStorage()

    def __init__(self, dataset_path: AnyPath, dataset_name: str, split_name: str):
        """Decodes a NuImages dataset

        Args:
            dataset_path: AnyPath to the root folder of a NuImages dataset.
            dataset_name: Unique name of the dataset, used to namespace cache keys.
            split_name: Split to use within this dataset. Defaults to v1.0-train.
                Options are [v1.0-mini, v1.0-test, v1.0-train, v1.0-val].
        """
        self.dataset_name = dataset_name
        self._dataset_path = dataset_path
        self.split_name = split_name

    @property
    def nu_table_storage(self) -> _FixedStorage:
        return NuImagesDataAccessMixin._storage

    def get_unique_id(
        self,
        scene_name: Optional[SceneName] = None,
        sensor_name: Optional[SensorName] = None,
        frame_id: Optional[FrameId] = None,
        extra: Optional[str] = None,
    ) -> str:
        """Build a cache key unique to this dataset (and optional scene/sensor/frame)."""
        return create_cache_key(
            dataset_name=self.dataset_name,
            scene_name=scene_name,
            sensor_name=sensor_name,
            frame_id=frame_id,
            extra=extra,
        )

    @property
    def nu_logs(self) -> List[Dict[str, Any]]:
        _unique_cache_key = self.get_unique_id(extra="nu_logs")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: load_table(dataset_root=self._dataset_path, table_name="log", split_name=self.split_name),
        )

    @property
    def nu_logs_by_log_token(self) -> Dict[str, Dict[str, Any]]:
        return {log["token"]: log for log in self.nu_logs}

    @property
    def nu_samples(self) -> Dict[str, List[Dict[str, Any]]]:
        """Sample records grouped by log token."""
        _unique_cache_key = self.get_unique_id(extra="nu_samples")

        def get_nu_samples() -> Dict[str, List[Dict[str, Any]]]:
            samples = load_table(dataset_root=self._dataset_path, table_name="sample", split_name=self.split_name)
            log_wise_samples = dict()
            for s in samples:
                log_wise_samples.setdefault(s["log_token"], list()).append(s)
            return log_wise_samples

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_samples)

    @property
    def nu_sensors(self) -> List[Dict[str, Any]]:
        _unique_cache_key = self.get_unique_id(extra="nu_sensors")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: load_table(dataset_root=self._dataset_path, table_name="sensor", split_name=self.split_name),
        )

    def get_nu_sensor(self, sensor_token: str) -> Dict[str, Any]:
        """Return the sensor record with the given token, or an empty dict if absent."""
        return next((sensor for sensor in self.nu_sensors if sensor["token"] == sensor_token), dict())

    @property
    def nu_samples_data(self) -> Dict[str, Dict[str, Any]]:
        """sample_data records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_samples_data")

        def get_nu_samples_data() -> Dict[str, Dict[str, Any]]:
            data = load_table(dataset_root=self._dataset_path, table_name="sample_data", split_name=self.split_name)
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_samples_data)

    @property
    def nu_surface_ann(self) -> Dict[str, List[Dict[str, Any]]]:
        """Surface annotations grouped by sample_data token."""
        _unique_cache_key = self.get_unique_id(extra="nu_surface_ann")

        def get_nu_surface_ann() -> Dict[str, List[Dict[str, Any]]]:
            data = load_table(dataset_root=self._dataset_path, table_name="surface_ann", split_name=self.split_name)
            surface_ann = dict()
            for d in data:
                surface_ann.setdefault(d["sample_data_token"], list()).append(d)
            return surface_ann

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_surface_ann)

    @property
    def nu_object_ann(self) -> Dict[str, List[Dict[str, Any]]]:
        """Object annotations grouped by sample_data token."""
        _unique_cache_key = self.get_unique_id(extra="nu_object_ann")

        def get_nu_object_ann() -> Dict[str, List[Dict[str, Any]]]:
            data = load_table(dataset_root=self._dataset_path, table_name="object_ann", split_name=self.split_name)
            object_ann = dict()
            for d in data:
                object_ann.setdefault(d["sample_data_token"], list()).append(d)
            return object_ann

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_object_ann)

    @property
    def nu_sample_data_tokens_to_available_anno_types(self) -> Dict[str, Tuple[bool, bool]]:
        """Map sample_data token -> (has surface annotations, has object annotations)."""
        _unique_cache_key = self.get_unique_id(extra="nu_sample_data_tokens_to_available_anno_types")

        def get_nu_sample_data_tokens_to_available_anno_types() -> Dict[str, Tuple[bool, bool]]:
            surface_anns = self.nu_surface_ann
            obj_anns = self.nu_object_ann
            mapping = dict()
            for k in surface_anns.keys():
                mapping.setdefault(k, [False, False])[0] = True
            for k in obj_anns.keys():
                mapping.setdefault(k, [False, False])[1] = True
            return mapping

        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=get_nu_sample_data_tokens_to_available_anno_types,
        )

    @property
    def nu_calibrated_sensors(self) -> Dict[str, Dict[str, Any]]:
        """calibrated_sensor records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_calibrated_sensors")

        def get_nu_calibrated_sensors() -> Dict[str, Dict[str, Any]]:
            data = load_table(
                dataset_root=self._dataset_path, table_name="calibrated_sensor", split_name=self.split_name
            )
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_calibrated_sensors)

    @property
    def nu_ego_pose(self) -> List[Dict[str, Any]]:
        _unique_cache_key = self.get_unique_id(extra="nu_ego_pose")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: load_table(
                dataset_root=self._dataset_path, table_name="ego_pose", split_name=self.split_name
            ),
        )

    def get_nu_ego_pose(self, ego_pose_token: str) -> Dict[str, Any]:
        """Return the ego pose record with the given token, or an empty dict if absent."""
        return next((pose for pose in self.nu_ego_pose if pose["token"] == ego_pose_token), dict())

    @property
    def nu_category(self) -> Dict[str, Dict[str, Any]]:
        """category records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_category")

        def get_nu_category() -> Dict[str, Dict[str, Any]]:
            data = load_table(dataset_root=self._dataset_path, table_name="category", split_name=self.split_name)
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_category)

    @property
    def nu_attribute(self) -> Dict[str, Dict[str, Any]]:
        """attribute records indexed by token."""
        _unique_cache_key = self.get_unique_id(extra="nu_attribute")

        def get_nu_attribute() -> Dict[str, Dict[str, Any]]:
            data = load_table(dataset_root=self._dataset_path, table_name="attribute", split_name=self.split_name)
            return {d["token"]: d for d in data}

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_attribute)

    @property
    def nu_name_to_index(self) -> Dict[str, int]:
        """Mapping from category name to class index."""
        _unique_cache_key = self.get_unique_id(extra="nu_name_to_index")
        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=lambda: name_to_index_mapping(category=list(self.nu_category.values())),
        )

    def _get_prev_data_ids(self, key_camera_token: str) -> List[str]:
        """Return the given token plus all sample_data tokens reachable via 'prev' links."""
        sample_data = self.nu_samples_data[key_camera_token]
        tokens = [key_camera_token]
        if sample_data["prev"]:
            tokens += self._get_prev_data_ids(key_camera_token=sample_data["prev"])
        return tokens

    def _get_next_data_ids(self, key_camera_token: str) -> List[str]:
        """Return the given token plus all sample_data tokens reachable via 'next' links."""
        sample_data = self.nu_samples_data[key_camera_token]
        tokens = [key_camera_token]
        if sample_data["next"]:
            # Bug fix: this previously recursed into _get_prev_data_ids, which walked
            # backwards from the 'next' token and therefore never collected frames
            # more than one step ahead of the starting token.
            tokens += self._get_next_data_ids(key_camera_token=sample_data["next"])
        return tokens

    def get_connected_sample_data_ids(self, key_camera_token: str):
        """Return all sample_data tokens connected to the given one via prev/next links."""
        prev_ids = self._get_prev_data_ids(key_camera_token=key_camera_token)
        next_ids = self._get_next_data_ids(key_camera_token=key_camera_token)
        return list(set(next_ids + prev_ids))

    def get_sample_data_with_frame_id(self, log_token: str, frame_id: FrameId) -> Generator[Dict[str, Any], None, None]:
        """Yield the key-camera sample_data records of a log whose timestamp equals ``frame_id``."""
        samples = self.nu_samples[log_token]
        key_camera_tokens = [sample["key_camera_token"] for sample in samples]
        data_dict = self.nu_samples_data
        for key_camera_token in key_camera_tokens:
            data = data_dict[key_camera_token]
            # frame ids are stringified timestamps
            if str(data["timestamp"]) == frame_id:
                yield data

    def get_sample_data_id_frame_id_and_sensor_name(
        self, log_token: str, frame_id: FrameId, sensor_name: SensorName
    ) -> Optional[str]:
        return self.nu_sample_data_ids_by_frame_and_sensor(log_token=log_token)[(frame_id, sensor_name)]

    def nu_sample_data_ids_by_frame_and_sensor(self, log_token: str) -> Dict[Tuple[FrameId, SensorName], str]:
        """Map (frame id, sensor name) -> key camera token for the given log."""
        _unique_cache_key = self.get_unique_id(extra="nu_sample_data_ids_by_frame_and_sensor", scene_name=log_token)

        def get_nu_sample_data_ids_by_frame_and_sensor() -> Dict[Tuple[FrameId, SensorName], str]:
            samples = self.nu_samples[log_token]
            key_camera_tokens = [sample["key_camera_token"] for sample in samples]
            mapping = dict()
            nu_samples_data = self.nu_samples_data
            for key_camera_token in key_camera_tokens:
                data = nu_samples_data[key_camera_token]
                frame_id = str(data["timestamp"])
                calib_sensor_token = data["calibrated_sensor_token"]
                calib_sensor = self.nu_calibrated_sensors[calib_sensor_token]
                sensor = self.get_nu_sensor(sensor_token=calib_sensor["sensor_token"])
                sensor_name = sensor["channel"]
                mapping[(frame_id, sensor_name)] = key_camera_token
            return mapping

        return self.nu_table_storage.get_item(
            key=_unique_cache_key,
            loader=get_nu_sample_data_ids_by_frame_and_sensor,
        )

    @property
    def nu_class_infos(self) -> List[ClassDetail]:
        """Class details (name, index, description) for all categories plus background."""
        _unique_cache_key = self.get_unique_id(extra="nu_class_infos")

        def get_nu_class_infos() -> List[ClassDetail]:
            name_to_index = name_to_index_mapping(category=list(self.nu_category.values()))
            details = list()
            for _, cat in self.nu_category.items():
                name = cat["name"]
                index = name_to_index[name]
                details.append(ClassDetail(name=name, id=index, meta=dict(description=cat["description"])))
            details.append(ClassDetail(name="background", id=name_to_index["background"], meta=dict()))
            return details

        return self.nu_table_storage.get_item(key=_unique_cache_key, loader=get_nu_class_infos)

    def get_ego_pose(self, log_token: str, frame_id: FrameId) -> np.ndarray:
        """Return the 4x4 ego pose matrix (internal coordinate system) for a frame.

        :raises ValueError: If no ego pose exists for the given frame id.
        """
        for data in self.get_sample_data_with_frame_id(log_token=log_token, frame_id=frame_id):
            ego_pose_token = data["ego_pose_token"]
            ego_pose = self.get_nu_ego_pose(ego_pose_token=ego_pose_token)
            trans = np.eye(4)
            trans[:3, :3] = Quaternion(ego_pose["rotation"]).rotation_matrix
            trans[:3, 3] = np.array(ego_pose["translation"])
            # Convert from the NuImages IMU frame to the internal coordinate system.
            trans = NUIMAGES_IMU_TO_INTERNAL_CS @ trans
            return trans
        raise ValueError(f"No ego pose for frame id {frame_id}")


NUIMAGES_CLASSES = list()


def name_to_index_mapping(category: List[Dict[str, Any]]) -> Dict[str, int]:
    """
    Build a mapping from name to index to look up index in O(1) time.

    :param category: The nuImages category table.
    :return: The mapping from category name to category index.
    """
    # The 0 index is reserved for non-labelled background; thus, the categories should start from index 1.
    # Also, sort the categories before looping so that the order is always the same (alphabetical).
    name_to_index = dict()
    i = 1
    sorted_category: List = sorted(category.copy(), key=lambda k: k["name"])
    for c in sorted_category:
        # Ignore the vehicle.ego and flat.driveable_surface classes first; they will be mapped later.
        if c["name"] != "vehicle.ego" and c["name"] != "flat.driveable_surface":
            name_to_index[c["name"]] = i
            i += 1

    assert max(name_to_index.values()) < 24, (
        "Error: There are {} classes (excluding vehicle.ego and flat.driveable_surface), "
        "but there should be 23. Please check your category.json".format(max(name_to_index.values()))
    )

    # Now map the vehicle.ego and flat.driveable_surface classes.
    name_to_index["flat.driveable_surface"] = 24
    name_to_index["vehicle.ego"] = 31
    name_to_index["background"] = 0

    # Ensure that each class name is uniquely paired with a class index, and vice versa.
    assert len(name_to_index) == len(
        set(name_to_index.values())
    ), "Error: There are {} class names but {} class indices".format(
        len(name_to_index), len(set(name_to_index.values()))
    )

    return name_to_index
0.909397
0.210726
import os from selenium.webdriver.common.keys import Keys from pytest_dash.utils import ( wait_for_text_to_equal, wait_for_element_by_css_selector, ) import dash from dash.dependencies import Input, Output, State import dash_html_components as html import dash_core_components as dcc from dash_bio import VolcanoPlot from tests.dashbio_demos.app_volcano_plot import DATASETS from .test_common_features import access_demo_app APP_NAME = os.path.basename(__file__).replace('test_', '').replace('.py', '') LAYOUT = html.Div( id='test-vp-graph-div', children=[ dcc.Graph( id='test-vp-graph', ), html.Button(id='test-vp-btn', children='click me'), dcc.Input(id='test-vp-param-name-input', value=''), dcc.Input(id='test-vp-param-value-input', value=''), html.Div(id='test-vp-assert-value-div', children='') ] ) PARAM_TYPES = { 'int': int, 'float': float, 'bool': bool, 'str': str } def volcano_plot_test_param_callback( nclicks, param_name, param_value, param_type=None, dataset=DATASETS['SET1']['dataframe'] ): """Create a volcano plot with a single user chosen prop. 
:param nclicks: (string) html.Button 'n_clicks' Input :param param_name: (string) dcc.Input 'value' State :param param_value: (string) dcc.Input 'value' State :param param_type: (string) one of PARAM_TYPES keys default: None :param dataset: (panda DataFrame): a DataFrame with volcano plot data :return: a dash_bio.VolcanoPlot instance (which is a plotly.graph_objs.Figure instance) """ answer = {'data': [], 'layout': {}} # avoid triggering at the creation of the button in the layout if nclicks is not None: # convert the parameter value to the right type if param_type in PARAM_TYPES: param_value = PARAM_TYPES[param_type](param_value) arg_to_pass = {param_name: param_value} answer = VolcanoPlot( dataset, **arg_to_pass ) return answer # Demo app tests def test_click_app_link_from_gallery(dash_threaded, selenium): access_demo_app(dash_threaded, selenium, APP_NAME) assert selenium.current_url.replace('http://localhost:8050', '') == '/dash-bio/volcano-plot' def test_initial_dataset(dash_threaded, selenium): """Check the default dataset is Set2.""" access_demo_app(dash_threaded, selenium, APP_NAME) wait_for_text_to_equal( selenium, '#vp-dataset-dropdown .Select-value-label', 'Set2' ) def test_change_dataset(dash_threaded, selenium): """Change dataset using the dropdown.""" access_demo_app(dash_threaded, selenium, APP_NAME) dataset_dropdown = wait_for_element_by_css_selector( selenium, '#vp-dataset-dropdown .Select-input input' ) dataset_dropdown.send_keys('Set1') dataset_dropdown.send_keys(Keys.RETURN) wait_for_text_to_equal( selenium, '#vp-dataset-dropdown .Select-value-label', 'Set1' ) def test_lower_genomic_line(dash_threaded, selenium): """Lower the threshold genomic line and verify the change in the highlight points number.""" access_demo_app(dash_threaded, selenium, APP_NAME) # initial check wait_for_text_to_equal(selenium, '#vp-dataset-dropdown .Select-value-label', 'Set2') wait_for_text_to_equal(selenium, '#vp-upper-left', '14') wait_for_text_to_equal(selenium, 
'#vp-upper-right', '92') threshold = wait_for_element_by_css_selector(selenium, '#vp-genomic-line') lower_bound = wait_for_element_by_css_selector(selenium, '#vp-lower-bound') upper_bound = wait_for_element_by_css_selector(selenium, '#vp-upper-bound') assert int(threshold.get_attribute('value')) == 4 assert int(lower_bound.get_attribute('value')) == -1 assert int(upper_bound.get_attribute('value')) == 1 # lower the threshold threshold.send_keys(Keys.ARROW_DOWN) # number of points in the upper left and upper right quadrants wait_for_text_to_equal(selenium, '#vp-upper-left', '154') wait_for_text_to_equal(selenium, '#vp-upper-right', '271') threshold.send_keys(Keys.ARROW_DOWN) threshold.send_keys(Keys.ARROW_DOWN) threshold.send_keys(Keys.ARROW_DOWN) threshold.send_keys(Keys.ARROW_DOWN) assert int(threshold.get_attribute('value')) == 0 def test_effect_size_min_and_max(dash_threaded, selenium): """Move the lower and upper effect size lines to their max and min, respectively.""" access_demo_app(dash_threaded, selenium, APP_NAME) lower_bound = wait_for_element_by_css_selector(selenium, '#vp-lower-bound') upper_bound = wait_for_element_by_css_selector(selenium, '#vp-upper-bound') lower_bound.send_keys(Keys.ARROW_UP) assert int(lower_bound.get_attribute('value')) == 0 # maximum should be set to 0 lower_bound.send_keys(Keys.ARROW_UP) assert int(lower_bound.get_attribute('value')) == 0 # number of points in the upper left and upper right quadrants wait_for_text_to_equal(selenium, '#vp-upper-left', '24') wait_for_text_to_equal(selenium, '#vp-upper-right', '92') upper_bound.send_keys(Keys.ARROW_DOWN) assert int(upper_bound.get_attribute('value')) == 0 # minimum should be set to 0 upper_bound.send_keys(Keys.ARROW_DOWN) assert int(upper_bound.get_attribute('value')) == 0 # number of points in the upper left and upper right quadrants wait_for_text_to_equal(selenium, '#vp-upper-left', '24') wait_for_text_to_equal(selenium, '#vp-upper-right', '99') # Volcano Plot component tests def 
template_test_parameters_volcanoplot( dash_threaded, selenium, assert_callback, param_name, param_value, par_type=None ): """Share reusable test code for testing Volcano Plot single parameter assignation.""" dummy_app = dash.Dash(__name__) dummy_app.layout = LAYOUT @dummy_app.callback( Output('test-vp-graph', 'figure'), [Input('test-vp-btn', 'n_clicks')], [ State('test-vp-param-name-input', 'value'), State('test-vp-param-value-input', 'value') ] ) def update_graph(nclicks, par_name, par_value): """Update the figure of the dcc.Graph component when a button is clicked.""" return volcano_plot_test_param_callback(nclicks, par_name, par_value, par_type) @dummy_app.callback( Output('test-vp-assert-value-div', 'children'), [Input('test-vp-graph', 'figure')], [ State('test-vp-btn', 'n_clicks'), State('test-vp-param-value-input', 'value') ] ) def assert_value(fig, nclicks, input_value): return assert_callback(fig, nclicks, input_value) dash_threaded(dummy_app) param_name_input = wait_for_element_by_css_selector(selenium, '#test-vp-param-name-input') param_value_input = wait_for_element_by_css_selector(selenium, '#test-vp-param-value-input') param_name_input.send_keys(param_name) param_value_input.send_keys(param_value) btn = wait_for_element_by_css_selector(selenium, '#test-vp-btn') btn.click() wait_for_text_to_equal(selenium, '#test-vp-assert-value-div', 'PASSED') def test_xlabel(dash_threaded, selenium): """Change xlabel.""" def assert_callback(fig, nclicks, input_value): answer = '' if nclicks is not None: if input_value == fig['layout']['xaxis']['title']['text']: answer = 'PASSED' return answer template_test_parameters_volcanoplot( dash_threaded, selenium, assert_callback, 'xlabel', 'x-label-test' ) def test_ylabel(dash_threaded, selenium): """Change ylabel.""" def assert_callback(fig, nclicks, input_value): answer = '' if nclicks is not None: if input_value == fig['layout']['yaxis']['title']['text']: answer = 'PASSED' return answer template_test_parameters_volcanoplot( 
dash_threaded, selenium, assert_callback, 'ylabel', 'y-label-test' ) def test_title(dash_threaded, selenium): """Change title.""" def assert_callback(fig, nclicks, input_value): answer = '' if nclicks is not None: if input_value == fig['layout']['title']['text']: answer = 'PASSED' return answer template_test_parameters_volcanoplot( dash_threaded, selenium, assert_callback, 'title', 'x-label-test' )
tests/test_volcano_plot.py
import os from selenium.webdriver.common.keys import Keys from pytest_dash.utils import ( wait_for_text_to_equal, wait_for_element_by_css_selector, ) import dash from dash.dependencies import Input, Output, State import dash_html_components as html import dash_core_components as dcc from dash_bio import VolcanoPlot from tests.dashbio_demos.app_volcano_plot import DATASETS from .test_common_features import access_demo_app APP_NAME = os.path.basename(__file__).replace('test_', '').replace('.py', '') LAYOUT = html.Div( id='test-vp-graph-div', children=[ dcc.Graph( id='test-vp-graph', ), html.Button(id='test-vp-btn', children='click me'), dcc.Input(id='test-vp-param-name-input', value=''), dcc.Input(id='test-vp-param-value-input', value=''), html.Div(id='test-vp-assert-value-div', children='') ] ) PARAM_TYPES = { 'int': int, 'float': float, 'bool': bool, 'str': str } def volcano_plot_test_param_callback( nclicks, param_name, param_value, param_type=None, dataset=DATASETS['SET1']['dataframe'] ): """Create a volcano plot with a single user chosen prop. 
:param nclicks: (string) html.Button 'n_clicks' Input :param param_name: (string) dcc.Input 'value' State :param param_value: (string) dcc.Input 'value' State :param param_type: (string) one of PARAM_TYPES keys default: None :param dataset: (panda DataFrame): a DataFrame with volcano plot data :return: a dash_bio.VolcanoPlot instance (which is a plotly.graph_objs.Figure instance) """ answer = {'data': [], 'layout': {}} # avoid triggering at the creation of the button in the layout if nclicks is not None: # convert the parameter value to the right type if param_type in PARAM_TYPES: param_value = PARAM_TYPES[param_type](param_value) arg_to_pass = {param_name: param_value} answer = VolcanoPlot( dataset, **arg_to_pass ) return answer # Demo app tests def test_click_app_link_from_gallery(dash_threaded, selenium): access_demo_app(dash_threaded, selenium, APP_NAME) assert selenium.current_url.replace('http://localhost:8050', '') == '/dash-bio/volcano-plot' def test_initial_dataset(dash_threaded, selenium): """Check the default dataset is Set2.""" access_demo_app(dash_threaded, selenium, APP_NAME) wait_for_text_to_equal( selenium, '#vp-dataset-dropdown .Select-value-label', 'Set2' ) def test_change_dataset(dash_threaded, selenium): """Change dataset using the dropdown.""" access_demo_app(dash_threaded, selenium, APP_NAME) dataset_dropdown = wait_for_element_by_css_selector( selenium, '#vp-dataset-dropdown .Select-input input' ) dataset_dropdown.send_keys('Set1') dataset_dropdown.send_keys(Keys.RETURN) wait_for_text_to_equal( selenium, '#vp-dataset-dropdown .Select-value-label', 'Set1' ) def test_lower_genomic_line(dash_threaded, selenium): """Lower the threshold genomic line and verify the change in the highlight points number.""" access_demo_app(dash_threaded, selenium, APP_NAME) # initial check wait_for_text_to_equal(selenium, '#vp-dataset-dropdown .Select-value-label', 'Set2') wait_for_text_to_equal(selenium, '#vp-upper-left', '14') wait_for_text_to_equal(selenium, 
'#vp-upper-right', '92') threshold = wait_for_element_by_css_selector(selenium, '#vp-genomic-line') lower_bound = wait_for_element_by_css_selector(selenium, '#vp-lower-bound') upper_bound = wait_for_element_by_css_selector(selenium, '#vp-upper-bound') assert int(threshold.get_attribute('value')) == 4 assert int(lower_bound.get_attribute('value')) == -1 assert int(upper_bound.get_attribute('value')) == 1 # lower the threshold threshold.send_keys(Keys.ARROW_DOWN) # number of points in the upper left and upper right quadrants wait_for_text_to_equal(selenium, '#vp-upper-left', '154') wait_for_text_to_equal(selenium, '#vp-upper-right', '271') threshold.send_keys(Keys.ARROW_DOWN) threshold.send_keys(Keys.ARROW_DOWN) threshold.send_keys(Keys.ARROW_DOWN) threshold.send_keys(Keys.ARROW_DOWN) assert int(threshold.get_attribute('value')) == 0 def test_effect_size_min_and_max(dash_threaded, selenium): """Move the lower and upper effect size lines to their max and min, respectively.""" access_demo_app(dash_threaded, selenium, APP_NAME) lower_bound = wait_for_element_by_css_selector(selenium, '#vp-lower-bound') upper_bound = wait_for_element_by_css_selector(selenium, '#vp-upper-bound') lower_bound.send_keys(Keys.ARROW_UP) assert int(lower_bound.get_attribute('value')) == 0 # maximum should be set to 0 lower_bound.send_keys(Keys.ARROW_UP) assert int(lower_bound.get_attribute('value')) == 0 # number of points in the upper left and upper right quadrants wait_for_text_to_equal(selenium, '#vp-upper-left', '24') wait_for_text_to_equal(selenium, '#vp-upper-right', '92') upper_bound.send_keys(Keys.ARROW_DOWN) assert int(upper_bound.get_attribute('value')) == 0 # minimum should be set to 0 upper_bound.send_keys(Keys.ARROW_DOWN) assert int(upper_bound.get_attribute('value')) == 0 # number of points in the upper left and upper right quadrants wait_for_text_to_equal(selenium, '#vp-upper-left', '24') wait_for_text_to_equal(selenium, '#vp-upper-right', '99') # Volcano Plot component tests def 
template_test_parameters_volcanoplot( dash_threaded, selenium, assert_callback, param_name, param_value, par_type=None ): """Share reusable test code for testing Volcano Plot single parameter assignation.""" dummy_app = dash.Dash(__name__) dummy_app.layout = LAYOUT @dummy_app.callback( Output('test-vp-graph', 'figure'), [Input('test-vp-btn', 'n_clicks')], [ State('test-vp-param-name-input', 'value'), State('test-vp-param-value-input', 'value') ] ) def update_graph(nclicks, par_name, par_value): """Update the figure of the dcc.Graph component when a button is clicked.""" return volcano_plot_test_param_callback(nclicks, par_name, par_value, par_type) @dummy_app.callback( Output('test-vp-assert-value-div', 'children'), [Input('test-vp-graph', 'figure')], [ State('test-vp-btn', 'n_clicks'), State('test-vp-param-value-input', 'value') ] ) def assert_value(fig, nclicks, input_value): return assert_callback(fig, nclicks, input_value) dash_threaded(dummy_app) param_name_input = wait_for_element_by_css_selector(selenium, '#test-vp-param-name-input') param_value_input = wait_for_element_by_css_selector(selenium, '#test-vp-param-value-input') param_name_input.send_keys(param_name) param_value_input.send_keys(param_value) btn = wait_for_element_by_css_selector(selenium, '#test-vp-btn') btn.click() wait_for_text_to_equal(selenium, '#test-vp-assert-value-div', 'PASSED') def test_xlabel(dash_threaded, selenium): """Change xlabel.""" def assert_callback(fig, nclicks, input_value): answer = '' if nclicks is not None: if input_value == fig['layout']['xaxis']['title']['text']: answer = 'PASSED' return answer template_test_parameters_volcanoplot( dash_threaded, selenium, assert_callback, 'xlabel', 'x-label-test' ) def test_ylabel(dash_threaded, selenium): """Change ylabel.""" def assert_callback(fig, nclicks, input_value): answer = '' if nclicks is not None: if input_value == fig['layout']['yaxis']['title']['text']: answer = 'PASSED' return answer template_test_parameters_volcanoplot( 
dash_threaded, selenium, assert_callback, 'ylabel', 'y-label-test' ) def test_title(dash_threaded, selenium): """Change title.""" def assert_callback(fig, nclicks, input_value): answer = '' if nclicks is not None: if input_value == fig['layout']['title']['text']: answer = 'PASSED' return answer template_test_parameters_volcanoplot( dash_threaded, selenium, assert_callback, 'title', 'x-label-test' )
0.701509
0.283149
import math import numpy as np np.set_printoptions(precision=4) D_TO_R = math.pi / 180. TWOPI = 2.0 * math.pi # point we want to move to phiDegrees = 27. # final orientation relative to current robot orientation x = 3 # center of new location, in rear_wheels coordinates y = -2 rho = .5 phi = phiDegrees * D_TO_R # radius of circle of movement # motion circles from start, in rear_wheels # sc : start circle sc_ccw = np.array([0.0, rho]) sc_cw = np.array([0.0, -rho]) # fc: end_circle fc_ccw = np.array([x - rho * math.sin(phi), y + rho * math.cos(phi)]) fc_cw = np.array([x + rho * math.sin(phi), y - rho * math.cos(phi)]) A = np.array([0., 0.]) B = np.array([x, y]) solutions = [] for start_dir in range(0, 2): for end_dir in range(0, 2): C = sc_ccw if start_dir == 0 else sc_cw D = fc_ccw if end_dir == 0 else fc_cw D_C = D - C a = D_C[0] b = D_C[1] theta = math.atan2(b, a) tsq = a**2 + b**2 if start_dir != end_dir and tsq - 4. * rho **2 < 0.: length = None print('dir: {} {} invalid'.format(start_dir, end_dir)) #solutions.push({start_dir: start_dir, end_dir: end_dir, length: length}) else: if start_dir == end_dir: psi = None alpha = None ssq = tsq beta = theta if start_dir == 0 else -theta else: ssq = tsq - (2 * rho)**2 psi = math.acos(2. * rho / math.sqrt(tsq)) alpha = psi - theta if start_dir == 0 else psi + theta beta = math.pi/ 2. - alpha s = math.sqrt(ssq) beta = beta % TWOPI E = rho * np.array([math.sin(beta), 1. 
- math.cos(beta)]) if start_dir == 1: E[1] = -E[1] if end_dir == 0: F = np.array([D[0] + rho * math.sin(beta), D[1] - rho * math.cos(beta)]) else: F = np.array([D[0] - rho * math.sin(beta), D[1] + rho * math.cos(beta)]) # RKJ notebook 2020-08-26 if start_dir == 0 and end_dir == 0: gamma = phi - beta elif start_dir == 0 and end_dir == 1: gamma = beta - phi elif start_dir == 1 and end_dir == 0: gamma = beta + phi else: gamma = -beta - phi gamma = gamma % TWOPI c1 = rho * beta c2 = rho * gamma length = s + c1 + c2 print('dir: {} {} length {}'.format(start_dir, end_dir, length)) print(A, B, C, D, E, F) print('s {}, theta {}, phi {}, psi {}, alpha {}, beta {}, gamma {}, c1 {}, c2 {}: ' .format(s, theta, phi, psi, alpha, beta, gamma, c1, c2))
src/bdbd/src/bdbd/test/circles.py
import math import numpy as np np.set_printoptions(precision=4) D_TO_R = math.pi / 180. TWOPI = 2.0 * math.pi # point we want to move to phiDegrees = 27. # final orientation relative to current robot orientation x = 3 # center of new location, in rear_wheels coordinates y = -2 rho = .5 phi = phiDegrees * D_TO_R # radius of circle of movement # motion circles from start, in rear_wheels # sc : start circle sc_ccw = np.array([0.0, rho]) sc_cw = np.array([0.0, -rho]) # fc: end_circle fc_ccw = np.array([x - rho * math.sin(phi), y + rho * math.cos(phi)]) fc_cw = np.array([x + rho * math.sin(phi), y - rho * math.cos(phi)]) A = np.array([0., 0.]) B = np.array([x, y]) solutions = [] for start_dir in range(0, 2): for end_dir in range(0, 2): C = sc_ccw if start_dir == 0 else sc_cw D = fc_ccw if end_dir == 0 else fc_cw D_C = D - C a = D_C[0] b = D_C[1] theta = math.atan2(b, a) tsq = a**2 + b**2 if start_dir != end_dir and tsq - 4. * rho **2 < 0.: length = None print('dir: {} {} invalid'.format(start_dir, end_dir)) #solutions.push({start_dir: start_dir, end_dir: end_dir, length: length}) else: if start_dir == end_dir: psi = None alpha = None ssq = tsq beta = theta if start_dir == 0 else -theta else: ssq = tsq - (2 * rho)**2 psi = math.acos(2. * rho / math.sqrt(tsq)) alpha = psi - theta if start_dir == 0 else psi + theta beta = math.pi/ 2. - alpha s = math.sqrt(ssq) beta = beta % TWOPI E = rho * np.array([math.sin(beta), 1. 
- math.cos(beta)]) if start_dir == 1: E[1] = -E[1] if end_dir == 0: F = np.array([D[0] + rho * math.sin(beta), D[1] - rho * math.cos(beta)]) else: F = np.array([D[0] - rho * math.sin(beta), D[1] + rho * math.cos(beta)]) # RKJ notebook 2020-08-26 if start_dir == 0 and end_dir == 0: gamma = phi - beta elif start_dir == 0 and end_dir == 1: gamma = beta - phi elif start_dir == 1 and end_dir == 0: gamma = beta + phi else: gamma = -beta - phi gamma = gamma % TWOPI c1 = rho * beta c2 = rho * gamma length = s + c1 + c2 print('dir: {} {} length {}'.format(start_dir, end_dir, length)) print(A, B, C, D, E, F) print('s {}, theta {}, phi {}, psi {}, alpha {}, beta {}, gamma {}, c1 {}, c2 {}: ' .format(s, theta, phi, psi, alpha, beta, gamma, c1, c2))
0.387574
0.534248
import pandas import pathlib import time import sys from InputSimulation import InputSimulation from Colorful import Colorful def changeToStringList(array): return list(map(str, array)) sys.path.append("/home/aholaj/OneDrive/000_WORK/000_Codex/LES-scripting/emulatorScripts/") import ECLAIR_calcs def prepareEMULData(): rootFolderOfEmulatorSets = "/home/aholaj/mounttauskansiot/eclairmount" folderList = {"LVL3Night" :"case_emulator_DESIGN_v3.0.0_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL3_night", "LVL3Day" : "case_emulator_DESIGN_v3.1.0_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL3_day" , "LVL4Night" : "case_emulator_DESIGN_v3.2_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL4_night" , "LVL4Day" : "case_emulator_DESIGN_v3.3_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL4_day" } identifierPrefix ={"LVL3Night" :"3N", "LVL3Day" : "3D" , "LVL4Night" : "4N" , "LVL4Day" : "4D" } fileLists = {} idLists = {} labelLists = {} colorLists = {} for case in list(folderList): fileLists[case] = InputSimulation.getEmulatorFileList(rootFolderOfEmulatorSets, folderList[case] ) idLists[case] = InputSimulation.getEmulatorIDlist(fileLists[case]) labelLists[case] = idLists[case] colorLists[case] = Colorful.getIndyColorList( len(fileLists[case])) simulationData = {} for case in list(folderList): simulationData[case] = InputSimulation( idCollection= idLists[case], folderCollection= fileLists[case], labelCollection = labelLists[case], colorSet= colorLists[case]) designData = {} for case in list(folderList): designData[case] = InputSimulation.getEmulatorDesignAsDataFrame( pathlib.Path(rootFolderOfEmulatorSets) / folderList[case], identifierPrefix[case]) joinedDF = pandas.merge( simulationData[case].getSimulationDataFrame(), designData[case], on="ID") joinedDF = joinedDF.set_index("ID") pres0= 1017.8 pblh_m_list = [None]*joinedDF.shape[0] for i in range(joinedDF.shape[0]): tpot_pbl = joinedDF.iloc[i]["tpot_pbl"] lwp = joinedDF.iloc[i]["lwp"] pblh = joinedDF.iloc[i]["pblh"] q_pbl = 
ECLAIR_calcs.solve_rw_lwp( pres0*100., tpot_pbl,lwp*0.001, pblh*100. ) # kg/kg lwp_apu, cloudbase, pblh_m, clw_max = ECLAIR_calcs.calc_lwp( pres0*100., tpot_pbl , pblh*100., q_pbl ) pblh_m_list[i] = pblh_m joinedDF["pblh_m"] = pblh_m_list simulationData[case].setSimulationDataFrame( joinedDF ) csvFolder = "/home/aholaj/OneDrive/000_WORK/000_ARTIKKELIT/001_Manuscript_LES_emulator/data" for case in list(simulationData): simulationData[case].saveDataFrameAsCSV(csvFolder, case + ".csv") # manuscriptSimulationData.saveDataFrameAsCSV(folder, "manuscriptSimulationData.csv") def main(): prepareEMULData() if __name__ == "__main__": start = time.time() main() end = time.time() print("Script completed in " + str(round((end - start),0)) + " seconds")
plottingScripts/00_prepareEMUL.py
import pandas import pathlib import time import sys from InputSimulation import InputSimulation from Colorful import Colorful def changeToStringList(array): return list(map(str, array)) sys.path.append("/home/aholaj/OneDrive/000_WORK/000_Codex/LES-scripting/emulatorScripts/") import ECLAIR_calcs def prepareEMULData(): rootFolderOfEmulatorSets = "/home/aholaj/mounttauskansiot/eclairmount" folderList = {"LVL3Night" :"case_emulator_DESIGN_v3.0.0_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL3_night", "LVL3Day" : "case_emulator_DESIGN_v3.1.0_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL3_day" , "LVL4Night" : "case_emulator_DESIGN_v3.2_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL4_night" , "LVL4Day" : "case_emulator_DESIGN_v3.3_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL4_day" } identifierPrefix ={"LVL3Night" :"3N", "LVL3Day" : "3D" , "LVL4Night" : "4N" , "LVL4Day" : "4D" } fileLists = {} idLists = {} labelLists = {} colorLists = {} for case in list(folderList): fileLists[case] = InputSimulation.getEmulatorFileList(rootFolderOfEmulatorSets, folderList[case] ) idLists[case] = InputSimulation.getEmulatorIDlist(fileLists[case]) labelLists[case] = idLists[case] colorLists[case] = Colorful.getIndyColorList( len(fileLists[case])) simulationData = {} for case in list(folderList): simulationData[case] = InputSimulation( idCollection= idLists[case], folderCollection= fileLists[case], labelCollection = labelLists[case], colorSet= colorLists[case]) designData = {} for case in list(folderList): designData[case] = InputSimulation.getEmulatorDesignAsDataFrame( pathlib.Path(rootFolderOfEmulatorSets) / folderList[case], identifierPrefix[case]) joinedDF = pandas.merge( simulationData[case].getSimulationDataFrame(), designData[case], on="ID") joinedDF = joinedDF.set_index("ID") pres0= 1017.8 pblh_m_list = [None]*joinedDF.shape[0] for i in range(joinedDF.shape[0]): tpot_pbl = joinedDF.iloc[i]["tpot_pbl"] lwp = joinedDF.iloc[i]["lwp"] pblh = joinedDF.iloc[i]["pblh"] q_pbl = 
ECLAIR_calcs.solve_rw_lwp( pres0*100., tpot_pbl,lwp*0.001, pblh*100. ) # kg/kg lwp_apu, cloudbase, pblh_m, clw_max = ECLAIR_calcs.calc_lwp( pres0*100., tpot_pbl , pblh*100., q_pbl ) pblh_m_list[i] = pblh_m joinedDF["pblh_m"] = pblh_m_list simulationData[case].setSimulationDataFrame( joinedDF ) csvFolder = "/home/aholaj/OneDrive/000_WORK/000_ARTIKKELIT/001_Manuscript_LES_emulator/data" for case in list(simulationData): simulationData[case].saveDataFrameAsCSV(csvFolder, case + ".csv") # manuscriptSimulationData.saveDataFrameAsCSV(folder, "manuscriptSimulationData.csv") def main(): prepareEMULData() if __name__ == "__main__": start = time.time() main() end = time.time() print("Script completed in " + str(round((end - start),0)) + " seconds")
0.114802
0.290893
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from . import _utilities, _tables __all__ = [ 'GetSubnetResult', 'AwaitableGetSubnetResult', 'get_subnet', ] @pulumi.output_type class GetSubnetResult: """ A collection of values returned by getSubnet. """ def __init__(__self__, allow_ip_requests=None, create_ptr_records=None, custom_field_filter=None, custom_fields=None, description=None, description_match=None, display_hostnames=None, edit_date=None, gateway=None, gateway_id=None, host_discovery_enabled=None, id=None, include_in_ping=None, is_folder=None, is_full=None, linked_subnet_id=None, location_id=None, master_subnet_id=None, nameserver_id=None, parent_subnet_id=None, permissions=None, scan_agent_id=None, section_id=None, show_name=None, subnet_address=None, subnet_id=None, subnet_mask=None, utilization_threshold=None, vlan_id=None, vrf_id=None): if allow_ip_requests and not isinstance(allow_ip_requests, bool): raise TypeError("Expected argument 'allow_ip_requests' to be a bool") pulumi.set(__self__, "allow_ip_requests", allow_ip_requests) if create_ptr_records and not isinstance(create_ptr_records, bool): raise TypeError("Expected argument 'create_ptr_records' to be a bool") pulumi.set(__self__, "create_ptr_records", create_ptr_records) if custom_field_filter and not isinstance(custom_field_filter, dict): raise TypeError("Expected argument 'custom_field_filter' to be a dict") pulumi.set(__self__, "custom_field_filter", custom_field_filter) if custom_fields and not isinstance(custom_fields, dict): raise TypeError("Expected argument 'custom_fields' to be a dict") pulumi.set(__self__, "custom_fields", custom_fields) if description and not isinstance(description, str): raise TypeError("Expected argument 'description' to be a str") pulumi.set(__self__, "description", description) if description_match and not isinstance(description_match, str): raise TypeError("Expected argument 'description_match' to 
be a str") pulumi.set(__self__, "description_match", description_match) if display_hostnames and not isinstance(display_hostnames, bool): raise TypeError("Expected argument 'display_hostnames' to be a bool") pulumi.set(__self__, "display_hostnames", display_hostnames) if edit_date and not isinstance(edit_date, str): raise TypeError("Expected argument 'edit_date' to be a str") pulumi.set(__self__, "edit_date", edit_date) if gateway and not isinstance(gateway, dict): raise TypeError("Expected argument 'gateway' to be a dict") pulumi.set(__self__, "gateway", gateway) if gateway_id and not isinstance(gateway_id, str): raise TypeError("Expected argument 'gateway_id' to be a str") pulumi.set(__self__, "gateway_id", gateway_id) if host_discovery_enabled and not isinstance(host_discovery_enabled, bool): raise TypeError("Expected argument 'host_discovery_enabled' to be a bool") pulumi.set(__self__, "host_discovery_enabled", host_discovery_enabled) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if include_in_ping and not isinstance(include_in_ping, bool): raise TypeError("Expected argument 'include_in_ping' to be a bool") pulumi.set(__self__, "include_in_ping", include_in_ping) if is_folder and not isinstance(is_folder, bool): raise TypeError("Expected argument 'is_folder' to be a bool") pulumi.set(__self__, "is_folder", is_folder) if is_full and not isinstance(is_full, bool): raise TypeError("Expected argument 'is_full' to be a bool") pulumi.set(__self__, "is_full", is_full) if linked_subnet_id and not isinstance(linked_subnet_id, int): raise TypeError("Expected argument 'linked_subnet_id' to be a int") pulumi.set(__self__, "linked_subnet_id", linked_subnet_id) if location_id and not isinstance(location_id, int): raise TypeError("Expected argument 'location_id' to be a int") pulumi.set(__self__, "location_id", location_id) if master_subnet_id and not isinstance(master_subnet_id, int): raise 
TypeError("Expected argument 'master_subnet_id' to be a int") pulumi.set(__self__, "master_subnet_id", master_subnet_id) if nameserver_id and not isinstance(nameserver_id, int): raise TypeError("Expected argument 'nameserver_id' to be a int") pulumi.set(__self__, "nameserver_id", nameserver_id) if parent_subnet_id and not isinstance(parent_subnet_id, int): raise TypeError("Expected argument 'parent_subnet_id' to be a int") pulumi.set(__self__, "parent_subnet_id", parent_subnet_id) if permissions and not isinstance(permissions, str): raise TypeError("Expected argument 'permissions' to be a str") pulumi.set(__self__, "permissions", permissions) if scan_agent_id and not isinstance(scan_agent_id, int): raise TypeError("Expected argument 'scan_agent_id' to be a int") pulumi.set(__self__, "scan_agent_id", scan_agent_id) if section_id and not isinstance(section_id, int): raise TypeError("Expected argument 'section_id' to be a int") pulumi.set(__self__, "section_id", section_id) if show_name and not isinstance(show_name, bool): raise TypeError("Expected argument 'show_name' to be a bool") pulumi.set(__self__, "show_name", show_name) if subnet_address and not isinstance(subnet_address, str): raise TypeError("Expected argument 'subnet_address' to be a str") pulumi.set(__self__, "subnet_address", subnet_address) if subnet_id and not isinstance(subnet_id, int): raise TypeError("Expected argument 'subnet_id' to be a int") pulumi.set(__self__, "subnet_id", subnet_id) if subnet_mask and not isinstance(subnet_mask, int): raise TypeError("Expected argument 'subnet_mask' to be a int") pulumi.set(__self__, "subnet_mask", subnet_mask) if utilization_threshold and not isinstance(utilization_threshold, int): raise TypeError("Expected argument 'utilization_threshold' to be a int") pulumi.set(__self__, "utilization_threshold", utilization_threshold) if vlan_id and not isinstance(vlan_id, int): raise TypeError("Expected argument 'vlan_id' to be a int") pulumi.set(__self__, "vlan_id", 
vlan_id) if vrf_id and not isinstance(vrf_id, int): raise TypeError("Expected argument 'vrf_id' to be a int") pulumi.set(__self__, "vrf_id", vrf_id) @property @pulumi.getter(name="allowIpRequests") def allow_ip_requests(self) -> bool: return pulumi.get(self, "allow_ip_requests") @property @pulumi.getter(name="createPtrRecords") def create_ptr_records(self) -> bool: return pulumi.get(self, "create_ptr_records") @property @pulumi.getter(name="customFieldFilter") def custom_field_filter(self) -> Optional[Mapping[str, Any]]: return pulumi.get(self, "custom_field_filter") @property @pulumi.getter(name="customFields") def custom_fields(self) -> Mapping[str, Any]: return pulumi.get(self, "custom_fields") @property @pulumi.getter def description(self) -> str: return pulumi.get(self, "description") @property @pulumi.getter(name="descriptionMatch") def description_match(self) -> Optional[str]: return pulumi.get(self, "description_match") @property @pulumi.getter(name="displayHostnames") def display_hostnames(self) -> bool: return pulumi.get(self, "display_hostnames") @property @pulumi.getter(name="editDate") def edit_date(self) -> str: return pulumi.get(self, "edit_date") @property @pulumi.getter def gateway(self) -> Mapping[str, Any]: return pulumi.get(self, "gateway") @property @pulumi.getter(name="gatewayId") def gateway_id(self) -> str: return pulumi.get(self, "gateway_id") @property @pulumi.getter(name="hostDiscoveryEnabled") def host_discovery_enabled(self) -> bool: return pulumi.get(self, "host_discovery_enabled") @property @pulumi.getter def id(self) -> str: """ The provider-assigned unique ID for this managed resource. 
""" return pulumi.get(self, "id") @property @pulumi.getter(name="includeInPing") def include_in_ping(self) -> bool: return pulumi.get(self, "include_in_ping") @property @pulumi.getter(name="isFolder") def is_folder(self) -> bool: return pulumi.get(self, "is_folder") @property @pulumi.getter(name="isFull") def is_full(self) -> bool: return pulumi.get(self, "is_full") @property @pulumi.getter(name="linkedSubnetId") def linked_subnet_id(self) -> int: return pulumi.get(self, "linked_subnet_id") @property @pulumi.getter(name="locationId") def location_id(self) -> int: return pulumi.get(self, "location_id") @property @pulumi.getter(name="masterSubnetId") def master_subnet_id(self) -> int: return pulumi.get(self, "master_subnet_id") @property @pulumi.getter(name="nameserverId") def nameserver_id(self) -> int: return pulumi.get(self, "nameserver_id") @property @pulumi.getter(name="parentSubnetId") def parent_subnet_id(self) -> int: return pulumi.get(self, "parent_subnet_id") @property @pulumi.getter def permissions(self) -> str: return pulumi.get(self, "permissions") @property @pulumi.getter(name="scanAgentId") def scan_agent_id(self) -> int: return pulumi.get(self, "scan_agent_id") @property @pulumi.getter(name="sectionId") def section_id(self) -> int: return pulumi.get(self, "section_id") @property @pulumi.getter(name="showName") def show_name(self) -> bool: return pulumi.get(self, "show_name") @property @pulumi.getter(name="subnetAddress") def subnet_address(self) -> str: return pulumi.get(self, "subnet_address") @property @pulumi.getter(name="subnetId") def subnet_id(self) -> int: return pulumi.get(self, "subnet_id") @property @pulumi.getter(name="subnetMask") def subnet_mask(self) -> int: return pulumi.get(self, "subnet_mask") @property @pulumi.getter(name="utilizationThreshold") def utilization_threshold(self) -> int: return pulumi.get(self, "utilization_threshold") @property @pulumi.getter(name="vlanId") def vlan_id(self) -> int: return pulumi.get(self, "vlan_id") 
@property @pulumi.getter(name="vrfId") def vrf_id(self) -> int: return pulumi.get(self, "vrf_id") class AwaitableGetSubnetResult(GetSubnetResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSubnetResult( allow_ip_requests=self.allow_ip_requests, create_ptr_records=self.create_ptr_records, custom_field_filter=self.custom_field_filter, custom_fields=self.custom_fields, description=self.description, description_match=self.description_match, display_hostnames=self.display_hostnames, edit_date=self.edit_date, gateway=self.gateway, gateway_id=self.gateway_id, host_discovery_enabled=self.host_discovery_enabled, id=self.id, include_in_ping=self.include_in_ping, is_folder=self.is_folder, is_full=self.is_full, linked_subnet_id=self.linked_subnet_id, location_id=self.location_id, master_subnet_id=self.master_subnet_id, nameserver_id=self.nameserver_id, parent_subnet_id=self.parent_subnet_id, permissions=self.permissions, scan_agent_id=self.scan_agent_id, section_id=self.section_id, show_name=self.show_name, subnet_address=self.subnet_address, subnet_id=self.subnet_id, subnet_mask=self.subnet_mask, utilization_threshold=self.utilization_threshold, vlan_id=self.vlan_id, vrf_id=self.vrf_id) def get_subnet(custom_field_filter: Optional[Mapping[str, Any]] = None, description: Optional[str] = None, description_match: Optional[str] = None, section_id: Optional[int] = None, subnet_address: Optional[str] = None, subnet_id: Optional[int] = None, subnet_mask: Optional[int] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetResult: """ Use this data source to access information about an existing resource. 
""" __args__ = dict() __args__['customFieldFilter'] = custom_field_filter __args__['description'] = description __args__['descriptionMatch'] = description_match __args__['sectionId'] = section_id __args__['subnetAddress'] = subnet_address __args__['subnetId'] = subnet_id __args__['subnetMask'] = subnet_mask if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('phpipam:index/getSubnet:getSubnet', __args__, opts=opts, typ=GetSubnetResult).value return AwaitableGetSubnetResult( allow_ip_requests=__ret__.allow_ip_requests, create_ptr_records=__ret__.create_ptr_records, custom_field_filter=__ret__.custom_field_filter, custom_fields=__ret__.custom_fields, description=__ret__.description, description_match=__ret__.description_match, display_hostnames=__ret__.display_hostnames, edit_date=__ret__.edit_date, gateway=__ret__.gateway, gateway_id=__ret__.gateway_id, host_discovery_enabled=__ret__.host_discovery_enabled, id=__ret__.id, include_in_ping=__ret__.include_in_ping, is_folder=__ret__.is_folder, is_full=__ret__.is_full, linked_subnet_id=__ret__.linked_subnet_id, location_id=__ret__.location_id, master_subnet_id=__ret__.master_subnet_id, nameserver_id=__ret__.nameserver_id, parent_subnet_id=__ret__.parent_subnet_id, permissions=__ret__.permissions, scan_agent_id=__ret__.scan_agent_id, section_id=__ret__.section_id, show_name=__ret__.show_name, subnet_address=__ret__.subnet_address, subnet_id=__ret__.subnet_id, subnet_mask=__ret__.subnet_mask, utilization_threshold=__ret__.utilization_threshold, vlan_id=__ret__.vlan_id, vrf_id=__ret__.vrf_id)
sdk/python/pulumi_phpipam/get_subnet.py
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from . import _utilities, _tables __all__ = [ 'GetSubnetResult', 'AwaitableGetSubnetResult', 'get_subnet', ] @pulumi.output_type class GetSubnetResult: """ A collection of values returned by getSubnet. """ def __init__(__self__, allow_ip_requests=None, create_ptr_records=None, custom_field_filter=None, custom_fields=None, description=None, description_match=None, display_hostnames=None, edit_date=None, gateway=None, gateway_id=None, host_discovery_enabled=None, id=None, include_in_ping=None, is_folder=None, is_full=None, linked_subnet_id=None, location_id=None, master_subnet_id=None, nameserver_id=None, parent_subnet_id=None, permissions=None, scan_agent_id=None, section_id=None, show_name=None, subnet_address=None, subnet_id=None, subnet_mask=None, utilization_threshold=None, vlan_id=None, vrf_id=None): if allow_ip_requests and not isinstance(allow_ip_requests, bool): raise TypeError("Expected argument 'allow_ip_requests' to be a bool") pulumi.set(__self__, "allow_ip_requests", allow_ip_requests) if create_ptr_records and not isinstance(create_ptr_records, bool): raise TypeError("Expected argument 'create_ptr_records' to be a bool") pulumi.set(__self__, "create_ptr_records", create_ptr_records) if custom_field_filter and not isinstance(custom_field_filter, dict): raise TypeError("Expected argument 'custom_field_filter' to be a dict") pulumi.set(__self__, "custom_field_filter", custom_field_filter) if custom_fields and not isinstance(custom_fields, dict): raise TypeError("Expected argument 'custom_fields' to be a dict") pulumi.set(__self__, "custom_fields", custom_fields) if description and not isinstance(description, str): raise TypeError("Expected argument 'description' to be a str") pulumi.set(__self__, "description", description) if description_match and not isinstance(description_match, str): raise TypeError("Expected argument 'description_match' to 
be a str") pulumi.set(__self__, "description_match", description_match) if display_hostnames and not isinstance(display_hostnames, bool): raise TypeError("Expected argument 'display_hostnames' to be a bool") pulumi.set(__self__, "display_hostnames", display_hostnames) if edit_date and not isinstance(edit_date, str): raise TypeError("Expected argument 'edit_date' to be a str") pulumi.set(__self__, "edit_date", edit_date) if gateway and not isinstance(gateway, dict): raise TypeError("Expected argument 'gateway' to be a dict") pulumi.set(__self__, "gateway", gateway) if gateway_id and not isinstance(gateway_id, str): raise TypeError("Expected argument 'gateway_id' to be a str") pulumi.set(__self__, "gateway_id", gateway_id) if host_discovery_enabled and not isinstance(host_discovery_enabled, bool): raise TypeError("Expected argument 'host_discovery_enabled' to be a bool") pulumi.set(__self__, "host_discovery_enabled", host_discovery_enabled) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if include_in_ping and not isinstance(include_in_ping, bool): raise TypeError("Expected argument 'include_in_ping' to be a bool") pulumi.set(__self__, "include_in_ping", include_in_ping) if is_folder and not isinstance(is_folder, bool): raise TypeError("Expected argument 'is_folder' to be a bool") pulumi.set(__self__, "is_folder", is_folder) if is_full and not isinstance(is_full, bool): raise TypeError("Expected argument 'is_full' to be a bool") pulumi.set(__self__, "is_full", is_full) if linked_subnet_id and not isinstance(linked_subnet_id, int): raise TypeError("Expected argument 'linked_subnet_id' to be a int") pulumi.set(__self__, "linked_subnet_id", linked_subnet_id) if location_id and not isinstance(location_id, int): raise TypeError("Expected argument 'location_id' to be a int") pulumi.set(__self__, "location_id", location_id) if master_subnet_id and not isinstance(master_subnet_id, int): raise 
TypeError("Expected argument 'master_subnet_id' to be a int") pulumi.set(__self__, "master_subnet_id", master_subnet_id) if nameserver_id and not isinstance(nameserver_id, int): raise TypeError("Expected argument 'nameserver_id' to be a int") pulumi.set(__self__, "nameserver_id", nameserver_id) if parent_subnet_id and not isinstance(parent_subnet_id, int): raise TypeError("Expected argument 'parent_subnet_id' to be a int") pulumi.set(__self__, "parent_subnet_id", parent_subnet_id) if permissions and not isinstance(permissions, str): raise TypeError("Expected argument 'permissions' to be a str") pulumi.set(__self__, "permissions", permissions) if scan_agent_id and not isinstance(scan_agent_id, int): raise TypeError("Expected argument 'scan_agent_id' to be a int") pulumi.set(__self__, "scan_agent_id", scan_agent_id) if section_id and not isinstance(section_id, int): raise TypeError("Expected argument 'section_id' to be a int") pulumi.set(__self__, "section_id", section_id) if show_name and not isinstance(show_name, bool): raise TypeError("Expected argument 'show_name' to be a bool") pulumi.set(__self__, "show_name", show_name) if subnet_address and not isinstance(subnet_address, str): raise TypeError("Expected argument 'subnet_address' to be a str") pulumi.set(__self__, "subnet_address", subnet_address) if subnet_id and not isinstance(subnet_id, int): raise TypeError("Expected argument 'subnet_id' to be a int") pulumi.set(__self__, "subnet_id", subnet_id) if subnet_mask and not isinstance(subnet_mask, int): raise TypeError("Expected argument 'subnet_mask' to be a int") pulumi.set(__self__, "subnet_mask", subnet_mask) if utilization_threshold and not isinstance(utilization_threshold, int): raise TypeError("Expected argument 'utilization_threshold' to be a int") pulumi.set(__self__, "utilization_threshold", utilization_threshold) if vlan_id and not isinstance(vlan_id, int): raise TypeError("Expected argument 'vlan_id' to be a int") pulumi.set(__self__, "vlan_id", 
vlan_id) if vrf_id and not isinstance(vrf_id, int): raise TypeError("Expected argument 'vrf_id' to be a int") pulumi.set(__self__, "vrf_id", vrf_id) @property @pulumi.getter(name="allowIpRequests") def allow_ip_requests(self) -> bool: return pulumi.get(self, "allow_ip_requests") @property @pulumi.getter(name="createPtrRecords") def create_ptr_records(self) -> bool: return pulumi.get(self, "create_ptr_records") @property @pulumi.getter(name="customFieldFilter") def custom_field_filter(self) -> Optional[Mapping[str, Any]]: return pulumi.get(self, "custom_field_filter") @property @pulumi.getter(name="customFields") def custom_fields(self) -> Mapping[str, Any]: return pulumi.get(self, "custom_fields") @property @pulumi.getter def description(self) -> str: return pulumi.get(self, "description") @property @pulumi.getter(name="descriptionMatch") def description_match(self) -> Optional[str]: return pulumi.get(self, "description_match") @property @pulumi.getter(name="displayHostnames") def display_hostnames(self) -> bool: return pulumi.get(self, "display_hostnames") @property @pulumi.getter(name="editDate") def edit_date(self) -> str: return pulumi.get(self, "edit_date") @property @pulumi.getter def gateway(self) -> Mapping[str, Any]: return pulumi.get(self, "gateway") @property @pulumi.getter(name="gatewayId") def gateway_id(self) -> str: return pulumi.get(self, "gateway_id") @property @pulumi.getter(name="hostDiscoveryEnabled") def host_discovery_enabled(self) -> bool: return pulumi.get(self, "host_discovery_enabled") @property @pulumi.getter def id(self) -> str: """ The provider-assigned unique ID for this managed resource. 
""" return pulumi.get(self, "id") @property @pulumi.getter(name="includeInPing") def include_in_ping(self) -> bool: return pulumi.get(self, "include_in_ping") @property @pulumi.getter(name="isFolder") def is_folder(self) -> bool: return pulumi.get(self, "is_folder") @property @pulumi.getter(name="isFull") def is_full(self) -> bool: return pulumi.get(self, "is_full") @property @pulumi.getter(name="linkedSubnetId") def linked_subnet_id(self) -> int: return pulumi.get(self, "linked_subnet_id") @property @pulumi.getter(name="locationId") def location_id(self) -> int: return pulumi.get(self, "location_id") @property @pulumi.getter(name="masterSubnetId") def master_subnet_id(self) -> int: return pulumi.get(self, "master_subnet_id") @property @pulumi.getter(name="nameserverId") def nameserver_id(self) -> int: return pulumi.get(self, "nameserver_id") @property @pulumi.getter(name="parentSubnetId") def parent_subnet_id(self) -> int: return pulumi.get(self, "parent_subnet_id") @property @pulumi.getter def permissions(self) -> str: return pulumi.get(self, "permissions") @property @pulumi.getter(name="scanAgentId") def scan_agent_id(self) -> int: return pulumi.get(self, "scan_agent_id") @property @pulumi.getter(name="sectionId") def section_id(self) -> int: return pulumi.get(self, "section_id") @property @pulumi.getter(name="showName") def show_name(self) -> bool: return pulumi.get(self, "show_name") @property @pulumi.getter(name="subnetAddress") def subnet_address(self) -> str: return pulumi.get(self, "subnet_address") @property @pulumi.getter(name="subnetId") def subnet_id(self) -> int: return pulumi.get(self, "subnet_id") @property @pulumi.getter(name="subnetMask") def subnet_mask(self) -> int: return pulumi.get(self, "subnet_mask") @property @pulumi.getter(name="utilizationThreshold") def utilization_threshold(self) -> int: return pulumi.get(self, "utilization_threshold") @property @pulumi.getter(name="vlanId") def vlan_id(self) -> int: return pulumi.get(self, "vlan_id") 
@property @pulumi.getter(name="vrfId") def vrf_id(self) -> int: return pulumi.get(self, "vrf_id") class AwaitableGetSubnetResult(GetSubnetResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSubnetResult( allow_ip_requests=self.allow_ip_requests, create_ptr_records=self.create_ptr_records, custom_field_filter=self.custom_field_filter, custom_fields=self.custom_fields, description=self.description, description_match=self.description_match, display_hostnames=self.display_hostnames, edit_date=self.edit_date, gateway=self.gateway, gateway_id=self.gateway_id, host_discovery_enabled=self.host_discovery_enabled, id=self.id, include_in_ping=self.include_in_ping, is_folder=self.is_folder, is_full=self.is_full, linked_subnet_id=self.linked_subnet_id, location_id=self.location_id, master_subnet_id=self.master_subnet_id, nameserver_id=self.nameserver_id, parent_subnet_id=self.parent_subnet_id, permissions=self.permissions, scan_agent_id=self.scan_agent_id, section_id=self.section_id, show_name=self.show_name, subnet_address=self.subnet_address, subnet_id=self.subnet_id, subnet_mask=self.subnet_mask, utilization_threshold=self.utilization_threshold, vlan_id=self.vlan_id, vrf_id=self.vrf_id) def get_subnet(custom_field_filter: Optional[Mapping[str, Any]] = None, description: Optional[str] = None, description_match: Optional[str] = None, section_id: Optional[int] = None, subnet_address: Optional[str] = None, subnet_id: Optional[int] = None, subnet_mask: Optional[int] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetResult: """ Use this data source to access information about an existing resource. 
""" __args__ = dict() __args__['customFieldFilter'] = custom_field_filter __args__['description'] = description __args__['descriptionMatch'] = description_match __args__['sectionId'] = section_id __args__['subnetAddress'] = subnet_address __args__['subnetId'] = subnet_id __args__['subnetMask'] = subnet_mask if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('phpipam:index/getSubnet:getSubnet', __args__, opts=opts, typ=GetSubnetResult).value return AwaitableGetSubnetResult( allow_ip_requests=__ret__.allow_ip_requests, create_ptr_records=__ret__.create_ptr_records, custom_field_filter=__ret__.custom_field_filter, custom_fields=__ret__.custom_fields, description=__ret__.description, description_match=__ret__.description_match, display_hostnames=__ret__.display_hostnames, edit_date=__ret__.edit_date, gateway=__ret__.gateway, gateway_id=__ret__.gateway_id, host_discovery_enabled=__ret__.host_discovery_enabled, id=__ret__.id, include_in_ping=__ret__.include_in_ping, is_folder=__ret__.is_folder, is_full=__ret__.is_full, linked_subnet_id=__ret__.linked_subnet_id, location_id=__ret__.location_id, master_subnet_id=__ret__.master_subnet_id, nameserver_id=__ret__.nameserver_id, parent_subnet_id=__ret__.parent_subnet_id, permissions=__ret__.permissions, scan_agent_id=__ret__.scan_agent_id, section_id=__ret__.section_id, show_name=__ret__.show_name, subnet_address=__ret__.subnet_address, subnet_id=__ret__.subnet_id, subnet_mask=__ret__.subnet_mask, utilization_threshold=__ret__.utilization_threshold, vlan_id=__ret__.vlan_id, vrf_id=__ret__.vrf_id)
0.718989
0.064949
import dash import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import plotly.express as px import plotly.graph_objects as go from app import app """ https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout Layout in Bootstrap is controlled using the grid system. The Bootstrap grid has twelve columns. There are three main layout components in dash-bootstrap-components: Container, Row, and Col. The layout of your app should be built as a series of rows of columns. We set md=4 indicating that on a 'medium' sized or larger screen each column should take up a third of the width. Since we don't specify behaviour on smallersize screens Bootstrap will allow the rows to wrap so as not to squash the content. """ column1 = dbc.Col( [ dcc.Markdown( """ ## Detect Online Hate Speech ******** Online platforms are tackling the problem of hate speech every day, but it can be difficult to draw the line between what is protected under free speech laws and what infringes on others' rights. * The Twitter Hate Speech detector is an app that aims to help identify hateful and offensive online speech. * The model used to make these predictions was trained on a combination of two labeled datasets, with a total of 102,840 tweets. * 56 percent of them were labeled "Normal", 39 percent as "Offensive" and 5 percent as "Hateful". 
""" ), dcc.Link(dbc.Button('Score Your Tweet', color='primary', style=dict(marginTop=40, marginBottom=200)), href='/predictions') ], md=4, ) colors = ['lightslategray',] * 10 colors[0] = 'crimson' x = ['hate', 'like','n**ga','f**king','n**gas', 'b*tch', 'as*', 'people','get', 'amp'] y = [679, 591, 571, 552, 483, 439, 362, 346, 340, 328] fig = go.Figure(data=[go.Bar( x=x, y=y, marker_color=colors # marker color can be a single color value or an iterable )]) fig.update_layout(title_text='Most Common Words in Tweets Labeled Hateful') column2 = dbc.Col( [ dcc.Graph(figure=fig), ] ) layout = dbc.Row([column1, column2])
pages/index.py
import dash import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import plotly.express as px import plotly.graph_objects as go from app import app """ https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout Layout in Bootstrap is controlled using the grid system. The Bootstrap grid has twelve columns. There are three main layout components in dash-bootstrap-components: Container, Row, and Col. The layout of your app should be built as a series of rows of columns. We set md=4 indicating that on a 'medium' sized or larger screen each column should take up a third of the width. Since we don't specify behaviour on smallersize screens Bootstrap will allow the rows to wrap so as not to squash the content. """ column1 = dbc.Col( [ dcc.Markdown( """ ## Detect Online Hate Speech ******** Online platforms are tackling the problem of hate speech every day, but it can be difficult to draw the line between what is protected under free speech laws and what infringes on others' rights. * The Twitter Hate Speech detector is an app that aims to help identify hateful and offensive online speech. * The model used to make these predictions was trained on a combination of two labeled datasets, with a total of 102,840 tweets. * 56 percent of them were labeled "Normal", 39 percent as "Offensive" and 5 percent as "Hateful". 
""" ), dcc.Link(dbc.Button('Score Your Tweet', color='primary', style=dict(marginTop=40, marginBottom=200)), href='/predictions') ], md=4, ) colors = ['lightslategray',] * 10 colors[0] = 'crimson' x = ['hate', 'like','n**ga','f**king','n**gas', 'b*tch', 'as*', 'people','get', 'amp'] y = [679, 591, 571, 552, 483, 439, 362, 346, 340, 328] fig = go.Figure(data=[go.Bar( x=x, y=y, marker_color=colors # marker color can be a single color value or an iterable )]) fig.update_layout(title_text='Most Common Words in Tweets Labeled Hateful') column2 = dbc.Col( [ dcc.Graph(figure=fig), ] ) layout = dbc.Row([column1, column2])
0.655997
0.402686
import numpy as np from agents.common import PLAYER1, PLAYER2, NO_PLAYER, initialize_game_state, is_valid_action def test_valid_action_allValid(): game = initialize_game_state() for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column6(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, NO_PLAYER]) for i in {0, 1, 2, 3, 4, 5}: assert is_valid_action(game, i) == False for i in {6}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column5(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, NO_PLAYER, PLAYER1]) for i in {0, 1, 2, 3, 4, 6}: assert is_valid_action(game, i) == False for i in {5}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column4(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, NO_PLAYER, PLAYER1, PLAYER1]) for i in {0, 1, 2, 3, 5, 6}: assert is_valid_action(game, i) == False for i in {4}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column3(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, NO_PLAYER, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 1, 2, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {3}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column2(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, NO_PLAYER, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 1, 3, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {2}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column1(): game = initialize_game_state() game[-1] = np.array([PLAYER1, NO_PLAYER, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {1}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column0(): 
game = initialize_game_state() game[-1] = np.array([NO_PLAYER, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {0}: assert is_valid_action(game, i) == True def test_valid_action_twoValid_Col_16(): game = initialize_game_state() game[-1] = np.array([PLAYER2, NO_PLAYER, PLAYER1, PLAYER1, PLAYER2, PLAYER1, NO_PLAYER]) for i in {0, 2, 3, 4, 5}: assert is_valid_action(game, i) == False for i in {1, 6}: assert is_valid_action(game, i) == True def test_valid_action_threeValid_Col_156(): game = initialize_game_state() game[-1] = np.array([PLAYER2, NO_PLAYER, PLAYER1, PLAYER1, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {0, 2, 3, 4}: assert is_valid_action(game, i) == False for i in {1, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_fourValid_Col_0156(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, PLAYER1, PLAYER1, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {2, 3, 4}: assert is_valid_action(game, i) == False for i in {0, 1, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_fiveValid_Col_01356(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, PLAYER1, NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {2, 4}: assert is_valid_action(game, i) == False for i in {0, 1, 3, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_012345(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER1]) for i in {6}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 3, 4, 5}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_012346(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER2, NO_PLAYER]) for i in {5}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 3, 4, 6}: assert is_valid_action(game, i) == True 
def test_valid_action_sixValid_Col_012356(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {4}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 3, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_012456(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {3}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_013456(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, PLAYER1, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {2}: assert is_valid_action(game, i) == False for i in {0, 1, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_023456(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {1}: assert is_valid_action(game, i) == False for i in {0, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_123456(): game = initialize_game_state() game[-1] = np.array([PLAYER2, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {0}: assert is_valid_action(game, i) == False for i in {1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_noneValid_mixedPlayer(): game = initialize_game_state() game[-1] = np.array([PLAYER2, PLAYER2, PLAYER1, PLAYER1, PLAYER2, PLAYER1, PLAYER1]) for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False def test_valid_action_noneValid_singlePlayer2(): game = initialize_game_state() game[-1] = np.array([PLAYER2, PLAYER2, PLAYER2, PLAYER2, PLAYER2, PLAYER2, PLAYER2]) for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False def 
test_valid_action_noneValid_singlePlayer1(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False def test_valid_action_actionOutOfBounds(): game = initialize_game_state() for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True for i in {-1, 7}: assert is_valid_action(game, i) == False
tests/test_agents_common_is_valid_action.py
import numpy as np from agents.common import PLAYER1, PLAYER2, NO_PLAYER, initialize_game_state, is_valid_action def test_valid_action_allValid(): game = initialize_game_state() for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column6(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, NO_PLAYER]) for i in {0, 1, 2, 3, 4, 5}: assert is_valid_action(game, i) == False for i in {6}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column5(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, NO_PLAYER, PLAYER1]) for i in {0, 1, 2, 3, 4, 6}: assert is_valid_action(game, i) == False for i in {5}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column4(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, NO_PLAYER, PLAYER1, PLAYER1]) for i in {0, 1, 2, 3, 5, 6}: assert is_valid_action(game, i) == False for i in {4}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column3(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, NO_PLAYER, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 1, 2, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {3}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column2(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, NO_PLAYER, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 1, 3, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {2}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column1(): game = initialize_game_state() game[-1] = np.array([PLAYER1, NO_PLAYER, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {1}: assert is_valid_action(game, i) == True def test_valid_action_oneValid_Column0(): 
game = initialize_game_state() game[-1] = np.array([NO_PLAYER, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False for i in {0}: assert is_valid_action(game, i) == True def test_valid_action_twoValid_Col_16(): game = initialize_game_state() game[-1] = np.array([PLAYER2, NO_PLAYER, PLAYER1, PLAYER1, PLAYER2, PLAYER1, NO_PLAYER]) for i in {0, 2, 3, 4, 5}: assert is_valid_action(game, i) == False for i in {1, 6}: assert is_valid_action(game, i) == True def test_valid_action_threeValid_Col_156(): game = initialize_game_state() game[-1] = np.array([PLAYER2, NO_PLAYER, PLAYER1, PLAYER1, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {0, 2, 3, 4}: assert is_valid_action(game, i) == False for i in {1, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_fourValid_Col_0156(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, PLAYER1, PLAYER1, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {2, 3, 4}: assert is_valid_action(game, i) == False for i in {0, 1, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_fiveValid_Col_01356(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, PLAYER1, NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {2, 4}: assert is_valid_action(game, i) == False for i in {0, 1, 3, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_012345(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER1]) for i in {6}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 3, 4, 5}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_012346(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER2, NO_PLAYER]) for i in {5}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 3, 4, 6}: assert is_valid_action(game, i) == True 
def test_valid_action_sixValid_Col_012356(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER]) for i in {4}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 3, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_012456(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {3}: assert is_valid_action(game, i) == False for i in {0, 1, 2, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_013456(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, NO_PLAYER, PLAYER1, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {2}: assert is_valid_action(game, i) == False for i in {0, 1, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_023456(): game = initialize_game_state() game[-1] = np.array([NO_PLAYER, PLAYER2, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {1}: assert is_valid_action(game, i) == False for i in {0, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_sixValid_Col_123456(): game = initialize_game_state() game[-1] = np.array([PLAYER2, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER, NO_PLAYER]) for i in {0}: assert is_valid_action(game, i) == False for i in {1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True def test_valid_action_noneValid_mixedPlayer(): game = initialize_game_state() game[-1] = np.array([PLAYER2, PLAYER2, PLAYER1, PLAYER1, PLAYER2, PLAYER1, PLAYER1]) for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False def test_valid_action_noneValid_singlePlayer2(): game = initialize_game_state() game[-1] = np.array([PLAYER2, PLAYER2, PLAYER2, PLAYER2, PLAYER2, PLAYER2, PLAYER2]) for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False def 
test_valid_action_noneValid_singlePlayer1(): game = initialize_game_state() game[-1] = np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1]) for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == False def test_valid_action_actionOutOfBounds(): game = initialize_game_state() for i in {0, 1, 2, 3, 4, 5, 6}: assert is_valid_action(game, i) == True for i in {-1, 7}: assert is_valid_action(game, i) == False
0.449634
0.53206
"""URL configuration for the Mezzanine-based blog site.

Wires up the admin, sitemaps, feeds, a Tastypie REST API (v1), JWT token
endpoints, AMP article pages, a WeChat webhook and finally Mezzanine's
catch-all.  NOTE(review): written against the pre-Django-1.10 URL API
(`patterns()` and dotted-string view references), so this file is tied
to an old Django release.
"""
from __future__ import unicode_literals

from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from mezzanine.conf import settings
import mezzanine_pagedown.urls
from tastypie.api import Api

from amp import views as amp_views
from blogapi.api import AllBlogSlugResource, BlogResource
from sitemaps.mobile_sitemaps import DisplayableSitemap as DisplayableMobileSitemap
from sitemaps.sitemaps import DisplayableSitemap

# REST API v1: full blog-post resource plus a slug-only listing resource.
apiv1 = Api(api_name='v1')
apiv1.register(BlogResource())
apiv1.register(AllBlogSlugResource())

admin.autodiscover()

# Admin URLs are language-prefixed so the admin follows the active locale.
urlpatterns = i18n_patterns("",
    ("^admin/", include(admin.site.urls)),
)

# Mount the media library only when the filebrowser package is installed.
if getattr(settings, "PACKAGE_NAME_FILEBROWSER") in settings.INSTALLED_APPS:
    urlpatterns += i18n_patterns("",
        ("^admin/media-library/", include("%s.urls" % settings.PACKAGE_NAME_FILEBROWSER)),
    )

# Separate sitemap indexes for desktop and mobile crawlers.
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
mobile_sitemaps = {"sitemaps": {"all": DisplayableMobileSitemap}}

urlpatterns += patterns("sitemaps.views",
    ("^sitemap\.xml$", "index", sitemaps),
    ("^sitemap_mobile\.xml$", "sitemap", mobile_sitemaps)
)

# Blog feeds, reachable both at the site root and under /blog/.
# NOTE(review): both routes share the name "blog_post_feed", so reverse()
# only ever resolves to the later registration -- confirm this is intended.
urlpatterns += patterns("feed.view",
    url("feeds/(?P<format>.*)%s$" % "/", "blog_post_feed", name="blog_post_feed"),
    url("^blog/feeds/(?P<format>.*)%s$" % "/", "blog_post_feed", name="blog_post_feed")
)

urlpatterns += patterns("homepage.views",
    url("^$", "homepage", name="home"),
)

urlpatterns += patterns("",
    ("^events/", include("events.urls")),
)

urlpatterns += patterns('',
    # AMP (accelerated mobile pages) rendering of blog post detail pages.
    url("^amp/(?P<slug>.*)%s$" % '/', amp_views.amp_blog_post_detail, name="blog_post_detail"),
    url("^pagedown/", include(mezzanine_pagedown.urls)),
    url(r"^api/", include(apiv1.urls)),
    url(r"^api/app/", include("api.urls")),
    # JWT authentication endpoints (rest_framework_jwt).
    url(r'^api-token-auth/', 'rest_framework_jwt.views.obtain_jwt_token'),
    url(r'^api-token-refresh/', 'rest_framework_jwt.views.refresh_jwt_token'),
    url(r'^api-token-verify/', 'rest_framework_jwt.views.verify_jwt_token'),
    # WeChat public-account webhook (no trailing anchor: matches /weixin*).
    url(r'^weixin', 'weixin.views.wechat'),
    # Mezzanine's catch-all must stay last.
    url("^", include("mezzanine.urls")),
)

handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
MK_dream/urls.py
"""URL configuration for the Mezzanine-based blog site.

Wires up the admin, sitemaps, feeds, a Tastypie REST API (v1), JWT token
endpoints, AMP article pages, a WeChat webhook and finally Mezzanine's
catch-all.  NOTE(review): written against the pre-Django-1.10 URL API
(`patterns()` and dotted-string view references), so this file is tied
to an old Django release.
"""
from __future__ import unicode_literals

from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from mezzanine.conf import settings
import mezzanine_pagedown.urls
from tastypie.api import Api

from amp import views as amp_views
from blogapi.api import AllBlogSlugResource, BlogResource
from sitemaps.mobile_sitemaps import DisplayableSitemap as DisplayableMobileSitemap
from sitemaps.sitemaps import DisplayableSitemap

# REST API v1: full blog-post resource plus a slug-only listing resource.
apiv1 = Api(api_name='v1')
apiv1.register(BlogResource())
apiv1.register(AllBlogSlugResource())

admin.autodiscover()

# Admin URLs are language-prefixed so the admin follows the active locale.
urlpatterns = i18n_patterns("",
    ("^admin/", include(admin.site.urls)),
)

# Mount the media library only when the filebrowser package is installed.
if getattr(settings, "PACKAGE_NAME_FILEBROWSER") in settings.INSTALLED_APPS:
    urlpatterns += i18n_patterns("",
        ("^admin/media-library/", include("%s.urls" % settings.PACKAGE_NAME_FILEBROWSER)),
    )

# Separate sitemap indexes for desktop and mobile crawlers.
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
mobile_sitemaps = {"sitemaps": {"all": DisplayableMobileSitemap}}

urlpatterns += patterns("sitemaps.views",
    ("^sitemap\.xml$", "index", sitemaps),
    ("^sitemap_mobile\.xml$", "sitemap", mobile_sitemaps)
)

# Blog feeds, reachable both at the site root and under /blog/.
# NOTE(review): both routes share the name "blog_post_feed", so reverse()
# only ever resolves to the later registration -- confirm this is intended.
urlpatterns += patterns("feed.view",
    url("feeds/(?P<format>.*)%s$" % "/", "blog_post_feed", name="blog_post_feed"),
    url("^blog/feeds/(?P<format>.*)%s$" % "/", "blog_post_feed", name="blog_post_feed")
)

urlpatterns += patterns("homepage.views",
    url("^$", "homepage", name="home"),
)

urlpatterns += patterns("",
    ("^events/", include("events.urls")),
)

urlpatterns += patterns('',
    # AMP (accelerated mobile pages) rendering of blog post detail pages.
    url("^amp/(?P<slug>.*)%s$" % '/', amp_views.amp_blog_post_detail, name="blog_post_detail"),
    url("^pagedown/", include(mezzanine_pagedown.urls)),
    url(r"^api/", include(apiv1.urls)),
    url(r"^api/app/", include("api.urls")),
    # JWT authentication endpoints (rest_framework_jwt).
    url(r'^api-token-auth/', 'rest_framework_jwt.views.obtain_jwt_token'),
    url(r'^api-token-refresh/', 'rest_framework_jwt.views.refresh_jwt_token'),
    url(r'^api-token-verify/', 'rest_framework_jwt.views.verify_jwt_token'),
    # WeChat public-account webhook (no trailing anchor: matches /weixin*).
    url(r'^weixin', 'weixin.views.wechat'),
    # Mezzanine's catch-all must stay last.
    url("^", include("mezzanine.urls")),
)

handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
0.278551
0.057573
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
import re

# Maps mario paint sequencer note symbols ('+'/'-' accidentals and pitch
# letters) to super mario paint pitch names.
notebook = dict(zip(
    ['+', '-', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
     'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'],
    ['#', 'b', 'A2', 'B2', 'C3', 'D3', 'E3', 'F3', 'G3', 'A3',
     'B3', 'C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5']))

# Maps sequencer instrument tag names to super mario paint instrument names.
instrumentbook = dict(zip(
    ['mario', 'toad', 'yoshi', 'star', 'flower', 'gameboy', 'dog', 'cat',
     'pig', 'swan', 'face', 'plane', 'boat', 'car', 'heart', 'coin',
     'plant', 'shyguy', 'ghost'],
    ['MARIO', 'MUSHROOM', 'YOSHI', 'STAR', 'FLOWER', 'GAMEBOY', 'DOG', 'CAT',
     'PIG', 'SWAN', 'FACE', 'PLANE', 'BOAT', 'CAR', 'HEART', 'COIN',
     'PIRANHA', 'SHYGUY', 'BOO']))


def convert_from_file(input_filename, output_filename):
    """Convert one mario paint sequencer (.mss XML) song into the super
    mario paint text format, writing the result to output_filename.

    Raises ET.ParseError on malformed XML and KeyError on unknown
    instrument or note symbols.
    """
    tree = ET.parse(input_filename)
    root = tree.getroot()
    measure = int(root.attrib['measure'])
    # Write through an explicit handle instead of the old
    # `with open(...) as sys.stdout` trick, which left sys.stdout bound
    # to a *closed* file after the block and broke any later print().
    with open(output_filename, "w") as out:
        print("TEMPO: {}, EXT: 0, TIME: {}/4, SOUNDSET: {}".format(
            root.attrib['tempo'], root.attrib['measure'],
            root.attrib['soundfont']), file=out)
        for i, chord in enumerate(root):
            if len(chord) == 0:  # `== 0`, not `is 0`: never identity-compare ints
                continue
            m = i // measure + 1  # 1-based measure number
            b = i % measure       # beat within the measure
            print("{}:{},".format(m, b), end='', file=out)
            for instrument in chord:
                if instrument.tag == "bookmark" or instrument.tag == "speedmark":
                    continue
                if instrument.tag[0] == 'x':
                    # A leading 'x' on the tag marks a muted instrument.
                    name = instrumentbook[instrument.tag[1:]]
                    mute = "m1"
                else:
                    name = instrumentbook[instrument.tag]
                    mute = ""
                # Optional accidental followed by a pitch letter.
                # [a-zA-Z] fixes the old [a-zA-z] class, which also matched
                # the ASCII punctuation between 'Z' and 'a'; [+-] drops the
                # stray '|' the old [\+|\-] class accepted.
                notes = re.findall(r'([+-]?[a-zA-Z])', instrument.text)
                for note in notes:
                    if len(note) > 1:
                        # Accidental present: emit pitch then accidental.
                        print('{} {}{}{},'.format(name, notebook[note[1]],
                                                  notebook[note[0]], mute),
                              end='', file=out)
                    else:
                        print('{} {}{},'.format(name, notebook[note], mute),
                              end='', file=out)
            # Sequencer volume is 0-15; super mario paint expects 0-120.
            print('VOL: {}'.format(int(chord.attrib['volume']) * 8), file=out)


if __name__ == "__main__":
    from os import listdir
    from os.path import isfile, join

    parser = ArgumentParser(description="Convert a mario paint sequencer song to a super mario paint song.")
    parser.add_argument("-f", "--file", dest="filename",
                        help="The file from which to read the mario paint sequencer song",
                        metavar="FILE")
    parser.add_argument("-o", "--output", dest="output",
                        help="The file to which to write the super mario paint song",
                        metavar="FILE")
    parser.add_argument("-d", "--directory", dest="dir",
                        help="Directory of files which to convert.",
                        metavar="DIR", default="songs")
    parser.add_argument("-od", "--output-directory", dest="odir",
                        help="Directory to write the results to.",
                        metavar="DIR", default="output")
    args = parser.parse_args()

    if args.filename is None:
        # Batch mode: convert every .mss file found in the input directory.
        files = [f for f in listdir(args.dir) if isfile(join(args.dir, f))]
        for f in files:
            if f[-3:] == 'mss':
                newfilename = f[:-3] + 'txt'
                convert_from_file(join(args.dir, f), join(args.odir, newfilename))
    else:
        if args.output is None:
            newfilename = args.filename[:-3] + 'output.txt'
        else:
            newfilename = args.output
        convert_from_file(args.filename, newfilename)
mariopaintconverter/mariopaintconverter.py
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
import re

# Maps mario paint sequencer note symbols ('+'/'-' accidentals and pitch
# letters) to super mario paint pitch names.
notebook = dict(zip(
    ['+', '-', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
     'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'],
    ['#', 'b', 'A2', 'B2', 'C3', 'D3', 'E3', 'F3', 'G3', 'A3',
     'B3', 'C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5']))

# Maps sequencer instrument tag names to super mario paint instrument names.
instrumentbook = dict(zip(
    ['mario', 'toad', 'yoshi', 'star', 'flower', 'gameboy', 'dog', 'cat',
     'pig', 'swan', 'face', 'plane', 'boat', 'car', 'heart', 'coin',
     'plant', 'shyguy', 'ghost'],
    ['MARIO', 'MUSHROOM', 'YOSHI', 'STAR', 'FLOWER', 'GAMEBOY', 'DOG', 'CAT',
     'PIG', 'SWAN', 'FACE', 'PLANE', 'BOAT', 'CAR', 'HEART', 'COIN',
     'PIRANHA', 'SHYGUY', 'BOO']))


def convert_from_file(input_filename, output_filename):
    """Convert one mario paint sequencer (.mss XML) song into the super
    mario paint text format, writing the result to output_filename.

    Raises ET.ParseError on malformed XML and KeyError on unknown
    instrument or note symbols.
    """
    tree = ET.parse(input_filename)
    root = tree.getroot()
    measure = int(root.attrib['measure'])
    # Write through an explicit handle instead of the old
    # `with open(...) as sys.stdout` trick, which left sys.stdout bound
    # to a *closed* file after the block and broke any later print().
    with open(output_filename, "w") as out:
        print("TEMPO: {}, EXT: 0, TIME: {}/4, SOUNDSET: {}".format(
            root.attrib['tempo'], root.attrib['measure'],
            root.attrib['soundfont']), file=out)
        for i, chord in enumerate(root):
            if len(chord) == 0:  # `== 0`, not `is 0`: never identity-compare ints
                continue
            m = i // measure + 1  # 1-based measure number
            b = i % measure       # beat within the measure
            print("{}:{},".format(m, b), end='', file=out)
            for instrument in chord:
                if instrument.tag == "bookmark" or instrument.tag == "speedmark":
                    continue
                if instrument.tag[0] == 'x':
                    # A leading 'x' on the tag marks a muted instrument.
                    name = instrumentbook[instrument.tag[1:]]
                    mute = "m1"
                else:
                    name = instrumentbook[instrument.tag]
                    mute = ""
                # Optional accidental followed by a pitch letter.
                # [a-zA-Z] fixes the old [a-zA-z] class, which also matched
                # the ASCII punctuation between 'Z' and 'a'; [+-] drops the
                # stray '|' the old [\+|\-] class accepted.
                notes = re.findall(r'([+-]?[a-zA-Z])', instrument.text)
                for note in notes:
                    if len(note) > 1:
                        # Accidental present: emit pitch then accidental.
                        print('{} {}{}{},'.format(name, notebook[note[1]],
                                                  notebook[note[0]], mute),
                              end='', file=out)
                    else:
                        print('{} {}{},'.format(name, notebook[note], mute),
                              end='', file=out)
            # Sequencer volume is 0-15; super mario paint expects 0-120.
            print('VOL: {}'.format(int(chord.attrib['volume']) * 8), file=out)


if __name__ == "__main__":
    from os import listdir
    from os.path import isfile, join

    parser = ArgumentParser(description="Convert a mario paint sequencer song to a super mario paint song.")
    parser.add_argument("-f", "--file", dest="filename",
                        help="The file from which to read the mario paint sequencer song",
                        metavar="FILE")
    parser.add_argument("-o", "--output", dest="output",
                        help="The file to which to write the super mario paint song",
                        metavar="FILE")
    parser.add_argument("-d", "--directory", dest="dir",
                        help="Directory of files which to convert.",
                        metavar="DIR", default="songs")
    parser.add_argument("-od", "--output-directory", dest="odir",
                        help="Directory to write the results to.",
                        metavar="DIR", default="output")
    args = parser.parse_args()

    if args.filename is None:
        # Batch mode: convert every .mss file found in the input directory.
        files = [f for f in listdir(args.dir) if isfile(join(args.dir, f))]
        for f in files:
            if f[-3:] == 'mss':
                newfilename = f[:-3] + 'txt'
                convert_from_file(join(args.dir, f), join(args.odir, newfilename))
    else:
        if args.output is None:
            newfilename = args.filename[:-3] + 'output.txt'
        else:
            newfilename = args.output
        convert_from_file(args.filename, newfilename)
0.228329
0.325052
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import robin_stocks_modified as robin_stocks
from Utils.datetime_funcs import *
from Utils import login

"""Wrappers for bare robin_stocks functions"""


def get_stock_historicals(symbol, interval="5minute", span="week"):
    """Returns the historical data for a SYMBOL with data at every time
    INTERVAL over a given SPAN, post-processed by process_historicals."""
    assert span in ['day', 'week', 'month', '3month', 'year', '5year']
    assert interval in ['5minute', '10minute', 'hour', 'day', 'week']
    historicals = robin_stocks.stocks.get_stock_historicals(symbol, interval, span)
    process_historicals(historicals)
    return historicals


def get_instrument_data(symbol):
    """Gets all relevant instrument data for SYMBOL.

    Returns the match whose ticker equals SYMBOL exactly, or None when the
    API returned nothing usable or no exact match exists.
    """
    all_matches = robin_stocks.stocks.find_instrument_data(symbol)
    if not all_matches[0]:
        return None
    for match in all_matches:
        if match["symbol"] == symbol:
            return match
    return None


def get_latest_price(symbol, includeExtendedHours=True):
    """Returns the latest price for SYMBOL as a float."""
    string_array = robin_stocks.stocks.get_latest_price(symbol, includeExtendedHours)
    # float() replaces eval(): the API returns a plain numeric string, and
    # eval() on externally supplied text is an arbitrary-code-execution risk.
    return float(string_array[0])


def get_splits(symbol):
    """Returns the stock-split history for SYMBOL."""
    return robin_stocks.stocks.get_splits(symbol)


"""These functions help to manipulate data from the API calls."""


def process_historicals(historicals):
    """Mutates historical data from Robinhood.

    Splits each point's 'begins_at' timestamp into separate 'date' and
    'time' (military) fields and drops fields the callers never use.
    This function can be added to over time to enable new functionality.
    """
    for data_point in historicals:
        date = get_historical_date(data_point)
        time = utc_to_military(get_historical_time(data_point))
        data_point["date"] = date
        data_point["time"] = time
        del data_point["begins_at"]
        del data_point["session"]
        del data_point["interpolated"]
        del data_point["symbol"]


def bound_historicals(all_historicals, start_date=None, end_date=None):
    """Returns the historical data of ALL_HISTORICALS filtered to be
    bounded by the starting and ending dates."""
    assert start_date or end_date, "stock_historicals_between_dates must have some boundary date provided"

    def isValid(date):
        """Checks DATE against whichever of start_date / end_date were
        passed to the enclosing function."""
        if end_date and start_date:
            return is_not_past(date, start_date) and is_not_past(end_date, date)
        elif not end_date:
            return is_not_past(date, start_date)
        else:
            return is_not_past(end_date, date)

    return [data_point for data_point in all_historicals
            if isValid(data_point["date"])]


def get_historical_time(data_point):
    """Returns the HH:MM time of a DATA_POINT, or None when 'begins_at'
    is absent or malformed (bare `except:` narrowed to the two errors the
    lookup/slice can actually raise)."""
    try:
        return data_point['begins_at'][11:16]
    except (KeyError, TypeError):
        return None


def get_historical_date(data_point):
    """Returns the YYYY-MM-DD date of a DATA_POINT, or None when
    'begins_at' is absent or malformed."""
    try:
        return data_point['begins_at'][0:10]
    except (KeyError, TypeError):
        return None
stocks/stocks.py
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import robin_stocks_modified as robin_stocks
from Utils.datetime_funcs import *
from Utils import login

"""Wrappers for bare robin_stocks functions"""


def get_stock_historicals(symbol, interval="5minute", span="week"):
    """Returns the historical data for a SYMBOL with data at every time
    INTERVAL over a given SPAN, post-processed by process_historicals."""
    assert span in ['day', 'week', 'month', '3month', 'year', '5year']
    assert interval in ['5minute', '10minute', 'hour', 'day', 'week']
    historicals = robin_stocks.stocks.get_stock_historicals(symbol, interval, span)
    process_historicals(historicals)
    return historicals


def get_instrument_data(symbol):
    """Gets all relevant instrument data for SYMBOL.

    Returns the match whose ticker equals SYMBOL exactly, or None when the
    API returned nothing usable or no exact match exists.
    """
    all_matches = robin_stocks.stocks.find_instrument_data(symbol)
    if not all_matches[0]:
        return None
    for match in all_matches:
        if match["symbol"] == symbol:
            return match
    return None


def get_latest_price(symbol, includeExtendedHours=True):
    """Returns the latest price for SYMBOL as a float."""
    string_array = robin_stocks.stocks.get_latest_price(symbol, includeExtendedHours)
    # float() replaces eval(): the API returns a plain numeric string, and
    # eval() on externally supplied text is an arbitrary-code-execution risk.
    return float(string_array[0])


def get_splits(symbol):
    """Returns the stock-split history for SYMBOL."""
    return robin_stocks.stocks.get_splits(symbol)


"""These functions help to manipulate data from the API calls."""


def process_historicals(historicals):
    """Mutates historical data from Robinhood.

    Splits each point's 'begins_at' timestamp into separate 'date' and
    'time' (military) fields and drops fields the callers never use.
    This function can be added to over time to enable new functionality.
    """
    for data_point in historicals:
        date = get_historical_date(data_point)
        time = utc_to_military(get_historical_time(data_point))
        data_point["date"] = date
        data_point["time"] = time
        del data_point["begins_at"]
        del data_point["session"]
        del data_point["interpolated"]
        del data_point["symbol"]


def bound_historicals(all_historicals, start_date=None, end_date=None):
    """Returns the historical data of ALL_HISTORICALS filtered to be
    bounded by the starting and ending dates."""
    assert start_date or end_date, "stock_historicals_between_dates must have some boundary date provided"

    def isValid(date):
        """Checks DATE against whichever of start_date / end_date were
        passed to the enclosing function."""
        if end_date and start_date:
            return is_not_past(date, start_date) and is_not_past(end_date, date)
        elif not end_date:
            return is_not_past(date, start_date)
        else:
            return is_not_past(end_date, date)

    return [data_point for data_point in all_historicals
            if isValid(data_point["date"])]


def get_historical_time(data_point):
    """Returns the HH:MM time of a DATA_POINT, or None when 'begins_at'
    is absent or malformed (bare `except:` narrowed to the two errors the
    lookup/slice can actually raise)."""
    try:
        return data_point['begins_at'][11:16]
    except (KeyError, TypeError):
        return None


def get_historical_date(data_point):
    """Returns the YYYY-MM-DD date of a DATA_POINT, or None when
    'begins_at' is absent or malformed."""
    try:
        return data_point['begins_at'][0:10]
    except (KeyError, TypeError):
        return None
0.486332
0.4133
import bbio, math


class HTU21D(object):
    """Driver for the HTU21D digital temperature / relative-humidity
    sensor on I2C.  Pass -1 instead of an I2C bus object to construct an
    offline instance for testing (used by the doctests below)."""

    # I2C address and command bytes from the HTU21D datasheet:
    I2C_ADDRESS = 0x40
    CMD_TEMP = 0xf3           # trigger a temperature measurement
    CMD_RH = 0xf5             # trigger a relative-humidity measurement
    CMD_USER_REG_READ = 0xe7  # read back the user register
    CMD_RESET = 0xfe          # soft reset
    USER_REGISTER_DEFAULT = 0x02 # Reset value of the user register
    CRC_DIVISOR = 0b100110001  # CRC-8 polynomial x^8 + x^5 + x^4 + 1

    def __init__(self, i2c):
        # i2c: an initialized bbio I2C bus object, or -1 for offline testing.
        self.i2c = i2c
        if i2c == -1:
            # Testing mode, don't try to open an I2C interface
            return
        # Soft-reset the sensor, then verify its presence by checking the
        # user register against its documented post-reset default value.
        i2c.write(self.I2C_ADDRESS, [self.CMD_RESET])
        bbio.delay(15)  # allow the soft reset to complete
        usr_reg = i2c.readTransaction(self.I2C_ADDRESS, self.CMD_USER_REG_READ, 1)
        assert usr_reg[0] == self.USER_REGISTER_DEFAULT, \
            "HTU21D not detected on I2C bus"

    def getHumidity(self):
        """ Reads and returns the current relative humidity

        Received value is checked against a CRC and an AssertionError is
        thrown if it is invalid.
        """
        self.i2c.write(self.I2C_ADDRESS, [self.CMD_RH])
        bbio.delay(50)  # wait for the measurement to complete
        # Response is 2 data bytes (MSB first) followed by a CRC byte.
        msb, lsb, crc = self.i2c.read(self.I2C_ADDRESS, 3)
        raw_value = (msb<<8) | lsb
        assert self.checkCRC(raw_value, crc), "received invalid data"
        # Should that really throw an error?
        # Conversion formula from datasheet:
        return -6.0 + 125.0 * (raw_value/65536.0)

    def getTemp(self):
        """ Reads and returns the current ambient temperature in Celsius

        Received value is checked against a CRC and an AssertionError is
        thrown if it is invalid.
        """
        self.i2c.write(self.I2C_ADDRESS, [self.CMD_TEMP])
        bbio.delay(50)  # wait for the measurement to complete
        # Response is 2 data bytes (MSB first) followed by a CRC byte.
        msb, lsb, crc = self.i2c.read(self.I2C_ADDRESS, 3)
        raw_value = (msb<<8) | lsb
        assert self.checkCRC(raw_value, crc), "received invalid data"
        # Should that really throw an error?
        # Conversion formula from datasheet:
        return -46.85 + 175.72 * (raw_value/65536.0)

    def getTempF(self):
        """ Reads and returns the current ambient temperature in fahrenheit

        Received value is checked against a CRC and an AssertionError is
        thrown if it is invalid.
        """
        tempC = self.getTemp()
        return tempC * 9./5 + 32

    def calculateDewPoint(self, rh, temp):
        """ Calculates and returns the dew point for the given RH and temp C

        >>> round(HTU21D(-1).calculateDewPoint(50.0, 25.0))
        14.0
        >>> round(HTU21D(-1).calculateDewPoint(65.0, -10.0))
        -15.0
        """
        # A, B and C are constants in the dew point calculations
        A = 8.1332
        B = 1762.39
        C = 235.66
        pp = 10**(A - B / (temp + C)) # Partial pressure
        return -(C + B / (math.log(rh * (pp/100.), 10) - A))

    def checkCRC(self, value, crc):
        """ Checks the given 2-byte value against the given CRC

        Uses the HTU21D's divisor polynomial of x^8 + x^5 + x^4 + 1 given
        in the datasheet. See
        http://en.wikipedia.org/wiki/Computation_of_cyclic_redundancy_checks

        >>> HTU21D(-1).checkCRC(0xDC, 0x79)
        True
        >>> HTU21D(-1).checkCRC(0x683A, 0x7C)
        True
        >>> HTU21D(-1).checkCRC(0x683A, 0x01)
        False
        """
        # Append 8 zero bits for the CRC remainder, then perform long
        # division over GF(2): wherever the message has a 1 above the
        # divisor's x^8 term, subtract (XOR) the shifted divisor.
        value <<= 8
        divisor = self.CRC_DIVISOR << 15
        for i in range(16):
            if value & 1<<(23-i):
                # There's a 1 above the x^8 bit of the divisor polynomial
                value ^= divisor
            divisor >>= 1
        # For valid data the remainder equals the transmitted CRC byte.
        if (value & 0xff) == crc:
            return True
        return False


if __name__ == "__main__":
    import doctest
    doctest.testmod()
bbio/libraries/HTU21D/HTU21D.py
import bbio, math


class HTU21D(object):
    """Driver for the HTU21D digital temperature / relative-humidity
    sensor on I2C.  Pass -1 instead of an I2C bus object to construct an
    offline instance for testing (used by the doctests below)."""

    # I2C address and command bytes from the HTU21D datasheet:
    I2C_ADDRESS = 0x40
    CMD_TEMP = 0xf3           # trigger a temperature measurement
    CMD_RH = 0xf5             # trigger a relative-humidity measurement
    CMD_USER_REG_READ = 0xe7  # read back the user register
    CMD_RESET = 0xfe          # soft reset
    USER_REGISTER_DEFAULT = 0x02 # Reset value of the user register
    CRC_DIVISOR = 0b100110001  # CRC-8 polynomial x^8 + x^5 + x^4 + 1

    def __init__(self, i2c):
        # i2c: an initialized bbio I2C bus object, or -1 for offline testing.
        self.i2c = i2c
        if i2c == -1:
            # Testing mode, don't try to open an I2C interface
            return
        # Soft-reset the sensor, then verify its presence by checking the
        # user register against its documented post-reset default value.
        i2c.write(self.I2C_ADDRESS, [self.CMD_RESET])
        bbio.delay(15)  # allow the soft reset to complete
        usr_reg = i2c.readTransaction(self.I2C_ADDRESS, self.CMD_USER_REG_READ, 1)
        assert usr_reg[0] == self.USER_REGISTER_DEFAULT, \
            "HTU21D not detected on I2C bus"

    def getHumidity(self):
        """ Reads and returns the current relative humidity

        Received value is checked against a CRC and an AssertionError is
        thrown if it is invalid.
        """
        self.i2c.write(self.I2C_ADDRESS, [self.CMD_RH])
        bbio.delay(50)  # wait for the measurement to complete
        # Response is 2 data bytes (MSB first) followed by a CRC byte.
        msb, lsb, crc = self.i2c.read(self.I2C_ADDRESS, 3)
        raw_value = (msb<<8) | lsb
        assert self.checkCRC(raw_value, crc), "received invalid data"
        # Should that really throw an error?
        # Conversion formula from datasheet:
        return -6.0 + 125.0 * (raw_value/65536.0)

    def getTemp(self):
        """ Reads and returns the current ambient temperature in Celsius

        Received value is checked against a CRC and an AssertionError is
        thrown if it is invalid.
        """
        self.i2c.write(self.I2C_ADDRESS, [self.CMD_TEMP])
        bbio.delay(50)  # wait for the measurement to complete
        # Response is 2 data bytes (MSB first) followed by a CRC byte.
        msb, lsb, crc = self.i2c.read(self.I2C_ADDRESS, 3)
        raw_value = (msb<<8) | lsb
        assert self.checkCRC(raw_value, crc), "received invalid data"
        # Should that really throw an error?
        # Conversion formula from datasheet:
        return -46.85 + 175.72 * (raw_value/65536.0)

    def getTempF(self):
        """ Reads and returns the current ambient temperature in fahrenheit

        Received value is checked against a CRC and an AssertionError is
        thrown if it is invalid.
        """
        tempC = self.getTemp()
        return tempC * 9./5 + 32

    def calculateDewPoint(self, rh, temp):
        """ Calculates and returns the dew point for the given RH and temp C

        >>> round(HTU21D(-1).calculateDewPoint(50.0, 25.0))
        14.0
        >>> round(HTU21D(-1).calculateDewPoint(65.0, -10.0))
        -15.0
        """
        # A, B and C are constants in the dew point calculations
        A = 8.1332
        B = 1762.39
        C = 235.66
        pp = 10**(A - B / (temp + C)) # Partial pressure
        return -(C + B / (math.log(rh * (pp/100.), 10) - A))

    def checkCRC(self, value, crc):
        """ Checks the given 2-byte value against the given CRC

        Uses the HTU21D's divisor polynomial of x^8 + x^5 + x^4 + 1 given
        in the datasheet. See
        http://en.wikipedia.org/wiki/Computation_of_cyclic_redundancy_checks

        >>> HTU21D(-1).checkCRC(0xDC, 0x79)
        True
        >>> HTU21D(-1).checkCRC(0x683A, 0x7C)
        True
        >>> HTU21D(-1).checkCRC(0x683A, 0x01)
        False
        """
        # Append 8 zero bits for the CRC remainder, then perform long
        # division over GF(2): wherever the message has a 1 above the
        # divisor's x^8 term, subtract (XOR) the shifted divisor.
        value <<= 8
        divisor = self.CRC_DIVISOR << 15
        for i in range(16):
            if value & 1<<(23-i):
                # There's a 1 above the x^8 bit of the divisor polynomial
                value ^= divisor
            divisor >>= 1
        # For valid data the remainder equals the transmitted CRC byte.
        if (value & 0xff) == crc:
            return True
        return False


if __name__ == "__main__":
    import doctest
    doctest.testmod()
0.439266
0.388792
from etl.vt import CCARSJobsPostings
import os
import json
import shutil
import unittest
from unittest.mock import patch


class TestETL(unittest.TestCase):
    """ Unit test for ETL class"""

    def setUp(self, test_dir="test", v3_api_filename="v3_ccars.json",
              num_test_items=3):
        #self.mock_write_url = patch('etl.vt.CCARSJobsPostings.write_url').start()
        self.ccars = CCARSJobsPostings()
        self.v3_api_filename = os.path.join(test_dir, v3_api_filename)
        self.num_test_items = num_test_items
        # Path written by the mocked download; None until one is simulated.
        self.v3_dst_file_path = None

    def tearDown(self):
        pass

    def copy_v3_test_file(self, link=None, full_path=None):
        """ Simulates the result of successfully downloading V3 CCARS Job
        posting data and writing it to disk.

        The incoming `link` is deliberately ignored and replaced with the
        local fixture file so the test never touches the network.
        """
        link = self.v3_api_filename
        shutil.copyfile(link, full_path)
        self.v3_dst_file_path = full_path

    def undo_mock_write_url(self):
        """ Undoes the side_effect of mock_write_url """
        self.ccars._drop_db()
        if self.v3_dst_file_path:
            if os.path.isfile(self.v3_dst_file_path):
                os.remove(self.v3_dst_file_path)

    def test_db_connectivity(self):
        return self.ccars.check_mongo()

    def test_load_all_V3_API(self):
        with patch('etl.vt.CCARSJobsPostings.write_url') as mock_write_url:
            mock_write_url.side_effect = self.copy_v3_test_file
            all_stats = self.ccars.add_all()  # indirectly calls our mocked write_url
            assert 1 == all_stats['nLinks'], 'Expected number of test downloads did not occur!'
            # Undo any and all side effects
            self.undo_mock_write_url()

    @unittest.skip("... live test skipped, better suited for API test")
    def test_load_all_V3_API_live(self):
        # Undo any and all side effects
        self.undo_mock_write_url()
        all_stats = self.ccars.add_all()  # indirectly calls our mocked write_url
        assert 1 == all_stats['nLinks'], 'Expected number of test downloads did not occur!'

    def test_get_stats(self):
        with patch('etl.vt.CCARSJobsPostings.write_url') as mock_write_url:
            mock_write_url.side_effect = self.copy_v3_test_file
            all_stats = self.ccars.add_all()  # indirectly calls our mocked write_url
            stats = json.loads(self.ccars.get_stats())
            # "database" fixes the "databse" typo in the failure message.
            assert self.num_test_items == stats['count'],\
                "Expected number of items not in database, found {}".format(stats['count'])
            assert 0 == stats['number_sampled'] == stats['number_not_sampled'],\
                "Found non-zero number of sampled, not sampled items! Expected none."
            # Undo any and all side effects
            self.undo_mock_write_url()
test/test_etl.py
from etl.vt import CCARSJobsPostings
import os
import json
import shutil
import unittest
from unittest.mock import patch


class TestETL(unittest.TestCase):
    """ Unit test for ETL class"""

    def setUp(self, test_dir="test", v3_api_filename="v3_ccars.json",
              num_test_items=3):
        #self.mock_write_url = patch('etl.vt.CCARSJobsPostings.write_url').start()
        self.ccars = CCARSJobsPostings()
        self.v3_api_filename = os.path.join(test_dir, v3_api_filename)
        self.num_test_items = num_test_items
        # Path written by the mocked download; None until one is simulated.
        self.v3_dst_file_path = None

    def tearDown(self):
        pass

    def copy_v3_test_file(self, link=None, full_path=None):
        """ Simulates the result of successfully downloading V3 CCARS Job
        posting data and writing it to disk.

        The incoming `link` is deliberately ignored and replaced with the
        local fixture file so the test never touches the network.
        """
        link = self.v3_api_filename
        shutil.copyfile(link, full_path)
        self.v3_dst_file_path = full_path

    def undo_mock_write_url(self):
        """ Undoes the side_effect of mock_write_url """
        self.ccars._drop_db()
        if self.v3_dst_file_path:
            if os.path.isfile(self.v3_dst_file_path):
                os.remove(self.v3_dst_file_path)

    def test_db_connectivity(self):
        return self.ccars.check_mongo()

    def test_load_all_V3_API(self):
        with patch('etl.vt.CCARSJobsPostings.write_url') as mock_write_url:
            mock_write_url.side_effect = self.copy_v3_test_file
            all_stats = self.ccars.add_all()  # indirectly calls our mocked write_url
            assert 1 == all_stats['nLinks'], 'Expected number of test downloads did not occur!'
            # Undo any and all side effects
            self.undo_mock_write_url()

    @unittest.skip("... live test skipped, better suited for API test")
    def test_load_all_V3_API_live(self):
        # Undo any and all side effects
        self.undo_mock_write_url()
        all_stats = self.ccars.add_all()  # indirectly calls our mocked write_url
        assert 1 == all_stats['nLinks'], 'Expected number of test downloads did not occur!'

    def test_get_stats(self):
        with patch('etl.vt.CCARSJobsPostings.write_url') as mock_write_url:
            mock_write_url.side_effect = self.copy_v3_test_file
            all_stats = self.ccars.add_all()  # indirectly calls our mocked write_url
            stats = json.loads(self.ccars.get_stats())
            # "database" fixes the "databse" typo in the failure message.
            assert self.num_test_items == stats['count'],\
                "Expected number of items not in database, found {}".format(stats['count'])
            assert 0 == stats['number_sampled'] == stats['number_not_sampled'],\
                "Found non-zero number of sampled, not sampled items! Expected none."
            # Undo any and all side effects
            self.undo_mock_write_url()
0.452536
0.322259
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

# Boston housing data: whitespace-separated values, no header row.
# r'\s+' (raw string) fixes the invalid '\s' escape sequence, which raises
# a DeprecationWarning / SyntaxWarning on modern Python.
df = pd.read_csv('housing.data.txt', header=None, sep=r'\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
              'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']

# Modeling nonlinear relationships in the Housing Dataset:
# fit degree-1/2/3 polynomials of LSTAT against the house price MEDV.
X = df[['LSTAT']].values
y = df['MEDV'].values

regr = LinearRegression()

# create quadratic and cubic feature expansions of LSTAT
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)

# evenly spaced grid over the LSTAT range for plotting fitted curves
X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]

regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))

regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))

regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))

# plot results
plt.scatter(X, y, label='Training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
         label='Linear (d=1), $R^2=%.2f$' % linear_r2,
         color='blue', lw=2, linestyle=':')
plt.plot(X_fit, y_quad_fit,
         label='Quadratic (d=2), $R^2=%.2f$' % quadratic_r2,
         color='red', lw=2, linestyle='-')
plt.plot(X_fit, y_cubic_fit,
         label='Cubic (d=3), $R^2=%.2f$' % cubic_r2,
         color='green', lw=2, linestyle='--')
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000s [MEDV]')
plt.legend(loc='upper right')
plt.show()

# Transforming the dataset: log(LSTAT) vs sqrt(MEDV) linearizes the relation.
X_log = np.log(X)
y_sqrt = np.sqrt(y)

# plotting grid over the transformed feature range
X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]

regr = regr.fit(X_log, y_sqrt)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y_sqrt, regr.predict(X_log))

# plot results
plt.scatter(X_log, y_sqrt, label='Training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
         label='Linear (d=1), $R^2=%.2f$' % linear_r2,
         color='blue', lw=2)
plt.xlabel('log(% lower status of the population [LSTAT])')
# Raw string: the LaTeX label's backslash escapes (\sqrt, \;, \$) must
# reach matplotlib's mathtext verbatim instead of being (mis)interpreted
# as Python string escapes.
plt.ylabel(r'$\sqrt{Price \; in \; \$1000s \; [MEDV]}$')
plt.legend(loc='lower left')
plt.tight_layout()
plt.show()
Chapter 10/Turning a linear regression model into a curve - polynomial regression/program2.py
# Polynomial regression on the Housing dataset: model MEDV from LSTAT.
# Fits linear/quadratic/cubic models, then a linear fit on the
# log(LSTAT) vs sqrt(MEDV) transformed data.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

# Whitespace-separated file without a header row.
# NOTE(review): '\s+' should be a raw string (r'\s+') to avoid the
# invalid-escape SyntaxWarning on modern Python.
df = pd.read_csv('housing.data.txt', header=None, sep='\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
              'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']

# Modeling nonlinear relationships in the Housing Dataset
X = df[['LSTAT']].values   # % lower status of the population (single feature)
y = df['MEDV'].values      # median home price in $1000s (target)

regr = LinearRegression()

# create quadratic features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)

# fit features: dense grid over the LSTAT range for smooth fit lines
X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]

# linear fit
regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))

# quadratic fit (same estimator re-fit on the expanded features)
regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))

# cubic fit
regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))

# plot results
plt.scatter(X, y, label='Training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
         label='Linear (d=1), $R^2=%.2f$' % linear_r2,
         color='blue', lw=2, linestyle=':')
plt.plot(X_fit, y_quad_fit,
         label='Quadratic (d=2), $R^2=%.2f$' % quadratic_r2,
         color='red', lw=2, linestyle='-')
plt.plot(X_fit, y_cubic_fit,
         label='Cubic (d=3), $R^2=%.2f$' % cubic_r2,
         color='green', lw=2, linestyle='--')
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000s [MEDV]')
plt.legend(loc='upper right')
plt.show()

# Transforming the dataset: log/sqrt transform linearizes the relationship

# transform features
X_log = np.log(X)
y_sqrt = np.sqrt(y)

# fit features on the transformed scale
X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]
regr = regr.fit(X_log, y_sqrt)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y_sqrt, regr.predict(X_log))

# plot results
# NOTE(review): the ylabel literal should be a raw string so the LaTeX
# backslashes are not treated as Python escapes.
plt.scatter(X_log, y_sqrt, label='Training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
         label='Linear (d=1), $R^2=%.2f$' % linear_r2,
         color='blue', lw=2)
plt.xlabel('log(% lower status of the population [LSTAT])')
plt.ylabel('$\sqrt{Price \; in \; \$1000s \; [MEDV]}$')
plt.legend(loc='lower left')
plt.tight_layout()
plt.show()
0.831074
0.667039
# Simple test uses just the timepoint of weibo:
# per-weibo repost breadth (cumulative count) and depth (repost-tree depth)
# series are computed from the first-hour repost logs and plotted.
from collections import defaultdict
import matplotlib.pyplot as plt
import pandas as pd
import networkx as nx
import pickle
from repost_record import RepostRecord

FILE_PATH = '/home/hp/Documents/DeepLearning/DataCastle/Weibo/Data/testRepostBeforeFirstHour.txt'
FILE_PATH2 = '/home/hp/Documents/DeepLearning/DataCastle/Weibo/Data/testRepostBeforeFirstHour900_969.txt'


def _load_records(path):
    """Read one \\001-separated repost log into a list of RepostRecord.

    Columns 0..3 are: weibo id, author id, re-author id, timepoint.
    """
    origin = pd.read_table(path, sep='\001', header=None, usecols=[0, 1, 2, 3])
    weibo_ids = origin.iloc[:, 0].values
    weibo_author_ids = origin.iloc[:, 1].values
    weibo_reauthor_ids = origin.iloc[:, 2].values
    weibo_timepoints = origin.iloc[:, 3].values
    return [RepostRecord(weibo_ids[i], weibo_author_ids[i],
                         weibo_reauthor_ids[i], weibo_timepoints[i])
            for i in range(origin.shape[0])]


# Data importer
def data_import():
    """Load repost records from both test files, concatenated in order."""
    # Both files share the same schema; the original duplicated this
    # parsing code — factored into _load_records.
    return _load_records(FILE_PATH) + _load_records(FILE_PATH2)


def calc_breadth(repost_records):
    """Per weibo, build a list of (timepoint, cumulative repost count)."""
    weibo_repostnum_dict = defaultdict(list)
    weibo_counter_dict = defaultdict(list)
    for record in repost_records:
        weibo_repostnum_dict[record.weibo_id].append(record.weibo_timepoint)
    # Weibo ids are 'testWeibo1' .. 'testWeibo3000' (mirrors the plotting code).
    for i in range(1, 3001):
        weibo_id = 'testWeibo' + str(i)
        # The n-th repost brings the cumulative count to n.
        for n, timepoint in enumerate(weibo_repostnum_dict[weibo_id], start=1):
            weibo_counter_dict[weibo_id].append((timepoint, n))
    return weibo_counter_dict


def calc_deepth(repost_records):
    """Per weibo, build a list of (timepoint, repost-tree depth).

    The cascade is modelled as a digraph rooted at the synthetic node 0;
    depth is the eccentricity of the root minus the root edge.  Records
    are assumed grouped by weibo_id, as produced by data_import.
    """
    weibo_repostdeepth_dict = defaultdict(list)
    g = nx.DiGraph()

    def _seed_cascade(record):
        """Reset the graph for a new weibo and add its first repost edge."""
        g.clear()
        g.add_node(0)
        g.add_edge(0, record.weibo_author_id)
        g.add_edge(record.weibo_author_id, record.weibo_reauthor_id)

    def _depth():
        # Eccentricity of the root counts the synthetic 0->author edge; -1
        # removes it so depth reflects repost hops only.
        return int(nx.eccentricity(g, v=0)) - 1

    if not repost_records:  # tolerate an empty import
        return weibo_repostdeepth_dict

    first = repost_records[0]
    weibo_start_id = first.weibo_id
    weibo_start_author_id = first.weibo_author_id
    _seed_cascade(first)
    weibo_repostdeepth_dict[first.weibo_id].append(
        (first.weibo_timepoint, _depth()))

    # Bug fix: the original loop condition (i != len-1) skipped the final
    # record; iterate over every remaining record instead.
    for record in repost_records[1:]:
        if record.weibo_id == weibo_start_id:
            # Grow the current cascade; only add each user once.
            if record.weibo_author_id not in g.nodes():
                g.add_edge(weibo_start_author_id, record.weibo_author_id)
            if record.weibo_reauthor_id not in g.nodes():
                g.add_edge(record.weibo_author_id, record.weibo_reauthor_id)
        else:
            # New weibo: restart the cascade graph.
            weibo_start_id = record.weibo_id
            weibo_start_author_id = record.weibo_author_id
            _seed_cascade(record)
        # Bug fix: the original recorded a depth point for the very first
        # record but not for the first record of subsequent weibos; record
        # it unconditionally for consistency.
        weibo_repostdeepth_dict[record.weibo_id].append(
            (record.weibo_timepoint, _depth()))
    return weibo_repostdeepth_dict


def save_plot_breadth(weibo_counter_dict):
    """Pickle the breadth series and save all curves in one PNG."""
    with open('weibo_counter_dict.pickle', 'wb') as handle:
        pickle.dump(weibo_counter_dict, handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    plt.clf()
    for i in range(1, 3001):
        weibo_id = 'testWeibo' + str(i)
        axis_x = [x for x, _ in weibo_counter_dict[weibo_id]]
        axis_y = [y for _, y in weibo_counter_dict[weibo_id]]
        plt.plot(axis_x, axis_y)
    plt.savefig('weibo_counter.png')


def save_plot_deepth(weibo_repostdeepth_dict):
    """Pickle the depth series and save all curves in one PNG."""
    with open('weibo_repostdeepth_dict.pickle', 'wb') as handle:
        pickle.dump(weibo_repostdeepth_dict, handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    plt.clf()
    for i in range(1, 3001):
        weibo_id = 'testWeibo' + str(i)
        axis_x = [x for x, _ in weibo_repostdeepth_dict[weibo_id]]
        axis_y = [y for _, y in weibo_repostdeepth_dict[weibo_id]]
        plt.plot(axis_x, axis_y)
    plt.savefig('weibo_repostdeepth.png')


def main():
    """Run the full import -> breadth -> depth pipeline."""
    repost_records = data_import()
    weibo_counter_dict = calc_breadth(repost_records)
    save_plot_breadth(weibo_counter_dict)
    weibo_repostdeepth_dict = calc_deepth(repost_records)
    save_plot_deepth(weibo_repostdeepth_dict)


if __name__ == '__main__':
    main()
data_importer.py
# Simple test uses just the timepoint of weibo:
# per-weibo repost breadth (cumulative count) and depth (repost-tree depth)
# series are computed from the first-hour repost logs and plotted.
from collections import defaultdict
import matplotlib.pyplot as plt
import pandas as pd
import networkx as nx
import pickle
from repost_record import RepostRecord

FILE_PATH = '/home/hp/Documents/DeepLearning/DataCastle/Weibo/Data/testRepostBeforeFirstHour.txt'
FILE_PATH2 = '/home/hp/Documents/DeepLearning/DataCastle/Weibo/Data/testRepostBeforeFirstHour900_969.txt'


# Data importer
def data_import():
    """Load repost records from both \\001-separated log files.

    Columns 0..3 are: weibo id, author id, re-author id, timepoint.
    Returns a single list of RepostRecord, file 1 followed by file 2.
    """
    origin = pd.read_table(FILE_PATH, sep='\001', header=None, usecols=[0, 1, 2, 3])
    repost_records = []
    weibo_ids = origin.iloc[:, 0].values
    weibo_author_ids = origin.iloc[:, 1].values
    weibo_reauthor_ids = origin.iloc[:, 2].values
    weibo_timepoints = origin.iloc[:, 3].values
    for i in range(origin.shape[0]):
        repost_record = RepostRecord(weibo_ids[i], weibo_author_ids[i],
                                     weibo_reauthor_ids[i], weibo_timepoints[i])
        repost_records.append(repost_record)
    # Second file: same schema, appended to the same list.
    origin2 = pd.read_table(FILE_PATH2, sep='\001', header=None, usecols=[0, 1, 2, 3])
    weibo_ids = origin2.iloc[:, 0].values
    weibo_author_ids = origin2.iloc[:, 1].values
    weibo_reauthor_ids = origin2.iloc[:, 2].values
    weibo_timepoints = origin2.iloc[:, 3].values
    for i in range(origin2.shape[0]):
        repost_record = RepostRecord(weibo_ids[i], weibo_author_ids[i],
                                     weibo_reauthor_ids[i], weibo_timepoints[i])
        repost_records.append(repost_record)
    return repost_records


def calc_breadth(repost_records):
    """Per weibo, build a list of (timepoint, cumulative repost count)."""
    weibo_repostnum_dict = defaultdict(list)
    weibo_counter_dict = defaultdict(list)
    for i in range(len(repost_records)):
        weibo_repostnum_dict[repost_records[i].weibo_id].append(repost_records[i].weibo_timepoint)
    # Weibo ids are 'testWeibo1' .. 'testWeibo3000'.
    for i in range(1, 3001):
        weibo_id = 'testWeibo' + str(i)
        time_point = weibo_repostnum_dict[weibo_id]
        # The n-th repost brings the cumulative count to n.
        weibo_num = [x + 1 for x in range(len(weibo_repostnum_dict[weibo_id]))]
        for t, n in zip(time_point, weibo_num):
            weibo_counter_dict[weibo_id].append((t, n))
    return weibo_counter_dict


def calc_deepth(repost_records):
    """Per weibo, build a list of (timepoint, repost-tree depth).

    The cascade is a digraph rooted at synthetic node 0; depth is the
    root eccentricity minus the root edge.  Assumes records are grouped
    by weibo_id, as produced by data_import.
    """
    weibo_repostdeepth_dict = defaultdict(list)
    i = 0
    g = nx.DiGraph()
    g.add_node(0)
    # Seed the graph with the first record's cascade.
    weibo_start_id = repost_records[i].weibo_id
    weibo_start_author_id = repost_records[i].weibo_author_id
    weibo_author_id = repost_records[i].weibo_author_id
    weibo_reauthor_id = repost_records[i].weibo_reauthor_id
    g.add_edge(0, weibo_author_id)
    g.add_edge(weibo_author_id, weibo_reauthor_id)
    # Eccentricity counts the synthetic 0->author edge; -1 removes it.
    weibo_repostdeepth_dict[repost_records[i].weibo_id].append((repost_records[i].weibo_timepoint,
                                                               int(nx.eccentricity(g, v=0)) - 1))
    i += 1
    # NOTE(review): this loop condition skips the final record
    # (stops at len-1), and the reset branch below records no depth
    # point for the first repost of a new weibo — likely bugs; confirm
    # against the intended output before relying on this series.
    while i != len(repost_records) - 1:
        weibo_cur_id = repost_records[i].weibo_id
        weibo_author_id = repost_records[i].weibo_author_id
        weibo_reauthor_id = repost_records[i].weibo_reauthor_id
        if weibo_start_id == weibo_cur_id:
            # Grow the current cascade; only add each user once.
            if weibo_author_id not in g.nodes():
                g.add_edge(weibo_start_author_id, weibo_author_id)
            if weibo_reauthor_id not in g.nodes():
                g.add_edge(weibo_author_id, weibo_reauthor_id)
            weibo_repostdeepth_dict[repost_records[i].weibo_id].append((repost_records[i].weibo_timepoint,
                                                                       int(nx.eccentricity(g, v=0)) - 1))
        else:
            # New weibo: restart the cascade graph.
            g.clear()
            g.add_node(0)
            weibo_start_id = repost_records[i].weibo_id
            weibo_start_author_id = repost_records[i].weibo_author_id
            weibo_author_id = repost_records[i].weibo_author_id
            weibo_reauthor_id = repost_records[i].weibo_reauthor_id
            g.add_edge(0, weibo_author_id)
            g.add_edge(weibo_author_id, weibo_reauthor_id)
        i += 1
    return weibo_repostdeepth_dict


def save_plot_breadth(weibo_counter_dict):
    """Pickle the breadth series and save all curves in one PNG."""
    with open('weibo_counter_dict.pickle', 'wb') as handle:
        pickle.dump(weibo_counter_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    plt.clf()
    for i in range(1, 3001):
        weibo_id = 'testWeibo' + str(i)
        axis_x = []
        axis_y = []
        for x, y in weibo_counter_dict[weibo_id]:
            axis_x.append(x)
            axis_y.append(y)
        plt.plot(axis_x, axis_y)
    plt.savefig('weibo_counter.png')


def save_plot_deepth(weibo_repostdeepth_dict):
    """Pickle the depth series and save all curves in one PNG."""
    with open('weibo_repostdeepth_dict.pickle', 'wb') as handle:
        pickle.dump(weibo_repostdeepth_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    plt.clf()
    for i in range(1, 3001):
        weibo_id = 'testWeibo' + str(i)
        axis_x = []
        axis_y = []
        for x, y in weibo_repostdeepth_dict[weibo_id]:
            axis_x.append(x)
            axis_y.append(y)
        plt.plot(axis_x, axis_y)
    plt.savefig('weibo_repostdeepth.png')


def main():
    """Run the full import -> breadth -> depth pipeline."""
    repost_records = data_import()
    weibo_counter_dict = calc_breadth(repost_records)
    save_plot_breadth(weibo_counter_dict)
    weibo_repostdeepth_dict = calc_deepth(repost_records)
    save_plot_deepth(weibo_repostdeepth_dict)


if __name__ == '__main__':
    main()
0.344003
0.356251
"""Controller tests for the valuation-indicators resource endpoints.

Each test patches the service layer with a lambda/stub and asserts the
JSON the Flask controller produces against the marshalled schema output.
"""
import datetime
from unittest.mock import patch

from financial_data.blueprint.scraperfy_api import URL_PREFIX
from financial_data.tests.fixtures import app, client
from flask.testing import FlaskClient

from . import BASE_ROUTE
from .interface import ValuationIndicatorsInterface
from .model import ValuationIndicators
from .schema import ValuationIndicatorsSchema
from .service import ValuationIndicatorsService

# Sample search-date values used throughout these tests:
# 2021-08-07T11:23:52.277828
# datetime.datetime(2021,8,7,11,23,52)


def make_valuation_indicator(
        id: int = 123,
        symbol: str = 'Teste symbol',
        searchDate: datetime.datetime = datetime.datetime(2021,8,7,11,23,52),
        dividendYield: float = 6.48,
        priceEarnings: float = 22.22,
        pegRatio: float = 0.56,
        priceBookValue: float = 4.00,
        evEbitda: float = 12.57,
        evEbit: float = 14.73,
        priceEbitda: float = 13.77,
        priceEbit: float = 16.15,
        bookValueShare: float = 3.97,
        priceAsset: float = 2.11,
        earningsShare: float = 0.72,
        pricesSalesRatio: float = 9.92,
        pricesWorkingCapital: float = 13.20,
        priceNetCurrentAssets: float = -3.32
) -> ValuationIndicators:
    """Build a ValuationIndicators model with test default values.

    The symbol is upper-cased, matching what the service stores.
    """
    return ValuationIndicators(asset_id=id,
                               asset_symbol=symbol.upper(),
                               search_date=searchDate,
                               dividend_yield=dividendYield,
                               price_earnings=priceEarnings,
                               peg_ratio=pegRatio,
                               price_book_value=priceBookValue,
                               ev_ebitda=evEbitda,
                               ev_ebit=evEbit,
                               price_ebitda=priceEbitda,
                               price_ebit=priceEbit,
                               book_value_share=bookValueShare,
                               price_asset=priceAsset,
                               earnings_share=earningsShare,
                               prices_sales_ratio=pricesSalesRatio,
                               prices_working_capital=pricesWorkingCapital,
                               price_net_current_assets=priceNetCurrentAssets)


class TestValuationIndicatorsResource:
    """Tests for the collection endpoint (GET list / POST create)."""

    @patch.object(
        ValuationIndicatorsService,
        'get_all',
        lambda: [
            make_valuation_indicator(123, symbol='Test symbol 1'),
            make_valuation_indicator(456, symbol='Test symbol 2'),
        ]
    )
    def test_get(self, client: FlaskClient):
        """GET returns every indicator serialized by the schema."""
        with client:
            results = client.get(f'{URL_PREFIX}{BASE_ROUTE}',
                                 follow_redirects=True).get_json()
            expected = (
                ValuationIndicatorsSchema(many=True)
                .dump(
                    [
                        make_valuation_indicator(123, symbol='Test symbol 1'),
                        make_valuation_indicator(456, symbol='Test symbol 2'),
                    ]
                )
            )
            # Membership check (rather than ==) tolerates response ordering.
            for r in results:
                assert r in expected

    @patch.object(
        ValuationIndicatorsService,
        'create',
        lambda create_request: ValuationIndicators(**create_request)
    )
    def test_post(self, client: FlaskClient):
        """POST echoes the created indicator back through the schema."""
        with client:
            payload = dict(assetSymbol='B3SA3', dividendYield=9.99,
                           priceEarnings=99.99)
            result = client.post(f'{URL_PREFIX}{BASE_ROUTE}/',
                                 json=payload).get_json()
            expected = (
                ValuationIndicatorsSchema()
                .dump(ValuationIndicators(
                    asset_symbol=payload['assetSymbol'],
                    dividend_yield=payload['dividendYield'],
                    price_earnings=payload['priceEarnings']
                ))
            )
            assert result == expected


def fake_update(asset: ValuationIndicators,
                changes: ValuationIndicatorsInterface) -> ValuationIndicators:
    '''To simulate an update, just return a new object'''
    updated_asset = ValuationIndicators(
        asset_id = asset.asset_id,
        asset_symbol = changes['asset_symbol'],
        # Fixed search date: the controller ignores the client-sent one.
        search_date = datetime.datetime(2021,8,7,11,23,52),
        dividend_yield = changes['dividend_yield'],
        price_earnings = changes['price_earnings'],
        peg_ratio = changes['peg_ratio'],
        price_book_value = changes['price_book_value'],
        ev_ebitda = changes['ev_ebitda'],
        ev_ebit = changes['ev_ebit'],
        price_ebitda = changes['price_ebitda'],
        price_ebit = changes['price_ebit'],
        book_value_share = changes['book_value_share'],
        price_asset = changes['price_asset'],
        earnings_share = changes['earnings_share'],
        prices_sales_ratio = changes['prices_sales_ratio'],
        prices_working_capital = changes['prices_working_capital'],
        price_net_current_assets = changes['price_net_current_assets']
    )
    return updated_asset


class TestValuationIndicatorsSymbolResource:
    """Tests for the per-symbol endpoint (GET / DELETE / PUT)."""

    @patch.object(
        ValuationIndicatorsService,
        'get_by_symbol',
        lambda symbol: make_valuation_indicator(symbol=symbol)
    )
    def test_get(self, client: FlaskClient):
        """GET by symbol returns the indicator with the upper-cased symbol."""
        with client:
            result = client.get(f'{URL_PREFIX}{BASE_ROUTE}/TEST SYMBOL').get_json()
            expected = make_valuation_indicator(symbol = 'Test symbol')
            assert result['assetSymbol'] == expected.asset_symbol

    @patch.object(ValuationIndicatorsService, 'delete_by_symbol',
                  lambda symbol: symbol.upper())
    def test_delete(self, client: FlaskClient):
        """DELETE responds with a success status and the deleted symbol."""
        result = client.delete(f'{URL_PREFIX}{BASE_ROUTE}/Test Symbol').get_json()
        expected = dict(status='Success', symbol='TEST SYMBOL')
        assert result == expected

    @patch.object(
        ValuationIndicatorsService,
        'get_by_symbol',
        lambda symbol: make_valuation_indicator(symbol=symbol)
    )
    @patch.object(ValuationIndicatorsService, 'update', fake_update)
    def test_put(self, client: FlaskClient):
        """PUT applies the payload via fake_update and returns the dump.

        Numeric fields are sent as strings to exercise schema coercion.
        """
        result = client.put(
            f'{URL_PREFIX}{BASE_ROUTE}/Test Symbol',
            json={
                'assetSymbol': 'New Asset',
                'searchDate': '2021-08-11T16:31:56.167936',
                'dividendYield': '99.99',
                'priceEarnings': '88.88',
                'pegRatio': '4.69',
                'priceBookValue': '3.98',
                'evEbitda': '12.49',
                'evEbit': '14.63',
                'priceEbitda': '13.70',
                'priceEbit': '16.06',
                'bookValueShare': '3.97',
                'priceAsset': '2.10',
                'earningsShare': '0.72',
                'pricesSalesRatio': '0',
                'pricesWorkingCapital': '13.13',
                'priceNetCurrentAssets': '-3.30'
            }
        ).get_json()
        expected = (
            ValuationIndicatorsSchema()
            .dump(
                ValuationIndicators(
                    asset_id = 123,
                    asset_symbol = 'New Asset',
                    search_date = datetime.datetime(2021,8,7,11,23,52),
                    dividend_yield = 99.99,
                    price_earnings = 88.88,
                    peg_ratio = 4.69,
                    price_book_value = 3.98,
                    ev_ebitda = 12.49,
                    ev_ebit = 14.63,
                    price_ebitda = 13.70,
                    price_ebit = 16.06,
                    book_value_share = 3.97,
                    price_asset = 2.10,
                    earnings_share = 0.72,
                    prices_sales_ratio = 0,
                    prices_working_capital = 13.13,
                    price_net_current_assets = -3.30
                )
            )
        )
        assert result == expected
financial_data/blueprint/scraperfy_api/asset_scraper/company_indicators/valuation_indicators/controller_test.py
"""Controller tests for the valuation-indicators resource endpoints.

Each test patches the service layer with a lambda/stub and asserts the
JSON the Flask controller produces against the marshalled schema output.
"""
import datetime
from unittest.mock import patch

from financial_data.blueprint.scraperfy_api import URL_PREFIX
from financial_data.tests.fixtures import app, client
from flask.testing import FlaskClient

from . import BASE_ROUTE
from .interface import ValuationIndicatorsInterface
from .model import ValuationIndicators
from .schema import ValuationIndicatorsSchema
from .service import ValuationIndicatorsService

# Sample search-date values used throughout these tests:
# 2021-08-07T11:23:52.277828
# datetime.datetime(2021,8,7,11,23,52)


def make_valuation_indicator(
        id: int = 123,
        symbol: str = 'Teste symbol',
        searchDate: datetime.datetime = datetime.datetime(2021,8,7,11,23,52),
        dividendYield: float = 6.48,
        priceEarnings: float = 22.22,
        pegRatio: float = 0.56,
        priceBookValue: float = 4.00,
        evEbitda: float = 12.57,
        evEbit: float = 14.73,
        priceEbitda: float = 13.77,
        priceEbit: float = 16.15,
        bookValueShare: float = 3.97,
        priceAsset: float = 2.11,
        earningsShare: float = 0.72,
        pricesSalesRatio: float = 9.92,
        pricesWorkingCapital: float = 13.20,
        priceNetCurrentAssets: float = -3.32
) -> ValuationIndicators:
    """Build a ValuationIndicators model with test default values.

    The symbol is upper-cased, matching what the service stores.
    """
    return ValuationIndicators(asset_id=id,
                               asset_symbol=symbol.upper(),
                               search_date=searchDate,
                               dividend_yield=dividendYield,
                               price_earnings=priceEarnings,
                               peg_ratio=pegRatio,
                               price_book_value=priceBookValue,
                               ev_ebitda=evEbitda,
                               ev_ebit=evEbit,
                               price_ebitda=priceEbitda,
                               price_ebit=priceEbit,
                               book_value_share=bookValueShare,
                               price_asset=priceAsset,
                               earnings_share=earningsShare,
                               prices_sales_ratio=pricesSalesRatio,
                               prices_working_capital=pricesWorkingCapital,
                               price_net_current_assets=priceNetCurrentAssets)


class TestValuationIndicatorsResource:
    """Tests for the collection endpoint (GET list / POST create)."""

    @patch.object(
        ValuationIndicatorsService,
        'get_all',
        lambda: [
            make_valuation_indicator(123, symbol='Test symbol 1'),
            make_valuation_indicator(456, symbol='Test symbol 2'),
        ]
    )
    def test_get(self, client: FlaskClient):
        """GET returns every indicator serialized by the schema."""
        with client:
            results = client.get(f'{URL_PREFIX}{BASE_ROUTE}',
                                 follow_redirects=True).get_json()
            expected = (
                ValuationIndicatorsSchema(many=True)
                .dump(
                    [
                        make_valuation_indicator(123, symbol='Test symbol 1'),
                        make_valuation_indicator(456, symbol='Test symbol 2'),
                    ]
                )
            )
            # Membership check (rather than ==) tolerates response ordering.
            for r in results:
                assert r in expected

    @patch.object(
        ValuationIndicatorsService,
        'create',
        lambda create_request: ValuationIndicators(**create_request)
    )
    def test_post(self, client: FlaskClient):
        """POST echoes the created indicator back through the schema."""
        with client:
            payload = dict(assetSymbol='B3SA3', dividendYield=9.99,
                           priceEarnings=99.99)
            result = client.post(f'{URL_PREFIX}{BASE_ROUTE}/',
                                 json=payload).get_json()
            expected = (
                ValuationIndicatorsSchema()
                .dump(ValuationIndicators(
                    asset_symbol=payload['assetSymbol'],
                    dividend_yield=payload['dividendYield'],
                    price_earnings=payload['priceEarnings']
                ))
            )
            assert result == expected


def fake_update(asset: ValuationIndicators,
                changes: ValuationIndicatorsInterface) -> ValuationIndicators:
    '''To simulate an update, just return a new object'''
    updated_asset = ValuationIndicators(
        asset_id = asset.asset_id,
        asset_symbol = changes['asset_symbol'],
        # Fixed search date: the controller ignores the client-sent one.
        search_date = datetime.datetime(2021,8,7,11,23,52),
        dividend_yield = changes['dividend_yield'],
        price_earnings = changes['price_earnings'],
        peg_ratio = changes['peg_ratio'],
        price_book_value = changes['price_book_value'],
        ev_ebitda = changes['ev_ebitda'],
        ev_ebit = changes['ev_ebit'],
        price_ebitda = changes['price_ebitda'],
        price_ebit = changes['price_ebit'],
        book_value_share = changes['book_value_share'],
        price_asset = changes['price_asset'],
        earnings_share = changes['earnings_share'],
        prices_sales_ratio = changes['prices_sales_ratio'],
        prices_working_capital = changes['prices_working_capital'],
        price_net_current_assets = changes['price_net_current_assets']
    )
    return updated_asset


class TestValuationIndicatorsSymbolResource:
    """Tests for the per-symbol endpoint (GET / DELETE / PUT)."""

    @patch.object(
        ValuationIndicatorsService,
        'get_by_symbol',
        lambda symbol: make_valuation_indicator(symbol=symbol)
    )
    def test_get(self, client: FlaskClient):
        """GET by symbol returns the indicator with the upper-cased symbol."""
        with client:
            result = client.get(f'{URL_PREFIX}{BASE_ROUTE}/TEST SYMBOL').get_json()
            expected = make_valuation_indicator(symbol = 'Test symbol')
            assert result['assetSymbol'] == expected.asset_symbol

    @patch.object(ValuationIndicatorsService, 'delete_by_symbol',
                  lambda symbol: symbol.upper())
    def test_delete(self, client: FlaskClient):
        """DELETE responds with a success status and the deleted symbol."""
        result = client.delete(f'{URL_PREFIX}{BASE_ROUTE}/Test Symbol').get_json()
        expected = dict(status='Success', symbol='TEST SYMBOL')
        assert result == expected

    @patch.object(
        ValuationIndicatorsService,
        'get_by_symbol',
        lambda symbol: make_valuation_indicator(symbol=symbol)
    )
    @patch.object(ValuationIndicatorsService, 'update', fake_update)
    def test_put(self, client: FlaskClient):
        """PUT applies the payload via fake_update and returns the dump.

        Numeric fields are sent as strings to exercise schema coercion.
        """
        result = client.put(
            f'{URL_PREFIX}{BASE_ROUTE}/Test Symbol',
            json={
                'assetSymbol': 'New Asset',
                'searchDate': '2021-08-11T16:31:56.167936',
                'dividendYield': '99.99',
                'priceEarnings': '88.88',
                'pegRatio': '4.69',
                'priceBookValue': '3.98',
                'evEbitda': '12.49',
                'evEbit': '14.63',
                'priceEbitda': '13.70',
                'priceEbit': '16.06',
                'bookValueShare': '3.97',
                'priceAsset': '2.10',
                'earningsShare': '0.72',
                'pricesSalesRatio': '0',
                'pricesWorkingCapital': '13.13',
                'priceNetCurrentAssets': '-3.30'
            }
        ).get_json()
        expected = (
            ValuationIndicatorsSchema()
            .dump(
                ValuationIndicators(
                    asset_id = 123,
                    asset_symbol = 'New Asset',
                    search_date = datetime.datetime(2021,8,7,11,23,52),
                    dividend_yield = 99.99,
                    price_earnings = 88.88,
                    peg_ratio = 4.69,
                    price_book_value = 3.98,
                    ev_ebitda = 12.49,
                    ev_ebit = 14.63,
                    price_ebitda = 13.70,
                    price_ebit = 16.06,
                    book_value_share = 3.97,
                    price_asset = 2.10,
                    earnings_share = 0.72,
                    prices_sales_ratio = 0,
                    prices_working_capital = 13.13,
                    price_net_current_assets = -3.30
                )
            )
        )
        assert result == expected
0.566258
0.38975
import os
import yaml
import xlrd
from tools import extractor


class ReadFile:
    """Helpers to read the YAML config file and YAML test-case files."""

    config_dict = None    # parsed config, cached once per process
    case_file_list = []   # case-file paths collected by get_file

    @classmethod
    def get_config_dict(cls, config_path: str = 'config/config.yaml') -> dict:
        """Read the config file and cache it as a dict.

        :param config_path: config file path, defaults to config/config.yaml
        :return: cls.config_dict
        """
        if cls.config_dict is None:
            # Explicit encoding avoids decode errors when running on Windows.
            with open(config_path, 'r', encoding='utf-8') as file:
                cls.config_dict = yaml.load(
                    file.read(), Loader=yaml.FullLoader)
        return cls.config_dict

    @classmethod
    def read_config(cls, expr: str = '.'):
        """Extract any value from config/config.yaml via a jsonpath expression.

        :param expr: jsonpath extraction expression; '.' returns the whole config
        :return: the extracted value
        """
        return extractor(cls.get_config_dict(), expr)

    @classmethod
    def read_case_yml(cls, tag=None):
        """Yield enabled YAML test cases, optionally filtered by tag.

        :param tag: when given, only cases whose 'tag' list contains it are yielded
        """
        # Bug fix: reset the collected list so repeated calls do not
        # accumulate duplicate file paths on the class attribute.
        cls.case_file_list = []
        cls.get_file(cls.read_config('$.file_path.test_case'))
        print(cls.case_file_list)
        for file in cls.case_file_list:
            with open(file, 'r', encoding='utf-8') as f:
                case_list = yaml.load(
                    f.read(), Loader=yaml.FullLoader)
            for case_tmp in case_list:
                case_tag = case_tmp.get('tag') if case_tmp.get('tag') else []
                if case_tmp.get('enable'):
                    if tag:
                        if tag in case_tag:
                            yield case_tmp
                    else:
                        yield case_tmp

    @classmethod
    def get_file(cls, file_path):
        """Recursively collect .yml/.yaml case files under file_path.

        Paths containing 'tmp' are skipped.

        :param file_path: directory holding the case files
        """
        file_list = os.listdir(file_path)
        file_list.sort(key=str.lower)
        for file in file_list:
            file = os.path.join(file_path, file)
            if os.path.isdir(file):
                cls.get_file(file)
                # Bug fix: a directory named e.g. 'cases.yml' must not fall
                # through to the file checks and be appended as a case file.
                continue
            # 'tmp' in file is equivalent to the original
            # len(file.split('tmp')) > 1, just idiomatic.
            if file.split('.')[-1] not in ['yml', 'yaml'] or 'tmp' in file:
                continue
            cls.case_file_list.append(file)


if __name__ == '__main__':
    tmp = ReadFile()
    tmp.read_case_yml()
tools/read_file.py
import os
import yaml
import xlrd
from tools import extractor


class ReadFile:
    """Helpers to read the YAML config file and YAML test-case files."""

    config_dict = None    # parsed config, cached once per process
    case_file_list = []   # case-file paths collected by get_file
    # NOTE(review): case_file_list is never reset, so repeated
    # read_case_yml calls accumulate duplicate entries — confirm intent.

    @classmethod
    def get_config_dict(cls, config_path: str = 'config/config.yaml') -> dict:
        """Read the config file and cache it as a dict.

        :param config_path: config file path, defaults to config/config.yaml
        :return: cls.config_dict
        """
        if cls.config_dict is None:
            # Explicit encoding avoids decode errors when running on Windows.
            with open(config_path, 'r', encoding='utf-8') as file:
                cls.config_dict = yaml.load(
                    file.read(), Loader=yaml.FullLoader)
        return cls.config_dict

    @classmethod
    def read_config(cls, expr: str = '.'):
        """Extract any value from config/config.yaml via a jsonpath expression.

        :param expr: jsonpath extraction expression; '.' returns the whole config
        :return: the extracted value
        """
        return extractor(cls.get_config_dict(), expr)

    @classmethod
    def read_case_yml(cls, tag=None):
        """Yield enabled YAML test cases, optionally filtered by tag.

        :param tag: when given, only cases whose 'tag' list contains it are yielded
        """
        cls.get_file(cls.read_config('$.file_path.test_case'))
        print(cls.case_file_list)
        for file in cls.case_file_list:
            with open(file, 'r', encoding='utf-8') as f:
                case_list = yaml.load(
                    f.read(), Loader=yaml.FullLoader)
                for case_tmp in case_list:
                    # Only 'enable'd cases are yielded; a case without a
                    # 'tag' key is treated as having no tags.
                    case_tag = case_tmp.get('tag') if case_tmp.get('tag') else []
                    if case_tmp.get('enable'):
                        if tag:
                            if tag in case_tag:
                                yield case_tmp
                        else:
                            yield case_tmp

    @classmethod
    def get_file(cls, file_path):
        """Recursively collect .yml/.yaml case files under file_path.

        Paths containing 'tmp' are skipped.

        :param file_path: directory holding the case files
        """
        file_list = os.listdir(file_path)
        file_list.sort(key=str.lower)
        for file in file_list:
            file = os.path.join(file_path, file)
            if os.path.isdir(file):
                cls.get_file(file)
            # len(file.split('tmp')) > 1 is true iff 'tmp' occurs in the path.
            if file.split('.')[-1] not in ['yml', 'yaml'] or len(file.split('tmp')) > 1:
                continue
            else:
                cls.case_file_list.append(file)


if __name__ == '__main__':
    tmp = ReadFile()
    tmp.read_case_yml()
0.222616
0.094636
import logging
import traceback
from typing import cast, Optional, Set, Tuple, Type

import cachetools
import requests

from .type import ErrorMessage, Measurement, Response, Units, URL, Value


class Collector:
    """Base class for metric collectors."""

    TIMEOUT = 10  # Default timeout of 10 seconds
    RESPONSE_CACHE = cachetools.TTLCache(maxsize=256, ttl=60)  # Briefly cache responses to prevent flooding sources
    MAX_UNITS = 100  # The maximum number of units (e.g. violations, warnings) to send to the server
    # Registry of all Collector subclasses, filled by __init_subclass__.
    subclasses: Set[Type["Collector"]] = set()

    def __init_subclass__(cls) -> None:
        # Auto-register every subclass so get_subclass can find it by name.
        Collector.subclasses.add(cls)
        super().__init_subclass__()

    @classmethod
    def get_subclass(cls, source_and_metric: str) -> Type["Collector"]:
        """Return the subclass registered for the source/metric name.

        Matching is by class name, lower-cased and with underscores removed;
        falls back to this base class when no subclass matches.
        """
        simplified_class_name = source_and_metric.replace("_", "")
        matching_subclasses = [sc for sc in cls.subclasses if sc.__name__.lower() == simplified_class_name]
        return matching_subclasses[0] if matching_subclasses else cls

    @staticmethod
    def get(metric_type, sources) -> Response:
        """Connect to the sources to get and parse the measurement for the metric.

        Returns a dict with per-source responses and the summed value;
        the value is None when any individual source produced no value.
        """
        source_responses = []
        for source_uuid, source in sources.items():
            # Dispatch to the collector registered for "<source>_<metric>".
            collector_class = cast(Type[Collector], Collector.get_subclass(f"{source['type']}_{metric_type}"))
            source_collector = collector_class()
            source_response = source_collector.get_one(source)
            source_response["source_uuid"] = source_uuid
            source_responses.append(source_response)
        values = [source_response["value"] for source_response in source_responses]
        value = None if None in values else sum([int(value) for value in values])
        return dict(sources=source_responses, value=value)

    def get_one(self, source) -> Response:
        """Return the measurement response for one source.

        Connection and parse errors are captured as tracebacks in the
        returned dict rather than raised.
        """
        parameters = source.get("parameters", {})
        api_url = self.api_url(**parameters)
        landing_url = self.landing_url(**parameters)
        response, connection_error = self.safely_get_source_response(api_url)
        value, units, parse_error = self.safely_parse_source_response(response, **parameters)
        return dict(api_url=api_url, landing_url=landing_url, value=value, units=units,
                    connection_error=connection_error, parse_error=parse_error)

    def landing_url(self, **parameters) -> URL:  # pylint: disable=no-self-use
        """Translate the urls into the landing urls."""
        return parameters.get("url", "")

    def api_url(self, **parameters) -> URL:  # pylint: disable=no-self-use
        """Translate the url into the API url."""
        return parameters.get("url", "")

    # The cache key deliberately ignores self: all collector instances
    # share RESPONSE_CACHE, keyed on the url only.
    @cachetools.cached(RESPONSE_CACHE, key=lambda self, url: cachetools.keys.hashkey(url))
    def safely_get_source_response(self, url: URL) -> Tuple[Optional[requests.Response], ErrorMessage]:
        """Connect to the source and get the data, without failing.

        Returns (response, None) on success or (None, traceback) on error.
        """
        response, error = None, None
        try:
            response = self.get_source_response(url)
        except Exception:  # pylint: disable=broad-except
            error = traceback.format_exc()
        return response, error

    def get_source_response(self, url: URL) -> requests.Response:
        """Open the url. Raise an exception if the response status isn't 200 or if a time out occurs."""
        logging.info("Retrieving %s", url)
        response = requests.get(url, timeout=self.TIMEOUT)
        response.raise_for_status()
        return response

    def safely_parse_source_response(
            self, response: requests.Response, **parameters) -> Tuple[Value, Units, ErrorMessage]:
        """Parse the data from the response, without failing.

        Returns (value, units truncated to MAX_UNITS, error traceback or None).
        """
        units: Units = []
        value, error = None, None
        if response:
            try:
                result = self.parse_source_response(response, **parameters)
                # Subclasses may return either a bare value or (value, units).
                value, units = result if isinstance(result, tuple) else (result, [])
            except Exception:  # pylint: disable=broad-except
                error = traceback.format_exc()
        return value, units[:self.MAX_UNITS], error

    def parse_source_response(self, response: requests.Response, **parameters) -> Measurement:
        # pylint: disable=no-self-use,unused-argument
        """Parse the response to get the measurement for the metric."""
        return str(response.text)
components/collector/collector/collector.py
import logging
import traceback
from typing import cast, Optional, Set, Tuple, Type

import cachetools
import requests

from .type import ErrorMessage, Measurement, Response, Units, URL, Value


class Collector:
    """Base class for metric collectors.

    Subclasses register themselves automatically via __init_subclass__ and are
    looked up by name in get_subclass(); a subclass named e.g. SonarQubeTests
    handles the "sonarqube_tests" source/metric combination.
    """

    TIMEOUT = 10  # Default timeout of 10 seconds
    # Briefly cache responses to prevent flooding sources.
    # NOTE: the cache is a class attribute, shared by all collector instances.
    RESPONSE_CACHE = cachetools.TTLCache(maxsize=256, ttl=60)
    MAX_UNITS = 100  # The maximum number of units (e.g. violations, warnings) to send to the server
    subclasses: Set[Type["Collector"]] = set()

    def __init_subclass__(cls) -> None:
        # Register every subclass so get_subclass() can find it by name.
        Collector.subclasses.add(cls)
        super().__init_subclass__()

    @classmethod
    def get_subclass(cls, source_and_metric: str) -> Type["Collector"]:
        """Return the subclass registered for the source/metric name.

        Falls back to this base class when no subclass matches.
        """
        simplified_class_name = source_and_metric.replace("_", "")
        matching_subclasses = [sc for sc in cls.subclasses if sc.__name__.lower() == simplified_class_name]
        return matching_subclasses[0] if matching_subclasses else cls

    @staticmethod
    def get(metric_type, sources) -> Response:
        """Connect to the sources to get and parse the measurement for the metric.

        Returns a dict with the individual source responses and the summed
        integer value; the value is None when any source failed to produce one.
        """
        source_responses = []
        for source_uuid, source in sources.items():
            collector_class = cast(Type[Collector], Collector.get_subclass(f"{source['type']}_{metric_type}"))
            source_collector = collector_class()
            source_response = source_collector.get_one(source)
            source_response["source_uuid"] = source_uuid
            source_responses.append(source_response)
        values = [source_response["value"] for source_response in source_responses]
        # One missing value makes the overall value unknown; otherwise sum.
        value = None if None in values else sum([int(value) for value in values])
        return dict(sources=source_responses, value=value)

    def get_one(self, source) -> Response:
        """Return the measurement response for one source.

        Connection and parse errors are captured in the response dict rather
        than raised, so one failing source never aborts the whole measurement.
        """
        parameters = source.get("parameters", {})
        api_url = self.api_url(**parameters)
        landing_url = self.landing_url(**parameters)
        response, connection_error = self.safely_get_source_response(api_url)
        value, units, parse_error = self.safely_parse_source_response(response, **parameters)
        return dict(api_url=api_url, landing_url=landing_url, value=value, units=units,
                    connection_error=connection_error, parse_error=parse_error)

    def landing_url(self, **parameters) -> URL:  # pylint: disable=no-self-use
        """Translate the urls into the landing urls."""
        return parameters.get("url", "")

    def api_url(self, **parameters) -> URL:  # pylint: disable=no-self-use
        """Translate the url into the API url."""
        return parameters.get("url", "")

    # The cache key deliberately ignores self: responses are shared per URL
    # across all collector instances.
    @cachetools.cached(RESPONSE_CACHE, key=lambda self, url: cachetools.keys.hashkey(url))
    def safely_get_source_response(self, url: URL) -> Tuple[Optional[requests.Response], ErrorMessage]:
        """Connect to the source and get the data, without failing.

        Returns (response, None) on success or (None, traceback string) on failure.
        """
        response, error = None, None
        try:
            response = self.get_source_response(url)
        except Exception:  # pylint: disable=broad-except
            # Any failure (timeout, DNS, HTTP error) becomes an error message.
            error = traceback.format_exc()
        return response, error

    def get_source_response(self, url: URL) -> requests.Response:
        """Open the url. Raise an exception if the response status isn't 200 or if a time out occurs."""
        logging.info("Retrieving %s", url)
        response = requests.get(url, timeout=self.TIMEOUT)
        response.raise_for_status()
        return response

    def safely_parse_source_response(
            self, response: requests.Response, **parameters) -> Tuple[Value, Units, ErrorMessage]:
        """Parse the data from the response, without failing.

        Returns (value, units capped at MAX_UNITS, error message or None).
        """
        units: Units = []
        value, error = None, None
        if response:
            try:
                result = self.parse_source_response(response, **parameters)
                # Subclasses may return either a bare value or a (value, units) tuple.
                value, units = result if isinstance(result, tuple) else (result, [])
            except Exception:  # pylint: disable=broad-except
                error = traceback.format_exc()
        return value, units[:self.MAX_UNITS], error

    def parse_source_response(self, response: requests.Response, **parameters) -> Measurement:  # pylint: disable=no-self-use,unused-argument
        """Parse the response to get the measurement for the metric."""
        return str(response.text)
0.942678
0.163813
import numpy as np
from matplotlib import pyplot as plt
from numpy import ndarray
from scipy.linalg import svd


def explained_var(Σ: ndarray) -> ndarray:
    """Return the fraction of total variance explained by each singular value.

    Σ: 1-D array of singular values as returned by scipy.linalg.svd.
    """
    ΣΣ: ndarray = Σ * Σ  # squared singular values are proportional to explained variance
    return ΣΣ / ΣΣ.sum()


def plot_explained_variance(ρ: ndarray, threshold=0.9, save_to_file: str = ""):
    """Plot individual and cumulative explained variance with a threshold line.

    Returns the matplotlib figure; also saves it when save_to_file is non-empty.
    """
    plot_range = range(1, len(ρ) + 1)
    f = plt.figure()
    plt.plot(plot_range, ρ, 'x-')
    plt.plot(plot_range, np.cumsum(ρ), 'o-')
    plt.plot([1, len(ρ)], [threshold, threshold], 'k--')
    plt.xlabel('Principal component')
    plt.ylabel('Variance explained')
    plt.legend(['Individual', 'Cumulative', 'Threshold'])
    plt.grid()
    if save_to_file != '':
        plt.savefig(save_to_file)
    return f


def plot_data_projected_unto_principal_components(projected_data: ndarray, class_labels: ndarray):
    """Scatter-plot each pair of principal components, one marker set per class.

    BUG FIX: the plot call previously referenced the undefined global name
    `data_projected` (NameError at runtime); it now uses the `projected_data`
    parameter.
    """
    classes: ndarray = np.unique(class_labels)
    num_principal_components = projected_data.shape[1]
    num_rows = 2
    num_columns = int(np.ceil((num_principal_components - 1) * 2 / num_rows))
    fig, subplots = plt.subplots(num_rows, num_columns)
    plt.title('Data projected onto Principal components')
    for d1 in range(num_principal_components):
        for d2 in range(d1 + 1, num_principal_components):
            index = d1 + d2 - 1
            for klass in classes:
                class_mask: ndarray = (class_labels == klass)
                subplots[int(np.floor(index / num_columns)),
                         index % num_columns].plot(projected_data[class_mask, d1],
                                                   projected_data[class_mask, d2], 'o')
            # subplots[d1, d2].plt.legend(['With Parkinson\'s', 'Without Parkinson\'s'])
            # subplots[d1, d2].xlabel(f'PC{d1}')
            subplots[(d1 + d2 - 1) % num_rows,
                     (d1 + d2 - 1) % num_columns].set(xlabel=f'PC{d1}', ylabel=f'PC{d2}')


def project_data_onto_pcs(data: ndarray, threshold) -> ndarray:
    """Project `data` onto the fewest principal components whose cumulative
    explained variance reaches `threshold` (a fraction in [0, 1]).

    Raises ValueError when threshold is outside [0, 1].
    """
    if threshold < 0 or threshold > 1:
        raise ValueError('Threshold must be between 0 and 1')
    (U, Σ, Vh) = svd(data)
    V = Vh.T
    ρ: ndarray = explained_var(Σ)
    # Smallest k with cumulative variance >= threshold.
    num_pc_to_threshold = (np.cumsum(ρ) < threshold).sum() + 1
    data_projected = data @ V[:, :num_pc_to_threshold]  # Data projected onto {num_pc_to_threshold} components
    # plot_explained_variance(ρ, threshold, "PCA_explained_variance.pdf")
    # print(f"Acceptable threshold: {threshold}\nRequired number of components: {num_pc_to_threshold}")
    # plot_data_projected_unto_principal_components(data @ V[:, :4], class_labels)
    return data_projected
PCA.py
import numpy as np
from matplotlib import pyplot as plt
from numpy import ndarray
from scipy.linalg import svd


def explained_var(Σ: ndarray) -> ndarray:
    """Return the fraction of total variance explained by each singular value.

    Σ: 1-D array of singular values as returned by scipy.linalg.svd.
    """
    ΣΣ: ndarray = Σ * Σ  # squared singular values are proportional to explained variance
    return ΣΣ / ΣΣ.sum()


def plot_explained_variance(ρ: ndarray, threshold=0.9, save_to_file: str = ""):
    """Plot individual and cumulative explained variance with a threshold line.

    Returns the matplotlib figure; also saves it when save_to_file is non-empty.
    """
    plot_range = range(1, len(ρ) + 1)
    f = plt.figure()
    plt.plot(plot_range, ρ, 'x-')
    plt.plot(plot_range, np.cumsum(ρ), 'o-')
    plt.plot([1, len(ρ)], [threshold, threshold], 'k--')
    plt.xlabel('Principal component')
    plt.ylabel('Variance explained')
    plt.legend(['Individual', 'Cumulative', 'Threshold'])
    plt.grid()
    if save_to_file != '':
        plt.savefig(save_to_file)
    return f


def plot_data_projected_unto_principal_components(projected_data: ndarray, class_labels: ndarray):
    """Scatter-plot each pair of principal components, one marker set per class.

    BUG FIX: the plot call previously referenced the undefined global name
    `data_projected` (NameError at runtime); it now uses the `projected_data`
    parameter.
    """
    classes: ndarray = np.unique(class_labels)
    num_principal_components = projected_data.shape[1]
    num_rows = 2
    num_columns = int(np.ceil((num_principal_components - 1) * 2 / num_rows))
    fig, subplots = plt.subplots(num_rows, num_columns)
    plt.title('Data projected onto Principal components')
    for d1 in range(num_principal_components):
        for d2 in range(d1 + 1, num_principal_components):
            index = d1 + d2 - 1
            for klass in classes:
                class_mask: ndarray = (class_labels == klass)
                subplots[int(np.floor(index / num_columns)),
                         index % num_columns].plot(projected_data[class_mask, d1],
                                                   projected_data[class_mask, d2], 'o')
            # subplots[d1, d2].plt.legend(['With Parkinson\'s', 'Without Parkinson\'s'])
            # subplots[d1, d2].xlabel(f'PC{d1}')
            subplots[(d1 + d2 - 1) % num_rows,
                     (d1 + d2 - 1) % num_columns].set(xlabel=f'PC{d1}', ylabel=f'PC{d2}')


def project_data_onto_pcs(data: ndarray, threshold) -> ndarray:
    """Project `data` onto the fewest principal components whose cumulative
    explained variance reaches `threshold` (a fraction in [0, 1]).

    Raises ValueError when threshold is outside [0, 1].
    """
    if threshold < 0 or threshold > 1:
        raise ValueError('Threshold must be between 0 and 1')
    (U, Σ, Vh) = svd(data)
    V = Vh.T
    ρ: ndarray = explained_var(Σ)
    # Smallest k with cumulative variance >= threshold.
    num_pc_to_threshold = (np.cumsum(ρ) < threshold).sum() + 1
    data_projected = data @ V[:, :num_pc_to_threshold]  # Data projected onto {num_pc_to_threshold} components
    # plot_explained_variance(ρ, threshold, "PCA_explained_variance.pdf")
    # print(f"Acceptable threshold: {threshold}\nRequired number of components: {num_pc_to_threshold}")
    # plot_data_projected_unto_principal_components(data @ V[:, :4], class_labels)
    return data_projected
0.442877
0.720577
class NBA_Player():
    """Instances of NBA_Player represent an NBA Player

    wingspan: int
    name: str"""

    def __init__(self, wingspan: int, name: str) -> None:
        """Initiate NBA_Player with a wingspan (inches) and a name.

        returns None"""
        self.wingspan = wingspan
        self.name = name


class NBA_List():
    """Instance of NBA_List represents a list of NBA_Players

    players: array of NBA_Players, added in ascending wingspan order"""

    def __init__(self) -> None:
        """Initiate NBA_List with an empty player list.

        returns None"""
        self.players = []

    def add(self, player: NBA_Player) -> None:
        """Add NBA_Player to instance of NBA_List

        returns None"""
        self.players.append(player)

    def get(self, wingspan: int) -> NBA_Player:
        """Get the NBA_Player whose wingspan matches, clamping to the
        shortest/longest player when the wingspan is out of range.

        returns NBA_Player"""
        # Exact match (players are stored with consecutive wingspans,
        # so any in-range integer matches here). Iterate the players
        # directly instead of indexing by position.
        for player in self.players:
            if player.wingspan == wingspan:
                return player
        # Shorter than the shortest player -> return the shortest player.
        if wingspan < self.players[0].wingspan:
            return self.players[0]
        # Otherwise longer than the longest player -> return the longest player.
        return self.players[-1]


def get_input(question: str) -> int:
    """Prompt with `question` until the user enters a non-negative integer.

    returns int"""
    while True:
        try:
            value = int(input(question))
            if value < 0:
                print("Input must be a positive integer, try again: \n")
                continue
            break
        except ValueError:
            print("Input must be an integer, try again: \n")
            continue
    return value


def get_payout(inches: int) -> int:
    """Get payout according to inches (int): 10 * 2**inches.

    returns int"""
    return (2 ** inches) * 10


# Create instance of NBA_List and fill it with placeholder players covering
# wingspans 73..93 inches. All entries use the same "<NAME>" placeholder, so
# this loop replaces 21 duplicated add() calls with identical behavior.
nba_list = NBA_List()
for ws in range(73, 94):
    nba_list.add(NBA_Player(ws, "<NAME>"))

# Driver
current_wingspan = get_input("Enter your current wingspan in inches: \n")
current_player = nba_list.get(current_wingspan)
print("\nYou are currently closest in wingspan to", current_player.name,
      "who has a wingspan of", current_player.wingspan, "inches\n")
inches = get_input("Enter amount of inches to remove from your wingspan: \n")
if (current_wingspan >= inches):
    new_wingspan = current_wingspan - inches
    new_player = nba_list.get(new_wingspan)
    print("\nYour wingspan is now", new_wingspan, "inches, you earned $", get_payout(inches),
          "and you are now closest in wingspan to", new_player.name,
          "who has a wingspan of", new_player.wingspan, "inches")
else:
    print("\nYou cannot remove more inches than the length of your current wingspan")
src/script.py
class NBA_Player():
    """Instances of NBA_Player represent an NBA Player

    wingspan: int
    name: str"""

    def __init__(self, wingspan: int, name: str) -> None:
        """Initiate NBA_Player with a wingspan (inches) and a name.

        returns None"""
        self.wingspan = wingspan
        self.name = name


class NBA_List():
    """Instance of NBA_List represents a list of NBA_Players

    players: array of NBA_Players, added in ascending wingspan order"""

    def __init__(self) -> None:
        """Initiate NBA_List with an empty player list.

        returns None"""
        self.players = []

    def add(self, player: NBA_Player) -> None:
        """Add NBA_Player to instance of NBA_List

        returns None"""
        self.players.append(player)

    def get(self, wingspan: int) -> NBA_Player:
        """Get the NBA_Player whose wingspan matches, clamping to the
        shortest/longest player when the wingspan is out of range.

        returns NBA_Player"""
        # Exact match (players are stored with consecutive wingspans,
        # so any in-range integer matches here). Iterate the players
        # directly instead of indexing by position.
        for player in self.players:
            if player.wingspan == wingspan:
                return player
        # Shorter than the shortest player -> return the shortest player.
        if wingspan < self.players[0].wingspan:
            return self.players[0]
        # Otherwise longer than the longest player -> return the longest player.
        return self.players[-1]


def get_input(question: str) -> int:
    """Prompt with `question` until the user enters a non-negative integer.

    returns int"""
    while True:
        try:
            value = int(input(question))
            if value < 0:
                print("Input must be a positive integer, try again: \n")
                continue
            break
        except ValueError:
            print("Input must be an integer, try again: \n")
            continue
    return value


def get_payout(inches: int) -> int:
    """Get payout according to inches (int): 10 * 2**inches.

    returns int"""
    return (2 ** inches) * 10


# Create instance of NBA_List and fill it with placeholder players covering
# wingspans 73..93 inches. All entries use the same "<NAME>" placeholder, so
# this loop replaces 21 duplicated add() calls with identical behavior.
nba_list = NBA_List()
for ws in range(73, 94):
    nba_list.add(NBA_Player(ws, "<NAME>"))

# Driver
current_wingspan = get_input("Enter your current wingspan in inches: \n")
current_player = nba_list.get(current_wingspan)
print("\nYou are currently closest in wingspan to", current_player.name,
      "who has a wingspan of", current_player.wingspan, "inches\n")
inches = get_input("Enter amount of inches to remove from your wingspan: \n")
if (current_wingspan >= inches):
    new_wingspan = current_wingspan - inches
    new_player = nba_list.get(new_wingspan)
    print("\nYour wingspan is now", new_wingspan, "inches, you earned $", get_payout(inches),
          "and you are now closest in wingspan to", new_player.name,
          "who has a wingspan of", new_player.wingspan, "inches")
else:
    print("\nYou cannot remove more inches than the length of your current wingspan")
0.778818
0.296629
import argparse


class Options:
    """Command-line options for CuMix zero-shot domain generalization training.

    Wraps an argparse.ArgumentParser; call parse() to obtain the Namespace.
    """

    def __init__(self):
        # Parse options for processing
        parser = argparse.ArgumentParser(description='CuMix for ZSDG.')
        # Local/remote dataset roots and checkpoint directories.
        parser.add_argument('-root_l', '--root_path', default='/mnt/c61a35bf-fc59-4aab-a996-b254f9ab9052/arfeen', type=str)
        parser.add_argument('-root_r', '--root_path_remote', default='/home/arfeen/datasets/', type=str)
        parser.add_argument('-path_cpl', '--checkpoint_path', default='/mnt/c61a35bf-fc59-4aab-a996-b254f9ab9052/arfeen/saved_models/CuMix_only/gpu1/', type=str)
        parser.add_argument('-path_cpr', '--checkpoint_path_remote', default='/home/arfeen/ZSDG_cumix_gpu1/saved_models/CuMix/', type=str)
        parser.add_argument('-resume', '--resume_dict', default=None, type=str, help='checkpoint file to resume training from')
        parser.add_argument('-tr', '--transfer2remote', choices=[0, 1], default=1, type=int, help='use path_cpl/path_cpr for storing models.')
        parser.add_argument('-data', '--dataset', default='DomainNet', choices=['PACS', 'DomainNet'])
        # DomainNet specific arguments
        parser.add_argument('-tv', '--trainvalid', choices=[0, 1], default=0, type=int,
                            help='whether to include val class samples during training.\
                            1 if hyperparameter tuning done with val set; 0 if dg_only=1')
        parser.add_argument('-hd', '--holdout_domain', default='quickdraw',
                            choices=['quickdraw', 'clipart', 'infograph', 'sketch', 'painting'])
        parser.add_argument('-dg_only', '--dg_only', choices=[0, 1], default=0, type=int, help='ZSDG (0) or just DG(1)')
        parser.add_argument('-arch', '--backbone', choices=['resnet18', 'resnet50'], default='resnet50', help='Backbone resnet model')
        # Loss weight & reg. parameters
        parser.add_argument('-wcce', '--wcce', default=1.0, type=float, help='Weight on Distance based CCE Loss for Sketch and Image Encoders')
        parser.add_argument('-wimg', '--mixup_w_img', type=float, default=0.001, help='Weight to image mixup CE loss')
        parser.add_argument('-wfeat', '--mixup_w_feat', type=float, default=2, help='Weight to feature mixup CE loss')
        parser.add_argument('-wbarlow', '--mixup_w_barlow', type=float, default=0.05, help='Weight to barlow twins loss')
        parser.add_argument('-wmix_ratio', '--mixup_w_mix_ratio', type=float, default=50, help='Weight to mix ratio prediction mse loss')
        parser.add_argument('-l2', '--l2_reg', default=5e-5, type=float, help='L2 Weight Decay for optimizer')
        # Size parameters
        parser.add_argument('-semsz', '--semantic_emb_size', choices=[200, 300], default=300, type=int, help='Glove vector dimension')
        parser.add_argument('-imsz', '--image_size', default=224, type=int, help='Input size for query/gallery domain sample')
        # Model parameters
        parser.add_argument('-seed', '--seed', type=int, default=0)
        parser.add_argument('-beta', '--mixup_beta', type=float, default=1, help='mixup interpolation coefficient')  # // default : 1
        parser.add_argument('-step', '--mixup_step', type=int, default=2, help='Initial warmup steps for domain and class mixing ratios.')  #//default: 2
        parser.add_argument('-bs', '--batch_size', default=45, type=int)
        parser.add_argument('-nw', '--num_workers', type=int, default=8, help='Number of workers in data loader')
        # Optimization parameters
        parser.add_argument('-e', '--epochs', type=int, default=8, metavar='N', help='Number of epochs to train (default: 100)')
        parser.add_argument('-lrb', '--lr_net', type=float, default=0.0001, metavar='LR', help='Initial learning rate for backbone')
        parser.add_argument('-lrc', '--lr_clf', type=float, default=0.001, metavar='LR', help='Initial learning rate for classifier')
        # I/O parameters
        parser.add_argument('-log', '--log_interval', type=int, default=400, metavar='N',
                            help='How many batches to wait before logging training status')
        # Tent parameters
        parser.add_argument('-tstep', '--tent_step', type=int, default=1, help='Entropy minm steps for Tent')
        parser.add_argument('-es', '--early_stop', type=int, default=15, help='Early stopping epochs.')
        # Barlow Twins parameters
        parser.add_argument('--projector', default='300-300', type=str, metavar='MLP', help='projector MLP')
        parser.add_argument('-lambd', '--lambd', type=float, default=0.0051, help='redundancy reduction loss weight')
        self.parser = parser

    def parse(self):
        """Parse the arguments and return the argparse Namespace."""
        # Parse the arguments
        return self.parser.parse_args()
src/options/options_cumix.py
import argparse


class Options:
    """Command-line options for CuMix zero-shot domain generalization training.

    Wraps an argparse.ArgumentParser; call parse() to obtain the Namespace.
    """

    def __init__(self):
        # Parse options for processing
        parser = argparse.ArgumentParser(description='CuMix for ZSDG.')
        # Local/remote dataset roots and checkpoint directories.
        parser.add_argument('-root_l', '--root_path', default='/mnt/c61a35bf-fc59-4aab-a996-b254f9ab9052/arfeen', type=str)
        parser.add_argument('-root_r', '--root_path_remote', default='/home/arfeen/datasets/', type=str)
        parser.add_argument('-path_cpl', '--checkpoint_path', default='/mnt/c61a35bf-fc59-4aab-a996-b254f9ab9052/arfeen/saved_models/CuMix_only/gpu1/', type=str)
        parser.add_argument('-path_cpr', '--checkpoint_path_remote', default='/home/arfeen/ZSDG_cumix_gpu1/saved_models/CuMix/', type=str)
        parser.add_argument('-resume', '--resume_dict', default=None, type=str, help='checkpoint file to resume training from')
        parser.add_argument('-tr', '--transfer2remote', choices=[0, 1], default=1, type=int, help='use path_cpl/path_cpr for storing models.')
        parser.add_argument('-data', '--dataset', default='DomainNet', choices=['PACS', 'DomainNet'])
        # DomainNet specific arguments
        parser.add_argument('-tv', '--trainvalid', choices=[0, 1], default=0, type=int,
                            help='whether to include val class samples during training.\
                            1 if hyperparameter tuning done with val set; 0 if dg_only=1')
        parser.add_argument('-hd', '--holdout_domain', default='quickdraw',
                            choices=['quickdraw', 'clipart', 'infograph', 'sketch', 'painting'])
        parser.add_argument('-dg_only', '--dg_only', choices=[0, 1], default=0, type=int, help='ZSDG (0) or just DG(1)')
        parser.add_argument('-arch', '--backbone', choices=['resnet18', 'resnet50'], default='resnet50', help='Backbone resnet model')
        # Loss weight & reg. parameters
        parser.add_argument('-wcce', '--wcce', default=1.0, type=float, help='Weight on Distance based CCE Loss for Sketch and Image Encoders')
        parser.add_argument('-wimg', '--mixup_w_img', type=float, default=0.001, help='Weight to image mixup CE loss')
        parser.add_argument('-wfeat', '--mixup_w_feat', type=float, default=2, help='Weight to feature mixup CE loss')
        parser.add_argument('-wbarlow', '--mixup_w_barlow', type=float, default=0.05, help='Weight to barlow twins loss')
        parser.add_argument('-wmix_ratio', '--mixup_w_mix_ratio', type=float, default=50, help='Weight to mix ratio prediction mse loss')
        parser.add_argument('-l2', '--l2_reg', default=5e-5, type=float, help='L2 Weight Decay for optimizer')
        # Size parameters
        parser.add_argument('-semsz', '--semantic_emb_size', choices=[200, 300], default=300, type=int, help='Glove vector dimension')
        parser.add_argument('-imsz', '--image_size', default=224, type=int, help='Input size for query/gallery domain sample')
        # Model parameters
        parser.add_argument('-seed', '--seed', type=int, default=0)
        parser.add_argument('-beta', '--mixup_beta', type=float, default=1, help='mixup interpolation coefficient')  # // default : 1
        parser.add_argument('-step', '--mixup_step', type=int, default=2, help='Initial warmup steps for domain and class mixing ratios.')  #//default: 2
        parser.add_argument('-bs', '--batch_size', default=45, type=int)
        parser.add_argument('-nw', '--num_workers', type=int, default=8, help='Number of workers in data loader')
        # Optimization parameters
        parser.add_argument('-e', '--epochs', type=int, default=8, metavar='N', help='Number of epochs to train (default: 100)')
        parser.add_argument('-lrb', '--lr_net', type=float, default=0.0001, metavar='LR', help='Initial learning rate for backbone')
        parser.add_argument('-lrc', '--lr_clf', type=float, default=0.001, metavar='LR', help='Initial learning rate for classifier')
        # I/O parameters
        parser.add_argument('-log', '--log_interval', type=int, default=400, metavar='N',
                            help='How many batches to wait before logging training status')
        # Tent parameters
        parser.add_argument('-tstep', '--tent_step', type=int, default=1, help='Entropy minm steps for Tent')
        parser.add_argument('-es', '--early_stop', type=int, default=15, help='Early stopping epochs.')
        # Barlow Twins parameters
        parser.add_argument('--projector', default='300-300', type=str, metavar='MLP', help='projector MLP')
        parser.add_argument('-lambd', '--lambd', type=float, default=0.0051, help='redundancy reduction loss weight')
        self.parser = parser

    def parse(self):
        """Parse the arguments and return the argparse Namespace."""
        # Parse the arguments
        return self.parser.parse_args()
0.615666
0.089654
# Extract u/v velocities from NEP36 hindcast output and interpolate them onto
# density (isopycnal) surfaces over the WCVI sub-domain.
from __future__ import division

import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as nc
import os
import glob
import fnmatch
from collections import namedtuple, OrderedDict
import scipy.io as sio
from scipy import interpolate, signal
from pyproj import Proj, transform
import sys
sys.path.append('/ocean/ssahu/CANYONS/wcvi/grid/')
from grid_alignment import calculate_initial_compass_bearing as cibc
from bathy_common import *
from matplotlib import path
import xarray as xr
import pandas as pd
import scipy.io as sio
import matplotlib.cm as cm
import cmocean as cmo
import matplotlib.gridspec as gridspec
from dateutil.parser import parse
from salishsea_tools import geo_tools, viz_tools, tidetools, nc_tools
import gsw
from scipy.interpolate import interp1d
import os

path_to_save = '/data/ssahu/NEP36_Extracted_Months/'  # '/home/ssahu/saurav/'

# Bathymetry, depth levels (top 32) and land/ocean masks for the WCVI window
# (y 180:350, x 480:650).
bathy = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc')
Z = bathy.variables['Bathymetry'][:]
zlevels = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc').variables['deptht'][:32]
mask = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/mesh_mask.nc')
tmask = mask.variables['tmask'][0, :32, 180:350, 480:650]
umask = mask.variables['umask'][0, :32, 180:350, 480:650]
vmask = mask.variables['vmask'][0, :32, 180:350, 480:650]
mbathy = mask.variables['mbathy'][0, 180:350, 480:650]

y_wcvi_slice = np.array(np.arange(180, 350))
x_wcvi_slice = np.array(np.arange(480, 650))

# Clip the deepest wet-level index to the 32 levels actually loaded.
mbathy[mbathy > 32] = 32

NEP_2013 = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/NEP36_2013_T_S_Spice_larger_offshore_rho_correct.nc')
rho = NEP_2013.variables['density']


def U_timeseries_at_WCVI_locations(grid_U):
    """Read the full 'uo' (zonal velocity) array from one grid_U file."""
    u_vel = grid_U.variables['uo'][:, :, :, :]
    vector_u = namedtuple('vector_u', 'u_vel')
    return vector_u(u_vel)


def V_timeseries_at_WCVI_locations(grid_V):
    """Read the full 'vo' (meridional velocity) array from one grid_V file."""
    v_vel = grid_V.variables['vo'][:, :, :, :]
    vector_v = namedtuple('vector_v', 'v_vel')
    return vector_v(v_vel)


# Accumulate 180 daily records; each file is assumed to hold 10 time steps
# (note the i+10 / j+10 strides below) -- TODO confirm against the source files.
u_vel = np.empty((180, zlevels.shape[0], 1 + y_wcvi_slice.shape[0], 1 + x_wcvi_slice.shape[0]))
v_vel = np.empty((180, zlevels.shape[0], 1 + y_wcvi_slice.shape[0], 1 + x_wcvi_slice.shape[0]))

i = 0
for file in sorted(glob.glob('/data/ssahu/NEP36_2013_summer_hindcast/cut_NEP36-S29_1d*grid_U*.nc')):
    vector_u = U_timeseries_at_WCVI_locations(nc.Dataset(file))
    u_vel[i:i+10, ...] = vector_u[0]
    i = i+10

j = 0
for file in sorted(glob.glob('/data/ssahu/NEP36_2013_summer_hindcast/cut_NEP36-S29_1d*grid_V*.nc')):
    vector_v = V_timeseries_at_WCVI_locations(nc.Dataset(file))
    v_vel[j:j+10, ...] = vector_v[0]
    j = j+10

# Unstagger u/v onto common grid points and mask land, level by level.
u_tzyx = np.empty((u_vel.shape[0], zlevels.shape[0], y_wcvi_slice.shape[0], x_wcvi_slice.shape[0]))
v_tzyx = np.empty_like(u_tzyx)

for t in np.arange(u_tzyx.shape[0]):
    for level in np.arange(zlevels.shape[0]):
        u_tzyx[t, level, ...], v_tzyx[t, level, ...] = viz_tools.unstagger(u_vel[t, level, ...], v_vel[t, level, ...])
        u_tzyx[t, level, ...] = np.ma.masked_array(u_tzyx[t, level, ...], mask=1 - umask[level, :, :, ])
        v_tzyx[t, level, ...] = np.ma.masked_array(v_tzyx[t, level, ...], mask=1 - vmask[level, :, :])

# Target fine vertical grid and isopycnal levels (sigma-t 26.0..26.4);
# a sample within +/- tol of an isopycnal is averaged onto that surface.
znew = np.arange(0, 250, 0.1)
den = np.arange(26, 26.5, 0.1)
tol = 0.01

print("Starting interpolation and data extraction")

u_vel_time_iso = np.empty((u_tzyx.shape[0], den.shape[0], y_wcvi_slice.shape[0], x_wcvi_slice.shape[0]))
v_vel_time_iso = np.empty((v_tzyx.shape[0], den.shape[0], y_wcvi_slice.shape[0], x_wcvi_slice.shape[0]))

for t in np.arange(u_vel_time_iso.shape[0]):
    rho_0 = rho[t, :, :, :] - 1000  # convert density to sigma-t
    u_0 = u_tzyx[t, :, :, :]
    v_0 = v_tzyx[t, :, :, :]
    u_spec_iso = np.empty((den.shape[0], y_wcvi_slice.shape[0], x_wcvi_slice.shape[0]))
    v_spec_iso = np.empty((den.shape[0], y_wcvi_slice.shape[0], x_wcvi_slice.shape[0]))
    for iso in np.arange(den.shape[0]):
        u_den = np.empty((y_wcvi_slice.shape[0], x_wcvi_slice.shape[0]))
        v_den = np.empty((y_wcvi_slice.shape[0], x_wcvi_slice.shape[0]))
        for j in np.arange(y_wcvi_slice.shape[0]):
            u_iso = np.empty(x_wcvi_slice.shape[0])
            v_iso = np.empty(x_wcvi_slice.shape[0])
            rho_new = np.empty((znew.shape[0], x_wcvi_slice.shape[0]))
            u_new = np.empty((znew.shape[0], x_wcvi_slice.shape[0]))
            v_new = np.empty((znew.shape[0], x_wcvi_slice.shape[0]))
            for i in np.arange(rho_new.shape[1]):
                # Interpolate density and velocities onto the fine z grid,
                # then average velocities over the band around each isopycnal.
                f = interp1d(zlevels[:], rho_0[:, j, i], fill_value='extrapolate')
                g = interp1d(zlevels[:], u_0[:, j, i], fill_value='extrapolate')
                h = interp1d(zlevels[:], v_0[:, j, i], fill_value='extrapolate')
                rho_new[:, i] = f(znew[:])
                u_new[:, i] = g(znew[:])
                v_new[:, i] = h(znew[:])
                V = rho_new[:, i]
                ind = (V > den[iso]-tol) & (V < den[iso]+tol)
                u_iso[i] = np.nanmean(u_new[ind, i])
                v_iso[i] = np.nanmean(v_new[ind, i])
                u_den[j, i] = u_iso[i]
                v_den[j, i] = v_iso[i]
                u_spec_iso[iso, j, i] = u_den[j, i]
                v_spec_iso[iso, j, i] = v_den[j, i]
                u_vel_time_iso[t, iso, j, i] = u_spec_iso[iso, j, i]
                v_vel_time_iso[t, iso, j, i] = v_spec_iso[iso, j, i]

print("Writing the isopycnal data")

bdy_file = nc.Dataset(path_to_save + 'short_NEP36_2013_along_isopycnal_larger_offshore_velocities.nc', 'w', zlib=True);

bdy_file.createDimension('x', u_vel_time_iso.shape[3]);
# Define the remaining dimensions and variables of the output NetCDF file and
# write the isopycnal velocity fields.
bdy_file.createDimension('y', u_vel_time_iso.shape[2])
bdy_file.createDimension('isot', u_vel_time_iso.shape[1])
bdy_file.createDimension('time_counter', None)  # unlimited time dimension

x = bdy_file.createVariable('x', 'int32', ('x',), zlib=True)
x.units = 'indices'
x.longname = 'x indices of NEP36'

y = bdy_file.createVariable('y', 'int32', ('y',), zlib=True)
y.units = 'indices'
y.longname = 'y indices of NEP36'

isot = bdy_file.createVariable('isot', 'float32', ('isot',), zlib=True)
isot.units = 'm'
isot.longname = 'Vertical isopycnal Levels'

time_counter = bdy_file.createVariable('time_counter', 'int32', ('time_counter',), zlib=True)
time_counter.units = 's'
time_counter.longname = 'time'

u_velocity = bdy_file.createVariable('u_velocity', 'float32', ('time_counter', 'isot', 'y', 'x'), zlib=True)
v_velocity = bdy_file.createVariable('v_velocity', 'float32', ('time_counter', 'isot', 'y', 'x'), zlib=True)

u_velocity[...] = u_vel_time_iso[...]
# BUG FIX: v_velocity was previously assigned u_vel_time_iso, silently
# duplicating the zonal component; write the meridional field instead.
v_velocity[...] = v_vel_time_iso[...]

isot[...] = den[:]
x[...] = x_wcvi_slice[:]
y[...] = y_wcvi_slice[:]

bdy_file.close()
along_iso_2013_velocities.py
from __future__ import division import matplotlib.pyplot as plt import numpy as np import netCDF4 as nc import os import glob import fnmatch from collections import namedtuple, OrderedDict import scipy.io as sio from scipy import interpolate, signal from pyproj import Proj,transform import sys sys.path.append('/ocean/ssahu/CANYONS/wcvi/grid/') from grid_alignment import calculate_initial_compass_bearing as cibc from bathy_common import * from matplotlib import path import xarray as xr import pandas as pd import scipy.io as sio import matplotlib.cm as cm import cmocean as cmo import matplotlib.gridspec as gridspec from dateutil.parser import parse from salishsea_tools import geo_tools, viz_tools, tidetools, nc_tools import gsw from scipy.interpolate import interp1d import os path_to_save ='/data/ssahu/NEP36_Extracted_Months/' #'/home/ssahu/saurav/' bathy = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc') Z = bathy.variables['Bathymetry'][:] zlevels = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc').variables['deptht'][:32] mask = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/mesh_mask.nc') tmask = mask.variables['tmask'][0,:32,180:350, 480:650] umask = mask.variables['umask'][0,:32,180:350, 480:650] vmask = mask.variables['vmask'][0,:32,180:350, 480:650] mbathy = mask.variables['mbathy'][0,180:350, 480:650] y_wcvi_slice = np.array(np.arange(180,350)) x_wcvi_slice = np.array(np.arange(480,650)) mbathy[mbathy>32] = 32 NEP_2013 = nc.Dataset('/data/ssahu/NEP36_Extracted_Months/NEP36_2013_T_S_Spice_larger_offshore_rho_correct.nc') rho = NEP_2013.variables['density'] def U_timeseries_at_WCVI_locations(grid_U): u_vel = grid_U.variables['uo'][:,:,:,:] vector_u = namedtuple('vector_u', 'u_vel') return vector_u(u_vel) def V_timeseries_at_WCVI_locations(grid_V): v_vel = grid_V.variables['vo'][:,:,:,:] vector_v = namedtuple('vector_v', 'v_vel') return vector_v(v_vel) u_vel = 
np.empty((180,zlevels.shape[0],1+y_wcvi_slice.shape[0],1+x_wcvi_slice.shape[0])) v_vel = np.empty((180,zlevels.shape[0],1+y_wcvi_slice.shape[0],1+x_wcvi_slice.shape[0])) i = 0 for file in sorted(glob.glob('/data/ssahu/NEP36_2013_summer_hindcast/cut_NEP36-S29_1d*grid_U*.nc')): vector_u = U_timeseries_at_WCVI_locations(nc.Dataset(file)) u_vel[i:i+10,...] = vector_u[0] i = i+10 j = 0 for file in sorted(glob.glob('/data/ssahu/NEP36_2013_summer_hindcast/cut_NEP36-S29_1d*grid_V*.nc')): vector_v = V_timeseries_at_WCVI_locations(nc.Dataset(file)) v_vel[j:j+10,...] = vector_v[0] j = j+10 u_tzyx = np.empty((u_vel.shape[0],zlevels.shape[0],y_wcvi_slice.shape[0],x_wcvi_slice.shape[0])) v_tzyx = np.empty_like(u_tzyx) for t in np.arange(u_tzyx.shape[0]): for level in np.arange(zlevels.shape[0]): u_tzyx[t, level,...], v_tzyx[t, level,...] = viz_tools.unstagger(u_vel[t,level,...], v_vel[t, level,...]) u_tzyx[t, level,...] = np.ma.masked_array(u_tzyx[t, level,...], mask= 1- umask[level,:,:,]) v_tzyx[t, level,...] 
= np.ma.masked_array(v_tzyx[t, level,...], mask= 1- vmask[level,:,:]) znew = np.arange(0,250,0.1) den = np.arange(26,26.5,0.1) tol = 0.01 print("Starting interpolation and data extraction") u_vel_time_iso = np.empty((u_tzyx.shape[0],den.shape[0],y_wcvi_slice.shape[0],x_wcvi_slice.shape[0])) v_vel_time_iso = np.empty((v_tzyx.shape[0],den.shape[0],y_wcvi_slice.shape[0],x_wcvi_slice.shape[0])) for t in np.arange(u_vel_time_iso.shape[0]): rho_0 = rho[t, :, :, :] - 1000 u_0 = u_tzyx[t, :, :, :] v_0 = v_tzyx[t,:,:,:] u_spec_iso = np.empty((den.shape[0],y_wcvi_slice.shape[0],x_wcvi_slice.shape[0])) v_spec_iso = np.empty((den.shape[0],y_wcvi_slice.shape[0],x_wcvi_slice.shape[0])) for iso in np.arange(den.shape[0]): u_den = np.empty((y_wcvi_slice.shape[0],x_wcvi_slice.shape[0])) v_den = np.empty((y_wcvi_slice.shape[0],x_wcvi_slice.shape[0])) for j in np.arange(y_wcvi_slice.shape[0]): u_iso = np.empty(x_wcvi_slice.shape[0]) v_iso = np.empty(x_wcvi_slice.shape[0]) rho_new = np.empty((znew.shape[0],x_wcvi_slice.shape[0])) u_new = np.empty((znew.shape[0],x_wcvi_slice.shape[0])) v_new = np.empty((znew.shape[0],x_wcvi_slice.shape[0])) for i in np.arange(rho_new.shape[1]): f = interp1d(zlevels[:],rho_0[:,j,i],fill_value='extrapolate') g = interp1d(zlevels[:],u_0[:,j,i],fill_value='extrapolate') h = interp1d(zlevels[:],v_0[:,j,i],fill_value='extrapolate') rho_new[:,i] = f(znew[:]) u_new[:,i] = g(znew[:]) v_new[:,i] = h(znew[:]) V = rho_new[:,i] ind = (V>den[iso]-tol)&(V<den[iso]+tol) u_iso[i] = np.nanmean(u_new[ind,i]) v_iso[i] = np.nanmean(v_new[ind,i]) u_den[j,i] = u_iso[i] v_den[j,i] = v_iso[i] u_spec_iso[iso,j,i] = u_den[j,i] v_spec_iso[iso,j,i] = v_den[j,i] u_vel_time_iso[t,iso,j,i] = u_spec_iso[iso,j,i] v_vel_time_iso[t,iso,j,i] = v_spec_iso[iso,j,i] print("Writing the isopycnal data") bdy_file = nc.Dataset(path_to_save + 'short_NEP36_2013_along_isopycnal_larger_offshore_velocities.nc', 'w', zlib=True); bdy_file.createDimension('x', u_vel_time_iso.shape[3]); 
bdy_file.createDimension('y', u_vel_time_iso.shape[2]); bdy_file.createDimension('isot', u_vel_time_iso.shape[1]); bdy_file.createDimension('time_counter', None); x = bdy_file.createVariable('x', 'int32', ('x',), zlib=True); x.units = 'indices'; x.longname = 'x indices of NEP36'; y = bdy_file.createVariable('y', 'int32', ('y',), zlib=True); y.units = 'indices'; y.longname = 'y indices of NEP36'; isot = bdy_file.createVariable('isot', 'float32', ('isot',), zlib=True); isot.units = 'm'; isot.longname = 'Vertical isopycnal Levels'; time_counter = bdy_file.createVariable('time_counter', 'int32', ('time_counter',), zlib=True); time_counter.units = 's'; time_counter.longname = 'time'; u_velocity = bdy_file.createVariable('u_velocity', 'float32', ('time_counter','isot', 'y', 'x'), zlib=True) v_velocity = bdy_file.createVariable('v_velocity', 'float32', ('time_counter','isot', 'y', 'x'), zlib=True) u_velocity[...] = u_vel_time_iso[...]; v_velocity[...] = u_vel_time_iso[...]; isot[...] = den[:]; x[...] = x_wcvi_slice[:]; y[...] = y_wcvi_slice[:]; bdy_file.close()
0.224055
0.312331
def first(it):
    """Return the first element of *it*, or None when *it* is empty."""
    for i in it:
        return i
    return None


class AggregateMethod(object):
    '''convenience parent class for various aggregation methods which
    operate on a single key

    provides a default implementation of __call__ which checks if the
    field is in the parentKey or not and dispatches to aggregateParentKey
    or aggregateRows accordingly
    '''

    def __init__(self, field):
        self.field = field

    def __call__(self, parentKey, rows):
        if self.field in parentKey:
            return self.aggregateParentKey(parentKey[self.field], len(rows))
        return self.aggregateRows(rows)

    def aggregateParentKey(self, parentKey, rowCount):
        # Default: the shared value itself is the aggregate.
        return parentKey

    def aggregateRows(self, rows):
        return None


class First(AggregateMethod):
    '''first value of the field, in row order'''

    def aggregateRows(self, rows):
        return first(row[self.field] for row in rows)


class FirstNonBlank(AggregateMethod):
    '''first truthy value of the field, in row order'''

    def aggregateRows(self, rows):
        return first(row[self.field] for row in rows if row[self.field])


class Sum(AggregateMethod):
    '''sum of the field over all rows'''

    def aggregateParentKey(self, parentKey, rowCount):
        # Every row shares the same value, so the sum is value * count.
        return rowCount * parentKey

    def aggregateRows(self, rows):
        return sum(row[self.field] for row in rows)


class Count:
    '''number of rows; independent of any field'''

    def __init__(self):
        pass

    def __call__(self, parentKey, rows):
        return len(rows)


class CountDistinct(AggregateMethod):
    '''Count the number of distinct values in a given field'''

    def aggregateParentKey(self, parentKey, rowCount):
        return 1

    def aggregateRows(self, rows):
        return len(set(row[self.field] for row in rows))


class DistinctValues(AggregateMethod):
    '''return a sorted list of distinct values for a given field'''

    def aggregateParentKey(self, parentKey, rowCount):
        return [parentKey]

    def aggregateRows(self, rows):
        return sorted({row[self.field] for row in rows})


class AllValues(AggregateMethod):
    '''return a list (in current order) of values for a given field'''

    def aggregateParentKey(self, parentKey, rowCount):
        return [parentKey] * rowCount

    def aggregateRows(self, rows):
        return [row[self.field] for row in rows]


class ConcatDistinct(AggregateMethod):
    '''String-concatenate the distinct set of values using the
    given string to join the values'''

    # BUG FIX: this class (and Concat below) previously did not inherit
    # AggregateMethod and defined no __call__, so instances were not
    # callable like every other aggregation method.
    def __init__(self, field, joinStr=','):
        super(ConcatDistinct, self).__init__(field)
        self.joinStr = joinStr

    def aggregateParentKey(self, parentKey, rowCount):
        return str(parentKey)

    def aggregateRows(self, rows):
        # dict.fromkeys keeps first-occurrence order, making the output
        # deterministic (raw set iteration order is not).
        return self.joinStr.join(dict.fromkeys(str(row[self.field]) for row in rows))


class Concat(AggregateMethod):
    '''String-concatenate all of the values using the given string to
    join the values'''

    def __init__(self, field, joinStr=','):
        super(Concat, self).__init__(field)
        self.joinStr = joinStr

    def aggregateParentKey(self, parentKey, rowCount):
        return self.joinStr.join([str(parentKey)] * rowCount)

    def aggregateRows(self, rows):
        return self.joinStr.join(str(row[self.field]) for row in rows)


class Value(AggregateMethod):
    '''returns the given value'''

    def __call__(self, parentKey, rows):
        return self.field


class Average(AggregateMethod):
    '''returns the average value for a given field'''

    def aggregateRows(self, rows):
        return sum(row[self.field] for row in rows) / len(rows)


class WeightedAverage:
    '''returns the average value for a given field, weighted by another column'''

    def __init__(self, averageField, weightField):
        self.averageField = averageField
        self.weightField = weightField

    def __call__(self, parentKey, rows):
        if self.averageField in parentKey:
            # weighted average of x = x
            return parentKey[self.averageField]
        if self.weightField in parentKey:
            # All weights equal: reduces to a straight average.
            return sum(row[self.averageField] for row in rows) / len(rows)
        totalWeight = sum(row[self.weightField] for row in rows)
        weightedAverage = sum(row[self.averageField] * row[self.weightField] for row in rows)
        return weightedAverage / totalWeight


class Min(AggregateMethod):
    '''smallest value of the field'''

    def aggregateRows(self, rows):
        return min(row[self.field] for row in rows)


class Max(AggregateMethod):
    '''largest value of the field'''

    def aggregateRows(self, rows):
        return max(row[self.field] for row in rows)


class Span(AggregateMethod):
    '''return the difference between the greatest and the least'''

    def aggregateParentKey(self, parentKey, rowCount):
        # A single shared value spans zero.
        return 0

    def aggregateRows(self, rows):
        return max(row[self.field] for row in rows) - min(row[self.field] for row in rows)
hierarchy_aggregate.py
def first(it):
    """Return the first element of *it*, or None when *it* is empty."""
    for i in it:
        return i
    return None


class AggregateMethod(object):
    '''convenience parent class for various aggregation methods which
    operate on a single key

    provides a default implementation of __call__ which checks if the
    field is in the parentKey or not and dispatches to aggregateParentKey
    or aggregateRows accordingly
    '''

    def __init__(self, field):
        self.field = field

    def __call__(self, parentKey, rows):
        if self.field in parentKey:
            return self.aggregateParentKey(parentKey[self.field], len(rows))
        return self.aggregateRows(rows)

    def aggregateParentKey(self, parentKey, rowCount):
        # Default: the shared value itself is the aggregate.
        return parentKey

    def aggregateRows(self, rows):
        return None


class First(AggregateMethod):
    '''first value of the field, in row order'''

    def aggregateRows(self, rows):
        return first(row[self.field] for row in rows)


class FirstNonBlank(AggregateMethod):
    '''first truthy value of the field, in row order'''

    def aggregateRows(self, rows):
        return first(row[self.field] for row in rows if row[self.field])


class Sum(AggregateMethod):
    '''sum of the field over all rows'''

    def aggregateParentKey(self, parentKey, rowCount):
        # Every row shares the same value, so the sum is value * count.
        return rowCount * parentKey

    def aggregateRows(self, rows):
        return sum(row[self.field] for row in rows)


class Count:
    '''number of rows; independent of any field'''

    def __init__(self):
        pass

    def __call__(self, parentKey, rows):
        return len(rows)


class CountDistinct(AggregateMethod):
    '''Count the number of distinct values in a given field'''

    def aggregateParentKey(self, parentKey, rowCount):
        return 1

    def aggregateRows(self, rows):
        return len(set(row[self.field] for row in rows))


class DistinctValues(AggregateMethod):
    '''return a sorted list of distinct values for a given field'''

    def aggregateParentKey(self, parentKey, rowCount):
        return [parentKey]

    def aggregateRows(self, rows):
        return sorted({row[self.field] for row in rows})


class AllValues(AggregateMethod):
    '''return a list (in current order) of values for a given field'''

    def aggregateParentKey(self, parentKey, rowCount):
        return [parentKey] * rowCount

    def aggregateRows(self, rows):
        return [row[self.field] for row in rows]


class ConcatDistinct(AggregateMethod):
    '''String-concatenate the distinct set of values using the
    given string to join the values'''

    # BUG FIX: this class (and Concat below) previously did not inherit
    # AggregateMethod and defined no __call__, so instances were not
    # callable like every other aggregation method.
    def __init__(self, field, joinStr=','):
        super(ConcatDistinct, self).__init__(field)
        self.joinStr = joinStr

    def aggregateParentKey(self, parentKey, rowCount):
        return str(parentKey)

    def aggregateRows(self, rows):
        # dict.fromkeys keeps first-occurrence order, making the output
        # deterministic (raw set iteration order is not).
        return self.joinStr.join(dict.fromkeys(str(row[self.field]) for row in rows))


class Concat(AggregateMethod):
    '''String-concatenate all of the values using the given string to
    join the values'''

    def __init__(self, field, joinStr=','):
        super(Concat, self).__init__(field)
        self.joinStr = joinStr

    def aggregateParentKey(self, parentKey, rowCount):
        return self.joinStr.join([str(parentKey)] * rowCount)

    def aggregateRows(self, rows):
        return self.joinStr.join(str(row[self.field]) for row in rows)


class Value(AggregateMethod):
    '''returns the given value'''

    def __call__(self, parentKey, rows):
        return self.field


class Average(AggregateMethod):
    '''returns the average value for a given field'''

    def aggregateRows(self, rows):
        return sum(row[self.field] for row in rows) / len(rows)


class WeightedAverage:
    '''returns the average value for a given field, weighted by another column'''

    def __init__(self, averageField, weightField):
        self.averageField = averageField
        self.weightField = weightField

    def __call__(self, parentKey, rows):
        if self.averageField in parentKey:
            # weighted average of x = x
            return parentKey[self.averageField]
        if self.weightField in parentKey:
            # All weights equal: reduces to a straight average.
            return sum(row[self.averageField] for row in rows) / len(rows)
        totalWeight = sum(row[self.weightField] for row in rows)
        weightedAverage = sum(row[self.averageField] * row[self.weightField] for row in rows)
        return weightedAverage / totalWeight


class Min(AggregateMethod):
    '''smallest value of the field'''

    def aggregateRows(self, rows):
        return min(row[self.field] for row in rows)


class Max(AggregateMethod):
    '''largest value of the field'''

    def aggregateRows(self, rows):
        return max(row[self.field] for row in rows)


class Span(AggregateMethod):
    '''return the difference between the greatest and the least'''

    def aggregateParentKey(self, parentKey, rowCount):
        # A single shared value spans zero.
        return 0

    def aggregateRows(self, rows):
        return max(row[self.field] for row in rows) - min(row[self.field] for row in rows)
0.622345
0.591546
import git_api
from flask import Flask, render_template, request, session, url_for, redirect, send_from_directory
import json
import os
import shutil
from flask_session import Session
from dotenv import load_dotenv

load_dotenv()

app = Flask(__name__, static_url_path='', static_folder='static',
            template_folder='templates')
app.config["SECRET_KEY"] = os.environ["key"]
app.config["SESSION_PERMANENT"] = False
try:
    app.config["REPL_USER"] = os.environ["REPL_OWNER"]
except KeyError:
    # Not running on Replit; fall back to the project owner's username.
    app.config["REPL_USER"] = "DillonB07"
app.config["SESSION_TYPE"] = "filesystem"
app.config["static"] = 'static/'

git_api.Token(os.environ["token"])

Session(app)  # The sessions are created here


@app.route('/')
def index():
    """Landing page; clears any cached server-side session files."""
    # BUG FIX: rmtree previously raised FileNotFoundError (-> HTTP 500) when
    # the folder did not exist yet; ignore_errors makes the reset best-effort,
    # and makedirs(exist_ok=True) tolerates a leftover folder.
    shutil.rmtree('flask_session', ignore_errors=True)
    os.makedirs("flask_session", exist_ok=True)
    return render_template("index.html")


@app.route('/nouser')
def nouser():
    """Shown when a GitHub username does not exist."""
    return render_template("nouser.html")


@app.route('/search')
def search():
    """Render the profile card from whatever is currently in the session."""
    return render_template("search.html",
                           usernick=session.get("usernick"),
                           username=session.get("username"),
                           avatar=session.get("avatar"),
                           userurl=session.get("userurl"),
                           bio=session.get("bio"))


@app.route('/searchvalue', methods=["POST", "GET"])
def searchvalue():
    """Look up the submitted GitHub username and stash the result in the session."""
    if request.method == "POST":
        # Defaults shown if the lookup below fails to provide better values.
        session["usernick"] = "No User Exists!"
        session["username"] = "NoUserExists"
        session["avatar"] = 'nothing.jpg'
        session["userurl"] = "https://github.com/404"
        session["bio"] = "This user does not have a bio"
        session["avatarYN"] = "True"

        data2 = request.form["data"]
        url = f"https://github.com/{data2}"
        userurl = url

        # Query the GitHub GraphQL helper; the response is a JSON string.
        userdata2 = git_api.User(data2).User()
        data = json.loads(userdata2)
        name = data["data"]["user"]["name"]
        bio = data["data"]["user"]["bio"]
        username = data2
        image_url = data["data"]["user"]["avatarLink"]
        # Avatar is served directly from GitHub's CDN URL.
        avatar = image_url

        session["usernick"] = name
        if session.get("usernick") is None:
            session["usernick"] = "No Nickname!"
        session["username"] = username
        session["avatar"] = avatar
        session["userurl"] = userurl
        session["bio"] = bio
        if session.get("bio") is None:
            session["bio"] = "This user does not have a bio"
        session["avatarYN"] = "False"
        return redirect(url_for('search'))
    # BUG FIX: a GET request previously fell through and returned None, which
    # Flask reports as an HTTP 500; send the visitor to the search page instead.
    return redirect(url_for('search'))


@app.route('/avatar')
def avatar():
    """Serve the stored avatar path from the static folder as a download."""
    return send_from_directory('static/', session["avatar"], as_attachment=True)


@app.errorhandler(404)
def page_not_found(e):
    return render_template('404.html')


@app.errorhandler(500)
def page_not_found2(e):
    return render_template("500.html")


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=3000)
main.py
import git_api
from flask import Flask, render_template, request, session, url_for, redirect, send_from_directory
import json
import os
import shutil
from flask_session import Session
from dotenv import load_dotenv

load_dotenv()

app = Flask(__name__, static_url_path='', static_folder='static',
            template_folder='templates')
app.config["SECRET_KEY"] = os.environ["key"]
app.config["SESSION_PERMANENT"] = False
try:
    app.config["REPL_USER"] = os.environ["REPL_OWNER"]
except KeyError:
    # Not running on Replit; fall back to the project owner's username.
    app.config["REPL_USER"] = "DillonB07"
app.config["SESSION_TYPE"] = "filesystem"
app.config["static"] = 'static/'

git_api.Token(os.environ["token"])

Session(app)  # The sessions are created here


@app.route('/')
def index():
    """Landing page; clears any cached server-side session files."""
    # BUG FIX: rmtree previously raised FileNotFoundError (-> HTTP 500) when
    # the folder did not exist yet; ignore_errors makes the reset best-effort,
    # and makedirs(exist_ok=True) tolerates a leftover folder.
    shutil.rmtree('flask_session', ignore_errors=True)
    os.makedirs("flask_session", exist_ok=True)
    return render_template("index.html")


@app.route('/nouser')
def nouser():
    """Shown when a GitHub username does not exist."""
    return render_template("nouser.html")


@app.route('/search')
def search():
    """Render the profile card from whatever is currently in the session."""
    return render_template("search.html",
                           usernick=session.get("usernick"),
                           username=session.get("username"),
                           avatar=session.get("avatar"),
                           userurl=session.get("userurl"),
                           bio=session.get("bio"))


@app.route('/searchvalue', methods=["POST", "GET"])
def searchvalue():
    """Look up the submitted GitHub username and stash the result in the session."""
    if request.method == "POST":
        # Defaults shown if the lookup below fails to provide better values.
        session["usernick"] = "No User Exists!"
        session["username"] = "NoUserExists"
        session["avatar"] = 'nothing.jpg'
        session["userurl"] = "https://github.com/404"
        session["bio"] = "This user does not have a bio"
        session["avatarYN"] = "True"

        data2 = request.form["data"]
        url = f"https://github.com/{data2}"
        userurl = url

        # Query the GitHub GraphQL helper; the response is a JSON string.
        userdata2 = git_api.User(data2).User()
        data = json.loads(userdata2)
        name = data["data"]["user"]["name"]
        bio = data["data"]["user"]["bio"]
        username = data2
        image_url = data["data"]["user"]["avatarLink"]
        # Avatar is served directly from GitHub's CDN URL.
        avatar = image_url

        session["usernick"] = name
        if session.get("usernick") is None:
            session["usernick"] = "No Nickname!"
        session["username"] = username
        session["avatar"] = avatar
        session["userurl"] = userurl
        session["bio"] = bio
        if session.get("bio") is None:
            session["bio"] = "This user does not have a bio"
        session["avatarYN"] = "False"
        return redirect(url_for('search'))
    # BUG FIX: a GET request previously fell through and returned None, which
    # Flask reports as an HTTP 500; send the visitor to the search page instead.
    return redirect(url_for('search'))


@app.route('/avatar')
def avatar():
    """Serve the stored avatar path from the static folder as a download."""
    return send_from_directory('static/', session["avatar"], as_attachment=True)


@app.errorhandler(404)
def page_not_found(e):
    return render_template('404.html')


@app.errorhandler(500)
def page_not_found2(e):
    return render_template("500.html")


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=3000)
0.242116
0.054828
import json
from flask import Flask, request, redirect, g, render_template, session
import requests
import base64
import urllib
import spotipy
import datetime
from random import randint

# Authentication Steps, paramaters, and responses are defined at https://developer.spotify.com/web-api/authorization-guide/
# Visit this url to see all the steps, parameters, and expected response.
# NOTE(review): `urllib.quote` and `base64.b64encode("<str>")` below are
# Python 2 idioms; under Python 3 these would be urllib.parse.quote and a
# bytes argument — confirm the intended interpreter version.

app = Flask(__name__)
app.secret_key = '<KEY>'  # used to run sessions

# Client Keys
CLIENT_ID = "[YOUR-CLIENT-ID]"
CLIENT_SECRET = "[YOUR-CLIENT-SECRET]"

# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)

# Server-side Parameters
CLIENT_SIDE_URL = "http://127.0.0.1"
PORT = 8080
REDIRECT_URI = "{}:{}/callback/q".format(CLIENT_SIDE_URL, PORT)
SCOPE = "playlist-modify-public"
STATE = ""
SHOW_DIALOG_bool = True
SHOW_DIALOG_str = str(SHOW_DIALOG_bool).lower()

# Query parameters for the user-authorization redirect (Auth Step 1).
auth_query_parameters = {
    "response_type": "code",
    "redirect_uri": REDIRECT_URI,
    "scope": SCOPE,
    # "state": STATE,
    # "show_dialog": SHOW_DIALOG_str,
    "client_id": CLIENT_ID
}


@app.route("/")
def initPg():
    # Landing page: asks the visitor for their year of birth.
    return render_template("index.html")


@app.route("/n")
def index():
    # Remember the submitted birth year for use after the OAuth round-trip.
    session['yearOfBirth'] = request.args.get("year")
    # Auth Step 1: Authorization
    url_args = "&".join(["{}={}".format(key, urllib.quote(val)) for key, val in auth_query_parameters.items()])
    auth_url = "{}/?{}".format(SPOTIFY_AUTH_URL, url_args)
    return redirect(auth_url)


@app.route("/callback/q")
def callback():
    # Auth Step 4: Requests refresh and access tokens
    auth_token = request.args['code']
    code_payload = {
        "grant_type": "authorization_code",
        "code": str(auth_token),
        "redirect_uri": REDIRECT_URI
    }
    base64encoded = base64.b64encode("{}:{}".format(CLIENT_ID, CLIENT_SECRET))
    headers = {"Authorization": "Basic {}".format(base64encoded)}
    post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)

    # Auth Step 5: Tokens are Returned to Application
    response_data = json.loads(post_request.text)
    access_token = response_data["access_token"]
    refresh_token = response_data["refresh_token"]  # NOTE(review): unused below
    token_type = response_data["token_type"]  # NOTE(review): unused below
    expires_in = response_data["expires_in"]  # NOTE(review): unused below

    # Auth Step 6: Use the access token to access Spotify API
    # NOTE(review): this header is also unused — spotipy handles auth itself.
    authorization_header = {"Authorization": "Bearer {}".format(access_token)}

    # Build the list of years from the visitor's birth year up to today.
    currentYear = datetime.datetime.now().year
    years = []
    YOB = session['yearOfBirth']
    year = int(YOB)
    tracksToAdd = []
    tracksToShow = []
    while (year <= currentYear):
        years.append(year)
        year += 1

    sp = spotipy.Spotify(auth=access_token)
    username = str(sp.current_user()['id'])
    returnString = ''
    for i in years:
        # Pick one of the top 3 search hits for this year at random.
        # NOTE(review): assumes the search always returns >= 3 tracks;
        # fewer results would raise IndexError — confirm.
        index = randint(0, 2)
        results = sp.search(q='year:' + str(i), type='track', limit = 3)
        track = results['tracks']['items'][index]['name']
        artist = results['tracks']['items'][index]
        uri = results['tracks']['items'][index]
        #returnString += str(results) + ' || '
        returnString = 'year: ' + str(i) + ' ' + track + ' BY ' + artist['artists'][0]['name'] + ' URI = ' + uri['uri']
        tracksToAdd.append(uri['uri'])
        tracksToShow.append(returnString)

    #create playist and get its ID
    sp.user_playlist_create(username, str(YOB) + ' to ' + str(currentYear), public=True)
    # NOTE(review): assumes the newly created playlist appears first in the
    # user's playlist listing — confirm against the Spotify API ordering.
    playlistId = str(sp.user_playlists(username)['items'][0]['uri'])

    #add each track to a playlist
    sp.user_playlist_add_tracks(username, playlistId, tracksToAdd)
    return render_template("display.html", sorted_array=tracksToShow)


if __name__ == "__main__":
    app.run(debug=True, port=PORT)
main.py
import json
from flask import Flask, request, redirect, g, render_template, session
import requests
import base64
import urllib
import spotipy
import datetime
from random import randint

# Authentication Steps, paramaters, and responses are defined at https://developer.spotify.com/web-api/authorization-guide/
# Visit this url to see all the steps, parameters, and expected response.
# NOTE(review): `urllib.quote` and `base64.b64encode("<str>")` below are
# Python 2 idioms; under Python 3 these would be urllib.parse.quote and a
# bytes argument — confirm the intended interpreter version.

app = Flask(__name__)
app.secret_key = '<KEY>'  # used to run sessions

# Client Keys
CLIENT_ID = "[YOUR-CLIENT-ID]"
CLIENT_SECRET = "[YOUR-CLIENT-SECRET]"

# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)

# Server-side Parameters
CLIENT_SIDE_URL = "http://127.0.0.1"
PORT = 8080
REDIRECT_URI = "{}:{}/callback/q".format(CLIENT_SIDE_URL, PORT)
SCOPE = "playlist-modify-public"
STATE = ""
SHOW_DIALOG_bool = True
SHOW_DIALOG_str = str(SHOW_DIALOG_bool).lower()

# Query parameters for the user-authorization redirect (Auth Step 1).
auth_query_parameters = {
    "response_type": "code",
    "redirect_uri": REDIRECT_URI,
    "scope": SCOPE,
    # "state": STATE,
    # "show_dialog": SHOW_DIALOG_str,
    "client_id": CLIENT_ID
}


@app.route("/")
def initPg():
    # Landing page: asks the visitor for their year of birth.
    return render_template("index.html")


@app.route("/n")
def index():
    # Remember the submitted birth year for use after the OAuth round-trip.
    session['yearOfBirth'] = request.args.get("year")
    # Auth Step 1: Authorization
    url_args = "&".join(["{}={}".format(key, urllib.quote(val)) for key, val in auth_query_parameters.items()])
    auth_url = "{}/?{}".format(SPOTIFY_AUTH_URL, url_args)
    return redirect(auth_url)


@app.route("/callback/q")
def callback():
    # Auth Step 4: Requests refresh and access tokens
    auth_token = request.args['code']
    code_payload = {
        "grant_type": "authorization_code",
        "code": str(auth_token),
        "redirect_uri": REDIRECT_URI
    }
    base64encoded = base64.b64encode("{}:{}".format(CLIENT_ID, CLIENT_SECRET))
    headers = {"Authorization": "Basic {}".format(base64encoded)}
    post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)

    # Auth Step 5: Tokens are Returned to Application
    response_data = json.loads(post_request.text)
    access_token = response_data["access_token"]
    refresh_token = response_data["refresh_token"]  # NOTE(review): unused below
    token_type = response_data["token_type"]  # NOTE(review): unused below
    expires_in = response_data["expires_in"]  # NOTE(review): unused below

    # Auth Step 6: Use the access token to access Spotify API
    # NOTE(review): this header is also unused — spotipy handles auth itself.
    authorization_header = {"Authorization": "Bearer {}".format(access_token)}

    # Build the list of years from the visitor's birth year up to today.
    currentYear = datetime.datetime.now().year
    years = []
    YOB = session['yearOfBirth']
    year = int(YOB)
    tracksToAdd = []
    tracksToShow = []
    while (year <= currentYear):
        years.append(year)
        year += 1

    sp = spotipy.Spotify(auth=access_token)
    username = str(sp.current_user()['id'])
    returnString = ''
    for i in years:
        # Pick one of the top 3 search hits for this year at random.
        # NOTE(review): assumes the search always returns >= 3 tracks;
        # fewer results would raise IndexError — confirm.
        index = randint(0, 2)
        results = sp.search(q='year:' + str(i), type='track', limit = 3)
        track = results['tracks']['items'][index]['name']
        artist = results['tracks']['items'][index]
        uri = results['tracks']['items'][index]
        #returnString += str(results) + ' || '
        returnString = 'year: ' + str(i) + ' ' + track + ' BY ' + artist['artists'][0]['name'] + ' URI = ' + uri['uri']
        tracksToAdd.append(uri['uri'])
        tracksToShow.append(returnString)

    #create playist and get its ID
    sp.user_playlist_create(username, str(YOB) + ' to ' + str(currentYear), public=True)
    # NOTE(review): assumes the newly created playlist appears first in the
    # user's playlist listing — confirm against the Spotify API ordering.
    playlistId = str(sp.user_playlists(username)['items'][0]['uri'])

    #add each track to a playlist
    sp.user_playlist_add_tracks(username, playlistId, tracksToAdd)
    return render_template("display.html", sorted_array=tracksToShow)


if __name__ == "__main__":
    app.run(debug=True, port=PORT)
0.42919
0.115486
import numpy as np

from oap.__conf__ import MARKER, SLICE_SIZE


def print_array(array, frame=False, scales=True, slice_size=SLICE_SIZE):
    """
    Prints an optical-array as string, list or numpy-array (1d or 2d | dtype=int)
    to the console output.

    :param array: optical-array (particle image)
    :type array: string, list or numpy-array (1d or 2d | dtype=int)

    :param frame: show image frame
    :type frame: boolean

    :param scales: show pixel scales
    :type scales: boolean

    :param slice_size: width of the optical-array (number of diodes)
    :type slice_size: integer
    """
    # For numpy input, take the row width from the array itself and flatten
    # to 1-D so the indexing below is uniform for all input types.
    if type(array).__module__ == np.__name__:
        if array.ndim >= 2:
            slice_size = len(array[0])
        array = np.ravel(array)
    # Top border: "+ - - ... - +".
    if frame:
        print("+ ", end='')
        for _ in range(slice_size):
            print("- ", end='')
        print("+ ")
    # One console line per image slice (row of `slice_size` pixels).
    for y in range(int(len(array)/slice_size)):
        if frame:
            print("| ", end='')
        for x in range(slice_size):
            if array[y*slice_size+x] == 0 or array[y*slice_size+x] == '0':
                # Shadow-free pixel: blank padding.
                # NOTE(review): assumes two-character padding to align with
                # the two-character cells printed below — TODO confirm the
                # exact literal against the repository source.
                print("  ", end='')
            else:
                if scales:
                    # Show the pixel's numeric shadow level.
                    print(str(int(array[y*slice_size+x])) + ' ', end='')
                else:
                    if array[y*slice_size+x] == MARKER['poisson']:
                        # Poisson-spot marker pixel.
                        print("+ ", end='')
                    else:
                        # Any other non-zero value: solid block.
                        print("\u2588 ", end='')
        if frame:
            print("| ", end='')
        print()
    # Bottom border, mirroring the top one.
    if frame:
        print("+ ", end='')
        for _ in range(slice_size):
            print("- ", end='')
        print("+ ")


def print_separator(separator='-', slice_size=SLICE_SIZE):
    """
    Prints a simple separator for particle previews to the console output.

    --- optional params ---

    :param separator: symbol of the separator
    :type separator: char

    :param slice_size: width of the optical-array (number of diodes)
    :type slice_size: integer
    """
    # One "<separator> " cell per diode, then a newline.
    for _ in range(slice_size):
        print(separator + ' ', end='')
    print()
oap/utils/console.py
import numpy as np

from oap.__conf__ import MARKER, SLICE_SIZE


def print_array(array, frame=False, scales=True, slice_size=SLICE_SIZE):
    """
    Prints an optical-array as string, list or numpy-array (1d or 2d | dtype=int)
    to the console output.

    :param array: optical-array (particle image)
    :type array: string, list or numpy-array (1d or 2d | dtype=int)

    :param frame: show image frame
    :type frame: boolean

    :param scales: show pixel scales
    :type scales: boolean

    :param slice_size: width of the optical-array (number of diodes)
    :type slice_size: integer
    """
    # For numpy input, take the row width from the array itself and flatten
    # to 1-D so the indexing below is uniform for all input types.
    if type(array).__module__ == np.__name__:
        if array.ndim >= 2:
            slice_size = len(array[0])
        array = np.ravel(array)
    # Top border: "+ - - ... - +".
    if frame:
        print("+ ", end='')
        for _ in range(slice_size):
            print("- ", end='')
        print("+ ")
    # One console line per image slice (row of `slice_size` pixels).
    for y in range(int(len(array)/slice_size)):
        if frame:
            print("| ", end='')
        for x in range(slice_size):
            if array[y*slice_size+x] == 0 or array[y*slice_size+x] == '0':
                # Shadow-free pixel: blank padding.
                # NOTE(review): assumes two-character padding to align with
                # the two-character cells printed below — TODO confirm the
                # exact literal against the repository source.
                print("  ", end='')
            else:
                if scales:
                    # Show the pixel's numeric shadow level.
                    print(str(int(array[y*slice_size+x])) + ' ', end='')
                else:
                    if array[y*slice_size+x] == MARKER['poisson']:
                        # Poisson-spot marker pixel.
                        print("+ ", end='')
                    else:
                        # Any other non-zero value: solid block.
                        print("\u2588 ", end='')
        if frame:
            print("| ", end='')
        print()
    # Bottom border, mirroring the top one.
    if frame:
        print("+ ", end='')
        for _ in range(slice_size):
            print("- ", end='')
        print("+ ")


def print_separator(separator='-', slice_size=SLICE_SIZE):
    """
    Prints a simple separator for particle previews to the console output.

    --- optional params ---

    :param separator: symbol of the separator
    :type separator: char

    :param slice_size: width of the optical-array (number of diodes)
    :type slice_size: integer
    """
    # One "<separator> " cell per diode, then a newline.
    for _ in range(slice_size):
        print(separator + ' ', end='')
    print()
0.640523
0.587115
from __future__ import print_function from builtins import str # pylint: disable=redefined-builtin import datetime import pytz from django.conf import settings from django.core import management from django.core.management.base import BaseCommand from ...decorators import handle_lock class Command(BaseCommand): help = 'Executes sequential incremental backups.' def add_arguments(self, parser): parser.add_argument('--start-date', type=str, dest='start_date', required=True, help='Start of date range for incremental backup') parser.add_argument('--end-date', type=str, dest='end_date', required=True, help='End of date range for incremental backup') parser.add_argument('--window-days', dest='window_days', default=7, type=int, help='Number of days for each backup job') parser.add_argument('--filter-sensitive-data', dest='filter_sensitive', action='store_true', help='Filter sensitive data from the backup data points written') @handle_lock def handle(self, *args, **options): here_tz = pytz.timezone(settings.TIME_ZONE) components = options['start_date'].split('-') start_date = datetime.datetime(int(components[0]), int(components[1]), int(components[2]), 0, 0, 0, 0, here_tz).date() components = options['end_date'].split('-') end_date = datetime.datetime(int(components[0]), int(components[1]), int(components[2]), 0, 0, 0, 0, here_tz).date() while start_date <= end_date: local_end_date = start_date + datetime.timedelta(days=(options['window_days'] - 1)) local_end_date = min(local_end_date, end_date) arguments = [ '--start-date', start_date.isoformat(), '--end-date', local_end_date.isoformat() ] if options['filter_sensitive'] is not None and options['filter_sensitive'] is not False: arguments.append('--filter-sensitive-data') print(str(arguments)) management.call_command('incremental_backup', *arguments) start_date = local_end_date + datetime.timedelta(days=1)
management/commands/incremental_backup_batch.py
from __future__ import print_function from builtins import str # pylint: disable=redefined-builtin import datetime import pytz from django.conf import settings from django.core import management from django.core.management.base import BaseCommand from ...decorators import handle_lock class Command(BaseCommand): help = 'Executes sequential incremental backups.' def add_arguments(self, parser): parser.add_argument('--start-date', type=str, dest='start_date', required=True, help='Start of date range for incremental backup') parser.add_argument('--end-date', type=str, dest='end_date', required=True, help='End of date range for incremental backup') parser.add_argument('--window-days', dest='window_days', default=7, type=int, help='Number of days for each backup job') parser.add_argument('--filter-sensitive-data', dest='filter_sensitive', action='store_true', help='Filter sensitive data from the backup data points written') @handle_lock def handle(self, *args, **options): here_tz = pytz.timezone(settings.TIME_ZONE) components = options['start_date'].split('-') start_date = datetime.datetime(int(components[0]), int(components[1]), int(components[2]), 0, 0, 0, 0, here_tz).date() components = options['end_date'].split('-') end_date = datetime.datetime(int(components[0]), int(components[1]), int(components[2]), 0, 0, 0, 0, here_tz).date() while start_date <= end_date: local_end_date = start_date + datetime.timedelta(days=(options['window_days'] - 1)) local_end_date = min(local_end_date, end_date) arguments = [ '--start-date', start_date.isoformat(), '--end-date', local_end_date.isoformat() ] if options['filter_sensitive'] is not None and options['filter_sensitive'] is not False: arguments.append('--filter-sensitive-data') print(str(arguments)) management.call_command('incremental_backup', *arguments) start_date = local_end_date + datetime.timedelta(days=1)
0.455441
0.089773
import unittest
from pathlib import Path
from typing import List

import numpy as np
from PIL import Image, ImageFont

from resources.fonts import DemoFontPaths
from test.utils import demo_training_parameters, demo_picture_parameters
from text_depixelizer.HMM.depix_hmm import DepixHMM
from text_depixelizer.parameters import PictureParameters, TrainingParameters
from text_depixelizer.training_pipeline.windows import Window


class TestDepixHmm(unittest.TestCase):
    """Tests for DepixHMM training, evaluation and image inference."""

    # Shared picture parameters for the tests that train a model from scratch.
    demo_picture_parameters: PictureParameters = PictureParameters(
        block_size=6,
        pattern=r'\d{8,12}',
        font=ImageFont.truetype(str(DemoFontPaths.arial), 50)
    )

    def test_train(self):
        # Arrange
        training_parameters: TrainingParameters = demo_training_parameters
        depix_hmm: DepixHMM = DepixHMM(self.demo_picture_parameters, demo_training_parameters)

        # Act
        depix_hmm.train()

        # Assert
        self.assertEqual(depix_hmm.emission_probabilities.shape[1], training_parameters.n_clusters)
        self.assertTrue(len(depix_hmm.states) > 5)
        self.assertEqual(depix_hmm.emission_probabilities.shape, depix_hmm.log_emission_probabilities.shape)

    def test_evaluate(self):
        # Arrange
        depix_hmm: DepixHMM = DepixHMM(self.demo_picture_parameters, demo_training_parameters)
        depix_hmm.train()

        # Act
        accuracy, average_distance = depix_hmm.evaluate()

        # Assert
        self.assertGreaterEqual(accuracy, 0)
        self.assertLessEqual(accuracy, 1)
        self.assertIsInstance(accuracy, float)
        self.assertIsInstance(average_distance, float)

    def test_get_starting_probabilities(self):
        # Arrange
        # BUGFIX: the original built these with np.ndarray([1, 2, 3]), which
        # allocates an *uninitialized* array of shape (1, 2, 3) rather than an
        # array containing those values; np.array is what was intended.
        windows: List[Window] = [
            Window(characters=('A', 'b'), values=np.array([1, 2, 3]), window_index=0, k=0),
            Window(characters=('b',), values=np.array([2, 3, 4]), window_index=1, k=0),
            Window(characters=('b',), values=np.array([3, 4, 5]), window_index=2, k=1),
            Window(characters=('b', 'c'), values=np.array([4, 5, 6]), window_index=3, k=1),
            Window(characters=('d',), values=np.array([5, 6, 7]), window_index=4, k=2),
            Window(characters=('X',), values=np.array([6, 7, 8]), window_index=0, k=3)
        ]
        depix_hmm: DepixHMM = DepixHMM(demo_picture_parameters, demo_training_parameters)

        # Act
        depix_hmm.calculate_hmm_properties(windows_train=windows)

        # Assert: Observations
        self.assertCountEqual(depix_hmm.observations, (0, 1, 2, 3))

        # Assert: States
        self.assertCountEqual(depix_hmm.states, (('A', 'b'), ('b',), ('b', 'c'), ('d',), ('X',)))

        # Assert: Starting probabilities
        self.assertEqual(depix_hmm.starting_probabilities[depix_hmm.states.index(('A', 'b'))], 0.5)
        self.assertEqual(depix_hmm.starting_probabilities[depix_hmm.states.index(('b',))], 0.0)

        # Assert: Transition probabilities (each row must be stochastic)
        self.assertEqual(depix_hmm.transition_probabilities.shape, (len(depix_hmm.states), len(depix_hmm.states)))
        self.assertNotEqual(depix_hmm.transition_probabilities[depix_hmm.states.index(('b',)), depix_hmm.states.index(('b',))], 0)
        for s in depix_hmm.transition_probabilities.sum(axis=1):
            self.assertAlmostEqual(s, 1.0, places=3)

        # Assert: Emission probabilities (each row must be stochastic)
        self.assertEqual(depix_hmm.emission_probabilities.shape, (len(depix_hmm.states), len(depix_hmm.observations)))
        for s in depix_hmm.emission_probabilities.sum(axis=1):
            self.assertAlmostEqual(s, 1.0, places=3)

    def test_test_image(self):
        # Arrange
        img_path: Path = Path(__file__).parent.parent.parent / 'examples' / 'arial_50_blocksize-8' / 'pixelized_cropped.png'
        picture_parameters: PictureParameters = PictureParameters(
            pattern=r'\d{9}',
            font=ImageFont.truetype(str(DemoFontPaths.arial), 50),
            block_size=8,
            window_size=4
        )
        training_parameters: TrainingParameters = TrainingParameters(
            n_img_train=100,
            n_img_test=1,
            n_clusters=150
        )
        depix_hmm: DepixHMM = DepixHMM(picture_parameters, training_parameters)
        depix_hmm.train()

        # Act
        with Image.open(img_path) as img:
            reconstructed_string: str = depix_hmm.test_image(img)

        # Assert
        self.assertIsInstance(reconstructed_string, str)
test/HMM/test_depix_hmm.py
import unittest
from pathlib import Path
from typing import List

import numpy as np
from PIL import Image, ImageFont

from resources.fonts import DemoFontPaths
from test.utils import demo_training_parameters, demo_picture_parameters
from text_depixelizer.HMM.depix_hmm import DepixHMM
from text_depixelizer.parameters import PictureParameters, TrainingParameters
from text_depixelizer.training_pipeline.windows import Window


class TestDepixHmm(unittest.TestCase):
    """Tests for DepixHMM training, evaluation and image inference."""

    # Shared picture parameters for the tests that train a model from scratch.
    demo_picture_parameters: PictureParameters = PictureParameters(
        block_size=6,
        pattern=r'\d{8,12}',
        font=ImageFont.truetype(str(DemoFontPaths.arial), 50)
    )

    def test_train(self):
        # Arrange
        training_parameters: TrainingParameters = demo_training_parameters
        depix_hmm: DepixHMM = DepixHMM(self.demo_picture_parameters, demo_training_parameters)

        # Act
        depix_hmm.train()

        # Assert
        self.assertEqual(depix_hmm.emission_probabilities.shape[1], training_parameters.n_clusters)
        self.assertTrue(len(depix_hmm.states) > 5)
        self.assertEqual(depix_hmm.emission_probabilities.shape, depix_hmm.log_emission_probabilities.shape)

    def test_evaluate(self):
        # Arrange
        depix_hmm: DepixHMM = DepixHMM(self.demo_picture_parameters, demo_training_parameters)
        depix_hmm.train()

        # Act
        accuracy, average_distance = depix_hmm.evaluate()

        # Assert
        self.assertGreaterEqual(accuracy, 0)
        self.assertLessEqual(accuracy, 1)
        self.assertIsInstance(accuracy, float)
        self.assertIsInstance(average_distance, float)

    def test_get_starting_probabilities(self):
        # Arrange
        # BUGFIX: the original built these with np.ndarray([1, 2, 3]), which
        # allocates an *uninitialized* array of shape (1, 2, 3) rather than an
        # array containing those values; np.array is what was intended.
        windows: List[Window] = [
            Window(characters=('A', 'b'), values=np.array([1, 2, 3]), window_index=0, k=0),
            Window(characters=('b',), values=np.array([2, 3, 4]), window_index=1, k=0),
            Window(characters=('b',), values=np.array([3, 4, 5]), window_index=2, k=1),
            Window(characters=('b', 'c'), values=np.array([4, 5, 6]), window_index=3, k=1),
            Window(characters=('d',), values=np.array([5, 6, 7]), window_index=4, k=2),
            Window(characters=('X',), values=np.array([6, 7, 8]), window_index=0, k=3)
        ]
        depix_hmm: DepixHMM = DepixHMM(demo_picture_parameters, demo_training_parameters)

        # Act
        depix_hmm.calculate_hmm_properties(windows_train=windows)

        # Assert: Observations
        self.assertCountEqual(depix_hmm.observations, (0, 1, 2, 3))

        # Assert: States
        self.assertCountEqual(depix_hmm.states, (('A', 'b'), ('b',), ('b', 'c'), ('d',), ('X',)))

        # Assert: Starting probabilities
        self.assertEqual(depix_hmm.starting_probabilities[depix_hmm.states.index(('A', 'b'))], 0.5)
        self.assertEqual(depix_hmm.starting_probabilities[depix_hmm.states.index(('b',))], 0.0)

        # Assert: Transition probabilities (each row must be stochastic)
        self.assertEqual(depix_hmm.transition_probabilities.shape, (len(depix_hmm.states), len(depix_hmm.states)))
        self.assertNotEqual(depix_hmm.transition_probabilities[depix_hmm.states.index(('b',)), depix_hmm.states.index(('b',))], 0)
        for s in depix_hmm.transition_probabilities.sum(axis=1):
            self.assertAlmostEqual(s, 1.0, places=3)

        # Assert: Emission probabilities (each row must be stochastic)
        self.assertEqual(depix_hmm.emission_probabilities.shape, (len(depix_hmm.states), len(depix_hmm.observations)))
        for s in depix_hmm.emission_probabilities.sum(axis=1):
            self.assertAlmostEqual(s, 1.0, places=3)

    def test_test_image(self):
        # Arrange
        img_path: Path = Path(__file__).parent.parent.parent / 'examples' / 'arial_50_blocksize-8' / 'pixelized_cropped.png'
        picture_parameters: PictureParameters = PictureParameters(
            pattern=r'\d{9}',
            font=ImageFont.truetype(str(DemoFontPaths.arial), 50),
            block_size=8,
            window_size=4
        )
        training_parameters: TrainingParameters = TrainingParameters(
            n_img_train=100,
            n_img_test=1,
            n_clusters=150
        )
        depix_hmm: DepixHMM = DepixHMM(picture_parameters, training_parameters)
        depix_hmm.train()

        # Act
        with Image.open(img_path) as img:
            reconstructed_string: str = depix_hmm.test_image(img)

        # Assert
        self.assertIsInstance(reconstructed_string, str)
0.826257
0.493775
""" Module to handle ssh credentials (userid, host, private_keys) """ __author__ = "gelpi" __date__ = "$08-March-2019 17:32:38$" import sys import argparse import os from paramiko import SSHClient, AutoAddPolicy, AuthenticationException from biobb_remote.ssh_credentials import SSHCredentials ARGPARSE = argparse.ArgumentParser( description='Credentials manager for biobb_remote' ) ARGPARSE.add_argument( dest='operation', help='Operation: create|get_pubkey', choices=['create', 'get_pubkey', 'get_private', 'host_install', 'host_remove', 'host_check'] ) ARGPARSE.add_argument( '--user', dest='userid', help='User id' ) ARGPARSE.add_argument( '--host', dest='hostname', help='Host name' ) ARGPARSE.add_argument( '--pubkey_path', dest='pubkey_path', help='Public key file path' ) ARGPARSE.add_argument( '--nbits', dest='nbits', type=int, default=2048, help='Number of key bits' ) ARGPARSE.add_argument( '--keys_path', dest='keys_path', help='Credentials file path', required=True ) ARGPARSE.add_argument( '--privkey_path', dest='privkey_path', help='Private key file path' ) ARGPARSE.add_argument( '-v', dest="verbose", action="store_true", help='Output extra information' ) class Credentials(): """ Class to wrap credentials management following biobb_template""" def __init__(self, line_args): self.args = line_args def launch(self): """ Launch execution following biobb_template""" if self.args.operation == 'create': credentials = SSHCredentials( host=self.args.hostname, userid=self.args.userid, generate_key=False ) credentials.generate_key(self.args.nbits) credentials.save( output_path=self.args.keys_path, public_key_path=self.args.pubkey_path, private_key_path=self.args.privkey_path ) if self.args.verbose: print("Credentials stored in", self.args.keys_path) if self.args.pubkey_path is None: print("Public key, add to authorized_keys on remote host") print(credentials.get_public_key()) else: credentials = SSHCredentials() credentials.load_from_file(self.args.keys_path) if 
self.args.operation == 'get_pubkey': print(credentials.get_public_key()) elif self.args.operation == 'get_private': print(credentials.get_private_key()) elif self.args.operation in ('host_install', 'host_remove', 'host_check'): host_str = '{}@{}'.format(credentials.userid, credentials.host) if self.args.operation == 'host_check': print('Biobb public key {} at {}'.format( 'found' if credentials.check_host_auth() else 'not found', host_str ) ) elif self.args.operation == 'host_install': if not credentials.check_host_auth(): credentials.install_host_auth('bck') if self.args.verbose: print('Biobb keys installed on', host_str) print("Warning: original .ssh/authorize_keys file stored as .ssh/authorized_keys.bck") elif self.args.operation == 'host_remove': print(credentials.check_host_auth()) if credentials.check_host_auth(): credentials.remove_host_auth('biobb') if self.args.verbose: print('Biobb removed from ', host_str) print("Warning: .ssh/authorize_keys file stored as .ssh/authorized_keys.biobb") else: sys.exit("credentials: error: unknown op") def main(): args = ARGPARSE.parse_args() if args.operation == 'create': if args.userid is None or args.hostname is None: sys.exit("ssh_command: error: Userid and hostname are required to create credentials") Credentials(args).launch() if __name__ == '__main__': main()
scripts/credentials.py
""" Module to handle ssh credentials (userid, host, private_keys) """ __author__ = "gelpi" __date__ = "$08-March-2019 17:32:38$" import sys import argparse import os from paramiko import SSHClient, AutoAddPolicy, AuthenticationException from biobb_remote.ssh_credentials import SSHCredentials ARGPARSE = argparse.ArgumentParser( description='Credentials manager for biobb_remote' ) ARGPARSE.add_argument( dest='operation', help='Operation: create|get_pubkey', choices=['create', 'get_pubkey', 'get_private', 'host_install', 'host_remove', 'host_check'] ) ARGPARSE.add_argument( '--user', dest='userid', help='User id' ) ARGPARSE.add_argument( '--host', dest='hostname', help='Host name' ) ARGPARSE.add_argument( '--pubkey_path', dest='pubkey_path', help='Public key file path' ) ARGPARSE.add_argument( '--nbits', dest='nbits', type=int, default=2048, help='Number of key bits' ) ARGPARSE.add_argument( '--keys_path', dest='keys_path', help='Credentials file path', required=True ) ARGPARSE.add_argument( '--privkey_path', dest='privkey_path', help='Private key file path' ) ARGPARSE.add_argument( '-v', dest="verbose", action="store_true", help='Output extra information' ) class Credentials(): """ Class to wrap credentials management following biobb_template""" def __init__(self, line_args): self.args = line_args def launch(self): """ Launch execution following biobb_template""" if self.args.operation == 'create': credentials = SSHCredentials( host=self.args.hostname, userid=self.args.userid, generate_key=False ) credentials.generate_key(self.args.nbits) credentials.save( output_path=self.args.keys_path, public_key_path=self.args.pubkey_path, private_key_path=self.args.privkey_path ) if self.args.verbose: print("Credentials stored in", self.args.keys_path) if self.args.pubkey_path is None: print("Public key, add to authorized_keys on remote host") print(credentials.get_public_key()) else: credentials = SSHCredentials() credentials.load_from_file(self.args.keys_path) if 
self.args.operation == 'get_pubkey': print(credentials.get_public_key()) elif self.args.operation == 'get_private': print(credentials.get_private_key()) elif self.args.operation in ('host_install', 'host_remove', 'host_check'): host_str = '{}@{}'.format(credentials.userid, credentials.host) if self.args.operation == 'host_check': print('Biobb public key {} at {}'.format( 'found' if credentials.check_host_auth() else 'not found', host_str ) ) elif self.args.operation == 'host_install': if not credentials.check_host_auth(): credentials.install_host_auth('bck') if self.args.verbose: print('Biobb keys installed on', host_str) print("Warning: original .ssh/authorize_keys file stored as .ssh/authorized_keys.bck") elif self.args.operation == 'host_remove': print(credentials.check_host_auth()) if credentials.check_host_auth(): credentials.remove_host_auth('biobb') if self.args.verbose: print('Biobb removed from ', host_str) print("Warning: .ssh/authorize_keys file stored as .ssh/authorized_keys.biobb") else: sys.exit("credentials: error: unknown op") def main(): args = ARGPARSE.parse_args() if args.operation == 'create': if args.userid is None or args.hostname is None: sys.exit("ssh_command: error: Userid and hostname are required to create credentials") Credentials(args).launch() if __name__ == '__main__': main()
0.272315
0.078501
import sys from collections import OrderedDict from ydk.types import Entity as _Entity_ from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class BfdDiagnosticCode(Enum): """ BfdDiagnosticCode (Enum Class) Diagnostic codes defined by BFD. These typically indicate the reason for a change of session state. .. data:: NO_DIAGNOSTIC = 0 No diagnostic code was specified, or the session has not changed state. .. data:: DETECTION_TIMEOUT = 1 The control detection time expired: no BFD packet was received within the required period. .. data:: ECHO_FAILED = 2 The BFD echo function failed - echo packets have not been received for the required period of time. .. data:: FORWARDING_RESET = 3 The forwarding plane in the local system was reset - such that the remote system cannot rely on the forwarding state of the device specifying this error code. .. data:: PATH_DOWN = 4 Signalling outside of BFD specified that the path underlying this session has failed. .. data:: CONCATENATED_PATH_DOWN = 5 When a BFD session runs over a series of path segments, this error code indicates that a subsequent path segment (i.e., one in the transmit path between the source and destination of the session) has failed. .. data:: ADMIN_DOWN = 6 The BFD session has been administratively disabled by the peer. .. data:: REVERSE_CONCATENATED_PATH_DOWN = 7 In the case that a BFD session is running over a series of path segments, this error code indicates that a path segment on the reverse path (i.e., in the transmit direction from the destination to the source of the session) has failed. 
""" NO_DIAGNOSTIC = Enum.YLeaf(0, "NO_DIAGNOSTIC") DETECTION_TIMEOUT = Enum.YLeaf(1, "DETECTION_TIMEOUT") ECHO_FAILED = Enum.YLeaf(2, "ECHO_FAILED") FORWARDING_RESET = Enum.YLeaf(3, "FORWARDING_RESET") PATH_DOWN = Enum.YLeaf(4, "PATH_DOWN") CONCATENATED_PATH_DOWN = Enum.YLeaf(5, "CONCATENATED_PATH_DOWN") ADMIN_DOWN = Enum.YLeaf(6, "ADMIN_DOWN") REVERSE_CONCATENATED_PATH_DOWN = Enum.YLeaf(7, "REVERSE_CONCATENATED_PATH_DOWN") class BfdSessionState(Enum): """ BfdSessionState (Enum Class) The state of the BFD session according to the system referred to by the context of the leaf. .. data:: UP = 0 The BFD session is perceived to be up by the system. .. data:: DOWN = 1 The BFD session is perceived to be down by the system. .. data:: ADMIN_DOWN = 2 The BFD session is administratively disabled. .. data:: INIT = 3 The BFD session is perceived to be initialising by the system. """ UP = Enum.YLeaf(0, "UP") DOWN = Enum.YLeaf(1, "DOWN") ADMIN_DOWN = Enum.YLeaf(2, "ADMIN_DOWN") INIT = Enum.YLeaf(3, "INIT") class Bfd(_Entity_): """ Configuration and operational state parameters for BFD. .. 
attribute:: interfaces Interfaces on which BFD sessions are to be enabled **type**\: :py:class:`Interfaces <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces>` """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd, self).__init__() self._top_entity = None self.yang_name = "bfd" self.yang_parent_name = "openconfig-bfd" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("interfaces", ("interfaces", Bfd.Interfaces))]) self._leafs = OrderedDict() self.interfaces = Bfd.Interfaces() self.interfaces.parent = self self._children_name_map["interfaces"] = "interfaces" self._segment_path = lambda: "openconfig-bfd:bfd" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd, [], name, value) class Interfaces(_Entity_): """ Interfaces on which BFD sessions are to be enabled. .. attribute:: interface Per\-interface configuration and state parameters for BFD **type**\: list of :py:class:`Interface <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface>` """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces, self).__init__() self.yang_name = "interfaces" self.yang_parent_name = "bfd" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("interface", ("interface", Bfd.Interfaces.Interface))]) self._leafs = OrderedDict() self.interface = YList(self) self._segment_path = lambda: "interfaces" self._absolute_path = lambda: "openconfig-bfd:bfd/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces, [], name, value) class Interface(_Entity_): """ Per\-interface configuration and state parameters for BFD. .. 
attribute:: id (key) A reference to an identifier for the interface on which BFD is enabled **type**\: str **refers to**\: :py:class:`id <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Config>` .. attribute:: config Configuration parameters for BFD on the specified interface **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Config>` .. attribute:: state Operational state parameters for BFD on the specified interface **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.State>` **config**\: False .. attribute:: interface_ref Reference to an interface or subinterface **type**\: :py:class:`InterfaceRef <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef>` .. attribute:: micro_bfd_sessions Parameters relating to micro\-BFD sessions associated with the interface **type**\: :py:class:`MicroBfdSessions <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions>` .. 
attribute:: peers Parameters relating to the BFD peers which are seen over this interface **type**\: :py:class:`Peers <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers>` """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface, self).__init__() self.yang_name = "interface" self.yang_parent_name = "interfaces" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['id'] self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.Config)), ("state", ("state", Bfd.Interfaces.Interface.State)), ("interface-ref", ("interface_ref", Bfd.Interfaces.Interface.InterfaceRef)), ("micro-bfd-sessions", ("micro_bfd_sessions", Bfd.Interfaces.Interface.MicroBfdSessions)), ("peers", ("peers", Bfd.Interfaces.Interface.Peers))]) self._leafs = OrderedDict([ ('id', (YLeaf(YType.str, 'id'), ['str'])), ]) self.id = None self.config = Bfd.Interfaces.Interface.Config() self.config.parent = self self._children_name_map["config"] = "config" self.state = Bfd.Interfaces.Interface.State() self.state.parent = self self._children_name_map["state"] = "state" self.interface_ref = Bfd.Interfaces.Interface.InterfaceRef() self.interface_ref.parent = self self._children_name_map["interface_ref"] = "interface-ref" self.micro_bfd_sessions = Bfd.Interfaces.Interface.MicroBfdSessions() self.micro_bfd_sessions.parent = self self._children_name_map["micro_bfd_sessions"] = "micro-bfd-sessions" self.peers = Bfd.Interfaces.Interface.Peers() self.peers.parent = self self._children_name_map["peers"] = "peers" self._segment_path = lambda: "interface" + "[id='" + str(self.id) + "']" self._absolute_path = lambda: "openconfig-bfd:bfd/interfaces/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface, ['id'], name, value) class Config(_Entity_): """ Configuration parameters for 
BFD on the specified interface. .. attribute:: id A unique identifier for the interface **type**\: str .. attribute:: enabled When this leaf is set to true then the BFD session is enabled on the specified interface \- if it is set to false, it is administratively disabled **type**\: bool .. attribute:: local_address The source IP address to be used for BFD sessions over this interface **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ .. attribute:: desired_minimum_tx_interval The minimum interval between transmission of BFD control packets that the operator desires. This value is advertised to the peer, however the actual interval used is specified by taking the maximum of desired\-minimum\-tx\-interval and the value of the remote required\-minimum\-receive interval value. This value is specified as an integer number of microseconds **type**\: int **range:** 0..4294967295 **units**\: microseconds .. attribute:: required_minimum_receive The minimum interval between received BFD control packets that this system should support. This value is advertised to the remote peer to indicate the maximum frequency (i.e., minimum inter\-packet interval) between BFD control packets that is acceptable to the local system **type**\: int **range:** 0..4294967295 **units**\: microseconds .. 
attribute:: detection_multiplier The number of packets that must be missed to declare this session as down. The detection interval for the BFD session is calculated by multiplying the value of the negotiated transmission interval by this value **type**\: int **range:** 1..65535 .. attribute:: enable_per_member_link When this leaf is set to true \- BFD will be enabled on each member interface of the aggregated Ethernet bundle **type**\: bool **default value**\: false """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Config, self).__init__() self.yang_name = "config" self.yang_parent_name = "interface" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('id', (YLeaf(YType.str, 'id'), ['str'])), ('enabled', (YLeaf(YType.boolean, 'enabled'), ['bool'])), ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])), ('desired_minimum_tx_interval', (YLeaf(YType.uint32, 'desired-minimum-tx-interval'), ['int'])), ('required_minimum_receive', (YLeaf(YType.uint32, 'required-minimum-receive'), ['int'])), ('detection_multiplier', (YLeaf(YType.uint16, 'detection-multiplier'), ['int'])), ('enable_per_member_link', (YLeaf(YType.boolean, 'enable-per-member-link'), ['bool'])), ]) self.id = None self.enabled = None self.local_address = None self.desired_minimum_tx_interval = None self.required_minimum_receive = None self.detection_multiplier = None self.enable_per_member_link = None self._segment_path = lambda: "config" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Config, ['id', 'enabled', 'local_address', 'desired_minimum_tx_interval', 'required_minimum_receive', 'detection_multiplier', 'enable_per_member_link'], name, value) class State(_Entity_): """ Operational state parameters for BFD on the specified 
interface. .. attribute:: id A unique identifier for the interface **type**\: str **config**\: False .. attribute:: enabled When this leaf is set to true then the BFD session is enabled on the specified interface \- if it is set to false, it is administratively disabled **type**\: bool **config**\: False .. attribute:: local_address The source IP address to be used for BFD sessions over this interface **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ **config**\: False .. attribute:: desired_minimum_tx_interval The minimum interval between transmission of BFD control packets that the operator desires. This value is advertised to the peer, however the actual interval used is specified by taking the maximum of desired\-minimum\-tx\-interval and the value of the remote required\-minimum\-receive interval value. This value is specified as an integer number of microseconds **type**\: int **range:** 0..4294967295 **config**\: False **units**\: microseconds .. attribute:: required_minimum_receive The minimum interval between received BFD control packets that this system should support. 
This value is advertised to the remote peer to indicate the maximum frequency (i.e., minimum inter\-packet interval) between BFD control packets that is acceptable to the local system **type**\: int **range:** 0..4294967295 **config**\: False **units**\: microseconds .. attribute:: detection_multiplier The number of packets that must be missed to declare this session as down. The detection interval for the BFD session is calculated by multiplying the value of the negotiated transmission interval by this value **type**\: int **range:** 1..65535 **config**\: False .. attribute:: enable_per_member_link When this leaf is set to true \- BFD will be enabled on each member interface of the aggregated Ethernet bundle **type**\: bool **config**\: False **default value**\: false """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "interface" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('id', (YLeaf(YType.str, 'id'), ['str'])), ('enabled', (YLeaf(YType.boolean, 'enabled'), ['bool'])), ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])), ('desired_minimum_tx_interval', (YLeaf(YType.uint32, 'desired-minimum-tx-interval'), ['int'])), ('required_minimum_receive', (YLeaf(YType.uint32, 'required-minimum-receive'), ['int'])), ('detection_multiplier', (YLeaf(YType.uint16, 'detection-multiplier'), ['int'])), ('enable_per_member_link', (YLeaf(YType.boolean, 'enable-per-member-link'), ['bool'])), ]) self.id = None self.enabled = None self.local_address = None self.desired_minimum_tx_interval = None self.required_minimum_receive = None self.detection_multiplier = None self.enable_per_member_link = None self._segment_path = lambda: "state" self._is_frozen = True def __setattr__(self, name, 
value):
                self._perform_setattr(Bfd.Interfaces.Interface.State, ['id', 'enabled', 'local_address', 'desired_minimum_tx_interval', 'required_minimum_receive', 'detection_multiplier', 'enable_per_member_link'], name, value)


            class InterfaceRef(_Entity_):
                """
                Reference to an interface or subinterface.

                .. attribute:: config

                    Configured reference to interface / subinterface
                    **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef.Config>`

                .. attribute:: state

                    Operational state for interface\-ref
                    **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef.State>`

                    **config**\: False

                """

                _prefix = 'oc-bfd'
                _revision = '2018-11-21'

                def __init__(self):
                    # Generated code supports both Python 2 and 3 super() styles.
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(Bfd.Interfaces.Interface.InterfaceRef, self).__init__()

                    self.yang_name = "interface-ref"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    # Maps YANG child container names to (python attribute, class).
                    self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.InterfaceRef.Config)), ("state", ("state", Bfd.Interfaces.Interface.InterfaceRef.State))])
                    self._leafs = OrderedDict()

                    self.config = Bfd.Interfaces.Interface.InterfaceRef.Config()
                    self.config.parent = self
                    self._children_name_map["config"] = "config"

                    self.state = Bfd.Interfaces.Interface.InterfaceRef.State()
                    self.state.parent = self
                    self._children_name_map["state"] = "state"
                    self._segment_path = lambda: "interface-ref"
                    # Freeze last: from here on attribute writes are validated
                    # by _perform_setattr in __setattr__.
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    # Route all attribute writes through YDK leaf validation.
                    self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef, [], name, value)


                class Config(_Entity_):
                    """
                    Configured reference to interface / subinterface.

                    .. attribute:: interface

                        Reference to a base interface. If a reference to a
                        subinterface is required, this leaf must be specified
                        to indicate the base interface.
                        **type**\: str
                        **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`

                    .. attribute:: subinterface

                        Reference to a subinterface \-\- this requires the base
                        interface to be specified using the interface leaf in
                        this container. If only a reference to a base interface
                        is required, this leaf should not be set.
                        **type**\: int
                        **range:** 0..4294967295
                        **refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`

                    """

                    _prefix = 'oc-bfd'
                    _revision = '2018-11-21'

                    def __init__(self):
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(Bfd.Interfaces.Interface.InterfaceRef.Config, self).__init__()

                        self.yang_name = "config"
                        self.yang_parent_name = "interface-ref"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        # NOTE(review): 'subinterface' is a leafref; the generator
                        # emits it as YType.str with a Python 'int' target type.
                        self._leafs = OrderedDict([
                            ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
                            ('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
                        ])
                        self.interface = None
                        self.subinterface = None
                        self._segment_path = lambda: "config"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef.Config, ['interface', 'subinterface'], name, value)


                class State(_Entity_):
                    """
                    Operational state for interface\-ref.

                    .. attribute:: interface

                        Reference to a base interface. If a reference to a
                        subinterface is required, this leaf must be specified
                        to indicate the base interface.
                        **type**\: str
                        **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`

                        **config**\: False

                    .. attribute:: subinterface

                        Reference to a subinterface \-\- this requires the base
                        interface to be specified using the interface leaf in
                        this container. If only a reference to a base interface
                        is required, this leaf should not be set.
                        **type**\: int
                        **range:** 0..4294967295
                        **refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`

                        **config**\: False

                    """

                    _prefix = 'oc-bfd'
                    _revision = '2018-11-21'

                    def __init__(self):
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(Bfd.Interfaces.Interface.InterfaceRef.State, self).__init__()

                        self.yang_name = "state"
                        self.yang_parent_name = "interface-ref"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
                            ('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
                        ])
                        self.interface = None
                        self.subinterface = None
                        self._segment_path = lambda: "state"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef.State, ['interface', 'subinterface'], name, value)


            class MicroBfdSessions(_Entity_):
                """
                Parameters relating to micro\-BFD sessions associated
                with the interface.

                .. attribute:: micro_bfd_session

                    This list contains configuration and state parameters
                    relating to micro\-BFD session
                    **type**\: list of :py:class:`MicroBfdSession <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession>`

                """

                _prefix = 'oc-bfd'
                _revision = '2018-11-21'

                def __init__(self):
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(Bfd.Interfaces.Interface.MicroBfdSessions, self).__init__()

                    self.yang_name = "micro-bfd-sessions"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("micro-bfd-session", ("micro_bfd_session", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession))])
                    self._leafs = OrderedDict()

                    # YANG 'list micro-bfd-session' is modelled as a YList.
                    self.micro_bfd_session = YList(self)
                    self._segment_path = lambda: "micro-bfd-sessions"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions, [], name, value)


                class MicroBfdSession(_Entity_):
                    """
                    This list contains configuration and state parameters
                    relating to micro\-BFD session.

                    .. attribute:: member_interface  (key)

                        A reference to the member interface of the link aggregate
                        **type**\: str
                        **refers to**\: :py:class:`member_interface <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config>`

                    .. attribute:: config

                        Configuration parameters for the micro\-BFD session
                        **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config>`

                    .. attribute:: state

                        Operational state parameters for the micro\-BFD session
                        **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State>`

                        **config**\: False

                    """

                    _prefix = 'oc-bfd'
                    _revision = '2018-11-21'

                    def __init__(self):
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession, self).__init__()

                        self.yang_name = "micro-bfd-session"
                        self.yang_parent_name = "micro-bfd-sessions"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        # 'member_interface' is the YANG list key.
                        self.ylist_key_names = ['member_interface']
                        self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config)), ("state", ("state", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State))])
                        self._leafs = OrderedDict([
                            ('member_interface', (YLeaf(YType.str, 'member-interface'), ['str'])),
                        ])
                        self.member_interface = None

                        self.config = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config()
                        self.config.parent = self
                        self._children_name_map["config"] = "config"

                        self.state = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State()
                        self.state.parent = self
                        self._children_name_map["state"] = "state"
                        # Segment path embeds the list-key predicate for this entry.
                        self._segment_path = lambda: "micro-bfd-session" + "[member-interface='" + str(self.member_interface) + "']"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession, ['member_interface'], name, value)


                    class Config(_Entity_):
                        """
                        Configuration parameters for the micro\-BFD session.

                        ..
attribute:: local_address

                            The local IP address used by the system for the
                            micro\-BFD session specified
                            **type**\: str (an IPv4 or IPv6 address)

                        .. attribute:: remote_address

                            The remote IP destination that should be used by the
                            system for the micro\-BFD session specified
                            **type**\: str (an IPv4 or IPv6 address)

                        .. attribute:: member_interface

                            Reference to a member link of the aggregate interface
                            being described
                            **type**\: str
                            **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Config>`

                        """

                        _prefix = 'oc-bfd'
                        _revision = '2018-11-21'

                        def __init__(self):
                            # Generated code supports both Python 2 and 3 super() styles.
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config, self).__init__()

                            self.yang_name = "config"
                            self.yang_parent_name = "micro-bfd-session"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            # Address leafs are unions of IPv4/IPv6 string patterns,
                            # hence the two 'str' entries per leaf.
                            self._leafs = OrderedDict([
                                ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
                                ('remote_address', (YLeaf(YType.str, 'remote-address'), ['str','str'])),
                                ('member_interface', (YLeaf(YType.str, 'member-interface'), ['str'])),
                            ])
                            self.local_address = None
                            self.remote_address = None
                            self.member_interface = None
                            self._segment_path = lambda: "config"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            # Route all attribute writes through YDK leaf validation.
                            self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config, ['local_address', 'remote_address', 'member_interface'], name, value)


                    class State(_Entity_):
                        """
                        Operational state parameters for the micro\-BFD session.

                        All leaves in this container are **config**\: False.

                        .. attribute:: local_address
                            The local IP address used by the system for the micro\-BFD session (IPv4 or IPv6 string).
                        .. attribute:: remote_address
                            The remote IP destination used for the micro\-BFD session (IPv4 or IPv6 string).
                        .. attribute:: member_interface
                            Reference to a member link of the aggregate interface being described. **type**\: str
                        .. attribute:: session_state
                            The state of the BFD session perceived by the local system. **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>`
                        .. attribute:: remote_session_state
                            The session state last reported by the remote system in a BFD control packet. **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>`
                        .. attribute:: last_failure_time
                            Time of the last transition out of the UP state, in nanoseconds since the Unix epoch. **type**\: int **range:** 0..18446744073709551615
                        .. attribute:: failure_transitions
                            Number of times the BFD session has transitioned out of the UP state. **type**\: int **range:** 0..18446744073709551615
                        .. attribute:: local_discriminator
                            A unique identifier used by the local system to identify this BFD session. **type**\: str
                        .. attribute:: remote_discriminator
                            A unique identifier used by the remote system to identify this BFD session. **type**\: str
                        .. attribute:: local_diagnostic_code
                            The local diagnostic code for the most recent failure of this session. **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>`
                        .. attribute:: remote_diagnostic_code
                            The remote system's diagnostic code for failure of this session. **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>`
                        .. attribute:: remote_minimum_receive_interval
                            Minimum receive interval from the most recent BFD control packet received from the peer. **type**\: int **range:** 0..4294967295
                        .. attribute:: demand_mode_requested
                            True when the remote system has requested demand mode for this session. **type**\: bool
                        .. attribute:: remote_authentication_enabled
                            True when the remote system indicates authentication is present for the session. **type**\: bool
                        .. attribute:: remote_control_plane_independent
                            True when the remote hardware implementing this session is independent of control\-plane liveliness. **type**\: bool
                        .. attribute:: async_
                            Operational state specific to asynchronous mode of BFD. **type**\: :py:class:`Async <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async>`

                        """

                        _prefix = 'oc-bfd'
                        _revision = '2018-11-21'

                        def __init__(self):
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State, self).__init__()

                            self.yang_name = "state"
                            self.yang_parent_name = "micro-bfd-session"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            # YANG child 'async' maps to python attribute 'async_'
                            # ('async' is a reserved word in modern Python).
                            self._child_classes = OrderedDict([("async", ("async_", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async))])
                            self._leafs = OrderedDict([
                                ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
                                ('remote_address', (YLeaf(YType.str, 'remote-address'), ['str','str'])),
                                ('member_interface', (YLeaf(YType.str, 'member-interface'), ['str'])),
                                ('session_state', (YLeaf(YType.enumeration, 'session-state'), [('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])),
                                ('remote_session_state', (YLeaf(YType.enumeration, 'remote-session-state'), [('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])),
                                ('last_failure_time', (YLeaf(YType.uint64, 'last-failure-time'), ['int'])),
                                ('failure_transitions', (YLeaf(YType.uint64, 'failure-transitions'), ['int'])),
                                ('local_discriminator', (YLeaf(YType.str, 'local-discriminator'), ['str'])),
                                ('remote_discriminator', (YLeaf(YType.str, 'remote-discriminator'), ['str'])),
                                ('local_diagnostic_code', (YLeaf(YType.enumeration, 'local-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])),
                                ('remote_diagnostic_code', (YLeaf(YType.enumeration, 'remote-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])),
                                ('remote_minimum_receive_interval', (YLeaf(YType.uint32, 'remote-minimum-receive-interval'), ['int'])),
                                ('demand_mode_requested', (YLeaf(YType.boolean, 'demand-mode-requested'), ['bool'])),
                                ('remote_authentication_enabled', (YLeaf(YType.boolean, 'remote-authentication-enabled'), ['bool'])),
                                ('remote_control_plane_independent', (YLeaf(YType.boolean, 'remote-control-plane-independent'), ['bool'])),
                            ])
                            self.local_address = None
                            self.remote_address = None
                            self.member_interface = None
                            self.session_state = None
                            self.remote_session_state = None
                            self.last_failure_time = None
                            self.failure_transitions = None
                            self.local_discriminator = None
                            self.remote_discriminator = None
                            self.local_diagnostic_code = None
                            self.remote_diagnostic_code = None
                            self.remote_minimum_receive_interval = None
                            self.demand_mode_requested = None
                            self.remote_authentication_enabled = None
                            self.remote_control_plane_independent = None

                            self.async_ = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async()
                            self.async_.parent = self
                            self._children_name_map["async_"] = "async"
                            self._segment_path = lambda: "state"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            # Route all attribute writes through YDK leaf validation.
                            self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State, ['local_address', 'remote_address', 'member_interface', 'session_state', 'remote_session_state', 'last_failure_time', 'failure_transitions', 'local_discriminator', 'remote_discriminator', 'local_diagnostic_code', 'remote_diagnostic_code', 'remote_minimum_receive_interval', 'demand_mode_requested', 'remote_authentication_enabled', 'remote_control_plane_independent'], name, value)


                        class Async(_Entity_):
                            """
                            Operational state parameters specifically relating to
                            asynchronous mode of BFD.

                            All leaves are **config**\: False counters/timestamps;
                            timestamps are nanoseconds since the Unix epoch and all
                            values have **range:** 0..18446744073709551615.

                            .. attribute:: last_packet_transmitted
                                Date and time at which the last BFD packet was transmitted for this session. **type**\: int
                            .. attribute:: last_packet_received
                                Date and time at which the last BFD packet was received for this session. **type**\: int
                            .. attribute:: transmitted_packets
                                Number of packets transmitted by the local system. **type**\: int
                            .. attribute:: received_packets
                                Number of packets received by the local system from the remote neighbour. **type**\: int
                            .. attribute:: up_transitions
                                Number of times the adjacency with the neighbor has transitioned into the up state. **type**\: int

                            """

                            _prefix = 'oc-bfd'
                            _revision = '2018-11-21'

                            def __init__(self):
                                if sys.version_info > (3,):
                                    super().__init__()
                                else:
                                    super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async, self).__init__()

                                self.yang_name = "async"
                                self.yang_parent_name = "state"
                                self.is_top_level_class = False
                                self.has_list_ancestor = True
                                self.ylist_key_names = []
                                self._child_classes = OrderedDict([])
                                self._leafs = OrderedDict([
                                    ('last_packet_transmitted', (YLeaf(YType.uint64, 'last-packet-transmitted'), ['int'])),
                                    ('last_packet_received', (YLeaf(YType.uint64, 'last-packet-received'), ['int'])),
                                    ('transmitted_packets', (YLeaf(YType.uint64, 'transmitted-packets'), ['int'])),
                                    ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])),
                                    ('up_transitions', (YLeaf(YType.uint64, 'up-transitions'), ['int'])),
                                ])
                                self.last_packet_transmitted = None
                                self.last_packet_received = None
                                self.transmitted_packets = None
                                self.received_packets = None
                                self.up_transitions = None
                                self._segment_path = lambda: "async"
                                self._is_frozen = True

                            def __setattr__(self, name, value):
                                self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async, ['last_packet_transmitted', 'last_packet_received', 'transmitted_packets', 'received_packets', 'up_transitions'], name, value)


            class Peers(_Entity_):
                """
                Parameters relating to the BFD peers which are seen over
                this interface.

                ..
attribute:: peer Parameters relating to the BFD peer specified by the remote address **type**\: list of :py:class:`Peer <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer>` **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers, self).__init__() self.yang_name = "peers" self.yang_parent_name = "interface" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("peer", ("peer", Bfd.Interfaces.Interface.Peers.Peer))]) self._leafs = OrderedDict() self.peer = YList(self) self._segment_path = lambda: "peers" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Peers, [], name, value) class Peer(_Entity_): """ Parameters relating to the BFD peer specified by the remote address. .. attribute:: local_discriminator (key) The local discriminator, which is unique for the session on the system **type**\: str **refers to**\: :py:class:`local_discriminator <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State>` **config**\: False .. 
attribute:: state Operational state parameters for the BFD session **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State>` **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers.Peer, self).__init__() self.yang_name = "peer" self.yang_parent_name = "peers" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = ['local_discriminator'] self._child_classes = OrderedDict([("state", ("state", Bfd.Interfaces.Interface.Peers.Peer.State))]) self._leafs = OrderedDict([ ('local_discriminator', (YLeaf(YType.str, 'local-discriminator'), ['str'])), ]) self.local_discriminator = None self.state = Bfd.Interfaces.Interface.Peers.Peer.State() self.state.parent = self self._children_name_map["state"] = "state" self._segment_path = lambda: "peer" + "[local-discriminator='" + str(self.local_discriminator) + "']" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer, ['local_discriminator'], name, value) class State(_Entity_): """ Operational state parameters for the BFD session. .. 
attribute:: local_address The IP address used by the local system for this BFD session **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ **config**\: False .. attribute:: remote_address The IP address used by the remote system for this BFD session **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ **config**\: False .. attribute:: subscribed_protocols Indicates the set of protocols that currently use this BFD session for liveliness detection **type**\: list of :py:class:`INSTALLPROTOCOLTYPE <ydk.models.openconfig.openconfig_policy_types.INSTALLPROTOCOLTYPE>` **config**\: False .. 
attribute:: session_state The state of the BFD session perceived by the local system **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>` **config**\: False .. attribute:: remote_session_state The reported state of the BFD session according to the remote system. This state reflects the last state reported in a BFD control packet **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>` **config**\: False .. attribute:: last_failure_time The time of the last transition of the BFD session out of the UP state, expressed as the number of nanoseconds since the Unix epoch **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: failure_transitions The number of times that the BFD session has transitioned out of the UP state **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: local_discriminator A unique identifier used by the local system to identify this BFD session **type**\: str **config**\: False .. attribute:: remote_discriminator A unique identified used by the remote system to identify this BFD session **type**\: str **config**\: False .. attribute:: local_diagnostic_code The local BFD diagnostic code indicating the most recent reason for failure of this BFD session **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>` **config**\: False .. attribute:: remote_diagnostic_code The remote BFD diagnostic code indicating the remote system's reason for failure of the BFD session **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>` **config**\: False .. attribute:: remote_minimum_receive_interval The value of the minimum receive interval that was specified in the most recent BFD control packet received from the peer **type**\: int **range:** 0..4294967295 **config**\: False .. 
attribute:: demand_mode_requested This leaf is set to true when the remote system has requested demand mode be run for this session **type**\: bool **config**\: False .. attribute:: remote_authentication_enabled This leaf is set to true when the remote system has specified that authentication is present for the BFD session **type**\: bool **config**\: False .. attribute:: remote_control_plane_independent This leaf is set to true when the remote system has specified that the hardware implementing this BFD session is independent of the control plane's liveliness **type**\: bool **config**\: False .. attribute:: echo Operational state parameters specifically relating to the echo mode of BFD **type**\: :py:class:`Echo <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State.Echo>` **config**\: False .. attribute:: async_ Operational state parameters specifically relating to asynchronous mode of BFD **type**\: :py:class:`Async <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State.Async>` **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers.Peer.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "peer" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("echo", ("echo", Bfd.Interfaces.Interface.Peers.Peer.State.Echo)), ("async", ("async_", Bfd.Interfaces.Interface.Peers.Peer.State.Async))]) self._leafs = OrderedDict([ ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])), ('remote_address', (YLeaf(YType.str, 'remote-address'), ['str','str'])), ('subscribed_protocols', (YLeafList(YType.identityref, 'subscribed-protocols'), [('ydk.models.openconfig.openconfig_policy_types', 'INSTALLPROTOCOLTYPE')])), ('session_state', (YLeaf(YType.enumeration, 'session-state'), 
[('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])), ('remote_session_state', (YLeaf(YType.enumeration, 'remote-session-state'), [('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])), ('last_failure_time', (YLeaf(YType.uint64, 'last-failure-time'), ['int'])), ('failure_transitions', (YLeaf(YType.uint64, 'failure-transitions'), ['int'])), ('local_discriminator', (YLeaf(YType.str, 'local-discriminator'), ['str'])), ('remote_discriminator', (YLeaf(YType.str, 'remote-discriminator'), ['str'])), ('local_diagnostic_code', (YLeaf(YType.enumeration, 'local-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])), ('remote_diagnostic_code', (YLeaf(YType.enumeration, 'remote-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])), ('remote_minimum_receive_interval', (YLeaf(YType.uint32, 'remote-minimum-receive-interval'), ['int'])), ('demand_mode_requested', (YLeaf(YType.boolean, 'demand-mode-requested'), ['bool'])), ('remote_authentication_enabled', (YLeaf(YType.boolean, 'remote-authentication-enabled'), ['bool'])), ('remote_control_plane_independent', (YLeaf(YType.boolean, 'remote-control-plane-independent'), ['bool'])), ]) self.local_address = None self.remote_address = None self.subscribed_protocols = [] self.session_state = None self.remote_session_state = None self.last_failure_time = None self.failure_transitions = None self.local_discriminator = None self.remote_discriminator = None self.local_diagnostic_code = None self.remote_diagnostic_code = None self.remote_minimum_receive_interval = None self.demand_mode_requested = None self.remote_authentication_enabled = None self.remote_control_plane_independent = None self.echo = Bfd.Interfaces.Interface.Peers.Peer.State.Echo() self.echo.parent = self self._children_name_map["echo"] = "echo" self.async_ = Bfd.Interfaces.Interface.Peers.Peer.State.Async() self.async_.parent = self self._children_name_map["async_"] = "async" 
                            self._segment_path = lambda: "state"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            # Generated entities are frozen: only the schema leaf names
                            # listed here may be assigned after construction.
                            self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer.State, ['local_address', 'remote_address', 'subscribed_protocols', 'session_state', 'remote_session_state', 'last_failure_time', 'failure_transitions', 'local_discriminator', 'remote_discriminator', 'local_diagnostic_code', 'remote_diagnostic_code', 'remote_minimum_receive_interval', 'demand_mode_requested', 'remote_authentication_enabled', 'remote_control_plane_independent'], name, value)


                        class Echo(_Entity_):
                            """
                            Operational state parameters specifically relating to the
                            echo mode of BFD. All nodes are operational state
                            (``config false``).

                            .. attribute:: active

                                True when echo mode is running between the local and
                                remote system; False means solely asynchronous mode
                                is active

                                **type**\: bool

                            .. attribute:: last_packet_transmitted

                                Time the last BFD packet was transmitted for this
                                session, in nanoseconds since the Unix epoch

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: last_packet_received

                                Time the last BFD packet was received for this
                                session, in nanoseconds since the Unix epoch

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: transmitted_packets

                                Number of packets transmitted by the local system

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: received_packets

                                Number of packets received from the remote neighbour

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: up_transitions

                                Number of times the adjacency with the neighbor has
                                transitioned into the up state

                                **type**\: int  (**range:** 0..18446744073709551615)

                            """

                            _prefix = 'oc-bfd'
                            _revision = '2018-11-21'

                            def __init__(self):
                                # Python 2/3 compatible super() call emitted by ydk-gen.
                                if sys.version_info > (3,):
                                    super().__init__()
                                else:
                                    super(Bfd.Interfaces.Interface.Peers.Peer.State.Echo, self).__init__()

                                self.yang_name = "echo"
                                self.yang_parent_name = "state"
                                self.is_top_level_class = False
                                self.has_list_ancestor = True
                                self.ylist_key_names = []
                                self._child_classes = OrderedDict([])
                                # Leaf table: python name -> (YLeaf(type, yang-name), doc types).
                                self._leafs = OrderedDict([
                                    ('active', (YLeaf(YType.boolean, 'active'), ['bool'])),
                                    ('last_packet_transmitted', (YLeaf(YType.uint64, 'last-packet-transmitted'), ['int'])),
                                    ('last_packet_received', (YLeaf(YType.uint64, 'last-packet-received'), ['int'])),
                                    ('transmitted_packets', (YLeaf(YType.uint64, 'transmitted-packets'), ['int'])),
                                    ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])),
                                    ('up_transitions', (YLeaf(YType.uint64, 'up-transitions'), ['int'])),
                                ])
                                self.active = None
                                self.last_packet_transmitted = None
                                self.last_packet_received = None
                                self.transmitted_packets = None
                                self.received_packets = None
                                self.up_transitions = None
                                self._segment_path = lambda: "echo"
                                self._is_frozen = True

                            def __setattr__(self, name, value):
                                self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer.State.Echo, ['active', 'last_packet_transmitted', 'last_packet_received', 'transmitted_packets', 'received_packets', 'up_transitions'], name, value)


                        class Async(_Entity_):
                            """
                            Operational state parameters specifically relating to
                            asynchronous mode of BFD. All nodes are operational state
                            (``config false``).

                            .. attribute:: last_packet_transmitted

                                Time the last BFD packet was transmitted for this
                                session, in nanoseconds since the Unix epoch

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: last_packet_received

                                Time the last BFD packet was received for this
                                session, in nanoseconds since the Unix epoch

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: transmitted_packets

                                Number of packets transmitted by the local system

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: received_packets

                                Number of packets received from the remote neighbour

                                **type**\: int  (**range:** 0..18446744073709551615)

                            .. attribute:: up_transitions

                                Number of times the adjacency with the neighbor has
                                transitioned into the up state

                                **type**\: int  (**range:** 0..18446744073709551615)

                            """

                            _prefix = 'oc-bfd'
                            _revision = '2018-11-21'

                            def __init__(self):
                                # Python 2/3 compatible super() call emitted by ydk-gen.
                                if sys.version_info > (3,):
                                    super().__init__()
                                else:
                                    super(Bfd.Interfaces.Interface.Peers.Peer.State.Async, self).__init__()

                                self.yang_name = "async"
                                self.yang_parent_name = "state"
                                self.is_top_level_class = False
                                self.has_list_ancestor = True
                                self.ylist_key_names = []
                                self._child_classes = OrderedDict([])
                                # Leaf table: python name -> (YLeaf(type, yang-name), doc types).
                                self._leafs = OrderedDict([
                                    ('last_packet_transmitted', (YLeaf(YType.uint64, 'last-packet-transmitted'), ['int'])),
                                    ('last_packet_received', (YLeaf(YType.uint64, 'last-packet-received'), ['int'])),
                                    ('transmitted_packets', (YLeaf(YType.uint64, 'transmitted-packets'), ['int'])),
                                    ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])),
                                    ('up_transitions', (YLeaf(YType.uint64, 'up-transitions'), ['int'])),
                                ])
                                self.last_packet_transmitted = None
                                self.last_packet_received = None
                                self.transmitted_packets = None
                                self.received_packets = None
                                self.up_transitions = None
                                self._segment_path = lambda: "async"
                                self._is_frozen = True

                            def __setattr__(self, name, value):
                                self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer.State.Async, ['last_packet_transmitted', 'last_packet_received', 'transmitted_packets', 'received_packets', 'up_transitions'], name, value)

    def clone_ptr(self):
        # Create and return a fresh top-level Bfd instance; used by the YDK
        # runtime when it needs its own copy of the top entity.
        self._top_entity = Bfd()
        return self._top_entity
openconfig/ydk/models/openconfig/openconfig_bfd.py
import sys
from collections import OrderedDict

from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
# NOTE(review): the next import repeats the line above and additionally binds
# ``Entity``; kept as emitted because this module is generated by ydk-gen.
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error



class BfdDiagnosticCode(Enum):
    """
    BfdDiagnosticCode (Enum Class)

    Diagnostic codes defined by BFD. These typically indicate the
    reason for a change of session state.

    .. data:: NO_DIAGNOSTIC = 0

        No diagnostic code was specified, or the session has not
        changed state.

    .. data:: DETECTION_TIMEOUT = 1

        The control detection time expired: no BFD packet was received
        within the required period.

    .. data:: ECHO_FAILED = 2

        The BFD echo function failed - echo packets have not been
        received for the required period of time.

    .. data:: FORWARDING_RESET = 3

        The forwarding plane in the local system was reset, such that
        the remote system cannot rely on the forwarding state of the
        device specifying this error code.

    .. data:: PATH_DOWN = 4

        Signalling outside of BFD specified that the path underlying
        this session has failed.

    .. data:: CONCATENATED_PATH_DOWN = 5

        When a BFD session runs over a series of path segments, a
        subsequent path segment (i.e., one in the transmit path between
        the source and destination of the session) has failed.

    .. data:: ADMIN_DOWN = 6

        The BFD session has been administratively disabled by the peer.

    .. data:: REVERSE_CONCATENATED_PATH_DOWN = 7

        When a BFD session runs over a series of path segments, a path
        segment on the reverse path (i.e., in the transmit direction
        from the destination to the source of the session) has failed.

    """

    NO_DIAGNOSTIC = Enum.YLeaf(0, "NO_DIAGNOSTIC")

    DETECTION_TIMEOUT = Enum.YLeaf(1, "DETECTION_TIMEOUT")

    ECHO_FAILED = Enum.YLeaf(2, "ECHO_FAILED")

    FORWARDING_RESET = Enum.YLeaf(3, "FORWARDING_RESET")

    PATH_DOWN = Enum.YLeaf(4, "PATH_DOWN")

    CONCATENATED_PATH_DOWN = Enum.YLeaf(5, "CONCATENATED_PATH_DOWN")

    ADMIN_DOWN = Enum.YLeaf(6, "ADMIN_DOWN")

    REVERSE_CONCATENATED_PATH_DOWN = Enum.YLeaf(7, "REVERSE_CONCATENATED_PATH_DOWN")


class BfdSessionState(Enum):
    """
    BfdSessionState (Enum Class)

    The state of the BFD session according to the system referred to
    by the context of the leaf.

    .. data:: UP = 0

        The BFD session is perceived to be up by the system.

    .. data:: DOWN = 1

        The BFD session is perceived to be down by the system.

    .. data:: ADMIN_DOWN = 2

        The BFD session is administratively disabled.

    .. data:: INIT = 3

        The BFD session is perceived to be initialising by the system.

    """

    UP = Enum.YLeaf(0, "UP")

    DOWN = Enum.YLeaf(1, "DOWN")

    ADMIN_DOWN = Enum.YLeaf(2, "ADMIN_DOWN")

    INIT = Enum.YLeaf(3, "INIT")



class Bfd(_Entity_):
    """
    Configuration and operational state parameters for BFD.

    ..
    attribute:: interfaces

        Interfaces on which BFD sessions are to be enabled

        **type**\: :py:class:`Interfaces <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces>`

    """

    _prefix = 'oc-bfd'
    _revision = '2018-11-21'

    def __init__(self):
        # Python 2/3 compatible super() call emitted by ydk-gen.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(Bfd, self).__init__()
        self._top_entity = None

        self.yang_name = "bfd"
        self.yang_parent_name = "openconfig-bfd"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # Child containers: yang-name -> (python attribute name, class).
        self._child_classes = OrderedDict([("interfaces", ("interfaces", Bfd.Interfaces))])
        self._leafs = OrderedDict()

        self.interfaces = Bfd.Interfaces()
        self.interfaces.parent = self
        self._children_name_map["interfaces"] = "interfaces"
        self._segment_path = lambda: "openconfig-bfd:bfd"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Frozen entity: restrict attribute assignment to schema leaves.
        self._perform_setattr(Bfd, [], name, value)


    class Interfaces(_Entity_):
        """
        Interfaces on which BFD sessions are to be enabled.

        .. attribute:: interface

            Per\-interface configuration and state parameters for BFD

            **type**\: list of :py:class:`Interface <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface>`

        """

        _prefix = 'oc-bfd'
        _revision = '2018-11-21'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(Bfd.Interfaces, self).__init__()

            self.yang_name = "interfaces"
            self.yang_parent_name = "bfd"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("interface", ("interface", Bfd.Interfaces.Interface))])
            self._leafs = OrderedDict()

            self.interface = YList(self)
            self._segment_path = lambda: "interfaces"
            self._absolute_path = lambda: "openconfig-bfd:bfd/%s" % self._segment_path()
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(Bfd.Interfaces, [], name, value)


        class Interface(_Entity_):
            """
            Per\-interface configuration and state parameters for BFD.

            .. attribute:: id  (key)

                A reference to an identifier for the interface on which
                BFD is enabled

                **type**\: str

                **refers to**\: :py:class:`id <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Config>`

            .. attribute:: config

                Configuration parameters for BFD on the specified interface

                **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Config>`

            .. attribute:: state

                Operational state parameters for BFD on the specified
                interface (``config false``)

                **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.State>`

            .. attribute:: interface_ref

                Reference to an interface or subinterface

                **type**\: :py:class:`InterfaceRef <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef>`

            .. attribute:: micro_bfd_sessions

                Parameters relating to micro\-BFD sessions associated
                with the interface

                **type**\: :py:class:`MicroBfdSessions <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions>`

            .. attribute:: peers

                Parameters relating to the BFD peers which are seen over
                this interface

                **type**\: :py:class:`Peers <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers>`

            """

            _prefix = 'oc-bfd'
            _revision = '2018-11-21'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(Bfd.Interfaces.Interface, self).__init__()

                self.yang_name = "interface"
                self.yang_parent_name = "interfaces"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                self.ylist_key_names = ['id']
                self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.Config)), ("state", ("state", Bfd.Interfaces.Interface.State)), ("interface-ref", ("interface_ref", Bfd.Interfaces.Interface.InterfaceRef)), ("micro-bfd-sessions", ("micro_bfd_sessions", Bfd.Interfaces.Interface.MicroBfdSessions)), ("peers", ("peers", Bfd.Interfaces.Interface.Peers))])
                self._leafs = OrderedDict([
                    ('id', (YLeaf(YType.str, 'id'), ['str'])),
                ])
                self.id = None

                self.config = Bfd.Interfaces.Interface.Config()
                self.config.parent = self
                self._children_name_map["config"] = "config"

                self.state = Bfd.Interfaces.Interface.State()
                self.state.parent = self
                self._children_name_map["state"] = "state"

                self.interface_ref = Bfd.Interfaces.Interface.InterfaceRef()
                self.interface_ref.parent = self
                self._children_name_map["interface_ref"] = "interface-ref"

                self.micro_bfd_sessions = Bfd.Interfaces.Interface.MicroBfdSessions()
                self.micro_bfd_sessions.parent = self
                self._children_name_map["micro_bfd_sessions"] = "micro-bfd-sessions"

                self.peers = Bfd.Interfaces.Interface.Peers()
                self.peers.parent = self
                self._children_name_map["peers"] = "peers"
                # List entry path carries the 'id' key predicate.
                self._segment_path = lambda: "interface" + "[id='" + str(self.id) + "']"
                self._absolute_path = lambda: "openconfig-bfd:bfd/interfaces/%s" % self._segment_path()
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(Bfd.Interfaces.Interface, ['id'], name, value)


            class Config(_Entity_):
                """
                Configuration parameters for BFD on the specified
                interface.

                .. attribute:: id

                    A unique identifier for the interface

                    **type**\: str

                .. attribute:: enabled

                    When true, the BFD session is enabled on the
                    specified interface; when false it is
                    administratively disabled

                    **type**\: bool

                .. attribute:: local_address

                    The source IP address to be used for BFD sessions
                    over this interface

                    **type**\: str (an IPv4 or IPv6 address string)

                .. attribute:: desired_minimum_tx_interval

                    The minimum interval between transmission of BFD
                    control packets that the operator desires,
                    advertised to the peer; the actual interval used is
                    the maximum of this value and the remote
                    required\-minimum\-receive interval

                    **type**\: int  (**range:** 0..4294967295)

                    **units**\: microseconds

                .. attribute:: required_minimum_receive

                    The minimum interval between received BFD control
                    packets that this system should support, advertised
                    to the remote peer

                    **type**\: int  (**range:** 0..4294967295)

                    **units**\: microseconds

                .. attribute:: detection_multiplier

                    The number of packets that must be missed to declare
                    this session as down; the detection interval is the
                    negotiated transmission interval multiplied by this
                    value

                    **type**\: int  (**range:** 1..65535)

                .. attribute:: enable_per_member_link

                    When true, BFD will be enabled on each member
                    interface of the aggregated Ethernet bundle

                    **type**\: bool  (**default value**\: false)

                """

                _prefix = 'oc-bfd'
                _revision = '2018-11-21'

                def __init__(self):
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(Bfd.Interfaces.Interface.Config, self).__init__()

                    self.yang_name = "config"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([])
                    self._leafs = OrderedDict([
                        ('id', (YLeaf(YType.str, 'id'), ['str'])),
                        ('enabled', (YLeaf(YType.boolean, 'enabled'), ['bool'])),
                        ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
                        ('desired_minimum_tx_interval', (YLeaf(YType.uint32, 'desired-minimum-tx-interval'), ['int'])),
                        ('required_minimum_receive', (YLeaf(YType.uint32, 'required-minimum-receive'), ['int'])),
                        ('detection_multiplier', (YLeaf(YType.uint16, 'detection-multiplier'), ['int'])),
                        ('enable_per_member_link', (YLeaf(YType.boolean, 'enable-per-member-link'), ['bool'])),
                    ])
                    self.id = None
                    self.enabled = None
                    self.local_address = None
                    self.desired_minimum_tx_interval = None
                    self.required_minimum_receive = None
                    self.detection_multiplier = None
                    self.enable_per_member_link = None
                    self._segment_path = lambda: "config"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Bfd.Interfaces.Interface.Config, ['id', 'enabled', 'local_address', 'desired_minimum_tx_interval', 'required_minimum_receive', 'detection_multiplier', 'enable_per_member_link'], name, value)


            class State(_Entity_):
                """
                Operational state parameters for BFD on the specified
                interface. Mirrors ``Config``; all nodes are
                operational state (``config false``).

                .. attribute:: id

                    A unique identifier for the interface

                    **type**\: str

                .. attribute:: enabled

                    When true, the BFD session is enabled on the
                    specified interface; when false it is
                    administratively disabled

                    **type**\: bool

                .. attribute:: local_address

                    The source IP address to be used for BFD sessions
                    over this interface

                    **type**\: str (an IPv4 or IPv6 address string)

                .. attribute:: desired_minimum_tx_interval

                    The minimum interval between transmission of BFD
                    control packets that the operator desires,
                    advertised to the peer

                    **type**\: int  (**range:** 0..4294967295)

                    **units**\: microseconds

                .. attribute:: required_minimum_receive

                    The minimum interval between received BFD control
                    packets that this system should support, advertised
                    to the remote peer

                    **type**\: int  (**range:** 0..4294967295)

                    **units**\: microseconds

                .. attribute:: detection_multiplier

                    The number of packets that must be missed to declare
                    this session as down

                    **type**\: int  (**range:** 1..65535)

                .. attribute:: enable_per_member_link

                    When true, BFD will be enabled on each member
                    interface of the aggregated Ethernet bundle

                    **type**\: bool  (**default value**\: false)

                """

                _prefix = 'oc-bfd'
                _revision = '2018-11-21'

                def __init__(self):
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(Bfd.Interfaces.Interface.State, self).__init__()

                    self.yang_name = "state"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([])
                    self._leafs = OrderedDict([
                        ('id', (YLeaf(YType.str, 'id'), ['str'])),
                        ('enabled', (YLeaf(YType.boolean, 'enabled'), ['bool'])),
                        ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
                        ('desired_minimum_tx_interval', (YLeaf(YType.uint32, 'desired-minimum-tx-interval'), ['int'])),
                        ('required_minimum_receive', (YLeaf(YType.uint32, 'required-minimum-receive'), ['int'])),
                        ('detection_multiplier', (YLeaf(YType.uint16, 'detection-multiplier'), ['int'])),
                        ('enable_per_member_link', (YLeaf(YType.boolean, 'enable-per-member-link'), ['bool'])),
                    ])
                    self.id = None
                    self.enabled = None
                    self.local_address = None
                    self.desired_minimum_tx_interval = None
                    self.required_minimum_receive = None
                    self.detection_multiplier = None
                    self.enable_per_member_link = None
                    self._segment_path = lambda: "state"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Bfd.Interfaces.Interface.State, ['id', 'enabled', 'local_address', 'desired_minimum_tx_interval', 'required_minimum_receive', 'detection_multiplier', 'enable_per_member_link'], name, value)


            class InterfaceRef(_Entity_):
                """
                Reference to an interface or subinterface.

                .. attribute:: config

                    Configured reference to interface / subinterface

                    **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef.Config>`

                .. attribute:: state

                    Operational state for interface\-ref
                    (``config false``)

                    **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef.State>`

                """

                _prefix = 'oc-bfd'
                _revision = '2018-11-21'

                def __init__(self):
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(Bfd.Interfaces.Interface.InterfaceRef, self).__init__()

                    self.yang_name = "interface-ref"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.InterfaceRef.Config)), ("state", ("state", Bfd.Interfaces.Interface.InterfaceRef.State))])
                    self._leafs = OrderedDict()

                    self.config = Bfd.Interfaces.Interface.InterfaceRef.Config()
                    self.config.parent = self
                    self._children_name_map["config"] = "config"

                    self.state = Bfd.Interfaces.Interface.InterfaceRef.State()
                    self.state.parent = self
                    self._children_name_map["state"] = "state"
                    self._segment_path = lambda: "interface-ref"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef, [], name, value)


                class Config(_Entity_):
                    """
                    Configured reference to interface / subinterface.

                    .. attribute:: interface

                        Reference to a base interface; if a reference to
                        a subinterface is required, this leaf must be
                        specified to indicate the base interface

                        **type**\: str

                        **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`

                    .. attribute:: subinterface

                        Reference to a subinterface \-\- requires the
                        base interface to be specified using the
                        interface leaf in this container; if only a
                        reference to a base interface is required, this
                        leaf should not be set

                        **type**\: int  (**range:** 0..4294967295)

                        **refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`

                    """

                    _prefix = 'oc-bfd'
                    _revision = '2018-11-21'

                    def __init__(self):
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(Bfd.Interfaces.Interface.InterfaceRef.Config, self).__init__()

                        self.yang_name = "config"
                        self.yang_parent_name = "interface-ref"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
                            ('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
                        ])
                        self.interface = None
                        self.subinterface = None
                        self._segment_path = lambda: "config"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef.Config, ['interface', 'subinterface'], name, value)


                class State(_Entity_):
                    """
                    Operational state for interface\-ref. Mirrors
                    ``Config``; all nodes are operational state
                    (``config false``).

                    .. attribute:: interface

                        Reference to a base interface; if a reference to
                        a subinterface is required, this leaf must be
                        specified to indicate the base interface

                        **type**\: str

                        **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`

                    .. attribute:: subinterface

                        Reference to a subinterface \-\- requires the
                        base interface to be specified using the
                        interface leaf in this container

                        **type**\: int  (**range:** 0..4294967295)

                        **refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`

                    """

                    _prefix = 'oc-bfd'
                    _revision = '2018-11-21'

                    def __init__(self):
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(Bfd.Interfaces.Interface.InterfaceRef.State, self).__init__()

                        self.yang_name = "state"
                        self.yang_parent_name = "interface-ref"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
                            ('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
                        ])
                        self.interface = None
                        self.subinterface = None
                        self._segment_path = lambda: "state"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef.State, ['interface', 'subinterface'], name, value)


            class MicroBfdSessions(_Entity_):
                """
                Parameters relating to micro\-BFD sessions associated
                with the interface.

                .. attribute:: micro_bfd_session

                    Configuration and state parameters relating to a
                    micro\-BFD session

                    **type**\: list of :py:class:`MicroBfdSession <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession>`

                """

                _prefix = 'oc-bfd'
                _revision = '2018-11-21'

                def __init__(self):
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(Bfd.Interfaces.Interface.MicroBfdSessions, self).__init__()

                    self.yang_name = "micro-bfd-sessions"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("micro-bfd-session", ("micro_bfd_session", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession))])
                    self._leafs = OrderedDict()

                    self.micro_bfd_session = YList(self)
                    self._segment_path = lambda: "micro-bfd-sessions"
                    self._is_frozen = True

                def __setattr__(self, name, value):
                    self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions, [], name, value)


                class MicroBfdSession(_Entity_):
                    """
                    Configuration and state parameters relating to a
                    micro\-BFD session.

                    .. attribute:: member_interface  (key)

                        A reference to the member interface of the link
                        aggregate

                        **type**\: str

                        **refers to**\: :py:class:`member_interface <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config>`

                    .. attribute:: config

                        Configuration parameters for the micro\-BFD
                        session

                        **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config>`

                    .. attribute:: state

                        Operational state parameters for the micro\-BFD
                        session (``config false``)

                        **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State>`

                    """

                    _prefix = 'oc-bfd'
                    _revision = '2018-11-21'

                    def __init__(self):
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession, self).__init__()

                        self.yang_name = "micro-bfd-session"
                        self.yang_parent_name = "micro-bfd-sessions"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = ['member_interface']
                        self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config)), ("state", ("state", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State))])
                        self._leafs = OrderedDict([
                            ('member_interface', (YLeaf(YType.str, 'member-interface'), ['str'])),
                        ])
                        self.member_interface = None

                        self.config = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config()
                        self.config.parent = self
                        self._children_name_map["config"] = "config"

                        self.state = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State()
                        self.state.parent = self
                        self._children_name_map["state"] = "state"
                        # List entry path carries the 'member-interface' key predicate.
                        self._segment_path = lambda: "micro-bfd-session" + "[member-interface='" + str(self.member_interface) + "']"
                        self._is_frozen = True

                    def __setattr__(self, name, value):
                        self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession, ['member_interface'], name, value)


                    class Config(_Entity_):
                        """
                        Configuration parameters for the micro\-BFD
                        session.

                        .. attribute:: local_address

                            The local IP address used by the system for
                            the micro\-BFD session specified

                            **type**\: str (an IPv4 or IPv6 address
                            string)

                        .. attribute:: remote_address

                            The remote IP destination that should be
                            used by the system for the micro\-BFD
                            session specified

                            **type**\: str (an IPv4 or IPv6 address
                            string)

                        .. attribute:: member_interface

                            Reference to a member link of the aggregate
                            interface being described

                            **type**\: str

                            **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Config>`

                        """

                        _prefix = 'oc-bfd'
                        _revision = '2018-11-21'

                        def __init__(self):
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config, self).__init__()

                            self.yang_name = "config"
                            self.yang_parent_name = "micro-bfd-session"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
                                ('remote_address', (YLeaf(YType.str, 'remote-address'), ['str','str'])),
                                ('member_interface', (YLeaf(YType.str, 'member-interface'), ['str'])),
                            ])
                            self.local_address = None
                            self.remote_address = None
                            self.member_interface = None
                            self._segment_path = lambda: "config"
                            self._is_frozen = True

                        def __setattr__(self, name, value):
                            self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config, ['local_address', 'remote_address', 'member_interface'], name, value)


                    class State(_Entity_):
                        """
                        Operational state parameters for the micro\-BFD
                        session. All nodes are operational state
                        (``config false``).

                        .. attribute:: local_address

                            The local IP address used by the system for
                            the micro\-BFD session specified

                            **type**\: str (an IPv4 or IPv6 address
                            string)

                        .. attribute:: remote_address

                            The remote IP destination that should be
                            used by the system for the micro\-BFD
                            session specified

                            **type**\: str (an IPv4 or IPv6 address
                            string)

                        .. attribute:: member_interface

                            Reference to a member link of the aggregate
                            interface being described

                            **type**\: str

                            **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Config>`

                        .. attribute:: session_state

                            The state of the BFD session perceived by
                            the local system

                            **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>`

                        .. attribute:: remote_session_state

                            The reported state of the BFD session
                            according to the remote system, reflecting
                            the last state reported in a BFD control
                            packet

                            **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>`

                        .. attribute:: last_failure_time

                            Time of the last transition of the BFD
                            session out of the UP state, in nanoseconds
                            since the Unix epoch

                            **type**\: int  (**range:** 0..18446744073709551615)

                        .. attribute:: failure_transitions

                            The number of times that the BFD session has
                            transitioned out of the UP state

                            **type**\: int  (**range:** 0..18446744073709551615)

                        .. attribute:: local_discriminator

                            A unique identifier used by the local system
                            to identify this BFD session

                            **type**\: str

                        .. attribute:: remote_discriminator

                            A unique identifier used by the remote
                            system to identify this BFD session

                            **type**\: str

                        .. attribute:: local_diagnostic_code

                            The local BFD diagnostic code indicating the
                            most recent reason for failure of this BFD
                            session

                            **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>`

                        .. attribute:: remote_diagnostic_code

                            The remote BFD diagnostic code indicating
                            the remote system's reason for failure of
                            the BFD session

                            **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>`

                        .. attribute:: remote_minimum_receive_interval

                            The value of the minimum receive interval
                            that was specified in the most recent BFD
                            control packet received from the peer

                            **type**\: int  (**range:** 0..4294967295)

                        .. attribute:: demand_mode_requested

                            True when the remote system has requested
                            demand mode be run for this session

                            **type**\: bool

                        .. attribute:: remote_authentication_enabled

                            True when the remote system has specified
                            that authentication is present for the BFD
                            session

                            **type**\: bool

                        .. attribute:: remote_control_plane_independent

                            True when the remote system has specified
                            that the hardware implementing this BFD
                            session is independent of the control
                            plane's liveliness

                            **type**\: bool

                        .. attribute:: async_

                            Operational state parameters specifically
                            relating to asynchronous mode of BFD

                            **type**\: :py:class:`Async <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async>`

                        """

                        _prefix = 'oc-bfd'
                        _revision = '2018-11-21'

                        def __init__(self):
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State, self).__init__()

                            self.yang_name = "state"
                            self.yang_parent_name = "micro-bfd-session"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([("async", ("async_", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async))])
                            self._leafs = OrderedDict([
                                ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
                                ('remote_address', (YLeaf(YType.str, 'remote-address'), ['str','str'])),
                                ('member_interface', (YLeaf(YType.str, 'member-interface'), ['str'])),
                                ('session_state', (YLeaf(YType.enumeration, 'session-state'), [('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])),
                                ('remote_session_state', (YLeaf(YType.enumeration, 'remote-session-state'), [('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])),
                                ('last_failure_time', (YLeaf(YType.uint64, 'last-failure-time'), ['int'])),
                                ('failure_transitions', (YLeaf(YType.uint64,
'failure-transitions'), ['int'])), ('local_discriminator', (YLeaf(YType.str, 'local-discriminator'), ['str'])), ('remote_discriminator', (YLeaf(YType.str, 'remote-discriminator'), ['str'])), ('local_diagnostic_code', (YLeaf(YType.enumeration, 'local-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])), ('remote_diagnostic_code', (YLeaf(YType.enumeration, 'remote-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])), ('remote_minimum_receive_interval', (YLeaf(YType.uint32, 'remote-minimum-receive-interval'), ['int'])), ('demand_mode_requested', (YLeaf(YType.boolean, 'demand-mode-requested'), ['bool'])), ('remote_authentication_enabled', (YLeaf(YType.boolean, 'remote-authentication-enabled'), ['bool'])), ('remote_control_plane_independent', (YLeaf(YType.boolean, 'remote-control-plane-independent'), ['bool'])), ]) self.local_address = None self.remote_address = None self.member_interface = None self.session_state = None self.remote_session_state = None self.last_failure_time = None self.failure_transitions = None self.local_discriminator = None self.remote_discriminator = None self.local_diagnostic_code = None self.remote_diagnostic_code = None self.remote_minimum_receive_interval = None self.demand_mode_requested = None self.remote_authentication_enabled = None self.remote_control_plane_independent = None self.async_ = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async() self.async_.parent = self self._children_name_map["async_"] = "async" self._segment_path = lambda: "state" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State, ['local_address', 'remote_address', 'member_interface', 'session_state', 'remote_session_state', 'last_failure_time', 'failure_transitions', 'local_discriminator', 'remote_discriminator', 'local_diagnostic_code', 'remote_diagnostic_code', 
'remote_minimum_receive_interval', 'demand_mode_requested', 'remote_authentication_enabled', 'remote_control_plane_independent'], name, value) class Async(_Entity_): """ Operational state parameters specifically relating to asynchronous mode of BFD. .. attribute:: last_packet_transmitted The date and time at which the last BFD packet was transmitted for this session, expressed as the number of nanoseconds since the Unix Epoch (January 1, 1970, 00\:00 UTC) **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: last_packet_received The date and time at which the last BFD packet was received for this session, expressed as the number of nanoseconds since the Unix Epoch (January 1, 1970, 00\:00 UTC) **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: transmitted_packets The number of packets that have been transmitted by the local system **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: received_packets The number of packets that have been received by the local system from the remote neighbour **type**\: int **range:** 0..18446744073709551615 **config**\: False .. 
attribute:: up_transitions The number of times that the adjacency with the neighbor has transitioned into the up state **type**\: int **range:** 0..18446744073709551615 **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async, self).__init__() self.yang_name = "async" self.yang_parent_name = "state" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('last_packet_transmitted', (YLeaf(YType.uint64, 'last-packet-transmitted'), ['int'])), ('last_packet_received', (YLeaf(YType.uint64, 'last-packet-received'), ['int'])), ('transmitted_packets', (YLeaf(YType.uint64, 'transmitted-packets'), ['int'])), ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])), ('up_transitions', (YLeaf(YType.uint64, 'up-transitions'), ['int'])), ]) self.last_packet_transmitted = None self.last_packet_received = None self.transmitted_packets = None self.received_packets = None self.up_transitions = None self._segment_path = lambda: "async" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State.Async, ['last_packet_transmitted', 'last_packet_received', 'transmitted_packets', 'received_packets', 'up_transitions'], name, value) class Peers(_Entity_): """ Parameters relating to the BFD peers which are seen over this interface. .. 
attribute:: peer Parameters relating to the BFD peer specified by the remote address **type**\: list of :py:class:`Peer <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer>` **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers, self).__init__() self.yang_name = "peers" self.yang_parent_name = "interface" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("peer", ("peer", Bfd.Interfaces.Interface.Peers.Peer))]) self._leafs = OrderedDict() self.peer = YList(self) self._segment_path = lambda: "peers" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Peers, [], name, value) class Peer(_Entity_): """ Parameters relating to the BFD peer specified by the remote address. .. attribute:: local_discriminator (key) The local discriminator, which is unique for the session on the system **type**\: str **refers to**\: :py:class:`local_discriminator <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State>` **config**\: False .. 
attribute:: state Operational state parameters for the BFD session **type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State>` **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers.Peer, self).__init__() self.yang_name = "peer" self.yang_parent_name = "peers" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = ['local_discriminator'] self._child_classes = OrderedDict([("state", ("state", Bfd.Interfaces.Interface.Peers.Peer.State))]) self._leafs = OrderedDict([ ('local_discriminator', (YLeaf(YType.str, 'local-discriminator'), ['str'])), ]) self.local_discriminator = None self.state = Bfd.Interfaces.Interface.Peers.Peer.State() self.state.parent = self self._children_name_map["state"] = "state" self._segment_path = lambda: "peer" + "[local-discriminator='" + str(self.local_discriminator) + "']" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer, ['local_discriminator'], name, value) class State(_Entity_): """ Operational state parameters for the BFD session. .. 
attribute:: local_address The IP address used by the local system for this BFD session **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ **config**\: False .. attribute:: remote_address The IP address used by the remote system for this BFD session **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ **config**\: False .. attribute:: subscribed_protocols Indicates the set of protocols that currently use this BFD session for liveliness detection **type**\: list of :py:class:`INSTALLPROTOCOLTYPE <ydk.models.openconfig.openconfig_policy_types.INSTALLPROTOCOLTYPE>` **config**\: False .. 
attribute:: session_state The state of the BFD session perceived by the local system **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>` **config**\: False .. attribute:: remote_session_state The reported state of the BFD session according to the remote system. This state reflects the last state reported in a BFD control packet **type**\: :py:class:`BfdSessionState <ydk.models.openconfig.openconfig_bfd.BfdSessionState>` **config**\: False .. attribute:: last_failure_time The time of the last transition of the BFD session out of the UP state, expressed as the number of nanoseconds since the Unix epoch **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: failure_transitions The number of times that the BFD session has transitioned out of the UP state **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: local_discriminator A unique identifier used by the local system to identify this BFD session **type**\: str **config**\: False .. attribute:: remote_discriminator A unique identified used by the remote system to identify this BFD session **type**\: str **config**\: False .. attribute:: local_diagnostic_code The local BFD diagnostic code indicating the most recent reason for failure of this BFD session **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>` **config**\: False .. attribute:: remote_diagnostic_code The remote BFD diagnostic code indicating the remote system's reason for failure of the BFD session **type**\: :py:class:`BfdDiagnosticCode <ydk.models.openconfig.openconfig_bfd.BfdDiagnosticCode>` **config**\: False .. attribute:: remote_minimum_receive_interval The value of the minimum receive interval that was specified in the most recent BFD control packet received from the peer **type**\: int **range:** 0..4294967295 **config**\: False .. 
attribute:: demand_mode_requested This leaf is set to true when the remote system has requested demand mode be run for this session **type**\: bool **config**\: False .. attribute:: remote_authentication_enabled This leaf is set to true when the remote system has specified that authentication is present for the BFD session **type**\: bool **config**\: False .. attribute:: remote_control_plane_independent This leaf is set to true when the remote system has specified that the hardware implementing this BFD session is independent of the control plane's liveliness **type**\: bool **config**\: False .. attribute:: echo Operational state parameters specifically relating to the echo mode of BFD **type**\: :py:class:`Echo <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State.Echo>` **config**\: False .. attribute:: async_ Operational state parameters specifically relating to asynchronous mode of BFD **type**\: :py:class:`Async <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.Peers.Peer.State.Async>` **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers.Peer.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "peer" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("echo", ("echo", Bfd.Interfaces.Interface.Peers.Peer.State.Echo)), ("async", ("async_", Bfd.Interfaces.Interface.Peers.Peer.State.Async))]) self._leafs = OrderedDict([ ('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])), ('remote_address', (YLeaf(YType.str, 'remote-address'), ['str','str'])), ('subscribed_protocols', (YLeafList(YType.identityref, 'subscribed-protocols'), [('ydk.models.openconfig.openconfig_policy_types', 'INSTALLPROTOCOLTYPE')])), ('session_state', (YLeaf(YType.enumeration, 'session-state'), 
[('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])), ('remote_session_state', (YLeaf(YType.enumeration, 'remote-session-state'), [('ydk.models.openconfig.openconfig_bfd', 'BfdSessionState', '')])), ('last_failure_time', (YLeaf(YType.uint64, 'last-failure-time'), ['int'])), ('failure_transitions', (YLeaf(YType.uint64, 'failure-transitions'), ['int'])), ('local_discriminator', (YLeaf(YType.str, 'local-discriminator'), ['str'])), ('remote_discriminator', (YLeaf(YType.str, 'remote-discriminator'), ['str'])), ('local_diagnostic_code', (YLeaf(YType.enumeration, 'local-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])), ('remote_diagnostic_code', (YLeaf(YType.enumeration, 'remote-diagnostic-code'), [('ydk.models.openconfig.openconfig_bfd', 'BfdDiagnosticCode', '')])), ('remote_minimum_receive_interval', (YLeaf(YType.uint32, 'remote-minimum-receive-interval'), ['int'])), ('demand_mode_requested', (YLeaf(YType.boolean, 'demand-mode-requested'), ['bool'])), ('remote_authentication_enabled', (YLeaf(YType.boolean, 'remote-authentication-enabled'), ['bool'])), ('remote_control_plane_independent', (YLeaf(YType.boolean, 'remote-control-plane-independent'), ['bool'])), ]) self.local_address = None self.remote_address = None self.subscribed_protocols = [] self.session_state = None self.remote_session_state = None self.last_failure_time = None self.failure_transitions = None self.local_discriminator = None self.remote_discriminator = None self.local_diagnostic_code = None self.remote_diagnostic_code = None self.remote_minimum_receive_interval = None self.demand_mode_requested = None self.remote_authentication_enabled = None self.remote_control_plane_independent = None self.echo = Bfd.Interfaces.Interface.Peers.Peer.State.Echo() self.echo.parent = self self._children_name_map["echo"] = "echo" self.async_ = Bfd.Interfaces.Interface.Peers.Peer.State.Async() self.async_.parent = self self._children_name_map["async_"] = "async" 
self._segment_path = lambda: "state" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer.State, ['local_address', 'remote_address', 'subscribed_protocols', 'session_state', 'remote_session_state', 'last_failure_time', 'failure_transitions', 'local_discriminator', 'remote_discriminator', 'local_diagnostic_code', 'remote_diagnostic_code', 'remote_minimum_receive_interval', 'demand_mode_requested', 'remote_authentication_enabled', 'remote_control_plane_independent'], name, value) class Echo(_Entity_): """ Operational state parameters specifically relating to the echo mode of BFD. .. attribute:: active This leaf is set to true when echo mode is running between the local and remote system. When it is set to false, solely asynchronous mode is active **type**\: bool **config**\: False .. attribute:: last_packet_transmitted The date and time at which the last BFD packet was transmitted for this session, expressed as the number of nanoseconds since the Unix Epoch (January 1, 1970, 00\:00 UTC) **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: last_packet_received The date and time at which the last BFD packet was received for this session, expressed as the number of nanoseconds since the Unix Epoch (January 1, 1970, 00\:00 UTC) **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: transmitted_packets The number of packets that have been transmitted by the local system **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: received_packets The number of packets that have been received by the local system from the remote neighbour **type**\: int **range:** 0..18446744073709551615 **config**\: False .. 
attribute:: up_transitions The number of times that the adjacency with the neighbor has transitioned into the up state **type**\: int **range:** 0..18446744073709551615 **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers.Peer.State.Echo, self).__init__() self.yang_name = "echo" self.yang_parent_name = "state" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('active', (YLeaf(YType.boolean, 'active'), ['bool'])), ('last_packet_transmitted', (YLeaf(YType.uint64, 'last-packet-transmitted'), ['int'])), ('last_packet_received', (YLeaf(YType.uint64, 'last-packet-received'), ['int'])), ('transmitted_packets', (YLeaf(YType.uint64, 'transmitted-packets'), ['int'])), ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])), ('up_transitions', (YLeaf(YType.uint64, 'up-transitions'), ['int'])), ]) self.active = None self.last_packet_transmitted = None self.last_packet_received = None self.transmitted_packets = None self.received_packets = None self.up_transitions = None self._segment_path = lambda: "echo" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer.State.Echo, ['active', 'last_packet_transmitted', 'last_packet_received', 'transmitted_packets', 'received_packets', 'up_transitions'], name, value) class Async(_Entity_): """ Operational state parameters specifically relating to asynchronous mode of BFD. .. attribute:: last_packet_transmitted The date and time at which the last BFD packet was transmitted for this session, expressed as the number of nanoseconds since the Unix Epoch (January 1, 1970, 00\:00 UTC) **type**\: int **range:** 0..18446744073709551615 **config**\: False .. 
attribute:: last_packet_received The date and time at which the last BFD packet was received for this session, expressed as the number of nanoseconds since the Unix Epoch (January 1, 1970, 00\:00 UTC) **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: transmitted_packets The number of packets that have been transmitted by the local system **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: received_packets The number of packets that have been received by the local system from the remote neighbour **type**\: int **range:** 0..18446744073709551615 **config**\: False .. attribute:: up_transitions The number of times that the adjacency with the neighbor has transitioned into the up state **type**\: int **range:** 0..18446744073709551615 **config**\: False """ _prefix = 'oc-bfd' _revision = '2018-11-21' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(Bfd.Interfaces.Interface.Peers.Peer.State.Async, self).__init__() self.yang_name = "async" self.yang_parent_name = "state" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('last_packet_transmitted', (YLeaf(YType.uint64, 'last-packet-transmitted'), ['int'])), ('last_packet_received', (YLeaf(YType.uint64, 'last-packet-received'), ['int'])), ('transmitted_packets', (YLeaf(YType.uint64, 'transmitted-packets'), ['int'])), ('received_packets', (YLeaf(YType.uint64, 'received-packets'), ['int'])), ('up_transitions', (YLeaf(YType.uint64, 'up-transitions'), ['int'])), ]) self.last_packet_transmitted = None self.last_packet_received = None self.transmitted_packets = None self.received_packets = None self.up_transitions = None self._segment_path = lambda: "async" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Bfd.Interfaces.Interface.Peers.Peer.State.Async, ['last_packet_transmitted', 
'last_packet_received', 'transmitted_packets', 'received_packets', 'up_transitions'], name, value) def clone_ptr(self): self._top_entity = Bfd() return self._top_entity
0.349089
0.323166
import random
import pygame
import time  # NOTE(review): unused here; kept per house rule against dropping file-level imports
from pygame import Surface


class Dice(Surface):
    """A square pygame Surface that renders and animates a die.

    Dots are laid out on a 3x3 grid (cells numbered 1-9, row-major);
    ``dot_logic`` maps each face value to the cells drawn black.
    """

    def __init__(self, x, y, width, roll_options):
        """Create a die at screen position (x, y) with side length ``width``.

        Args:
            x: Left screen coordinate used by ``draw``.
            y: Top screen coordinate used by ``draw``.
            width: Side length in pixels (the die is square).
            roll_options: Sequence of face values, e.g. [1, 2, 3, 4, 5, 6].
        """
        Surface.__init__(self, (width, width))
        self.x = x
        self.y = y
        self.width = width
        self.height = width
        self.roll_options = roll_options  # [1, 2, 3, 4, 5, 6]
        self.last_roll = 1
        self.moves_left = 0
        self.dots = []  # list of {'dot': Rect, 'color': (r, g, b)} entries
        # Face value -> 3x3 grid cells (1-9, row-major) drawn black.
        self.dot_logic = {
            1: [5],
            2: [1, 9],
            3: [1, 5, 9],
            4: [1, 3, 7, 9],
            5: [1, 3, 5, 7, 9],
            6: [1, 3, 4, 6, 7, 9],
            9: [1, 2, 3, 4, 5, 6, 7, 8, 9]  # For testing purposes
        }
        self.color = (255, 255, 255)
        self.fill(self.color)
        self.last = pygame.time.get_ticks()  # time since last cooldown
        self.cooldown = 300  # time to give between animated rolls (ms)
        # Animation step counter: 10 means idle; reset to 0 to start rolling.
        self.count = 10

    def roll(self):
        """Pick a random face from ``roll_options`` and redraw the dots.

        Bug fix: the original used ``self.roll_options[random.randint(0, 5)]``,
        which raises IndexError when ``roll_options`` has fewer than six
        entries (possible after ``change_roll_options``) and never selects
        entries beyond the sixth. ``random.choice`` picks uniformly from
        any non-empty sequence.
        """
        self.last_roll = random.choice(self.roll_options)
        self.moves_left = self.last_roll
        self.create_dots(self.last_roll)

    def change_roll_options(self, roll_options):
        """Replace the sequence of faces this die can roll."""
        self.roll_options = roll_options

    def draw(self, screen):
        """Blit this die onto ``screen`` at its stored (x, y) position."""
        screen.blit(self, (self.x, self.y))

    def resize(self, x, y, width):
        """Return a NEW Dice at (x, y) with side ``width``.

        A pygame Surface cannot be resized in place, so callers must use
        the returned instance; ``self``'s bookkeeping fields are updated
        only to keep the old object consistent (original behavior kept).
        """
        self.x = x
        self.y = y
        self.width = width
        self.height = width
        return Dice(x, y, width, self.roll_options)

    def create_dots(self, roll):
        """Draw the 3x3 dot grid for face value ``roll`` onto this surface.

        Cells listed in ``dot_logic[roll]`` are black; all others are drawn
        in the background colour, which erases dots from the previous roll.
        """
        active_cells = self.dot_logic[roll]
        self.dots = []
        for cell in range(9):
            # Row/column centres sit at 1/6, 3/6 and 5/6 of the side length.
            y = self.height * (2 * (cell // 3) + 1) / 6
            x = self.width * (2 * (cell % 3) + 1) / 6
            color = (0, 0, 0) if cell + 1 in active_cells else (255, 255, 255)
            rect = pygame.draw.circle(self, color, (x, y), self.width / 10)
            self.dots.append({'dot': rect, 'color': color})

    def animate_roll(self):
        """Advance the rolling animation by one step when the cooldown expires.

        Re-rolls every ``cooldown`` milliseconds until ``count`` reaches 10.
        """
        if self.count < 10 and self.get_cooldown() > self.cooldown:
            self.roll()
            self.count += 1
            self.set_cooldown()

    def is_rolling(self):
        """Return True while the roll animation is still in progress."""
        return self.count < 10

    def trigger_roll(self):
        """Start the rolling animation from step 0."""
        self.count = 0

    def get_cooldown(self):
        """Return milliseconds elapsed since the last animation step."""
        return pygame.time.get_ticks() - self.last

    def set_cooldown(self):
        """Record the current tick count as the last animation step time."""
        self.last = pygame.time.get_ticks()
dice.py
import random
import pygame
import time  # NOTE(review): unused here; kept per house rule against dropping file-level imports
from pygame import Surface


class Dice(Surface):
    """A square pygame Surface that renders and animates a die.

    Dots are laid out on a 3x3 grid (cells numbered 1-9, row-major);
    ``dot_logic`` maps each face value to the cells drawn black.
    """

    def __init__(self, x, y, width, roll_options):
        """Create a die at screen position (x, y) with side length ``width``.

        Args:
            x: Left screen coordinate used by ``draw``.
            y: Top screen coordinate used by ``draw``.
            width: Side length in pixels (the die is square).
            roll_options: Sequence of face values, e.g. [1, 2, 3, 4, 5, 6].
        """
        Surface.__init__(self, (width, width))
        self.x = x
        self.y = y
        self.width = width
        self.height = width
        self.roll_options = roll_options  # [1, 2, 3, 4, 5, 6]
        self.last_roll = 1
        self.moves_left = 0
        self.dots = []  # list of {'dot': Rect, 'color': (r, g, b)} entries
        # Face value -> 3x3 grid cells (1-9, row-major) drawn black.
        self.dot_logic = {
            1: [5],
            2: [1, 9],
            3: [1, 5, 9],
            4: [1, 3, 7, 9],
            5: [1, 3, 5, 7, 9],
            6: [1, 3, 4, 6, 7, 9],
            9: [1, 2, 3, 4, 5, 6, 7, 8, 9]  # For testing purposes
        }
        self.color = (255, 255, 255)
        self.fill(self.color)
        self.last = pygame.time.get_ticks()  # time since last cooldown
        self.cooldown = 300  # time to give between animated rolls (ms)
        # Animation step counter: 10 means idle; reset to 0 to start rolling.
        self.count = 10

    def roll(self):
        """Pick a random face from ``roll_options`` and redraw the dots.

        Bug fix: the original used ``self.roll_options[random.randint(0, 5)]``,
        which raises IndexError when ``roll_options`` has fewer than six
        entries (possible after ``change_roll_options``) and never selects
        entries beyond the sixth. ``random.choice`` picks uniformly from
        any non-empty sequence.
        """
        self.last_roll = random.choice(self.roll_options)
        self.moves_left = self.last_roll
        self.create_dots(self.last_roll)

    def change_roll_options(self, roll_options):
        """Replace the sequence of faces this die can roll."""
        self.roll_options = roll_options

    def draw(self, screen):
        """Blit this die onto ``screen`` at its stored (x, y) position."""
        screen.blit(self, (self.x, self.y))

    def resize(self, x, y, width):
        """Return a NEW Dice at (x, y) with side ``width``.

        A pygame Surface cannot be resized in place, so callers must use
        the returned instance; ``self``'s bookkeeping fields are updated
        only to keep the old object consistent (original behavior kept).
        """
        self.x = x
        self.y = y
        self.width = width
        self.height = width
        return Dice(x, y, width, self.roll_options)

    def create_dots(self, roll):
        """Draw the 3x3 dot grid for face value ``roll`` onto this surface.

        Cells listed in ``dot_logic[roll]`` are black; all others are drawn
        in the background colour, which erases dots from the previous roll.
        """
        active_cells = self.dot_logic[roll]
        self.dots = []
        for cell in range(9):
            # Row/column centres sit at 1/6, 3/6 and 5/6 of the side length.
            y = self.height * (2 * (cell // 3) + 1) / 6
            x = self.width * (2 * (cell % 3) + 1) / 6
            color = (0, 0, 0) if cell + 1 in active_cells else (255, 255, 255)
            rect = pygame.draw.circle(self, color, (x, y), self.width / 10)
            self.dots.append({'dot': rect, 'color': color})

    def animate_roll(self):
        """Advance the rolling animation by one step when the cooldown expires.

        Re-rolls every ``cooldown`` milliseconds until ``count`` reaches 10.
        """
        if self.count < 10 and self.get_cooldown() > self.cooldown:
            self.roll()
            self.count += 1
            self.set_cooldown()

    def is_rolling(self):
        """Return True while the roll animation is still in progress."""
        return self.count < 10

    def trigger_roll(self):
        """Start the rolling animation from step 0."""
        self.count = 0

    def get_cooldown(self):
        """Return milliseconds elapsed since the last animation step."""
        return pygame.time.get_ticks() - self.last

    def set_cooldown(self):
        """Record the current tick count as the last animation step time."""
        self.last = pygame.time.get_ticks()
0.47658
0.316079
import logging
import functools

from flask import request, abort, url_for
from flask import _request_ctx_stack as stack
# Fix: the top-level werkzeug re-export of cached_property was removed in
# werkzeug 1.0; werkzeug.utils works on both old and new releases.
from werkzeug.utils import cached_property
import datetime
import json

from ..utility import create_response, decode_base64, json_serial

log = logging.getLogger('flask_oauth2-devices')


class OAuth2DevicesProvider(object):
    """Provide secure services for devices using OAuth2.

    There are two usage modes.  One is binding the Flask app instance::

        app = Flask(__name__)
        oauth = OAuth2DevicesProvider(app)

    The second possibility is to bind the Flask app later::

        oauth = OAuth2DevicesProvider()

        def create_app():
            app = Flask(__name__)
            oauth.init_app(app)
            return app
    """

    def __init__(self, app=None):
        self._before_request_funcs = []
        self._after_request_funcs = []
        self._invalid_response = None
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Initialize an application for the oauth2 provider instance."""
        self.app = app
        app.extensions = getattr(app, 'extensions', {})
        app.extensions['oauth2devices.provider.oauth2devices'] = self

    @cached_property
    def error_uri(self):
        """The error page URI.

        Resolution order: the OAUTH2_DEVICES_PROVIDER_ERROR_URI config value,
        then OAUTH2_DEVICES_PROVIDER_ERROR_ENDPOINT (resolved via url_for),
        then the '/oauth/errors' default.
        """
        error_uri = self.app.config.get('OAUTH2_DEVICES_PROVIDER_ERROR_URI')
        if error_uri:
            return error_uri
        error_endpoint = \
            self.app.config.get('OAUTH2_DEVICES_PROVIDER_ERROR_ENDPOINT')
        if error_endpoint:
            # Fix: url_for was never imported and raised NameError here; it
            # is now imported from flask at the top of the module.
            return url_for(error_endpoint)
        return '/oauth/errors'

    def invalid_response(self, f):
        """Register a function for responding to an invalid request.

        When an invalid request proceeds to :meth:`require_oauth`, we can
        handle the request with the registered function.  The function
        accepts one parameter, the request::

            @oauth.invalid_response
            def invalid_require_oauth(req):
                return jsonify(message=req.error_message), 401

        If no function is registered, it will return with ``abort(401)``.
        """
        self._invalid_response = f
        return f

    def clientgetter(self, f):
        """Register a function as the client getter.

        The function accepts one parameter `client_id`, and it returns a
        client object with at least these information:

            - client_id: A random string
            - client_secret: A random string
            - client_type: A string represents if it is `confidential`
            - redirect_uris: A list of redirect uris

        Implement the client getter::

            @oauth.clientgetter
            def get_client(client_id):
                return get_client_model(client_id)  # Client is an object
        """
        self._clientgetter = f
        return f

    def authcodesetter(self, f):
        """Register a function to save the auth code.

        The setter accepts at least:

            - code: our auth_code; if None we will generate one
            - client_id: the client we want to create a new auth_code for
            - user_id: the user we want to create a new auth_code for

        Implement the auth_code setter::

            @oauth.authcodesetter
            def save_auth_code(code, client_id, user_id, *args, **kwargs):
                auth_code_model.save_code(code, client, user_id)
        """
        self._authcodesetter = f
        return f

    def authcodegetter(self, f):
        """Register a function as the auth code getter.

        The function accepts one parameter `code`, and it returns a
        code object::

            @oauth.authcodegetter
            def load_auth_code(code):
                return get_code_model(code)  # Code is an object
        """
        self._authcodegetter = f
        return f

    def code_handler(self, authorize_link, activate_link, expires_interval,
                     polling_internal):
        """Code handler decorator.

        The device requests an auth_code as part of (A).  The client POSTs
        to the decorated endpoint (e.g. ``POST /oauth/device``) over
        transport-layer security, and the authorization server MUST
        authenticate the client.
        """
        def decorator(f):
            @functools.wraps(f)
            def wrapper(*args, **kwargs):
                ctx = stack.top
                if ctx is not None and hasattr(ctx, 'request'):
                    # Fix: the original rebound the *local* name ``request``
                    # here, so reads below raised UnboundLocalError whenever
                    # this branch was skipped; a distinct name avoids
                    # shadowing the flask proxy.
                    req = ctx.request
                    if req.method != 'POST':
                        log.warning(
                            'Attempted a non-post on the code_handler')
                        return create_response({'Allow': 'POST'},
                                               'must use POST', 405)
                    app = self.getApp(req)
                    if app is None:
                        raise OAuth2Exception(
                            'Invalid application credentials',
                            type='unauthorized_client'
                        )
                    auth_code = self._authcodesetter(None, app.client_id,
                                                     app.user_id)
                    return self.create_oauth2_code_response(
                        auth_code, authorize_link, activate_link,
                        expires_interval, polling_internal)
                # No request context available: fall through to the view.
                return f(*args, **kwargs)
            return wrapper
        return decorator

    def authorize_handler(self):
        """Authorize handler decorator.

        The device exchanges the auth_code and device code it received from
        (A) for an access token, e.g.::

            POST /oauth/device/authorize HTTP/1.1
            Host: server.example.com
            Content-Type: application/x-www-form-urlencoded

            {
                "auth_code": "...",
                "device_code": "c8fe9de9e6c5f80bc543c492aaa2fbaf2b081601"
            }
        """
        def decorator(f):
            @functools.wraps(f)
            def wrapper(*args, **kwargs):
                ctx = stack.top
                if ctx is not None and hasattr(ctx, 'request'):
                    req = ctx.request
                    if req.method != 'POST':
                        log.warning(
                            'Attempted a non-post on the code_handler')
                        return create_response({'Allow': 'POST'},
                                               'must use POST', 405)
                    data = req.values
                    auth_code = self._authcodegetter(data.get('auth_code'))
                    device_code = data.get('device_code')
                    if device_code is None and auth_code is None:
                        return create_response({}, 'Accepted', 202)
                    if auth_code is None:
                        raise OAuth2Exception(
                            'This token could not be found',
                            type='invalid_token'
                        )
                    # Fix: the original tested ``expires is None and expires
                    # < now`` which can never both hold (and comparing None
                    # would raise TypeError); expiry is checked only when an
                    # expiry timestamp exists.  Also ``datetime.utcnow`` does
                    # not exist on the *module*; the class method is used.
                    if auth_code.expires is not None \
                            and auth_code.expires < \
                            datetime.datetime.utcnow():
                        raise OAuth2Exception(
                            'Authorization code has expired',
                            type='invalid_token'
                        )
                    if auth_code.is_active == 0:
                        raise OAuth2Exception(
                            'The user has rejected this connection',
                            type='rejected_connection'
                        )
                    if auth_code.get_device_code() != device_code:
                        raise OAuth2Exception(
                            'Your user code does not match the device',
                            type='invalid_token'
                        )
                    access_token = \
                        auth_code.exchange_for_access_token(auth_code)
                    return self.create_oauth2_token_response(access_token)
                return f(*args, **kwargs)
            return wrapper
        return decorator

    def _verify_request(self, scopes):
        """Verify received oauth2 data.

        NOTE(review): the return polarity mirrors the original code —
        ``require_oauth`` rejects the request when this returns True —
        confirm against callers before changing it.
        """
        if request.method == 'POST':
            return False
        data = request.form.to_dict()
        # Fix: the original asked whether a *list* was among the dict keys
        # (unhashable -> TypeError) and was missing a comma, concatenating
        # 'user' 'client' into 'userclient'.  Each required parameter is
        # now checked individually.
        required = ('oauth_version', 'oauth_nonce', 'oauth_timestamp',
                    'user', 'client')
        if not all(key in data for key in required):
            return False
        return True

    def require_oauth(self, *scopes):
        """Protect resource with specified scopes."""
        def wrapper(f):
            @functools.wraps(f)
            def decorated(*args, **kwargs):
                if hasattr(request, 'oauth') and request.oauth:
                    return f(*args, **kwargs)
                if self._verify_request(scopes):
                    if self._invalid_response:
                        return self._invalid_response(request)
                    return abort(401)
                # Fix: the original assigned the undefined name ``req`` here
                # (NameError).  Mark the current request as oauth-checked so
                # the fast path above succeeds on re-entry.
                request.oauth = request
                return f(*args, **kwargs)
            return decorated
        return wrapper

    def create_oauth2_code_response(self, auth_code, authorize_link=None,
                                    activate_link=None, expires_interval=0,
                                    polling_interval=0):
        """Issue the device-code response (200 OK, JSON body).

        Body fields:
            device_code    REQUIRED     per-device code generated on the fly
            user_code      REQUIRED     auth code issued by the server
            authorize_link REQUIRED     where the code is exchanged for a token
            activate_link  REQUIRED     user-consent activation link
            expires_in     RECOMMENDED  lifetime of the code in seconds
            interval       REQUIRED     recommended polling interval

        Example::

            HTTP/1.1 200 OK
            Content-Type: application/json;charset=UTF-8
            Cache-Control: no-store
            Pragma: no-cache

            {
                "device_code": "73de035b2a7bdcb2c092f4bdfe292898e0657a18",
                "user_code": "656e6075",
                "authorize_link": "https://api.example.com/oauth/device/authorize",
                "activate_link": "https://example.com/activate",
                "expires_in": 3600,
                "interval": 15
            }
        """
        response = create_response({
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache'}, json.dumps({
                'device_code': auth_code.get_device_code(),
                # Fix: the key was 'user_code ' (trailing space), which did
                # not match the documented field name.
                'user_code': auth_code.code,
                'authorize_link': authorize_link,
                'activate_link': activate_link,
                'expires_in': expires_interval,
                'interval': polling_interval}), 200)
        return response

    def create_oauth2_token_response(self, access_token):
        """Issue the access-token response (200 OK, JSON body).

        Body fields (RFC 6749 section 5.1):
            access_token   REQUIRED          the issued access token
            token_type     REQUIRED          token type (case insensitive)
            expires_in     RECOMMENDED       lifetime in seconds
            refresh_token  OPTIONAL          for obtaining new access tokens
            scope          OPTIONAL/REQUIRED the granted scope

        The response carries ``Cache-Control: no-store`` and
        ``Pragma: no-cache`` as required for responses containing tokens.

        http://tools.ietf.org/html/rfc6749#section-5.1
        """
        response = create_response({
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache'}, json.dumps({
                'access_token': access_token.access_token,
                # Fix: the key was 'token_type ' (trailing space).
                'token_type': access_token.token_type,
                'scope': access_token.scopes,
                # json.dumps wraps the serialized datetime in quotes; strip
                # them.  (The original replaced '"' with a stray
                # backslash-space, corrupting the value.)
                'expires_in': json.dumps(
                    access_token.expires,
                    default=json_serial).replace('"', ''),
                'refresh_token': None}), 200)
        return response

    def getApp(self, request):
        """Authenticate the client from HTTP Basic credentials.

        http://tools.ietf.org/html/rfc2617#section-2

        Raises OAuth2Exception when the client id is missing/unknown or the
        secret does not match.
        """
        client_id = None
        client_secret = None
        if "Authorization" in request.headers:
            auth_header = request.headers['Authorization']
            # Fix: match the scheme case-insensitively; real clients send
            # 'Basic', which the original lowercase-only substring test
            # rejected.
            if auth_header[:6].lower() == "basic ":
                auth = decode_base64(auth_header[6:]).split(':')
                client_id = auth[0]
                client_secret = auth[1]
        if client_id is None:
            raise OAuth2Exception(
                'A valid client ID must be provided along with request made',
                type='invalid_client'
            )
        app = self._clientgetter(client_id)
        if app is None:
            raise OAuth2Exception(
                'A valid client ID must be provided along with request made',
                type='invalid_client'
            )
        if client_secret is not None and client_secret == app.client_secret:
            return app
        raise OAuth2Exception(
            'A valid client secret must be provided along with request made',
            type='invalid_secret'
        )


class OAuth2Exception(Exception):
    """API exception carrying an HTTP status code and optional payload."""

    status_code = 400

    def __init__(self, message, status_code=None, payload=None, type=None):
        # Fix: every raise site in this module passes ``type=...``, but the
        # original signature lacked the parameter, so raising this exception
        # itself raised a TypeError.  It is accepted and stored now.
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload
        self.type = type

    def to_dict(self):
        """Serialize the exception for a JSON error response."""
        rv = dict(self.payload or ())
        rv['message'] = self.message
        if self.type is not None:
            rv['type'] = self.type
        return rv
devices/provider/devices.py
import logging
import functools

from flask import request, abort, url_for
from flask import _request_ctx_stack as stack
# Fix: the top-level werkzeug re-export of cached_property was removed in
# werkzeug 1.0; werkzeug.utils works on both old and new releases.
from werkzeug.utils import cached_property
import datetime
import json

from ..utility import create_response, decode_base64, json_serial

log = logging.getLogger('flask_oauth2-devices')


class OAuth2DevicesProvider(object):
    """Provide secure services for devices using OAuth2.

    There are two usage modes.  One is binding the Flask app instance::

        app = Flask(__name__)
        oauth = OAuth2DevicesProvider(app)

    The second possibility is to bind the Flask app later::

        oauth = OAuth2DevicesProvider()

        def create_app():
            app = Flask(__name__)
            oauth.init_app(app)
            return app
    """

    def __init__(self, app=None):
        self._before_request_funcs = []
        self._after_request_funcs = []
        self._invalid_response = None
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Initialize an application for the oauth2 provider instance."""
        self.app = app
        app.extensions = getattr(app, 'extensions', {})
        app.extensions['oauth2devices.provider.oauth2devices'] = self

    @cached_property
    def error_uri(self):
        """The error page URI.

        Resolution order: the OAUTH2_DEVICES_PROVIDER_ERROR_URI config value,
        then OAUTH2_DEVICES_PROVIDER_ERROR_ENDPOINT (resolved via url_for),
        then the '/oauth/errors' default.
        """
        error_uri = self.app.config.get('OAUTH2_DEVICES_PROVIDER_ERROR_URI')
        if error_uri:
            return error_uri
        error_endpoint = \
            self.app.config.get('OAUTH2_DEVICES_PROVIDER_ERROR_ENDPOINT')
        if error_endpoint:
            # Fix: url_for was never imported and raised NameError here; it
            # is now imported from flask at the top of the module.
            return url_for(error_endpoint)
        return '/oauth/errors'

    def invalid_response(self, f):
        """Register a function for responding to an invalid request.

        When an invalid request proceeds to :meth:`require_oauth`, we can
        handle the request with the registered function.  The function
        accepts one parameter, the request::

            @oauth.invalid_response
            def invalid_require_oauth(req):
                return jsonify(message=req.error_message), 401

        If no function is registered, it will return with ``abort(401)``.
        """
        self._invalid_response = f
        return f

    def clientgetter(self, f):
        """Register a function as the client getter.

        The function accepts one parameter `client_id`, and it returns a
        client object with at least these information:

            - client_id: A random string
            - client_secret: A random string
            - client_type: A string represents if it is `confidential`
            - redirect_uris: A list of redirect uris

        Implement the client getter::

            @oauth.clientgetter
            def get_client(client_id):
                return get_client_model(client_id)  # Client is an object
        """
        self._clientgetter = f
        return f

    def authcodesetter(self, f):
        """Register a function to save the auth code.

        The setter accepts at least:

            - code: our auth_code; if None we will generate one
            - client_id: the client we want to create a new auth_code for
            - user_id: the user we want to create a new auth_code for

        Implement the auth_code setter::

            @oauth.authcodesetter
            def save_auth_code(code, client_id, user_id, *args, **kwargs):
                auth_code_model.save_code(code, client, user_id)
        """
        self._authcodesetter = f
        return f

    def authcodegetter(self, f):
        """Register a function as the auth code getter.

        The function accepts one parameter `code`, and it returns a
        code object::

            @oauth.authcodegetter
            def load_auth_code(code):
                return get_code_model(code)  # Code is an object
        """
        self._authcodegetter = f
        return f

    def code_handler(self, authorize_link, activate_link, expires_interval,
                     polling_internal):
        """Code handler decorator.

        The device requests an auth_code as part of (A).  The client POSTs
        to the decorated endpoint (e.g. ``POST /oauth/device``) over
        transport-layer security, and the authorization server MUST
        authenticate the client.
        """
        def decorator(f):
            @functools.wraps(f)
            def wrapper(*args, **kwargs):
                ctx = stack.top
                if ctx is not None and hasattr(ctx, 'request'):
                    # Fix: the original rebound the *local* name ``request``
                    # here, so reads below raised UnboundLocalError whenever
                    # this branch was skipped; a distinct name avoids
                    # shadowing the flask proxy.
                    req = ctx.request
                    if req.method != 'POST':
                        log.warning(
                            'Attempted a non-post on the code_handler')
                        return create_response({'Allow': 'POST'},
                                               'must use POST', 405)
                    app = self.getApp(req)
                    if app is None:
                        raise OAuth2Exception(
                            'Invalid application credentials',
                            type='unauthorized_client'
                        )
                    auth_code = self._authcodesetter(None, app.client_id,
                                                     app.user_id)
                    return self.create_oauth2_code_response(
                        auth_code, authorize_link, activate_link,
                        expires_interval, polling_internal)
                # No request context available: fall through to the view.
                return f(*args, **kwargs)
            return wrapper
        return decorator

    def authorize_handler(self):
        """Authorize handler decorator.

        The device exchanges the auth_code and device code it received from
        (A) for an access token, e.g.::

            POST /oauth/device/authorize HTTP/1.1
            Host: server.example.com
            Content-Type: application/x-www-form-urlencoded

            {
                "auth_code": "...",
                "device_code": "c8fe9de9e6c5f80bc543c492aaa2fbaf2b081601"
            }
        """
        def decorator(f):
            @functools.wraps(f)
            def wrapper(*args, **kwargs):
                ctx = stack.top
                if ctx is not None and hasattr(ctx, 'request'):
                    req = ctx.request
                    if req.method != 'POST':
                        log.warning(
                            'Attempted a non-post on the code_handler')
                        return create_response({'Allow': 'POST'},
                                               'must use POST', 405)
                    data = req.values
                    auth_code = self._authcodegetter(data.get('auth_code'))
                    device_code = data.get('device_code')
                    if device_code is None and auth_code is None:
                        return create_response({}, 'Accepted', 202)
                    if auth_code is None:
                        raise OAuth2Exception(
                            'This token could not be found',
                            type='invalid_token'
                        )
                    # Fix: the original tested ``expires is None and expires
                    # < now`` which can never both hold (and comparing None
                    # would raise TypeError); expiry is checked only when an
                    # expiry timestamp exists.  Also ``datetime.utcnow`` does
                    # not exist on the *module*; the class method is used.
                    if auth_code.expires is not None \
                            and auth_code.expires < \
                            datetime.datetime.utcnow():
                        raise OAuth2Exception(
                            'Authorization code has expired',
                            type='invalid_token'
                        )
                    if auth_code.is_active == 0:
                        raise OAuth2Exception(
                            'The user has rejected this connection',
                            type='rejected_connection'
                        )
                    if auth_code.get_device_code() != device_code:
                        raise OAuth2Exception(
                            'Your user code does not match the device',
                            type='invalid_token'
                        )
                    access_token = \
                        auth_code.exchange_for_access_token(auth_code)
                    return self.create_oauth2_token_response(access_token)
                return f(*args, **kwargs)
            return wrapper
        return decorator

    def _verify_request(self, scopes):
        """Verify received oauth2 data.

        NOTE(review): the return polarity mirrors the original code —
        ``require_oauth`` rejects the request when this returns True —
        confirm against callers before changing it.
        """
        if request.method == 'POST':
            return False
        data = request.form.to_dict()
        # Fix: the original asked whether a *list* was among the dict keys
        # (unhashable -> TypeError) and was missing a comma, concatenating
        # 'user' 'client' into 'userclient'.  Each required parameter is
        # now checked individually.
        required = ('oauth_version', 'oauth_nonce', 'oauth_timestamp',
                    'user', 'client')
        if not all(key in data for key in required):
            return False
        return True

    def require_oauth(self, *scopes):
        """Protect resource with specified scopes."""
        def wrapper(f):
            @functools.wraps(f)
            def decorated(*args, **kwargs):
                if hasattr(request, 'oauth') and request.oauth:
                    return f(*args, **kwargs)
                if self._verify_request(scopes):
                    if self._invalid_response:
                        return self._invalid_response(request)
                    return abort(401)
                # Fix: the original assigned the undefined name ``req`` here
                # (NameError).  Mark the current request as oauth-checked so
                # the fast path above succeeds on re-entry.
                request.oauth = request
                return f(*args, **kwargs)
            return decorated
        return wrapper

    def create_oauth2_code_response(self, auth_code, authorize_link=None,
                                    activate_link=None, expires_interval=0,
                                    polling_interval=0):
        """Issue the device-code response (200 OK, JSON body).

        Body fields:
            device_code    REQUIRED     per-device code generated on the fly
            user_code      REQUIRED     auth code issued by the server
            authorize_link REQUIRED     where the code is exchanged for a token
            activate_link  REQUIRED     user-consent activation link
            expires_in     RECOMMENDED  lifetime of the code in seconds
            interval       REQUIRED     recommended polling interval

        Example::

            HTTP/1.1 200 OK
            Content-Type: application/json;charset=UTF-8
            Cache-Control: no-store
            Pragma: no-cache

            {
                "device_code": "73de035b2a7bdcb2c092f4bdfe292898e0657a18",
                "user_code": "656e6075",
                "authorize_link": "https://api.example.com/oauth/device/authorize",
                "activate_link": "https://example.com/activate",
                "expires_in": 3600,
                "interval": 15
            }
        """
        response = create_response({
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache'}, json.dumps({
                'device_code': auth_code.get_device_code(),
                # Fix: the key was 'user_code ' (trailing space), which did
                # not match the documented field name.
                'user_code': auth_code.code,
                'authorize_link': authorize_link,
                'activate_link': activate_link,
                'expires_in': expires_interval,
                'interval': polling_interval}), 200)
        return response

    def create_oauth2_token_response(self, access_token):
        """Issue the access-token response (200 OK, JSON body).

        Body fields (RFC 6749 section 5.1):
            access_token   REQUIRED          the issued access token
            token_type     REQUIRED          token type (case insensitive)
            expires_in     RECOMMENDED       lifetime in seconds
            refresh_token  OPTIONAL          for obtaining new access tokens
            scope          OPTIONAL/REQUIRED the granted scope

        The response carries ``Cache-Control: no-store`` and
        ``Pragma: no-cache`` as required for responses containing tokens.

        http://tools.ietf.org/html/rfc6749#section-5.1
        """
        response = create_response({
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache'}, json.dumps({
                'access_token': access_token.access_token,
                # Fix: the key was 'token_type ' (trailing space).
                'token_type': access_token.token_type,
                'scope': access_token.scopes,
                # json.dumps wraps the serialized datetime in quotes; strip
                # them.  (The original replaced '"' with a stray
                # backslash-space, corrupting the value.)
                'expires_in': json.dumps(
                    access_token.expires,
                    default=json_serial).replace('"', ''),
                'refresh_token': None}), 200)
        return response

    def getApp(self, request):
        """Authenticate the client from HTTP Basic credentials.

        http://tools.ietf.org/html/rfc2617#section-2

        Raises OAuth2Exception when the client id is missing/unknown or the
        secret does not match.
        """
        client_id = None
        client_secret = None
        if "Authorization" in request.headers:
            auth_header = request.headers['Authorization']
            # Fix: match the scheme case-insensitively; real clients send
            # 'Basic', which the original lowercase-only substring test
            # rejected.
            if auth_header[:6].lower() == "basic ":
                auth = decode_base64(auth_header[6:]).split(':')
                client_id = auth[0]
                client_secret = auth[1]
        if client_id is None:
            raise OAuth2Exception(
                'A valid client ID must be provided along with request made',
                type='invalid_client'
            )
        app = self._clientgetter(client_id)
        if app is None:
            raise OAuth2Exception(
                'A valid client ID must be provided along with request made',
                type='invalid_client'
            )
        if client_secret is not None and client_secret == app.client_secret:
            return app
        raise OAuth2Exception(
            'A valid client secret must be provided along with request made',
            type='invalid_secret'
        )


class OAuth2Exception(Exception):
    """API exception carrying an HTTP status code and optional payload."""

    status_code = 400

    def __init__(self, message, status_code=None, payload=None, type=None):
        # Fix: every raise site in this module passes ``type=...``, but the
        # original signature lacked the parameter, so raising this exception
        # itself raised a TypeError.  It is accepted and stored now.
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload
        self.type = type

    def to_dict(self):
        """Serialize the exception for a JSON error response."""
        rv = dict(self.payload or ())
        rv['message'] = self.message
        if self.type is not None:
            rv['type'] = self.type
        return rv
0.711531
0.087252
import os
import sys
import time

sys.path.append(os.getcwd() + '/CPDP')
sys.path.append(os.getcwd() + '/JinEnv')
sys.path.append(os.getcwd() + '/lib')
# Fix: the script used numpy both as ``numpy`` and ``np`` without importing
# it, relying on ``from casadi import *`` leaking the name; import it
# explicitly and use the single alias ``np`` throughout.
import numpy as np

import CPDP
import JinEnv
from casadi import *
from scipy.integrate import solve_ivp
import scipy.io as sio

# --------------------------- load environment ---------------------------
env = JinEnv.SinglePendulum()
env.initDyn(l=1, m=1, damping_ratio=0.1)
env.initCost(wu=.01)

# --------------------- create optimal control object --------------------
# ``beta`` is a time-warping factor multiplying both dynamics and path cost.
oc = CPDP.COCSys()
beta = SX.sym('beta')
dyn = beta * env.f
oc.setAuxvarVariable(vertcat(beta, env.cost_auxvar))
oc.setStateVariable(env.X)
oc.setControlVariable(env.U)
oc.setDyn(dyn)
path_cost = beta * env.path_cost
oc.setPathCost(path_cost)
oc.setFinalCost(env.final_cost)

# set initial condition
ini_state = [0.0, 0.0]

# --------------- define the loss and interface functions -----------------
# The interface observes only the first state component.
interface_fn = Function('interface', [oc.state], [oc.state[0]])
diff_interface_fn = Function('diff_interface', [oc.state],
                             [jacobian(oc.state[0], oc.state)])


def getloss_corrections(time_grid, waypoints, opt_sol, auxsys_sol):
    """Return (loss, d_loss/d_parameter) over the waypoint times.

    Loss is the summed squared distance between each waypoint and the
    observed state; the gradient is assembled by the chain rule
    dl/dp = dl/dy * dy/dx * dx/dp using the auxiliary-system solution.
    NOTE(review): the factor 2 from the squared norm is omitted here,
    presumably absorbed into the learning rate — confirm before reuse.
    """
    loss = 0
    diff_loss = np.zeros(oc.n_auxvar)
    for k, t in enumerate(time_grid):
        waypoint = waypoints[k, :]
        measure = interface_fn(opt_sol(t)[0:oc.n_state]).full().flatten()
        loss += np.linalg.norm(waypoint - measure) ** 2
        dl_dy = measure - waypoint
        dy_dx = diff_interface_fn(opt_sol(t)[0:oc.n_state]).full()
        dx_dp = auxsys_sol(t)[0:oc.n_state * oc.n_auxvar].reshape(
            (oc.n_state, oc.n_auxvar))
        diff_loss += np.matmul(np.matmul(dl_dy, dy_dx), dx_dp)
    return loss, diff_loss


# ----------------- create waypoints using ground truth -------------------
T = 1
true_parameter = [2, 1, 1]
true_time_grid, true_opt_sol = oc.cocSolver(ini_state, T, true_parameter)
# env.play_animation(len=1, dt=true_time_grid[1] - true_time_grid[0],
#                    state_traj=true_opt_sol(true_time_grid)[:, 0:oc.n_state])

time_tau = true_time_grid[[1, 3, 6, 7, 9]]
waypoints = np.zeros((time_tau.size, interface_fn.numel_out()))
for k, t in enumerate(time_tau):
    waypoints[k, :] = \
        interface_fn(true_opt_sol(t)[0:oc.n_state]).full().flatten()

# --------------------------- learning process ----------------------------
lr = 1e-2
loss_trace, parameter_trace = [], []
current_parameter = np.array([1, 0.5, 1.5])
parameter_trace += [current_parameter.tolist()]
for j in range(int(100)):
    # trajectory for the current parameter guess
    time_grid, opt_sol = oc.cocSolver(ini_state, T, current_parameter)
    # establish the auxiliary control system
    auxsys_sol = oc.auxSysSolver(time_grid, opt_sol, current_parameter)
    # chain-rule gradient of the waypoint loss
    loss, diff_loss = getloss_corrections(time_tau, waypoints, opt_sol,
                                          auxsys_sol)
    # gradient step; the projection keeps beta strictly positive
    current_parameter -= lr * diff_loss
    current_parameter[0] = fmax(current_parameter[0], 0.00000001)
    loss_trace += [loss]
    parameter_trace += [current_parameter.tolist()]
    print('iter:', j, 'loss:', loss_trace[-1].tolist())

# save the results
save_data = {'parameter_trace': parameter_trace,
             'loss_trace': loss_trace,
             'learning_rate': lr,
             'true_parameter': true_parameter,
             'waypoints': waypoints,
             'time_grid': time_tau,
             'T': T}
# sio.savemat('../data/pendulum_results_2.mat', {'results': save_data})
Examples/pendulum_groundtruth.py
import os import sys import time sys.path.append(os.getcwd()+'/CPDP') sys.path.append(os.getcwd()+'/JinEnv') sys.path.append(os.getcwd()+'/lib') import CPDP import JinEnv from casadi import * from scipy.integrate import solve_ivp import scipy.io as sio # ---------------------------------------load environment--------------------------------------- env = JinEnv.SinglePendulum() env.initDyn(l=1, m=1, damping_ratio=0.1) env.initCost(wu=.01) # ---------------------------create optimal control object ---------------------------------------- oc = CPDP.COCSys() beta = SX.sym('beta') dyn = beta*env.f oc.setAuxvarVariable(vertcat(beta,env.cost_auxvar)) oc.setStateVariable(env.X) oc.setControlVariable(env.U) oc.setDyn(dyn) path_cost = beta * env.path_cost oc.setPathCost(path_cost) oc.setFinalCost(env.final_cost) # set initial condition ini_state = [0.0, 0.0] # ---------------------- define the loss function and interface function ------------------ # define the interface (only for the state) interface_fn = Function('interface', [oc.state], [oc.state[0]]) diff_interface_fn = Function('diff_interface', [oc.state], [jacobian(oc.state[0], oc.state)]) def getloss_corrections(time_grid, waypoints, opt_sol, auxsys_sol): loss = 0 diff_loss = numpy.zeros(oc.n_auxvar) for k,t in enumerate(time_grid): # solve loss waypoint = waypoints[k,:] measure = interface_fn(opt_sol(t)[0:oc.n_state]).full().flatten() loss += numpy.linalg.norm(waypoint - measure) ** 2 # solve gradient by chain rule dl_dy = measure-waypoint dy_dx = diff_interface_fn(opt_sol(t)[0:oc.n_state]).full() dx_dp = auxsys_sol(t)[0:oc.n_state * oc.n_auxvar].reshape((oc.n_state, oc.n_auxvar)) dl_dp = np.matmul(numpy.matmul(dl_dy,dy_dx),dx_dp) diff_loss += dl_dp return loss, diff_loss # --------------------------- create waypoints using ground truth ---------------------------------------- T = 1 true_parameter = [2, 1, 1] true_time_grid, true_opt_sol = oc.cocSolver(ini_state, T, true_parameter) # env.play_animation(len=1, 
dt=true_time_grid[1] - true_time_grid[0], state_traj=true_opt_sol(true_time_grid)[:, 0:oc.n_state]) time_tau = true_time_grid[[1, 3, 6, 7, 9]] waypoints = np.zeros((time_tau.size, interface_fn.numel_out())) for k, t in enumerate(time_tau): waypoints[k,:] = interface_fn(true_opt_sol(t)[0:oc.n_state]).full().flatten() # --------------------------- learning process -------------------------------- lr = 1e-2 loss_trace, parameter_trace = [], [] current_parameter = np.array([1, 0.5, 1.5]) parameter_trace += [current_parameter.tolist()] for j in range(int(100)): # initial guess of trajectory based on initial parameters time_grid, opt_sol = oc.cocSolver(ini_state, T, current_parameter) # # Establish the auxiliary control system auxsys_sol = oc.auxSysSolver(time_grid, opt_sol, current_parameter) # Use the chain rule loss, diff_loss = getloss_corrections(time_tau, waypoints, opt_sol, auxsys_sol) # update current_parameter -= lr * diff_loss current_parameter[0] = fmax(current_parameter[0], 0.00000001) # projection loss_trace += [loss] parameter_trace += [current_parameter.tolist()] # print print('iter:', j, 'loss:', loss_trace[-1].tolist()) # save the results save_data = {'parameter_trace': parameter_trace, 'loss_trace': loss_trace, 'learning_rate': lr, 'true_parameter':true_parameter, 'waypoints':waypoints, 'time_grid':time_tau, 'T':T} # sio.savemat('../data/pendulum_results_2.mat', {'results': save_data})
0.402275
0.264833
from enum import Enum from storey import Flow from storey.dtypes import _termination_obj class EmitPeriod(Enum): FIRST = 1 LAST = 2 class SampleWindow(Flow): """ Emits a single event in a window of `window_size` events, in accordance with `emit_period` and `emit_before_termination`. :param window_size: The size of the window we want to sample a single event from. :param emit_period: What event should this step emit for each `window_size` (default: EmitPeriod.First). Available options: 1.1) EmitPeriod.FIRST - will emit the first event in a window `window_size` events. 1.2) EmitPeriod.LAST - will emit the last event in a window of `window_size` events. :param emit_before_termination: On termination signal, should the step emit the last event it seen (default: False). Available options: 2.1) True - The last event seen will be emitted downstream. 2.2) False - The last event seen will NOT be emitted downstream. """ def __init__( self, window_size: int, emit_period: EmitPeriod = EmitPeriod.FIRST, emit_before_termination: bool = False, **kwargs, ): super().__init__(full_event=True, **kwargs) if window_size <= 1: raise ValueError(f"Expected window_size > 1, found {window_size}") if not isinstance(emit_period, EmitPeriod): raise ValueError(f"Expected emit_period of type `EmitPeriod`, got {type(emit_period)}") self._window_size = window_size self._emit_period = emit_period self._emit_before_termination = emit_before_termination self._count = 0 self._last_event = None async def _do(self, event): if event is _termination_obj: if self._last_event is not None: await self._do_downstream(self._last_event) return await self._do_downstream(_termination_obj) else: self._count += 1 if self._emit_before_termination: self._last_event = event if self._should_emit(): self._last_event = None await self._do_downstream(event) if self._count == self._window_size: self._count = 0 def _should_emit(self): if self._emit_period == EmitPeriod.FIRST and self._count == 1: return True elif 
self._emit_period == EmitPeriod.LAST and self._count == self._window_size: return True return False
storey/steps/sample.py
from enum import Enum from storey import Flow from storey.dtypes import _termination_obj class EmitPeriod(Enum): FIRST = 1 LAST = 2 class SampleWindow(Flow): """ Emits a single event in a window of `window_size` events, in accordance with `emit_period` and `emit_before_termination`. :param window_size: The size of the window we want to sample a single event from. :param emit_period: What event should this step emit for each `window_size` (default: EmitPeriod.First). Available options: 1.1) EmitPeriod.FIRST - will emit the first event in a window `window_size` events. 1.2) EmitPeriod.LAST - will emit the last event in a window of `window_size` events. :param emit_before_termination: On termination signal, should the step emit the last event it seen (default: False). Available options: 2.1) True - The last event seen will be emitted downstream. 2.2) False - The last event seen will NOT be emitted downstream. """ def __init__( self, window_size: int, emit_period: EmitPeriod = EmitPeriod.FIRST, emit_before_termination: bool = False, **kwargs, ): super().__init__(full_event=True, **kwargs) if window_size <= 1: raise ValueError(f"Expected window_size > 1, found {window_size}") if not isinstance(emit_period, EmitPeriod): raise ValueError(f"Expected emit_period of type `EmitPeriod`, got {type(emit_period)}") self._window_size = window_size self._emit_period = emit_period self._emit_before_termination = emit_before_termination self._count = 0 self._last_event = None async def _do(self, event): if event is _termination_obj: if self._last_event is not None: await self._do_downstream(self._last_event) return await self._do_downstream(_termination_obj) else: self._count += 1 if self._emit_before_termination: self._last_event = event if self._should_emit(): self._last_event = None await self._do_downstream(event) if self._count == self._window_size: self._count = 0 def _should_emit(self): if self._emit_period == EmitPeriod.FIRST and self._count == 1: return True elif 
self._emit_period == EmitPeriod.LAST and self._count == self._window_size: return True return False
0.919462
0.528473
from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Aggregated1', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('index_day', models.IntegerField()), ('index_method', models.IntegerField()), ('key_n_days', models.IntegerField()), ('key_window_size', models.IntegerField()), ('key_threshold1', models.FloatField()), ('key_threshold2', models.FloatField()), ('TP', models.IntegerField()), ('TN', models.IntegerField()), ('FP', models.IntegerField()), ('FN', models.IntegerField()), ], ), migrations.CreateModel( name='Aggregated2', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('index_method', models.IntegerField()), ('key_n_days', models.IntegerField()), ('key_window_size', models.IntegerField()), ('key_threshold1', models.FloatField()), ('key_threshold2', models.FloatField()), ('TP', models.IntegerField()), ('TN', models.IntegerField()), ('FP', models.IntegerField()), ('FN', models.IntegerField()), ], ), migrations.AddIndex( model_name='aggregated2', index=models.Index(fields=['index_method', 'key_n_days', 'key_window_size', 'key_threshold1', 'key_threshold2'], name='analysis_ag_index_m_ccbaa0_idx'), ), migrations.AddIndex( model_name='aggregated1', index=models.Index(fields=['index_day', 'index_method', 'key_n_days', 'key_window_size', 'key_threshold1', 'key_threshold2'], name='analysis_ag_index_d_d4dba9_idx'), ), ]
django_src/analysis/migrations/0001_initial.py
from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Aggregated1', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('index_day', models.IntegerField()), ('index_method', models.IntegerField()), ('key_n_days', models.IntegerField()), ('key_window_size', models.IntegerField()), ('key_threshold1', models.FloatField()), ('key_threshold2', models.FloatField()), ('TP', models.IntegerField()), ('TN', models.IntegerField()), ('FP', models.IntegerField()), ('FN', models.IntegerField()), ], ), migrations.CreateModel( name='Aggregated2', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('index_method', models.IntegerField()), ('key_n_days', models.IntegerField()), ('key_window_size', models.IntegerField()), ('key_threshold1', models.FloatField()), ('key_threshold2', models.FloatField()), ('TP', models.IntegerField()), ('TN', models.IntegerField()), ('FP', models.IntegerField()), ('FN', models.IntegerField()), ], ), migrations.AddIndex( model_name='aggregated2', index=models.Index(fields=['index_method', 'key_n_days', 'key_window_size', 'key_threshold1', 'key_threshold2'], name='analysis_ag_index_m_ccbaa0_idx'), ), migrations.AddIndex( model_name='aggregated1', index=models.Index(fields=['index_day', 'index_method', 'key_n_days', 'key_window_size', 'key_threshold1', 'key_threshold2'], name='analysis_ag_index_d_d4dba9_idx'), ), ]
0.681833
0.233029
import numpy as np import os import random from agent import Agent import tensorflow as tf tf.compat.v1.disable_eager_execution() from tensorflow.keras.layers import Dense, Input, Activation from tensorflow.keras import Model from tensorflow.keras.optimizers import Adam from tqdm import trange from utils.logger import Logger from utils.visualizer import visualize class DQN(Agent): def __init__(self, config, env): self.gamma = config["gamma"] # reward discount self.learning_rate = config["learning_rate"] self.memory_size = config["memory_size"] self.epsilon = config["epsilon"] # Exploration rate self.epsilon_min = config["epsilon_min"] self.epsilon_decay = config["epsilon_decay"] self.batch_size = config["batch_size"] self.update_frequency = config["update_frequency"] self.num_actions = env.action_space.n self.num_states = env.observation_space.shape[0] self.max_episode = config["max_episode"] self.max_step = config["max_step"] self.render_environment = config["render_environment"] self.result_path = config["result_path"] self.memory = [] self.env = env self.build_agent() self.logger = Logger(config["slide_window"]) def build_agent(self): input = Input(shape=(self.num_states,)) layer = Dense(24, activation='relu')(input) layer = Dense(self.num_actions)(layer) output = Activation('linear')(layer) model = Model(input, output) adam = Adam(lr=self.learning_rate) model.compile(loss='mse', optimizer=adam) return model # Save <s, a ,r, s'> of each step def store_memory(self, state, action, reward, next_state, done): if len(self.memory) > self.memory_size: self.memory.pop(0) self.memory.append([state, action, reward, next_state, done]) def train(self): # Initialize q_network and target_q_network q_network = self.build_agent() target_q_network = self.build_agent() target_q_network.set_weights(q_network.get_weights()) # Populate memory first state = self.env.reset() print("Warming up...") while len(self.memory) < self.batch_size: action = self.env.action_space.sample() 
next_state, reward, done, info = self.env.step(action) self.store_memory(state, action, reward, next_state, done) if done: state = self.env.reset() print("Warm up complete.") t = trange(self.max_episode) for episode_count in t: state = self.env.reset() current_step = 0 episode_reward = 0 while True: if self.render_environment: self.env.render() # Network predict q_values = q_network.predict(np.reshape(state, (1, self.num_states))).ravel() # Decide if exploring or not if np.random.rand() >= self.epsilon: action = np.argmax(q_values) else: action = random.randrange(self.num_actions) # Perform action next_state, reward, done, info = self.env.step(action) # Store transition episode_reward += reward self.store_memory(state, action, reward, next_state, done) # Decrease exploration if self.epsilon > self.epsilon_min: self.epsilon *= self.epsilon_decay # Sample minibatch from memory minibatch = random.sample(self.memory, self.batch_size) # Transform the minibatch for processing minibatch = list(zip(*minibatch)) # Calculate all td_targets for current minibatch states, actions, rewards, next_states, dones = minibatch batch_q_values = q_network.predict_on_batch(np.array(states)) batch_next_q_values = target_q_network.predict_on_batch(np.array(next_states)) max_next_q_values = np.max(batch_next_q_values, axis=1) td_targets = batch_q_values.copy() for i in range(self.batch_size): td_targets[i][actions[i]] = rewards[i] + self.gamma * (1 - dones[i]) * max_next_q_values[i] # Train network q_network.train_on_batch(np.array(states), np.array(td_targets)) # Copy q_network to target_q_network if done or current_step % self.update_frequency is 0: target_q_network.set_weights(q_network.get_weights()) # For logging and visualizing data if done or current_step > self.max_step: self.logger.log_history(episode_reward, episode_count) self.logger.show_progress(t, episode_reward, episode_count) if episode_count % self.logger.slide_window == 0: visualize(self.logger.rewards, 
self.logger.running_rewards, self.logger.episode_counts, os.path.join(self.result_path, "DQN.png")) break state = next_state current_step += 1 if __name__ == '__main__': agent = DQN() agent.train()
agents/DQN.py
import numpy as np import os import random from agent import Agent import tensorflow as tf tf.compat.v1.disable_eager_execution() from tensorflow.keras.layers import Dense, Input, Activation from tensorflow.keras import Model from tensorflow.keras.optimizers import Adam from tqdm import trange from utils.logger import Logger from utils.visualizer import visualize class DQN(Agent): def __init__(self, config, env): self.gamma = config["gamma"] # reward discount self.learning_rate = config["learning_rate"] self.memory_size = config["memory_size"] self.epsilon = config["epsilon"] # Exploration rate self.epsilon_min = config["epsilon_min"] self.epsilon_decay = config["epsilon_decay"] self.batch_size = config["batch_size"] self.update_frequency = config["update_frequency"] self.num_actions = env.action_space.n self.num_states = env.observation_space.shape[0] self.max_episode = config["max_episode"] self.max_step = config["max_step"] self.render_environment = config["render_environment"] self.result_path = config["result_path"] self.memory = [] self.env = env self.build_agent() self.logger = Logger(config["slide_window"]) def build_agent(self): input = Input(shape=(self.num_states,)) layer = Dense(24, activation='relu')(input) layer = Dense(self.num_actions)(layer) output = Activation('linear')(layer) model = Model(input, output) adam = Adam(lr=self.learning_rate) model.compile(loss='mse', optimizer=adam) return model # Save <s, a ,r, s'> of each step def store_memory(self, state, action, reward, next_state, done): if len(self.memory) > self.memory_size: self.memory.pop(0) self.memory.append([state, action, reward, next_state, done]) def train(self): # Initialize q_network and target_q_network q_network = self.build_agent() target_q_network = self.build_agent() target_q_network.set_weights(q_network.get_weights()) # Populate memory first state = self.env.reset() print("Warming up...") while len(self.memory) < self.batch_size: action = self.env.action_space.sample() 
next_state, reward, done, info = self.env.step(action) self.store_memory(state, action, reward, next_state, done) if done: state = self.env.reset() print("Warm up complete.") t = trange(self.max_episode) for episode_count in t: state = self.env.reset() current_step = 0 episode_reward = 0 while True: if self.render_environment: self.env.render() # Network predict q_values = q_network.predict(np.reshape(state, (1, self.num_states))).ravel() # Decide if exploring or not if np.random.rand() >= self.epsilon: action = np.argmax(q_values) else: action = random.randrange(self.num_actions) # Perform action next_state, reward, done, info = self.env.step(action) # Store transition episode_reward += reward self.store_memory(state, action, reward, next_state, done) # Decrease exploration if self.epsilon > self.epsilon_min: self.epsilon *= self.epsilon_decay # Sample minibatch from memory minibatch = random.sample(self.memory, self.batch_size) # Transform the minibatch for processing minibatch = list(zip(*minibatch)) # Calculate all td_targets for current minibatch states, actions, rewards, next_states, dones = minibatch batch_q_values = q_network.predict_on_batch(np.array(states)) batch_next_q_values = target_q_network.predict_on_batch(np.array(next_states)) max_next_q_values = np.max(batch_next_q_values, axis=1) td_targets = batch_q_values.copy() for i in range(self.batch_size): td_targets[i][actions[i]] = rewards[i] + self.gamma * (1 - dones[i]) * max_next_q_values[i] # Train network q_network.train_on_batch(np.array(states), np.array(td_targets)) # Copy q_network to target_q_network if done or current_step % self.update_frequency is 0: target_q_network.set_weights(q_network.get_weights()) # For logging and visualizing data if done or current_step > self.max_step: self.logger.log_history(episode_reward, episode_count) self.logger.show_progress(t, episode_reward, episode_count) if episode_count % self.logger.slide_window == 0: visualize(self.logger.rewards, 
self.logger.running_rewards, self.logger.episode_counts, os.path.join(self.result_path, "DQN.png")) break state = next_state current_step += 1 if __name__ == '__main__': agent = DQN() agent.train()
0.801781
0.301683
import subprocess from pathlib import Path from typing import List, Optional, Sequence import shutil import os from flacmirror.options import Options # We need this so that the child processes do not catch the signals ... def preexec_function(): os.setpgrp() def check_requirements(options: Options) -> bool: print("Checking program requirements:") requirements: List[Process] = [] if options.albumart in ["resize", "optimize"]: requirements.append(ImageMagick(False)) if options.codec == "vorbis": requirements.append(Oggenc(None, False)) if options.albumart != "discard": requirements.append(VorbisComment(False)) elif options.codec == "opus": requirements.append(Opusenc(None, False)) if options.codec != "discard" or ( options.codec == "vorbis" and options.albumart == "keep" ): requirements.append(Metaflac(False)) fulfilled = True for req in requirements: print(f" {req.executable_status()}") if not req.available(): fulfilled = False print(f" {req.executable_info()}") return fulfilled class Process: def __init__(self, executable: str, debug: bool = False): self.executable = executable self.debug = debug def available(self): return shutil.which(self.executable) is not None def executable_status(self) -> str: available = "\033[92m" + "availble" + "\033[0m" unavailable = "\033[91m" + "unavailble" + "\033[0m" status = available if self.available() else unavailable message = f"{self.executable} ({shutil.which(self.executable)}) [{status}]" return message def executable_info(self) -> str: return "" def print_debug_info(self, args: List[str]): if self.debug: print(f"Calling process: {args}") class FFMPEG(Process): def __init__(self, debug: bool): super().__init__("ffmpeg", debug) def executable_info(self): return 'Can be found on most distros as a package "ffmpeg" ' def extract_picture(self, file: Path) -> bytes: # exctract coverart as jpeg and read it in args = [ self.executable, "loglevel", "panic", "-i", str(file), "-an", "-c:v", "copy", "-f", "mjpeg", "-", ] 
self.print_debug_info(args) results = subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function, ) return results.stdout class Metaflac(Process): def __init__(self, debug: bool): super().__init__("metaflac", debug) def executable_info(self): return 'Part of the package "flac" on most distros' def extract_picture(self, file: Path) -> Optional[bytes]: # exctract coverart as jpeg and read it in args = [ self.executable, str(file), "--export-picture-to", "-", ] self.print_debug_info(args) try: results = subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function, ) except subprocess.CalledProcessError as e: if b"FLAC file has no PICTURE block" in e.stderr: return None else: raise e from None return results.stdout class ImageMagick(Process): def __init__(self, debug: bool): super().__init__("convert", debug) def executable_info(self): return 'Part of the package "imagemagick" on most distros' def optimize_picture(self, data: bytes) -> bytes: args = [ self.executable, "-", "-strip", "-interlace", "Plane", "-sampling-factor", "4:2:0", "-colorspace", "sRGB", "-quality", "85%", "jpeg:-", ] self.print_debug_info(args) results = subprocess.run( args, capture_output=True, check=True, input=data, preexec_fn=preexec_function, ) return results.stdout def optimize_and_resize_picture(self, data: bytes, max_width: int) -> bytes: args = [ self.executable, "-", "-strip", "-interlace", "Plane", "-sampling-factor", "4:2:0", "-colorspace", "sRGB", "-resize", f"{max_width}>", "-quality", "85%", "jpeg:-", ] self.print_debug_info(args) results = subprocess.run( args, capture_output=True, check=True, input=data, preexec_fn=preexec_function, ) return results.stdout class Opusenc(Process): def __init__(self, quality: Optional[float], debug: bool): super().__init__("opusenc", debug) self.additional_args: List[str] = [] if quality is not None: self.additional_args.extend(["--bitrate", f"{quality}"]) def executable_info(self): return 'Part of the 
package "opus-tools" on most distros' def encode( self, input_f: Path, output_f: Path, discard_pictures: bool = False, picture_paths: Optional[Sequence[Path]] = None, ): args = [ self.executable, *self.additional_args, str(input_f), str(output_f), ] if discard_pictures: args.extend(["--discard-pictures"]) if picture_paths is not None: for picture in picture_paths: args.extend(["--picture", f"||||{str(picture)}"]) self.print_debug_info(args) subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function ) class Oggenc(Process): def __init__(self, quality: Optional[int], debug: bool): super().__init__("oggenc", debug) self.additional_args: List[str] = [] if quality is not None: self.additional_args.extend(["--quality", f"{quality}"]) def executable_info(self): return 'Part of the package "vorbis-tools" on most distros' def encode( self, input_f: Path, output_f: Path, ): args = [ self.executable, *self.additional_args, str(input_f), "-o", str(output_f), ] self.print_debug_info(args) subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function ) class VorbisComment(Process): def __init__(self, debug: bool): super().__init__("vorbiscomment", debug) def executable_info(self): return 'Part of the package "vorbis-tools" on most distros' def add_comment(self, file: Path, key: str, value: str): args = [self.executable, str(file), "-R", "-a"] self.print_debug_info(args) subprocess.run( args, capture_output=True, check=True, input=f"{key}={value}".encode(), preexec_fn=preexec_function, )
flacmirror/processes.py
import subprocess from pathlib import Path from typing import List, Optional, Sequence import shutil import os from flacmirror.options import Options # We need this so that the child processes do not catch the signals ... def preexec_function(): os.setpgrp() def check_requirements(options: Options) -> bool: print("Checking program requirements:") requirements: List[Process] = [] if options.albumart in ["resize", "optimize"]: requirements.append(ImageMagick(False)) if options.codec == "vorbis": requirements.append(Oggenc(None, False)) if options.albumart != "discard": requirements.append(VorbisComment(False)) elif options.codec == "opus": requirements.append(Opusenc(None, False)) if options.codec != "discard" or ( options.codec == "vorbis" and options.albumart == "keep" ): requirements.append(Metaflac(False)) fulfilled = True for req in requirements: print(f" {req.executable_status()}") if not req.available(): fulfilled = False print(f" {req.executable_info()}") return fulfilled class Process: def __init__(self, executable: str, debug: bool = False): self.executable = executable self.debug = debug def available(self): return shutil.which(self.executable) is not None def executable_status(self) -> str: available = "\033[92m" + "availble" + "\033[0m" unavailable = "\033[91m" + "unavailble" + "\033[0m" status = available if self.available() else unavailable message = f"{self.executable} ({shutil.which(self.executable)}) [{status}]" return message def executable_info(self) -> str: return "" def print_debug_info(self, args: List[str]): if self.debug: print(f"Calling process: {args}") class FFMPEG(Process): def __init__(self, debug: bool): super().__init__("ffmpeg", debug) def executable_info(self): return 'Can be found on most distros as a package "ffmpeg" ' def extract_picture(self, file: Path) -> bytes: # exctract coverart as jpeg and read it in args = [ self.executable, "loglevel", "panic", "-i", str(file), "-an", "-c:v", "copy", "-f", "mjpeg", "-", ] 
self.print_debug_info(args) results = subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function, ) return results.stdout class Metaflac(Process): def __init__(self, debug: bool): super().__init__("metaflac", debug) def executable_info(self): return 'Part of the package "flac" on most distros' def extract_picture(self, file: Path) -> Optional[bytes]: # exctract coverart as jpeg and read it in args = [ self.executable, str(file), "--export-picture-to", "-", ] self.print_debug_info(args) try: results = subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function, ) except subprocess.CalledProcessError as e: if b"FLAC file has no PICTURE block" in e.stderr: return None else: raise e from None return results.stdout class ImageMagick(Process): def __init__(self, debug: bool): super().__init__("convert", debug) def executable_info(self): return 'Part of the package "imagemagick" on most distros' def optimize_picture(self, data: bytes) -> bytes: args = [ self.executable, "-", "-strip", "-interlace", "Plane", "-sampling-factor", "4:2:0", "-colorspace", "sRGB", "-quality", "85%", "jpeg:-", ] self.print_debug_info(args) results = subprocess.run( args, capture_output=True, check=True, input=data, preexec_fn=preexec_function, ) return results.stdout def optimize_and_resize_picture(self, data: bytes, max_width: int) -> bytes: args = [ self.executable, "-", "-strip", "-interlace", "Plane", "-sampling-factor", "4:2:0", "-colorspace", "sRGB", "-resize", f"{max_width}>", "-quality", "85%", "jpeg:-", ] self.print_debug_info(args) results = subprocess.run( args, capture_output=True, check=True, input=data, preexec_fn=preexec_function, ) return results.stdout class Opusenc(Process): def __init__(self, quality: Optional[float], debug: bool): super().__init__("opusenc", debug) self.additional_args: List[str] = [] if quality is not None: self.additional_args.extend(["--bitrate", f"{quality}"]) def executable_info(self): return 'Part of the 
package "opus-tools" on most distros' def encode( self, input_f: Path, output_f: Path, discard_pictures: bool = False, picture_paths: Optional[Sequence[Path]] = None, ): args = [ self.executable, *self.additional_args, str(input_f), str(output_f), ] if discard_pictures: args.extend(["--discard-pictures"]) if picture_paths is not None: for picture in picture_paths: args.extend(["--picture", f"||||{str(picture)}"]) self.print_debug_info(args) subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function ) class Oggenc(Process): def __init__(self, quality: Optional[int], debug: bool): super().__init__("oggenc", debug) self.additional_args: List[str] = [] if quality is not None: self.additional_args.extend(["--quality", f"{quality}"]) def executable_info(self): return 'Part of the package "vorbis-tools" on most distros' def encode( self, input_f: Path, output_f: Path, ): args = [ self.executable, *self.additional_args, str(input_f), "-o", str(output_f), ] self.print_debug_info(args) subprocess.run( args, capture_output=True, check=True, preexec_fn=preexec_function ) class VorbisComment(Process): def __init__(self, debug: bool): super().__init__("vorbiscomment", debug) def executable_info(self): return 'Part of the package "vorbis-tools" on most distros' def add_comment(self, file: Path, key: str, value: str): args = [self.executable, str(file), "-R", "-a"] self.print_debug_info(args) subprocess.run( args, capture_output=True, check=True, input=f"{key}={value}".encode(), preexec_fn=preexec_function, )
0.738198
0.206914
from nose.tools import eq_ import mock from django.conf import settings from django.contrib.auth.models import User, Group, Permission from funfactory.urlresolvers import reverse from airmozilla.main.models import UserProfile, Event, CuratedGroup from airmozilla.base.tests.test_mozillians import Response, IN_GROUPS from .base import ManageTestCase class TestPermissions(ManageTestCase): def test_unauthorized(self): """ Client with no log in - should be rejected. """ self.client.logout() response = self.client.get(reverse('manage:dashboard')) self.assertRedirects(response, settings.LOGIN_URL + '?next=' + reverse('manage:dashboard')) def test_not_staff(self): """ User is not staff - should be rejected. """ self.user.is_staff = False self.user.save() response = self.client.get(reverse('manage:dashboard')) self.assertRedirects(response, settings.LOGIN_URL + '?next=' + reverse('manage:dashboard')) def test_staff_home(self): """ User is staff - should get an OK homepage. """ response = self.client.get(reverse('manage:dashboard')) eq_(response.status_code, 200) @mock.patch('requests.get') def test_editing_events_with_curated_groups(self, rget): def mocked_get(url, **options): if 'peterbe' in url: print return Response(IN_GROUPS) raise NotImplementedError(url) rget.side_effect = mocked_get self.client.logout() assert self.client.get(reverse('manage:dashboard')).status_code == 302 # now log in as a contributor contributor = User.objects.create_user( 'peter', '<EMAIL>', '<PASSWORD>' ) producers = Group.objects.create(name='Producer') change_event_permission = Permission.objects.get( codename='change_event' ) change_event_others_permission = Permission.objects.get( codename='change_event_others' ) producers.permissions.add(change_event_permission) producers.permissions.add(change_event_others_permission) contributor.groups.add(producers) contributor.is_staff = True contributor.save() UserProfile.objects.create( user=contributor, contributor=True ) assert 
self.client.login(username='peter', password='<PASSWORD>') event = Event.objects.get(title='Test event') assert event.privacy == Event.PRIVACY_PUBLIC url = reverse('manage:event_edit', args=(event.id,)) response = self.client.get(url) eq_(response.status_code, 200) # the contributor producer can't view it if it's private event.privacy = Event.PRIVACY_COMPANY event.save() response = self.client.get(url) eq_(response.status_code, 302) # but it's ok if it's for contributors event.privacy = Event.PRIVACY_CONTRIBUTORS event.save() response = self.client.get(url) eq_(response.status_code, 200) # but not if the event is only open to certain curated groups curated_group = CuratedGroup.objects.create( event=event, name='badasses' ) response = self.client.get(url) eq_(response.status_code, 302) curated_group.delete() CuratedGroup.objects.create( event=event, name='swedes' ) response = self.client.get(url) eq_(response.status_code, 200)
airmozilla/manage/tests/views/test_permissions.py
from nose.tools import eq_ import mock from django.conf import settings from django.contrib.auth.models import User, Group, Permission from funfactory.urlresolvers import reverse from airmozilla.main.models import UserProfile, Event, CuratedGroup from airmozilla.base.tests.test_mozillians import Response, IN_GROUPS from .base import ManageTestCase class TestPermissions(ManageTestCase): def test_unauthorized(self): """ Client with no log in - should be rejected. """ self.client.logout() response = self.client.get(reverse('manage:dashboard')) self.assertRedirects(response, settings.LOGIN_URL + '?next=' + reverse('manage:dashboard')) def test_not_staff(self): """ User is not staff - should be rejected. """ self.user.is_staff = False self.user.save() response = self.client.get(reverse('manage:dashboard')) self.assertRedirects(response, settings.LOGIN_URL + '?next=' + reverse('manage:dashboard')) def test_staff_home(self): """ User is staff - should get an OK homepage. """ response = self.client.get(reverse('manage:dashboard')) eq_(response.status_code, 200) @mock.patch('requests.get') def test_editing_events_with_curated_groups(self, rget): def mocked_get(url, **options): if 'peterbe' in url: print return Response(IN_GROUPS) raise NotImplementedError(url) rget.side_effect = mocked_get self.client.logout() assert self.client.get(reverse('manage:dashboard')).status_code == 302 # now log in as a contributor contributor = User.objects.create_user( 'peter', '<EMAIL>', '<PASSWORD>' ) producers = Group.objects.create(name='Producer') change_event_permission = Permission.objects.get( codename='change_event' ) change_event_others_permission = Permission.objects.get( codename='change_event_others' ) producers.permissions.add(change_event_permission) producers.permissions.add(change_event_others_permission) contributor.groups.add(producers) contributor.is_staff = True contributor.save() UserProfile.objects.create( user=contributor, contributor=True ) assert 
self.client.login(username='peter', password='<PASSWORD>') event = Event.objects.get(title='Test event') assert event.privacy == Event.PRIVACY_PUBLIC url = reverse('manage:event_edit', args=(event.id,)) response = self.client.get(url) eq_(response.status_code, 200) # the contributor producer can't view it if it's private event.privacy = Event.PRIVACY_COMPANY event.save() response = self.client.get(url) eq_(response.status_code, 302) # but it's ok if it's for contributors event.privacy = Event.PRIVACY_CONTRIBUTORS event.save() response = self.client.get(url) eq_(response.status_code, 200) # but not if the event is only open to certain curated groups curated_group = CuratedGroup.objects.create( event=event, name='badasses' ) response = self.client.get(url) eq_(response.status_code, 302) curated_group.delete() CuratedGroup.objects.create( event=event, name='swedes' ) response = self.client.get(url) eq_(response.status_code, 200)
0.508056
0.154823
import time import pprint import numpy as np from pytorch_lightning import Callback class TerminalCallback(Callback): """ Terminal callback for terminal logging and metric saving. """ def __init__(self, display=None, output_every_epoch=1, id=None): """ Initialize with keys to listen to and output frequency. """ self.display = display self.data = {} self.durations = list() self.output_every_epoch = output_every_epoch self.id = id self.epoch_start_time = None def on_train_epoch_start(self, trainer, pl_module): """ Save starting time on epoch start. """ self.epoch_start_time = time.time() def on_epoch_end(self, trainer, pl_module): """ Calculate time, log/save metric on epoch end. """ epoch = pl_module.current_epoch + 1 logs = trainer.callback_metrics for key in logs.keys(): if key not in self.data: self.data[key] = list() self.data[key].append((epoch, logs[key].cpu())) def on_validation_epoch_end(self, trainer, pl_module): """ Print training process information """ epoch = pl_module.current_epoch + 1 max_epochs = trainer.max_epochs if self.epoch_start_time is None: duration = -1 else: duration = time.time() - self.epoch_start_time self.durations.append(duration) if self.output_every_epoch >= 1 and (epoch - 1) % self.output_every_epoch == 0: if self.id is None: output = f'Epoch {epoch:4}/{max_epochs} ' else: output = f'[{self.id:06}] Epoch {epoch:4}/{max_epochs} ' # set all keys if display is None if self.display is None: keys_to_display = self.data.keys() else: keys_to_display = self.display for key in keys_to_display: data = self.data[key][-1] _, metric = data output += ' ' + key + f' {metric:.4f}' output += f' Duration {duration:.2f} s' print(output) def on_train_end(self, trainer, pl_module): """ Calculate best metric values and their epoch. 
""" metric = {} for key in self.data.keys(): epochs = [epoch for epoch, _ in self.data[key]] values = [value for _, value in self.data[key]] min_idx = np.argmin(values, axis=0) metric[f'opt/epoch/{key}'] = epochs[min_idx] metric[f'opt/{key}'] = values[min_idx] self.opt_metric = metric.copy() metric['avg_duration'] = np.mean(self.durations) pprint.pprint(metric) print('\n')
src/pytorch_lightning/callback.py
import time import pprint import numpy as np from pytorch_lightning import Callback class TerminalCallback(Callback): """ Terminal callback for terminal logging and metric saving. """ def __init__(self, display=None, output_every_epoch=1, id=None): """ Initialize with keys to listen to and output frequency. """ self.display = display self.data = {} self.durations = list() self.output_every_epoch = output_every_epoch self.id = id self.epoch_start_time = None def on_train_epoch_start(self, trainer, pl_module): """ Save starting time on epoch start. """ self.epoch_start_time = time.time() def on_epoch_end(self, trainer, pl_module): """ Calculate time, log/save metric on epoch end. """ epoch = pl_module.current_epoch + 1 logs = trainer.callback_metrics for key in logs.keys(): if key not in self.data: self.data[key] = list() self.data[key].append((epoch, logs[key].cpu())) def on_validation_epoch_end(self, trainer, pl_module): """ Print training process information """ epoch = pl_module.current_epoch + 1 max_epochs = trainer.max_epochs if self.epoch_start_time is None: duration = -1 else: duration = time.time() - self.epoch_start_time self.durations.append(duration) if self.output_every_epoch >= 1 and (epoch - 1) % self.output_every_epoch == 0: if self.id is None: output = f'Epoch {epoch:4}/{max_epochs} ' else: output = f'[{self.id:06}] Epoch {epoch:4}/{max_epochs} ' # set all keys if display is None if self.display is None: keys_to_display = self.data.keys() else: keys_to_display = self.display for key in keys_to_display: data = self.data[key][-1] _, metric = data output += ' ' + key + f' {metric:.4f}' output += f' Duration {duration:.2f} s' print(output) def on_train_end(self, trainer, pl_module): """ Calculate best metric values and their epoch. 
""" metric = {} for key in self.data.keys(): epochs = [epoch for epoch, _ in self.data[key]] values = [value for _, value in self.data[key]] min_idx = np.argmin(values, axis=0) metric[f'opt/epoch/{key}'] = epochs[min_idx] metric[f'opt/{key}'] = values[min_idx] self.opt_metric = metric.copy() metric['avg_duration'] = np.mean(self.durations) pprint.pprint(metric) print('\n')
0.560493
0.271113
import shutil from itertools import product import math import torch import numpy as np import matplotlib.pyplot as plt import matplotlib from diameter_learning.settings import TEST_OUTPUT_PATH from diameter_learning.nets.layers import ( CenterOfMass2DExtractor, GaussianRadiusExtractor, VanillaDiameterExtractor, MomentGaussianRadiusExtractor ) matplotlib.use('Agg') def generate_ellipse_segmentation( shape: tuple, center_of_mass: tuple, ellipse_axis: tuple ): """Generate an ellipse shaped segmentation :param shape: Shape the 2D image :param center_of_mass: Coordinates of the center of mass :param ellipse_axis: ellipse parameters (a, b) in equation `\\left(x/a\\right)^2 + \\left(y/b\\right)^2 = 1` """ # Generate array of indices arr = torch.arange( shape[0] * shape[1] ).reshape( shape[0], shape[1] ) # Transform array of indices into a segmentation segmentation = ( ( (arr // shape[1] - center_of_mass[0]) / ellipse_axis[0] ) ** 2 + ( (arr % shape[1] - center_of_mass[1]) / ellipse_axis[1] ) ** 2 ) < 1 return segmentation def generate_batch_of_segmentation( shape=(64, 64, 16), batch_size=3, number_of_features=2 ): """Generate batch of cylindric segmentation :param shape: 3D shape of the image :param batch_size: Batch size :param number_of_features: number_of_features """ seg_batch = torch.zeros(batch_size, number_of_features, *shape) for nb, nf, nz in product( range(batch_size), range(number_of_features), range(shape[-1]) ): seg_batch[nb, nf, :, :, nz] = generate_ellipse_segmentation( shape[:2], (20 + shape[2] + 3 * nb, 20 + nz), (5 * (nf + 1), 6 * (nf + 1)) ) return seg_batch def test_center_of_mass_2d_extractor_forward(): """Test CenterOfMass2DExtractor forward""" # Clean test output folder shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Generate variables seg_batch = generate_batch_of_segmentation().cpu() center_of_mass_extractor = CenterOfMass2DExtractor() # Test differentiability torch.manual_seed(0) center_of_mass = 
center_of_mass_extractor.forward( torch.nn.Sigmoid()( torch.nn.Conv3d(2, 2, kernel_size=1, padding=0, dilation=1)( seg_batch.cpu() ) ) ) loss = torch.nn.MSELoss()( center_of_mass.real, torch.rand(center_of_mass.shape) ) loss.backward() assert (loss - 962) ** 2 <= 1 # Test result center_of_mass = center_of_mass_extractor.forward( seg_batch ) assert center_of_mass.shape == (3, 2, 16) # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(seg_batch[nb, nf, :, :, 0]) plt.savefig( TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}_without_dots.png' ) plt.plot( center_of_mass[nb, nf, 0].real, center_of_mass[nb, nf, 0].imag, 'ro' ) plt.savefig(TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}.png') def test_gaussian_radius_extractor_get_radiuses(): """test GaussianRadiusExtractor get_radiuses""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Generate radiuses seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = GaussianRadiusExtractor( nb_radiuses=24 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Index array index_array = torch.arange( seg_batch.shape[2] * seg_batch.shape[3] ).reshape( seg_batch.shape[2], seg_batch.shape[3] ) x_indices = index_array % seg_batch.shape[3] y_indices = index_array // seg_batch.shape[3] for angle in [-math.pi*11/12, math.pi/3, math.pi*11/12]: filters = gaussian_radius_extractor.get_filter( center_of_mass_extractor(seg_batch), torch.complex(x_indices.float(), y_indices.float()), angle ) assert filters.shape == (3, 2, 64, 64, 16) # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(filters[nb, nf, :, :, 0]) plt.savefig( TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}_angle_{angle.__round__(2)}.png' ) def test_gaussian_radius_extractor_forward(): """Test GaussianRadiusExtractor forward""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) 
TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Intialize modules and test segmentation seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = GaussianRadiusExtractor( nb_radiuses=24, sigma=0.2 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Test differentiability torch.manual_seed(0) input_ = torch.nn.Sigmoid()( torch.nn.Conv3d( 2, 2, kernel_size=1, padding=0, dilation=1)( seg_batch.cpu() ) ) center_of_mass = center_of_mass_extractor(input_) radiuses = gaussian_radius_extractor(input_, center_of_mass) loss = torch.nn.MSELoss()( radiuses, torch.rand(radiuses.shape) ) loss.backward() assert (loss - 610) ** 2 <= 1 # Generate radiuses center_of_mass = center_of_mass_extractor(seg_batch) radiuses = gaussian_radius_extractor.forward( seg_batch, center_of_mass ) assert radiuses.shape == (3, 2, 24, 16) # Test gaussian x_tensor = torch.linspace(-3, 3, 1001) y_tensor = gaussian_radius_extractor.gaussian(x_tensor, 0) gaussian_area = 6 * y_tensor.sum() / 1001 plt.clf() plt.plot(x_tensor, y_tensor) plt.title( f''' Area { 6 * gaussian_radius_extractor.gaussian(x_tensor, 0).sum() / 1001 } ''' ) plt.savefig(TEST_OUTPUT_PATH / 'gaussian.png') assert (gaussian_area - 1) ** 2 < 0.00001 # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(seg_batch[nb, nf, :, :, 0]) plt.plot( center_of_mass[nb, nf, 0].real, center_of_mass[nb, nf, 0].imag, 'ro' ) plt.scatter( [ center_of_mass[nb, nf, 0].real + radiuses[nb, nf, j, 0] * np.cos(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ], [ center_of_mass[nb, nf, 0].imag + radiuses[nb, nf, j, 0] * np.sin(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ] ) plt.savefig(TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}.png') def test_moment_gaussian_radius_extractor_get_centered_plan(): """Test MomentGaussianRadiusExtractor get_centered_plan""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) 
TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Initialize modules and test segmentation for moment in [[0], [1], [2]]: seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = MomentGaussianRadiusExtractor( moments=moment, nb_radiuses=24, sigma=0.2 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Index array index_array = torch.arange( seg_batch.shape[2] * seg_batch.shape[3] ).reshape( seg_batch.shape[2], seg_batch.shape[3] ) x_indices = index_array % seg_batch.shape[3] y_indices = index_array // seg_batch.shape[3] centered_plan = gaussian_radius_extractor.get_centered_plan( center_of_mass_extractor(seg_batch), torch.complex(x_indices.float(), y_indices.float()) ) assert centered_plan.shape == (3, 2, 64, 64, 16) # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(centered_plan[nb, nf, :, :, 0].abs()) plt.savefig( TEST_OUTPUT_PATH / f'centered_plan_batch_{nb}_feature_{nf}.png' ) def test_moment_gaussian_radius_extractor_forward(): """Test MomentGaussianRadiusExtractor forward""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Intialize modules and test segmentation seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = MomentGaussianRadiusExtractor( moments=[0], nb_radiuses=24, sigma=0.2 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Test differentiability torch.manual_seed(0) input_ = torch.nn.Sigmoid()( torch.nn.Conv3d( 2, 2, kernel_size=1, padding=0, dilation=1)( seg_batch.cpu() ) ) center_of_mass = center_of_mass_extractor(input_) radiuses = gaussian_radius_extractor(input_, center_of_mass) loss = torch.nn.MSELoss()( radiuses, torch.rand(radiuses.shape) ) loss.backward() assert (loss - 610) ** 2 <= 1 # Generate radiuses center_of_mass = center_of_mass_extractor(seg_batch) radiuses = gaussian_radius_extractor.forward( seg_batch, center_of_mass ) assert radiuses.shape == (1, 3, 2, 24, 16) # 
Test gaussian x_tensor = torch.linspace(-3, 3, 1001) y_tensor = gaussian_radius_extractor.gaussian(x_tensor, 0) gaussian_area = 6 * y_tensor.sum() / 1001 plt.plot(x_tensor, y_tensor) plt.title( f''' Area {6 * gaussian_radius_extractor.gaussian(x_tensor, 0).sum() / 1001} ''' ) plt.savefig(TEST_OUTPUT_PATH / 'gaussian.png') assert (gaussian_area - 1) ** 2 < 0.00001 # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(seg_batch[nb, nf, :, :, 0]) plt.plot( center_of_mass[nb, nf, 0].real, center_of_mass[nb, nf, 0].imag, 'ro' ) plt.scatter( [ center_of_mass[nb, nf, 0].real + radiuses[0, nb, nf, j, 0] * np.cos(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ], [ center_of_mass[nb, nf, 0].imag + radiuses[0, nb, nf, j, 0] * np.sin(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ] ) plt.savefig(TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}.png') def test_vanilla_diameter_extrator_forward(): """Test VanillaDiameterExtrator forward""" # Test value vanilla_diameter_extractor = VanillaDiameterExtractor(8) x_tensor = torch.tensor( [ 3, 2 * math.sqrt(2), 4, math.sqrt(2), 5, 3 * math.sqrt(2), 2, 3 * math.sqrt(2) ] ).reshape(1, 1, 8, 1) diameters = vanilla_diameter_extractor.forward(x_tensor) assert diameters.shape == (1, 1, 1) assert (diameters.sum() - math.sqrt(73)) ** 2 < 0.0001 # Test backpropagation torch.manual_seed(0) x_tensor = torch.nn.Sigmoid()( torch.nn.Conv2d(1, 1, 1, padding=0)(x_tensor) ) diameters = vanilla_diameter_extractor(x_tensor) loss = torch.nn.MSELoss()(diameters, torch.rand(1, 1, 1)) loss.backward()
test/nets/layers/test_diameter.py
import shutil from itertools import product import math import torch import numpy as np import matplotlib.pyplot as plt import matplotlib from diameter_learning.settings import TEST_OUTPUT_PATH from diameter_learning.nets.layers import ( CenterOfMass2DExtractor, GaussianRadiusExtractor, VanillaDiameterExtractor, MomentGaussianRadiusExtractor ) matplotlib.use('Agg') def generate_ellipse_segmentation( shape: tuple, center_of_mass: tuple, ellipse_axis: tuple ): """Generate an ellipse shaped segmentation :param shape: Shape the 2D image :param center_of_mass: Coordinates of the center of mass :param ellipse_axis: ellipse parameters (a, b) in equation `\\left(x/a\\right)^2 + \\left(y/b\\right)^2 = 1` """ # Generate array of indices arr = torch.arange( shape[0] * shape[1] ).reshape( shape[0], shape[1] ) # Transform array of indices into a segmentation segmentation = ( ( (arr // shape[1] - center_of_mass[0]) / ellipse_axis[0] ) ** 2 + ( (arr % shape[1] - center_of_mass[1]) / ellipse_axis[1] ) ** 2 ) < 1 return segmentation def generate_batch_of_segmentation( shape=(64, 64, 16), batch_size=3, number_of_features=2 ): """Generate batch of cylindric segmentation :param shape: 3D shape of the image :param batch_size: Batch size :param number_of_features: number_of_features """ seg_batch = torch.zeros(batch_size, number_of_features, *shape) for nb, nf, nz in product( range(batch_size), range(number_of_features), range(shape[-1]) ): seg_batch[nb, nf, :, :, nz] = generate_ellipse_segmentation( shape[:2], (20 + shape[2] + 3 * nb, 20 + nz), (5 * (nf + 1), 6 * (nf + 1)) ) return seg_batch def test_center_of_mass_2d_extractor_forward(): """Test CenterOfMass2DExtractor forward""" # Clean test output folder shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Generate variables seg_batch = generate_batch_of_segmentation().cpu() center_of_mass_extractor = CenterOfMass2DExtractor() # Test differentiability torch.manual_seed(0) center_of_mass = 
center_of_mass_extractor.forward( torch.nn.Sigmoid()( torch.nn.Conv3d(2, 2, kernel_size=1, padding=0, dilation=1)( seg_batch.cpu() ) ) ) loss = torch.nn.MSELoss()( center_of_mass.real, torch.rand(center_of_mass.shape) ) loss.backward() assert (loss - 962) ** 2 <= 1 # Test result center_of_mass = center_of_mass_extractor.forward( seg_batch ) assert center_of_mass.shape == (3, 2, 16) # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(seg_batch[nb, nf, :, :, 0]) plt.savefig( TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}_without_dots.png' ) plt.plot( center_of_mass[nb, nf, 0].real, center_of_mass[nb, nf, 0].imag, 'ro' ) plt.savefig(TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}.png') def test_gaussian_radius_extractor_get_radiuses(): """test GaussianRadiusExtractor get_radiuses""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Generate radiuses seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = GaussianRadiusExtractor( nb_radiuses=24 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Index array index_array = torch.arange( seg_batch.shape[2] * seg_batch.shape[3] ).reshape( seg_batch.shape[2], seg_batch.shape[3] ) x_indices = index_array % seg_batch.shape[3] y_indices = index_array // seg_batch.shape[3] for angle in [-math.pi*11/12, math.pi/3, math.pi*11/12]: filters = gaussian_radius_extractor.get_filter( center_of_mass_extractor(seg_batch), torch.complex(x_indices.float(), y_indices.float()), angle ) assert filters.shape == (3, 2, 64, 64, 16) # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(filters[nb, nf, :, :, 0]) plt.savefig( TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}_angle_{angle.__round__(2)}.png' ) def test_gaussian_radius_extractor_forward(): """Test GaussianRadiusExtractor forward""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) 
TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Intialize modules and test segmentation seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = GaussianRadiusExtractor( nb_radiuses=24, sigma=0.2 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Test differentiability torch.manual_seed(0) input_ = torch.nn.Sigmoid()( torch.nn.Conv3d( 2, 2, kernel_size=1, padding=0, dilation=1)( seg_batch.cpu() ) ) center_of_mass = center_of_mass_extractor(input_) radiuses = gaussian_radius_extractor(input_, center_of_mass) loss = torch.nn.MSELoss()( radiuses, torch.rand(radiuses.shape) ) loss.backward() assert (loss - 610) ** 2 <= 1 # Generate radiuses center_of_mass = center_of_mass_extractor(seg_batch) radiuses = gaussian_radius_extractor.forward( seg_batch, center_of_mass ) assert radiuses.shape == (3, 2, 24, 16) # Test gaussian x_tensor = torch.linspace(-3, 3, 1001) y_tensor = gaussian_radius_extractor.gaussian(x_tensor, 0) gaussian_area = 6 * y_tensor.sum() / 1001 plt.clf() plt.plot(x_tensor, y_tensor) plt.title( f''' Area { 6 * gaussian_radius_extractor.gaussian(x_tensor, 0).sum() / 1001 } ''' ) plt.savefig(TEST_OUTPUT_PATH / 'gaussian.png') assert (gaussian_area - 1) ** 2 < 0.00001 # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(seg_batch[nb, nf, :, :, 0]) plt.plot( center_of_mass[nb, nf, 0].real, center_of_mass[nb, nf, 0].imag, 'ro' ) plt.scatter( [ center_of_mass[nb, nf, 0].real + radiuses[nb, nf, j, 0] * np.cos(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ], [ center_of_mass[nb, nf, 0].imag + radiuses[nb, nf, j, 0] * np.sin(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ] ) plt.savefig(TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}.png') def test_moment_gaussian_radius_extractor_get_centered_plan(): """Test MomentGaussianRadiusExtractor get_centered_plan""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) 
TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Initialize modules and test segmentation for moment in [[0], [1], [2]]: seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = MomentGaussianRadiusExtractor( moments=moment, nb_radiuses=24, sigma=0.2 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Index array index_array = torch.arange( seg_batch.shape[2] * seg_batch.shape[3] ).reshape( seg_batch.shape[2], seg_batch.shape[3] ) x_indices = index_array % seg_batch.shape[3] y_indices = index_array // seg_batch.shape[3] centered_plan = gaussian_radius_extractor.get_centered_plan( center_of_mass_extractor(seg_batch), torch.complex(x_indices.float(), y_indices.float()) ) assert centered_plan.shape == (3, 2, 64, 64, 16) # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(centered_plan[nb, nf, :, :, 0].abs()) plt.savefig( TEST_OUTPUT_PATH / f'centered_plan_batch_{nb}_feature_{nf}.png' ) def test_moment_gaussian_radius_extractor_forward(): """Test MomentGaussianRadiusExtractor forward""" shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True) TEST_OUTPUT_PATH.mkdir(exist_ok=True) # Intialize modules and test segmentation seg_batch = generate_batch_of_segmentation().cpu() gaussian_radius_extractor = MomentGaussianRadiusExtractor( moments=[0], nb_radiuses=24, sigma=0.2 ) center_of_mass_extractor = CenterOfMass2DExtractor() # Test differentiability torch.manual_seed(0) input_ = torch.nn.Sigmoid()( torch.nn.Conv3d( 2, 2, kernel_size=1, padding=0, dilation=1)( seg_batch.cpu() ) ) center_of_mass = center_of_mass_extractor(input_) radiuses = gaussian_radius_extractor(input_, center_of_mass) loss = torch.nn.MSELoss()( radiuses, torch.rand(radiuses.shape) ) loss.backward() assert (loss - 610) ** 2 <= 1 # Generate radiuses center_of_mass = center_of_mass_extractor(seg_batch) radiuses = gaussian_radius_extractor.forward( seg_batch, center_of_mass ) assert radiuses.shape == (1, 3, 2, 24, 16) # 
Test gaussian x_tensor = torch.linspace(-3, 3, 1001) y_tensor = gaussian_radius_extractor.gaussian(x_tensor, 0) gaussian_area = 6 * y_tensor.sum() / 1001 plt.plot(x_tensor, y_tensor) plt.title( f''' Area {6 * gaussian_radius_extractor.gaussian(x_tensor, 0).sum() / 1001} ''' ) plt.savefig(TEST_OUTPUT_PATH / 'gaussian.png') assert (gaussian_area - 1) ** 2 < 0.00001 # Assess visual results for nb, nf in product( range(seg_batch.shape[0]), range(seg_batch.shape[1]) ): plt.clf() plt.imshow(seg_batch[nb, nf, :, :, 0]) plt.plot( center_of_mass[nb, nf, 0].real, center_of_mass[nb, nf, 0].imag, 'ro' ) plt.scatter( [ center_of_mass[nb, nf, 0].real + radiuses[0, nb, nf, j, 0] * np.cos(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ], [ center_of_mass[nb, nf, 0].imag + radiuses[0, nb, nf, j, 0] * np.sin(angle) for j, angle in enumerate(gaussian_radius_extractor.angles) ] ) plt.savefig(TEST_OUTPUT_PATH / f'batch_{nb}_feature_{nf}.png') def test_vanilla_diameter_extrator_forward(): """Test VanillaDiameterExtrator forward""" # Test value vanilla_diameter_extractor = VanillaDiameterExtractor(8) x_tensor = torch.tensor( [ 3, 2 * math.sqrt(2), 4, math.sqrt(2), 5, 3 * math.sqrt(2), 2, 3 * math.sqrt(2) ] ).reshape(1, 1, 8, 1) diameters = vanilla_diameter_extractor.forward(x_tensor) assert diameters.shape == (1, 1, 1) assert (diameters.sum() - math.sqrt(73)) ** 2 < 0.0001 # Test backpropagation torch.manual_seed(0) x_tensor = torch.nn.Sigmoid()( torch.nn.Conv2d(1, 1, 1, padding=0)(x_tensor) ) diameters = vanilla_diameter_extractor(x_tensor) loss = torch.nn.MSELoss()(diameters, torch.rand(1, 1, 1)) loss.backward()
0.871311
0.595493
import re
import twint
import pandas as pd
import matplotlib.pyplot as plt
from textblob import TextBlob
from collections import Counter
from nltk.corpus import stopwords
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator


def make_search(username):
    """Scrape up to 100k corona/virus tweets by `username` into new_tweets.csv."""
    c = twint.Config()
    c.Username = username
    c.Get_replies = True
    # choose search term (optional)
    c.Search = ["corona", "virus"]
    # choose beginning time (narrow results)
    c.Since = "2020-01-01"
    c.Until = "2020-04-30"
    # set limit on total tweets
    c.Limit = 100000
    # makes the csv format properly
    c.Store_csv = True
    # c.Pandas = True
    # format of the csv
    c.Custom['user'] = ["date", "time", "username", "tweet", "link", "likes",
                        "retweets", "replies", "mentions", "hashtags"]
    # change the name of the csv file
    c.Output = "new_tweets.csv"
    twint.run.Search(c)


def create_file():
    """Run the scrape for every news account of interest (appends to the CSV)."""
    usernames = ["cnn", "foxnews", "bbcworld", "ajenglish", "who"]
    for username in usernames:
        make_search(username)


create_file()


def get_tweet_sentiment(clean_tweets):
    """Classify each cleaned tweet by TextBlob polarity.

    Polarity > 0 -> 'positive', == 0 -> 'neutral', < 0 -> 'negative'.

    Returns:
        pd.DataFrame with columns ['tweet', 'sentiment'], one row per tweet.
    """
    rows = []
    for clean_tweet in clean_tweets:
        polarity = TextBlob(clean_tweet).sentiment.polarity
        if polarity > 0:
            sentiment = 'positive'
        elif polarity == 0:
            sentiment = 'neutral'
        else:
            sentiment = 'negative'
        rows.append({'tweet': clean_tweet, 'sentiment': sentiment})
    # DataFrame.append was deprecated and removed in pandas 2.0; building the
    # frame from a list of row dicts is also O(n) instead of O(n^2).
    return pd.DataFrame(rows, columns=['tweet', 'sentiment'])


def column_sum_value(column_name):
    """Return the sum of a numeric column of the global `tweets` frame."""
    total = 0  # renamed from `sum`, which shadowed the builtin
    for value in tweets[column_name]:
        total += value
    return total


def engagement_in_month(month):
    """Total likes + retweets + replies plus the tweet count for one month.

    Args:
        month (int): month number, 1 = January.
    """
    count = 0
    engagement = 0
    for i, date in enumerate(tweets['date']):
        # dates are 'YYYY-MM-DD'; field 1 is the month
        if int(date.split('-')[1]) == month:
            engagement += tweets['likes_count'][i]
            engagement += tweets['retweets_count'][i]
            engagement += tweets['replies_count'][i]
            count += 1
    # the number of tweets itself counts toward engagement
    return engagement + count


def engagement_in_months():
    """Engagement per month, January (index 0) through April (index 3)."""
    # removed an unused local that shadowed the builtin `max`
    return [engagement_in_month(month) for month in range(1, 5)]


# split based on words only
tweets = pd.read_csv("new_tweets.csv")
clean_tweets = []
# build the stopword set once instead of once per tweet
stop_words = set(stopwords.words('english'))
for index, tweet in tweets.iterrows():
    # strip urls and twitter-specific tokens
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', tweet.tweet)
    text = re.sub(r"http\S+", "", text)
    text = text.replace('twitter', '')
    text = text.replace('com', '')
    text = text.replace('pic', '')
    words = re.split(r'\W+', text)
    # convert to lower case
    words = [word.lower() for word in words]
    # remove remaining tokens that are not alphabetic
    words = [word for word in words if word.isalpha()]
    # remove stopwords
    words = [w for w in words if w not in stop_words]
    clean_tweets.append(' '.join(words))

all_tweets = get_tweet_sentiment(clean_tweets)

# picking positive tweets from tweets
ptweets = all_tweets.loc[all_tweets['sentiment'] == 'positive', 'tweet'].tolist()
# percentage of positive tweets
positive_tweets_percentage = 100 * len(ptweets) / len(all_tweets)
print("1) Positive tweets percentage: {} %".format(positive_tweets_percentage))

ntweets = all_tweets.loc[all_tweets['sentiment'] == 'negative', 'tweet'].tolist()
# percentage of negative tweets
negative_tweets_percentage = 100 * len(ntweets) / len(all_tweets)
print("2) Negative tweets percentage: {} %".format(negative_tweets_percentage))
print(
    "Neutral tweets percentage: {} %".format(
        100 * (len(all_tweets) - (len(ntweets) + len(ptweets))) / len(all_tweets)))

# WordCloud
# Create and generate a word cloud image:
wordcloud_positive = WordCloud().generate(' '.join(ptweets))
# Display the generated image:
plt.imshow(wordcloud_positive, interpolation='bilinear')
plt.axis("off")
plt.show()
wordcloud_positive.to_file("positive_review.png")

# split() returns list of all the words in the string
ptweets_split_it = (' '.join(ptweets)).split()
# Pass the split_it list to instance of Counter class.
ptweets_counter = Counter(ptweets_split_it)
ptweets_most_occur = ptweets_counter.most_common(1)
print('3) The most occurred word in positive tweets is: ', ptweets_most_occur)

# Create and generate a word cloud image:
wordcloud_negative = WordCloud().generate(' '.join(ntweets))
# Display the generated image:
plt.imshow(wordcloud_negative, interpolation='bilinear')
plt.axis("off")
plt.show()
wordcloud_negative.to_file("negative_review.png")

# split() returns list of all the words in the string
ntweets_split_it = (' '.join(ntweets)).split()
# Pass the split_it list to instance of Counter class.
ntweets_counter = Counter(ntweets_split_it)
ntweets_most_occur = ntweets_counter.most_common(1)
print('4) The most occurred word in negative tweets is: ', ntweets_most_occur)

# most and least tweeted month (1-based month numbers)
engagement_list = engagement_in_months()
highest_tweeted_month = engagement_list.index(max(engagement_list)) + 1
least_tweeted_month = engagement_list.index(min(engagement_list)) + 1

# total engagement
jan = 1
apr = 4
engagement_in_jan = engagement_in_month(jan)
engagement_in_apr = engagement_in_month(apr)
difference = engagement_in_apr - engagement_in_jan

total_likes = column_sum_value("likes_count")
total_retweets = column_sum_value("retweets_count")
total_comments = column_sum_value("replies_count")
total_tweets = len(tweets)
total_engagement = total_likes + total_retweets + total_comments
net_tweets = total_tweets + total_comments

print("5) net count of positive tweets: ",
      int(positive_tweets_percentage * net_tweets / 100))
print("6) net count of negative tweets: ",
      int(negative_tweets_percentage * net_tweets / 100))
print("7) The month that the people tweeted about the virus the most is: ",
      highest_tweeted_month)
print("8) The month that the people tweeted about the virus the least is: ",
      least_tweeted_month)
print("9) Total amount of engagement")
print("Total likes: ", total_likes)
print("Total comments: ", total_comments)
print("Total retweets: ", total_retweets)
print("Total engagement: ", total_engagement)
print("10) Difference between people tweeted in april and january")
print("Tweets engagements in January: ", engagement_in_jan)
print("Tweets engagements in April: ", engagement_in_apr)
print("Difference: ", difference)
final-project/main.py
import re
import twint
import pandas as pd
import matplotlib.pyplot as plt
from textblob import TextBlob
from collections import Counter
from nltk.corpus import stopwords
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator


def make_search(username):
    """Scrape up to 100k corona/virus tweets by `username` into new_tweets.csv."""
    c = twint.Config()
    c.Username = username
    c.Get_replies = True
    # choose search term (optional)
    c.Search = ["corona", "virus"]
    # choose beginning time (narrow results)
    c.Since = "2020-01-01"
    c.Until = "2020-04-30"
    # set limit on total tweets
    c.Limit = 100000
    # makes the csv format properly
    c.Store_csv = True
    # c.Pandas = True
    # format of the csv
    c.Custom['user'] = ["date", "time", "username", "tweet", "link", "likes",
                        "retweets", "replies", "mentions", "hashtags"]
    # change the name of the csv file
    c.Output = "new_tweets.csv"
    twint.run.Search(c)


def create_file():
    """Run the scrape for every news account of interest (appends to the CSV)."""
    usernames = ["cnn", "foxnews", "bbcworld", "ajenglish", "who"]
    for username in usernames:
        make_search(username)


create_file()


def get_tweet_sentiment(clean_tweets):
    """Classify each cleaned tweet by TextBlob polarity.

    Polarity > 0 -> 'positive', == 0 -> 'neutral', < 0 -> 'negative'.

    Returns:
        pd.DataFrame with columns ['tweet', 'sentiment'], one row per tweet.
    """
    rows = []
    for clean_tweet in clean_tweets:
        polarity = TextBlob(clean_tweet).sentiment.polarity
        if polarity > 0:
            sentiment = 'positive'
        elif polarity == 0:
            sentiment = 'neutral'
        else:
            sentiment = 'negative'
        rows.append({'tweet': clean_tweet, 'sentiment': sentiment})
    # DataFrame.append was deprecated and removed in pandas 2.0; building the
    # frame from a list of row dicts is also O(n) instead of O(n^2).
    return pd.DataFrame(rows, columns=['tweet', 'sentiment'])


def column_sum_value(column_name):
    """Return the sum of a numeric column of the global `tweets` frame."""
    total = 0  # renamed from `sum`, which shadowed the builtin
    for value in tweets[column_name]:
        total += value
    return total


def engagement_in_month(month):
    """Total likes + retweets + replies plus the tweet count for one month.

    Args:
        month (int): month number, 1 = January.
    """
    count = 0
    engagement = 0
    for i, date in enumerate(tweets['date']):
        # dates are 'YYYY-MM-DD'; field 1 is the month
        if int(date.split('-')[1]) == month:
            engagement += tweets['likes_count'][i]
            engagement += tweets['retweets_count'][i]
            engagement += tweets['replies_count'][i]
            count += 1
    # the number of tweets itself counts toward engagement
    return engagement + count


def engagement_in_months():
    """Engagement per month, January (index 0) through April (index 3)."""
    # removed an unused local that shadowed the builtin `max`
    return [engagement_in_month(month) for month in range(1, 5)]


# split based on words only
tweets = pd.read_csv("new_tweets.csv")
clean_tweets = []
# build the stopword set once instead of once per tweet
stop_words = set(stopwords.words('english'))
for index, tweet in tweets.iterrows():
    # strip urls and twitter-specific tokens
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', tweet.tweet)
    text = re.sub(r"http\S+", "", text)
    text = text.replace('twitter', '')
    text = text.replace('com', '')
    text = text.replace('pic', '')
    words = re.split(r'\W+', text)
    # convert to lower case
    words = [word.lower() for word in words]
    # remove remaining tokens that are not alphabetic
    words = [word for word in words if word.isalpha()]
    # remove stopwords
    words = [w for w in words if w not in stop_words]
    clean_tweets.append(' '.join(words))

all_tweets = get_tweet_sentiment(clean_tweets)

# picking positive tweets from tweets
ptweets = all_tweets.loc[all_tweets['sentiment'] == 'positive', 'tweet'].tolist()
# percentage of positive tweets
positive_tweets_percentage = 100 * len(ptweets) / len(all_tweets)
print("1) Positive tweets percentage: {} %".format(positive_tweets_percentage))

ntweets = all_tweets.loc[all_tweets['sentiment'] == 'negative', 'tweet'].tolist()
# percentage of negative tweets
negative_tweets_percentage = 100 * len(ntweets) / len(all_tweets)
print("2) Negative tweets percentage: {} %".format(negative_tweets_percentage))
print(
    "Neutral tweets percentage: {} %".format(
        100 * (len(all_tweets) - (len(ntweets) + len(ptweets))) / len(all_tweets)))

# WordCloud
# Create and generate a word cloud image:
wordcloud_positive = WordCloud().generate(' '.join(ptweets))
# Display the generated image:
plt.imshow(wordcloud_positive, interpolation='bilinear')
plt.axis("off")
plt.show()
wordcloud_positive.to_file("positive_review.png")

# split() returns list of all the words in the string
ptweets_split_it = (' '.join(ptweets)).split()
# Pass the split_it list to instance of Counter class.
ptweets_counter = Counter(ptweets_split_it)
ptweets_most_occur = ptweets_counter.most_common(1)
print('3) The most occurred word in positive tweets is: ', ptweets_most_occur)

# Create and generate a word cloud image:
wordcloud_negative = WordCloud().generate(' '.join(ntweets))
# Display the generated image:
plt.imshow(wordcloud_negative, interpolation='bilinear')
plt.axis("off")
plt.show()
wordcloud_negative.to_file("negative_review.png")

# split() returns list of all the words in the string
ntweets_split_it = (' '.join(ntweets)).split()
# Pass the split_it list to instance of Counter class.
ntweets_counter = Counter(ntweets_split_it)
ntweets_most_occur = ntweets_counter.most_common(1)
print('4) The most occurred word in negative tweets is: ', ntweets_most_occur)

# most and least tweeted month (1-based month numbers)
engagement_list = engagement_in_months()
highest_tweeted_month = engagement_list.index(max(engagement_list)) + 1
least_tweeted_month = engagement_list.index(min(engagement_list)) + 1

# total engagement
jan = 1
apr = 4
engagement_in_jan = engagement_in_month(jan)
engagement_in_apr = engagement_in_month(apr)
difference = engagement_in_apr - engagement_in_jan

total_likes = column_sum_value("likes_count")
total_retweets = column_sum_value("retweets_count")
total_comments = column_sum_value("replies_count")
total_tweets = len(tweets)
total_engagement = total_likes + total_retweets + total_comments
net_tweets = total_tweets + total_comments

print("5) net count of positive tweets: ",
      int(positive_tweets_percentage * net_tweets / 100))
print("6) net count of negative tweets: ",
      int(negative_tweets_percentage * net_tweets / 100))
print("7) The month that the people tweeted about the virus the most is: ",
      highest_tweeted_month)
print("8) The month that the people tweeted about the virus the least is: ",
      least_tweeted_month)
print("9) Total amount of engagement")
print("Total likes: ", total_likes)
print("Total comments: ", total_comments)
print("Total retweets: ", total_retweets)
print("Total engagement: ", total_engagement)
print("10) Difference between people tweeted in april and january")
print("Tweets engagements in January: ", engagement_in_jan)
print("Tweets engagements in April: ", engagement_in_apr)
print("Difference: ", difference)
0.23118
0.138345
# NOTE(review): this module matches the exact shape emitted by the gRPC Python
# protocol compiler plugin (grpc_tools.protoc) -- presumably auto-generated;
# prefer regenerating from the .proto definitions over editing by hand.
import grpc
import getgauge.messages.lsp_pb2 as lsp__pb2
import getgauge.messages.messages_pb2 as messages__pb2


class lspServiceStub(object):
    """Client-side stub: one unary-unary callable per lspService RPC."""
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute is bound to the fully qualified RPC name with the
        # matching protobuf request serializer / response deserializer.
        self.GetStepNames = channel.unary_unary(
            '/gauge.messages.lspService/GetStepNames',
            request_serializer=messages__pb2.StepNamesRequest.SerializeToString,
            response_deserializer=messages__pb2.StepNamesResponse.FromString,
        )
        self.CacheFile = channel.unary_unary(
            '/gauge.messages.lspService/CacheFile',
            request_serializer=messages__pb2.CacheFileRequest.SerializeToString,
            response_deserializer=lsp__pb2.Empty.FromString,
        )
        self.GetStepPositions = channel.unary_unary(
            '/gauge.messages.lspService/GetStepPositions',
            request_serializer=messages__pb2.StepPositionsRequest.SerializeToString,
            response_deserializer=messages__pb2.StepPositionsResponse.FromString,
        )
        self.GetImplementationFiles = channel.unary_unary(
            '/gauge.messages.lspService/GetImplementationFiles',
            request_serializer=lsp__pb2.Empty.SerializeToString,
            response_deserializer=messages__pb2.ImplementationFileListResponse.FromString,
        )
        self.ImplementStub = channel.unary_unary(
            '/gauge.messages.lspService/ImplementStub',
            request_serializer=messages__pb2.StubImplementationCodeRequest.SerializeToString,
            response_deserializer=messages__pb2.FileDiff.FromString,
        )
        self.ValidateStep = channel.unary_unary(
            '/gauge.messages.lspService/ValidateStep',
            request_serializer=messages__pb2.StepValidateRequest.SerializeToString,
            response_deserializer=messages__pb2.StepValidateResponse.FromString,
        )
        self.Refactor = channel.unary_unary(
            '/gauge.messages.lspService/Refactor',
            request_serializer=messages__pb2.RefactorRequest.SerializeToString,
            response_deserializer=messages__pb2.RefactorResponse.FromString,
        )
        self.GetStepName = channel.unary_unary(
            '/gauge.messages.lspService/GetStepName',
            request_serializer=messages__pb2.StepNameRequest.SerializeToString,
            response_deserializer=messages__pb2.StepNameResponse.FromString,
        )
        self.GetGlobPatterns = channel.unary_unary(
            '/gauge.messages.lspService/GetGlobPatterns',
            request_serializer=lsp__pb2.Empty.SerializeToString,
            response_deserializer=messages__pb2.ImplementationFileGlobPatternResponse.FromString,
        )
        self.KillProcess = channel.unary_unary(
            '/gauge.messages.lspService/KillProcess',
            request_serializer=messages__pb2.KillProcessRequest.SerializeToString,
            response_deserializer=lsp__pb2.Empty.FromString,
        )


class lspServiceServicer(object):
    """Server-side service base class; subclass and override each handler.

    Every generated handler aborts with UNIMPLEMENTED until overridden.
    """
    # missing associated documentation comment in .proto file
    pass

    def GetStepNames(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CacheFile(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetStepPositions(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetImplementationFiles(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ImplementStub(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ValidateStep(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Refactor(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetStepName(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetGlobPatterns(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def KillProcess(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_lspServiceServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for gauge.messages.lspService on `server`."""
    rpc_method_handlers = {
        'GetStepNames': grpc.unary_unary_rpc_method_handler(
            servicer.GetStepNames,
            request_deserializer=messages__pb2.StepNamesRequest.FromString,
            response_serializer=messages__pb2.StepNamesResponse.SerializeToString,
        ),
        'CacheFile': grpc.unary_unary_rpc_method_handler(
            servicer.CacheFile,
            request_deserializer=messages__pb2.CacheFileRequest.FromString,
            response_serializer=lsp__pb2.Empty.SerializeToString,
        ),
        'GetStepPositions': grpc.unary_unary_rpc_method_handler(
            servicer.GetStepPositions,
            request_deserializer=messages__pb2.StepPositionsRequest.FromString,
            response_serializer=messages__pb2.StepPositionsResponse.SerializeToString,
        ),
        'GetImplementationFiles': grpc.unary_unary_rpc_method_handler(
            servicer.GetImplementationFiles,
            request_deserializer=lsp__pb2.Empty.FromString,
            response_serializer=messages__pb2.ImplementationFileListResponse.SerializeToString,
        ),
        'ImplementStub': grpc.unary_unary_rpc_method_handler(
            servicer.ImplementStub,
            request_deserializer=messages__pb2.StubImplementationCodeRequest.FromString,
            response_serializer=messages__pb2.FileDiff.SerializeToString,
        ),
        'ValidateStep': grpc.unary_unary_rpc_method_handler(
            servicer.ValidateStep,
            request_deserializer=messages__pb2.StepValidateRequest.FromString,
            response_serializer=messages__pb2.StepValidateResponse.SerializeToString,
        ),
        'Refactor': grpc.unary_unary_rpc_method_handler(
            servicer.Refactor,
            request_deserializer=messages__pb2.RefactorRequest.FromString,
            response_serializer=messages__pb2.RefactorResponse.SerializeToString,
        ),
        'GetStepName': grpc.unary_unary_rpc_method_handler(
            servicer.GetStepName,
            request_deserializer=messages__pb2.StepNameRequest.FromString,
            response_serializer=messages__pb2.StepNameResponse.SerializeToString,
        ),
        'GetGlobPatterns': grpc.unary_unary_rpc_method_handler(
            servicer.GetGlobPatterns,
            request_deserializer=lsp__pb2.Empty.FromString,
            response_serializer=messages__pb2.ImplementationFileGlobPatternResponse.SerializeToString,
        ),
        'KillProcess': grpc.unary_unary_rpc_method_handler(
            servicer.KillProcess,
            request_deserializer=messages__pb2.KillProcessRequest.FromString,
            response_serializer=lsp__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'gauge.messages.lspService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
getgauge/messages/lsp_pb2_grpc.py
# NOTE(review): this module matches the exact shape emitted by the gRPC Python
# protocol compiler plugin (grpc_tools.protoc) -- presumably auto-generated;
# prefer regenerating from the .proto definitions over editing by hand.
import grpc
import getgauge.messages.lsp_pb2 as lsp__pb2
import getgauge.messages.messages_pb2 as messages__pb2


class lspServiceStub(object):
    """Client-side stub: one unary-unary callable per lspService RPC."""
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute is bound to the fully qualified RPC name with the
        # matching protobuf request serializer / response deserializer.
        self.GetStepNames = channel.unary_unary(
            '/gauge.messages.lspService/GetStepNames',
            request_serializer=messages__pb2.StepNamesRequest.SerializeToString,
            response_deserializer=messages__pb2.StepNamesResponse.FromString,
        )
        self.CacheFile = channel.unary_unary(
            '/gauge.messages.lspService/CacheFile',
            request_serializer=messages__pb2.CacheFileRequest.SerializeToString,
            response_deserializer=lsp__pb2.Empty.FromString,
        )
        self.GetStepPositions = channel.unary_unary(
            '/gauge.messages.lspService/GetStepPositions',
            request_serializer=messages__pb2.StepPositionsRequest.SerializeToString,
            response_deserializer=messages__pb2.StepPositionsResponse.FromString,
        )
        self.GetImplementationFiles = channel.unary_unary(
            '/gauge.messages.lspService/GetImplementationFiles',
            request_serializer=lsp__pb2.Empty.SerializeToString,
            response_deserializer=messages__pb2.ImplementationFileListResponse.FromString,
        )
        self.ImplementStub = channel.unary_unary(
            '/gauge.messages.lspService/ImplementStub',
            request_serializer=messages__pb2.StubImplementationCodeRequest.SerializeToString,
            response_deserializer=messages__pb2.FileDiff.FromString,
        )
        self.ValidateStep = channel.unary_unary(
            '/gauge.messages.lspService/ValidateStep',
            request_serializer=messages__pb2.StepValidateRequest.SerializeToString,
            response_deserializer=messages__pb2.StepValidateResponse.FromString,
        )
        self.Refactor = channel.unary_unary(
            '/gauge.messages.lspService/Refactor',
            request_serializer=messages__pb2.RefactorRequest.SerializeToString,
            response_deserializer=messages__pb2.RefactorResponse.FromString,
        )
        self.GetStepName = channel.unary_unary(
            '/gauge.messages.lspService/GetStepName',
            request_serializer=messages__pb2.StepNameRequest.SerializeToString,
            response_deserializer=messages__pb2.StepNameResponse.FromString,
        )
        self.GetGlobPatterns = channel.unary_unary(
            '/gauge.messages.lspService/GetGlobPatterns',
            request_serializer=lsp__pb2.Empty.SerializeToString,
            response_deserializer=messages__pb2.ImplementationFileGlobPatternResponse.FromString,
        )
        self.KillProcess = channel.unary_unary(
            '/gauge.messages.lspService/KillProcess',
            request_serializer=messages__pb2.KillProcessRequest.SerializeToString,
            response_deserializer=lsp__pb2.Empty.FromString,
        )


class lspServiceServicer(object):
    """Server-side service base class; subclass and override each handler.

    Every generated handler aborts with UNIMPLEMENTED until overridden.
    """
    # missing associated documentation comment in .proto file
    pass

    def GetStepNames(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CacheFile(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetStepPositions(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetImplementationFiles(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ImplementStub(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ValidateStep(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Refactor(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetStepName(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetGlobPatterns(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def KillProcess(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_lspServiceServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for gauge.messages.lspService on `server`."""
    rpc_method_handlers = {
        'GetStepNames': grpc.unary_unary_rpc_method_handler(
            servicer.GetStepNames,
            request_deserializer=messages__pb2.StepNamesRequest.FromString,
            response_serializer=messages__pb2.StepNamesResponse.SerializeToString,
        ),
        'CacheFile': grpc.unary_unary_rpc_method_handler(
            servicer.CacheFile,
            request_deserializer=messages__pb2.CacheFileRequest.FromString,
            response_serializer=lsp__pb2.Empty.SerializeToString,
        ),
        'GetStepPositions': grpc.unary_unary_rpc_method_handler(
            servicer.GetStepPositions,
            request_deserializer=messages__pb2.StepPositionsRequest.FromString,
            response_serializer=messages__pb2.StepPositionsResponse.SerializeToString,
        ),
        'GetImplementationFiles': grpc.unary_unary_rpc_method_handler(
            servicer.GetImplementationFiles,
            request_deserializer=lsp__pb2.Empty.FromString,
            response_serializer=messages__pb2.ImplementationFileListResponse.SerializeToString,
        ),
        'ImplementStub': grpc.unary_unary_rpc_method_handler(
            servicer.ImplementStub,
            request_deserializer=messages__pb2.StubImplementationCodeRequest.FromString,
            response_serializer=messages__pb2.FileDiff.SerializeToString,
        ),
        'ValidateStep': grpc.unary_unary_rpc_method_handler(
            servicer.ValidateStep,
            request_deserializer=messages__pb2.StepValidateRequest.FromString,
            response_serializer=messages__pb2.StepValidateResponse.SerializeToString,
        ),
        'Refactor': grpc.unary_unary_rpc_method_handler(
            servicer.Refactor,
            request_deserializer=messages__pb2.RefactorRequest.FromString,
            response_serializer=messages__pb2.RefactorResponse.SerializeToString,
        ),
        'GetStepName': grpc.unary_unary_rpc_method_handler(
            servicer.GetStepName,
            request_deserializer=messages__pb2.StepNameRequest.FromString,
            response_serializer=messages__pb2.StepNameResponse.SerializeToString,
        ),
        'GetGlobPatterns': grpc.unary_unary_rpc_method_handler(
            servicer.GetGlobPatterns,
            request_deserializer=lsp__pb2.Empty.FromString,
            response_serializer=messages__pb2.ImplementationFileGlobPatternResponse.SerializeToString,
        ),
        'KillProcess': grpc.unary_unary_rpc_method_handler(
            servicer.KillProcess,
            request_deserializer=messages__pb2.KillProcessRequest.FromString,
            response_serializer=lsp__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'gauge.messages.lspService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
0.523177
0.046313
import pandas as pd

from cascade.core.log import getLoggers
from cascade.dismod.constants import IntegrandEnum

CODELOG, MATHLOG = getLoggers(__name__)


def make_average_integrand_cases_from_gbd(
        ages_df, years_df, sexes, child_locations, include_birth_prevalence=False):
    """Determine what time and age ranges each integrand should be calculated
    for, based on GBD's expected inputs for the rest of the pipeline.

    Args:
        ages_df (pd.DataFrame): GBD age groups, one row per group, with
            ``age_group_years_start`` and ``age_group_years_end`` columns.
        years_df (Iterable): years to produce results for; each year ``y``
            becomes the point time interval ``(y, y)``.
        sexes (List[int]): a list of sex_ids to produce results for.
        child_locations (List[int]): location_ids to produce results for.
        include_birth_prevalence (bool): If true, additionally produce
            prevalence (and no other integrand) for the Birth age group
            (id 164), represented here as the point age interval (0, 0).

    Returns:
        pd.DataFrame: the full cross product of
        integrand x age range x time range x sex x location, with columns
        ``integrand``, ``age_lower``, ``age_upper``, ``time_lower``,
        ``time_upper``, ``location``, ``sex_id``.
    """
    age_ranges = [(r.age_group_years_start, r.age_group_years_end)
                  for _, r in ages_df.iterrows()]
    time_ranges = [(y, y) for y in years_df]

    rows = [
        {
            "integrand": integrand.name,
            "age_lower": age_lower,
            "age_upper": age_upper,
            "time_lower": time_lower,
            "time_upper": time_upper,
            "location": location_id,
            "sex_id": sex_id,
        }
        for integrand in IntegrandEnum
        for age_lower, age_upper in age_ranges
        for time_lower, time_upper in time_ranges
        for sex_id in sexes
        for location_id in child_locations
    ]

    if include_birth_prevalence:
        # Birth prevalence is a point measurement at age 0, so it gets its
        # own rows rather than reusing the GBD age ranges.
        birth_prev_rows = [
            {
                "integrand": "prevalence",
                "age_lower": 0,
                "age_upper": 0,
                "time_lower": time_lower,
                "time_upper": time_upper,
                "location": location_id,
                "sex_id": sex_id,
            }
            for time_lower, time_upper in time_ranges
            for sex_id in sexes
            for location_id in child_locations
        ]
        rows.extend(birth_prev_rows)

    # Explicit column order keeps the frame stable even when rows is empty.
    return pd.DataFrame(
        rows,
        columns=[
            "integrand",
            "age_lower",
            "age_upper",
            "time_lower",
            "time_upper",
            "location",
            "sex_id",
        ],
    )
src/cascade/model/integrands.py
import pandas as pd

from cascade.core.log import getLoggers
from cascade.dismod.constants import IntegrandEnum

CODELOG, MATHLOG = getLoggers(__name__)


def make_average_integrand_cases_from_gbd(
        ages_df, years_df, sexes, child_locations, include_birth_prevalence=False):
    """Determine what time and age ranges each integrand should be calculated
    for, based on GBD's expected inputs for the rest of the pipeline.

    Args:
        ages_df (pd.DataFrame): GBD age groups, one row per group, with
            ``age_group_years_start`` and ``age_group_years_end`` columns.
        years_df (Iterable): years to produce results for; each year ``y``
            becomes the point time interval ``(y, y)``.
        sexes (List[int]): a list of sex_ids to produce results for.
        child_locations (List[int]): location_ids to produce results for.
        include_birth_prevalence (bool): If true, additionally produce
            prevalence (and no other integrand) for the Birth age group
            (id 164), represented here as the point age interval (0, 0).

    Returns:
        pd.DataFrame: the full cross product of
        integrand x age range x time range x sex x location, with columns
        ``integrand``, ``age_lower``, ``age_upper``, ``time_lower``,
        ``time_upper``, ``location``, ``sex_id``.
    """
    age_ranges = [(r.age_group_years_start, r.age_group_years_end)
                  for _, r in ages_df.iterrows()]
    time_ranges = [(y, y) for y in years_df]

    rows = [
        {
            "integrand": integrand.name,
            "age_lower": age_lower,
            "age_upper": age_upper,
            "time_lower": time_lower,
            "time_upper": time_upper,
            "location": location_id,
            "sex_id": sex_id,
        }
        for integrand in IntegrandEnum
        for age_lower, age_upper in age_ranges
        for time_lower, time_upper in time_ranges
        for sex_id in sexes
        for location_id in child_locations
    ]

    if include_birth_prevalence:
        # Birth prevalence is a point measurement at age 0, so it gets its
        # own rows rather than reusing the GBD age ranges.
        birth_prev_rows = [
            {
                "integrand": "prevalence",
                "age_lower": 0,
                "age_upper": 0,
                "time_lower": time_lower,
                "time_upper": time_upper,
                "location": location_id,
                "sex_id": sex_id,
            }
            for time_lower, time_upper in time_ranges
            for sex_id in sexes
            for location_id in child_locations
        ]
        rows.extend(birth_prev_rows)

    # Explicit column order keeps the frame stable even when rows is empty.
    return pd.DataFrame(
        rows,
        columns=[
            "integrand",
            "age_lower",
            "age_upper",
            "time_lower",
            "time_upper",
            "location",
            "sex_id",
        ],
    )
0.789437
0.448789
import sdk_common import os # Block in charge of generating the Foundation SDK class SDKFoundationGeneration(sdk_common.BuildStepUsingGradle): def __init__(self, logger=None): super(SDKFoundationGeneration, self).__init__('SDK foundation generation', logger) self.gradle_directory = self.common_config.get_config().get_sdk_foundation_generation_directory() self.reset() self.artifacts_parser = self.common_config.get_config().get_new_artifact_log_parser(self) self.branch_name = self.common_config.get_config().get_branch_name() self.github_token = self.common_config.get_config().get_github_token() self.url_with_token = self.common_config.get_config().get_origin_url_combined_with_token() def execute(self): self.print_title() try: self.log_info("Generating SDK Foundation code") if not self.check_whether_api_config_has_changed(): self.log_info("Cloud API specifications have not changed. No need to regenerate the Foundation SDK") return True self.generate_code() except: self.log_error('Failed to generate the Foundation SDK') return False try: self.log_info("Committing generated code to the repository") self.commit_generated_code() except: self.log_error('Failed to commit the Foundation SDK back to the repository') return False self.log_info("Done.") return True def retrieve_folder_location(self, key): if not key: return None self.artifacts_parser.load() return self.clean_path( self.artifacts_parser.get_property(key), False) def check_whether_api_config_has_changed(self): api_config = self.retrieve_folder_location('SDK_API_DEFINITION_DIR') if not api_config or not os.path.exists(api_config): self.log_warning("The folder where API specifications are store does not exist [%s]" % api_config) return False api_config = os.path.realpath(api_config) api_config = os.path.relpath(api_config, os.path.commonprefix([api_config, self.top_directory])) current_hash = self.git_commit_hash() previous_hash = self.git_previous_commit_hash(current_hash) changes = self.git_changes_list('m', 
previous_hash, current_hash, api_config) changes.extend(self.git_changes_list('a', previous_hash, current_hash, api_config)) self.log_info("%s changes were made in the API specifications [%s] since last commit [%s]" % ( len(changes), api_config, previous_hash)) return changes and len(changes) > 0 def get_list_folders_to_commit(self): folders = self.retrieve_folder_location('SDK_GENERATED_SOURCE_DIRS') if not folders: return [] folders_list = folders.split(os.pathsep) folders = self.retrieve_folder_location('SDK_ALL_SOURCE_DIRS') if folders: folders_list.extend(folders.split(os.pathsep)) return [os.path.realpath(folder) for folder in folders_list if os.path.exists(folder) and os.path.isdir(folder)] def commit_generated_code(self): if not self.common_config.get_config().get_user_name() or not self.common_config.get_config().get_user_email(): self.git_setup_env() commit_hash = self.git_commit_hash() folders = self.get_list_folders_to_commit() if not folders or len(folders) == 0: self.log_warning( "The folders which contain code which may need to be committed back could not be determined") return for folder in folders: folder_path = os.path.relpath(folder, os.path.commonprefix([folder, self.top_directory])) try: self.git_add_folder(folder_path) except: self.log_warning("Folder [%s] could not be added because no changes were found." 
% folder_path) pass current_changes = self.git_current_changes_list('a') current_changes.extend(self.git_current_changes_list('m')) current_changes.extend(self.git_current_changes_list('d')) if len(current_changes) == 0: self.log_info("Nothing to commit") return try: self.log_info("%s changes were found in the repository due to code generation:" % len(current_changes)) for change in current_changes: self.log_info("- %s" % change) self.git_commit("New foundation SDK for API change [%s]" % commit_hash) if not self.url_with_token: if not self.github_token: raise Exception("The GitHub token has not been set properly") else: raise Exception("The remote URL could not be resolved") self.git_set_remote_url(self.url_with_token) self.git_set_upstream_branch(self.branch_name) self.git_push_and_follow_tags() except: self.log_warning("The changes could not be committed back to the repository") pass def generate_code(self): self.execute_gradle_task('build', ['--rerun-tasks']) self.execute_gradle_task('run') gradle_dir = self.gradle_directory self.gradle_directory = self.common_config.get_config().get_sdk_top_directory() self.reset() self.execute_gradle_task('spotlessApply') self.gradle_directory = gradle_dir self.reset()
scripts/sdk_generate_foundation.py
import sdk_common import os # Block in charge of generating the Foundation SDK class SDKFoundationGeneration(sdk_common.BuildStepUsingGradle): def __init__(self, logger=None): super(SDKFoundationGeneration, self).__init__('SDK foundation generation', logger) self.gradle_directory = self.common_config.get_config().get_sdk_foundation_generation_directory() self.reset() self.artifacts_parser = self.common_config.get_config().get_new_artifact_log_parser(self) self.branch_name = self.common_config.get_config().get_branch_name() self.github_token = self.common_config.get_config().get_github_token() self.url_with_token = self.common_config.get_config().get_origin_url_combined_with_token() def execute(self): self.print_title() try: self.log_info("Generating SDK Foundation code") if not self.check_whether_api_config_has_changed(): self.log_info("Cloud API specifications have not changed. No need to regenerate the Foundation SDK") return True self.generate_code() except: self.log_error('Failed to generate the Foundation SDK') return False try: self.log_info("Committing generated code to the repository") self.commit_generated_code() except: self.log_error('Failed to commit the Foundation SDK back to the repository') return False self.log_info("Done.") return True def retrieve_folder_location(self, key): if not key: return None self.artifacts_parser.load() return self.clean_path( self.artifacts_parser.get_property(key), False) def check_whether_api_config_has_changed(self): api_config = self.retrieve_folder_location('SDK_API_DEFINITION_DIR') if not api_config or not os.path.exists(api_config): self.log_warning("The folder where API specifications are store does not exist [%s]" % api_config) return False api_config = os.path.realpath(api_config) api_config = os.path.relpath(api_config, os.path.commonprefix([api_config, self.top_directory])) current_hash = self.git_commit_hash() previous_hash = self.git_previous_commit_hash(current_hash) changes = self.git_changes_list('m', 
previous_hash, current_hash, api_config) changes.extend(self.git_changes_list('a', previous_hash, current_hash, api_config)) self.log_info("%s changes were made in the API specifications [%s] since last commit [%s]" % ( len(changes), api_config, previous_hash)) return changes and len(changes) > 0 def get_list_folders_to_commit(self): folders = self.retrieve_folder_location('SDK_GENERATED_SOURCE_DIRS') if not folders: return [] folders_list = folders.split(os.pathsep) folders = self.retrieve_folder_location('SDK_ALL_SOURCE_DIRS') if folders: folders_list.extend(folders.split(os.pathsep)) return [os.path.realpath(folder) for folder in folders_list if os.path.exists(folder) and os.path.isdir(folder)] def commit_generated_code(self): if not self.common_config.get_config().get_user_name() or not self.common_config.get_config().get_user_email(): self.git_setup_env() commit_hash = self.git_commit_hash() folders = self.get_list_folders_to_commit() if not folders or len(folders) == 0: self.log_warning( "The folders which contain code which may need to be committed back could not be determined") return for folder in folders: folder_path = os.path.relpath(folder, os.path.commonprefix([folder, self.top_directory])) try: self.git_add_folder(folder_path) except: self.log_warning("Folder [%s] could not be added because no changes were found." 
% folder_path) pass current_changes = self.git_current_changes_list('a') current_changes.extend(self.git_current_changes_list('m')) current_changes.extend(self.git_current_changes_list('d')) if len(current_changes) == 0: self.log_info("Nothing to commit") return try: self.log_info("%s changes were found in the repository due to code generation:" % len(current_changes)) for change in current_changes: self.log_info("- %s" % change) self.git_commit("New foundation SDK for API change [%s]" % commit_hash) if not self.url_with_token: if not self.github_token: raise Exception("The GitHub token has not been set properly") else: raise Exception("The remote URL could not be resolved") self.git_set_remote_url(self.url_with_token) self.git_set_upstream_branch(self.branch_name) self.git_push_and_follow_tags() except: self.log_warning("The changes could not be committed back to the repository") pass def generate_code(self): self.execute_gradle_task('build', ['--rerun-tasks']) self.execute_gradle_task('run') gradle_dir = self.gradle_directory self.gradle_directory = self.common_config.get_config().get_sdk_top_directory() self.reset() self.execute_gradle_task('spotlessApply') self.gradle_directory = gradle_dir self.reset()
0.323594
0.062445
from sys import version_info if version_info >= (2,6,0): def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_inaoqi', [dirname(__file__)]) except ImportError: import _inaoqi return _inaoqi if fp is not None: try: _mod = imp.load_module('_inaoqi', fp, pathname, description) finally: fp.close() return _mod _inaoqi = swig_import_helper() del swig_import_helper else: import _inaoqi del version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. def _swig_setattr_nondynamic(self,class_type,name,value,static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'SwigPyObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name,None) if method: return method(self,value) if (not static): self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self,class_type,name,value): return _swig_setattr_nondynamic(self,class_type,name,value,0) def _swig_getattr(self,class_type,name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name,None) if method: return method(self) raise AttributeError(name) def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) try: _object = object _newclass = 1 except AttributeError: class _object : pass _newclass = 0 class SwigPyIterator(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name) def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract") __repr__ = _swig_repr __swig_destroy__ = _inaoqi.delete_SwigPyIterator 
__del__ = lambda self : None; def value(self): return _inaoqi.SwigPyIterator_value(self) def incr(self, n=1): return _inaoqi.SwigPyIterator_incr(self, n) def decr(self, n=1): return _inaoqi.SwigPyIterator_decr(self, n) def distance(self, *args): return _inaoqi.SwigPyIterator_distance(self, *args) def equal(self, *args): return _inaoqi.SwigPyIterator_equal(self, *args) def copy(self): return _inaoqi.SwigPyIterator_copy(self) def next(self): return _inaoqi.SwigPyIterator_next(self) def __next__(self): return _inaoqi.SwigPyIterator___next__(self) def previous(self): return _inaoqi.SwigPyIterator_previous(self) def advance(self, *args): return _inaoqi.SwigPyIterator_advance(self, *args) def __eq__(self, *args): return _inaoqi.SwigPyIterator___eq__(self, *args) def __ne__(self, *args): return _inaoqi.SwigPyIterator___ne__(self, *args) def __iadd__(self, *args): return _inaoqi.SwigPyIterator___iadd__(self, *args) def __isub__(self, *args): return _inaoqi.SwigPyIterator___isub__(self, *args) def __add__(self, *args): return _inaoqi.SwigPyIterator___add__(self, *args) def __sub__(self, *args): return _inaoqi.SwigPyIterator___sub__(self, *args) def __iter__(self): return self SwigPyIterator_swigregister = _inaoqi.SwigPyIterator_swigregister SwigPyIterator_swigregister(SwigPyIterator) class StringVector(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, StringVector, name) __repr__ = _swig_repr def iterator(self): return _inaoqi.StringVector_iterator(self) def __iter__(self): return self.iterator() def __nonzero__(self): return _inaoqi.StringVector___nonzero__(self) def __bool__(self): return _inaoqi.StringVector___bool__(self) def __len__(self): return _inaoqi.StringVector___len__(self) def pop(self): return _inaoqi.StringVector_pop(self) def __getslice__(self, *args): return _inaoqi.StringVector___getslice__(self, *args) def 
__setslice__(self, *args): return _inaoqi.StringVector___setslice__(self, *args) def __delslice__(self, *args): return _inaoqi.StringVector___delslice__(self, *args) def __delitem__(self, *args): return _inaoqi.StringVector___delitem__(self, *args) def __getitem__(self, *args): return _inaoqi.StringVector___getitem__(self, *args) def __setitem__(self, *args): return _inaoqi.StringVector___setitem__(self, *args) def append(self, *args): return _inaoqi.StringVector_append(self, *args) def empty(self): return _inaoqi.StringVector_empty(self) def size(self): return _inaoqi.StringVector_size(self) def clear(self): return _inaoqi.StringVector_clear(self) def swap(self, *args): return _inaoqi.StringVector_swap(self, *args) def get_allocator(self): return _inaoqi.StringVector_get_allocator(self) def begin(self): return _inaoqi.StringVector_begin(self) def end(self): return _inaoqi.StringVector_end(self) def rbegin(self): return _inaoqi.StringVector_rbegin(self) def rend(self): return _inaoqi.StringVector_rend(self) def pop_back(self): return _inaoqi.StringVector_pop_back(self) def erase(self, *args): return _inaoqi.StringVector_erase(self, *args) def __init__(self, *args): this = _inaoqi.new_StringVector(*args) try: self.this.append(this) except: self.this = this def push_back(self, *args): return _inaoqi.StringVector_push_back(self, *args) def front(self): return _inaoqi.StringVector_front(self) def back(self): return _inaoqi.StringVector_back(self) def assign(self, *args): return _inaoqi.StringVector_assign(self, *args) def resize(self, *args): return _inaoqi.StringVector_resize(self, *args) def insert(self, *args): return _inaoqi.StringVector_insert(self, *args) def reserve(self, *args): return _inaoqi.StringVector_reserve(self, *args) def capacity(self): return _inaoqi.StringVector_capacity(self) __swig_destroy__ = _inaoqi.delete_StringVector __del__ = lambda self : None; StringVector_swigregister = _inaoqi.StringVector_swigregister 
StringVector_swigregister(StringVector) def _getDefaultSession(): return _inaoqi._getDefaultSession() _getDefaultSession = _inaoqi._getDefaultSession class broker(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, broker, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, broker, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_broker(*args) try: self.this.append(this) except: self.this = this __swig_destroy__ = _inaoqi.delete_broker __del__ = lambda self : None; def shutdown(self): return _inaoqi.broker_shutdown(self) def isModulePresent(self, *args): return _inaoqi.broker_isModulePresent(self, *args) def getGlobalModuleList(self): return _inaoqi.broker_getGlobalModuleList(self) def getALBroker(self): return _inaoqi.broker_getALBroker(self) def onDisconnected(self, *args): return _inaoqi.broker_onDisconnected(self, *args) broker_swigregister = _inaoqi.broker_swigregister broker_swigregister(broker) class baseModule(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, baseModule, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, baseModule, name) __repr__ = _swig_repr def __init__(self): this = _inaoqi.new_baseModule() try: self.this.append(this) except: self.this = this __swig_destroy__ = _inaoqi.delete_baseModule __del__ = lambda self : None; def BIND_PYTHON(self, *args): return _inaoqi.baseModule_BIND_PYTHON(self, *args) def _bindWithParam(self, *args): return _inaoqi.baseModule__bindWithParam(self, *args) def exit(self): return _inaoqi.baseModule_exit(self) def getName(self): return _inaoqi.baseModule_getName(self) def getBrokerName(self): return _inaoqi.baseModule_getBrokerName(self) def setModuleDescription(self, *args): return _inaoqi.baseModule_setModuleDescription(self, *args) def addParam(self, *args): return _inaoqi.baseModule_addParam(self, *args) def 
functionName(self, *args): return _inaoqi.baseModule_functionName(self, *args) def autoBind(self, *args): return _inaoqi.baseModule_autoBind(self, *args) def _methodMissing0(self, *args): return _inaoqi.baseModule__methodMissing0(self, *args) def _methodMissing1(self, *args): return _inaoqi.baseModule__methodMissing1(self, *args) def _methodMissing2(self, *args): return _inaoqi.baseModule__methodMissing2(self, *args) def _methodMissing3(self, *args): return _inaoqi.baseModule__methodMissing3(self, *args) def _methodMissing4(self, *args): return _inaoqi.baseModule__methodMissing4(self, *args) def _methodMissing5(self, *args): return _inaoqi.baseModule__methodMissing5(self, *args) def _methodMissing6(self, *args): return _inaoqi.baseModule__methodMissing6(self, *args) __swig_getmethods__["callPythonMethod0"] = lambda x: _inaoqi.baseModule_callPythonMethod0 if _newclass:callPythonMethod0 = staticmethod(_inaoqi.baseModule_callPythonMethod0) __swig_getmethods__["callPythonMethod1"] = lambda x: _inaoqi.baseModule_callPythonMethod1 if _newclass:callPythonMethod1 = staticmethod(_inaoqi.baseModule_callPythonMethod1) __swig_getmethods__["callPythonMethod2"] = lambda x: _inaoqi.baseModule_callPythonMethod2 if _newclass:callPythonMethod2 = staticmethod(_inaoqi.baseModule_callPythonMethod2) __swig_getmethods__["callPythonMethod3"] = lambda x: _inaoqi.baseModule_callPythonMethod3 if _newclass:callPythonMethod3 = staticmethod(_inaoqi.baseModule_callPythonMethod3) __swig_getmethods__["callPythonMethod4"] = lambda x: _inaoqi.baseModule_callPythonMethod4 if _newclass:callPythonMethod4 = staticmethod(_inaoqi.baseModule_callPythonMethod4) __swig_getmethods__["callPythonMethod5"] = lambda x: _inaoqi.baseModule_callPythonMethod5 if _newclass:callPythonMethod5 = staticmethod(_inaoqi.baseModule_callPythonMethod5) __swig_getmethods__["callPythonMethod6"] = lambda x: _inaoqi.baseModule_callPythonMethod6 if _newclass:callPythonMethod6 = staticmethod(_inaoqi.baseModule_callPythonMethod6) def 
_fakeMethod0(self): return _inaoqi.baseModule__fakeMethod0(self) def _methodMissing(self): return _inaoqi.baseModule__methodMissing(self) def version(self): return _inaoqi.baseModule_version(self) def registerToBroker(self): return _inaoqi.baseModule_registerToBroker(self) baseModule_swigregister = _inaoqi.baseModule_swigregister baseModule_swigregister(baseModule) def baseModule_callPythonMethod0(*args): return _inaoqi.baseModule_callPythonMethod0(*args) baseModule_callPythonMethod0 = _inaoqi.baseModule_callPythonMethod0 def baseModule_callPythonMethod1(*args): return _inaoqi.baseModule_callPythonMethod1(*args) baseModule_callPythonMethod1 = _inaoqi.baseModule_callPythonMethod1 def baseModule_callPythonMethod2(*args): return _inaoqi.baseModule_callPythonMethod2(*args) baseModule_callPythonMethod2 = _inaoqi.baseModule_callPythonMethod2 def baseModule_callPythonMethod3(*args): return _inaoqi.baseModule_callPythonMethod3(*args) baseModule_callPythonMethod3 = _inaoqi.baseModule_callPythonMethod3 def baseModule_callPythonMethod4(*args): return _inaoqi.baseModule_callPythonMethod4(*args) baseModule_callPythonMethod4 = _inaoqi.baseModule_callPythonMethod4 def baseModule_callPythonMethod5(*args): return _inaoqi.baseModule_callPythonMethod5(*args) baseModule_callPythonMethod5 = _inaoqi.baseModule_callPythonMethod5 def baseModule_callPythonMethod6(*args): return _inaoqi.baseModule_callPythonMethod6(*args) baseModule_callPythonMethod6 = _inaoqi.baseModule_callPythonMethod6 class module(baseModule): __swig_setmethods__ = {} for _s in [baseModule]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{})) __setattr__ = lambda self, name, value: _swig_setattr(self, module, name, value) __swig_getmethods__ = {} for _s in [baseModule]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{})) __getattr__ = lambda self, name: _swig_getattr(self, module, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_module(*args) try: self.this.append(this) 
except: self.this = this __swig_destroy__ = _inaoqi.delete_module __del__ = lambda self : None; module_swigregister = _inaoqi.module_swigregister module_swigregister(module) class proxy(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, proxy, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, proxy, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_proxy(*args) try: self.this.append(this) except: self.this = this def pythonCall(self, *args): return _inaoqi.proxy_pythonCall(self, *args) def pythonPCall(self, *args): return _inaoqi.proxy_pythonPCall(self, *args) def wait(self, *args): return _inaoqi.proxy_wait(self, *args) def stop(self, *args): return _inaoqi.proxy_stop(self, *args) def isRunning(self, *args): return _inaoqi.proxy_isRunning(self, *args) def session(self): return _inaoqi.proxy_session(self) __swig_destroy__ = _inaoqi.delete_proxy __del__ = lambda self : None; proxy_swigregister = _inaoqi.proxy_swigregister proxy_swigregister(proxy) def setInstance(*args): return _inaoqi.setInstance(*args) setInstance = _inaoqi.setInstance class ALMemoryProxyPostHandler(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, ALMemoryProxyPostHandler, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, ALMemoryProxyPostHandler, name) def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined") __repr__ = _swig_repr def addMapping(self, *args): return _inaoqi.ALMemoryProxyPostHandler_addMapping(self, *args) def declareEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_declareEvent(self, *args) def exit(self): return _inaoqi.ALMemoryProxyPostHandler_exit(self) def insertData(self, *args): return _inaoqi.ALMemoryProxyPostHandler_insertData(self, *args) def insertListData(self, *args): return _inaoqi.ALMemoryProxyPostHandler_insertListData(self, *args) def 
raiseEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_raiseEvent(self, *args) def raiseMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_raiseMicroEvent(self, *args) def removeData(self, *args): return _inaoqi.ALMemoryProxyPostHandler_removeData(self, *args) def removeEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_removeEvent(self, *args) def removeMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_removeMicroEvent(self, *args) def setDescription(self, *args): return _inaoqi.ALMemoryProxyPostHandler_setDescription(self, *args) def stop(self, *args): return _inaoqi.ALMemoryProxyPostHandler_stop(self, *args) def subscribeToEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_subscribeToEvent(self, *args) def subscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_subscribeToMicroEvent(self, *args) def unregisterModuleReference(self, *args): return _inaoqi.ALMemoryProxyPostHandler_unregisterModuleReference(self, *args) def unsubscribeToEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_unsubscribeToEvent(self, *args) def unsubscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_unsubscribeToMicroEvent(self, *args) __swig_destroy__ = _inaoqi.delete_ALMemoryProxyPostHandler __del__ = lambda self : None; ALMemoryProxyPostHandler_swigregister = _inaoqi.ALMemoryProxyPostHandler_swigregister ALMemoryProxyPostHandler_swigregister(ALMemoryProxyPostHandler) class ALMemoryProxy(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, ALMemoryProxy, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, ALMemoryProxy, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_ALMemoryProxy(*args) try: self.this.append(this) except: self.this = this def getGenericProxy(self): return _inaoqi.ALMemoryProxy_getGenericProxy(self) def addMapping(self, *args): return 
_inaoqi.ALMemoryProxy_addMapping(self, *args) def declareEvent(self, *args): return _inaoqi.ALMemoryProxy_declareEvent(self, *args) def exit(self): return _inaoqi.ALMemoryProxy_exit(self) def getBrokerName(self): return _inaoqi.ALMemoryProxy_getBrokerName(self) def getData(self, *args): return _inaoqi.ALMemoryProxy_getData(self, *args) def getDataList(self, *args): return _inaoqi.ALMemoryProxy_getDataList(self, *args) def getDataListName(self): return _inaoqi.ALMemoryProxy_getDataListName(self) def getDataOnChange(self, *args): return _inaoqi.ALMemoryProxy_getDataOnChange(self, *args) def getDataPtr(self, *args): return _inaoqi.ALMemoryProxy_getDataPtr(self, *args) def getIntPtr(self, *args): return _inaoqi.ALMemoryProxy_getIntPtr(self, *args) def getFloatPtr(self, *args): return _inaoqi.ALMemoryProxy_getFloatPtr(self, *args) def getDescriptionList(self, *args): return _inaoqi.ALMemoryProxy_getDescriptionList(self, *args) def getEventHistory(self, *args): return _inaoqi.ALMemoryProxy_getEventHistory(self, *args) def getEventList(self): return _inaoqi.ALMemoryProxy_getEventList(self) def getExtractorEvent(self, *args): return _inaoqi.ALMemoryProxy_getExtractorEvent(self, *args) def getListData(self, *args): return _inaoqi.ALMemoryProxy_getListData(self, *args) def getMethodHelp(self, *args): return _inaoqi.ALMemoryProxy_getMethodHelp(self, *args) def getMethodList(self): return _inaoqi.ALMemoryProxy_getMethodList(self) def getMicroEventList(self): return _inaoqi.ALMemoryProxy_getMicroEventList(self) def getModuleHelp(self): return _inaoqi.ALMemoryProxy_getModuleHelp(self) def getSubscribers(self, *args): return _inaoqi.ALMemoryProxy_getSubscribers(self, *args) def getTimestamp(self, *args): return _inaoqi.ALMemoryProxy_getTimestamp(self, *args) def getType(self, *args): return _inaoqi.ALMemoryProxy_getType(self, *args) def getUsage(self, *args): return _inaoqi.ALMemoryProxy_getUsage(self, *args) def insertData(self, *args): return 
_inaoqi.ALMemoryProxy_insertData(self, *args) def insertListData(self, *args): return _inaoqi.ALMemoryProxy_insertListData(self, *args) def isRunning(self, *args): return _inaoqi.ALMemoryProxy_isRunning(self, *args) def pCall(self): return _inaoqi.ALMemoryProxy_pCall(self) def ping(self): return _inaoqi.ALMemoryProxy_ping(self) def raiseEvent(self, *args): return _inaoqi.ALMemoryProxy_raiseEvent(self, *args) def raiseMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_raiseMicroEvent(self, *args) def removeData(self, *args): return _inaoqi.ALMemoryProxy_removeData(self, *args) def removeEvent(self, *args): return _inaoqi.ALMemoryProxy_removeEvent(self, *args) def removeMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_removeMicroEvent(self, *args) def setDescription(self, *args): return _inaoqi.ALMemoryProxy_setDescription(self, *args) def stop(self, *args): return _inaoqi.ALMemoryProxy_stop(self, *args) def subscribeToEvent(self, *args): return _inaoqi.ALMemoryProxy_subscribeToEvent(self, *args) def subscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_subscribeToMicroEvent(self, *args) def subscriber(self, *args): return _inaoqi.ALMemoryProxy_subscriber(self, *args) def unregisterModuleReference(self, *args): return _inaoqi.ALMemoryProxy_unregisterModuleReference(self, *args) def unsubscribeToEvent(self, *args): return _inaoqi.ALMemoryProxy_unsubscribeToEvent(self, *args) def unsubscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_unsubscribeToMicroEvent(self, *args) def version(self): return _inaoqi.ALMemoryProxy_version(self) def wait(self, *args): return _inaoqi.ALMemoryProxy_wait(self, *args) __swig_setmethods__["post"] = _inaoqi.ALMemoryProxy_post_set __swig_getmethods__["post"] = _inaoqi.ALMemoryProxy_post_get if _newclass:post = _swig_property(_inaoqi.ALMemoryProxy_post_get, _inaoqi.ALMemoryProxy_post_set) __swig_destroy__ = _inaoqi.delete_ALMemoryProxy __del__ = lambda self : None; ALMemoryProxy_swigregister = 
_inaoqi.ALMemoryProxy_swigregister ALMemoryProxy_swigregister(ALMemoryProxy) # This file is compatible with both classic and new-style classes.
pynaoqi-python2.7-2.5.5.5-linux64/lib/python2.7/site-packages/inaoqi.py
from sys import version_info if version_info >= (2,6,0): def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_inaoqi', [dirname(__file__)]) except ImportError: import _inaoqi return _inaoqi if fp is not None: try: _mod = imp.load_module('_inaoqi', fp, pathname, description) finally: fp.close() return _mod _inaoqi = swig_import_helper() del swig_import_helper else: import _inaoqi del version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. def _swig_setattr_nondynamic(self,class_type,name,value,static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'SwigPyObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name,None) if method: return method(self,value) if (not static): self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self,class_type,name,value): return _swig_setattr_nondynamic(self,class_type,name,value,0) def _swig_getattr(self,class_type,name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name,None) if method: return method(self) raise AttributeError(name) def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) try: _object = object _newclass = 1 except AttributeError: class _object : pass _newclass = 0 class SwigPyIterator(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name) def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract") __repr__ = _swig_repr __swig_destroy__ = _inaoqi.delete_SwigPyIterator 
__del__ = lambda self : None; def value(self): return _inaoqi.SwigPyIterator_value(self) def incr(self, n=1): return _inaoqi.SwigPyIterator_incr(self, n) def decr(self, n=1): return _inaoqi.SwigPyIterator_decr(self, n) def distance(self, *args): return _inaoqi.SwigPyIterator_distance(self, *args) def equal(self, *args): return _inaoqi.SwigPyIterator_equal(self, *args) def copy(self): return _inaoqi.SwigPyIterator_copy(self) def next(self): return _inaoqi.SwigPyIterator_next(self) def __next__(self): return _inaoqi.SwigPyIterator___next__(self) def previous(self): return _inaoqi.SwigPyIterator_previous(self) def advance(self, *args): return _inaoqi.SwigPyIterator_advance(self, *args) def __eq__(self, *args): return _inaoqi.SwigPyIterator___eq__(self, *args) def __ne__(self, *args): return _inaoqi.SwigPyIterator___ne__(self, *args) def __iadd__(self, *args): return _inaoqi.SwigPyIterator___iadd__(self, *args) def __isub__(self, *args): return _inaoqi.SwigPyIterator___isub__(self, *args) def __add__(self, *args): return _inaoqi.SwigPyIterator___add__(self, *args) def __sub__(self, *args): return _inaoqi.SwigPyIterator___sub__(self, *args) def __iter__(self): return self SwigPyIterator_swigregister = _inaoqi.SwigPyIterator_swigregister SwigPyIterator_swigregister(SwigPyIterator) class StringVector(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, StringVector, name) __repr__ = _swig_repr def iterator(self): return _inaoqi.StringVector_iterator(self) def __iter__(self): return self.iterator() def __nonzero__(self): return _inaoqi.StringVector___nonzero__(self) def __bool__(self): return _inaoqi.StringVector___bool__(self) def __len__(self): return _inaoqi.StringVector___len__(self) def pop(self): return _inaoqi.StringVector_pop(self) def __getslice__(self, *args): return _inaoqi.StringVector___getslice__(self, *args) def 
__setslice__(self, *args): return _inaoqi.StringVector___setslice__(self, *args) def __delslice__(self, *args): return _inaoqi.StringVector___delslice__(self, *args) def __delitem__(self, *args): return _inaoqi.StringVector___delitem__(self, *args) def __getitem__(self, *args): return _inaoqi.StringVector___getitem__(self, *args) def __setitem__(self, *args): return _inaoqi.StringVector___setitem__(self, *args) def append(self, *args): return _inaoqi.StringVector_append(self, *args) def empty(self): return _inaoqi.StringVector_empty(self) def size(self): return _inaoqi.StringVector_size(self) def clear(self): return _inaoqi.StringVector_clear(self) def swap(self, *args): return _inaoqi.StringVector_swap(self, *args) def get_allocator(self): return _inaoqi.StringVector_get_allocator(self) def begin(self): return _inaoqi.StringVector_begin(self) def end(self): return _inaoqi.StringVector_end(self) def rbegin(self): return _inaoqi.StringVector_rbegin(self) def rend(self): return _inaoqi.StringVector_rend(self) def pop_back(self): return _inaoqi.StringVector_pop_back(self) def erase(self, *args): return _inaoqi.StringVector_erase(self, *args) def __init__(self, *args): this = _inaoqi.new_StringVector(*args) try: self.this.append(this) except: self.this = this def push_back(self, *args): return _inaoqi.StringVector_push_back(self, *args) def front(self): return _inaoqi.StringVector_front(self) def back(self): return _inaoqi.StringVector_back(self) def assign(self, *args): return _inaoqi.StringVector_assign(self, *args) def resize(self, *args): return _inaoqi.StringVector_resize(self, *args) def insert(self, *args): return _inaoqi.StringVector_insert(self, *args) def reserve(self, *args): return _inaoqi.StringVector_reserve(self, *args) def capacity(self): return _inaoqi.StringVector_capacity(self) __swig_destroy__ = _inaoqi.delete_StringVector __del__ = lambda self : None; StringVector_swigregister = _inaoqi.StringVector_swigregister 
StringVector_swigregister(StringVector) def _getDefaultSession(): return _inaoqi._getDefaultSession() _getDefaultSession = _inaoqi._getDefaultSession class broker(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, broker, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, broker, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_broker(*args) try: self.this.append(this) except: self.this = this __swig_destroy__ = _inaoqi.delete_broker __del__ = lambda self : None; def shutdown(self): return _inaoqi.broker_shutdown(self) def isModulePresent(self, *args): return _inaoqi.broker_isModulePresent(self, *args) def getGlobalModuleList(self): return _inaoqi.broker_getGlobalModuleList(self) def getALBroker(self): return _inaoqi.broker_getALBroker(self) def onDisconnected(self, *args): return _inaoqi.broker_onDisconnected(self, *args) broker_swigregister = _inaoqi.broker_swigregister broker_swigregister(broker) class baseModule(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, baseModule, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, baseModule, name) __repr__ = _swig_repr def __init__(self): this = _inaoqi.new_baseModule() try: self.this.append(this) except: self.this = this __swig_destroy__ = _inaoqi.delete_baseModule __del__ = lambda self : None; def BIND_PYTHON(self, *args): return _inaoqi.baseModule_BIND_PYTHON(self, *args) def _bindWithParam(self, *args): return _inaoqi.baseModule__bindWithParam(self, *args) def exit(self): return _inaoqi.baseModule_exit(self) def getName(self): return _inaoqi.baseModule_getName(self) def getBrokerName(self): return _inaoqi.baseModule_getBrokerName(self) def setModuleDescription(self, *args): return _inaoqi.baseModule_setModuleDescription(self, *args) def addParam(self, *args): return _inaoqi.baseModule_addParam(self, *args) def 
functionName(self, *args): return _inaoqi.baseModule_functionName(self, *args) def autoBind(self, *args): return _inaoqi.baseModule_autoBind(self, *args) def _methodMissing0(self, *args): return _inaoqi.baseModule__methodMissing0(self, *args) def _methodMissing1(self, *args): return _inaoqi.baseModule__methodMissing1(self, *args) def _methodMissing2(self, *args): return _inaoqi.baseModule__methodMissing2(self, *args) def _methodMissing3(self, *args): return _inaoqi.baseModule__methodMissing3(self, *args) def _methodMissing4(self, *args): return _inaoqi.baseModule__methodMissing4(self, *args) def _methodMissing5(self, *args): return _inaoqi.baseModule__methodMissing5(self, *args) def _methodMissing6(self, *args): return _inaoqi.baseModule__methodMissing6(self, *args) __swig_getmethods__["callPythonMethod0"] = lambda x: _inaoqi.baseModule_callPythonMethod0 if _newclass:callPythonMethod0 = staticmethod(_inaoqi.baseModule_callPythonMethod0) __swig_getmethods__["callPythonMethod1"] = lambda x: _inaoqi.baseModule_callPythonMethod1 if _newclass:callPythonMethod1 = staticmethod(_inaoqi.baseModule_callPythonMethod1) __swig_getmethods__["callPythonMethod2"] = lambda x: _inaoqi.baseModule_callPythonMethod2 if _newclass:callPythonMethod2 = staticmethod(_inaoqi.baseModule_callPythonMethod2) __swig_getmethods__["callPythonMethod3"] = lambda x: _inaoqi.baseModule_callPythonMethod3 if _newclass:callPythonMethod3 = staticmethod(_inaoqi.baseModule_callPythonMethod3) __swig_getmethods__["callPythonMethod4"] = lambda x: _inaoqi.baseModule_callPythonMethod4 if _newclass:callPythonMethod4 = staticmethod(_inaoqi.baseModule_callPythonMethod4) __swig_getmethods__["callPythonMethod5"] = lambda x: _inaoqi.baseModule_callPythonMethod5 if _newclass:callPythonMethod5 = staticmethod(_inaoqi.baseModule_callPythonMethod5) __swig_getmethods__["callPythonMethod6"] = lambda x: _inaoqi.baseModule_callPythonMethod6 if _newclass:callPythonMethod6 = staticmethod(_inaoqi.baseModule_callPythonMethod6) def 
_fakeMethod0(self): return _inaoqi.baseModule__fakeMethod0(self) def _methodMissing(self): return _inaoqi.baseModule__methodMissing(self) def version(self): return _inaoqi.baseModule_version(self) def registerToBroker(self): return _inaoqi.baseModule_registerToBroker(self) baseModule_swigregister = _inaoqi.baseModule_swigregister baseModule_swigregister(baseModule) def baseModule_callPythonMethod0(*args): return _inaoqi.baseModule_callPythonMethod0(*args) baseModule_callPythonMethod0 = _inaoqi.baseModule_callPythonMethod0 def baseModule_callPythonMethod1(*args): return _inaoqi.baseModule_callPythonMethod1(*args) baseModule_callPythonMethod1 = _inaoqi.baseModule_callPythonMethod1 def baseModule_callPythonMethod2(*args): return _inaoqi.baseModule_callPythonMethod2(*args) baseModule_callPythonMethod2 = _inaoqi.baseModule_callPythonMethod2 def baseModule_callPythonMethod3(*args): return _inaoqi.baseModule_callPythonMethod3(*args) baseModule_callPythonMethod3 = _inaoqi.baseModule_callPythonMethod3 def baseModule_callPythonMethod4(*args): return _inaoqi.baseModule_callPythonMethod4(*args) baseModule_callPythonMethod4 = _inaoqi.baseModule_callPythonMethod4 def baseModule_callPythonMethod5(*args): return _inaoqi.baseModule_callPythonMethod5(*args) baseModule_callPythonMethod5 = _inaoqi.baseModule_callPythonMethod5 def baseModule_callPythonMethod6(*args): return _inaoqi.baseModule_callPythonMethod6(*args) baseModule_callPythonMethod6 = _inaoqi.baseModule_callPythonMethod6 class module(baseModule): __swig_setmethods__ = {} for _s in [baseModule]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{})) __setattr__ = lambda self, name, value: _swig_setattr(self, module, name, value) __swig_getmethods__ = {} for _s in [baseModule]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{})) __getattr__ = lambda self, name: _swig_getattr(self, module, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_module(*args) try: self.this.append(this) 
except: self.this = this __swig_destroy__ = _inaoqi.delete_module __del__ = lambda self : None; module_swigregister = _inaoqi.module_swigregister module_swigregister(module) class proxy(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, proxy, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, proxy, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_proxy(*args) try: self.this.append(this) except: self.this = this def pythonCall(self, *args): return _inaoqi.proxy_pythonCall(self, *args) def pythonPCall(self, *args): return _inaoqi.proxy_pythonPCall(self, *args) def wait(self, *args): return _inaoqi.proxy_wait(self, *args) def stop(self, *args): return _inaoqi.proxy_stop(self, *args) def isRunning(self, *args): return _inaoqi.proxy_isRunning(self, *args) def session(self): return _inaoqi.proxy_session(self) __swig_destroy__ = _inaoqi.delete_proxy __del__ = lambda self : None; proxy_swigregister = _inaoqi.proxy_swigregister proxy_swigregister(proxy) def setInstance(*args): return _inaoqi.setInstance(*args) setInstance = _inaoqi.setInstance class ALMemoryProxyPostHandler(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, ALMemoryProxyPostHandler, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, ALMemoryProxyPostHandler, name) def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined") __repr__ = _swig_repr def addMapping(self, *args): return _inaoqi.ALMemoryProxyPostHandler_addMapping(self, *args) def declareEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_declareEvent(self, *args) def exit(self): return _inaoqi.ALMemoryProxyPostHandler_exit(self) def insertData(self, *args): return _inaoqi.ALMemoryProxyPostHandler_insertData(self, *args) def insertListData(self, *args): return _inaoqi.ALMemoryProxyPostHandler_insertListData(self, *args) def 
raiseEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_raiseEvent(self, *args) def raiseMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_raiseMicroEvent(self, *args) def removeData(self, *args): return _inaoqi.ALMemoryProxyPostHandler_removeData(self, *args) def removeEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_removeEvent(self, *args) def removeMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_removeMicroEvent(self, *args) def setDescription(self, *args): return _inaoqi.ALMemoryProxyPostHandler_setDescription(self, *args) def stop(self, *args): return _inaoqi.ALMemoryProxyPostHandler_stop(self, *args) def subscribeToEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_subscribeToEvent(self, *args) def subscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_subscribeToMicroEvent(self, *args) def unregisterModuleReference(self, *args): return _inaoqi.ALMemoryProxyPostHandler_unregisterModuleReference(self, *args) def unsubscribeToEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_unsubscribeToEvent(self, *args) def unsubscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxyPostHandler_unsubscribeToMicroEvent(self, *args) __swig_destroy__ = _inaoqi.delete_ALMemoryProxyPostHandler __del__ = lambda self : None; ALMemoryProxyPostHandler_swigregister = _inaoqi.ALMemoryProxyPostHandler_swigregister ALMemoryProxyPostHandler_swigregister(ALMemoryProxyPostHandler) class ALMemoryProxy(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, ALMemoryProxy, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, ALMemoryProxy, name) __repr__ = _swig_repr def __init__(self, *args): this = _inaoqi.new_ALMemoryProxy(*args) try: self.this.append(this) except: self.this = this def getGenericProxy(self): return _inaoqi.ALMemoryProxy_getGenericProxy(self) def addMapping(self, *args): return 
_inaoqi.ALMemoryProxy_addMapping(self, *args) def declareEvent(self, *args): return _inaoqi.ALMemoryProxy_declareEvent(self, *args) def exit(self): return _inaoqi.ALMemoryProxy_exit(self) def getBrokerName(self): return _inaoqi.ALMemoryProxy_getBrokerName(self) def getData(self, *args): return _inaoqi.ALMemoryProxy_getData(self, *args) def getDataList(self, *args): return _inaoqi.ALMemoryProxy_getDataList(self, *args) def getDataListName(self): return _inaoqi.ALMemoryProxy_getDataListName(self) def getDataOnChange(self, *args): return _inaoqi.ALMemoryProxy_getDataOnChange(self, *args) def getDataPtr(self, *args): return _inaoqi.ALMemoryProxy_getDataPtr(self, *args) def getIntPtr(self, *args): return _inaoqi.ALMemoryProxy_getIntPtr(self, *args) def getFloatPtr(self, *args): return _inaoqi.ALMemoryProxy_getFloatPtr(self, *args) def getDescriptionList(self, *args): return _inaoqi.ALMemoryProxy_getDescriptionList(self, *args) def getEventHistory(self, *args): return _inaoqi.ALMemoryProxy_getEventHistory(self, *args) def getEventList(self): return _inaoqi.ALMemoryProxy_getEventList(self) def getExtractorEvent(self, *args): return _inaoqi.ALMemoryProxy_getExtractorEvent(self, *args) def getListData(self, *args): return _inaoqi.ALMemoryProxy_getListData(self, *args) def getMethodHelp(self, *args): return _inaoqi.ALMemoryProxy_getMethodHelp(self, *args) def getMethodList(self): return _inaoqi.ALMemoryProxy_getMethodList(self) def getMicroEventList(self): return _inaoqi.ALMemoryProxy_getMicroEventList(self) def getModuleHelp(self): return _inaoqi.ALMemoryProxy_getModuleHelp(self) def getSubscribers(self, *args): return _inaoqi.ALMemoryProxy_getSubscribers(self, *args) def getTimestamp(self, *args): return _inaoqi.ALMemoryProxy_getTimestamp(self, *args) def getType(self, *args): return _inaoqi.ALMemoryProxy_getType(self, *args) def getUsage(self, *args): return _inaoqi.ALMemoryProxy_getUsage(self, *args) def insertData(self, *args): return 
_inaoqi.ALMemoryProxy_insertData(self, *args) def insertListData(self, *args): return _inaoqi.ALMemoryProxy_insertListData(self, *args) def isRunning(self, *args): return _inaoqi.ALMemoryProxy_isRunning(self, *args) def pCall(self): return _inaoqi.ALMemoryProxy_pCall(self) def ping(self): return _inaoqi.ALMemoryProxy_ping(self) def raiseEvent(self, *args): return _inaoqi.ALMemoryProxy_raiseEvent(self, *args) def raiseMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_raiseMicroEvent(self, *args) def removeData(self, *args): return _inaoqi.ALMemoryProxy_removeData(self, *args) def removeEvent(self, *args): return _inaoqi.ALMemoryProxy_removeEvent(self, *args) def removeMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_removeMicroEvent(self, *args) def setDescription(self, *args): return _inaoqi.ALMemoryProxy_setDescription(self, *args) def stop(self, *args): return _inaoqi.ALMemoryProxy_stop(self, *args) def subscribeToEvent(self, *args): return _inaoqi.ALMemoryProxy_subscribeToEvent(self, *args) def subscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_subscribeToMicroEvent(self, *args) def subscriber(self, *args): return _inaoqi.ALMemoryProxy_subscriber(self, *args) def unregisterModuleReference(self, *args): return _inaoqi.ALMemoryProxy_unregisterModuleReference(self, *args) def unsubscribeToEvent(self, *args): return _inaoqi.ALMemoryProxy_unsubscribeToEvent(self, *args) def unsubscribeToMicroEvent(self, *args): return _inaoqi.ALMemoryProxy_unsubscribeToMicroEvent(self, *args) def version(self): return _inaoqi.ALMemoryProxy_version(self) def wait(self, *args): return _inaoqi.ALMemoryProxy_wait(self, *args) __swig_setmethods__["post"] = _inaoqi.ALMemoryProxy_post_set __swig_getmethods__["post"] = _inaoqi.ALMemoryProxy_post_get if _newclass:post = _swig_property(_inaoqi.ALMemoryProxy_post_get, _inaoqi.ALMemoryProxy_post_set) __swig_destroy__ = _inaoqi.delete_ALMemoryProxy __del__ = lambda self : None; ALMemoryProxy_swigregister = 
_inaoqi.ALMemoryProxy_swigregister ALMemoryProxy_swigregister(ALMemoryProxy) # This file is compatible with both classic and new-style classes.
0.129485
0.098642
import pandas as pd import matplotlib.pyplot as plt from collections import Counter from nltk import word_tokenize from nltk.corpus import stopwords import re import pprint import math import os import shutil def main(): statement_document_analysis() frequency_counts() def frequency_counts(): cwd = "../output/statement_text_analysis/" terms = ['risk','risks', ] statements = pd.read_csv("../../../collection/python/output/statement_data.csv") statements['date'] = pd.to_datetime(statements['end_date']) stop_words = stopwords.words('english') corpus_words = [] for i in statements.index: raw_text = statements.loc[i,'file_text'].lower().replace("\n"," ").strip(",") sentences = raw_text.split(". ") for term in terms: term_sents = [] term_words = [] for sentence in sentences: if term in sentence: term_sents.append(sentence) statements.at[i, term+"_sents"] = "|".join(term_sents) for sent in term_sents: for word in word_tokenize(sent): #print(word) if word.isalpha() and word not in stop_words: corpus_words.append(word) term_words.append(word) #print(term_words) statements.at[i,term+"_words"] = "|".join(term_words) corpus_counter = Counter(corpus_words) for term in terms: term_words = [] for meeting_words in statements[term+"_words"]: term_words.extend(meeting_words.split("|")) term_counts = Counter(term_words) print(term.upper()) pprint.pprint(term_counts) statements.loc[1,term+"_word_freqs"] = "{}:{}".format(term.upper(),str(term_counts)) statements.to_csv(cwd+"word_grouping_counts.csv") def statement_document_analysis(): cwd = "../output/statement_text_analysis/" if os.path.exists(cwd): shutil.rmtree(cwd) if not os.path.exists(cwd): os.mkdir(cwd) os.mkdir(cwd+"graphs") terms = [ ['risks','balanced'], ['risks','weighted'], ['risks','maintained'] ] print(terms) statements = pd.read_csv("../../../collection/python/output/statement_data.csv") statements['date'] = pd.to_datetime(statements['end_date']) for i in statements.index: raw_text = statements.loc[i, 
'file_text'].lower().replace("\n", " ").strip(",") sentences = raw_text.split(". ") for term in terms: term_sents = [] for sentence in sentences: if term[0] and term[1] in sentence: term_sents.append(sentence) statements.at[i, term[0]+":"+term[1] + "_sents"] = "|".join(term_sents) for term in terms: term_1 = term[0] term_2 = term[1] term_phrase = term_1+":"+term_2 statements[term_phrase] = ((statements.file_text.str.contains(term_1))& (statements.file_text.str.contains(term_2))) statements.sort_values(by="date",inplace=True) plt.plot(statements['date'],statements[term_phrase],'bo',markersize=1) plt.title(term_phrase) graph_path = cwd+"graphs/"+term_phrase.replace(":","_")+".png" if os.path.exists(graph_path): os.rmdir(graph_path) plt.savefig(graph_path) statements.to_csv(cwd+"term_connections.csv") #print(statements) if __name__ == "__main__": main()
src/analysis/python/scripts/indirect/statement_text_analysis.py
import pandas as pd import matplotlib.pyplot as plt from collections import Counter from nltk import word_tokenize from nltk.corpus import stopwords import re import pprint import math import os import shutil def main(): statement_document_analysis() frequency_counts() def frequency_counts(): cwd = "../output/statement_text_analysis/" terms = ['risk','risks', ] statements = pd.read_csv("../../../collection/python/output/statement_data.csv") statements['date'] = pd.to_datetime(statements['end_date']) stop_words = stopwords.words('english') corpus_words = [] for i in statements.index: raw_text = statements.loc[i,'file_text'].lower().replace("\n"," ").strip(",") sentences = raw_text.split(". ") for term in terms: term_sents = [] term_words = [] for sentence in sentences: if term in sentence: term_sents.append(sentence) statements.at[i, term+"_sents"] = "|".join(term_sents) for sent in term_sents: for word in word_tokenize(sent): #print(word) if word.isalpha() and word not in stop_words: corpus_words.append(word) term_words.append(word) #print(term_words) statements.at[i,term+"_words"] = "|".join(term_words) corpus_counter = Counter(corpus_words) for term in terms: term_words = [] for meeting_words in statements[term+"_words"]: term_words.extend(meeting_words.split("|")) term_counts = Counter(term_words) print(term.upper()) pprint.pprint(term_counts) statements.loc[1,term+"_word_freqs"] = "{}:{}".format(term.upper(),str(term_counts)) statements.to_csv(cwd+"word_grouping_counts.csv") def statement_document_analysis(): cwd = "../output/statement_text_analysis/" if os.path.exists(cwd): shutil.rmtree(cwd) if not os.path.exists(cwd): os.mkdir(cwd) os.mkdir(cwd+"graphs") terms = [ ['risks','balanced'], ['risks','weighted'], ['risks','maintained'] ] print(terms) statements = pd.read_csv("../../../collection/python/output/statement_data.csv") statements['date'] = pd.to_datetime(statements['end_date']) for i in statements.index: raw_text = statements.loc[i, 
'file_text'].lower().replace("\n", " ").strip(",") sentences = raw_text.split(". ") for term in terms: term_sents = [] for sentence in sentences: if term[0] and term[1] in sentence: term_sents.append(sentence) statements.at[i, term[0]+":"+term[1] + "_sents"] = "|".join(term_sents) for term in terms: term_1 = term[0] term_2 = term[1] term_phrase = term_1+":"+term_2 statements[term_phrase] = ((statements.file_text.str.contains(term_1))& (statements.file_text.str.contains(term_2))) statements.sort_values(by="date",inplace=True) plt.plot(statements['date'],statements[term_phrase],'bo',markersize=1) plt.title(term_phrase) graph_path = cwd+"graphs/"+term_phrase.replace(":","_")+".png" if os.path.exists(graph_path): os.rmdir(graph_path) plt.savefig(graph_path) statements.to_csv(cwd+"term_connections.csv") #print(statements) if __name__ == "__main__": main()
0.039453
0.179351
from collections import defaultdict from itertools import groupby import re #TODO: Figure out a better way to do this #hack to get scripts to run with django import os import sys sys.path.append("..") sys.path.append("../api") os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' from courses.models import Course, Semester, Department, CourseHistory from sandbox_config import IMPORT_DATABASE_NAME, IMPORT_DATABASE_USER, \ IMPORT_DATABASE_PWD from extractor import Extractor def extract_courses(extractor): fields = ('course_id', 'paragraph_number', 'course_description') # course_id is of form CIS110 # course_description is split into paragraphs each with a number tables = ('TEST_PCR_COURSE_DESC_V',) order_by = ('course_id ASC', 'paragraph_number ASC') courses = extractor.select(fields, tables, order_by=order_by) def keyfunc(course): return course[0] # id for id, paragraphs in groupby(courses, key=keyfunc): dept = re.search("[A-Z]*", id).group(0) code = re.search("\d+", id).group(0) description = "\n".join(paragraph for _, _, paragraph in paragraphs) # TODO: Crosslist ID crosslist_id = None yield id, dept, code, description, crosslist_id def extract_course_histories(extractor): fields = ('subject_code', 'course_code', 'term', 'title') # subject code is of form CIS # course code is of form 110 # term is of form 2011A # title is of form INTERMEDIATE FRENCH II tables = ('TEST_PCR_SUMMARY_V',) histories = extractor.select(fields, tables) # course_id is given by course title and number, ie FNAR 123 VIDEO I for subject_code, course_code, term, title in histories: try: course_id = subject_code + course_code + title except TypeError: # title can be null course_id = subject_code + course_code print "% has no title!" 
% course_id title = course_id year = re.search("\d+", term).group(0) semester = re.search("[A-Z]*", term).group(0) yield course_id, year, semester, title def load(raw_courses, raw_course_histories): # requires department was called first course_names = {} sems_taught = defaultdict(list) for course_id, year, semester, name in raw_course_histories: course_names[course_id] = name sems_taught[course_id].append(Semester(year, semester)) for course_id, dept_id, course_num, description, crosslist_id \ in raw_courses: try: name = course_names[course_id] except KeyError: name = course_id dept = Department.objects.get_or_create(code=dept_id) courses = set() for semester in sems_taught[course_id]: course, _ = Course.objects.get_or_create( name=name, semester=semester, defaults={ "description": description, } ) courses.add(course) histories = set(course.history for course in courses if course.history) if len(histories) > 1: raise "Course %d is already tied to multiple course_histories!" \ % (course_id,) else: if histories: history = histories.pop() # select an arbritrary element else: history = CourseHistory.objects.create( notes="Created from PCR ID: %s" % course_id) for course in courses: course.history = history course.save() if __name__ == "__main__": extractor = Extractor(IMPORT_DATABASE_NAME, IMPORT_DATABASE_USER, IMPORT_DATABASE_PWD) load(extract_courses(extractor), extract_course_histories(extractor))
scripts/obsolete/import_courses.py
from collections import defaultdict from itertools import groupby import re #TODO: Figure out a better way to do this #hack to get scripts to run with django import os import sys sys.path.append("..") sys.path.append("../api") os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' from courses.models import Course, Semester, Department, CourseHistory from sandbox_config import IMPORT_DATABASE_NAME, IMPORT_DATABASE_USER, \ IMPORT_DATABASE_PWD from extractor import Extractor def extract_courses(extractor): fields = ('course_id', 'paragraph_number', 'course_description') # course_id is of form CIS110 # course_description is split into paragraphs each with a number tables = ('TEST_PCR_COURSE_DESC_V',) order_by = ('course_id ASC', 'paragraph_number ASC') courses = extractor.select(fields, tables, order_by=order_by) def keyfunc(course): return course[0] # id for id, paragraphs in groupby(courses, key=keyfunc): dept = re.search("[A-Z]*", id).group(0) code = re.search("\d+", id).group(0) description = "\n".join(paragraph for _, _, paragraph in paragraphs) # TODO: Crosslist ID crosslist_id = None yield id, dept, code, description, crosslist_id def extract_course_histories(extractor): fields = ('subject_code', 'course_code', 'term', 'title') # subject code is of form CIS # course code is of form 110 # term is of form 2011A # title is of form INTERMEDIATE FRENCH II tables = ('TEST_PCR_SUMMARY_V',) histories = extractor.select(fields, tables) # course_id is given by course title and number, ie FNAR 123 VIDEO I for subject_code, course_code, term, title in histories: try: course_id = subject_code + course_code + title except TypeError: # title can be null course_id = subject_code + course_code print "% has no title!" 
% course_id title = course_id year = re.search("\d+", term).group(0) semester = re.search("[A-Z]*", term).group(0) yield course_id, year, semester, title def load(raw_courses, raw_course_histories): # requires department was called first course_names = {} sems_taught = defaultdict(list) for course_id, year, semester, name in raw_course_histories: course_names[course_id] = name sems_taught[course_id].append(Semester(year, semester)) for course_id, dept_id, course_num, description, crosslist_id \ in raw_courses: try: name = course_names[course_id] except KeyError: name = course_id dept = Department.objects.get_or_create(code=dept_id) courses = set() for semester in sems_taught[course_id]: course, _ = Course.objects.get_or_create( name=name, semester=semester, defaults={ "description": description, } ) courses.add(course) histories = set(course.history for course in courses if course.history) if len(histories) > 1: raise "Course %d is already tied to multiple course_histories!" \ % (course_id,) else: if histories: history = histories.pop() # select an arbritrary element else: history = CourseHistory.objects.create( notes="Created from PCR ID: %s" % course_id) for course in courses: course.history = history course.save() if __name__ == "__main__": extractor = Extractor(IMPORT_DATABASE_NAME, IMPORT_DATABASE_USER, IMPORT_DATABASE_PWD) load(extract_courses(extractor), extract_course_histories(extractor))
0.18385
0.182444
import matplotlib.pyplot as plt
import networkx as nx
import shapely.geometry.point as point

import pycity_base.classes.Weather as Weather

import pycity_calc.buildings.building as build_ex
import pycity_calc.cities.city as cit
import pycity_calc.environments.co2emissions as co2
import pycity_calc.environments.environment as env
import pycity_calc.environments.market as mark
import pycity_calc.environments.timer as time
import pycity_calc.toolbox.networks.network_ops as netop
import pycity_calc.visualization.city_visual as citvis


def run_example():
    """
    Build a demo city of 4 buildings and one street, plot the raw graph,
    then compute and plot the minimum spanning tree that connects 3 of
    the buildings along the street.
    """
    # Create extended environment of pycity_calc
    year = 2010
    timestep = 3600  # Timestep in seconds
    location = (51.529086, 6.944689)  # (latitude, longitude) of Bottrop
    altitude = 55  # Altitude of Bottrop

    # Generate timer object
    timer = time.TimerExtended(timestep=timestep, year=year)

    # Generate weather object (useTRY presumably selects the German "test
    # reference year" dataset -- confirm against pycity_base docs)
    weather = Weather.Weather(timer, useTRY=True, location=location,
                              altitude=altitude)

    # Generate market object
    market = mark.Market()

    # Generate co2 emissions object
    co2em = co2.Emissions(year=year)

    # Generate environment
    environment = env.EnvironmentExtended(timer, weather, prices=market,
                                          location=location, co2em=co2em)

    # Generate city object
    city_object = cit.City(environment=environment)

    # Create extended building object (the same instance is reused for
    # every building node below)
    extended_building = build_ex.BuildingExtended(environment,
                                                  build_year=1962,
                                                  mod_year=2003,
                                                  build_type=0)

    # Add 4 extended buildings to the city object; only the first 3 are
    # later connected by the spanning tree
    node_1 = city_object.add_extended_building(
        extended_building=extended_building, position=point.Point(0, 0))
    node_2 = city_object.add_extended_building(
        extended_building=extended_building, position=point.Point(2, 2))
    node_3 = city_object.add_extended_building(
        extended_building=extended_building, position=point.Point(0.5, 4))
    node_4 = city_object.add_extended_building(
        extended_building=extended_building, position=point.Point(8, 10))

    list_to_be_conn = [node_1, node_2, node_3]

    # Add two street nodes (not buildings)
    node_str_1 = city_object.add_street_node(position=point.Point(-1, 1))
    node_str_2 = city_object.add_street_node(position=point.Point(10, 1))

    # Add street edge
    city_object.add_edge(node_str_1, node_str_2, network_type='street')

    # Plot the raw city graph
    pos_dict = citvis.get_pos_for_plotting(city=city_object)
    nx.draw_networkx_nodes(G=city_object, pos=pos_dict, node_color='k',
                           node_shape='s', alpha=0.5)
    nx.draw_networkx_edges(G=city_object, pos=pos_dict)
    plt.title('4 Buildings with 1 street')
    plt.show()
    plt.close()

    # Minimum spanning tree of the selected buildings, routed along the
    # street network; also returns any nodes created on the street
    (min_span_graph, list_new_nodes) = \
        netop.gen_min_span_tree_along_street(city=city_object,
                                             nodelist=list_to_be_conn)

    pos_dict = citvis.get_pos_for_plotting(city=min_span_graph)
    nx.draw_networkx_nodes(G=min_span_graph, pos=pos_dict, node_color='k',
                           node_shape='s', alpha=0.5)
    nx.draw_networkx_edges(G=min_span_graph, pos=pos_dict)
    plt.title('Minimum spanning (3 buildings) tree along street')
    plt.show()

    print('New nodes: ', list_new_nodes)


if __name__ == '__main__':
    # Execute example
    run_example()
pycity_calc/test/checks/city_street_min_span_test.py
import matplotlib.pyplot as plt import networkx as nx import shapely.geometry.point as point import pycity_base.classes.Weather as Weather import pycity_calc.buildings.building as build_ex import pycity_calc.cities.city as cit import pycity_calc.environments.co2emissions as co2 import pycity_calc.environments.environment as env import pycity_calc.environments.market as mark import pycity_calc.environments.timer as time import pycity_calc.toolbox.networks.network_ops as netop import pycity_calc.visualization.city_visual as citvis def run_example(): """ Run example to create city object of pycity with 3 buildings and street network """ # Create extended environment of pycity_calc year = 2010 timestep = 3600 # Timestep in seconds location = (51.529086, 6.944689) # (latitude, longitute) of Bottrop altitude = 55 # Altitude of Bottrop # Generate timer object timer = time.TimerExtended(timestep=timestep, year=year) # Generate weather object weather = Weather.Weather(timer, useTRY=True, location=location, altitude=altitude) # Generate market object market = mark.Market() # Generate co2 emissions object co2em = co2.Emissions(year=year) # Generate environment environment = env.EnvironmentExtended(timer, weather, prices=market, location=location, co2em=co2em) # Generate city object city_object = cit.City(environment=environment) # Create extended building object extended_building = build_ex.BuildingExtended(environment, build_year=1962, mod_year=2003, build_type=0) # Add 3 extended buildings to city object # Add building entities node_1 = city_object.add_extended_building( extended_building=extended_building, position=point.Point(0, 0)) node_2 = city_object.add_extended_building( extended_building=extended_building, position=point.Point(2, 2)) node_3 = city_object.add_extended_building( extended_building=extended_building, position=point.Point(0.5, 4)) node_4 = city_object.add_extended_building( extended_building=extended_building, position=point.Point(8, 10)) list_to_be_conn 
= [node_1, node_2, node_3] # Add additional, arbitrary buildings node_str_1 = city_object.add_street_node(position=point.Point(-1, 1)) node_str_2 = city_object.add_street_node(position=point.Point(10, 1)) # Add street edge city_object.add_edge(node_str_1, node_str_2, network_type='street') pos_dict = citvis.get_pos_for_plotting(city=city_object) nx.draw_networkx_nodes(G=city_object, pos=pos_dict, node_color='k', node_shape='s', alpha=0.5) nx.draw_networkx_edges(G=city_object, pos=pos_dict) plt.title('4 Buildings with 1 street') plt.show() plt.close() (min_span_graph, list_new_nodes) = \ netop.gen_min_span_tree_along_street(city=city_object, nodelist=list_to_be_conn) pos_dict = citvis.get_pos_for_plotting(city=min_span_graph) nx.draw_networkx_nodes(G=min_span_graph, pos=pos_dict, node_color='k', node_shape='s', alpha=0.5) nx.draw_networkx_edges(G=min_span_graph, pos=pos_dict) plt.title('Minimum spanning (3 buildings) tree along street') plt.show() print('New nodes: ', list_new_nodes) if __name__ == '__main__': # Execute example run_example()
0.444806
0.486271
import json
import logging
import os
import re
import shutil
import string
import sys
from stat import ST_ATIME, ST_MTIME


def parser_file_exists(parser, arg):
    """argparse ``type=`` helper: error out unless path ``arg`` exists.

    Args:
        parser (argparse.ArgumentParser): parser used to report the error
        arg (str): candidate file path

    Returns:
        str: ``arg`` unchanged when the path exists
    """
    if not os.path.exists(arg):
        parser.error("The file <%s> does not exist!" % arg)
    return arg


def import_json(json_file):
    """Imports a JSON into a python object.

    Args:
        json_file (str): file path to json file

    Returns:
        object: imported json data object or empty list if file does not exist

    Raises:
        json.decoder.JSONDecodeError: file exists but is not valid JSON
    """
    data = []
    if os.path.exists(json_file):
        with open(json_file) as fh:
            try:
                data = json.load(fh)
            except json.decoder.JSONDecodeError:
                logging.critical("%s was not in valid JSON format" % json_file)
                raise
    # simplified: the redundant try/except `else: return data` collapsed
    # into a single exit point with identical behavior
    return data


def fastqs_from_dir(input_dir):
    """
    Grabs single-end or paired-end FASTQ file pairs from a directory with
    basic assumptions in naming convention. Files are assumed to contain
    the extension .fq or .fastq and can be compressed. Paired files are
    assumed to have <sample>_R1.fastq and <sample>_R2.fastq. The read
    index (_R1, _R2) is required and should closely precede the file
    extension.

    Args:
        input_dir (str): directory containing fastq files

    Returns:
        dict: sample id mapped to a file path (single-end) or a list of
            two file paths (paired-end); empty dict when the directory
            does not exist
    """
    exts = ['.fastq', '.fq', '.zip']
    input_dir = os.path.abspath(input_dir)
    if not os.path.isdir(input_dir):
        # a file path was given; fall back to its parent directory
        logging.debug("Changing input dir from %s to %s" %
                      (input_dir, os.path.dirname(input_dir)))
        input_dir = os.path.dirname(input_dir)
    if not os.path.exists(input_dir):
        return {}
    pairs = {}
    # sample name for files without _r1 or _r2 in the name: split on the
    # first extension, e.g. "(\.fastq|\.fq|\.zip)"
    split_pattern = re.compile('(\\%s)' % '|\\'.join(exts))
    # split file name based on _r1 and _r2
    pattern = re.compile(r'((?s:.*))_[rR][12]')
    for f in os.listdir(input_dir):
        if not any(ext in f for ext in exts):
            continue
        toks = pattern.split(f)
        # found _R1 or _R2: toks is ['', <sample>, <rest>]
        if len(toks) == 3:
            sample_id = toks[1]
        else:
            sample_id = split_pattern.split(f)[0]
        if sample_id in pairs:
            if isinstance(pairs[sample_id], list):
                # already holds a pair; a 3rd file is ambiguous
                logging.warning("%s has more than 2 paired fastqs in %s" %
                                (sample_id, input_dir))
                continue
            pairs[sample_id] = [pairs[sample_id],
                                os.path.join(input_dir, f)]
        else:
            pairs[sample_id] = os.path.join(input_dir, f)
    logging.info("Found %d unique samples in %s" % (len(pairs), input_dir))
    return pairs


def copy_file(src, dst):
    """Copy ``src`` to ``dst`` unless ``dst`` already exists.

    The copy's modification time is advanced by 5 seconds — presumably so
    downstream change detection sees it as new; confirm before changing.
    """
    if not os.path.exists(dst):
        logging.info("Copying %s to %s" % (src, dst))
        shutil.copy(src, dst)
        # update the modified time of this file
        st = os.stat(dst)
        os.utime(dst, (st[ST_ATIME], st[ST_MTIME] + 5))


def add_csv_input(csv, dst):
    """
    Parses CLI argument `csv` for `fqc add` and returns the filename
    string or filename list with tab names.

    Args:
        csv (list): list of CSV files or <tab name>,<csv file> pairs
        dst (str): directory in which to copy input CSV files

    Returns:
        file path or list of [tab name, file name] pairs suitable for
        WebTab.filename

    Exits:
        via sys.exit when an input file does not exist
    """
    if isinstance(csv, list) and len(csv) > 1:
        filename = []
        # 'Plate 1',plate_1_counts.csv 'Plate 2',plate_2_counts.csv
        for i in csv:
            if "," in i:
                # BUG FIX: split only on the first comma so paths that
                # themselves contain commas don't raise ValueError
                name, filepath = i.split(",", 1)
            else:
                # just use the filename for the subplot label
                name = string.capwords(
                    os.path.basename(i).rpartition(".")[0].replace("_", " "))
                filepath = i
            filepath = os.path.abspath(os.path.expanduser(filepath))
            if not os.path.exists(filepath):
                sys.exit("Input file does not exist: %s" % filepath)
            copy_file(filepath, os.path.join(dst, os.path.basename(filepath)))
            filename.append([name, os.path.basename(filepath)])
    # no subplots
    else:
        # keep only the path portion of a possible <tab name>,<csv> pair
        raw = csv[0].partition(",")[-1] if "," in csv[0] else csv[0]
        # CONSISTENCY FIX: expand ~ exactly like the multi-file branch
        filename = os.path.abspath(os.path.expanduser(raw))
        if not os.path.exists(filename):
            sys.exit("Input file does not exist: %s" % filename)
        copy_file(filename, os.path.join(dst, os.path.basename(filename)))
        filename = os.path.basename(filename)
    return filename
fqc/utils.py
import json import logging import os import re import shutil import string import sys from stat import ST_ATIME, ST_MTIME def parser_file_exists(parser, arg): if not os.path.exists(arg): parser.error("The file <%s> does not exist!" % arg) return arg def import_json(json_file): """Imports a JSON into a python object. Args: json_file (str): file path to json file Returns: object: imported json data object or empty list if file does not exist Raises: json.decoder.JSONDecoderError """ data = [] if os.path.exists(json_file): with open(json_file) as fh: try: data = json.load(fh) except json.decoder.JSONDecodeError: logging.critical("%s was not in valid JSON format" % json_file) raise else: return data return data def fastqs_from_dir(input_dir): """ Grabs single-end or paired-end FASTQ file pairs from a directory with basic assumptions in naming convention. Files are assumed to contain the extension .fq or .fastq and can be compressed. Paired files are assumed to have <sample>_R1.fastq and <sample>_R2.fastq. The read index (_R1, _R2) is required and should closely precede the file extension. 
Args: input_dir (str): directory containing fastq files Returns: dict of sample id to file path or list of file paths """ exts = ['.fastq', '.fq', '.zip'] input_dir = os.path.abspath(input_dir) if not os.path.isdir(input_dir): logging.debug("Changing input dir from %s to %s" % (input_dir, os.path.dirname(input_dir))) input_dir = os.path.dirname(input_dir) if not os.path.exists(input_dir): return {} pairs = {} # sample name for files without _r1 or _r2 in the name split_pattern = re.compile('(\\%s)' % '|\\'.join(exts)) # split file name based on _r1 and _r2 pattern = re.compile(r'((?s:.*))_[rR][12]') for f in os.listdir(input_dir): if not any(ext in f for ext in exts): continue toks = pattern.split(f) # found _R1 or _R2 if len(toks) == 3: sample_id = toks[1] else: sample_id = split_pattern.split(f)[0] if sample_id in pairs: if isinstance(pairs[sample_id], list): logging.warning("%s has more than 2 paired fastqs in %s" % (sample_id, input_dir)) continue pairs[sample_id] = [pairs[sample_id], os.path.join(input_dir, f)] else: pairs[sample_id] = os.path.join(input_dir, f) logging.info("Found %d unique samples in %s" % (len(pairs), input_dir)) return pairs def copy_file(src, dst): if not os.path.exists(dst): logging.info("Copying %s to %s" % (src, dst)) shutil.copy(src, dst) # update the modified time of this file st = os.stat(dst) os.utime(dst, (st[ST_ATIME], st[ST_MTIME] + 5)) def add_csv_input(csv, dst): """ Parses CLI argument `csv` for `fqc add` and returns the filename string or filename list with tab names. 
Args: csv (list): list of CSV files or <tab name>,<csv file> pairs dst (str): directory in which to copy input CSV files Returns: file path or list of tab name, file paths suitable for WebTab.filename """ if isinstance(csv, list) and len(csv) > 1: filename = [] # 'Plate 1',plate_1_counts.csv 'Plate 2',plate_2_counts.csv for i in csv: if "," in i: name, filepath = i.split(",") # just use the filename for the subplot label else: name = string.capwords(os.path.basename(i).rpartition(".")[0].replace("_", " ")) filepath = i filepath = os.path.abspath(os.path.expanduser(filepath)) if not os.path.exists(filepath): sys.exit("Input file does not exist: %s" % filepath) copy_file(filepath, os.path.join(dst, os.path.basename(filepath))) filename.append([name, os.path.basename(filepath)]) # no subplots else: if "," in csv[0]: filename = os.path.abspath(csv[0].partition(",")[-1]) else: filename = os.path.abspath(csv[0]) if not os.path.exists(filename): sys.exit("Input file does not exist: %s" % filename) copy_file(filename, os.path.join(dst, os.path.basename(filename))) filename = os.path.basename(filename) return filename
0.402979
0.135546
"""Command line interface to add tasks to 2Doapp."""
import argparse
import subprocess
import sys

import twodolib
from .urlhelper import TwoDoTask

# FIX: the original text read "'--execute''" with a doubled quote
usage_description = """\
Program to create tasks in 2Do.

The default behavior is to print the generated URL to stdout.
Please use the '-e' or '--execute' option, if you want to send the task
directly to the 2DoApp.

Examples
========

Add a task due tomorrow:

    task2do -d 1 "Dinner at 8pm."

Add a task with high priority:

    task2do -p 3 "High priority task."
    - or -
    task2do --priority 3 "High priority task."

Add a task due today and repeated weekly:

    task2do "change clothes" -d 0 --repeat 2

Add a task due at 6pm today

    task2do "Watch EX_MACHINA" --due 0 --dueTime 18:00

Add a task due tomorrow, with tags, which is also starred and repeated monthly

    task2do "Monthly subscription." --tags bill,payment -s --due 1 --repeat 4

Add a task with an url action (open a link)

    task2do "Read help text" -a url:https://www.2doapp.com/
    task2do "Read help text" --action url:https://www.2doapp.com/

Add a Subtask in list personal in project errands:

    task2do "Buy milk." -l personal --project errands
"""

ACTION_HELP_TEXT = """action: Supports the following formats:
call:<number> - Add a Call action to call the specified number
message:<number> - Add a Message action to send a message to the specified number
mail:<email> - Add an Email action to send an email the specified email address
url:<url to visit> - Add a Browse action to visit the specified URL address
visit:<address> - Add a Visit action to visit the specified location
google:<search term> - Add a Google action to search the specified keyword
Enter the arguments after the colon without the angle brackets.
For more details: SEE https://www.2doapp.com/kb/article/url-schemes.html
"""


def parse_arguments(args):
    """Return Namespace with parsed command line arguments.

    Args:
        args (list): raw argument strings, e.g. ``sys.argv[1:]``
    """
    version = '%(prog)s {}'.format(twodolib.__version__)
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        prog='task2do',
        description=usage_description,
    )
    p.add_argument('task', help='Title of the task.')
    p.add_argument('-a', '--action', help=ACTION_HELP_TEXT)
    p.add_argument('-d', '--due', default=None,
                   help='Due date. Supports two formats: YYYY-MM-DD - Sets '
                        'the date on default due time (based on your '
                        'settings), unless due time is specified separately '
                        'or ignoreDefaults (-i) is given. OR: Number '
                        'of days due from today. Which means: 0 = '
                        'today, 1 = tomorrow and so on)')
    p.add_argument('--dueTime', default=None,
                   help='Due time. Supports 24h format HH:MM.')
    p.add_argument('-e', '--execute', action='store_true',
                   help='Actually add the task instead of only '
                        'printing the URL to stdout.')
    p.add_argument('-f', '--forParentName', metavar='FOR_PARENTNAME',
                   dest='for_parent_name',
                   help='Title of an existing project or checklist to save '
                        'the new task there as a subtask. Also requires the '
                        'parent\'s task list.',
                   default=None)
    p.add_argument('-i', '--ignoreDefaults', action='store_true',
                   default=False,
                   help='Ignore default date / time settings of 2DoApp.')
    p.add_argument('-l', '--list', metavar='FOR_LIST', dest='for_list',
                   help='Name of an existing list in 2DoApp, '
                        'case-insensitive. If missing, the default list or '
                        'the currently visible list on screen is used.',
                   default=None)
    p.add_argument('-n', '--note', help='Notes for the task', default=None)
    p.add_argument('-p', '--priority', choices=['0', '1', '2', '3'],
                   help='priority: 0 (none), 1 (low), 2 (medium), 3 (high)',
                   default='0')
    p.add_argument('--project', metavar='IN_PROJECT', dest='in_project',
                   help='Name of an existing project in 2DoApp, into which '
                        'the task will be pasted. So you can create '
                        'subtasks.',
                   default=None)
    p.add_argument('--repeat', default=None, choices=['1', '2', '3', '4'],
                   help='Repeat task: 1 (daily), 2 (weekly), 3 (bi-weekly), '
                        '4 (monthly))')
    p.add_argument('-s', '--starred', help='Mark task as starred.',
                   action='store_true', default=False)
    p.add_argument('--start', default=None,
                   help='Start date and time. Supports the format: '
                        '"YYYY-MM-DD HH:MM" - Sets the start date to the '
                        'date and time specified - OR - Any number with '
                        '0 = today, 1 = tomorrow and so on)')
    p.add_argument('-t', '--type', choices=['0', '1', '2'], dest='task_type',
                   help='Type of task to create. The following options are '
                        'supported: 0 - Task (default), 1 - Project, '
                        '2 - Checklist',
                   default='0')
    p.add_argument('--tags', default=None,
                   help='Comma separated list of tags to assign to the task')
    # NOTE(review): default=None with store_true is unusual (unset -> None,
    # set -> True); kept as-is because TwoDoTask(**vars(args)) may rely on
    # receiving None — confirm before normalizing to default=False.
    p.add_argument('--taskid', default=None, action='store_true',
                   help='Prints taskid, needs the task title and the list.')
    p.add_argument('-v', '--version', action='version', version=version)
    return p.parse_args(args)


def main(arguments=None):  # use arguments to allow testing.
    """Create a task in 2DoApp."""
    if arguments is None:
        arguments = sys.argv[1:]
    args = parse_arguments(arguments)
    t = TwoDoTask(**vars(args))
    if args.execute:
        # NOTE(review): `open` is the macOS URL launcher; this path is
        # macOS-only as written.
        subprocess.call(['open', t.url()])
    elif args.taskid:
        if t.for_list:
            taskid = t.get_taskid()
            if taskid:
                print(taskid)
            else:
                print("No task found!")
        else:
            print('Please also provide the list '
                  'of the task you are looking for!')
    else:
        # default behavior: just print the generated URL
        print(t.url())


if __name__ == '__main__':  # pragma: no cover
    main(sys.argv[1:])
twodolib/cli.py
"""Command line interface to add tasks to 2Doapp.""" import argparse import sys import subprocess import twodolib from . urlhelper import TwoDoTask usage_description = """\ Program to create tasks in 2Do. The default behavior is to print the generated URL to stdout. Please use the '-e' or '--execute'' option, if you want to send the task directly to the 2DoApp. Examples ======== Add a task due tomorrow: task2do -d 1 "Dinner at 8pm." Add a task with high priority: task2do -p 3 "High priority task." - or - task2do --priority 3 "High priority task." Add a task due today and repeated weekly: task2do "change clothes" -d 0 --repeat 2 Add a task due at 6pm today task2do "Watch EX_MACHINA" --due 0 --dueTime 18:00 Add a task due tomorrow, with tags, which is also starred and repeated monthly task2do "Monthly subscription." --tags bill,payment -s --due 1 --repeat 4 Add a task with an url action (open a link) task2do "Read help text" -a url:https://www.2doapp.com/ task2do "Read help text" --action url:https://www.2doapp.com/ Add a Subtask in list personal in project errands: task2do "Buy milk." -l personal --project errands """ ACTION_HELP_TEXT = """action: Supports the following formats: call:<number> - Add a Call action to call the specified number message:<number> - Add a Message action to send a message to the specified number mail:<email> - Add an Email action to send an email the specified email address url:<url to visit> - Add a Browse action to visit the specified URL address visit:<address> - Add a Visit action to visit the specified location google:<search term> - Add a Google action to search the specified keyword Enter the arguments after the colon without the angle brackets. 
For more details: SEE https://www.2doapp.com/kb/article/url-schemes.html """ def parse_arguments(args): """Return Namespace with parsed command line arguments.""" version = '%(prog)s {}'.format(twodolib.__version__) p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, prog='task2do', description=usage_description, ) p.add_argument('task', help='Title of the task.') p.add_argument('-a', '--action', help=ACTION_HELP_TEXT) p.add_argument('-d', '--due', default=None, help='Due date. Supports two formats: YYYY-MM-DD - Sets ' 'the date on default due time (based on your ' 'settings), unless due time is specified separately ' 'or ignoreDefaults (-i) is given. OR: Number ' 'of days due from today. Which means: 0 = ' 'today, 1 = tomorrow and so on)') p.add_argument('--dueTime', default=None, help='Due time. Supports 24h format HH:MM.') p.add_argument('-e', '--execute', action='store_true', help='Actually add the task instead of only ' 'printing the URL to stdout.') p.add_argument('-f', '--forParentName', metavar='FOR_PARENTNAME', dest='for_parent_name', help='Title of an existing project or checklist to save the ' 'new task there as a subtask. Also requires the ' 'parent\'s task list.', default=None) p.add_argument('-i', '--ignoreDefaults', action='store_true', default=False, help='Ignore default date / time settings of 2DoApp.') p.add_argument('-l', '--list', metavar='FOR_LIST', dest='for_list', help='Name of an existing list in 2DoApp, ' 'case-insensitive. If missing, the default list or the ' 'currently visible list on screen is used.', default=None) p.add_argument('-n', '--note', help='Notes for the task', default=None) p.add_argument('-p', '--priority', choices=['0', '1', '2', '3'], help='priority: 0 (none), 1 (low), 2 (medium), 3 (high)', default='0') p.add_argument('--project', metavar='IN_PROJECT', dest='in_project', help='Name of an existing project in 2DoApp, into which the' ' task will be pasted. 
So you can create subtasks.', default=None) p.add_argument('--repeat', default=None, choices=['1', '2', '3', '4'], help='Repeat task: 1 (daily), 2 (weekly), 3 (bi-weekly), ' '4 (monthly))') p.add_argument('-s', '--starred', help='Mark task as starred.', action='store_true', default=False) p.add_argument('--start', default=None, help='Start date and time. Supports the format: ' '"YYYY-MM-DD HH:MM" - Sets the start date to the date ' 'and time specified - OR - Any number with 0 = today, ' '1 = tomorrow and so on)') p.add_argument('-t', '--type', choices=['0', '1', '2'], dest='task_type', help='Type of task to create. The following options are ' 'supported: 0 - Task (default), 1 - Project, ' '2 - Checklist', default='0') p.add_argument('--tags', default=None, help='Comma separated list of tags to assign to the task') p.add_argument('--taskid', default=None, action='store_true', help='Prints taskid, needs the task title and the list.') p.add_argument('-v', '--version', action='version', version=version) return p.parse_args(args) def main(arguments=None): # use arguments to allow testing. """Create a task in 2DoApp.""" if arguments is None: arguments = sys.argv[1:] args = parse_arguments(arguments) t = TwoDoTask(**vars(args)) if args.execute: subprocess.call(['open', t.url()]) elif args.taskid: if t.for_list: taskid = t.get_taskid() if taskid: print(taskid) else: print("No task found!") else: print('Please also provide the list ' 'of the task you are looking for!') else: print(t.url()) if __name__ == '__main__': # pragma: no cover main(sys.argv[1:])
0.519278
0.291595
import json import os import re from contextlib import contextmanager import six from flask import Flask from mlcomp import __version__ from mlcomp.persist import Storage from mlcomp.persist.storage_tree import StorageTree, StorageTreeWatcher from mlcomp.utils import object_to_dict, is_windows, BackgroundWorker from . import config from .views import api_bp, main_bp, storage_bp, report_bp from .utils import MountTree from .webpack import Webpack __all__ = ['BoardApp', 'StorageApp', 'ReportApp'] def norm_url_prefix(url): """Normalize the url prefix.""" url = re.sub(r'[/\\]+', '/', url).rstrip('/') if url != '' and not url.startswith(''): url = '/' + url if url == '/_api': raise ValueError('URL prefix of a storage cannot be `/_api`.') return url class SystemInfo(object): def __init__(self): self.name = 'ML Companion' self.version = __version__ def to_json(self): return json.dumps(object_to_dict(self)) class BaseApp(Flask): def __init__(self): super(BaseApp, self).__init__(__name__) self.config.from_mapping(config) self.webpack = Webpack(self) self.jinja_env.globals.update({ '__system__': SystemInfo(), }) @contextmanager def with_context(self): """Open the context to serve this application.""" yield self class BoardApp(BaseApp): """The board application. Parameters ---------- mappings : dict[str, str] Mappings from URL prefix to directory. disable_watcher : bool Whether or not to disable the file system watcher? 
(default False) """ def __init__(self, mappings, disable_watcher=False): if not disable_watcher and is_windows(): raise RuntimeError('MLComp Board does not support watching file ' 'system changes on windows yet.') super(BoardApp, self).__init__() # check the mappings self.mappings = { norm_url_prefix(url): path for url, path in six.iteritems(mappings) } # build the storage tree and watcher self.trees = { url: StorageTree(path) for url, path in six.iteritems(self.mappings) } self.mounts = MountTree() for url, tree in six.iteritems(self.trees): self.mounts.mount(url, tree) if disable_watcher: self.watcher = None else: self.watcher = StorageTreeWatcher(six.itervalues(self.trees)) self.watcher.start() # setup the plugins and views self.register_blueprint(main_bp, url_prefix='') self.register_blueprint(api_bp, url_prefix='/_api') self.register_blueprint(storage_bp, url_prefix='/s') @property def is_board_app(self): """This method is provided for `storage_bp`.""" return True class StorageApp(BaseApp): """The single storage application. Parameters ---------- storage_dir : str The path of the storage directory (which contains "storage.json"). disable_watcher : bool Whether or not to disable the file system watcher? (default False) """ def __init__(self, storage_dir, disable_watcher=False): super(StorageApp, self).__init__() # open the storage self.storage_dir = os.path.abspath(storage_dir) self.storage = Storage(self.storage_dir, mode='read') # setup the plugins and views self.register_blueprint(storage_bp, url_prefix='') @property def is_board_app(self): """This method is provided for `storage_bp`.""" return False @contextmanager def with_context(self): worker = BackgroundWorker(self.storage.reload, sleep_seconds=1) try: worker.start() yield self finally: worker.stop() class ReportApp(BaseApp): """The single report file application. Parameters ---------- report_dir : str The path of the report directory (which contains "report.json"). 
disable_watcher : bool Whether or not to disable the file system watcher? (default False) """ def __init__(self, report_dir, disable_watcher=False): super(ReportApp, self).__init__() # check the report directory self.report_dir = os.path.abspath(report_dir) # setup the plugins and views self.register_blueprint(report_bp, url_prefix='')
mlcomp/board/application.py
import json import os import re from contextlib import contextmanager import six from flask import Flask from mlcomp import __version__ from mlcomp.persist import Storage from mlcomp.persist.storage_tree import StorageTree, StorageTreeWatcher from mlcomp.utils import object_to_dict, is_windows, BackgroundWorker from . import config from .views import api_bp, main_bp, storage_bp, report_bp from .utils import MountTree from .webpack import Webpack __all__ = ['BoardApp', 'StorageApp', 'ReportApp'] def norm_url_prefix(url): """Normalize the url prefix.""" url = re.sub(r'[/\\]+', '/', url).rstrip('/') if url != '' and not url.startswith(''): url = '/' + url if url == '/_api': raise ValueError('URL prefix of a storage cannot be `/_api`.') return url class SystemInfo(object): def __init__(self): self.name = 'ML Companion' self.version = __version__ def to_json(self): return json.dumps(object_to_dict(self)) class BaseApp(Flask): def __init__(self): super(BaseApp, self).__init__(__name__) self.config.from_mapping(config) self.webpack = Webpack(self) self.jinja_env.globals.update({ '__system__': SystemInfo(), }) @contextmanager def with_context(self): """Open the context to serve this application.""" yield self class BoardApp(BaseApp): """The board application. Parameters ---------- mappings : dict[str, str] Mappings from URL prefix to directory. disable_watcher : bool Whether or not to disable the file system watcher? 
(default False) """ def __init__(self, mappings, disable_watcher=False): if not disable_watcher and is_windows(): raise RuntimeError('MLComp Board does not support watching file ' 'system changes on windows yet.') super(BoardApp, self).__init__() # check the mappings self.mappings = { norm_url_prefix(url): path for url, path in six.iteritems(mappings) } # build the storage tree and watcher self.trees = { url: StorageTree(path) for url, path in six.iteritems(self.mappings) } self.mounts = MountTree() for url, tree in six.iteritems(self.trees): self.mounts.mount(url, tree) if disable_watcher: self.watcher = None else: self.watcher = StorageTreeWatcher(six.itervalues(self.trees)) self.watcher.start() # setup the plugins and views self.register_blueprint(main_bp, url_prefix='') self.register_blueprint(api_bp, url_prefix='/_api') self.register_blueprint(storage_bp, url_prefix='/s') @property def is_board_app(self): """This method is provided for `storage_bp`.""" return True class StorageApp(BaseApp): """The single storage application. Parameters ---------- storage_dir : str The path of the storage directory (which contains "storage.json"). disable_watcher : bool Whether or not to disable the file system watcher? (default False) """ def __init__(self, storage_dir, disable_watcher=False): super(StorageApp, self).__init__() # open the storage self.storage_dir = os.path.abspath(storage_dir) self.storage = Storage(self.storage_dir, mode='read') # setup the plugins and views self.register_blueprint(storage_bp, url_prefix='') @property def is_board_app(self): """This method is provided for `storage_bp`.""" return False @contextmanager def with_context(self): worker = BackgroundWorker(self.storage.reload, sleep_seconds=1) try: worker.start() yield self finally: worker.stop() class ReportApp(BaseApp): """The single report file application. Parameters ---------- report_dir : str The path of the report directory (which contains "report.json"). 
disable_watcher : bool Whether or not to disable the file system watcher? (default False) """ def __init__(self, report_dir, disable_watcher=False): super(ReportApp, self).__init__() # check the report directory self.report_dir = os.path.abspath(report_dir) # setup the plugins and views self.register_blueprint(report_bp, url_prefix='')
0.806624
0.080141
from logging import getLogger from re import compile as regex_compile from shutil import which from subprocess import CalledProcessError, run as process_run, DEVNULL from threading import Thread from typing import Callable, Dict, List from . import AbstractPlugin, ChatCommandEventData, ChatOutputEventData, Event from ..utils import SafeTemporaryDirectory # PLUGINS class GitPlugin(AbstractPlugin): """ Git plugin class """ def __init__(self, name: str, publish_event: Callable[[Event], None], repositories: Dict[str, str]): super().__init__(name, publish_event) self._repositories = repositories self._branch_pattern = regex_compile(r'[a-zA-Z0-9/_:\.\-\+\*]+') def apply_event(self, event: Event): # Is valid plugin command? if isinstance(event.data, ChatCommandEventData): data: ChatCommandEventData = event.data args = data.command.split(maxsplit=3) args_number = len(args) if args_number == 1 and args[0] == 'help': self._publish_event_data(ChatOutputEventData('Command: git [<REPOSITORY> [<BRANCH>]]', event.publisher, data.channel_id)) if args_number >= 1 and args[0] == 'git': # Evaluate command arguments if args_number == 1: # Show available repositories self._publish_event_data(ChatOutputEventData('Repositories: ' + ', '.join(sorted(self._repositories.keys())), event.publisher, data.channel_id)) else: repository = self._repositories.get(args[1]) if not repository: self._publish_event_data(ChatOutputEventData('Repository not found!', event.publisher, data.channel_id)) else: if args_number == 2: # Show available repository branches def run(): try: self._publish_event_data(ChatOutputEventData( 'Branches: ' + ', '.join(_get_repository_branches(repository)), event.publisher, data.channel_id )) except (OSError, CalledProcessError) as ex: self._log.error('Git failed: %s', ex) self._publish_event_data(ChatOutputEventData('Branches request failed!', event.publisher, data.channel_id)) Thread(target=run, daemon=True).start() else: # args_number == 3 branch = args[2] if not 
self._branch_pattern.fullmatch(branch): self._publish_event_data(ChatOutputEventData('Invalid branch name!', event.publisher, data.channel_id)) else: # Show last commits of repository branch def run(): try: self._publish_event_data(ChatOutputEventData( 'Commits:\n' + _get_repository_branch_commits(repository, branch), event.publisher, data.channel_id )) except (OSError, CalledProcessError) as ex: self._log.error('Git failed: %s', ex) self._publish_event_data(ChatOutputEventData('Commits request failed!', event.publisher, data.channel_id)) Thread(target=run, daemon=True).start() # REGISTRATION def register_plugins(workers: List[AbstractPlugin], publish_event: Callable[[Event], None], config: Dict[str,Dict[str,str]], _env: Dict[str, str]): """ Register local plugins to bot """ repositories = config.get('plugin.git') if repositories: if which('git'): workers.append(GitPlugin('git', publish_event, repositories)) else: getLogger(__name__).warning('Git plugins require git installed!') # HELPERS def _get_repository_branches(repository: str) -> List[str]: return list(sorted(filter( None, map( lambda line: line.partition('refs/heads/')[2], process_run( ['git', 'ls-remote', '-h', repository], capture_output=True, text=True, check=True ).stdout.splitlines() ) ))) def _get_repository_branch_commits(repository: str, branch: str) -> str: with SafeTemporaryDirectory(prefix='botlet_', suffix='.git') as dir_path: process_run( ['git', 'clone', repository, '-b', branch, '--depth=3', '--bare', '--filter=blob:none', '-q', dir_path], stdout=DEVNULL, stderr=DEVNULL, check=True ) return process_run( ['git', 'log', '-n3', '--no-decorate', '--format=[%h] %s (by %cn, %cr)'], cwd=dir_path, capture_output=True, text=True, check=True ).stdout.strip()
botlet/plugins/git.py
from logging import getLogger from re import compile as regex_compile from shutil import which from subprocess import CalledProcessError, run as process_run, DEVNULL from threading import Thread from typing import Callable, Dict, List from . import AbstractPlugin, ChatCommandEventData, ChatOutputEventData, Event from ..utils import SafeTemporaryDirectory # PLUGINS class GitPlugin(AbstractPlugin): """ Git plugin class """ def __init__(self, name: str, publish_event: Callable[[Event], None], repositories: Dict[str, str]): super().__init__(name, publish_event) self._repositories = repositories self._branch_pattern = regex_compile(r'[a-zA-Z0-9/_:\.\-\+\*]+') def apply_event(self, event: Event): # Is valid plugin command? if isinstance(event.data, ChatCommandEventData): data: ChatCommandEventData = event.data args = data.command.split(maxsplit=3) args_number = len(args) if args_number == 1 and args[0] == 'help': self._publish_event_data(ChatOutputEventData('Command: git [<REPOSITORY> [<BRANCH>]]', event.publisher, data.channel_id)) if args_number >= 1 and args[0] == 'git': # Evaluate command arguments if args_number == 1: # Show available repositories self._publish_event_data(ChatOutputEventData('Repositories: ' + ', '.join(sorted(self._repositories.keys())), event.publisher, data.channel_id)) else: repository = self._repositories.get(args[1]) if not repository: self._publish_event_data(ChatOutputEventData('Repository not found!', event.publisher, data.channel_id)) else: if args_number == 2: # Show available repository branches def run(): try: self._publish_event_data(ChatOutputEventData( 'Branches: ' + ', '.join(_get_repository_branches(repository)), event.publisher, data.channel_id )) except (OSError, CalledProcessError) as ex: self._log.error('Git failed: %s', ex) self._publish_event_data(ChatOutputEventData('Branches request failed!', event.publisher, data.channel_id)) Thread(target=run, daemon=True).start() else: # args_number == 3 branch = args[2] if not 
self._branch_pattern.fullmatch(branch): self._publish_event_data(ChatOutputEventData('Invalid branch name!', event.publisher, data.channel_id)) else: # Show last commits of repository branch def run(): try: self._publish_event_data(ChatOutputEventData( 'Commits:\n' + _get_repository_branch_commits(repository, branch), event.publisher, data.channel_id )) except (OSError, CalledProcessError) as ex: self._log.error('Git failed: %s', ex) self._publish_event_data(ChatOutputEventData('Commits request failed!', event.publisher, data.channel_id)) Thread(target=run, daemon=True).start() # REGISTRATION def register_plugins(workers: List[AbstractPlugin], publish_event: Callable[[Event], None], config: Dict[str,Dict[str,str]], _env: Dict[str, str]): """ Register local plugins to bot """ repositories = config.get('plugin.git') if repositories: if which('git'): workers.append(GitPlugin('git', publish_event, repositories)) else: getLogger(__name__).warning('Git plugins require git installed!') # HELPERS def _get_repository_branches(repository: str) -> List[str]: return list(sorted(filter( None, map( lambda line: line.partition('refs/heads/')[2], process_run( ['git', 'ls-remote', '-h', repository], capture_output=True, text=True, check=True ).stdout.splitlines() ) ))) def _get_repository_branch_commits(repository: str, branch: str) -> str: with SafeTemporaryDirectory(prefix='botlet_', suffix='.git') as dir_path: process_run( ['git', 'clone', repository, '-b', branch, '--depth=3', '--bare', '--filter=blob:none', '-q', dir_path], stdout=DEVNULL, stderr=DEVNULL, check=True ) return process_run( ['git', 'log', '-n3', '--no-decorate', '--format=[%h] %s (by %cn, %cr)'], cwd=dir_path, capture_output=True, text=True, check=True ).stdout.strip()
0.525612
0.102889
import nxppy import getpass import display import mysql import hashlib import sys import tty import termios import logging import thread import time import RPi.GPIO as GPIO #Enable debug logging into log DEBUG=True #Enable printing informations to std. output VERBOSE=True class Actions: incomming=1 outcomming=2 breakstart=3 breakend=4 if(DEBUG): logging.basicConfig(format='%(asctime)s %(message)s',filename='attendance.log', level=logging.DEBUG) def debug(message): logging.debug(message) def onScreen(message): if(VERBOSE): print(message) def readNfc(action): if(action==48):#0 - Incomming onScreen("Logging In...") display.lcdWriteFirstLine("Hello..") onScreen("Hello..") display.lcdWriteSecondLine("Swipe your Card") onScreen('Swipe your card') uid="" while True: try: #GPIO.cleanup() mifare = nxppy.Mifare() uid = mifare.select() if uid != "": break #print(uid) except nxppy.SelectError: # SelectError is raised if no card is in the field. pass time.sleep(1) mysql.insertReading(uid,Actions.incomming,1) cardcheck=mysql.checkcard(uid) if cardcheck==0: display.lcdWriteFirstLine("Hello.USER") display.lcdWriteSecondLine(uid) print "Hello User, ",uid #print (uid) time.sleep(3) display.lcdWriteFirstLine("Processing...") display.lcdWriteSecondLine("Plz wait...") time.sleep(4) temp=input("Press any key to continue ") res=0 while True: res=mysql.checkamtpay(uid) if res!= 0 : break print res display.lcdWriteFirstLine("Amount to pay : ") display.lcdWriteSecondLine(str(res)) time.sleep(3) display.lcdWriteFirstLine("Enter pin to ") display.lcdWriteSecondLine("confirm") time.sleep(1) pin="" pin=getpass.getpass("Enter pin to confirm ") pin=str(pin) pin=hashlib.md5(pin).hexdigest() bal=mysql.authenticat(uid) if bal[1]==pin: if bal[0]>=res: mysql.payment(bal[0]-res,uid) mysql.clearvalues() display.lcdWriteFirstLine("Tranx Success") display.lcdWriteSecondLine("Thank You !!") time.sleep(4) else: display.lcdWriteFirstLine("Tranx failed") display.lcdWriteSecondLine("Insuffient balance") 
mysql.clearvalues() time.sleep(3) else: display.lcdWriteFirstLine("Tranx failed") display.lcdWriteSecondLine("Wrong pin") mysql.clearvalues() time.sleep(3) elif cardcheck==1: display.lcdWriteFirstLine("Tranx failed") display.lcdWriteSecondLine("Invalid Card") mysql.clearvalues() time.sleep(3) #Sleep a little, so the information about last action on display is readable by humans time.sleep(1) #Backing up the input attributes, so we can change it for reading single #character without hitting enter each time fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) def getOneKey(): try: tty.setcbreak(sys.stdin.fileno()) ch = sys.stdin.read(1) return ord(ch) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) displayTime=True def printDateToDisplay(): while True: #Display current time on display, until global variable is set if displayTime!=True: thread.exit() #display.lcdWriteFirstLine(time.strftime("%d.%m. %H:%M:%S", time.localtime())) onScreen(time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())) time.sleep(1) def main(): #GPIO.cleanup() try: #display.initGpio() display.init() while True: display.lcdWriteFirstLine("Welcome..") display.lcdWriteSecondLine("Choose an action...") print "choose an action" global displayTime displayTime=True #Start new thread to show curent datetime on display # and wait for user input on keyboard #thr = thread.start_new_thread(printDateToDisplay, ()) a = getOneKey() displayTime=False if 47 < a < 58: readNfc(a) except KeyboardInterrupt: GPIO.cleanup() pass GPIO.cleanup() if __name__ == '__main__': debug("----------========== Starting session! ==========----------") main()
souce-code/nfc.py
import nxppy import getpass import display import mysql import hashlib import sys import tty import termios import logging import thread import time import RPi.GPIO as GPIO #Enable debug logging into log DEBUG=True #Enable printing informations to std. output VERBOSE=True class Actions: incomming=1 outcomming=2 breakstart=3 breakend=4 if(DEBUG): logging.basicConfig(format='%(asctime)s %(message)s',filename='attendance.log', level=logging.DEBUG) def debug(message): logging.debug(message) def onScreen(message): if(VERBOSE): print(message) def readNfc(action): if(action==48):#0 - Incomming onScreen("Logging In...") display.lcdWriteFirstLine("Hello..") onScreen("Hello..") display.lcdWriteSecondLine("Swipe your Card") onScreen('Swipe your card') uid="" while True: try: #GPIO.cleanup() mifare = nxppy.Mifare() uid = mifare.select() if uid != "": break #print(uid) except nxppy.SelectError: # SelectError is raised if no card is in the field. pass time.sleep(1) mysql.insertReading(uid,Actions.incomming,1) cardcheck=mysql.checkcard(uid) if cardcheck==0: display.lcdWriteFirstLine("Hello.USER") display.lcdWriteSecondLine(uid) print "Hello User, ",uid #print (uid) time.sleep(3) display.lcdWriteFirstLine("Processing...") display.lcdWriteSecondLine("Plz wait...") time.sleep(4) temp=input("Press any key to continue ") res=0 while True: res=mysql.checkamtpay(uid) if res!= 0 : break print res display.lcdWriteFirstLine("Amount to pay : ") display.lcdWriteSecondLine(str(res)) time.sleep(3) display.lcdWriteFirstLine("Enter pin to ") display.lcdWriteSecondLine("confirm") time.sleep(1) pin="" pin=getpass.getpass("Enter pin to confirm ") pin=str(pin) pin=hashlib.md5(pin).hexdigest() bal=mysql.authenticat(uid) if bal[1]==pin: if bal[0]>=res: mysql.payment(bal[0]-res,uid) mysql.clearvalues() display.lcdWriteFirstLine("Tranx Success") display.lcdWriteSecondLine("Thank You !!") time.sleep(4) else: display.lcdWriteFirstLine("Tranx failed") display.lcdWriteSecondLine("Insuffient balance") 
mysql.clearvalues() time.sleep(3) else: display.lcdWriteFirstLine("Tranx failed") display.lcdWriteSecondLine("Wrong pin") mysql.clearvalues() time.sleep(3) elif cardcheck==1: display.lcdWriteFirstLine("Tranx failed") display.lcdWriteSecondLine("Invalid Card") mysql.clearvalues() time.sleep(3) #Sleep a little, so the information about last action on display is readable by humans time.sleep(1) #Backing up the input attributes, so we can change it for reading single #character without hitting enter each time fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) def getOneKey(): try: tty.setcbreak(sys.stdin.fileno()) ch = sys.stdin.read(1) return ord(ch) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) displayTime=True def printDateToDisplay(): while True: #Display current time on display, until global variable is set if displayTime!=True: thread.exit() #display.lcdWriteFirstLine(time.strftime("%d.%m. %H:%M:%S", time.localtime())) onScreen(time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())) time.sleep(1) def main(): #GPIO.cleanup() try: #display.initGpio() display.init() while True: display.lcdWriteFirstLine("Welcome..") display.lcdWriteSecondLine("Choose an action...") print "choose an action" global displayTime displayTime=True #Start new thread to show curent datetime on display # and wait for user input on keyboard #thr = thread.start_new_thread(printDateToDisplay, ()) a = getOneKey() displayTime=False if 47 < a < 58: readNfc(a) except KeyboardInterrupt: GPIO.cleanup() pass GPIO.cleanup() if __name__ == '__main__': debug("----------========== Starting session! ==========----------") main()
0.05512
0.075551
from rest_framework import status from rest_framework.response import Response from lottee_new.helpers import get_object_or_none from my_user.serializers import UserSerializer, UpdatePasswordSerializer, ResetPasswordSerializer from my_user.models import User from rest_framework.generics import CreateAPIView, RetrieveUpdateAPIView, UpdateAPIView from rest_framework.permissions import IsAuthenticated, AllowAny from django.contrib.auth import get_user_model from rest_framework.decorators import api_view, permission_classes from number.models import Number from number.serializers import NumberSerializer @api_view(['POST']) @permission_classes([AllowAny]) def have_account(request): # Проверка статуса регистрации почты user = get_object_or_none(User, identifier=request.data['identifier']) if user and request.data['have'] or not user and not request.data['have']: return Response(status=status.HTTP_200_OK) return Response(status=status.HTTP_404_NOT_FOUND) @api_view(['PATCH']) def reset_password(request): user = get_object_or_none(User, identifier=request.data['identifier']) if user: serializer = ResetPasswordSerializer(user, data=request.data) if serializer.is_valid(): serializer.save() return Response(status=status.HTTP_200_OK) return Response(status=status.HTTP_412_PRECONDITION_FAILED) class UserCreateView(CreateAPIView): permission_classes = [AllowAny] queryset = get_user_model() serializer_class = UserSerializer class UserRetrieveUpdateView(RetrieveUpdateAPIView): permission_classes = [IsAuthenticated] queryset = get_user_model() serializer_class = UserSerializer def retrieve(self, request, *args, **kwargs): serializer = self.get_serializer(request.user) return Response( status=status.HTTP_200_OK, data={'user': serializer.data} ) def partial_update(self, request, *args, **kwargs): serializer = self.serializer_class( instance=request.user, data=request.data, partial=True ) serializer.is_valid(raise_exception=True) serializer.save() return Response( 
status=status.HTTP_200_OK, data=serializer.data ) class UpdatePasswordView(UpdateAPIView): queryset = User.objects.all() serializer_class = UpdatePasswordSerializer permission_classes = [IsAuthenticated] def update(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(status=status.HTTP_200_OK)
my_user/views.py
from rest_framework import status from rest_framework.response import Response from lottee_new.helpers import get_object_or_none from my_user.serializers import UserSerializer, UpdatePasswordSerializer, ResetPasswordSerializer from my_user.models import User from rest_framework.generics import CreateAPIView, RetrieveUpdateAPIView, UpdateAPIView from rest_framework.permissions import IsAuthenticated, AllowAny from django.contrib.auth import get_user_model from rest_framework.decorators import api_view, permission_classes from number.models import Number from number.serializers import NumberSerializer @api_view(['POST']) @permission_classes([AllowAny]) def have_account(request): # Проверка статуса регистрации почты user = get_object_or_none(User, identifier=request.data['identifier']) if user and request.data['have'] or not user and not request.data['have']: return Response(status=status.HTTP_200_OK) return Response(status=status.HTTP_404_NOT_FOUND) @api_view(['PATCH']) def reset_password(request): user = get_object_or_none(User, identifier=request.data['identifier']) if user: serializer = ResetPasswordSerializer(user, data=request.data) if serializer.is_valid(): serializer.save() return Response(status=status.HTTP_200_OK) return Response(status=status.HTTP_412_PRECONDITION_FAILED) class UserCreateView(CreateAPIView): permission_classes = [AllowAny] queryset = get_user_model() serializer_class = UserSerializer class UserRetrieveUpdateView(RetrieveUpdateAPIView): permission_classes = [IsAuthenticated] queryset = get_user_model() serializer_class = UserSerializer def retrieve(self, request, *args, **kwargs): serializer = self.get_serializer(request.user) return Response( status=status.HTTP_200_OK, data={'user': serializer.data} ) def partial_update(self, request, *args, **kwargs): serializer = self.serializer_class( instance=request.user, data=request.data, partial=True ) serializer.is_valid(raise_exception=True) serializer.save() return Response( 
status=status.HTTP_200_OK, data=serializer.data ) class UpdatePasswordView(UpdateAPIView): queryset = User.objects.all() serializer_class = UpdatePasswordSerializer permission_classes = [IsAuthenticated] def update(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(status=status.HTTP_200_OK)
0.522446
0.081082
import netCDF4 import numpy as np def _valid_x(var): names = ["longitude", "grid_longitude", "projection_x_coordinate"] units = [ "degrees_east", "degree_east", "degree_E", "degrees_E", "degreeE", "degreesE", ] if getattr(var, "standard_name", None) in names: return True if getattr(var, "axis", "None").lower() == "x": return True # Units are mandatory, fail if not present. if var.units in units: return True return False def _valid_y(var): names = ["latitude", "grid_latitude", "projection_y_coordinate"] units = [ "degrees_north", "degree_north", "degree_N", "degrees_N", "degreeN", "degreesN", ] if getattr(var, "standard_name", None) in names: return True if getattr(var, "axis", "None").lower() == "y": return True # Units are mandatory, fail if not present. if var.units in units: return True return False def _mandatory_attr(var, attribute): if not hasattr(var, attribute): raise ValueError( f"Could not find required attribute {attribute} in {var}." ) return def get_mesh_var(nc): """Returns the mesh_topology variable for `nc` (netCDF4.Dataset object).""" mesh_var = nc.get_variables_by_attributes(cf_role="mesh_topology") if not mesh_var: raise ValueError( f"Could not find mesh_topology variable in the dataset {nc}" ) if len(mesh_var) > 1: raise ValueError( f"Expected 1 mesh_topology variable, found {len(mesh_var)}." ) mesh_var = mesh_var[0] _mandatory_attr(mesh_var, attribute="node_coordinates") _mandatory_attr(mesh_var, attribute="topology_dimension") if mesh_var.topology_dimension not in (1, 2): raise ValueError( f"Expected mesh dimension to be 1 or 2, got {mesh_var.topology_dimension}." ) return mesh_var def connectivity_array(connectivity, num_ind): """Returns the connectivity array for its correspdonding `netCDF4.Variable` according to UGRID-1.0. 
""" array = connectivity[:] if not issubclass(array.dtype.type, np.integer): array = np.int_(array) if array.shape[0] == num_ind: array = array.T start_index = int(getattr(connectivity, "start_index", 0)) if start_index >= 1: array -= start_index # FIXME: This won't work for more than one flag value. flag_values = getattr(connectivity, "flag_values", None) if flag_values: array[array == flag_values - start_index] = flag_values return array def ugrid(nc): """Parse UGRID conventions. Take a netCDF4.Dataset object or a netCDF4 file/url string and returns a dictionary with the grid nodes, edges, and connectivy matrix. """ if isinstance(nc, netCDF4.Dataset): pass else: nc = netCDF4.Dataset(nc) mesh_var = get_mesh_var(nc) valid_coords = ( "node_coordinates", "face_coordinates", "edge_coordinates", "boundary_coordinates", ) valid_connectivity = { "face_node_connectivity": 3, "face_face_connectivity": 3, "boundary_node_connectivity": 2, "edge_node_connectivity": 2, } # Used for compatibility with pyugrid. rename = { "node_coordinates": "nodes", "face_node_connectivity": "faces", "boundary_node_connectivity": "boundaries", "edge_node_connectivity": "edges", } grid = {} for key, value in mesh_var.__dict__.items(): if key in valid_coords: coord_names = mesh_var.getncattr(key).strip().split() for name in coord_names: if _valid_x(nc[name]): x = nc[name][:] elif _valid_y(nc[name]): y = nc[name][:] else: raise ValueError( f"Could not recognize axis for {nc[name]}" ) grid.update({key: {"x": x, "y": y}}) if key in valid_connectivity.keys(): connectivity = nc[mesh_var.getncattr(key).strip()] num_ind = valid_connectivity[key] array = connectivity_array(connectivity, num_ind) grid.update({key: array}) return {rename.get(k, k): v for k, v in grid.items()}
gridgeo/ugrid.py
import netCDF4 import numpy as np def _valid_x(var): names = ["longitude", "grid_longitude", "projection_x_coordinate"] units = [ "degrees_east", "degree_east", "degree_E", "degrees_E", "degreeE", "degreesE", ] if getattr(var, "standard_name", None) in names: return True if getattr(var, "axis", "None").lower() == "x": return True # Units are mandatory, fail if not present. if var.units in units: return True return False def _valid_y(var): names = ["latitude", "grid_latitude", "projection_y_coordinate"] units = [ "degrees_north", "degree_north", "degree_N", "degrees_N", "degreeN", "degreesN", ] if getattr(var, "standard_name", None) in names: return True if getattr(var, "axis", "None").lower() == "y": return True # Units are mandatory, fail if not present. if var.units in units: return True return False def _mandatory_attr(var, attribute): if not hasattr(var, attribute): raise ValueError( f"Could not find required attribute {attribute} in {var}." ) return def get_mesh_var(nc): """Returns the mesh_topology variable for `nc` (netCDF4.Dataset object).""" mesh_var = nc.get_variables_by_attributes(cf_role="mesh_topology") if not mesh_var: raise ValueError( f"Could not find mesh_topology variable in the dataset {nc}" ) if len(mesh_var) > 1: raise ValueError( f"Expected 1 mesh_topology variable, found {len(mesh_var)}." ) mesh_var = mesh_var[0] _mandatory_attr(mesh_var, attribute="node_coordinates") _mandatory_attr(mesh_var, attribute="topology_dimension") if mesh_var.topology_dimension not in (1, 2): raise ValueError( f"Expected mesh dimension to be 1 or 2, got {mesh_var.topology_dimension}." ) return mesh_var def connectivity_array(connectivity, num_ind): """Returns the connectivity array for its correspdonding `netCDF4.Variable` according to UGRID-1.0. 
""" array = connectivity[:] if not issubclass(array.dtype.type, np.integer): array = np.int_(array) if array.shape[0] == num_ind: array = array.T start_index = int(getattr(connectivity, "start_index", 0)) if start_index >= 1: array -= start_index # FIXME: This won't work for more than one flag value. flag_values = getattr(connectivity, "flag_values", None) if flag_values: array[array == flag_values - start_index] = flag_values return array def ugrid(nc): """Parse UGRID conventions. Take a netCDF4.Dataset object or a netCDF4 file/url string and returns a dictionary with the grid nodes, edges, and connectivy matrix. """ if isinstance(nc, netCDF4.Dataset): pass else: nc = netCDF4.Dataset(nc) mesh_var = get_mesh_var(nc) valid_coords = ( "node_coordinates", "face_coordinates", "edge_coordinates", "boundary_coordinates", ) valid_connectivity = { "face_node_connectivity": 3, "face_face_connectivity": 3, "boundary_node_connectivity": 2, "edge_node_connectivity": 2, } # Used for compatibility with pyugrid. rename = { "node_coordinates": "nodes", "face_node_connectivity": "faces", "boundary_node_connectivity": "boundaries", "edge_node_connectivity": "edges", } grid = {} for key, value in mesh_var.__dict__.items(): if key in valid_coords: coord_names = mesh_var.getncattr(key).strip().split() for name in coord_names: if _valid_x(nc[name]): x = nc[name][:] elif _valid_y(nc[name]): y = nc[name][:] else: raise ValueError( f"Could not recognize axis for {nc[name]}" ) grid.update({key: {"x": x, "y": y}}) if key in valid_connectivity.keys(): connectivity = nc[mesh_var.getncattr(key).strip()] num_ind = valid_connectivity[key] array = connectivity_array(connectivity, num_ind) grid.update({key: array}) return {rename.get(k, k): v for k, v in grid.items()}
0.594434
0.369685
from typing import Optional, Tuple import tensorflow as tf from typeguard import check_argument_types from neuralmonkey.encoders.attentive import Attentive from neuralmonkey.model.model_part import ModelPart, FeedDict from neuralmonkey.logging import log from neuralmonkey.nn.noisy_gru_cell import NoisyGRUCell from neuralmonkey.nn.ortho_gru_cell import OrthoGRUCell from neuralmonkey.nn.utils import dropout from neuralmonkey.dataset import Dataset from neuralmonkey.vocabulary import Vocabulary # pylint: disable=invalid-name RNNCellTuple = Tuple[tf.contrib.rnn.RNNCell, tf.contrib.rnn.RNNCell] # pylint: enable=invalid-name # pylint: disable=too-many-instance-attributes class SentenceEncoder(ModelPart, Attentive): """A class that manages parts of the computation graph that are used for encoding of input sentences. It uses a bidirectional RNN. This version of the encoder does not support factors. Should you want to use them, use FactoredEncoder instead. """ # pylint: disable=too-many-arguments,too-many-locals def __init__(self, name: str, vocabulary: Vocabulary, data_id: str, embedding_size: int, rnn_size: int, attention_state_size: Optional[int] = None, max_input_len: Optional[int] = None, dropout_keep_prob: float = 1.0, attention_type: type = None, attention_fertility: int = 3, use_noisy_activations: bool = False, parent_encoder: Optional["SentenceEncoder"] = None, save_checkpoint: Optional[str] = None, load_checkpoint: Optional[str] = None) -> None: """Create a new instance of the sentence encoder. Arguments: vocabulary: Input vocabulary data_id: Identifier of the data series fed to this encoder name: An unique identifier for this encoder max_input_len: Maximum length of an encoded sequence embedding_size: The size of the embedding vector assigned to each word rnn_size: The size of the encoder's hidden state. Note that the actual encoder output state size will be twice as long because it is the result of concatenation of forward and backward hidden states. 
Keyword arguments: dropout_keep_prob: The dropout keep probability (default 1.0) attention_type: The class that is used for creating attention mechanism (default None) attention_state_size: The size of the attention inner state. If None, use the size of the encoder hidden state. (defalult None) attention_fertility: Fertility parameter used with CoverageAttention (default 3). """ ModelPart.__init__(self, name, save_checkpoint, load_checkpoint) Attentive.__init__( self, attention_type, attention_fertility=attention_fertility, attention_state_size=attention_state_size) check_argument_types() self.vocabulary = vocabulary self.data_id = data_id self.max_input_len = max_input_len self.embedding_size = embedding_size self.rnn_size = rnn_size self.dropout_keep_prob = dropout_keep_prob self.use_noisy_activations = use_noisy_activations self.parent_encoder = parent_encoder if max_input_len is not None and max_input_len <= 0: raise ValueError("Input length must be a positive integer.") if embedding_size <= 0: raise ValueError("Embedding size must be a positive integer.") if rnn_size <= 0: raise ValueError("RNN size must be a positive integer.") log("Initializing sentence encoder, name: '{}'" .format(self.name)) with self.use_scope(): self._create_input_placeholders() with tf.variable_scope('input_projection'): self._create_embedding_matrix() embedded_inputs = self._embed(self.inputs) # type: tf.Tensor self.embedded_inputs = embedded_inputs fw_cell, bw_cell = self.rnn_cells() # type: RNNCellTuple outputs_bidi_tup, encoded_tup = tf.nn.bidirectional_dynamic_rnn( fw_cell, bw_cell, embedded_inputs, sequence_length=self.sentence_lengths, dtype=tf.float32) self.hidden_states = tf.concat(outputs_bidi_tup, 2) with tf.variable_scope('attention_tensor'): self.__attention_tensor = dropout( self.hidden_states, self.dropout_keep_prob, self.train_mode) self.encoded = tf.concat(encoded_tup, 1) log("Sentence encoder initialized") @property def _attention_tensor(self): return 
self.__attention_tensor @property def _attention_mask(self): # TODO tohle je proti OOP prirode return self.input_mask @property def states_mask(self): return self.input_mask @property def vocabulary_size(self): return len(self.vocabulary) def _create_input_placeholders(self): """Creates input placeholder nodes in the computation graph""" self.train_mode = tf.placeholder(tf.bool, shape=[], name="train_mode") self.inputs = tf.placeholder(tf.int32, shape=[None, None], name="encoder_input") self.input_mask = tf.placeholder( tf.float32, shape=[None, None], name="encoder_padding") self.sentence_lengths = tf.to_int32( tf.reduce_sum(self.input_mask, 1)) def _create_embedding_matrix(self): """Create variables and operations for embedding the input words. If parent encoder is specified, we reuse its embedding matrix """ # NOTE the note from the decoder's embedding matrix function applies # here also if self.parent_encoder is not None: self.embedding_matrix = self.parent_encoder.embedding_matrix else: self.embedding_matrix = tf.get_variable( "word_embeddings", [self.vocabulary_size, self.embedding_size], initializer=tf.random_normal_initializer(stddev=0.01)) def _embed(self, inputs: tf.Tensor) -> tf.Tensor: """Embed the input using the embedding matrix and apply dropout Arguments: inputs: The Tensor to be embedded and dropped out. """ embedded = tf.nn.embedding_lookup(self.embedding_matrix, inputs) return dropout(embedded, self.dropout_keep_prob, self.train_mode) def rnn_cells(self) -> RNNCellTuple: """Return the graph template to for creating RNN memory cells""" if self.parent_encoder is not None: return self.parent_encoder.rnn_cells() if self.use_noisy_activations: return(NoisyGRUCell(self.rnn_size, self.train_mode), NoisyGRUCell(self.rnn_size, self.train_mode)) return (OrthoGRUCell(self.rnn_size), OrthoGRUCell(self.rnn_size)) def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict: """Populate the feed dictionary with the encoder inputs. 
Encoder input placeholders: ``encoder_input``: Stores indices to the vocabulary, shape (batch, time) ``encoder_padding``: Stores the padding (ones and zeros, indicating valid words and positions after the end of sentence, shape (batch, time) ``train_mode``: Boolean scalar specifying the mode (train vs runtime) Arguments: dataset: The dataset to use train: Boolean flag telling whether it is training time """ # pylint: disable=invalid-name fd = {} # type: FeedDict fd[self.train_mode] = train sentences = dataset.get_series(self.data_id) vectors, paddings = self.vocabulary.sentences_to_tensor( list(sentences), self.max_input_len, pad_to_max_len=False, train_mode=train) # as sentences_to_tensor returns lists of shape (time, batch), # we need to transpose fd[self.inputs] = list(zip(*vectors)) fd[self.input_mask] = list(zip(*paddings)) return fd
neuralmonkey/encoders/sentence_encoder.py
from typing import Optional, Tuple import tensorflow as tf from typeguard import check_argument_types from neuralmonkey.encoders.attentive import Attentive from neuralmonkey.model.model_part import ModelPart, FeedDict from neuralmonkey.logging import log from neuralmonkey.nn.noisy_gru_cell import NoisyGRUCell from neuralmonkey.nn.ortho_gru_cell import OrthoGRUCell from neuralmonkey.nn.utils import dropout from neuralmonkey.dataset import Dataset from neuralmonkey.vocabulary import Vocabulary # pylint: disable=invalid-name RNNCellTuple = Tuple[tf.contrib.rnn.RNNCell, tf.contrib.rnn.RNNCell] # pylint: enable=invalid-name # pylint: disable=too-many-instance-attributes class SentenceEncoder(ModelPart, Attentive): """A class that manages parts of the computation graph that are used for encoding of input sentences. It uses a bidirectional RNN. This version of the encoder does not support factors. Should you want to use them, use FactoredEncoder instead. """ # pylint: disable=too-many-arguments,too-many-locals def __init__(self, name: str, vocabulary: Vocabulary, data_id: str, embedding_size: int, rnn_size: int, attention_state_size: Optional[int] = None, max_input_len: Optional[int] = None, dropout_keep_prob: float = 1.0, attention_type: type = None, attention_fertility: int = 3, use_noisy_activations: bool = False, parent_encoder: Optional["SentenceEncoder"] = None, save_checkpoint: Optional[str] = None, load_checkpoint: Optional[str] = None) -> None: """Create a new instance of the sentence encoder. Arguments: vocabulary: Input vocabulary data_id: Identifier of the data series fed to this encoder name: An unique identifier for this encoder max_input_len: Maximum length of an encoded sequence embedding_size: The size of the embedding vector assigned to each word rnn_size: The size of the encoder's hidden state. Note that the actual encoder output state size will be twice as long because it is the result of concatenation of forward and backward hidden states. 
Keyword arguments: dropout_keep_prob: The dropout keep probability (default 1.0) attention_type: The class that is used for creating attention mechanism (default None) attention_state_size: The size of the attention inner state. If None, use the size of the encoder hidden state. (defalult None) attention_fertility: Fertility parameter used with CoverageAttention (default 3). """ ModelPart.__init__(self, name, save_checkpoint, load_checkpoint) Attentive.__init__( self, attention_type, attention_fertility=attention_fertility, attention_state_size=attention_state_size) check_argument_types() self.vocabulary = vocabulary self.data_id = data_id self.max_input_len = max_input_len self.embedding_size = embedding_size self.rnn_size = rnn_size self.dropout_keep_prob = dropout_keep_prob self.use_noisy_activations = use_noisy_activations self.parent_encoder = parent_encoder if max_input_len is not None and max_input_len <= 0: raise ValueError("Input length must be a positive integer.") if embedding_size <= 0: raise ValueError("Embedding size must be a positive integer.") if rnn_size <= 0: raise ValueError("RNN size must be a positive integer.") log("Initializing sentence encoder, name: '{}'" .format(self.name)) with self.use_scope(): self._create_input_placeholders() with tf.variable_scope('input_projection'): self._create_embedding_matrix() embedded_inputs = self._embed(self.inputs) # type: tf.Tensor self.embedded_inputs = embedded_inputs fw_cell, bw_cell = self.rnn_cells() # type: RNNCellTuple outputs_bidi_tup, encoded_tup = tf.nn.bidirectional_dynamic_rnn( fw_cell, bw_cell, embedded_inputs, sequence_length=self.sentence_lengths, dtype=tf.float32) self.hidden_states = tf.concat(outputs_bidi_tup, 2) with tf.variable_scope('attention_tensor'): self.__attention_tensor = dropout( self.hidden_states, self.dropout_keep_prob, self.train_mode) self.encoded = tf.concat(encoded_tup, 1) log("Sentence encoder initialized") @property def _attention_tensor(self): return 
self.__attention_tensor @property def _attention_mask(self): # TODO tohle je proti OOP prirode return self.input_mask @property def states_mask(self): return self.input_mask @property def vocabulary_size(self): return len(self.vocabulary) def _create_input_placeholders(self): """Creates input placeholder nodes in the computation graph""" self.train_mode = tf.placeholder(tf.bool, shape=[], name="train_mode") self.inputs = tf.placeholder(tf.int32, shape=[None, None], name="encoder_input") self.input_mask = tf.placeholder( tf.float32, shape=[None, None], name="encoder_padding") self.sentence_lengths = tf.to_int32( tf.reduce_sum(self.input_mask, 1)) def _create_embedding_matrix(self): """Create variables and operations for embedding the input words. If parent encoder is specified, we reuse its embedding matrix """ # NOTE the note from the decoder's embedding matrix function applies # here also if self.parent_encoder is not None: self.embedding_matrix = self.parent_encoder.embedding_matrix else: self.embedding_matrix = tf.get_variable( "word_embeddings", [self.vocabulary_size, self.embedding_size], initializer=tf.random_normal_initializer(stddev=0.01)) def _embed(self, inputs: tf.Tensor) -> tf.Tensor: """Embed the input using the embedding matrix and apply dropout Arguments: inputs: The Tensor to be embedded and dropped out. """ embedded = tf.nn.embedding_lookup(self.embedding_matrix, inputs) return dropout(embedded, self.dropout_keep_prob, self.train_mode) def rnn_cells(self) -> RNNCellTuple: """Return the graph template to for creating RNN memory cells""" if self.parent_encoder is not None: return self.parent_encoder.rnn_cells() if self.use_noisy_activations: return(NoisyGRUCell(self.rnn_size, self.train_mode), NoisyGRUCell(self.rnn_size, self.train_mode)) return (OrthoGRUCell(self.rnn_size), OrthoGRUCell(self.rnn_size)) def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict: """Populate the feed dictionary with the encoder inputs. 
Encoder input placeholders: ``encoder_input``: Stores indices to the vocabulary, shape (batch, time) ``encoder_padding``: Stores the padding (ones and zeros, indicating valid words and positions after the end of sentence, shape (batch, time) ``train_mode``: Boolean scalar specifying the mode (train vs runtime) Arguments: dataset: The dataset to use train: Boolean flag telling whether it is training time """ # pylint: disable=invalid-name fd = {} # type: FeedDict fd[self.train_mode] = train sentences = dataset.get_series(self.data_id) vectors, paddings = self.vocabulary.sentences_to_tensor( list(sentences), self.max_input_len, pad_to_max_len=False, train_mode=train) # as sentences_to_tensor returns lists of shape (time, batch), # we need to transpose fd[self.inputs] = list(zip(*vectors)) fd[self.input_mask] = list(zip(*paddings)) return fd
0.933203
0.382603
from MySQL import * from Software import * import json class DB(object): def __init__(self): # Init Database Connection. self.db = MySQL() self.db.connect() def getSoftware(self,id=None,name=None): # Function for getting information for a specific software. if id != None: sql = """SELECT * FROM `softwares` WHERE ID = {}""".format(id) elif name!= None: sql = """SELECT * FROM `softwares` WHERE Name = {}""".format(name) else: sql = """SELECT * FROM `softwares`""" result = self.db.select(sql) resultSet = [Software(item[0],item[1],item[2],item[3]) for item in result] return resultSet def getChangelogIDs(self,softwareID): # Get all ids for a single software. sql = """SELECT ID FROM `data` WHERE SoftwareID = """ + str(softwareID) selectResults = self.db.select(sql) resultSet = [item[0] for item in selectResults] return resultSet def addChangelog(self,softwareID,changelog): # add a piece of changelog to database. id = changelog['id'] verison = changelog['version'] time = changelog['time'] content = json.dumps(changelog['content']) sql = """INSERT INTO data (`ID`, `SoftwareID`, `Time`, `Version`, `Detail`) VALUES (%s, %s, %s, %s, %s)""" self.db.insert(sql,(id,softwareID,time,verison,content)) def getSMSSubscribers(self,softwareID): # Get the list of all SMS subscribers for specific software. sql = """SELECT phone FROM `phone` WHERE softwareID =""" + str(softwareID) selectResults = self.db.select(sql) resultSet = [item[0] for item in selectResults] return resultSet def getEmailSubscribers(self,softwareID): # Get the list of all Email subscribers for specific software. sql = """SELECT email FROM `email` WHERE softwareID =""" + str(softwareID) selectResults = self.db.select(sql) resultSet = [item[0] for item in selectResults] return resultSet def addChangelogs(self,softwareID,changelogs): # Add a list of changelogs to the database. 
params = [(changelog['id'], softwareID, changelog['time'], changelog['version'], json.dumps(changelog['content'])) for changelog in changelogs] sql = """INSERT INTO data (`ID`, `SoftwareID`, `Time`, `Version`, `Detail`) VALUES (%s, %s, %s, %s, %s)""" self.db.insertMany(sql,params) def __del__(self): self.db.close() if __name__ == '__main__': db = DB() print(db.getSMSSubscribers(5))
db.py
from MySQL import * from Software import * import json class DB(object): def __init__(self): # Init Database Connection. self.db = MySQL() self.db.connect() def getSoftware(self,id=None,name=None): # Function for getting information for a specific software. if id != None: sql = """SELECT * FROM `softwares` WHERE ID = {}""".format(id) elif name!= None: sql = """SELECT * FROM `softwares` WHERE Name = {}""".format(name) else: sql = """SELECT * FROM `softwares`""" result = self.db.select(sql) resultSet = [Software(item[0],item[1],item[2],item[3]) for item in result] return resultSet def getChangelogIDs(self,softwareID): # Get all ids for a single software. sql = """SELECT ID FROM `data` WHERE SoftwareID = """ + str(softwareID) selectResults = self.db.select(sql) resultSet = [item[0] for item in selectResults] return resultSet def addChangelog(self,softwareID,changelog): # add a piece of changelog to database. id = changelog['id'] verison = changelog['version'] time = changelog['time'] content = json.dumps(changelog['content']) sql = """INSERT INTO data (`ID`, `SoftwareID`, `Time`, `Version`, `Detail`) VALUES (%s, %s, %s, %s, %s)""" self.db.insert(sql,(id,softwareID,time,verison,content)) def getSMSSubscribers(self,softwareID): # Get the list of all SMS subscribers for specific software. sql = """SELECT phone FROM `phone` WHERE softwareID =""" + str(softwareID) selectResults = self.db.select(sql) resultSet = [item[0] for item in selectResults] return resultSet def getEmailSubscribers(self,softwareID): # Get the list of all Email subscribers for specific software. sql = """SELECT email FROM `email` WHERE softwareID =""" + str(softwareID) selectResults = self.db.select(sql) resultSet = [item[0] for item in selectResults] return resultSet def addChangelogs(self,softwareID,changelogs): # Add a list of changelogs to the database. 
params = [(changelog['id'], softwareID, changelog['time'], changelog['version'], json.dumps(changelog['content'])) for changelog in changelogs] sql = """INSERT INTO data (`ID`, `SoftwareID`, `Time`, `Version`, `Detail`) VALUES (%s, %s, %s, %s, %s)""" self.db.insertMany(sql,params) def __del__(self): self.db.close() if __name__ == '__main__': db = DB() print(db.getSMSSubscribers(5))
0.430387
0.139338
import torch from torch import nn import torch.nn.functional as F __all__ = ['SplAtConv3d'] class SplAtConv3d(nn.Module): """Split-Attention Conv3d """ def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, radix=2, norm_layer="GN", dropblock_prob=0.0): super(SplAtConv3d, self).__init__() inter_channels = max(in_channels*radix//2, 8) self.radix = radix self.cardinality = groups self.channels = channels self.conv = nn.Conv3d(in_channels, channels*radix, kernel_size, stride, padding, dilation, groups=groups*radix, bias=bias) self.bn0 = nn.BatchNorm3d(num_features=channels*radix) if norm_layer=="BN" else \ nn.GroupNorm(num_groups=channels // 2, num_channels=channels*radix) self.relu = nn.ReLU(inplace=True) self.fc1 = nn.Conv3d(channels, inter_channels, 1, groups=self.cardinality) self.bn1 = nn.BatchNorm3d(num_features=inter_channels) if norm_layer=="BN" else \ nn.GroupNorm(num_groups=inter_channels // 4, num_channels=inter_channels) self.fc2 = nn.Conv3d(inter_channels, channels*radix, 1, groups=self.cardinality) self.dropblock = nn.Dropout(p=dropblock_prob) if dropblock_prob > 0.0 else nn.Sequential() self.rsoftmax = rSoftMax(radix, groups) def forward(self, x): x = self.conv(x) x = self.bn0(x) x = self.dropblock(x) x = self.relu(x) batch, rchannel = x.shape[:2] if self.radix > 1: splited = torch.split(x, rchannel//self.radix, dim=1) gap = sum(splited) else: gap = x gap = F.adaptive_avg_pool3d(gap, 1) gap = self.fc1(gap) gap = self.bn1(gap) gap = self.relu(gap) atten = self.fc2(gap) atten = self.rsoftmax(atten).view(batch, -1, 1, 1, 1) if self.radix > 1: attens = torch.split(atten, rchannel // self.radix, dim=1) out = sum([att * split for (att, split) in zip(attens, splited)]) else: out = atten * x return out.contiguous() class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x 
= x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x
segmentation/zf/network/blocks/splat3D.py
import torch from torch import nn import torch.nn.functional as F __all__ = ['SplAtConv3d'] class SplAtConv3d(nn.Module): """Split-Attention Conv3d """ def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, radix=2, norm_layer="GN", dropblock_prob=0.0): super(SplAtConv3d, self).__init__() inter_channels = max(in_channels*radix//2, 8) self.radix = radix self.cardinality = groups self.channels = channels self.conv = nn.Conv3d(in_channels, channels*radix, kernel_size, stride, padding, dilation, groups=groups*radix, bias=bias) self.bn0 = nn.BatchNorm3d(num_features=channels*radix) if norm_layer=="BN" else \ nn.GroupNorm(num_groups=channels // 2, num_channels=channels*radix) self.relu = nn.ReLU(inplace=True) self.fc1 = nn.Conv3d(channels, inter_channels, 1, groups=self.cardinality) self.bn1 = nn.BatchNorm3d(num_features=inter_channels) if norm_layer=="BN" else \ nn.GroupNorm(num_groups=inter_channels // 4, num_channels=inter_channels) self.fc2 = nn.Conv3d(inter_channels, channels*radix, 1, groups=self.cardinality) self.dropblock = nn.Dropout(p=dropblock_prob) if dropblock_prob > 0.0 else nn.Sequential() self.rsoftmax = rSoftMax(radix, groups) def forward(self, x): x = self.conv(x) x = self.bn0(x) x = self.dropblock(x) x = self.relu(x) batch, rchannel = x.shape[:2] if self.radix > 1: splited = torch.split(x, rchannel//self.radix, dim=1) gap = sum(splited) else: gap = x gap = F.adaptive_avg_pool3d(gap, 1) gap = self.fc1(gap) gap = self.bn1(gap) gap = self.relu(gap) atten = self.fc2(gap) atten = self.rsoftmax(atten).view(batch, -1, 1, 1, 1) if self.radix > 1: attens = torch.split(atten, rchannel // self.radix, dim=1) out = sum([att * split for (att, split) in zip(attens, splited)]) else: out = atten * x return out.contiguous() class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x 
= x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x
0.957893
0.409221
import fnmatch import os import plistlib import re import shutil import subprocess import sys from distutils import dir_util from nfbuild import NFBuild class NFBuildWindows(NFBuild): def __init__(self): super(self.__class__, self).__init__() self.project_file = 'build.ninja' def installClangFormat(self): clang_format_vulcan_file = os.path.join('tools', 'clang-format.vulcan') clang_format_extraction_folder = self.vulcanDownload( clang_format_vulcan_file, 'clang-format-5.0.0') self.clang_format_binary = os.path.join( os.path.join( os.path.join( clang_format_extraction_folder, 'clang-format'), 'bin'), 'clang-format') def installNinja(self): ninja_vulcan_file = os.path.join( os.path.join( os.path.join( os.path.join('tools', 'buildtools'), 'spotify_buildtools'), 'software'), 'ninja.vulcan') ninja_extraction_folder = self.vulcanDownload( ninja_vulcan_file, 'ninja-1.6.0') self.ninja_binary = os.path.join( ninja_extraction_folder, 'ninja') if 'PATH' not in os.environ: os.environ['PATH'] = '' if len(os.environ['PATH']) > 0: os.environ['PATH'] += os.pathsep os.environ['PATH'] += ninja_extraction_folder def installMake(self): make_vulcan_file = os.path.join('tools', 'make.vulcan') make_extraction_folder = self.vulcanDownload( make_vulcan_file, 'make-4.2.1') make_bin_folder = os.path.join( make_extraction_folder, 'bin') os.environ['PATH'] += os.pathsep + make_bin_folder def installVulcanDependencies(self, android=False): super(self.__class__, self).installVulcanDependencies(android) self.installClangFormat() self.installMake() if android: self.installNinja() def generateProject(self, ios=False, android=False, android_arm=False): self.use_ninja = android or android_arm cmake_call = [ self.cmake_binary, '..', '-GNinja'] if android or android_arm: android_abi = 'x86_64' android_toolchain_name = 'x86_64-llvm' if android_arm: android_abi = 'arm64-v8a' android_toolchain_name = 'arm64-llvm' cmake_call.extend([ '-DANDROID=1', '-DCMAKE_TOOLCHAIN_FILE=' + self.android_ndk_folder + 
'/build/cmake/android.toolchain.cmake', '-DANDROID_NDK=' + self.android_ndk_folder, '-DANDROID_ABI=' + android_abi, '-DANDROID_NATIVE_API_LEVEL=21', '-DANDROID_TOOLCHAIN_NAME=' + android_toolchain_name, '-DANDROID_WINDOWS=1', '-DANDROID_STL=c++_shared']) cmake_result = subprocess.call(cmake_call, cwd=self.build_directory) if cmake_result != 0: sys.exit(cmake_result) def buildTarget(self, target, sdk='macosx', arch='x86_64'): result = subprocess.call([ self.ninja_binary, '-C', self.build_directory, '-f', self.project_file, target]) if result != 0: sys.exit(result)
ci/nfbuildwindows.py
import fnmatch import os import plistlib import re import shutil import subprocess import sys from distutils import dir_util from nfbuild import NFBuild class NFBuildWindows(NFBuild): def __init__(self): super(self.__class__, self).__init__() self.project_file = 'build.ninja' def installClangFormat(self): clang_format_vulcan_file = os.path.join('tools', 'clang-format.vulcan') clang_format_extraction_folder = self.vulcanDownload( clang_format_vulcan_file, 'clang-format-5.0.0') self.clang_format_binary = os.path.join( os.path.join( os.path.join( clang_format_extraction_folder, 'clang-format'), 'bin'), 'clang-format') def installNinja(self): ninja_vulcan_file = os.path.join( os.path.join( os.path.join( os.path.join('tools', 'buildtools'), 'spotify_buildtools'), 'software'), 'ninja.vulcan') ninja_extraction_folder = self.vulcanDownload( ninja_vulcan_file, 'ninja-1.6.0') self.ninja_binary = os.path.join( ninja_extraction_folder, 'ninja') if 'PATH' not in os.environ: os.environ['PATH'] = '' if len(os.environ['PATH']) > 0: os.environ['PATH'] += os.pathsep os.environ['PATH'] += ninja_extraction_folder def installMake(self): make_vulcan_file = os.path.join('tools', 'make.vulcan') make_extraction_folder = self.vulcanDownload( make_vulcan_file, 'make-4.2.1') make_bin_folder = os.path.join( make_extraction_folder, 'bin') os.environ['PATH'] += os.pathsep + make_bin_folder def installVulcanDependencies(self, android=False): super(self.__class__, self).installVulcanDependencies(android) self.installClangFormat() self.installMake() if android: self.installNinja() def generateProject(self, ios=False, android=False, android_arm=False): self.use_ninja = android or android_arm cmake_call = [ self.cmake_binary, '..', '-GNinja'] if android or android_arm: android_abi = 'x86_64' android_toolchain_name = 'x86_64-llvm' if android_arm: android_abi = 'arm64-v8a' android_toolchain_name = 'arm64-llvm' cmake_call.extend([ '-DANDROID=1', '-DCMAKE_TOOLCHAIN_FILE=' + self.android_ndk_folder + 
'/build/cmake/android.toolchain.cmake', '-DANDROID_NDK=' + self.android_ndk_folder, '-DANDROID_ABI=' + android_abi, '-DANDROID_NATIVE_API_LEVEL=21', '-DANDROID_TOOLCHAIN_NAME=' + android_toolchain_name, '-DANDROID_WINDOWS=1', '-DANDROID_STL=c++_shared']) cmake_result = subprocess.call(cmake_call, cwd=self.build_directory) if cmake_result != 0: sys.exit(cmake_result) def buildTarget(self, target, sdk='macosx', arch='x86_64'): result = subprocess.call([ self.ninja_binary, '-C', self.build_directory, '-f', self.project_file, target]) if result != 0: sys.exit(result)
0.14734
0.055669
import asyncio from pathlib import Path from ssl import SSLContext from typing import Any, Awaitable, Dict, List, Optional, Sequence, Tuple, Union import cytoolz as tlz import ujson as json from aiohttp import TCPConnector from aiohttp.typedefs import StrOrURL from aiohttp_client_cache import CachedSession, SQLiteBackend from . import utils from .exceptions import InvalidInputValue from .utils import EXPIRE, BaseRetriever __all__ = ["retrieve", "delete_url_cache", "retrieve_text", "retrieve_json", "retrieve_binary"] async def async_session( url_kwds: Tuple[Tuple[int, StrOrURL, Dict[StrOrURL, Any]], ...], read: str, r_kwds: Dict[str, Any], request_method: str, cache_name: Path, family: int, timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> Awaitable[Union[str, bytes, Dict[str, Any]]]: """Create an async session for sending requests. Parameters ---------- url_kwds : list of tuples of urls and payloads A list of URLs or URLs with their payloads to be retrieved. read : str The method for returning the request; ``binary`` (bytes), ``json``, and ``text``. r_kwds : dict Keywords to pass to the response read function. ``{"content_type": None}`` if read is ``json`` else it's empty. request_method : str The request type; GET or POST. cache_name : str Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : int TCP socket family timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for the cache in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. 
Returns ------- asyncio.gather An async gather function """ cache = SQLiteBackend( cache_name=cache_name, expire_after=expire_after, allowed_methods=("GET", "POST"), timeout=timeout, ) connector = TCPConnector(family=family, ssl=ssl) async with CachedSession( json_serialize=json.dumps, cache=cache, connector=connector, trust_env=True, ) as session: _session = session.disabled() if disable else session async with _session: request_func = getattr(session, request_method.lower()) tasks = ( utils.retriever(uid, url, kwds, request_func, read, r_kwds) for uid, url, kwds in url_kwds ) return await asyncio.gather(*tasks) # type: ignore def delete_url_cache( url: StrOrURL, request_method: str = "GET", cache_name: Optional[Union[Path, str]] = None, **kwargs: Dict[str, Any], ) -> None: """Delete cached response associated with ``url``, along with its history (if applicable). Parameters ---------- url : str URL to be deleted from the cache request_method : str, optional HTTP request method to be deleted from the cache, defaults to ``GET``. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. kwargs : dict, optional Keywords to pass to the ``cache.delete_url()``. 
""" loop, new_loop = utils.get_event_loop() asyncio.set_event_loop(loop) request_method = request_method.upper() valid_methods = ["GET", "POST"] if request_method not in valid_methods: raise InvalidInputValue("method", valid_methods) loop.run_until_complete( utils.delete_url(url, request_method, utils.create_cachefile(cache_name), **kwargs) ) if new_loop: loop.close() def retrieve( urls: Sequence[StrOrURL], read: str, request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[Union[str, Dict[str, Any], bytes]]: r"""Send async requests. Parameters ---------- urls : list of str List of URLs. read : str Method for returning the request; ``binary``, ``json``, and ``text``. request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. 
disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- list List of responses in the order of input URLs. Examples -------- >>> import async_retriever as ar >>> stations = ["01646500", "08072300", "11073495"] >>> url = "https://waterservices.usgs.gov/nwis/site" >>> urls, kwds = zip( ... *[ ... (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}}) ... for s in stations ... ] ... ) >>> resp = ar.retrieve(urls, "text", request_kwds=kwds) >>> resp[0].split('\n')[-2].split('\t')[1] '01646500' """ inp = BaseRetriever(urls, read, request_kwds, request_method, cache_name, family) loop, new_loop = utils.get_event_loop() asyncio.set_event_loop(loop) session = tlz.partial( async_session, read=inp.read, r_kwds=inp.r_kwds, request_method=inp.request_method, cache_name=inp.cache_name, family=inp.family, timeout=timeout, expire_after=expire_after, ssl=ssl, disable=disable, ) chunked_reqs = tlz.partition_all(max_workers, inp.url_kwds) results = (loop.run_until_complete(session(url_kwds=c)) for c in chunked_reqs) resp = [r for _, r in sorted(tlz.concat(results))] if new_loop: loop.close() return resp def retrieve_text( urls: Sequence[StrOrURL], request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[str]: r"""Send async requests and get the response as ``text``. Parameters ---------- urls : list of str List of URLs. request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. 
max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request in seconds, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- list List of responses in the order of input URLs. Examples -------- >>> import async_retriever as ar >>> stations = ["01646500", "08072300", "11073495"] >>> url = "https://waterservices.usgs.gov/nwis/site" >>> urls, kwds = zip( ... *[ ... (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}}) ... for s in stations ... ] ... ) >>> resp = ar.retrieve_text(urls, kwds) >>> resp[0].split('\n')[-2].split('\t')[1] '01646500' """ resp: List[str] = retrieve( # type: ignore urls, "text", request_kwds, request_method, max_workers, cache_name, family, timeout, expire_after, ssl, disable, ) return resp def retrieve_json( urls: Sequence[StrOrURL], request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[Dict[str, Any]]: r"""Send async requests and get the response as ``json``. Parameters ---------- urls : list of str List of URLs. 
request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- dict List of responses in the order of input URLs. Examples -------- >>> import async_retriever as ar >>> urls = ["https://labs.waterdata.usgs.gov/api/nldi/linked-data/comid/position"] >>> kwds = [ ... { ... "params": { ... "f": "json", ... "coords": "POINT(-68.325 45.0369)", ... }, ... }, ... 
] >>> r = ar.retrieve_json(urls, kwds) >>> print(r[0]["features"][0]["properties"]["identifier"]) 2675320 """ resp: List[Dict[str, Any]] = retrieve( # type: ignore urls, "json", request_kwds, request_method, max_workers, cache_name, family, timeout, expire_after, ssl, disable, ) return resp def retrieve_binary( urls: Sequence[StrOrURL], request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[bytes]: r"""Send async requests and get the response as ``bytes``. Parameters ---------- urls : list of str List of URLs. request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- bytes List of responses in the order of input URLs. 
""" resp: List[bytes] = retrieve( # type: ignore urls, "binary", request_kwds, request_method, max_workers, cache_name, family, timeout, expire_after, ssl, disable, ) return resp
async_retriever/async_retriever.py
import asyncio from pathlib import Path from ssl import SSLContext from typing import Any, Awaitable, Dict, List, Optional, Sequence, Tuple, Union import cytoolz as tlz import ujson as json from aiohttp import TCPConnector from aiohttp.typedefs import StrOrURL from aiohttp_client_cache import CachedSession, SQLiteBackend from . import utils from .exceptions import InvalidInputValue from .utils import EXPIRE, BaseRetriever __all__ = ["retrieve", "delete_url_cache", "retrieve_text", "retrieve_json", "retrieve_binary"] async def async_session( url_kwds: Tuple[Tuple[int, StrOrURL, Dict[StrOrURL, Any]], ...], read: str, r_kwds: Dict[str, Any], request_method: str, cache_name: Path, family: int, timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> Awaitable[Union[str, bytes, Dict[str, Any]]]: """Create an async session for sending requests. Parameters ---------- url_kwds : list of tuples of urls and payloads A list of URLs or URLs with their payloads to be retrieved. read : str The method for returning the request; ``binary`` (bytes), ``json``, and ``text``. r_kwds : dict Keywords to pass to the response read function. ``{"content_type": None}`` if read is ``json`` else it's empty. request_method : str The request type; GET or POST. cache_name : str Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : int TCP socket family timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for the cache in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. 
Returns ------- asyncio.gather An async gather function """ cache = SQLiteBackend( cache_name=cache_name, expire_after=expire_after, allowed_methods=("GET", "POST"), timeout=timeout, ) connector = TCPConnector(family=family, ssl=ssl) async with CachedSession( json_serialize=json.dumps, cache=cache, connector=connector, trust_env=True, ) as session: _session = session.disabled() if disable else session async with _session: request_func = getattr(session, request_method.lower()) tasks = ( utils.retriever(uid, url, kwds, request_func, read, r_kwds) for uid, url, kwds in url_kwds ) return await asyncio.gather(*tasks) # type: ignore def delete_url_cache( url: StrOrURL, request_method: str = "GET", cache_name: Optional[Union[Path, str]] = None, **kwargs: Dict[str, Any], ) -> None: """Delete cached response associated with ``url``, along with its history (if applicable). Parameters ---------- url : str URL to be deleted from the cache request_method : str, optional HTTP request method to be deleted from the cache, defaults to ``GET``. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. kwargs : dict, optional Keywords to pass to the ``cache.delete_url()``. 
""" loop, new_loop = utils.get_event_loop() asyncio.set_event_loop(loop) request_method = request_method.upper() valid_methods = ["GET", "POST"] if request_method not in valid_methods: raise InvalidInputValue("method", valid_methods) loop.run_until_complete( utils.delete_url(url, request_method, utils.create_cachefile(cache_name), **kwargs) ) if new_loop: loop.close() def retrieve( urls: Sequence[StrOrURL], read: str, request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[Union[str, Dict[str, Any], bytes]]: r"""Send async requests. Parameters ---------- urls : list of str List of URLs. read : str Method for returning the request; ``binary``, ``json``, and ``text``. request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. 
disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- list List of responses in the order of input URLs. Examples -------- >>> import async_retriever as ar >>> stations = ["01646500", "08072300", "11073495"] >>> url = "https://waterservices.usgs.gov/nwis/site" >>> urls, kwds = zip( ... *[ ... (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}}) ... for s in stations ... ] ... ) >>> resp = ar.retrieve(urls, "text", request_kwds=kwds) >>> resp[0].split('\n')[-2].split('\t')[1] '01646500' """ inp = BaseRetriever(urls, read, request_kwds, request_method, cache_name, family) loop, new_loop = utils.get_event_loop() asyncio.set_event_loop(loop) session = tlz.partial( async_session, read=inp.read, r_kwds=inp.r_kwds, request_method=inp.request_method, cache_name=inp.cache_name, family=inp.family, timeout=timeout, expire_after=expire_after, ssl=ssl, disable=disable, ) chunked_reqs = tlz.partition_all(max_workers, inp.url_kwds) results = (loop.run_until_complete(session(url_kwds=c)) for c in chunked_reqs) resp = [r for _, r in sorted(tlz.concat(results))] if new_loop: loop.close() return resp def retrieve_text( urls: Sequence[StrOrURL], request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[str]: r"""Send async requests and get the response as ``text``. Parameters ---------- urls : list of str List of URLs. request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. 
max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request in seconds, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- list List of responses in the order of input URLs. Examples -------- >>> import async_retriever as ar >>> stations = ["01646500", "08072300", "11073495"] >>> url = "https://waterservices.usgs.gov/nwis/site" >>> urls, kwds = zip( ... *[ ... (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}}) ... for s in stations ... ] ... ) >>> resp = ar.retrieve_text(urls, kwds) >>> resp[0].split('\n')[-2].split('\t')[1] '01646500' """ resp: List[str] = retrieve( # type: ignore urls, "text", request_kwds, request_method, max_workers, cache_name, family, timeout, expire_after, ssl, disable, ) return resp def retrieve_json( urls: Sequence[StrOrURL], request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[Dict[str, Any]]: r"""Send async requests and get the response as ``json``. Parameters ---------- urls : list of str List of URLs. 
request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- dict List of responses in the order of input URLs. Examples -------- >>> import async_retriever as ar >>> urls = ["https://labs.waterdata.usgs.gov/api/nldi/linked-data/comid/position"] >>> kwds = [ ... { ... "params": { ... "f": "json", ... "coords": "POINT(-68.325 45.0369)", ... }, ... }, ... 
] >>> r = ar.retrieve_json(urls, kwds) >>> print(r[0]["features"][0]["properties"]["identifier"]) 2675320 """ resp: List[Dict[str, Any]] = retrieve( # type: ignore urls, "json", request_kwds, request_method, max_workers, cache_name, family, timeout, expire_after, ssl, disable, ) return resp def retrieve_binary( urls: Sequence[StrOrURL], request_kwds: Optional[Sequence[Dict[str, Any]]] = None, request_method: str = "GET", max_workers: int = 8, cache_name: Optional[Union[Path, str]] = None, family: str = "both", timeout: float = 5.0, expire_after: float = EXPIRE, ssl: Union[SSLContext, bool, None] = None, disable: bool = False, ) -> List[bytes]: r"""Send async requests and get the response as ``bytes``. Parameters ---------- urls : list of str List of URLs. request_kwds : list of dict, optional List of requests keywords corresponding to input URLs (1 on 1 mapping), defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. request_method : str, optional Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. max_workers : int, optional Maximum number of async processes, defaults to 8. cache_name : str, optional Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. family : str, optional TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4 or IPv6 only pass ``ipv4`` or ``ipv6``, respectively. timeout : float, optional Timeout for the request, defaults to 5.0. expire_after : int, optional Expiration time for response caching in seconds, defaults to -1 (never expire). ssl : bool or SSLContext, optional SSLContext to use for the connection, defaults to None. Set to False to disable SSL cetification verification. disable : bool, optional If ``True`` temporarily disable caching requests and get new responses from the server, defaults to False. Returns ------- bytes List of responses in the order of input URLs. 
""" resp: List[bytes] = retrieve( # type: ignore urls, "binary", request_kwds, request_method, max_workers, cache_name, family, timeout, expire_after, ssl, disable, ) return resp
0.884096
0.175786
from copy import deepcopy from spec import ( FAR_FUTURE_EPOCH, GENESIS_EPOCH, MAX_DEPOSIT_AMOUNT, SLOTS_PER_EPOCH, ZERO_HASH, BeaconBlock, DepositData, DepositInput, Eth1Data, Validator, int_to_bytes48, merkle_root, get_genesis_beacon_state, get_block_root, get_state_root, get_empty_block, advance_slot, process_block, state_transition, ) def get_sample_genesis_validator(index): return Validator( pubkey=int_to_bytes48(index), withdrawal_credentials=ZERO_HASH, activation_epoch=GENESIS_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, initiated_exit=False, slashed=False, ) def add_validators_to_genesis(state, num_validators): # currently bypassing normal deposit route # TODO: get merkle root working and use normal genesis_deposits state.validator_registry = [ get_sample_genesis_validator(i) for i in range(num_validators) ] state.validator_balances = [ int(MAX_DEPOSIT_AMOUNT) for i in range(num_validators) ] def construct_empty_block_for_next_slot(state): empty_block = get_empty_block() empty_block.slot = state.slot + 1 previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == ZERO_HASH: previous_block_header.state_root = state.hash_tree_root() empty_block.previous_block_root = previous_block_header.hash_tree_root() return empty_block def test_slot_transition(state): test_state = deepcopy(state) advance_slot(test_state) assert test_state.slot == state.slot + 1 assert get_state_root(test_state, state.slot) == state.hash_tree_root() return test_state def test_empty_block_transition(state): test_state = deepcopy(state) block = construct_empty_block_for_next_slot(state) advance_slot(test_state) process_block(test_state, block) assert len(test_state.eth1_data_votes) == len(state.eth1_data_votes) + 1 assert get_block_root(test_state, state.slot) == block.previous_block_root def test_skipped_slots(state): test_state = deepcopy(state) block = construct_empty_block_for_next_slot(test_state) block.slot += 3 
state_transition(test_state, block) assert test_state.slot == block.slot for slot in range(state.slot, test_state.slot): assert get_block_root(test_state, slot) == block.previous_block_root def test_empty_epoch_transition(state): test_state = deepcopy(state) block = construct_empty_block_for_next_slot(test_state) block.slot += SLOTS_PER_EPOCH state_transition(test_state, block) assert test_state.slot == block.slot for slot in range(state.slot, test_state.slot): assert get_block_root(test_state, slot) == block.previous_block_root def sanity_tests(): print("Buidling state with 100 validators...") genesis_state = get_genesis_beacon_state( [], 0, Eth1Data( deposit_root="\x00"*32, block_hash="\x00"*32 ), ) add_validators_to_genesis(genesis_state, 100) print("done!") print() print("Running some sanity check tests...") test_slot_transition(genesis_state) test_empty_block_transition(genesis_state) test_skipped_slots(genesis_state) test_empty_epoch_transition(genesis_state) print("done!") if __name__ == "__main__": sanity_tests()
spec_pythonizer/sanity_check.py
from copy import deepcopy from spec import ( FAR_FUTURE_EPOCH, GENESIS_EPOCH, MAX_DEPOSIT_AMOUNT, SLOTS_PER_EPOCH, ZERO_HASH, BeaconBlock, DepositData, DepositInput, Eth1Data, Validator, int_to_bytes48, merkle_root, get_genesis_beacon_state, get_block_root, get_state_root, get_empty_block, advance_slot, process_block, state_transition, ) def get_sample_genesis_validator(index): return Validator( pubkey=int_to_bytes48(index), withdrawal_credentials=ZERO_HASH, activation_epoch=GENESIS_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, initiated_exit=False, slashed=False, ) def add_validators_to_genesis(state, num_validators): # currently bypassing normal deposit route # TODO: get merkle root working and use normal genesis_deposits state.validator_registry = [ get_sample_genesis_validator(i) for i in range(num_validators) ] state.validator_balances = [ int(MAX_DEPOSIT_AMOUNT) for i in range(num_validators) ] def construct_empty_block_for_next_slot(state): empty_block = get_empty_block() empty_block.slot = state.slot + 1 previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == ZERO_HASH: previous_block_header.state_root = state.hash_tree_root() empty_block.previous_block_root = previous_block_header.hash_tree_root() return empty_block def test_slot_transition(state): test_state = deepcopy(state) advance_slot(test_state) assert test_state.slot == state.slot + 1 assert get_state_root(test_state, state.slot) == state.hash_tree_root() return test_state def test_empty_block_transition(state): test_state = deepcopy(state) block = construct_empty_block_for_next_slot(state) advance_slot(test_state) process_block(test_state, block) assert len(test_state.eth1_data_votes) == len(state.eth1_data_votes) + 1 assert get_block_root(test_state, state.slot) == block.previous_block_root def test_skipped_slots(state): test_state = deepcopy(state) block = construct_empty_block_for_next_slot(test_state) block.slot += 3 
state_transition(test_state, block) assert test_state.slot == block.slot for slot in range(state.slot, test_state.slot): assert get_block_root(test_state, slot) == block.previous_block_root def test_empty_epoch_transition(state): test_state = deepcopy(state) block = construct_empty_block_for_next_slot(test_state) block.slot += SLOTS_PER_EPOCH state_transition(test_state, block) assert test_state.slot == block.slot for slot in range(state.slot, test_state.slot): assert get_block_root(test_state, slot) == block.previous_block_root def sanity_tests(): print("Buidling state with 100 validators...") genesis_state = get_genesis_beacon_state( [], 0, Eth1Data( deposit_root="\x00"*32, block_hash="\x00"*32 ), ) add_validators_to_genesis(genesis_state, 100) print("done!") print() print("Running some sanity check tests...") test_slot_transition(genesis_state) test_empty_block_transition(genesis_state) test_skipped_slots(genesis_state) test_empty_epoch_transition(genesis_state) print("done!") if __name__ == "__main__": sanity_tests()
0.321247
0.465691
import os, sys, json, re, shutil from utils.queryBuilder import postQuery def prep_inputs(ml_dir, ctx_file, in_file): # get context with open(ctx_file) as f: j = json.load(f) # get kwargs kwargs = j #mstarch - with containerization, "kwargs" are in context at top level #json.loads(j['rule']['kwargs']) # get classmap file and version cm_file = os.path.basename(kwargs['classmap_file'].strip()) match = re.search(r'classmap_(datav.*?)\.json', cm_file) if not match: raise RuntimeError("Failed to extract classmap version: %s" % cm_file) cm_version = match.group(1) # get features file and version ft_file = os.path.basename(kwargs['feat_file'].strip()) match = re.search(r'(featv.*?)\.json', ft_file) if not match: raise RuntimeError("Failed to extract feature version: %s" % ft_file) ft_version = match.group(1) # set classifier ID clf_version = kwargs['clf_version'] clf_type = kwargs['clf_type'] username = j['username'] #mstarch - username is a paramemter rule_name = j['name'] #mstarch - rule_name is a parameter clf_name = "predictor_model-phunw_clfv%s_%s_%s-%s-%s" % (clf_version, cm_version, ft_version, username, rule_name) # get urls ret, status = postQuery({ 'query': j['query']}) #mstarch - passthrough is now a parameter urls = [i['url'] for i in ret] # create input json input = { "clf_name": clf_name, "clf_type": clf_type, "classmap_file": cm_file, "feat_file": ft_file, "crossvalidate": 0, "saveclf": 1, "cacheoutput": 0, "urls": urls, } # create product directory and chdir os.makedirs(clf_name) os.chdir(clf_name) # write input file with open(in_file, 'w') as f: json.dump(input, f, indent=2) # copy classmap and feature files shutil.copy(os.path.join(ml_dir, 'classmaps', cm_file), cm_file) shutil.copy(os.path.join(ml_dir, 'features', ft_file), ft_file) if __name__ == "__main__": prep_inputs(sys.argv[1], sys.argv[2], sys.argv[3])
ariaml/trainPredictor_inputPrep.py
import os, sys, json, re, shutil from utils.queryBuilder import postQuery def prep_inputs(ml_dir, ctx_file, in_file): # get context with open(ctx_file) as f: j = json.load(f) # get kwargs kwargs = j #mstarch - with containerization, "kwargs" are in context at top level #json.loads(j['rule']['kwargs']) # get classmap file and version cm_file = os.path.basename(kwargs['classmap_file'].strip()) match = re.search(r'classmap_(datav.*?)\.json', cm_file) if not match: raise RuntimeError("Failed to extract classmap version: %s" % cm_file) cm_version = match.group(1) # get features file and version ft_file = os.path.basename(kwargs['feat_file'].strip()) match = re.search(r'(featv.*?)\.json', ft_file) if not match: raise RuntimeError("Failed to extract feature version: %s" % ft_file) ft_version = match.group(1) # set classifier ID clf_version = kwargs['clf_version'] clf_type = kwargs['clf_type'] username = j['username'] #mstarch - username is a paramemter rule_name = j['name'] #mstarch - rule_name is a parameter clf_name = "predictor_model-phunw_clfv%s_%s_%s-%s-%s" % (clf_version, cm_version, ft_version, username, rule_name) # get urls ret, status = postQuery({ 'query': j['query']}) #mstarch - passthrough is now a parameter urls = [i['url'] for i in ret] # create input json input = { "clf_name": clf_name, "clf_type": clf_type, "classmap_file": cm_file, "feat_file": ft_file, "crossvalidate": 0, "saveclf": 1, "cacheoutput": 0, "urls": urls, } # create product directory and chdir os.makedirs(clf_name) os.chdir(clf_name) # write input file with open(in_file, 'w') as f: json.dump(input, f, indent=2) # copy classmap and feature files shutil.copy(os.path.join(ml_dir, 'classmaps', cm_file), cm_file) shutil.copy(os.path.join(ml_dir, 'features', ft_file), ft_file) if __name__ == "__main__": prep_inputs(sys.argv[1], sys.argv[2], sys.argv[3])
0.159839
0.151028
from api import models from django.core.management.base import BaseCommand import csv import dateutil.parser def export_file(): file_name = "dpu_data.csv" models.Space.objects.all().delete() models.Doorway.objects.all().delete() models.DPU.objects.all().delete() models.Events.objects.all().delete() models.RealtimeSpaceData.objects.all().delete() spaces = models.Space.objects.bulk_create([models.Space(name=x) for x in "ABCDFE"]) doors = models.Doorway.objects.bulk_create( [ models.Doorway(name=x, egress_spc=spaces[1], ingress_spc=spaces[2]) for x in reversed("ZXCVW") ] ) dpu_mapping = { "423": models.DPU.objects.get_or_create(name="423", door=doors[1]), "283": models.DPU.objects.get_or_create(name="283", door=doors[0]), } records = [] with open(file_name, newline="") as csvfile: for r in list(csv.reader(csvfile))[1:]: dt, direction, dpu = r direction = int(direction) records.append( dict( created_at=dateutil.parser.isoparse(dt), direction=direction, id=dpu, ) ) dpu, _ = dpu_mapping.get(dpu) inn, out = models.DPU.objects.motion_direction(dpu, direction) rev, _ = models.RealtimeSpaceData.objects.get_or_create(space=inn) rev.count += 1 rev.save() _ = models.Events.objects.create( door=dpu.door, space=inn, direction=direction, new_count=rev.count ) rev, _ = models.RealtimeSpaceData.objects.get_or_create(space=out) rev.count -= 1 rev.save() _ = models.Events.objects.create( door=dpu.door, space=out, direction=direction, new_count=rev.count ) class Command(BaseCommand): help = "Load dpu from a csv file" def handle(self, *args, **kwargs): export_file() self.stdout.write("Done")
api/management/commands/load_dpu.py
from api import models from django.core.management.base import BaseCommand import csv import dateutil.parser def export_file(): file_name = "dpu_data.csv" models.Space.objects.all().delete() models.Doorway.objects.all().delete() models.DPU.objects.all().delete() models.Events.objects.all().delete() models.RealtimeSpaceData.objects.all().delete() spaces = models.Space.objects.bulk_create([models.Space(name=x) for x in "ABCDFE"]) doors = models.Doorway.objects.bulk_create( [ models.Doorway(name=x, egress_spc=spaces[1], ingress_spc=spaces[2]) for x in reversed("ZXCVW") ] ) dpu_mapping = { "423": models.DPU.objects.get_or_create(name="423", door=doors[1]), "283": models.DPU.objects.get_or_create(name="283", door=doors[0]), } records = [] with open(file_name, newline="") as csvfile: for r in list(csv.reader(csvfile))[1:]: dt, direction, dpu = r direction = int(direction) records.append( dict( created_at=dateutil.parser.isoparse(dt), direction=direction, id=dpu, ) ) dpu, _ = dpu_mapping.get(dpu) inn, out = models.DPU.objects.motion_direction(dpu, direction) rev, _ = models.RealtimeSpaceData.objects.get_or_create(space=inn) rev.count += 1 rev.save() _ = models.Events.objects.create( door=dpu.door, space=inn, direction=direction, new_count=rev.count ) rev, _ = models.RealtimeSpaceData.objects.get_or_create(space=out) rev.count -= 1 rev.save() _ = models.Events.objects.create( door=dpu.door, space=out, direction=direction, new_count=rev.count ) class Command(BaseCommand): help = "Load dpu from a csv file" def handle(self, *args, **kwargs): export_file() self.stdout.write("Done")
0.402979
0.159381
import sys import os import asyncio import socket import struct import subprocess import time import logging import random import inspect import threading from threading import Thread from threading import Lock from threading import Condition from enum import Enum import traceback import configparser from topo import TopoHelper """ Extension of socket to handle recv and send of special data """ class ExSocket: def __init__(self, sock): self.sock = sock def recvall(self, nbytes): res = [] sock = self.sock nread = 0 while nread < nbytes: chunk = self.sock.recv(min(nbytes - nread, 1024)) nread += len(chunk) res.append(chunk) return b''.join(res) def recvint(self): return struct.unpack('@i', self.recvall(4))[0] def sendint(self, n): return self.sock.send(struct.pack('@i', n)) def sendstr(self, s): size = 0 size += self.sendint(len(s)) size += self.sock.send(s.encode()) return size def recvstr(self): slen = self.recvint() return self.recvall(slen).decode() def log_args(level=logging.INFO): """Decorator to log arguments passed to func.""" def inner_func(func): line_no = inspect.getsourcelines(func)[-1] @wraps(func) def return_func(*args, **kwargs): arg_list = list("{!r}".format(arg) for arg in args) arg_list.extend( "{}={!r}".format(key, val) for key, val in kwargs.iteritems()) msg = arg_log_fmt.format( name=func.__name__, arg_str=", ".join(arg_list)) logging.getLogger('').log(level, msg) return func(*args, **kwargs) return return_func return inner_func class State(Enum): CMD = 1 FIN = 2 UNKNOWN = 3 class TrackerHandler: def __init__(self, reader, writer, tracker, worker_id): self.reader = reader self.writer = writer self.tracker = tracker self.worker_id = worker_id self.state = State.FIN self.cmd = None def handle(self): if self.state == State.FIN: self.cmd = self.recvstr() self.state = State.CMD elif self.state == State.CMD: if self.cmd == 'print': self.handle_print() elif self.cmd == 'start': self.handle_start() elif self.cmd == 'register': self.handle_register() elif 
self.cmd == 'barrier': self.handle_barrier() elif self.cmd == 'exclude': self.handle_exclude() elif self.cmd == 'unexclude': self.handle_unexclude() elif self.cmd == 'heartbeat': self.handle_heartbeat() elif self.cmd == 'shutdown': return False self.state = State.FIN self.cmd = None return True def handle_start(self): rank = self.recvint() self.tracker.tracker_lock.acquire() self.tracker.worker_id_to_ranks[self.worker_id] = rank self.addr = self.recvstr() self.tracker.tracker_lock.release() self.tracker.rank_cond.acquire() self.tracker.rank_counter += 1 if self.tracker.rank_counter != self.tracker.nworker: self.tracker.rank_cond.wait() else: self.tracker.rank_counter = 0 self.tracker.realloc_ranks() self.tracker.rank_cond.notify_all() self.tracker.rank_cond.release() self.rank = self.tracker.worker_id_to_ranks[self.worker_id] self.tracker.rank_cond.acquire() self.tracker.addrs[self.rank] = self.addr if len(self.tracker.addrs) != self.tracker.nworker: self.tracker.rank_cond.wait() else: self.tracker.rank_cond.notify_all() self.tracker.rank_cond.release() # send world size self.sendint(self.tracker.nworker) # send rank self.tracker.tracker_lock.acquire() self.sendint(self.rank) num_conn = 0 num_accept = 0 for rank, addr in self.tracker.addrs.items(): if rank < self.rank: num_conn += 1 elif rank > self.rank: num_accept += 1 self.sendint(num_conn) self.sendint(num_accept) for rank, addr in self.tracker.addrs.items(): if rank < self.rank: self.sendstr(addr) self.sendint(rank) self.tracker.tracker_lock.release() def handle_print(self): msg = self.recvstr() if self.rank != -1: msg = 'rank %d: %s ' % (self.rank, msg.strip()) logging.info(msg) '''A distributed lock impletentation, only communicator or group with same name can continue, otherwise will be blocked ''' def handle_exclude(self): comm = self.recvstr() self.tracker.comm_lock.acquire() if self.tracker.last_comm != comm: if self.tracker.last_comm == None: self.tracker.last_comm = comm else: if not 
self.tracker.comm_added[comm]: self.tracker.pending_comms.add(comm) self.tracker.comm_added[comm] = True self.sendstr('exclude_undone') self.tracker.comm_lock.release() else: self.sendstr('exclude_done') self.tracker.comm_lock.release() def handle_unexclude(self): comm = self.recvstr() self.tracker.comm_cond.acquire() self.tracker.lock_counter += 1 if self.tracker.lock_counter != self.tracker.nworker: self.tracker.comm_cond.wait() else: self.tracker.lock_counter = 0 self.tracker.comm_lock.acquire() if len(self.tracker.pending_comms): self.tracker.last_comm = self.tracker.pending_comms.pop() else: self.tracker.last_comm = None self.tracker.comm_lock.release() self.tracker.comm_cond.notify_all() self.tracker.comm_cond.release() self.sendstr('unexclude_done') def handle_barrier(self): name = yield from self.recvstr() self.tracker.name_to_barrier_conds[name].acquire() self.tracker.name_to_barrier_counter[name] += 1 if self.tracker.name_to_barrier_counter[name] != self.tracker.nworker: self.tracker.name_to_barrier_conds[name].wait() else: self.tracker.name_to_barrier_counter[name] = 0 self.tracker.name_to_barrier_conds[name].notify_all() self.tracker.name_to_barrier_conds[name].release() self.sendstr("barrier_done") def handle_register(self): name = yield from self.recvstr() self.tracker.register_lock.acquire() if name not in self.tracker.names: self.tracker.names.add(name) self.tracker.name_to_ranks[name] = set() self.tracker.name_to_barrier_counter[name] = 0 self.tracker.name_to_barrier_conds[name] = Condition() self.tracker.name_to_barrier_locks[name] = Lock() self.tracker.comm_added[name] = False self.tracker.name_to_ranks[name].add(self.rank) self.tracker.register_lock.release() '''keep heartbeat''' def handle_heartbeat(self): self.tracker.last_heartbeat_timepoint[self.worker_id] = time.time() self.sendstr('heartbeat_done') def recvint(self): data = yield from seld.reader.read(4) return struct.unpack('@i', data)[0] def recvstr(self): data = yield from 
self.reader.read(4) length = struct.unpack('@i', data)[0] data = yield from self.reader.read(length) return def sendint(self, data): return self.sock.sendint(data) def sendstr(self, data): return self.sock.sendstr(data) class Tracker: def __init__(self, host_ip, port, nworker): self.cur_rank = 0 # trakcer addr self.host_ip = host_ip self.port = port self.nworker = nworker # create track sock then lisen self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.bind((host_ip, port)) self.sock.listen(128) self.addrs = dict() # communicator name associated members # register related self.names = set() self.name_to_ranks = dict() self.register_lock = Lock() self.last_rank = 0 self.worker_id_to_ranks = dict() self.rank_cond = threading.Condition() self.rank_counter = 0 # thread associated members self.tracker_lock = Lock() self.name_lock = Lock() # barrier related self.name_to_barrier_counter = dict() self.name_to_barrier_conds = dict() self.name_to_barrier_locks = dict() # exclude related self.last_comm = None self.pending_comms = set() self.comm_added = dict() self.comm_lock = Lock() # heartbeat related self.last_heartbeat_timepoint = dict() self.lock_counter = 0 self.comm_cond = Condition() # construct initial tree map self.topohelper = TopoHelper() self.tree_map, self.parent_map, self.ring_map = self.topohelper.get_link_map( nworker) # assing worker id self.worker_id = 0 self.worker_id_lock = Lock() def start(self, loop): """ Starts the TCP server, so that it listens on port 12345. For each worker that connects, the accept_worker method gets called. This method runs the loop until the server sockets are ready to accept connections. """ self.server = loop.run_until_complete( asyncio.streams.start_server( self._accept_worker, self.host_ip, self.port, loop=loop)) logging.info('start listen on %s:%d' % (self.host_ip, self.port)) def stop(self, loop): """ Stops the TCP server, i.e. closes the listening socket(s). 
This method runs the loop until the server sockets are closed. """ if self.server is not None: self.server.close() loop.run_until_complete(self.server.wait_closed()) self.server = None def _accept_worker(self, worker_reader, worker_writer): """ This method accepts a new worker connection and creates a Task to handle this worker. self.workers is updated to keep track of the new worker. """ with self.worker_id_lock: self.worker_id += 1 # start a new Task to handle this specific worker connection task = asyncio.Task( self._handle_worker(self.worker_id, worker_reader, worker_writer)) self.workers[task] = (worker_reader, worker_writer) def worker_done(task): logging.info("worker task done") del self.workers[task] task.add_done_callback(worker_done) @asyncio.coroutine def _handle_worker(self, worker_id, worker_reader, worker_writer): """ This method actually does the work to handle the requests for a specific worker. The protocol is line oriented, so there is a main loop that reads a line with a request and then sends out one or more lines back to the worker with the result. 
""" handler = TrackerHandler(worker_reader, worker_writer, self, worker_id) while True: yield from handler.handle() def realloc_ranks(self): existing_ranks = set() for worker_id, rank in self.worker_id_to_ranks.items(): if rank != -1: existing_ranks.add(rank) last_rank = 0 for worker_id, rank in self.worker_id_to_ranks.items(): if rank != -1: continue else: while last_rank in existing_ranks: last_rank += 1 self.worker_id_to_ranks[worker_id] = last_rank last_rank += 1 def worker_envs(self): """ get enviroment variables for workers can be passed in as args or envs """ common_envs = { 'RDC_TRACKER_URI': self.host_ip, 'RDC_TRACKER_PORT': self.port, 'RDC_HEARTBEAT_INTERVAL': 500, } return common_envs def submit(nworker, fun_submit, host_ip='auto', pscmd=None): """submit job Paramaters ---------- nworker : int number of workers fun_sumbit : func the function to submit the jobs for servers and workers host_ip : str, optional the host ip of the root node pscmd : """ # start the root host_ip, port, envs = utils.basic_tracker_config(host_ip) tracker = Tracker(host_ip=host_ip, port=port, nworker=nworker) tracker.start(loop) envs.update(tracker.worker_envs()) # start the workers fun_submit(nworker, envs) # wait the root finished try: loop.run_forever() except KeyboardInterrupt: tracker.stop(loop) loop.close()
tracker/tracker_aio.py
import sys import os import asyncio import socket import struct import subprocess import time import logging import random import inspect import threading from threading import Thread from threading import Lock from threading import Condition from enum import Enum import traceback import configparser from topo import TopoHelper """ Extension of socket to handle recv and send of special data """ class ExSocket: def __init__(self, sock): self.sock = sock def recvall(self, nbytes): res = [] sock = self.sock nread = 0 while nread < nbytes: chunk = self.sock.recv(min(nbytes - nread, 1024)) nread += len(chunk) res.append(chunk) return b''.join(res) def recvint(self): return struct.unpack('@i', self.recvall(4))[0] def sendint(self, n): return self.sock.send(struct.pack('@i', n)) def sendstr(self, s): size = 0 size += self.sendint(len(s)) size += self.sock.send(s.encode()) return size def recvstr(self): slen = self.recvint() return self.recvall(slen).decode() def log_args(level=logging.INFO): """Decorator to log arguments passed to func.""" def inner_func(func): line_no = inspect.getsourcelines(func)[-1] @wraps(func) def return_func(*args, **kwargs): arg_list = list("{!r}".format(arg) for arg in args) arg_list.extend( "{}={!r}".format(key, val) for key, val in kwargs.iteritems()) msg = arg_log_fmt.format( name=func.__name__, arg_str=", ".join(arg_list)) logging.getLogger('').log(level, msg) return func(*args, **kwargs) return return_func return inner_func class State(Enum): CMD = 1 FIN = 2 UNKNOWN = 3 class TrackerHandler: def __init__(self, reader, writer, tracker, worker_id): self.reader = reader self.writer = writer self.tracker = tracker self.worker_id = worker_id self.state = State.FIN self.cmd = None def handle(self): if self.state == State.FIN: self.cmd = self.recvstr() self.state = State.CMD elif self.state == State.CMD: if self.cmd == 'print': self.handle_print() elif self.cmd == 'start': self.handle_start() elif self.cmd == 'register': self.handle_register() elif 
self.cmd == 'barrier': self.handle_barrier() elif self.cmd == 'exclude': self.handle_exclude() elif self.cmd == 'unexclude': self.handle_unexclude() elif self.cmd == 'heartbeat': self.handle_heartbeat() elif self.cmd == 'shutdown': return False self.state = State.FIN self.cmd = None return True def handle_start(self): rank = self.recvint() self.tracker.tracker_lock.acquire() self.tracker.worker_id_to_ranks[self.worker_id] = rank self.addr = self.recvstr() self.tracker.tracker_lock.release() self.tracker.rank_cond.acquire() self.tracker.rank_counter += 1 if self.tracker.rank_counter != self.tracker.nworker: self.tracker.rank_cond.wait() else: self.tracker.rank_counter = 0 self.tracker.realloc_ranks() self.tracker.rank_cond.notify_all() self.tracker.rank_cond.release() self.rank = self.tracker.worker_id_to_ranks[self.worker_id] self.tracker.rank_cond.acquire() self.tracker.addrs[self.rank] = self.addr if len(self.tracker.addrs) != self.tracker.nworker: self.tracker.rank_cond.wait() else: self.tracker.rank_cond.notify_all() self.tracker.rank_cond.release() # send world size self.sendint(self.tracker.nworker) # send rank self.tracker.tracker_lock.acquire() self.sendint(self.rank) num_conn = 0 num_accept = 0 for rank, addr in self.tracker.addrs.items(): if rank < self.rank: num_conn += 1 elif rank > self.rank: num_accept += 1 self.sendint(num_conn) self.sendint(num_accept) for rank, addr in self.tracker.addrs.items(): if rank < self.rank: self.sendstr(addr) self.sendint(rank) self.tracker.tracker_lock.release() def handle_print(self): msg = self.recvstr() if self.rank != -1: msg = 'rank %d: %s ' % (self.rank, msg.strip()) logging.info(msg) '''A distributed lock impletentation, only communicator or group with same name can continue, otherwise will be blocked ''' def handle_exclude(self): comm = self.recvstr() self.tracker.comm_lock.acquire() if self.tracker.last_comm != comm: if self.tracker.last_comm == None: self.tracker.last_comm = comm else: if not 
self.tracker.comm_added[comm]: self.tracker.pending_comms.add(comm) self.tracker.comm_added[comm] = True self.sendstr('exclude_undone') self.tracker.comm_lock.release() else: self.sendstr('exclude_done') self.tracker.comm_lock.release() def handle_unexclude(self): comm = self.recvstr() self.tracker.comm_cond.acquire() self.tracker.lock_counter += 1 if self.tracker.lock_counter != self.tracker.nworker: self.tracker.comm_cond.wait() else: self.tracker.lock_counter = 0 self.tracker.comm_lock.acquire() if len(self.tracker.pending_comms): self.tracker.last_comm = self.tracker.pending_comms.pop() else: self.tracker.last_comm = None self.tracker.comm_lock.release() self.tracker.comm_cond.notify_all() self.tracker.comm_cond.release() self.sendstr('unexclude_done') def handle_barrier(self): name = yield from self.recvstr() self.tracker.name_to_barrier_conds[name].acquire() self.tracker.name_to_barrier_counter[name] += 1 if self.tracker.name_to_barrier_counter[name] != self.tracker.nworker: self.tracker.name_to_barrier_conds[name].wait() else: self.tracker.name_to_barrier_counter[name] = 0 self.tracker.name_to_barrier_conds[name].notify_all() self.tracker.name_to_barrier_conds[name].release() self.sendstr("barrier_done") def handle_register(self): name = yield from self.recvstr() self.tracker.register_lock.acquire() if name not in self.tracker.names: self.tracker.names.add(name) self.tracker.name_to_ranks[name] = set() self.tracker.name_to_barrier_counter[name] = 0 self.tracker.name_to_barrier_conds[name] = Condition() self.tracker.name_to_barrier_locks[name] = Lock() self.tracker.comm_added[name] = False self.tracker.name_to_ranks[name].add(self.rank) self.tracker.register_lock.release() '''keep heartbeat''' def handle_heartbeat(self): self.tracker.last_heartbeat_timepoint[self.worker_id] = time.time() self.sendstr('heartbeat_done') def recvint(self): data = yield from seld.reader.read(4) return struct.unpack('@i', data)[0] def recvstr(self): data = yield from 
self.reader.read(4) length = struct.unpack('@i', data)[0] data = yield from self.reader.read(length) return def sendint(self, data): return self.sock.sendint(data) def sendstr(self, data): return self.sock.sendstr(data) class Tracker: def __init__(self, host_ip, port, nworker): self.cur_rank = 0 # trakcer addr self.host_ip = host_ip self.port = port self.nworker = nworker # create track sock then lisen self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.bind((host_ip, port)) self.sock.listen(128) self.addrs = dict() # communicator name associated members # register related self.names = set() self.name_to_ranks = dict() self.register_lock = Lock() self.last_rank = 0 self.worker_id_to_ranks = dict() self.rank_cond = threading.Condition() self.rank_counter = 0 # thread associated members self.tracker_lock = Lock() self.name_lock = Lock() # barrier related self.name_to_barrier_counter = dict() self.name_to_barrier_conds = dict() self.name_to_barrier_locks = dict() # exclude related self.last_comm = None self.pending_comms = set() self.comm_added = dict() self.comm_lock = Lock() # heartbeat related self.last_heartbeat_timepoint = dict() self.lock_counter = 0 self.comm_cond = Condition() # construct initial tree map self.topohelper = TopoHelper() self.tree_map, self.parent_map, self.ring_map = self.topohelper.get_link_map( nworker) # assing worker id self.worker_id = 0 self.worker_id_lock = Lock() def start(self, loop): """ Starts the TCP server, so that it listens on port 12345. For each worker that connects, the accept_worker method gets called. This method runs the loop until the server sockets are ready to accept connections. """ self.server = loop.run_until_complete( asyncio.streams.start_server( self._accept_worker, self.host_ip, self.port, loop=loop)) logging.info('start listen on %s:%d' % (self.host_ip, self.port)) def stop(self, loop): """ Stops the TCP server, i.e. closes the listening socket(s). 
This method runs the loop until the server sockets are closed. """ if self.server is not None: self.server.close() loop.run_until_complete(self.server.wait_closed()) self.server = None def _accept_worker(self, worker_reader, worker_writer): """ This method accepts a new worker connection and creates a Task to handle this worker. self.workers is updated to keep track of the new worker. """ with self.worker_id_lock: self.worker_id += 1 # start a new Task to handle this specific worker connection task = asyncio.Task( self._handle_worker(self.worker_id, worker_reader, worker_writer)) self.workers[task] = (worker_reader, worker_writer) def worker_done(task): logging.info("worker task done") del self.workers[task] task.add_done_callback(worker_done) @asyncio.coroutine def _handle_worker(self, worker_id, worker_reader, worker_writer): """ This method actually does the work to handle the requests for a specific worker. The protocol is line oriented, so there is a main loop that reads a line with a request and then sends out one or more lines back to the worker with the result. 
""" handler = TrackerHandler(worker_reader, worker_writer, self, worker_id) while True: yield from handler.handle() def realloc_ranks(self): existing_ranks = set() for worker_id, rank in self.worker_id_to_ranks.items(): if rank != -1: existing_ranks.add(rank) last_rank = 0 for worker_id, rank in self.worker_id_to_ranks.items(): if rank != -1: continue else: while last_rank in existing_ranks: last_rank += 1 self.worker_id_to_ranks[worker_id] = last_rank last_rank += 1 def worker_envs(self): """ get enviroment variables for workers can be passed in as args or envs """ common_envs = { 'RDC_TRACKER_URI': self.host_ip, 'RDC_TRACKER_PORT': self.port, 'RDC_HEARTBEAT_INTERVAL': 500, } return common_envs def submit(nworker, fun_submit, host_ip='auto', pscmd=None): """submit job Paramaters ---------- nworker : int number of workers fun_sumbit : func the function to submit the jobs for servers and workers host_ip : str, optional the host ip of the root node pscmd : """ # start the root host_ip, port, envs = utils.basic_tracker_config(host_ip) tracker = Tracker(host_ip=host_ip, port=port, nworker=nworker) tracker.start(loop) envs.update(tracker.worker_envs()) # start the workers fun_submit(nworker, envs) # wait the root finished try: loop.run_forever() except KeyboardInterrupt: tracker.stop(loop) loop.close()
0.354768
0.079424
import numpy as np from envs.task import Task class TaskReal(Task): def __init__(self, env_robot=None, time_step=None, max_steps=None, step_limit=None, action_dim=None, max_vel=None, max_rad=None, ft_obs_only=None, limit_ft=None, max_ft=None, max_position_range=None, dist_threshold=None): super().__init__(max_steps=max_steps, action_dim=action_dim, step_limit=step_limit, max_vel=max_vel, max_rad=max_rad, ft_obs_only=ft_obs_only, limit_ft=limit_ft, time_step=time_step, max_ft=max_ft, max_position_range=max_position_range, dist_threshold=dist_threshold) self.env = env_robot() def reset(self): self.max_dist = self.dist_to_target() self._env_step_counter = 0 self._observation = self.get_extended_observation() return np.array(self._observation) def get_member_pose(self): return self.env.get_member_pose() def get_target_pose(self): return self.env.get_target_pose() def get_force_torque(self): return self.env.get_force_torque() def step2(self, delta): reward, done, num_success = self.reward() if done: if self.action_dim > 3: last_delta = [0.0] * 6 self.env.apply_action_pose(last_delta, 1) else: last_delta = [0.0] * 3 self.env.apply_action_position(last_delta, 1) else: if self.action_dim > 3: self.env.apply_action_pose(delta, 0) else: self.env.apply_action_position(delta, 0) self._env_step_counter += 1 self._observation = self.get_extended_observation() return np.array(self._observation), reward, done, {"num_success": num_success}
envs/task_real.py
import numpy as np from envs.task import Task class TaskReal(Task): def __init__(self, env_robot=None, time_step=None, max_steps=None, step_limit=None, action_dim=None, max_vel=None, max_rad=None, ft_obs_only=None, limit_ft=None, max_ft=None, max_position_range=None, dist_threshold=None): super().__init__(max_steps=max_steps, action_dim=action_dim, step_limit=step_limit, max_vel=max_vel, max_rad=max_rad, ft_obs_only=ft_obs_only, limit_ft=limit_ft, time_step=time_step, max_ft=max_ft, max_position_range=max_position_range, dist_threshold=dist_threshold) self.env = env_robot() def reset(self): self.max_dist = self.dist_to_target() self._env_step_counter = 0 self._observation = self.get_extended_observation() return np.array(self._observation) def get_member_pose(self): return self.env.get_member_pose() def get_target_pose(self): return self.env.get_target_pose() def get_force_torque(self): return self.env.get_force_torque() def step2(self, delta): reward, done, num_success = self.reward() if done: if self.action_dim > 3: last_delta = [0.0] * 6 self.env.apply_action_pose(last_delta, 1) else: last_delta = [0.0] * 3 self.env.apply_action_position(last_delta, 1) else: if self.action_dim > 3: self.env.apply_action_pose(delta, 0) else: self.env.apply_action_position(delta, 0) self._env_step_counter += 1 self._observation = self.get_extended_observation() return np.array(self._observation), reward, done, {"num_success": num_success}
0.537041
0.170577
import cv2 import numpy as np import imutils import time import math import sys sys.path.append('../../utils') from video_recorder import RasPiCamera global win_pts # Mouse Click Event # https://www.pyimagesearch.com/2015/03/09/capturing-mouse-click-events-with-python-and-opencv/ def onmouse(event, x, y, flags, param): global win_pts if event == cv2.EVENT_LBUTTONUP: print(f"x: {x} , y: {y}") if len(win_pts) < 2: win_pts.append((x, y)) else: win_pts.pop(0) win_pts.append((x,y)) class block_calibration: def __init__(self, _f_v = None, _obj_ht = None, _obj_wt = None): self.f_v = _f_v self.obj_ht = _obj_ht self.obj_wt = _obj_wt def calc_z_dist(self, v_pix): z_dist = (self.f_v*self.obj_ht)/v_pix return z_dist def calc_h_offset(self, v_pix, c_pix): z_dist = self.calc_z_dist(v_pix) h_dist = (z_dist*c_pix)/self.f_v return h_dist def calc_angle(self, v_pix, c_pix): h_dist = self.calc_h_offset(v_pix, c_pix) z_dist = self.calc_z_dist(v_pix) angle = math.degrees(math.atan(abs(h_dist)/z_dist)) if h_dist > 0 : angle = -angle return angle def get_f(self, camera:RasPiCamera): global win_pts print('Entering Calibraion Block') win_pts = [] win_name = 'calibration_block' img = camera.capture() cv2.imshow(win_name, img) cv2.setMouseCallback(win_name, onmouse) while 1: cv2.imshow(win_name, img) k = cv2.waitKey(1) & 0xFF # exit if k == 27 or k == ord('q'): # esc key print("exit") break # New Image if k == ord('n'): img = camera.capture() win_pts = [] # cancel selected points if k == ord('c'): win_pts = [] # Vertical calibration if k == ord('v'): print("Vertical Calibration") v_pix = abs(win_pts[0][1] - win_pts[1][1]) print(f"Vertical pixels = {v_pix} ({win_pts[0][1]} - {win_pts[1][1]})") print(f"Horizantal pixels = {abs(win_pts[0][0] - win_pts[1][0])}") obj_dist = int(input("Enter z-axis distance(cm): ")) self.obj_ht = int(input("Enter object height(cm): ")) self.f_v = (obj_dist*v_pix)/self.obj_ht print(f'Vertical focal length calculated : {self.f_v} (cm)') # Horizontal Calibration if k 
== ord('h'): print("Horizantal Calibration") h_pix = abs(win_pts[0][0] - win_pts[1][0]) print(f"Vertical pixels = abs(win_pts[0][1] - win_pts[1][1])") print(f"Horizontal pixels = {v_pix}") obj_dist = int(input("Enter z-axis distance(cm): ")) self.obj_wt = int(input("Enter object width(cm): ")) self.f_h = (obj_dist*h_pix)/self.obj_wt print(f'Vertical focal length calculated : {self.f_h} (cm)') # Test mode Vertical if k == ord("l"): print('Test Mode Vertical') v_pix = abs(win_pts[0][1] - win_pts[1][1]) print(f"Vertical pixels = {v_pix}") z_dist = self.calc_z_dist(v_pix) print(f'z - axis distance calculated : {z_dist} (cm)') # Test Angle if k == ord('a'): print("Angle Testing") print(img.shape) c_pix = img.shape[1]/2 - win_pts[0][0] print(f"Pixels from center = {c_pix}") h_dist = self.calc_h_offset(v_pix, c_pix) angle = self.calc_angle(v_pix, c_pix) print(f'h_dist = {h_dist} cm') print(f'z_dist = {z_dist} cm') print(f'angle = {angle} degrees') # Test mode horizontal if k == ord("w"): print('Test Mode Horizontal') h_pix = abs(win_pts[0][0] - win_pts[1][0]) print(f"Horizontal pixels = {h_pix}") z_dist = (self.f_h*self.obj_wt)/h_pix print(f'z - axis distance calculated : {z_dist} (cm)') # plot points for pt in win_pts: cv2.circle(img, pt, 4, (255, 255, 0), -1) cv2.destroyWindow(win_name) class process_block: def __init__(self, _calib:block_calibration , color = "red"): self.calib = _calib if color == "red": self.low = np.array([0, 70, 50]) self.high = np.array([180, 255, 255]) self.hsv_low = np.array([10, 255, 255]) self.hsv_high = np.array([170, 70, 50]) def center_hieght(self, frame): """ HSV Thresolding of the Red color in given frame """ img = frame.copy() img = cv2.medianBlur(img,5) # Covert BGR to HSV hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Threshold the HSV image to get only Red colors mask1 = cv2.inRange(hsv, self.low, self.hsv_low) mask2 = cv2.inRange(hsv, self.hsv_high, self.high) mask = cv2.bitwise_or(mask1,mask2) # Morphological Operation - Opening 
mask = cv2.erode(mask, None, iterations=2) mask = cv2.dilate(mask, None, iterations=2) # cv2.imshow("mask", mask) # Find contours contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) contours = contours[0] if imutils.is_cv2() else contours[1] # Finding object parameters self.center_obj = None self.hieght_pix = None if len(contours) > 0: c = max(contours, key=cv2.contourArea) ((x, y), radius) = cv2.minEnclosingCircle(c) # Reference : https://stackoverflow.com/questions/22470902/understanding-moments-function-in-opencv M = cv2.moments(c) self.center_obj = (int(M["m10"]/M["m00"]), int(M["m01"]/M["m00"])) self.hieght_pix = None if radius > 20: cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2) cv2.circle(img, self.center_obj, 5, (0, 0, 255), -1) x_min = max(int(x) - int(radius) - 10, 0) x_max = min(int(x) + int(radius) + 10 , img.shape[1]) y_min = max(int(y) - int(radius) - 10, 0) y_max = min(int(y) + int(radius) + 10 , img.shape[0]) img = cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (255, 255, 0), 2) roi = mask[y_min : y_max, x_min : x_max] roi = roi.T row_or = np.zeros(roi.shape[1]) for row in roi: row_or = np.logical_or(row_or, row) row_or = np.where(row_or == True)[0] if row_or.size > 0: self.hieght_pix = abs(row_or[0] - row_or[-1]) return img, self.hieght_pix, self.center_obj def predict_dist_angle(self, img_shape): if self.hieght_pix != None: c_pix = img_shape[1]/2 - self.center_obj[0] return self.calib.calc_z_dist(self.hieght_pix), \ self.calib.calc_angle(self.hieght_pix, c_pix) else: return None, None
Assignments/HW9/process_block.py
import cv2 import numpy as np import imutils import time import math import sys sys.path.append('../../utils') from video_recorder import RasPiCamera global win_pts # Mouse Click Event # https://www.pyimagesearch.com/2015/03/09/capturing-mouse-click-events-with-python-and-opencv/ def onmouse(event, x, y, flags, param): global win_pts if event == cv2.EVENT_LBUTTONUP: print(f"x: {x} , y: {y}") if len(win_pts) < 2: win_pts.append((x, y)) else: win_pts.pop(0) win_pts.append((x,y)) class block_calibration: def __init__(self, _f_v = None, _obj_ht = None, _obj_wt = None): self.f_v = _f_v self.obj_ht = _obj_ht self.obj_wt = _obj_wt def calc_z_dist(self, v_pix): z_dist = (self.f_v*self.obj_ht)/v_pix return z_dist def calc_h_offset(self, v_pix, c_pix): z_dist = self.calc_z_dist(v_pix) h_dist = (z_dist*c_pix)/self.f_v return h_dist def calc_angle(self, v_pix, c_pix): h_dist = self.calc_h_offset(v_pix, c_pix) z_dist = self.calc_z_dist(v_pix) angle = math.degrees(math.atan(abs(h_dist)/z_dist)) if h_dist > 0 : angle = -angle return angle def get_f(self, camera:RasPiCamera): global win_pts print('Entering Calibraion Block') win_pts = [] win_name = 'calibration_block' img = camera.capture() cv2.imshow(win_name, img) cv2.setMouseCallback(win_name, onmouse) while 1: cv2.imshow(win_name, img) k = cv2.waitKey(1) & 0xFF # exit if k == 27 or k == ord('q'): # esc key print("exit") break # New Image if k == ord('n'): img = camera.capture() win_pts = [] # cancel selected points if k == ord('c'): win_pts = [] # Vertical calibration if k == ord('v'): print("Vertical Calibration") v_pix = abs(win_pts[0][1] - win_pts[1][1]) print(f"Vertical pixels = {v_pix} ({win_pts[0][1]} - {win_pts[1][1]})") print(f"Horizantal pixels = {abs(win_pts[0][0] - win_pts[1][0])}") obj_dist = int(input("Enter z-axis distance(cm): ")) self.obj_ht = int(input("Enter object height(cm): ")) self.f_v = (obj_dist*v_pix)/self.obj_ht print(f'Vertical focal length calculated : {self.f_v} (cm)') # Horizontal Calibration if k 
== ord('h'): print("Horizantal Calibration") h_pix = abs(win_pts[0][0] - win_pts[1][0]) print(f"Vertical pixels = abs(win_pts[0][1] - win_pts[1][1])") print(f"Horizontal pixels = {v_pix}") obj_dist = int(input("Enter z-axis distance(cm): ")) self.obj_wt = int(input("Enter object width(cm): ")) self.f_h = (obj_dist*h_pix)/self.obj_wt print(f'Vertical focal length calculated : {self.f_h} (cm)') # Test mode Vertical if k == ord("l"): print('Test Mode Vertical') v_pix = abs(win_pts[0][1] - win_pts[1][1]) print(f"Vertical pixels = {v_pix}") z_dist = self.calc_z_dist(v_pix) print(f'z - axis distance calculated : {z_dist} (cm)') # Test Angle if k == ord('a'): print("Angle Testing") print(img.shape) c_pix = img.shape[1]/2 - win_pts[0][0] print(f"Pixels from center = {c_pix}") h_dist = self.calc_h_offset(v_pix, c_pix) angle = self.calc_angle(v_pix, c_pix) print(f'h_dist = {h_dist} cm') print(f'z_dist = {z_dist} cm') print(f'angle = {angle} degrees') # Test mode horizontal if k == ord("w"): print('Test Mode Horizontal') h_pix = abs(win_pts[0][0] - win_pts[1][0]) print(f"Horizontal pixels = {h_pix}") z_dist = (self.f_h*self.obj_wt)/h_pix print(f'z - axis distance calculated : {z_dist} (cm)') # plot points for pt in win_pts: cv2.circle(img, pt, 4, (255, 255, 0), -1) cv2.destroyWindow(win_name) class process_block: def __init__(self, _calib:block_calibration , color = "red"): self.calib = _calib if color == "red": self.low = np.array([0, 70, 50]) self.high = np.array([180, 255, 255]) self.hsv_low = np.array([10, 255, 255]) self.hsv_high = np.array([170, 70, 50]) def center_hieght(self, frame): """ HSV Thresolding of the Red color in given frame """ img = frame.copy() img = cv2.medianBlur(img,5) # Covert BGR to HSV hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Threshold the HSV image to get only Red colors mask1 = cv2.inRange(hsv, self.low, self.hsv_low) mask2 = cv2.inRange(hsv, self.hsv_high, self.high) mask = cv2.bitwise_or(mask1,mask2) # Morphological Operation - Opening 
mask = cv2.erode(mask, None, iterations=2) mask = cv2.dilate(mask, None, iterations=2) # cv2.imshow("mask", mask) # Find contours contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) contours = contours[0] if imutils.is_cv2() else contours[1] # Finding object parameters self.center_obj = None self.hieght_pix = None if len(contours) > 0: c = max(contours, key=cv2.contourArea) ((x, y), radius) = cv2.minEnclosingCircle(c) # Reference : https://stackoverflow.com/questions/22470902/understanding-moments-function-in-opencv M = cv2.moments(c) self.center_obj = (int(M["m10"]/M["m00"]), int(M["m01"]/M["m00"])) self.hieght_pix = None if radius > 20: cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2) cv2.circle(img, self.center_obj, 5, (0, 0, 255), -1) x_min = max(int(x) - int(radius) - 10, 0) x_max = min(int(x) + int(radius) + 10 , img.shape[1]) y_min = max(int(y) - int(radius) - 10, 0) y_max = min(int(y) + int(radius) + 10 , img.shape[0]) img = cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (255, 255, 0), 2) roi = mask[y_min : y_max, x_min : x_max] roi = roi.T row_or = np.zeros(roi.shape[1]) for row in roi: row_or = np.logical_or(row_or, row) row_or = np.where(row_or == True)[0] if row_or.size > 0: self.hieght_pix = abs(row_or[0] - row_or[-1]) return img, self.hieght_pix, self.center_obj def predict_dist_angle(self, img_shape): if self.hieght_pix != None: c_pix = img_shape[1]/2 - self.center_obj[0] return self.calib.calc_z_dist(self.hieght_pix), \ self.calib.calc_angle(self.hieght_pix, c_pix) else: return None, None
0.410402
0.175009
import ConfigParser import json import pickle import sys import warnings import mysql.connector from pandas import DataFrame from sklearn.cross_validation import KFold from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import RidgeClassifier from sklearn.metrics import f1_score, accuracy_score from sklearn.pipeline import Pipeline from classes.text import Transform warnings.filterwarnings("ignore") reload(sys) sys.setdefaultencoding('utf-8') config = ConfigParser.RawConfigParser(allow_no_value=True) config.read('configs.ini') class Classifier: def read_data(self, page_size): jira_fieldname = config.get("jira", "fieldname") cnx = mysql.connector.connect(user=config.get("mysqld", "user"), password=config.get("mysqld", "password"), host=config.get("mysqld", "host"), database=config.get("mysqld", "database")) cursor = cnx.cursor() query = "SELECT * FROM issues ORDER BY `key` ASC LIMIT %s,%s"; startFrom = 0 while 1: cursor.execute(query, (startFrom, page_size)) sys.stdout.write('.') sys.stdout.flush() for (key, jsonIssue) in cursor: try: issue_details = json.loads(jsonIssue) except ValueError: continue try: transform = Transform(config); issue_details = transform.process_issue_structure(issue_details) except Exception, e: print str(e) continue if 'description' in issue_details['fields'] \ and issue_details['fields'][jira_fieldname] is not None \ and issue_details['key']: yield issue_details['fields']['description'], issue_details['fields'][jira_fieldname]['value'], \ issue_details['key'] startFrom += page_size if cursor.rowcount < page_size: break def build_data_frame(self): rows = [] index = [] page_size = config.getint("mysqld", "pagesize") for text, classification, key in self.read_data(page_size): if key and text and classification: rows.append({'text': text, 'class': classification}) index.append(key) print 'ready for data_frame' data_frame = DataFrame(rows, index=index) return data_frame def get_training_set(self): data = 
DataFrame({'text': [], 'class': []}) data = data.append(self.build_data_frame()) # data = data.reindex(numpy.random.permutation(data.index)) return data def get_predictions(self, data): pipeline = Pipeline([ ('count_vectorizer', TfidfVectorizer(strip_accents='unicode', analyzer='word', max_df=0.5, min_df=2, sublinear_tf=True)), ('classifier', RidgeClassifier(tol=1e-2, solver="lsqr")) ]) return pipeline.fit(data['text'], data['class']) def get_predictions_from_cache_if_possible(self): filename = "cache/training" try: file = open(filename, "r") serializedPipeline = file.read() file.close() return pickle.loads(serializedPipeline) except IOError: pipeline = self.get_predictions(self.get_training_set()) serializedPipeline = pickle.dumps(pipeline) file = open(filename, "w") file.write(serializedPipeline) file.close() return pipeline def get_accuracy(self, pipeline, data): k_fold = KFold(n=len(data), n_folds=10) scores = [] a_scores = [] for train_indices, test_indices in k_fold: train_text = data.iloc[train_indices]['text'].values train_y = data.iloc[train_indices]['class'].values.astype(str) test_text = data.iloc[test_indices]['text'].values test_y = data.iloc[test_indices]['class'].values.astype(str) pipeline.fit(train_text, train_y) predictions = pipeline.predict(test_text) score = f1_score(test_y, predictions) a_score = accuracy_score(test_y, predictions) scores.append(score) a_scores.append(a_score) print('Total tasks classified :', len(data)) print('F1 Score :', sum(scores) / len(scores)) print('Accuracy Score :', sum(a_scores) / len(a_scores)) # classifier = Classifier(); # data = classifier.get_training_set() # classifier.get_accuracy(classifier.get_predictions(data), data)
classes/classifier.py
import ConfigParser import json import pickle import sys import warnings import mysql.connector from pandas import DataFrame from sklearn.cross_validation import KFold from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import RidgeClassifier from sklearn.metrics import f1_score, accuracy_score from sklearn.pipeline import Pipeline from classes.text import Transform warnings.filterwarnings("ignore") reload(sys) sys.setdefaultencoding('utf-8') config = ConfigParser.RawConfigParser(allow_no_value=True) config.read('configs.ini') class Classifier: def read_data(self, page_size): jira_fieldname = config.get("jira", "fieldname") cnx = mysql.connector.connect(user=config.get("mysqld", "user"), password=config.get("mysqld", "password"), host=config.get("mysqld", "host"), database=config.get("mysqld", "database")) cursor = cnx.cursor() query = "SELECT * FROM issues ORDER BY `key` ASC LIMIT %s,%s"; startFrom = 0 while 1: cursor.execute(query, (startFrom, page_size)) sys.stdout.write('.') sys.stdout.flush() for (key, jsonIssue) in cursor: try: issue_details = json.loads(jsonIssue) except ValueError: continue try: transform = Transform(config); issue_details = transform.process_issue_structure(issue_details) except Exception, e: print str(e) continue if 'description' in issue_details['fields'] \ and issue_details['fields'][jira_fieldname] is not None \ and issue_details['key']: yield issue_details['fields']['description'], issue_details['fields'][jira_fieldname]['value'], \ issue_details['key'] startFrom += page_size if cursor.rowcount < page_size: break def build_data_frame(self): rows = [] index = [] page_size = config.getint("mysqld", "pagesize") for text, classification, key in self.read_data(page_size): if key and text and classification: rows.append({'text': text, 'class': classification}) index.append(key) print 'ready for data_frame' data_frame = DataFrame(rows, index=index) return data_frame def get_training_set(self): data = 
DataFrame({'text': [], 'class': []}) data = data.append(self.build_data_frame()) # data = data.reindex(numpy.random.permutation(data.index)) return data def get_predictions(self, data): pipeline = Pipeline([ ('count_vectorizer', TfidfVectorizer(strip_accents='unicode', analyzer='word', max_df=0.5, min_df=2, sublinear_tf=True)), ('classifier', RidgeClassifier(tol=1e-2, solver="lsqr")) ]) return pipeline.fit(data['text'], data['class']) def get_predictions_from_cache_if_possible(self): filename = "cache/training" try: file = open(filename, "r") serializedPipeline = file.read() file.close() return pickle.loads(serializedPipeline) except IOError: pipeline = self.get_predictions(self.get_training_set()) serializedPipeline = pickle.dumps(pipeline) file = open(filename, "w") file.write(serializedPipeline) file.close() return pipeline def get_accuracy(self, pipeline, data): k_fold = KFold(n=len(data), n_folds=10) scores = [] a_scores = [] for train_indices, test_indices in k_fold: train_text = data.iloc[train_indices]['text'].values train_y = data.iloc[train_indices]['class'].values.astype(str) test_text = data.iloc[test_indices]['text'].values test_y = data.iloc[test_indices]['class'].values.astype(str) pipeline.fit(train_text, train_y) predictions = pipeline.predict(test_text) score = f1_score(test_y, predictions) a_score = accuracy_score(test_y, predictions) scores.append(score) a_scores.append(a_score) print('Total tasks classified :', len(data)) print('F1 Score :', sum(scores) / len(scores)) print('Accuracy Score :', sum(a_scores) / len(a_scores)) # classifier = Classifier(); # data = classifier.get_training_set() # classifier.get_accuracy(classifier.get_predictions(data), data)
0.380183
0.197348
import unittest import pytest from reinvent_scoring.scoring import CustomSum from reinvent_scoring.scoring.enums import ROCSInputFileTypesEnum from unittest_reinvent.fixtures.paths import ROCS_SHAPE_QUERY from reinvent_scoring.scoring.enums import ROCSSimilarityMeasuresEnum, ROCSSpecificParametersEnum from reinvent_scoring.scoring.enums import ScoringFunctionComponentNameEnum from reinvent_scoring.scoring.enums import ComponentSpecificParametersEnum from reinvent_scoring.scoring.enums import TransformationTypeEnum, TransformationParametersEnum from unittest_reinvent.fixtures.test_data import CELECOXIB, METAMIZOLE from unittest_reinvent.scoring_tests.scoring_3d.fixtures import component_parameters @pytest.mark.integration class TestParallelRocsSimilarityWithTransformation(unittest.TestCase): def setUp(self): sf_enum = ScoringFunctionComponentNameEnum() sim_measure_enum = ROCSSimilarityMeasuresEnum() csp_enum = ComponentSpecificParametersEnum() rsp_enum = ROCSSpecificParametersEnum() input_type_enum = ROCSInputFileTypesEnum() tt_enum = TransformationTypeEnum() specific_parameters = { rsp_enum.SHAPE_WEIGHT: 0.5, rsp_enum.COLOR_WEIGHT: 0.5, rsp_enum.SIM_MEASURE: sim_measure_enum.REF_TVERSKY, rsp_enum.ROCS_INPUT: ROCS_SHAPE_QUERY, rsp_enum.INPUT_TYPE: input_type_enum.SHAPE_QUERY, csp_enum.TRANSFORMATION: { TransformationParametersEnum.LOW: 0.3, TransformationParametersEnum.HIGH: 0.7, TransformationParametersEnum.K: 1, TransformationParametersEnum.TRANSFORMATION_TYPE: tt_enum.REVERSE_SIGMOID } } ts_parameters = component_parameters(component_type=sf_enum.PARALLEL_ROCS_SIMILARITY, name="parallel_rocs_similarity", specific_parameters=specific_parameters) self.sf_state = CustomSum(parameters=[ts_parameters]) def test_rocs_similarity_1(self): smiles = [CELECOXIB] score = self.sf_state.get_final_score(smiles=smiles) self.assertAlmostEqual(score.total_score, [1.0], delta=0.01) def test_rocs_similarity_2(self): smiles = [METAMIZOLE] score = 
self.sf_state.get_final_score(smiles=smiles) self.assertAlmostEqual(score.total_score, [1.0], delta=0.01)
unittest_reinvent/scoring_tests/scoring_3d/test_parallel_rocs_similarity_with_transformation.py
import unittest import pytest from reinvent_scoring.scoring import CustomSum from reinvent_scoring.scoring.enums import ROCSInputFileTypesEnum from unittest_reinvent.fixtures.paths import ROCS_SHAPE_QUERY from reinvent_scoring.scoring.enums import ROCSSimilarityMeasuresEnum, ROCSSpecificParametersEnum from reinvent_scoring.scoring.enums import ScoringFunctionComponentNameEnum from reinvent_scoring.scoring.enums import ComponentSpecificParametersEnum from reinvent_scoring.scoring.enums import TransformationTypeEnum, TransformationParametersEnum from unittest_reinvent.fixtures.test_data import CELECOXIB, METAMIZOLE from unittest_reinvent.scoring_tests.scoring_3d.fixtures import component_parameters @pytest.mark.integration class TestParallelRocsSimilarityWithTransformation(unittest.TestCase): def setUp(self): sf_enum = ScoringFunctionComponentNameEnum() sim_measure_enum = ROCSSimilarityMeasuresEnum() csp_enum = ComponentSpecificParametersEnum() rsp_enum = ROCSSpecificParametersEnum() input_type_enum = ROCSInputFileTypesEnum() tt_enum = TransformationTypeEnum() specific_parameters = { rsp_enum.SHAPE_WEIGHT: 0.5, rsp_enum.COLOR_WEIGHT: 0.5, rsp_enum.SIM_MEASURE: sim_measure_enum.REF_TVERSKY, rsp_enum.ROCS_INPUT: ROCS_SHAPE_QUERY, rsp_enum.INPUT_TYPE: input_type_enum.SHAPE_QUERY, csp_enum.TRANSFORMATION: { TransformationParametersEnum.LOW: 0.3, TransformationParametersEnum.HIGH: 0.7, TransformationParametersEnum.K: 1, TransformationParametersEnum.TRANSFORMATION_TYPE: tt_enum.REVERSE_SIGMOID } } ts_parameters = component_parameters(component_type=sf_enum.PARALLEL_ROCS_SIMILARITY, name="parallel_rocs_similarity", specific_parameters=specific_parameters) self.sf_state = CustomSum(parameters=[ts_parameters]) def test_rocs_similarity_1(self): smiles = [CELECOXIB] score = self.sf_state.get_final_score(smiles=smiles) self.assertAlmostEqual(score.total_score, [1.0], delta=0.01) def test_rocs_similarity_2(self): smiles = [METAMIZOLE] score = 
self.sf_state.get_final_score(smiles=smiles) self.assertAlmostEqual(score.total_score, [1.0], delta=0.01)
0.622574
0.399314
import threading import logging from contextlib import contextmanager from DataJoin.common import data_join_service_pb2 as data_join_pb from DataJoin.utils.process_manager import ProcessorManager from DataJoin.config import sync_example_id_nums from DataJoin.data_join.raw_data_loader import InitRawDataLoading class ExampleIdProducer(object): def __init__(self, peer_client, raw_data_dir, partition_id, rank_id, raw_data_options, mode, init_raw_data_loading_object): self._lock = threading.Lock() self._peer_client = peer_client self._raw_data_dir = raw_data_dir self._rank_id = rank_id self._mode = mode self._raw_data_options = raw_data_options self._init_loading = init_raw_data_loading_object self._partition_id = partition_id self._processor_start = False self._processor_routine = dict() def start_processors(self): with self._lock: if not self._processor_start: self._processor_routine.update(example_id_sender_processor=ProcessorManager( 'example_id_sender_processor', self._send_example_id_processor, self._impl_send_example_id_factor, 6)) for key, processor in self._processor_routine.items(): processor.active_processor() self._processor_start = True self._enable_example_id_sender_processor() def stop_processors(self): wait_stop = True with self._lock: if self._processor_start: wait_stop = True self._processor_start = False if wait_stop: for processor in self._processor_routine.values(): processor.inactive_processor() def _enable_example_id_sender_processor(self): self._processor_routine['example_id_sender_processor'].enable_processor() def _send_example_id_processor(self, init_loading): if not init_loading.follower_finished: with self._impl_example_id_sender(init_loading) as sender: init_loading.follower_finished = sender() if init_loading.partition_finished: self._finish_send_example_id_to_consumer(init_loading) def _impl_send_example_id_factor(self): with self._lock: if self._init_loading is not None: 
self._processor_routine['example_id_sender_processor'].build_impl_processor_parameter( self._init_loading ) return self._init_loading is not None @contextmanager def _impl_example_id_sender(self, init_loading): init_loading.acquire_stale_with_sender() def sender(): next_index, follower_finished = \ self._start_notify_consumer_to_sync_partition(init_loading) if follower_finished: return True examples_list = [] for (key, example) in init_loading.item_dict.items(): examples_list.append(example) if len(examples_list) > sync_example_id_nums: self._send_example_ids_to_consumer(examples_list, init_loading) examples_list = [] if len(examples_list) >= 0: self._send_example_ids_to_consumer(examples_list, init_loading, True) init_loading.partition_finished = True return False yield sender init_loading.release_stale_with_sender() def _start_notify_consumer_to_sync_partition(self, init_loading): example_producer_request = data_join_pb.StartPartitionRequest( rank_id=self._rank_id, partition_id=init_loading.partition_id ) example_consumer_response = self._peer_client.StartPartition(example_producer_request) if example_consumer_response.status.code != 0: raise RuntimeError( "call example consumer for starting to send partition_id Failed :for " \ "partition_id: %s, error_msg :%s" % ( init_loading.partition_id, example_consumer_response.status.error_message) ) return example_consumer_response.next_index, example_consumer_response.finished def _send_example_ids_to_consumer(self, examples, init_loading, finished=False): send_examples = data_join_pb.SyncContent( lite_example_ids=data_join_pb.LiteExampleIds( partition_id=init_loading.partition_id, begin_index=0, finished=finished ) ) if len(examples) > 0: for exam in examples: send_examples.lite_example_ids.example_id.append(exam.example_id) send_examples.lite_example_ids.event_time.append(exam.event_time) request = data_join_pb.SyncPartitionRequest( rank_id=self._rank_id, partition_id=init_loading.partition_id, compressed=False, 
content_bytes=send_examples.SerializeToString() ) response = self._peer_client.SyncPartition(request) if response.code != 0: raise RuntimeError( "Example Id send {} example ids Failed," \ "error msg {}".format(len(examples), response.error_message) ) def _finish_send_example_id_to_consumer(self, init_loading): if not init_loading.follower_finished: logging.info("notified example id consumer send example has been finished") request = data_join_pb.FinishPartitionRequest( rank_id=self._rank_id, partition_id=init_loading.partition_id ) response = self._peer_client.FinishPartition(request) if response.status.code != 0: raise RuntimeError( "notify example id consumer finish partition Failed" \ "error msg: {}".format(response.status.error_message) ) init_loading.follower_finished = response.finished if not init_loading.follower_finished: logging.info("Example id Consumer is still appending example id into queue " \ "for partition_id %d ", init_loading.partition_id) return False logging.info("Example id Consumer has finished append example id into queue " \ "for partition_id %d ", init_loading.partition_id) return True
src/DataJoin/data_join/example_id_producer.py
import threading import logging from contextlib import contextmanager from DataJoin.common import data_join_service_pb2 as data_join_pb from DataJoin.utils.process_manager import ProcessorManager from DataJoin.config import sync_example_id_nums from DataJoin.data_join.raw_data_loader import InitRawDataLoading class ExampleIdProducer(object): def __init__(self, peer_client, raw_data_dir, partition_id, rank_id, raw_data_options, mode, init_raw_data_loading_object): self._lock = threading.Lock() self._peer_client = peer_client self._raw_data_dir = raw_data_dir self._rank_id = rank_id self._mode = mode self._raw_data_options = raw_data_options self._init_loading = init_raw_data_loading_object self._partition_id = partition_id self._processor_start = False self._processor_routine = dict() def start_processors(self): with self._lock: if not self._processor_start: self._processor_routine.update(example_id_sender_processor=ProcessorManager( 'example_id_sender_processor', self._send_example_id_processor, self._impl_send_example_id_factor, 6)) for key, processor in self._processor_routine.items(): processor.active_processor() self._processor_start = True self._enable_example_id_sender_processor() def stop_processors(self): wait_stop = True with self._lock: if self._processor_start: wait_stop = True self._processor_start = False if wait_stop: for processor in self._processor_routine.values(): processor.inactive_processor() def _enable_example_id_sender_processor(self): self._processor_routine['example_id_sender_processor'].enable_processor() def _send_example_id_processor(self, init_loading): if not init_loading.follower_finished: with self._impl_example_id_sender(init_loading) as sender: init_loading.follower_finished = sender() if init_loading.partition_finished: self._finish_send_example_id_to_consumer(init_loading) def _impl_send_example_id_factor(self): with self._lock: if self._init_loading is not None: 
self._processor_routine['example_id_sender_processor'].build_impl_processor_parameter( self._init_loading ) return self._init_loading is not None @contextmanager def _impl_example_id_sender(self, init_loading): init_loading.acquire_stale_with_sender() def sender(): next_index, follower_finished = \ self._start_notify_consumer_to_sync_partition(init_loading) if follower_finished: return True examples_list = [] for (key, example) in init_loading.item_dict.items(): examples_list.append(example) if len(examples_list) > sync_example_id_nums: self._send_example_ids_to_consumer(examples_list, init_loading) examples_list = [] if len(examples_list) >= 0: self._send_example_ids_to_consumer(examples_list, init_loading, True) init_loading.partition_finished = True return False yield sender init_loading.release_stale_with_sender() def _start_notify_consumer_to_sync_partition(self, init_loading): example_producer_request = data_join_pb.StartPartitionRequest( rank_id=self._rank_id, partition_id=init_loading.partition_id ) example_consumer_response = self._peer_client.StartPartition(example_producer_request) if example_consumer_response.status.code != 0: raise RuntimeError( "call example consumer for starting to send partition_id Failed :for " \ "partition_id: %s, error_msg :%s" % ( init_loading.partition_id, example_consumer_response.status.error_message) ) return example_consumer_response.next_index, example_consumer_response.finished def _send_example_ids_to_consumer(self, examples, init_loading, finished=False): send_examples = data_join_pb.SyncContent( lite_example_ids=data_join_pb.LiteExampleIds( partition_id=init_loading.partition_id, begin_index=0, finished=finished ) ) if len(examples) > 0: for exam in examples: send_examples.lite_example_ids.example_id.append(exam.example_id) send_examples.lite_example_ids.event_time.append(exam.event_time) request = data_join_pb.SyncPartitionRequest( rank_id=self._rank_id, partition_id=init_loading.partition_id, compressed=False, 
content_bytes=send_examples.SerializeToString() ) response = self._peer_client.SyncPartition(request) if response.code != 0: raise RuntimeError( "Example Id send {} example ids Failed," \ "error msg {}".format(len(examples), response.error_message) ) def _finish_send_example_id_to_consumer(self, init_loading): if not init_loading.follower_finished: logging.info("notified example id consumer send example has been finished") request = data_join_pb.FinishPartitionRequest( rank_id=self._rank_id, partition_id=init_loading.partition_id ) response = self._peer_client.FinishPartition(request) if response.status.code != 0: raise RuntimeError( "notify example id consumer finish partition Failed" \ "error msg: {}".format(response.status.error_message) ) init_loading.follower_finished = response.finished if not init_loading.follower_finished: logging.info("Example id Consumer is still appending example id into queue " \ "for partition_id %d ", init_loading.partition_id) return False logging.info("Example id Consumer has finished append example id into queue " \ "for partition_id %d ", init_loading.partition_id) return True
0.527803
0.0745
if False: # no seccomp on Amazon Lambda :-( import os, sys, errno from pyseccomp import * f = SyscallFilter(defaction=KILL) f.add_rule(ALLOW, "open", Arg(1, MASKED_EQ, os.O_RDONLY, os.O_RDONLY | os.O_RDWR | os.O_WRONLY)) f.add_rule(ALLOW, "openat", Arg(2, MASKED_EQ, os.O_RDONLY, os.O_RDONLY | os.O_RDWR | os.O_WRONLY)) f.add_rule(ALLOW, "read") f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stdout.fileno())) f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stderr.fileno())) f.add_rule(ALLOW, "close") f.add_rule(ALLOW, "getdents64") f.add_rule(ALLOW, "exit_group") f.add_rule(ALLOW, "rt_sigaction") f.add_rule(ALLOW, "sigaltstack") f.add_rule(ALLOW, "brk") f.add_rule(ALLOW, "lseek") f.add_rule(ALLOW, "fstat") f.add_rule(ALLOW, "mmap") f.add_rule(ALLOW, "mprotect") f.add_rule(ALLOW, "stat") f.add_rule(ALLOW, "ioctl", Arg(1, EQ, 0x5401)) # TCGETS f.add_rule(ALLOW, "fcntl") f.load() from contextlib import redirect_stdout, redirect_stderr import traceback import io, sys import pprint def sandbox(data): if 'eval' in data: mod_code = data['code'] eval_code = data['eval'] raw_memory = data['memory'] f = io.StringIO() try: with redirect_stdout(f): with redirect_stderr(f): memory = eval(raw_memory) from types import ModuleType mod = ModuleType('botcode') mod.memory = memory exec(compile(mod_code,"bot-code.py",'exec'), mod.__dict__) ret = exec(compile(eval_code,"eval-code.py",'single'), mod.__dict__) except: exception = traceback.format_exc(limit=-1) return {'exception': exception} else: return { 'output': f.getvalue(), 'new_memory' : pprint.pformat(mod.memory, indent=2, width=50) } elif 'message' in data: mod_code = data['code'] sender = data['sender'] text = data['text'] raw_memory = data['memory'] response = None f = io.StringIO() try: with redirect_stdout(f): with redirect_stderr(f): memory = eval(raw_memory) from types import ModuleType mod = ModuleType('botcode') mod.memory = memory exec(compile(mod_code,"bot-code.py",'exec'), mod.__dict__) if data['message'] == 'private': if 
'private_message' in mod.__dict__: response = mod.private_message(sender, text) elif data['message'] == 'group': if 'group_message' in mod.__dict__: response = mod.group_message(sender, text) except: exception = traceback.format_exc(limit=-1) return {'exception': exception} else: return { 'response': response, 'new_memory' : pprint.pformat(mod.memory, indent=2, width=50) } elif 'test' in data: mod_code = data['code'] raw_memory = data['memory'] f = io.StringIO() try: with redirect_stdout(f): with redirect_stderr(f): memory = eval(raw_memory) from types import ModuleType mod = ModuleType('botcode') mod.memory = memory exec(compile(mod_code,"bot-code.py",'exec'), mod.__dict__) if 'test' in mod.__dict__: mod.test() except SyntaxError as e: exception = str(e) return {'error': exception} except: exception = traceback.format_exc(limit=-1) return {'error': exception} else: return {'error': None} else: return {'error': "Could not find out what to do"} def lambda_handler(event, context): return sandbox(event) if __name__ == '__main__': import json data = json.load(sys.stdin) ret = sandbox(data) print(json.dumps(ret))
backend/sandbox/sandbox.py
if False: # no seccomp on Amazon Lambda :-( import os, sys, errno from pyseccomp import * f = SyscallFilter(defaction=KILL) f.add_rule(ALLOW, "open", Arg(1, MASKED_EQ, os.O_RDONLY, os.O_RDONLY | os.O_RDWR | os.O_WRONLY)) f.add_rule(ALLOW, "openat", Arg(2, MASKED_EQ, os.O_RDONLY, os.O_RDONLY | os.O_RDWR | os.O_WRONLY)) f.add_rule(ALLOW, "read") f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stdout.fileno())) f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stderr.fileno())) f.add_rule(ALLOW, "close") f.add_rule(ALLOW, "getdents64") f.add_rule(ALLOW, "exit_group") f.add_rule(ALLOW, "rt_sigaction") f.add_rule(ALLOW, "sigaltstack") f.add_rule(ALLOW, "brk") f.add_rule(ALLOW, "lseek") f.add_rule(ALLOW, "fstat") f.add_rule(ALLOW, "mmap") f.add_rule(ALLOW, "mprotect") f.add_rule(ALLOW, "stat") f.add_rule(ALLOW, "ioctl", Arg(1, EQ, 0x5401)) # TCGETS f.add_rule(ALLOW, "fcntl") f.load() from contextlib import redirect_stdout, redirect_stderr import traceback import io, sys import pprint def sandbox(data): if 'eval' in data: mod_code = data['code'] eval_code = data['eval'] raw_memory = data['memory'] f = io.StringIO() try: with redirect_stdout(f): with redirect_stderr(f): memory = eval(raw_memory) from types import ModuleType mod = ModuleType('botcode') mod.memory = memory exec(compile(mod_code,"bot-code.py",'exec'), mod.__dict__) ret = exec(compile(eval_code,"eval-code.py",'single'), mod.__dict__) except: exception = traceback.format_exc(limit=-1) return {'exception': exception} else: return { 'output': f.getvalue(), 'new_memory' : pprint.pformat(mod.memory, indent=2, width=50) } elif 'message' in data: mod_code = data['code'] sender = data['sender'] text = data['text'] raw_memory = data['memory'] response = None f = io.StringIO() try: with redirect_stdout(f): with redirect_stderr(f): memory = eval(raw_memory) from types import ModuleType mod = ModuleType('botcode') mod.memory = memory exec(compile(mod_code,"bot-code.py",'exec'), mod.__dict__) if data['message'] == 'private': if 
'private_message' in mod.__dict__: response = mod.private_message(sender, text) elif data['message'] == 'group': if 'group_message' in mod.__dict__: response = mod.group_message(sender, text) except: exception = traceback.format_exc(limit=-1) return {'exception': exception} else: return { 'response': response, 'new_memory' : pprint.pformat(mod.memory, indent=2, width=50) } elif 'test' in data: mod_code = data['code'] raw_memory = data['memory'] f = io.StringIO() try: with redirect_stdout(f): with redirect_stderr(f): memory = eval(raw_memory) from types import ModuleType mod = ModuleType('botcode') mod.memory = memory exec(compile(mod_code,"bot-code.py",'exec'), mod.__dict__) if 'test' in mod.__dict__: mod.test() except SyntaxError as e: exception = str(e) return {'error': exception} except: exception = traceback.format_exc(limit=-1) return {'error': exception} else: return {'error': None} else: return {'error': "Could not find out what to do"} def lambda_handler(event, context): return sandbox(event) if __name__ == '__main__': import json data = json.load(sys.stdin) ret = sandbox(data) print(json.dumps(ret))
0.214527
0.128498
import json from typing import Dict, Tuple from authlib.common.encoding import json_dumps from authlib.jose import OKPKey from didcomm.common.types import ( VerificationMethodType, VerificationMaterial, VerificationMaterialFormat, ) from didcomm.errors import DIDCommValueError from didcomm.secrets.secrets_resolver import Secret def jwk_to_secret(jwk: dict) -> Secret: """ Converts a JWK dict to a new Secret instance. :param jwk: JWK as dict :return: a new Secret instance """ return Secret( kid=jwk["kid"], type=VerificationMethodType.JSON_WEB_KEY_2020, verification_material=VerificationMaterial( format=VerificationMaterialFormat.JWK, value=json_dumps(jwk) ), ) def secret_to_jwk_dict(secret: Secret) -> Dict: """ Converts a Secret to a JWK dict. Should be used for Secrets in JWK format only. :param secret: s Secret to be converted :return: JWK dict """ # assume JWK secrets only if secret.verification_material.format != VerificationMaterialFormat.JWK: raise DIDCommValueError( f"Unsupported format {secret.verification_material.format}" ) res = json.loads(secret.verification_material.value) res["kid"] = secret.kid return res def generate_ed25519_keys_as_jwk_dict() -> Tuple[dict, dict]: """ Generates ED25519 private and public keys as JWK dicts. :return: private and public keys as JWK dicts """ key = OKPKey.generate_key("Ed25519", is_private=True) private_key_jwk_dict = key.as_dict(is_private=True) public_key_jwk_dict = key.as_dict() return private_key_jwk_dict, public_key_jwk_dict def generate_x25519_keys_as_jwk_dict() -> Tuple[dict, dict]: """ Generates X25519 private and public keys as JWK dicts. :return: private and public keys as JWK dicts """ key = OKPKey.generate_key("X25519", is_private=True) private_key_jwk_dict = key.as_dict(is_private=True) public_key_jwk_dict = key.as_dict() return private_key_jwk_dict, public_key_jwk_dict
didcomm/secrets/secrets_util.py
import json from typing import Dict, Tuple from authlib.common.encoding import json_dumps from authlib.jose import OKPKey from didcomm.common.types import ( VerificationMethodType, VerificationMaterial, VerificationMaterialFormat, ) from didcomm.errors import DIDCommValueError from didcomm.secrets.secrets_resolver import Secret def jwk_to_secret(jwk: dict) -> Secret: """ Converts a JWK dict to a new Secret instance. :param jwk: JWK as dict :return: a new Secret instance """ return Secret( kid=jwk["kid"], type=VerificationMethodType.JSON_WEB_KEY_2020, verification_material=VerificationMaterial( format=VerificationMaterialFormat.JWK, value=json_dumps(jwk) ), ) def secret_to_jwk_dict(secret: Secret) -> Dict: """ Converts a Secret to a JWK dict. Should be used for Secrets in JWK format only. :param secret: s Secret to be converted :return: JWK dict """ # assume JWK secrets only if secret.verification_material.format != VerificationMaterialFormat.JWK: raise DIDCommValueError( f"Unsupported format {secret.verification_material.format}" ) res = json.loads(secret.verification_material.value) res["kid"] = secret.kid return res def generate_ed25519_keys_as_jwk_dict() -> Tuple[dict, dict]: """ Generates ED25519 private and public keys as JWK dicts. :return: private and public keys as JWK dicts """ key = OKPKey.generate_key("Ed25519", is_private=True) private_key_jwk_dict = key.as_dict(is_private=True) public_key_jwk_dict = key.as_dict() return private_key_jwk_dict, public_key_jwk_dict def generate_x25519_keys_as_jwk_dict() -> Tuple[dict, dict]: """ Generates X25519 private and public keys as JWK dicts. :return: private and public keys as JWK dicts """ key = OKPKey.generate_key("X25519", is_private=True) private_key_jwk_dict = key.as_dict(is_private=True) public_key_jwk_dict = key.as_dict() return private_key_jwk_dict, public_key_jwk_dict
0.796213
0.198452
import os import pandas as pd from parsel import Selector from time import sleep from selenium import webdriver from webdriver_manager.chrome import ChromeDriverManager from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.by import By import configparser config = configparser.ConfigParser() config.read('config.ini') if config.get('HEADLESS', 'headless') == 'Yes': # install webdrive when needed runing headless opts=webdriver.ChromeOptions() opts.headless=True driver = webdriver.Chrome(ChromeDriverManager().install() ,options=opts) else: # install webdrive when needed runing browser driver = webdriver.Chrome(ChromeDriverManager().install()) print('\nExecuting Linkedin Login...') # driver.get method() will navigate to a page given by the URL address driver.get('https://www.linkedin.com/login') # locate email form by element_by_id username = driver.find_element_by_id('username') # send_keys() to simulate key strokes username.send_keys(config.get('LINKEDIN_LOGIN', 'email')) # locate password form by_class_name password = driver.find_element_by_id('password') # send_keys() to simulate key strokes password.send_keys(config.get('LINKEDIN_LOGIN', 'password')) # locate submit button by_class_name log_in_button = driver.find_element_by_class_name('btn__primary--large') # locate submit button by_xpath log_in_button = driver.find_element_by_xpath('//*[@type="submit"]') log_in_button.click() print('\nStarting Posting Search...') # driver goest to the jobs page driver.get('https://www.linkedin.com/jobs/') sleep(2) # Start search term search_job = driver.find_element_by_xpath('//*[@type="text"]') search_job.send_keys(config.get('LINKEDIN_LOGIN', 'search_term')) sleep(1) #search.send_keys(Keys.RETURN) # location 
search_location = driver.find_element_by_xpath('//input[starts-with(@id,"jobs-search-box-location")]') search_location.send_keys(Keys.COMMAND, 'a') #COMMAND is the mac keyboard control search_location.send_keys(Keys.BACKSPACE) search_location.send_keys(config.get('LINKEDIN_LOGIN', 'country')) search_location.send_keys(Keys.RETURN) sleep(3) # Gets the URL from the search result linkedin_result = driver.current_url # Scroll job list to the end of first page recentList = driver.find_elements_by_class_name('jobs-search-results__list-item') for list in recentList : driver.execute_script("arguments[0].scrollIntoView();", list) sleep(0.1) # Get full list of positions name position_name = driver.find_elements_by_class_name('job-card-list__title') position_name = [url.text for url in position_name] position_name len(position_name) # Get listing Company Name company_name = driver.find_elements_by_css_selector('.job-card-container__company-name') company_name = [url.text for url in company_name] company_name len(company_name) # Get listing location job_location = driver.find_elements_by_xpath('//div[starts-with(@class,"artdeco-entity-lockup__caption")]') job_location = [url.text for url in job_location] job_location len(job_location) # Get full list of links positions position_link = driver.find_elements_by_css_selector("div.artdeco-entity-lockup__title > a") position_link = [link.get_attribute("href") for link in position_link] position_link len(position_link) urls_linkedin = [] for lin in position_link: terminator = lin.index('?') urls_linkedin.append(lin[:terminator]) if os.path.isfile('opportunities.csv') is True: opportunities = pd.read_csv('opportunities.csv') else: dict = {'Job Title': [], 'Company Name': [], 'Location': [], 'Direct URL': [], 'TrimmedLinkedin' : [],'LinkedinLink': []} df = pd.DataFrame(dict) df.to_csv('opportunities.csv',mode = 'a', header = True, index = False) opportunities = pd.read_csv('opportunities.csv') print('\nTotal posts: 
',len(position_link)) print('\nStart buinding direct links list ...') main_window_name = driver.window_handles[0] def write_to_csv(posname,compname,joblocation,direct,link): dict = {'Job Title': [posname], 'Company Name': [compname], 'Location': [joblocation], 'Direct URL': [direct],'TrimmedLinkedin' : [urlslin], 'LinkedinLink': [link]} df = pd.DataFrame(dict) df.to_csv('opportunities.csv',mode = 'a', header = False, index = False) def apply_position(): apply_btn = driver.find_element_by_xpath("//button[contains(@class,'jobs-apply-button')]") apply_btn.click() #driver.execute_script("window.open('http://google.com', 'new_window')") sleep(5) #print(driver.window_handles[counter]) window_name = driver.window_handles[1] driver.switch_to.window(window_name=window_name) direct_url.append(driver.current_url) driver.close() sleep(5) driver.switch_to.window(window_name=main_window_name) #counter += 1 #print('Current counter = ', counter) direct_url = [] for link in position_link : driver.get(link) sleep(3) # status = 'not applied' try: try: driver.find_element_by_xpath("//a//li-icon[contains(@type,'document-icon')]") direct_url.append('Applied') #counter += 1 #print('Current counter = ', counter) except NoSuchElementException: driver.find_element_by_xpath("//button//li-icon[contains(@type,'linkedin-bug')]") direct_url.append('Easy Apply') sleep(5) # window_name = driver.window_handles[counter] driver.switch_to.window(window_name=main_window_name) #counter += 1 #print('Current counter = ', counter) except NoSuchElementException: apply_position() def validate_url(urlslin): emp_df = pd.read_csv('opportunities.csv',usecols=[4]) # print(emp_df) # f2 = ['https://www.linkedin.com/jobs/view/2257024918/?eBP=JOB_SEARCH_ORGANIC&recommendedFlavor=COMPANY_RECRUIT&refId=3051f9a6-115e-47c3-a266-fe1fc163d1b3&trackingId=FteGSeadtXOUrgJHqXbVxw%3D%3D&trk=flagship3_search_srp_jobs'] f2 = [urlslin] if f2 in emp_df.values: print('TRUE') return 'TRUE' else: print('FALSE') return 'FALSE' 
print('\nWriting data to CSV...') count_exist = 0 count_inexist = 0 for posname,compname,joblocation,direct,urlslin,link in zip(position_name,company_name,job_location,direct_url,urls_linkedin,position_link): print(urlslin) x = validate_url(urlslin) if x == 'TRUE': print('Position exists: ',count_exist) break else: count_inexist += 1 print('Positions being added: ',count_inexist) write_to_csv(posname,compname,joblocation,direct,link) print('\nBUILDING REPORT --------') sleep(3) print('Total positions found: ',len(position_name)) print('Total new positions added: ',count_inexist) print('Total repeated positions: ',len(position_name)-count_inexist)
linkedin_job_scraping.py
import os import pandas as pd from parsel import Selector from time import sleep from selenium import webdriver from webdriver_manager.chrome import ChromeDriverManager from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.by import By import configparser config = configparser.ConfigParser() config.read('config.ini') if config.get('HEADLESS', 'headless') == 'Yes': # install webdrive when needed runing headless opts=webdriver.ChromeOptions() opts.headless=True driver = webdriver.Chrome(ChromeDriverManager().install() ,options=opts) else: # install webdrive when needed runing browser driver = webdriver.Chrome(ChromeDriverManager().install()) print('\nExecuting Linkedin Login...') # driver.get method() will navigate to a page given by the URL address driver.get('https://www.linkedin.com/login') # locate email form by element_by_id username = driver.find_element_by_id('username') # send_keys() to simulate key strokes username.send_keys(config.get('LINKEDIN_LOGIN', 'email')) # locate password form by_class_name password = driver.find_element_by_id('password') # send_keys() to simulate key strokes password.send_keys(config.get('LINKEDIN_LOGIN', 'password')) # locate submit button by_class_name log_in_button = driver.find_element_by_class_name('btn__primary--large') # locate submit button by_xpath log_in_button = driver.find_element_by_xpath('//*[@type="submit"]') log_in_button.click() print('\nStarting Posting Search...') # driver goest to the jobs page driver.get('https://www.linkedin.com/jobs/') sleep(2) # Start search term search_job = driver.find_element_by_xpath('//*[@type="text"]') search_job.send_keys(config.get('LINKEDIN_LOGIN', 'search_term')) sleep(1) #search.send_keys(Keys.RETURN) # location 
search_location = driver.find_element_by_xpath('//input[starts-with(@id,"jobs-search-box-location")]') search_location.send_keys(Keys.COMMAND, 'a') #COMMAND is the mac keyboard control search_location.send_keys(Keys.BACKSPACE) search_location.send_keys(config.get('LINKEDIN_LOGIN', 'country')) search_location.send_keys(Keys.RETURN) sleep(3) # Gets the URL from the search result linkedin_result = driver.current_url # Scroll job list to the end of first page recentList = driver.find_elements_by_class_name('jobs-search-results__list-item') for list in recentList : driver.execute_script("arguments[0].scrollIntoView();", list) sleep(0.1) # Get full list of positions name position_name = driver.find_elements_by_class_name('job-card-list__title') position_name = [url.text for url in position_name] position_name len(position_name) # Get listing Company Name company_name = driver.find_elements_by_css_selector('.job-card-container__company-name') company_name = [url.text for url in company_name] company_name len(company_name) # Get listing location job_location = driver.find_elements_by_xpath('//div[starts-with(@class,"artdeco-entity-lockup__caption")]') job_location = [url.text for url in job_location] job_location len(job_location) # Get full list of links positions position_link = driver.find_elements_by_css_selector("div.artdeco-entity-lockup__title > a") position_link = [link.get_attribute("href") for link in position_link] position_link len(position_link) urls_linkedin = [] for lin in position_link: terminator = lin.index('?') urls_linkedin.append(lin[:terminator]) if os.path.isfile('opportunities.csv') is True: opportunities = pd.read_csv('opportunities.csv') else: dict = {'Job Title': [], 'Company Name': [], 'Location': [], 'Direct URL': [], 'TrimmedLinkedin' : [],'LinkedinLink': []} df = pd.DataFrame(dict) df.to_csv('opportunities.csv',mode = 'a', header = True, index = False) opportunities = pd.read_csv('opportunities.csv') print('\nTotal posts: 
',len(position_link)) print('\nStart buinding direct links list ...') main_window_name = driver.window_handles[0] def write_to_csv(posname,compname,joblocation,direct,link): dict = {'Job Title': [posname], 'Company Name': [compname], 'Location': [joblocation], 'Direct URL': [direct],'TrimmedLinkedin' : [urlslin], 'LinkedinLink': [link]} df = pd.DataFrame(dict) df.to_csv('opportunities.csv',mode = 'a', header = False, index = False) def apply_position(): apply_btn = driver.find_element_by_xpath("//button[contains(@class,'jobs-apply-button')]") apply_btn.click() #driver.execute_script("window.open('http://google.com', 'new_window')") sleep(5) #print(driver.window_handles[counter]) window_name = driver.window_handles[1] driver.switch_to.window(window_name=window_name) direct_url.append(driver.current_url) driver.close() sleep(5) driver.switch_to.window(window_name=main_window_name) #counter += 1 #print('Current counter = ', counter) direct_url = [] for link in position_link : driver.get(link) sleep(3) # status = 'not applied' try: try: driver.find_element_by_xpath("//a//li-icon[contains(@type,'document-icon')]") direct_url.append('Applied') #counter += 1 #print('Current counter = ', counter) except NoSuchElementException: driver.find_element_by_xpath("//button//li-icon[contains(@type,'linkedin-bug')]") direct_url.append('Easy Apply') sleep(5) # window_name = driver.window_handles[counter] driver.switch_to.window(window_name=main_window_name) #counter += 1 #print('Current counter = ', counter) except NoSuchElementException: apply_position() def validate_url(urlslin): emp_df = pd.read_csv('opportunities.csv',usecols=[4]) # print(emp_df) # f2 = ['https://www.linkedin.com/jobs/view/2257024918/?eBP=JOB_SEARCH_ORGANIC&recommendedFlavor=COMPANY_RECRUIT&refId=3051f9a6-115e-47c3-a266-fe1fc163d1b3&trackingId=FteGSeadtXOUrgJHqXbVxw%3D%3D&trk=flagship3_search_srp_jobs'] f2 = [urlslin] if f2 in emp_df.values: print('TRUE') return 'TRUE' else: print('FALSE') return 'FALSE' 
print('\nWriting data to CSV...') count_exist = 0 count_inexist = 0 for posname,compname,joblocation,direct,urlslin,link in zip(position_name,company_name,job_location,direct_url,urls_linkedin,position_link): print(urlslin) x = validate_url(urlslin) if x == 'TRUE': print('Position exists: ',count_exist) break else: count_inexist += 1 print('Positions being added: ',count_inexist) write_to_csv(posname,compname,joblocation,direct,link) print('\nBUILDING REPORT --------') sleep(3) print('Total positions found: ',len(position_name)) print('Total new positions added: ',count_inexist) print('Total repeated positions: ',len(position_name)-count_inexist)
0.165425
0.056314
import pandas as pandas import numpy as np from sklearn.metrics import pairwise_distances from sklearn.preprocessing import StandardScaler from scipy import stats import seaborn as sns import matplotlib.pyplot as plt def normalize_btwn_0_1(list_obj): """ Takes a list and normalizes the values from 0 (smallest) to 1(largest) """ return (list_obj-min(list_obj))/(max(list_obj)-min(list_obj)) def get_pairwise(behav_vct,type="absolute-dist",norm=True): """ Takes a vector of behavioral scores (one per subject) and returns the vectorized upper triangle of a similarity matrix constructed using: A) absolute distance B) average, or C) one formulation of the "AnnaK" principle (i.e., high-high pairs are most alike, low-low pairs are most dissimilar, and high-low pairs show intermediate similarity). (all high scorers are alike, all low scorers are low-scoring in their own way) """ # Get dims n_subs = len(behav_vct) # Initialize output mtx = np.zeros((n_subs,n_subs)) if norm: behav_vct = normalize_btwn_0_1(behav_vct) # Fill in matrix for i in range(n_subs): for j in range(n_subs): if type == 'low-alike': mtx[i,j] = max(behav_vct[i], behav_vct[j]) elif type == 'high-alike': mtx[i,j] = (1 - min(behav_vct[i], behav_vct[j])) #/n_subs elif type == 'average': mtx[i,j] = (behav_vct[i]+behav_vct[j])/2 elif type == 'absolute-dist': mtx[i,j] = np.absolute(behav_vct[i]-behav_vct[j]) # Compute upper triangle vct = mtx[np.triu_indices(mtx.shape[0], k=1)] return vct, mtx def shuffle(df, type="pandas"): """ Take a DataFrame where the columns are variables and the observations are the rows (e.g., row indices are subject IDs), and randomly shuffles the row indices. 
""" if type == "pandas": perm_data = df.copy() perm_data['new_id'] = np.random.permutation(perm_data.index) # assign new index perm_data.index = (perm_data['new_id']) # get rid of index name perm_data.index.name = None # get rid of added column perm_data = perm_data.drop(['new_id'], axis=1) elif type == "numpy": perm_data = np.random.permutation(df) # Now have subjects x variables DataFrame with subject IDs randomly shuffled. return perm_data def zscore_df(df, var_list): """ Takes DateFrame and z-scores values within each of the columns """ scaler = StandardScaler() return scaler.fit_transform(df[var_list])
lsan_tools/math.py
import pandas as pandas
import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import StandardScaler
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt


def normalize_btwn_0_1(list_obj):
    """
    Linearly rescale an array-like so its minimum maps to 0 and its
    maximum maps to 1.
    """
    lowest = min(list_obj)
    highest = max(list_obj)
    return (list_obj - lowest) / (highest - lowest)


def get_pairwise(behav_vct, type="absolute-dist", norm=True):
    """
    Build a subject-by-subject similarity matrix from a vector of
    behavioral scores and return its vectorized upper triangle plus the
    full matrix.

    Supported rules: 'absolute-dist' (|a - b|), 'average' ((a + b) / 2),
    and two "AnnaK"-style formulations — 'high-alike' (1 - min(a, b)) and
    'low-alike' (max(a, b)). An unrecognized rule leaves the matrix at its
    zero initialization.
    """
    n_subs = len(behav_vct)
    mtx = np.zeros((n_subs, n_subs))
    if norm:
        behav_vct = normalize_btwn_0_1(behav_vct)

    # Dispatch table replaces the per-cell if/elif chain.
    formulas = {
        'low-alike': lambda a, b: max(a, b),
        'high-alike': lambda a, b: 1 - min(a, b),  # /n_subs
        'average': lambda a, b: (a + b) / 2,
        'absolute-dist': lambda a, b: np.absolute(a - b),
    }
    rule = formulas.get(type)
    if rule is not None:
        for i, j in np.ndindex(n_subs, n_subs):
            mtx[i, j] = rule(behav_vct[i], behav_vct[j])

    # Upper triangle without the diagonal, as a flat vector.
    vct = mtx[np.triu_indices(mtx.shape[0], k=1)]
    return vct, mtx


def shuffle(df, type="pandas"):
    """
    Randomly permute the row labels of a subjects-x-variables DataFrame
    (type="pandas"), or permute the rows themselves with numpy
    (type="numpy").
    """
    if type == "pandas":
        shuffled = df.copy()
        # Draw a permutation of the existing labels and install it as the index.
        shuffled['new_id'] = np.random.permutation(shuffled.index)
        shuffled.index = shuffled['new_id']
        shuffled.index.name = None  # drop the 'new_id' index name
        shuffled = shuffled.drop(['new_id'], axis=1)  # drop the helper column
    elif type == "numpy":
        shuffled = np.random.permutation(df)
    # Subject IDs are now randomly reassigned to rows.
    return shuffled


def zscore_df(df, var_list):
    """
    Z-score the named columns of a DataFrame (mean 0, std 1 per column).
    """
    return StandardScaler().fit_transform(df[var_list])
0.7237
0.557966