arxiv_id
stringlengths
0
16
text
stringlengths
10
1.65M
import sys
import pickle
from os.path import join, normpath, dirname

# import packages in trainer
sys.path.append(join(dirname(__file__), '..', 'trainer'))
from preprocessor import PreProcessor

import tensorflow as tf
import numpy as np
import pandas as pd

import news_classes  # fix: was imported twice

from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer

from config import SERVER_HOST, SERVER_PORT
from config import MONGO_DB_HOST, MONGO_DB_PORT
from tap_news_utils.mongodb_client import MongoDBClient
from tensorflow.keras.models import load_model

MODEL_FILE = normpath(join(dirname(__file__), '../model/keras_model.h5'))
VARS_FILE = normpath(join(dirname(__file__), '../model/vars'))
TOKS_FILE = normpath(join(dirname(__file__), '../model/tokenizer'))

MODEL_UPDATE_LAG_IN_SECONDS = 10

# Hyper-parameters; real values are restored from VARS_FILE by restoreVars().
N_CLASSES = 0
VOCAB_SIZE = 0
MAX_DOCUMENT_LENGTH = 0
EMBED_DIM = 0

tokenizer = None
classifier = None


def restoreVars():
    """Restore training hyper-parameters and the fitted tokenizer from disk.

    Reads the pickled dict at VARS_FILE and the pickled tokenizer at
    TOKS_FILE, updating the module-level globals used by classify().
    """
    global VOCAB_SIZE, EMBED_DIM, MAX_DOCUMENT_LENGTH, N_CLASSES, tokenizer
    with open(VARS_FILE, 'rb') as f:
        saved_vars = pickle.load(f)  # renamed: 'vars' shadowed the builtin
    VOCAB_SIZE = saved_vars['VOCAB_SIZE']
    EMBED_DIM = saved_vars['EMBED_DIM']
    MAX_DOCUMENT_LENGTH = saved_vars['MAX_DOCUMENT_LENGTH']
    N_CLASSES = saved_vars['N_CLASSES']
    with open(TOKS_FILE, 'rb') as f:
        tokenizer = pickle.load(f)


def loadModel():
    """Load the trained Keras classifier from MODEL_FILE into the global."""
    global classifier
    classifier = load_model(MODEL_FILE)


restoreVars()
loadModel()
print("Model loaded.")


def classify(text):
    """Classify a news text and return its topic name.

    Args:
        text (str): raw article text.

    Returns:
        str: topic name looked up in news_classes.class_map.
    """
    text = PreProcessor.clean_text(text)
    data = np.array([text])
    data = tokenizer.texts_to_sequences(data)
    data = tf.keras.preprocessing.sequence.pad_sequences(
        data, maxlen=MAX_DOCUMENT_LENGTH)
    y_predicted = np.argmax(classifier.predict(data), axis=1)
    # class_map keys are 1-based string indices, predictions are 0-based
    topic = news_classes.class_map[str(y_predicted[0] + 1)]
    return topic


def backfill():
    """Populate the 'class' field for every news document that lacks one.

    Iterates the whole 'news' collection; documents without a 'class' are
    classified by their description (falling back to the title when the
    description is None) and written back keyed by 'digest'.
    """
    print('begin backfilling')
    db = MongoDBClient(MONGO_DB_HOST, MONGO_DB_PORT).get_db()
    cursor = db['news'].find({})
    count = 0
    for news in cursor:
        count += 1
        print(count)
        if 'class' not in news:
            print('Populating classes...')
            description = news['description']
            if description is None:
                description = news['title']
            topic = classify(description)
            news['class'] = topic
            db['news'].replace_one({'digest': news['digest']}, news, upsert=True)


backfill()
print("Backfill news.")

# Threading RPC Server
RPC_SERVER = SimpleJSONRPCServer((SERVER_HOST, SERVER_PORT))
RPC_SERVER.register_function(classify, 'classify')
print("Starting RPC server on %s:%d" % (SERVER_HOST, SERVER_PORT))
RPC_SERVER.serve_forever()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: image.py
# Author: Qian Ge <geqian1001@gmail.com>

import numpy as np
# NOTE: scipy.misc.imresize was removed from SciPy (deprecated in 1.0,
# deleted in 1.3; scipy.misc itself is gone since 1.12), so the previous
# implementation could not run on a modern SciPy. Resizing is now done
# with a numpy nearest-neighbor resampler below. Pixel values therefore
# differ slightly from the old bilinear+uint8-rescaling imresize output.


def _nn_resize(image, new_height, new_width):
    """Nearest-neighbor resample of a 2-D or 3-D image array.

    Channels (axis 2), if present, are preserved. The input dtype is kept.
    """
    src_h = image.shape[0]
    src_w = image.shape[1]
    rows = np.minimum(
        (np.arange(new_height) * (src_h / float(new_height))).astype(int),
        src_h - 1)
    cols = np.minimum(
        (np.arange(new_width) * (src_w / float(new_width))).astype(int),
        src_w - 1)
    return image[rows][:, cols]


def resize_image_with_smallest_side(image, small_size):
    """Resize a single image so its smallest side equals small_size,
    keeping the original aspect ratio.

    Args:
        image (np.array): 2-D image of shape [height, width],
            3-D image of shape [height, width, channels], or
            4-D image of shape [1, height, width, channels].
        small_size (int): target length of the smallest side.

    Returns:
        np.array: the resized image with the same rank as the input
        (the leading singleton axis of a 4-D input is restored).
    """
    im_shape = image.shape
    shape_dim = len(im_shape)
    assert shape_dim <= 4 and shape_dim >= 2,\
        'Wrong format of image!Shape is {}'.format(im_shape)

    if shape_dim == 4:
        image = np.squeeze(image, axis=0)
        height = float(im_shape[1])
        width = float(im_shape[2])
    else:
        height = float(im_shape[0])
        width = float(im_shape[1])

    # scale the smaller side to small_size, the other proportionally
    if height <= width:
        new_height = int(small_size)
        new_width = int(new_height / height * width)
    else:
        new_width = int(small_size)
        new_height = int(new_width / width * height)

    im = _nn_resize(image, new_height, new_width)
    if shape_dim == 4:
        im = np.expand_dims(im, axis=0)
    return im


def im_normalize(im):
    """Resize im to 256x256 (name kept for backward compatibility;
    the original implementation also only resized, via imresize)."""
    return _nn_resize(im, 256, 256)
# This file is part of GenMap and released under the MIT License, see LICENSE.
# Author: Takuya Kojima

import networkx as nx
import copy

# node-name templates for each resource type
ALU_node_exp = "ALU_{pos[0]}_{pos[1]}"
SE_node_exp = "SE_{id}_{name}_{pos[0]}_{pos[1]}"
CONST_node_exp = "CONST_{index}"
IN_PORT_node_exp = "IN_PORT_{index}"
OUT_PORT_node_exp = "OUT_PORT_{index}"

DEFAULT_EDGE_WEIGHT = {"ALU": 1, "SE": 1, "IN_PORT": 0, "OUT_PORT": 0, "Const": 0}


class PEArrayModel():
    """Model of a CGRA PE array built from an XML specification.

    The array is held as a networkx DiGraph whose nodes are ALUs,
    SE output links, constant registers and I/O ports, plus lookup
    tables for configuration values, pipeline registers, body-bias
    domains and I/O placement.
    """

    class InvalidConfigError(Exception):
        pass

    def __init__(self, conf):
        '''Build the model from an architecture description.

        Args:
            conf (XML Element): Specification of the PE array.
                Required attributes:
                    name:  architecture name
                    width, height: PE array size
                    const_reg: number of constant registers
                        ("X"/"x" means unlimited constants)
                    either input_port & output_port or inout_port:
                        numbers of I/O ports
                Child elements:
                    "PE": one per coordinate ("coord" attribute, optional
                        "bbdomain"), containing exactly one "ALU"
                        (with "operation" children carrying a "value"
                        config attribute and optional "route") and any
                        number of "SE"s ("id" attribute, "output"
                        children named via "name", optional
                        return_only="True").
                    "input": a connection; attributes name, type
                        (ALU/SE/Const/IN_PORT), value, and depending on
                        type: coord, id, src_name, index, weight.
                    "PREG": pipeline register with vertical position
                        "vpos" (between PE rows vpos-1 and vpos).
                    "IN_PORT"/"OUT_PORT": "index" attribute and optional
                        "pos" placement ("bottom", "top", "left", "right").

        Raises:
            ValueError or InvalidConfigError for invalid configurations.
        '''
        # init all member variables
        self.__network = nx.DiGraph()
        self.__width = 0
        self.__height = 0
        self.__arch_name = ""
        self.__const_reg_range = []
        self.__in_port_range = []
        self.__out_port_range = []
        self.__inout_used = True
        self.__link_weight = {}
        # operation list supported by the PEs: [pos_x][pos_y] -> opcodes
        self.__operation_list = []
        # resources per body-bias domain: domain -> {"ALU"/"SE": [node names]}
        self.__bb_domains = {}
        # pipeline regs positions (list)
        self.__preg_positions = []
        # SE list: coord -> SE ID -> set of SE node names
        self.__se_lists = {}
        # SEs whose return_only attr is True
        self.__return_only_se = []
        # network configuration value: successor -> predecessor -> value
        self.__config_net_table = {}
        # operation configuration value: [x][y][opcode] -> value
        self.__config_op_table = []
        # output names of SE: node name -> name defined by config file
        self.__output_names = {}
        # PEs available for routing (ALU coordinates)
        self.__routing_ALU = []
        # opcode for routing: ALU node name -> opcode
        self.__routing_opcode = {}
        # const attributes
        self.__infini_const = False
        # heterogeneity of ISA
        self.__heteroISA = False
        # ALU list for each op: opcode -> list of ALU coords
        self.__supported_ALU = {}
        # IO placement
        self.__input_pos = {"left": [], "right": [], "top": [], "bottom": []}
        self.__output_pos = {"left": [], "right": [], "top": [], "bottom": []}

        # get architecture name
        name_str = conf.get("name")
        if name_str is None:
            raise self.InvalidConfigError("missing attribute of architecure name: name")
        elif name_str == "":
            raise ValueError("Ivalid attribute of archirecture name: name")
        else:
            self.__arch_name = name_str

        # init PE array width
        width_str = conf.get("width")
        if width_str is None:
            raise self.InvalidConfigError("missing PE array attribute: width")
        elif width_str.isdigit() == False:
            raise ValueError("Invalid PE array attribute: width")
        else:
            self.__width = int(width_str)
            if not self.__width > 0:
                raise ValueError("PE width must be greater than 0")

        # init PE array height
        height_str = conf.get("height")
        if height_str is None:
            raise self.InvalidConfigError("missing PE array attribute: height")
        elif height_str.isdigit() == False:
            raise ValueError("Invalid PE array attribute: height")
        else:
            self.__height = int(height_str)
            if not self.__height > 0:
                raise ValueError("PE height must be greater than 0")

        # init PE array const
        const_str = conf.get("const_reg")
        if const_str is None:
            raise self.InvalidConfigError("missing PE array attribute: const_reg")
        elif const_str.isdigit() == False:
            # "X"/"x" means an unlimited supply of constants
            if const_str == "X" or const_str == "x":
                self.__infini_const = True
            else:
                raise ValueError("Invalid PE array attribute: const_reg")
        else:
            self.__const_reg_range = list(range(int(const_str)))
            for c_reg in self.__const_reg_range:
                self.__network.add_node(CONST_node_exp.format(index=c_reg))

        # init PE array inout port
        inoutport_str = conf.get("inout_port")
        if inoutport_str is None:
            self.__inout_used = False
        elif inoutport_str.isdigit() == False:
            raise ValueError("Invalid PE array attribute: inout_port")
        else:
            inout_size = int(inoutport_str)
            if inout_size <= 0:
                print("Warnning: the size of inout port is less than or equal to zero")
                self.__inout_used = False
            else:
                # virtually create both input port and output port
                self.__in_port_range = list(range(inout_size))
                self.__out_port_range = list(range(inout_size))
                for i in self.__in_port_range:
                    self.__network.add_node(IN_PORT_node_exp.format(index=i))

        if not self.__inout_used:
            # init PE array inport
            inport_str = conf.get("input_port")
            if inport_str is None:
                raise self.InvalidConfigError("missing PE array attribute: input_port")
            elif inport_str.isdigit() == False:
                raise ValueError("Invalid PE array attribute: input_port")
            else:
                self.__in_port_range = list(range(int(inport_str)))
                for iport in self.__in_port_range:
                    self.__network.add_node(IN_PORT_node_exp.format(index=iport))
            # init PE array outport
            outport_str = conf.get("output_port")
            if outport_str is None:
                raise self.InvalidConfigError("missing PE array attribute: output_port")
            elif outport_str.isdigit() == False:
                raise ValueError("Invalid PE array attribute: output_port")
            else:
                self.__out_port_range = list(range(int(outport_str)))

        # init operation list & config table
        self.__operation_list = [[[] for y in range(self.__height)]
                                 for x in range(self.__width)]
        self.__config_op_table = [[{} for y in range(self.__height)]
                                  for x in range(self.__width)]

        # get PREG configs
        pregs = [preg for preg in conf if preg.tag == "PREG"]
        for preg in pregs:
            vpos_str = preg.get("vpos")
            if vpos_str is None:
                raise self.InvalidConfigError("missing PREG vertical position")
            elif vpos_str.isdigit() == False:
                raise ValueError("Invalid PREG vertical position: " + vpos_str)
            else:
                self.__preg_positions.append(int(vpos_str))
        self.__preg_positions.sort()

        # init SE list
        for x in range(self.__width):
            for y in range(self.__height):
                self.__se_lists[(x, y)] = {}

        # get PE configs
        PEs = [pe for pe in conf if pe.tag == "PE"]
        # check config number
        # fix: .format() previously bound to only the second half of the
        # concatenated message, leaving "{0}" unformatted
        if len(PEs) != self.__width * self.__height:
            raise self.InvalidConfigError(
                ("The number of PE configs is {0} "
                 "but the specified array size is {1}x{2}").format(
                    len(PEs), self.__width, self.__height))

        # load PE configs & add to network
        connections = {}
        for pe in PEs:
            # check coordinate
            coord_str = pe.get("coord")
            if coord_str is None:
                raise self.InvalidConfigError("missing PE coordinate")
            elif coord_str:
                (x, y) = self.__coord_str2tuple(coord_str)
            # check body bias domain
            bb = pe.get("bbdomain")
            if bb is not None:
                if bb not in self.__bb_domains.keys():
                    self.__bb_domains[bb] = {"ALU": [], "SE": []}

            # ALU
            if len(list(pe.iter("ALU"))) != 1:
                raise self.InvalidConfigError(
                    "missing an ALU for PE({0}) or it has more than one PEs".format((x, y)))
            ALU = list(pe.iter("ALU"))[0]
            alu_node = ALU_node_exp.format(pos=(x, y))
            self.__network.add_node(alu_node)
            if bb is not None:
                self.__bb_domains[bb]["ALU"].append(alu_node)
            for op in ALU.iter("operation"):
                if str(op.text) != "":
                    self.__operation_list[x][y].append(str(op.text))
                else:
                    raise self.InvalidConfigError("Empty opcode for ALU at " + str((x, y)))
                conf_val_str = op.get("value")
                if conf_val_str is not None:
                    try:
                        conf_val = int(conf_val_str)
                    except ValueError:
                        raise ValueError("Invalid configration value for opcode: "
                                         + str(op.text))
                    self.__config_op_table[x][y][op.text] = conf_val
                else:
                    raise self.InvalidConfigError("missing configuration value for opcode: "
                                                  + str(op.text))
                # check routing ability
                route_op = op.get("route")
                if route_op is not None:
                    route_op = str(route_op)
                    if route_op not in ["true", "false"]:
                        raise self.InvalidConfigError(
                            "unknown value for route attribute of opcode: " + route_op +
                            "Either \"true\" or \"false\" must be specified")
                    elif route_op == "true":
                        self.__routing_ALU.append((x, y))
                        self.__routing_opcode[alu_node] = op.text
            connections[alu_node] = ALU.iter("input")

            # SE
            for se in pe.iter("SE"):
                # check id of the SE
                if se.get("id") is None:
                    raise self.InvalidConfigError("missing SE id")
                else:
                    try:
                        se_id = int(se.get("id"))
                    except ValueError:
                        raise ValueError("Invalid SE id: " + se.get("id"))
                for output in se.iter("output"):
                    if output.get("name") is None:
                        raise self.InvalidConfigError(
                            "missing output name of SE at ({0}, {1})".format((x, y)))
                    se_node_name = SE_node_exp.format(pos=(x, y),
                                                      name=output.get("name"), id=se_id)
                    self.__network.add_node(se_node_name)
                    if bb is not None:
                        self.__bb_domains[bb]["SE"].append(se_node_name)
                    connections[se_node_name] = output.iter("input")
                    if se_id not in self.__se_lists[(x, y)].keys():
                        self.__se_lists[(x, y)][se_id] = set()
                    self.__se_lists[(x, y)][se_id].add(se_node_name)
                    self.__output_names[se_node_name] = output.get("name")
                    if output.get("return_only") == "True":
                        self.__return_only_se.append(se_node_name)

        # get output connections
        for ele in conf:
            if ele.tag == "OUT_PORT":
                if ele.get("index") is None:
                    raise self.InvalidConfigError("missing OUT_PORT index")
                else:
                    try:
                        oport_index = int(ele.get("index"))
                    except ValueError:
                        raise ValueError("Invalid OUT_PORT index: " + ele.get("index"))
                if oport_index not in self.__out_port_range:
                    raise self.InvalidConfigError(
                        "OUT_PORT index {0} is out of range".format(oport_index))
                connections[OUT_PORT_node_exp.format(index=oport_index)] = ele.iter("input")
                # check position setting
                pos = ele.get("pos")
                if pos is not None:
                    if pos not in self.__output_pos.keys():
                        raise self.InvalidConfigError(
                            "Unknown OUT_PORT {0} position: {1}".format(oport_index, pos))
                    else:
                        self.__output_pos[pos].append(oport_index)

        # make connections
        for dst, srcs in connections.items():
            self.__make_connection(dst, srcs)

        # get input position setting
        for ele in conf:
            if ele.tag == "IN_PORT":
                if ele.get("index") is None:
                    raise self.InvalidConfigError("missing IN_PORT index")
                else:
                    try:
                        iport_index = int(ele.get("index"))
                    except ValueError:
                        raise ValueError("Invalid IN_PORT index: " + ele.get("index"))
                if iport_index not in self.__in_port_range:
                    raise self.InvalidConfigError(
                        "IN_PORT index {0} is out of range".format(iport_index))
                pos = ele.get("pos")
                if pos is not None:
                    if pos not in self.__input_pos.keys():
                        raise self.InvalidConfigError(
                            "Unknown IN_PORT {0} position: {1}".format(iport_index, pos))
                    else:
                        self.__input_pos[pos].append(iport_index)

        # sort io position lists in ascending order
        for v in self.__input_pos.values():
            v.sort()
        for v in self.__output_pos.values():
            v.sort()

        # check io position setting (warn when only some ports are placed)
        lack = set(self.__in_port_range) - \
            set([inner for outer in self.__input_pos.values() for inner in outer])
        if len(self.__in_port_range) > len(lack) > 0:
            print("Warning: positions for some IN_PORTs ({0}) are not specified".format(lack))
        lack = set(self.__out_port_range) - \
            set([inner for outer in self.__output_pos.values() for inner in outer])
        if len(self.__out_port_range) > len(lack) > 0:
            print("Warning: positions for some OUT_PORTs ({0}) are not specified".format(lack))

        # set node attributes
        nx.set_node_attributes(self.__network, True, "free")
        # set edge attributes
        self.setInitEdgeAttr("free", True)

        # analyze ISA heterogeneity
        base = set(self.__operation_list[0][0])
        for x in range(self.__width):
            for y in range(self.__height):
                if base != set(self.__operation_list[x][y]):
                    self.__heteroISA = True
                    break
        all_ops = set([op for cols in self.__operation_list
                       for pe in cols for op in pe])
        for op in all_ops:
            self.__supported_ALU[op] = \
                [(x, y) for x in range(self.__width)
                 for y in range(self.__height)
                 if op in self.__operation_list[x][y]]

    def __reg_config_net_table(self, dst, src, value):
        """Register a configuration-data table entry for the network.

        Args:
            dst (str): destination node name
            src (str): source node name
            value (int): configuration value when routing from src to dst
        """
        if dst not in self.__config_net_table.keys():
            self.__config_net_table[dst] = {}
        self.__config_net_table[dst][src] = value

    def __make_connection(self, dst, srcs):
        """Make connections from multiple source nodes to a destination node.

        Args:
            dst (str): destination node name
            srcs (iterable of XML Element): "input" elements describing sources

        Raises:
            InvalidConfigError: when a referenced source node does not exist
                or the connection description is incomplete.
        """
        for src in srcs:
            # parse input info
            attr = self.__parse_input(dst, src)
            if attr["type"] == "ALU":
                # add edge from ALU
                if attr["coord"] is not None:
                    alu = ALU_node_exp.format(pos=attr["coord"])
                    if alu in self.__network.nodes():
                        src_node = alu
                    else:
                        raise self.InvalidConfigError(alu + " is not exist")
                else:
                    raise self.InvalidConfigError(
                        "missing coordinate of ALU connected to " + dst)
            elif attr["type"] == "SE":
                # add edge from SE
                if attr["id"] is None or attr["coord"] is None or attr["src_name"] is None:
                    raise self.InvalidConfigError(
                        "missing id, coordinate, or src name of SE connected to " + dst)
                else:
                    se = SE_node_exp.format(pos=attr["coord"], id=attr["id"],
                                            name=attr["src_name"])
                    if se in self.__network.nodes():
                        src_node = se
                    else:
                        raise self.InvalidConfigError(se + " is not exist")
            elif attr["type"] == "Const":
                # add edge from const reg
                if attr["index"] is None:
                    raise self.InvalidConfigError(
                        "missing index of const register connected to " + dst)
                elif attr["index"] in self.__const_reg_range:
                    src_node = CONST_node_exp.format(index=attr["index"])
                else:
                    raise self.InvalidConfigError(
                        str(attr["index"]) + " is out of range for const registers")
            elif attr["type"] == "IN_PORT":
                # add edge from Input Port
                if attr["index"] is None:
                    raise self.InvalidConfigError(
                        "missing index of input port connected to " + dst)
                elif attr["index"] in self.__in_port_range:
                    src_node = IN_PORT_node_exp.format(index=attr["index"])
                else:
                    raise self.InvalidConfigError(
                        str(attr["index"]) + " is out of range for input port")
            else:
                # fix: message previously read "known connection type"
                raise self.InvalidConfigError(
                    "unknown connection type {0}".format(attr["type"]))

            # add edge and config table entry
            self.__network.add_edge(src_node, dst, weight=attr["weight"])
            self.__reg_config_net_table(dst, src_node, attr["conf_value"])
            # save the init weight value to restore
            self.__link_weight[(src_node, dst)] = attr["weight"]

    def __coord_str2tuple(self, s):
        """Convert a string of a 2D coordinate like "(x, y)" to a tuple."""
        try:
            (x, y) = tuple([int(v) for v in s.strip("()").split(",")])
        except ValueError:
            raise self.InvalidConfigError("Invalid PE coordinate " + s)
        if x < 0 or x >= self.__width or y < 0 or y >= self.__height:
            raise self.InvalidConfigError("Coordinate " + s + " is out of range")
        return (x, y)

    def __parse_input(self, dst, input_connection):
        """Parse an "input" XML element.

        Args:
            dst (str): destination node name
            input_connection (XML Element): an input to the dst node

        Returns:
            dict: parsed items (label, type, coord, index, id, src_name,
                conf_value, weight).
        """
        # get connection name
        label = input_connection.get("name")
        if label is None:
            raise self.InvalidConfigError("missing input name connected to " + dst)
        # get connection type
        con_type = input_connection.get("type")
        if con_type is None:
            raise self.InvalidConfigError("missing connection type connected to" + dst)
        # get coord of input_connection (if any)
        coord_str = input_connection.get("coord")
        if coord_str is not None:
            src_coord = self.__coord_str2tuple(coord_str)
        else:
            src_coord = None
        # get index of input_connection (if any)
        index_str = input_connection.get("index")
        if index_str is not None:
            try:
                src_index = int(index_str)
            except ValueError:
                # fix: previously referenced undefined 'src_index' here
                raise ValueError("Invalid index of " + dst + ": " + index_str)
        else:
            src_index = None
        # get id of input_connection (if any)
        id_str = input_connection.get("id")
        if id_str is not None:
            try:
                src_id = int(id_str)
            except ValueError:
                raise ValueError("Invalid id of " + dst + ": " + id_str)
        else:
            src_id = None
        # get src_name of input_connection (if any)
        src_name = input_connection.get("src_name")
        # get link weight
        weight_str = input_connection.get("weight")
        if weight_str is not None:
            try:
                weight = float(weight_str)
            except ValueError:
                raise ValueError("Invalid link weight value of " + weight_str)
        else:
            weight = DEFAULT_EDGE_WEIGHT[con_type]
        # get configuration value
        # config value is essential except for OUT_PORTs
        conf_val_str = input_connection.get("value")
        if conf_val_str is not None:
            try:
                conf_val = int(conf_val_str)
            except ValueError:
                # fix: message args previously formed a tuple instead of one string
                raise ValueError("Invalid configuration value for input " + label +
                                 " for " + dst + ": " + conf_val_str)
        elif not self.isOUT_PORT(dst):
            raise self.InvalidConfigError("missing configuration value for input " +
                                          label + " for " + dst)
        else:
            conf_val = 0

        return {"label": label, "type": con_type, "coord": src_coord,
                "index": src_index, "id": src_id, "src_name": src_name,
                "conf_value": conf_val, "weight": weight}

    # getter methods
    def getArchName(self):
        """Return the architecture name of this model."""
        return self.__arch_name

    def getNetwork(self):
        """Return a deep copy of the PE array network (networkx DiGraph)."""
        return copy.deepcopy(self.__network)

    def getNodeName(self, etype, pos=None, index=None, se_id=None, link_name=None):
        """Return a node name of the PE array network.

        Args:
            etype (str): element type; one of "ALU", "SE", "Const",
                "IN_PORT", "OUT_PORT"
            pos (tuple-like): position (needed for "ALU" and "SE")
            index (int): index (needed for "Const", "IN_PORT", "OUT_PORT")
            se_id (int): SE id (needed for "SE")
            link_name (str): SE output link name (needed for "SE")

        Returns:
            str: the node name.

        Raises:
            ValueError: for an unknown/underspecified element type.
            TypeError: if the composed node does not exist.
        """
        if etype == "ALU" and pos is not None:
            node_name = ALU_node_exp.format(pos=pos)
        elif etype == "SE" and pos is not None and \
                se_id is not None and link_name is not None:
            # fix: previously tested the builtin 'id' instead of 'se_id'
            node_name = SE_node_exp.format(pos=pos, id=se_id, name=link_name)
        elif etype == "Const" and index is not None:
            node_name = CONST_node_exp.format(index=index)
        elif etype == "IN_PORT" and index is not None:
            node_name = IN_PORT_node_exp.format(index=index)
        elif etype == "OUT_PORT" and index is not None:
            node_name = OUT_PORT_node_exp.format(index=index)
        else:
            raise ValueError("Unknown node type: " + etype)

        if self.__network.has_node(node_name):
            return node_name
        else:
            raise TypeError(node_name + " does not exist")

    def setInitEdgeAttr(self, attr_name, attr, edge_type=None):
        """Set an initial attribute on edges of the network model.

        Args:
            attr_name (str): attribute name used in the networkx digraph
            attr (int): attribute value
            edge_type (str): edge type; None initializes all edges.
                The type name matches the Element name and refers to the
                predecessor node (successor for "OUT_PORT").
        """
        if edge_type is None:
            nx.set_edge_attributes(self.__network, attr, attr_name)
        else:
            if edge_type == "ALU":
                edges = {(u, v): {attr_name: attr}
                         for u, v in self.__network.edges() if u.find("ALU") == 0}
            elif edge_type == "SE":
                edges = {(u, v): {attr_name: attr}
                         for u, v in self.__network.edges() if u.find("SE") == 0}
            elif edge_type == "Const":
                edges = {(u, v): {attr_name: attr}
                         for u, v in self.__network.edges() if u.find("CONST") == 0}
            elif edge_type == "IN_PORT":
                edges = {(u, v): {attr_name: attr}
                         for u, v in self.__network.edges() if u.find("IN_PORT") == 0}
            elif edge_type == "OUT_PORT":
                edges = {(u, v): {attr_name: attr}
                         for u, v in self.__network.edges() if v.find("OUT_PORT") == 0}
            else:
                # fix: an unknown type previously caused a NameError below
                raise ValueError("Unknown edge type: " + str(edge_type))
            nx.set_edge_attributes(self.__network, edges)

    def getBBdomains(self):
        """Return body-bias domains: domain name -> resource node names."""
        return self.__bb_domains

    def getSize(self):
        """Return the PE array size as (width, height)."""
        return (self.__width, self.__height)

    def getLinkWeight(self, edge):
        """Return the initial weight saved for an (src, dst) edge."""
        return self.__link_weight[edge]

    def getConstRegs(self):
        """Return const register node names of the network."""
        return [CONST_node_exp.format(index=i) for i in self.__const_reg_range]

    def getInputPorts(self):
        """Return input port node names of the network."""
        return [IN_PORT_node_exp.format(index=i) for i in self.__in_port_range]

    def getOutputPorts(self):
        """Return output port node names of the network."""
        # fix: previously iterated __in_port_range, returning the wrong
        # set when input and output port counts differ
        return [OUT_PORT_node_exp.format(index=i) for i in self.__out_port_range]

    def getInoutPorts(self):
        """Return (input port, output port) pairs when inout mode is used."""
        if self.__inout_used:
            return [(IN_PORT_node_exp.format(index=i), OUT_PORT_node_exp.format(index=i))
                    for i in self.__in_port_range]
        else:
            return []

    def getInputPortsByPos(self, pos):
        """Return input port names placed at pos ("left"/"right"/"top"/"bottom")."""
        if pos in self.__input_pos:
            return [IN_PORT_node_exp.format(index=i) for i in self.__input_pos[pos]]
        else:
            return []

    def getOutputPortsByPos(self, pos):
        """Return output port names placed at pos ("left"/"right"/"top"/"bottom")."""
        if pos in self.__output_pos:
            return [OUT_PORT_node_exp.format(index=i) for i in self.__output_pos[pos]]
        else:
            return []

    def get_PE_resources(self, coord):
        """Return the ALU node name and SE table of the PE at coord."""
        return {"ALU": self.getNodeName("ALU", coord),
                "SE": self.__se_lists[coord]}

    def getPregPositions(self):
        """Return the sorted pipeline-register positions."""
        return self.__preg_positions

    def getFreeSEs(self, routed_graph, x_range=None, y_range=None):
        """Get unused SEs.

        Args:
            routed_graph (networkx graph): routed graph
            x_range (range): width range (optional)
            y_range (range): column range (optional)

        Returns:
            list: SE node names whose "free" attribute is True.
        """
        rtn_list = []
        # check range
        if x_range is None:
            x_range = range(self.__width)
        if y_range is None:
            y_range = range(self.__height)
        for x in x_range:
            for y in y_range:
                se_set = set([se for subset in self.__se_lists[(x, y)].values()
                              for se in subset])
                for se in se_set:
                    if self.__network.nodes[se]["free"] == True:
                        rtn_list.append(se)
        return rtn_list

    def getStageDomains(self, preg_config, remove_return_se=False):
        """Get resources for each pipeline stage.

        Args:
            preg_config (list of bool): pipeline register configuration;
                if the n-th value is True, the n-th preg is activated.
            remove_return_se (bool): eliminate return-only SEs except for
                the last stage (default False).

        Returns:
            list: resources per stage; rtn_list[stage] is a list of node names.
        """
        stage = 0
        rtn_list = [[] for _ in range(sum(preg_config) + 1)]
        # activated preg positions
        active_preg_positions = [self.__preg_positions[i]
                                 for i in range(len(self.__preg_positions))
                                 if preg_config[i] == True]
        # get nodes for each stage
        se_set = set()
        for y in range(self.__height):
            if stage < len(active_preg_positions):
                if active_preg_positions[stage] <= y:
                    stage += 1
            # add ALUs of this row
            rtn_list[stage].extend([ALU_node_exp.format(pos=(x, y))
                                    for x in range(self.__width)])
            # add SEs of this row (return-only SEs are held back)
            for x in range(self.__width):
                se_set = set([se for subset in self.__se_lists[(x, y)].values()
                              for se in subset])
                rtn_list[stage].extend([se for se in se_set
                                        if se not in self.__return_only_se])
        # return-only SEs always belong to the last stage
        if remove_return_se:
            rtn_list[-1].extend(list(set(self.__return_only_se) & se_set))
        else:
            rtn_list[-1].extend(self.__return_only_se)
        return rtn_list

    def getPregNumber(self):
        """Return the pipeline register count of the architecture."""
        return len(self.__preg_positions)

    def isNeedConstRoute(self):
        """Return whether const routing is needed (False for unlimited consts)."""
        return not self.__infini_const

    def isIOShared(self):
        """Return whether inout ports are used instead of input/output ports."""
        return self.__inout_used

    def isHeteroISA(self):
        """Return the heterogeneity of the ISA."""
        return self.__heteroISA

    def getSupportedOps(self):
        """Return all supported operations."""
        return list(self.__supported_ALU.keys())

    def getSupportedALUs(self, opcode):
        """Return the ALU coordinate list supporting the specified opcode."""
        try:
            return self.__supported_ALU[opcode]
        except KeyError:
            return []

    @staticmethod
    def isSE(node_name):
        """Return True if the node name denotes an SE."""
        return node_name.find("SE") == 0

    @staticmethod
    def isALU(node_name):
        """Return True if the node name denotes an ALU."""
        return node_name.find("ALU") == 0

    @staticmethod
    def isOUT_PORT(node_name):
        """Return True if the node name denotes an OUT_PORT."""
        return node_name.find("OUT_PORT") == 0

    @staticmethod
    def isIN_PORT(node_name):
        """Return True if the node name denotes an IN_PORT."""
        return node_name.find("IN_PORT") == 0

    def getOperationList(self, coord):
        """Return the operation list supported by the ALU at coord.

        Returns an empty list when the coordinate is out of range.
        """
        (x, y) = coord
        if x < 0 or x >= self.__width or y < 0 or y >= self.__height:
            return []
        else:
            return self.__operation_list[x][y]

    def getOpConfValue(self, coord, opcode):
        """Return the configuration value of an ALU opcode at coord."""
        x, y = coord
        return self.__config_op_table[x][y][opcode]

    def getNetConfValue(self, dst, src):
        """Return the configuration value for routing src -> dst."""
        return self.__config_net_table[dst][src]

    def getWireName(self, se):
        """Return the output channel name of an SE node."""
        return self.__output_names[se]

    def isRoutingALU(self, pos):
        """Return whether the ALU at pos can work as a routing node."""
        # fix: previously fell through returning None instead of False
        return pos in self.__routing_ALU

    def getRoutingOpcode(self, node):
        """Return the opcode used for routing through the given ALU node."""
        return self.__routing_opcode[node]
from collections import deque
import random
import numpy as np
import sys


class RingBuf:
    """Ring buffer backed by a fixed list with one spare slot.

    One extra element is allocated so that ``start == end`` always means the
    buffer is EMPTY; with an exact-size allocation that condition would be
    ambiguous (empty vs. full), which would complicate every other method.
    """

    def __init__(self, size):
        self.data = [None] * (size + 1)
        self.start = 0
        self.end = 0

    def append(self, element):
        """Append an element, evicting the oldest one once capacity is reached."""
        self.data[self.end] = element
        self.end = (self.end + 1) % len(self.data)
        # end == start here means we just wrote into the logical head slot:
        # advance start to drop the oldest element.
        if self.end == self.start:
            self.start = (self.start + 1) % len(self.data)

    def __getitem__(self, idx):
        return self.data[(self.start + idx) % len(self.data)]

    def __len__(self):
        if self.end < self.start:
            return self.end + len(self.data) - self.start
        return self.end - self.start

    def __iter__(self):
        for i in range(len(self)):
            yield self[i]

    def sample(self, batch_size):
        """Return ``batch_size`` elements drawn uniformly with replacement.

        Raises ValueError (from random.randint) when the buffer is empty.
        """
        return [self[random.randint(0, len(self) - 1)] for _ in range(batch_size)]


class RingBuffer:
    """Simpler ring buffer: overwrites in place, counts total appends."""

    def __init__(self, buffer_size):
        self.max_buffer_size = buffer_size
        self.current_index = 0
        self.buffer = [None] * self.max_buffer_size
        self.stored_elements = 0

    def append(self, item):
        """Store ``item``, overwriting the oldest slot when full."""
        self.buffer[self.current_index] = item
        self.current_index = (self.current_index + 1) % self.max_buffer_size
        self.stored_elements += 1

    def random_pick(self, n_elem):
        """Return ``n_elem`` stored elements drawn uniformly with replacement.

        Bug fix: ``np.random.randint``'s upper bound is *exclusive*, so the
        original ``high - 1`` could never pick the most recent slot and raised
        ValueError when exactly one element was stored.
        """
        high = min(self.stored_elements, self.max_buffer_size)
        return [self.buffer[np.random.randint(0, high)] for _ in range(n_elem)]


def main():
    """Benchmark fill/sample throughput of deque vs. RingBuffer."""
    print("Init...")
    memory_size = 400000
    queue = deque(maxlen=memory_size)
    # ring = RingBuf(memory_size)
    ring2 = RingBuffer(memory_size)

    print("Filling...")
    for i in range(2 * memory_size):
        elem = np.zeros(dtype="uint8", shape=(84))
        queue.append(elem)
        # ring.append(elem)
        ring2.append(elem)

    print("Sampling...")
    for i in range(100000):
        sample = random.sample(queue, 32)
        # sample = ring.sample(32)
        sample = ring2.random_pick(32)
        if len(sample) < 32:
            print("Error")


# Guard the benchmark so importing this module does not run the (slow) driver.
if __name__ == "__main__":
    main()
import networkx as nx
from network2tikz import plot
from graphNxUtils import nxWeightedGraphFromFile
from collections import deque

#https://networkx.github.io/documentation/stable/tutorial.html
#https://pypi.org/project/network2tikz/

g = nxWeightedGraphFromFile("./testCases/input007.txt")
#print(g.edges.data())
#requires matlibplot: nx.draw(g)

# Rendering options shared by every tikz export below.
style = {
    'node_label': [str(v) for v in g.nodes],
    'edge_curved': 0,
    'layout': 'fruchterman_reingold',
    'edge_label': [w for (_, _, w) in g.edges.data('weight')],
}
plot(g, 'mygraph.tex', **style)


def findPath(g, a, b):
    """
    Finds a path in the graph g from the vertex a to the vertex b if one
    exists. No conditions (shortest distance or weighted path) are placed on
    the resulting path. Returns None if no such path exists.
    """
    # Iterative DFS: the explicit stack doubles as the candidate path.
    # Each vertex is "unvisited", "visited" (on the stack), or "processed".
    state = {v: "unvisited" for v in g.nodes}
    stack = [a]
    state[a] = "visited"
    while stack:
        top = stack[-1]
        if top == b:
            # the stack is exactly a path from a to b
            return stack
        # look for an unvisited neighbor to descend into
        candidate = None
        for nbr in g.neighbors(top):
            if state[nbr] == "unvisited":
                candidate = nbr
                break
        if candidate is None:
            # dead end: retire this vertex and backtrack
            state[top] = "processed"
            stack.pop()
        else:
            state[candidate] = "visited"
            stack.append(candidate)
    return None


def bfsTree(g, a):
    """
    Returns a BFS tree resulting from one BFS run starting at vertex a.
    """
    tree = nx.Graph()
    tree.add_nodes_from(range(len(g.nodes)))
    state = {v: "unvisited" for v in g.nodes}
    state[a] = "visited"
    frontier = deque([a])
    while frontier:
        node = frontier.popleft()
        for nbr in g.neighbors(node):
            if state[nbr] != "unvisited":
                continue
            state[nbr] = "visited"
            frontier.append(nbr)
            # record the discovery edge, carrying its weight over
            tree.add_edge(node, nbr, weight=g[node][nbr]['weight'])
        state[node] = "processed"
    return tree


t = bfsTree(g, 0)
plot(t, 'mytree.tex', **style)
nx.write_edgelist(g, "foo.txt")
import numpy as np
from sklearn.datasets import load_diabetes

# Load the diabetes regression dataset (10 features, continuous target).
diabetes = load_diabetes()
columns_names = diabetes.feature_names
y = diabetes.target
X = diabetes.data

# Splitting features and target datasets into: train and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)

# Training a Linear Regression model with fit()
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, y_train)

# Predicting the results for our test dataset
predicted_values = lm.predict(X_test)

# Printing the residuals: difference between real and predicted
for (real, predicted) in list(zip(y_test, predicted_values)):
    print(f"Value: {real:.2f}, pred: {predicted:.2f}, diff: {(real - predicted):.2f}")

# Plotting the residuals: difference between real and predicted
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(palette="inferno")

residuals = y_test - predicted_values

# Fix: seaborn >= 0.12 removed positional x/y arguments for scatterplot —
# they must be passed by keyword (the old calls now raise TypeError).
sns.scatterplot(x=y_test, y=predicted_values)
plt.plot([0, 50], [0, 50], '--')
plt.xlabel('Real Value')
plt.ylabel('Predicted Value')
plt.show()

sns.scatterplot(x=y_test, y=residuals)
plt.plot([50, 0], [0, 0], '--')
plt.xlabel('Real Value')
plt.ylabel('Residual (difference)')
plt.show()

# Fix: distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot is the direct replacement for a KDE-free histogram.
sns.histplot(residuals, bins=20)
plt.plot([0, 0], [50, 0], '--')
plt.title('Residual (difference) Distribution')
plt.show()

# Understanding the error that we want to minimize
from sklearn import metrics
print(f"Printing MAE error(avg abs residual): {metrics.mean_absolute_error(y_test, predicted_values)}")
print(f"Printing MSE error: {metrics.mean_squared_error(y_test, predicted_values)}")
print(f"Printing RMSE error: {np.sqrt(metrics.mean_squared_error(y_test, predicted_values))}")
'''
Agents: stop/random/shortest/seq2seq
'''

import json
import sys
import numpy as np
import random
from collections import namedtuple

import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.distributions as D

from utils import vocab_pad_idx, vocab_eos_idx, flatten, structured_map, try_cuda

#from env import FOLLOWER_MODEL_ACTIONS, FOLLOWER_ENV_ACTIONS, IGNORE_ACTION_INDEX, LEFT_ACTION_INDEX, RIGHT_ACTION_INDEX, START_ACTION_INDEX, END_ACTION_INDEX, FORWARD_ACTION_INDEX, index_action_tuple

# One node in a backward-linked search tree: each state points at its
# predecessor via prev_inference_state, so a full trajectory can be
# reconstructed by walking the chain backwards.
InferenceState = namedtuple("InferenceState", "prev_inference_state, world_state, observation, flat_index, last_action, last_action_embedding, action_count, score, h_t, c_t, last_alpha")

# Minimal lisp-style cons cell used to share list tails cheaply.
Cons = namedtuple("Cons", "first, rest")

def cons_to_list(cons):
    """Materialize a Cons chain into a plain Python list (head first)."""
    l = []
    while True:
        l.append(cons.first)
        cons = cons.rest
        if cons is None:
            break
    return l

def backchain_inference_states(last_inference_state):
    """Walk prev_inference_state links back to the root and return the
    trajectory in forward order as
    (states, observations, actions, scores, attentions).

    Scores are per-step deltas recovered from the cumulative scores; the
    start action's entries are sliced off the action/score/attention lists.
    """
    states = []
    observations = []
    actions = []
    inf_state = last_inference_state
    scores = []
    last_score = None
    attentions = []
    while inf_state is not None:
        states.append(inf_state.world_state)
        observations.append(inf_state.observation)
        actions.append(inf_state.last_action)
        attentions.append(inf_state.last_alpha)
        if last_score is not None:
            scores.append(last_score - inf_state.score)
        last_score = inf_state.score
        inf_state = inf_state.prev_inference_state
    scores.append(last_score)
    return list(reversed(states)), list(reversed(observations)), list(reversed(actions))[1:], list(reversed(scores))[1:], list(reversed(attentions))[1:] # exclude start action

def least_common_viewpoint_path(inf_state_a, inf_state_b):
    # return inference states traversing from A to X, then from Y to B,
    # where X and Y are the least common ancestors of A and B respectively that share a viewpointId
    path_to_b_by_viewpoint = {}
    b = inf_state_b
    b_stack = Cons(b, None)
    # index every ancestor of B by its viewpointId, each mapped to the
    # Cons chain running from that ancestor down to B
    while b is not None:
        path_to_b_by_viewpoint[b.world_state.viewpointId] = b_stack
        b = b.prev_inference_state
        b_stack = Cons(b, b_stack)
    # walk up from A until we hit a viewpoint that is also on B's ancestor
    # chain, then splice the two half-paths together
    a = inf_state_a
    path_from_a = [a]
    while a is not None:
        vp = a.world_state.viewpointId
        if vp in path_to_b_by_viewpoint:
            path_to_b = cons_to_list(path_to_b_by_viewpoint[vp])
            assert path_from_a[-1].world_state.viewpointId == path_to_b[0].world_state.viewpointId
            return path_from_a + path_to_b[1:]
        a = a.prev_inference_state
        path_from_a.append(a)
    raise AssertionError("no common ancestor found")

def batch_instructions_from_encoded(encoded_instructions, max_length, reverse=False, sort=False):
    """Pad/EOS-terminate encoded instructions into a batch tensor.

    Returns (seq_tensor, mask, seq_lengths) and, when sort=True, also the
    permutation indices from sorting by descending length.
    """
    # encoded_instructions: list of lists of token indices (should not be padded, or contain BOS or EOS tokens)
    #seq_tensor = np.array(encoded_instructions)
    # make sure pad does not start any sentence
    num_instructions = len(encoded_instructions)
    seq_tensor = np.full((num_instructions, max_length), vocab_pad_idx)
    seq_lengths = []
    for i, inst in enumerate(encoded_instructions):
        if len(inst) > 0:
            assert inst[-1] != vocab_eos_idx
        if reverse:
            inst = inst[::-1]
        inst = np.concatenate((inst, [vocab_eos_idx]))
        inst = inst[:max_length]
        seq_tensor[i,:len(inst)] = inst
        seq_lengths.append(len(inst))

    seq_tensor = torch.from_numpy(seq_tensor)
    if sort:
        seq_lengths, perm_idx = torch.from_numpy(np.array(seq_lengths)).sort(0, True)
        seq_lengths = list(seq_lengths)
        seq_tensor = seq_tensor[perm_idx]

    # mask is True at padding positions, truncated to the longest sequence
    mask = (seq_tensor == vocab_pad_idx)[:, :max(seq_lengths)]

    # NOTE(review): mask.byte() is the legacy uint8 mask API — newer torch
    # prefers .bool(); confirm against the torch version this repo pins.
    ret_tp = try_cuda(Variable(seq_tensor, requires_grad=False).long()), \
             try_cuda(mask.byte()), \
             seq_lengths
    if sort:
        ret_tp = ret_tp + (list(perm_idx),)
    return ret_tp

class BaseAgent(object):
    ''' Base class for an R2R agent to generate and save trajectories.
    '''

    def __init__(self, env, results_path):
        self.env = env
        self.results_path = results_path
        random.seed(1)
        self.results = {}
        self.losses = [] # For learning agents

    def write_results(self):
        """Dump {instr_id: {instr_id, trajectory}} for every result to
        self.results_path as JSON."""
        results = {}
        for key, item in self.results.items():
            results[key] = {
                'instr_id': item['instr_id'],
                'trajectory': item['trajectory'],
            }
        with open(self.results_path, 'w') as f:
            json.dump(results, f)

    def rollout(self):
        ''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
        raise NotImplementedError

    @staticmethod
    def get_agent(name):
        # resolve e.g. "Stop" -> StopAgent class defined in this module
        return globals()[name+"Agent"]

    def test(self):
        """Run rollouts over the whole evaluation epoch, collecting one
        result per instr_id; stops once an instr_id repeats."""
        self.env.reset_epoch()
        self.losses = []
        self.results = {}

        # We rely on env showing the entire batch before repeating anything
        #print 'Testing %s' % self.__class__.__name__
        looped = False
        rollout_scores = []
        beam_10_scores = []
        while True:
            rollout_results = self.rollout()
            # if self.feedback == 'argmax':
            #     beam_results = self.beam_search(1, load_next_minibatch=False)
            #     assert len(rollout_results) == len(beam_results)
            #     for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
            #         assert rollout_traj['instr_id'] == beam_trajs[0]['instr_id']
            #         assert rollout_traj['trajectory'] == beam_trajs[0]['trajectory']
            #         assert np.allclose(rollout_traj['score'], beam_trajs[0]['score'])
            #     print("passed check: beam_search with beam_size=1")
            #
            #     self.env.set_beam_size(10)
            #     beam_results = self.beam_search(10, load_next_minibatch=False)
            #     assert len(rollout_results) == len(beam_results)
            #     for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
            #         rollout_score = rollout_traj['score']
            #         rollout_scores.append(rollout_score)
            #         beam_score = beam_trajs[0]['score']
            #         beam_10_scores.append(beam_score)
            #         # assert rollout_score <= beam_score
            #     self.env.set_beam_size(1)
            #
            #     print("passed check: beam_search with beam_size=10")

            # if self.feedback == 'teacher' and self.beam_size == 1:
            #     rollout_loss = self.loss
            #     path_obs, path_actions, encoded_instructions = self.env.gold_obs_actions_and_instructions(self.episode_len, load_next_minibatch=False)
            #     for i in range(len(rollout_results)):
            #         assert rollout_results[i]['actions'] == path_actions[i]
            #         assert [o1['viewpoint'] == o2['viewpoint']
            #                 for o1, o2 in zip(rollout_results[i]['observations'], path_obs[i])]
            #     trajs, loss = self._score_obs_actions_and_instructions(path_obs, path_actions, encoded_instructions)
            #     for traj, rollout in zip(trajs, rollout_results):
            #         assert traj['instr_id'] == rollout['instr_id']
            #         assert traj['actions'] == rollout['actions']
            #         assert np.allclose(traj['score'], rollout['score'])
            #     assert np.allclose(rollout_loss.data[0], loss.data[0])
            #     print('passed score test')

            for result in rollout_results:
                if result['instr_id'] in self.results:
                    looped = True
                else:
                    self.results[result['instr_id']] = result
            if looped:
                break
        # if self.feedback == 'argmax':
        #     print("avg rollout score: ", np.mean(rollout_scores))
        #     print("avg beam 10 score: ", np.mean(beam_10_scores))
        return self.results

def path_element_from_observation(ob):
    # (viewpointId, heading_rad, elevation_rad) triple used in trajectories
    return (ob['viewpoint'], ob['heading'], ob['elevation'])

class StopAgent(BaseAgent):
    ''' An agent that doesn't move! '''

    def rollout(self):
        world_states = self.env.reset()
        obs = self.env.observe(world_states)
        traj = [{
            'instr_id': ob['instr_id'],
            'trajectory': [path_element_from_observation(ob) ]
        } for ob in obs]
        return traj

class RandomAgent(BaseAgent):
    ''' An agent that picks a random direction then tries to go straight for
        five viewpoint steps and then stops. '''

    def rollout(self):
        world_states = self.env.reset()
        obs = self.env.observe(world_states)
        traj = [{
            'instr_id': ob['instr_id'],
            'trajectory': [path_element_from_observation(ob)]
        } for ob in obs]
        ended = [False] * len(obs)
        self.steps = [0] * len(obs)
        for t in range(6):
            actions = []
            for i, ob in enumerate(obs):
                if self.steps[i] >= 5:
                    actions.append(0) # do nothing, i.e. end
                    ended[i] = True
                elif self.steps[i] == 0:
                    # skip action 0 (stop): pick among the adjacent locations
                    a = np.random.randint(len(ob['adj_loc_list']) - 1) + 1
                    actions.append(a) # choose a random adjacent loc
                    self.steps[i] += 1
                else:
                    assert len(ob['adj_loc_list']) > 1
                    actions.append(1) # go forward
                    self.steps[i] += 1
            world_states = self.env.step(world_states, actions, obs)
            obs = self.env.observe(world_states)
            for i,ob in enumerate(obs):
                if not ended[i]:
                    traj[i]['trajectory'].append(path_element_from_observation(ob))
        return traj

class ShortestAgent(BaseAgent):
    ''' An agent that always takes the shortest path to goal. '''

    def rollout(self):
        world_states = self.env.reset()
        #obs = self.env.observe(world_states)
        all_obs, all_actions = self.env.shortest_paths_to_goals(world_states, 20)
        return [
            {
                'instr_id': obs[0]['instr_id'],
                # end state will appear twice because stop action is a no-op, so exclude it
                'trajectory': [path_element_from_observation(ob) for ob in obs[:-1]]
            }
            for obs in all_obs
        ]

class Seq2SeqAgent(BaseAgent):
    ''' An agent based on an LSTM seq2seq model with attention.
    '''

    # For now, the agent can't pick which forward move to make - just the one in the middle
    # env_actions = FOLLOWER_ENV_ACTIONS
    # start_index = START_ACTION_INDEX
    # ignore_index = IGNORE_ACTION_INDEX
    # forward_index = FORWARD_ACTION_INDEX
    # end_index = END_ACTION_INDEX

    feedback_options = ['teacher', 'argmax', 'sample']

    def __init__(self, env, results_path, encoder, decoder, episode_len=10, beam_size=1, reverse_instruction=True, max_instruction_length=80):
        super(Seq2SeqAgent, self).__init__(env, results_path)
        self.encoder = encoder
        self.decoder = decoder
        self.episode_len = episode_len
        self.losses = []
        # ignore_index=-1 masks out steps after an episode has ended
        self.criterion = nn.CrossEntropyLoss(ignore_index=-1)
        self.beam_size = beam_size
        self.reverse_instruction = reverse_instruction
        self.max_instruction_length = max_instruction_length
        # NOTE(review): self.feedback is read by _rollout_with_loss but never
        # set here — presumably assigned by the training loop; confirm.

    # @staticmethod
    # def n_inputs():
    #     return len(FOLLOWER_MODEL_ACTIONS)
    #
    # @staticmethod
    # def n_outputs():
    #     return len(FOLLOWER_MODEL_ACTIONS)-2 # Model doesn't output start or ignore

    def _feature_variables(self, obs, beamed=False):
        ''' Extract precomputed features into variable. '''
        # transpose [per-ob feature tuples] into [per-featurizer lists]
        feature_lists = list(zip(*[ob['feature'] for ob in (flatten(obs) if beamed else obs)]))
        assert len(feature_lists) == len(self.env.image_features_list)
        batched = []
        for featurizer, feature_list in zip(self.env.image_features_list, feature_lists):
            batched.append(featurizer.batch_features(feature_list))
        return batched

    def _action_variable(self, obs):
        """Batch per-observation action embeddings, zero-padded to the max
        number of adjacent locations; returns (embeddings, is_valid tensor,
        is_valid numpy mask)."""
        # get the maximum number of actions of all sample in this batch
        max_num_a = -1
        for i, ob in enumerate(obs):
            max_num_a = max(max_num_a, len(ob['adj_loc_list']))

        is_valid = np.zeros((len(obs), max_num_a), np.float32)
        action_embedding_dim = obs[0]['action_embedding'].shape[-1]
        action_embeddings = np.zeros(
            (len(obs), max_num_a, action_embedding_dim),
            dtype=np.float32)
        for i, ob in enumerate(obs):
            adj_loc_list = ob['adj_loc_list']
            num_a = len(adj_loc_list)
            is_valid[i, 0:num_a] = 1.
            # NOTE(review): this inner loop assigns the full embedding block
            # num_a times — the result equals a single slice assignment;
            # adj_dict/n_a are unused. Looks redundant but harmless; confirm.
            for n_a, adj_dict in enumerate(adj_loc_list):
                action_embeddings[i, :num_a, :] = ob['action_embedding']
        # NOTE(review): .cuda() here hard-requires a GPU, unlike the
        # try_cuda(...) used elsewhere in this file.
        return (
            Variable(torch.from_numpy(action_embeddings), requires_grad=False).cuda(),
            Variable(torch.from_numpy(is_valid), requires_grad=False).cuda(),
            is_valid)

    def _teacher_action(self, obs, ended):
        ''' Extract teacher actions into variable. '''
        a = torch.LongTensor(len(obs))
        for i,ob in enumerate(obs):
            # Supervised teacher only moves one axis at a time
            a[i] = ob['teacher'] if not ended[i] else -1
        return try_cuda(Variable(a, requires_grad=False))

    def _proc_batch(self, obs, beamed=False):
        """Encode a batch of observations' instructions into padded tensors."""
        encoded_instructions = [ob['instr_encoding'] for ob in (flatten(obs) if beamed else obs)]
        return batch_instructions_from_encoded(encoded_instructions, self.max_instruction_length, reverse=self.reverse_instruction)

    def rollout(self):
        if self.beam_size == 1:
            return self._rollout_with_loss()
        else:
            assert self.beam_size >= 1
            beams, _, _ = self.beam_search(self.beam_size)
            return [beam[0] for beam in beams]

    def _score_obs_actions_and_instructions(self, path_obs, path_actions, encoded_instructions):
        """Teacher-force the decoder along given gold paths and return
        (trajectories with per-step scores, total loss)."""
        batch_size = len(path_obs)
        assert len(path_actions) == batch_size
        assert len(encoded_instructions) == batch_size
        for path_o, path_a in zip(path_obs, path_actions):
            assert len(path_o) == len(path_a) + 1

        seq, seq_mask, seq_lengths, perm_indices = \
            batch_instructions_from_encoded(
                encoded_instructions, self.max_instruction_length,
                reverse=self.reverse_instruction, sort=True)
        loss = 0

        ctx, h_t, c_t = self.encoder(seq, seq_lengths)
        u_t_prev = self.decoder.u_begin.expand(batch_size, -1) # init action
        ended = np.array([False] * batch_size)
        sequence_scores = try_cuda(torch.zeros(batch_size))
        traj = [{
            'instr_id': path_o[0]['instr_id'],
            'trajectory': [path_element_from_observation(path_o[0])],
            'actions': [],
            'scores': [],
            'observations': [path_o[0]],
            'instr_encoding': path_o[0]['instr_encoding']
        } for path_o in path_obs]

        obs = None
        for t in range(self.episode_len):
            next_obs = []
            next_target_list = []
            # perm_indices maps model (sorted) order back to env order
            for perm_index, src_index in enumerate(perm_indices):
                path_o = path_obs[src_index]
                path_a = path_actions[src_index]
                if t < len(path_a):
                    next_target_list.append(path_a[t])
                    next_obs.append(path_o[t])
                else:
                    # past the end of the gold path: ignore-index target,
                    # keep the last observation
                    next_target_list.append(-1)
                    next_obs.append(obs[perm_index])
            obs = next_obs
            target = try_cuda(Variable(torch.LongTensor(next_target_list), requires_grad=False))

            f_t_list = self._feature_variables(obs) # Image features from obs
            all_u_t, is_valid, _ = self._action_variable(obs)

            assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
            h_t, c_t, alpha, logit, alpha_v = self.decoder(
                u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)

            # Mask outputs of invalid actions
            logit[is_valid == 0] = -float('inf')

            # Supervised training
            loss += self.criterion(logit, target)

            # Determine next model inputs
            a_t = torch.clamp(target, min=0) # teacher forcing
            # update the previous action
            u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()

            # NOTE(review): reduce=False is the legacy per-element loss API
            # (modern torch uses reduction='none'); confirm torch version.
            action_scores = -F.cross_entropy(logit, target, ignore_index=-1, reduce=False).data
            sequence_scores += action_scores

            # Save trajectory output
            for perm_index, src_index in enumerate(perm_indices):
                ob = obs[perm_index]
                if not ended[perm_index]:
                    traj[src_index]['trajectory'].append(path_element_from_observation(ob))
                    traj[src_index]['score'] = float(sequence_scores[perm_index])
                    traj[src_index]['scores'].append(action_scores[perm_index])
                    traj[src_index]['actions'].append(a_t.data[perm_index])
                    # traj[src_index]['observations'].append(ob)

            # Update ended list
            for i in range(batch_size):
                action_idx = a_t[i].data[0]
                if action_idx == 0:
                    ended[i] = True

            # Early exit if all ended
            if ended.all():
                break

        return traj, loss

    def _rollout_with_loss(self):
        initial_world_states = self.env.reset(sort=True)
        initial_obs = self.env.observe(initial_world_states)
        initial_obs = np.array(initial_obs)
        batch_size = len(initial_obs)

        # get mask and lengths
        seq, seq_mask, seq_lengths = self._proc_batch(initial_obs)

        # Forward through encoder, giving initial hidden state and memory cell for decoder
        # TODO consider not feeding this into the decoder, and just using attention

        self.loss = 0

        # NOTE(review): self.feedback is expected to be one of
        # feedback_options; it is not set in __init__ — presumably assigned
        # by the training driver before rollout. Confirm.
        feedback = self.feedback

        ctx,h_t,c_t = self.encoder(seq, seq_lengths)

        # Record starting point
        traj = [{
            'instr_id': ob['instr_id'],
            'trajectory': [path_element_from_observation(ob)],
            'actions': [],
            'scores': [],
            'observations': [ob],
            'instr_encoding': ob['instr_encoding']
        } for ob in initial_obs]

        obs = initial_obs
        world_states = initial_world_states

        # Initial action
        u_t_prev = self.decoder.u_begin.expand(batch_size, -1) # init action
        ended = np.array([False] * batch_size)

        # Indices match permutation of the model, not env

        # Do a sequence rollout and calculate the loss
        env_action = [None] * batch_size
        sequence_scores = try_cuda(torch.zeros(batch_size))
        for t in range(self.episode_len):
            f_t_list = self._feature_variables(obs) # Image features from obs
            all_u_t, is_valid, _ = self._action_variable(obs)

            assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
            h_t, c_t, alpha, logit, alpha_v = self.decoder(
                u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)

            # Mask outputs of invalid actions
            logit[is_valid == 0] = -float('inf')

            # Supervised training
            target = self._teacher_action(obs, ended)
            self.loss += self.criterion(logit, target)

            # Determine next model inputs
            if feedback == 'teacher':
                # turn -1 (ignore) to 0 (stop) so that the action is executable
                a_t = torch.clamp(target, min=0)
            elif feedback == 'argmax':
                _,a_t = logit.max(1) # student forcing - argmax
                a_t = a_t.detach()
            elif feedback == 'sample':
                probs = F.softmax(logit, dim=1) # sampling an action from model
                # Further mask probs where agent can't move forward
                # Note input to `D.Categorical` does not have to sum up to 1
                # http://pytorch.org/docs/stable/torch.html#torch.multinomial
                probs[is_valid == 0] = 0.
                m = D.Categorical(probs)
                a_t = m.sample()
            else:
                sys.exit('Invalid feedback option')

            # update the previous action
            u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()

            action_scores = -F.cross_entropy(logit, a_t, ignore_index=-1, reduce=False).data
            sequence_scores += action_scores

            # dfried: I changed this so that the ended list is updated afterward; this causes <end> to be added as the last action, along with its score, and the final world state will be duplicated (to more closely match beam search)

            # Make environment action
            for i in range(batch_size):
                action_idx = a_t[i].data[0]
                env_action[i] = action_idx

            world_states = self.env.step(world_states, env_action, obs)
            obs = self.env.observe(world_states)
            # print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, world_states[0], a_t.data[0], sequence_scores[0]))

            # Save trajectory output
            for i,ob in enumerate(obs):
                if not ended[i]:
                    traj[i]['trajectory'].append(path_element_from_observation(ob))
                    traj[i]['score'] = sequence_scores[i]
                    traj[i]['scores'].append(action_scores[i])
                    traj[i]['actions'].append(a_t.data[i])
                    traj[i]['observations'].append(ob)

            # Update ended list
            for i in range(batch_size):
                action_idx = a_t[i].data[0]
                if action_idx == 0:
                    ended[i] = True

            # Early exit if all ended
            if ended.all():
                break

        #self.losses.append(self.loss.data[0] / self.episode_len)
        # shouldn't divide by the episode length because of masking
        self.losses.append(self.loss.data[0])
        return traj

    def beam_search(self, beam_size, load_next_minibatch=True, mask_undo=False):
        """Standard beam search over action sequences; returns
        (trajs, completed, traversed_lists) where trajs holds up to
        beam_size scored trajectories per instance."""
        assert self.env.beam_size >= beam_size
        world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
        obs = self.env.observe(world_states, beamed=True)
        batch_size = len(world_states)

        # get mask and lengths
        seq, seq_mask, seq_lengths = self._proc_batch(obs, beamed=True)

        # Forward through encoder, giving initial hidden state and memory cell for decoder
        ctx,h_t,c_t = self.encoder(seq, seq_lengths)

        completed = []
        for _ in range(batch_size):
            completed.append([])

        # one single-state beam per instance to start
        beams = [
            [InferenceState(prev_inference_state=None,
                            world_state=ws[0],
                            observation=o[0],
                            flat_index=i,
                            last_action=-1,
                            last_action_embedding=self.decoder.u_begin.view(-1),
                            action_count=0,
                            score=0.0, h_t=None, c_t=None, last_alpha=None)]
            for i, (ws, o) in enumerate(zip(world_states, obs))
        ]

        # Do a sequence rollout and calculate the loss
        for t in range(self.episode_len):
            # flatten all beams into one decoder batch, remembering which
            # beam and which encoder row each entry came from
            flat_indices = []
            beam_indices = []
            u_t_list = []
            for beam_index, beam in enumerate(beams):
                for inf_state in beam:
                    beam_indices.append(beam_index)
                    flat_indices.append(inf_state.flat_index)
                    u_t_list.append(inf_state.last_action_embedding)
            u_t_prev = torch.stack(u_t_list, dim=0)
            assert len(u_t_prev.shape) == 2
            flat_obs = flatten(obs)
            f_t_list = self._feature_variables(flat_obs) # Image features from obs
            all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)

            assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
            h_t, c_t, alpha, logit, alpha_v = self.decoder(
                u_t_prev, all_u_t, f_t_list[0], h_t[flat_indices], c_t[flat_indices], ctx[beam_indices], seq_mask[beam_indices])

            # Mask outputs of invalid actions
            logit[is_valid == 0] = -float('inf')
            # # Mask outputs where agent can't move forward
            # no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]

            if mask_undo:
                masked_logit = logit.clone()
            else:
                masked_logit = logit

            log_probs = F.log_softmax(logit, dim=1).data

            # force ending if we've reached the max time steps
            # if t == self.episode_len - 1:
            #     action_scores = log_probs[:,self.end_index].unsqueeze(-1)
            #     action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
            # else:
            #action_scores, action_indices = log_probs.topk(min(beam_size, logit.size()[1]), dim=1)
            _, action_indices = masked_logit.data.topk(min(beam_size, logit.size()[1]), dim=1)
            action_scores = log_probs.gather(1, action_indices)
            assert action_scores.size() == action_indices.size()

            start_index = 0
            new_beams = []
            assert len(beams) == len(world_states)
            all_successors = []
            for beam_index, (beam, beam_world_states, beam_obs) in enumerate(zip(beams, world_states, obs)):
                successors = []
                end_index = start_index + len(beam)
                assert len(beam_world_states) == len(beam)
                assert len(beam_obs) == len(beam)
                if beam:
                    for inf_index, (inf_state, world_state, ob, action_score_row, action_index_row) in \
                        enumerate(zip(beam, beam_world_states, beam_obs, action_scores[start_index:end_index], action_indices[start_index:end_index])):
                        flat_index = start_index + inf_index
                        for action_score, action_index in zip(action_score_row, action_index_row):
                            if is_valid_numpy[flat_index, action_index] == 0:
                                continue
                            successors.append(
                                InferenceState(prev_inference_state=inf_state,
                                               world_state=world_state, # will be updated later after successors are pruned
                                               observation=ob, # will be updated later after successors are pruned
                                               flat_index=flat_index,
                                               last_action=action_index,
                                               last_action_embedding=all_u_t[flat_index, action_index].detach(),
                                               action_count=inf_state.action_count + 1,
                                               score=float(inf_state.score + action_score),
                                               h_t=None, c_t=None,
                                               last_alpha=alpha[flat_index].data)
                            )
                start_index = end_index
                # keep only the top beam_size successors per instance
                successors = sorted(successors, key=lambda t: t.score, reverse=True)[:beam_size]
                all_successors.append(successors)

            # advance the environment once for all surviving successors
            successor_world_states = [
                [inf_state.world_state for inf_state in successors]
                for successors in all_successors
            ]
            successor_env_actions = [
                [inf_state.last_action for inf_state in successors]
                for successors in all_successors
            ]
            successor_last_obs = [
                [inf_state.observation for inf_state in successors]
                for successors in all_successors
            ]
            successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
            successor_obs = self.env.observe(successor_world_states, beamed=True)
            all_successors = structured_map(lambda inf_state, world_state, obs: inf_state._replace(world_state=world_state, observation=obs),
                                            all_successors, successor_world_states, successor_obs, nested=True)

            # if all_successors[0]:
            #     print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))

            # move stop actions (and anything at the final step) into
            # completed; a beam is cleared once it has beam_size completions
            for beam_index, successors in enumerate(all_successors):
                new_beam = []
                for successor in successors:
                    if successor.last_action == 0 or t == self.episode_len - 1:
                        completed[beam_index].append(successor)
                    else:
                        new_beam.append(successor)
                if len(completed[beam_index]) >= beam_size:
                    new_beam = []
                new_beams.append(new_beam)

            beams = new_beams

            world_states = [
                [inf_state.world_state for inf_state in beam]
                for beam in beams
            ]
            obs = [
                [inf_state.observation for inf_state in beam]
                for beam in beams
            ]

            # Early exit if all ended
            if not any(beam for beam in beams):
                break

        trajs = []

        for this_completed in completed:
            assert this_completed
            this_trajs = []
            for inf_state in sorted(this_completed, key=lambda t: t.score, reverse=True)[:beam_size]:
                path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
                # this will have messed-up headings for (at least some) starting locations because of
                # discretization, so read from the observations instead
                ## path = [(obs.viewpointId, state.heading, state.elevation)
                ##         for state in path_states]
                trajectory = [path_element_from_observation(ob) for ob in path_observations]
                this_trajs.append({
                    'instr_id': path_observations[0]['instr_id'],
                    'instr_encoding': path_observations[0]['instr_encoding'],
                    'trajectory': trajectory,
                    'observations': path_observations,
                    'actions': path_actions,
                    'score': inf_state.score,
                    'scores': path_scores,
                    'attentions': path_attentions
                })
            trajs.append(this_trajs)
        traversed_lists = None # todo
        return trajs, completed, traversed_lists

    def state_factored_search(self, completion_size, successor_size, load_next_minibatch=True, mask_undo=False, first_n_ws_key=4):
        """Graph-search variant of beam search that caches one best state per
        world-state key (first_n_ws_key fields) instead of keeping a beam of
        sequences; also tracks the physical order in which states are
        expanded (traversed_lists)."""
        assert self.env.beam_size >= successor_size
        world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
        initial_obs = self.env.observe(world_states, beamed=True)
        batch_size = len(world_states)

        # get mask and lengths
        seq, seq_mask, seq_lengths = self._proc_batch(initial_obs, beamed=True)

        # Forward through encoder, giving initial hidden state and memory cell for decoder
        ctx,h_t,c_t = self.encoder(seq, seq_lengths)

        completed = []
        completed_holding = []
        for _ in range(batch_size):
            completed.append({})
            completed_holding.append({})

        # per instance: world-state key -> (best InferenceState, expanded?)
        state_cache = [
            {ws[0][0:first_n_ws_key]: (InferenceState(prev_inference_state=None,
                                                      world_state=ws[0],
                                                      observation=o[0],
                                                      flat_index=None,
                                                      last_action=-1,
                                                      last_action_embedding=self.decoder.u_begin.view(-1),
                                                      action_count=0,
                                                      score=0.0, h_t=h_t[i], c_t=c_t[i], last_alpha=None), True)}
            for i, (ws, o) in enumerate(zip(world_states, initial_obs))
        ]

        beams = [[inf_state for world_state, (inf_state, expanded) in sorted(instance_cache.items())]
                 for instance_cache in state_cache] # sorting is a noop here since each instance_cache should only contain one

        # traversed_lists = None
        # list of inference states containing states in order of the states being expanded
        last_expanded_list = []
        traversed_lists = []
        for beam in beams:
            assert len(beam) == 1
            first_state = beam[0]
            last_expanded_list.append(first_state)
            traversed_lists.append([first_state])

        def update_traversed_lists(new_visited_inf_states):
            # append, per instance, the physical walk connecting each newly
            # expanded state to the previously expanded one
            assert len(new_visited_inf_states) == len(last_expanded_list)
            assert len(new_visited_inf_states) == len(traversed_lists)
            for instance_index, instance_states in enumerate(new_visited_inf_states):
                last_expanded = last_expanded_list[instance_index]
                # todo: if this passes, shouldn't need traversed_lists
                assert last_expanded.world_state.viewpointId == traversed_lists[instance_index][-1].world_state.viewpointId
                for inf_state in instance_states:
                    path_from_last_to_next = least_common_viewpoint_path(last_expanded, inf_state)
                    # path_from_last should include last_expanded's world state as the first element, so check and drop that
                    assert path_from_last_to_next[0].world_state.viewpointId == last_expanded.world_state.viewpointId
                    assert path_from_last_to_next[-1].world_state.viewpointId == inf_state.world_state.viewpointId
                    traversed_lists[instance_index].extend(path_from_last_to_next[1:])
                    last_expanded = inf_state
                last_expanded_list[instance_index] = last_expanded

        # Do a sequence rollout and calculate the loss
        while any(len(comp) < completion_size for comp in completed):
            beam_indices = []
            u_t_list = []
            h_t_list = []
            c_t_list = []
            flat_obs = []
            for beam_index, beam in enumerate(beams):
                for inf_state in beam:
                    beam_indices.append(beam_index)
                    u_t_list.append(inf_state.last_action_embedding)
                    h_t_list.append(inf_state.h_t.unsqueeze(0))
                    c_t_list.append(inf_state.c_t.unsqueeze(0))
                    flat_obs.append(inf_state.observation)

            u_t_prev = torch.stack(u_t_list, dim=0)
            assert len(u_t_prev.shape) == 2
            f_t_list = self._feature_variables(flat_obs) # Image features from obs
            all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)
            h_t = torch.cat(h_t_list, dim=0)
            c_t = torch.cat(c_t_list, dim=0)

            assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
            h_t, c_t, alpha, logit, alpha_v = self.decoder(
                u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx[beam_indices], seq_mask[beam_indices])

            # Mask outputs of invalid actions
            logit[is_valid == 0] = -float('inf')
            # # Mask outputs where agent can't move forward
            # no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]

            if mask_undo:
                masked_logit = logit.clone()
            else:
                masked_logit = logit

            log_probs = F.log_softmax(logit, dim=1).data

            # force ending if we've reached the max time steps
            # if t == self.episode_len - 1:
            #     action_scores = log_probs[:,self.end_index].unsqueeze(-1)
            #     action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
            # else:
            #_, action_indices = masked_logit.data.topk(min(successor_size, logit.size()[1]), dim=1)
            _, action_indices = masked_logit.data.topk(logit.size()[1], dim=1) # todo: fix this
            action_scores = log_probs.gather(1, action_indices)
            assert action_scores.size() == action_indices.size()

            start_index = 0
            assert len(beams) == len(world_states)
            all_successors = []
            for beam_index, (beam, beam_world_states) in enumerate(zip(beams, world_states)):
                successors = []
                end_index = start_index + len(beam)
                assert len(beam_world_states) == len(beam)
                if beam:
                    for inf_index, (inf_state, world_state, action_score_row) in \
                        enumerate(zip(beam, beam_world_states, log_probs[start_index:end_index])):
                        flat_index = start_index + inf_index
                        for action_index, action_score in enumerate(action_score_row):
                            if is_valid_numpy[flat_index, action_index] == 0:
                                continue
                            successors.append(
                                InferenceState(prev_inference_state=inf_state,
                                               world_state=world_state, # will be updated later after successors are pruned
                                               observation=flat_obs[flat_index], # will be updated later after successors are pruned
                                               flat_index=None,
                                               last_action=action_index,
                                               last_action_embedding=all_u_t[flat_index, action_index].detach(),
                                               action_count=inf_state.action_count + 1,
                                               score=inf_state.score + action_score,
                                               h_t=h_t[flat_index], c_t=c_t[flat_index],
                                               last_alpha=alpha[flat_index].data)
                            )
                start_index = end_index
                successors = sorted(successors, key=lambda t: t.score, reverse=True)
                all_successors.append(successors)

            successor_world_states = [
                [inf_state.world_state for inf_state in successors]
                for successors in all_successors
            ]
            successor_env_actions = [
                [inf_state.last_action for inf_state in successors]
                for successors in all_successors
            ]
            successor_last_obs = [
                [inf_state.observation for inf_state in successors]
                for successors in all_successors
            ]
            successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
            all_successors = structured_map(lambda inf_state, world_state: inf_state._replace(world_state=world_state),
                                            all_successors, successor_world_states, nested=True)

            # if all_successors[0]:
            #     print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action,
all_successors[0][0].score)) assert len(all_successors) == len(state_cache) new_beams = [] for beam_index, (successors, instance_cache) in enumerate(zip(all_successors, state_cache)): # early stop if we've already built a sizable completion list instance_completed = completed[beam_index] instance_completed_holding = completed_holding[beam_index] if len(instance_completed) >= completion_size: new_beams.append([]) continue for successor in successors: ws_keys = successor.world_state[0:first_n_ws_key] if successor.last_action == 0 or successor.action_count == self.episode_len: if ws_keys not in instance_completed_holding or instance_completed_holding[ws_keys][0].score < successor.score: instance_completed_holding[ws_keys] = (successor, False) else: if ws_keys not in instance_cache or instance_cache[ws_keys][0].score < successor.score: instance_cache[ws_keys] = (successor, False) # third value: did this come from completed_holding? uncompleted_to_consider = ((ws_keys, inf_state, False) for (ws_keys, (inf_state, expanded)) in instance_cache.items() if not expanded) completed_to_consider = ((ws_keys, inf_state, True) for (ws_keys, (inf_state, expanded)) in instance_completed_holding.items() if not expanded) import itertools import heapq to_consider = itertools.chain(uncompleted_to_consider, completed_to_consider) ws_keys_and_inf_states = heapq.nlargest(successor_size, to_consider, key=lambda pair: pair[1].score) new_beam = [] for ws_keys, inf_state, is_completed in ws_keys_and_inf_states: if is_completed: assert instance_completed_holding[ws_keys] == (inf_state, False) instance_completed_holding[ws_keys] = (inf_state, True) if ws_keys not in instance_completed or instance_completed[ws_keys].score < inf_state.score: instance_completed[ws_keys] = inf_state else: instance_cache[ws_keys] = (inf_state, True) new_beam.append(inf_state) if len(instance_completed) >= completion_size: new_beams.append([]) else: new_beams.append(new_beam) beams = new_beams # Early exit if all 
ended if not any(beam for beam in beams): break world_states = [ [inf_state.world_state for inf_state in beam] for beam in beams ] successor_obs = self.env.observe(world_states, beamed=True) beams = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs), beams, successor_obs, nested=True) update_traversed_lists(beams) completed_list = [] for this_completed in completed: completed_list.append(sorted(this_completed.values(), key=lambda t: t.score, reverse=True)[:completion_size]) completed_ws = [ [inf_state.world_state for inf_state in comp_l] for comp_l in completed_list ] completed_obs = self.env.observe(completed_ws, beamed=True) completed_list = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs), completed_list, completed_obs, nested=True) # TODO: consider moving observations and this update earlier so that we don't have to traverse as far back update_traversed_lists(completed_list) # TODO: sanity check the traversed lists here trajs = [] for this_completed in completed_list: assert this_completed this_trajs = [] for inf_state in this_completed: path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state) # this will have messed-up headings for (at least some) starting locations because of # discretization, so read from the observations instead ## path = [(obs.viewpointId, state.heading, state.elevation) ## for state in path_states] trajectory = [path_element_from_observation(ob) for ob in path_observations] this_trajs.append({ 'instr_id': path_observations[0]['instr_id'], 'instr_encoding': path_observations[0]['instr_encoding'], 'trajectory': trajectory, 'observations': path_observations, 'actions': path_actions, 'score': inf_state.score, 'scores': path_scores, 'attentions': path_attentions }) trajs.append(this_trajs) # completed_list: list of lists of final inference states corresponding to the candidates, one list per instance # traversed_lists: list of "physical 
states" that the robot has explored, one per instance return trajs, completed_list, traversed_lists def set_beam_size(self, beam_size): if self.env.beam_size < beam_size: self.env.set_beam_size(beam_size) self.beam_size = beam_size def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, beam_size=1): ''' Evaluate once on each instruction in the current environment ''' if not allow_cheat: # permitted for purpose of calculating validation loss only assert feedback in ['argmax', 'sample'] # no cheating by using teacher at test time! self.feedback = feedback if use_dropout: self.encoder.train() self.decoder.train() else: self.encoder.eval() self.decoder.eval() self.set_beam_size(beam_size) return super(Seq2SeqAgent, self).test() def train(self, encoder_optimizer, decoder_optimizer, n_iters, feedback='teacher'): ''' Train for a given number of iterations ''' assert all(f in self.feedback_options for f in feedback.split("+")) self.feedback = feedback self.encoder.train() self.decoder.train() self.losses = [] it = range(1, n_iters + 1) try: import tqdm it = tqdm.tqdm(it) except: pass for _ in it: encoder_optimizer.zero_grad() decoder_optimizer.zero_grad() self._rollout_with_loss() self.loss.backward() encoder_optimizer.step() decoder_optimizer.step() def _encoder_and_decoder_paths(self, base_path): return base_path + "_enc", base_path + "_dec" def save(self, path): ''' Snapshot models ''' encoder_path, decoder_path = self._encoder_and_decoder_paths(path) torch.save(self.encoder.state_dict(), encoder_path) torch.save(self.decoder.state_dict(), decoder_path) def load(self, path, **kwargs): ''' Loads parameters (but not training state) ''' encoder_path, decoder_path = self._encoder_and_decoder_paths(path) self.encoder.load_state_dict(torch.load(encoder_path, **kwargs)) self.decoder.load_state_dict(torch.load(decoder_path, **kwargs))
# Copyright 2021 Ibrahim Ayed, Emmanuel de Bézenac, Mickaël Chen, Jean-Yves Franceschi, Sylvain Lamprier, Patrick Gallinari

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import functools
import math
import os

import torch
import numpy as np
from PIL import Image
from torch import distributions as D
from torch.utils.data import IterableDataset, TensorDataset


fixed_datasets = ['two_couples_2d', 'two_singles_1d', 'modes_1d', 'modes_1d_overlap']
sampled_distributions = ['gaussian', 'image']
distributions = sampled_distributions + fixed_datasets


class SampleDataset(IterableDataset):
    """Endless iterable dataset yielding transformed samples from a distribution.

    Each `__next__` draws one fresh sample and applies `transform.apply`;
    iteration never terminates.
    """

    def __init__(self, distribution, transform):
        # FIX: the original called `super(SampleDataset).__init__()`, which
        # creates an *unbound* super object and never runs the base-class
        # initializer; the zero-argument form initializes IterableDataset
        # properly.
        super().__init__()
        self.distribution = distribution
        self.transform = transform

    def __iter__(self):
        return self

    def __next__(self):
        return self.transform.apply(self.distribution.sample())


def distribution_factory(config, config_attr, source):
    """Build the torch distribution described by the configuration.

    Args:
        config: global configuration object (provides `generator`, `data_dim`,
            `gen_in_dim`, `data_path`).
        config_attr: accessor returning the prefixed ('in_'/'out_') attribute
            of `config`, or None when absent.
        source: True when building the source-side distribution.

    Returns:
        A `torch.distributions` object: a single component when
        `nb_components == 1`, otherwise a `MixtureSameFamily`.

    Raises:
        ValueError: if the configured distribution name is unknown.
    """
    distribution = config_attr('distribution')
    n = config_attr('nb_components')
    if not source or not config.generator:
        dim = config.data_dim
    else:
        # Generator input: keep leading singleton dims, use the latent size last.
        dim = [1] * (len(config.data_dim) - 1) + [config.gen_in_dim]
    assert distribution in distributions and n > 0
    mixture_distribution = D.Categorical(torch.ones(n))
    if distribution == 'gaussian':
        mean = torch.tensor(config_attr('loc'))
        std = torch.tensor(config_attr('scale'))
        # loc/scale are either scalar (broadcast) or per-dimension.
        assert (len(std) == 1 or len(std) == dim[-1]) and (len(mean) == 1 or len(mean) == dim[-1])
        if n == 1:
            component_distribution = D.Normal(mean * torch.ones(dim), std)
        else:
            # Mixtures are only supported in 1D (line of modes) or 2D (circle).
            assert len(dim) == 1 and dim[0] <= 2
            radius = config_attr('mix_dev')
            if dim[0] == 1:
                # Evenly spaced centers on a line, symmetric about 0.
                centers = (torch.arange(n) - (n - 1) / 2).unsqueeze(1)
            else:
                # FIX: removed a duplicated `radius = config_attr('mix_dev')`
                # assignment (already set above).
                # Centers evenly spaced on the unit circle.
                delta_theta = 2 * math.pi / n
                centers_x = torch.cos(delta_theta * torch.arange(n))
                centers_y = torch.sqrt(1 - centers_x ** 2) * torch.sign(torch.arange(n) - n / 2)
                centers = torch.stack([centers_x, centers_y], dim=1)
            component_distribution = D.Independent(D.Normal(mean + radius * centers, std), 1)
    elif distribution == 'image':
        # Interpret a greyscale image as a 2D density: one Gaussian per pixel,
        # weighted by (inverted) pixel intensity.
        assert config.generator or (len(dim) == 1 and dim[0] == 2)
        loc = config_attr('loc')
        scale = config_attr('scale')
        assert len(scale) <= 2 and len(loc) <= 2
        if len(scale) == 1:
            scale *= 2
        if len(loc) == 1:
            loc *= 2
        img_path = os.path.join(config.data_path, config_attr('img_name'))
        image = torch.from_numpy(np.array((Image.open(img_path).convert('L'))))  # To greyscale
        h, w = image.size()
        xx = loc[0] - scale[0] * torch.linspace(-1, 1, w)
        yy = loc[1] + scale[1] * torch.linspace(-1, 1, h)
        xx, yy = torch.meshgrid(xx, yy)
        means = torch.stack([xx.flatten(), yy.flatten()], dim=1)
        std = torch.tensor([scale[0] / w, scale[1] / h])
        image = (image.max() - image).T.flip(1).flipud()  # White is zero probability
        probs = image.flatten()
        # Build mixture distribution representing the image
        n = len(probs)
        assert n > 1
        mixture_distribution = D.Categorical(probs)
        component_distribution = D.Independent(D.Normal(means, std), 1)
    else:
        raise ValueError(f'No distribution named `{distribution}`')
    if n == 1:
        return component_distribution
    else:
        return D.MixtureSameFamily(mixture_distribution, component_distribution)


def transform_collate_fn(transform, batch):
    """Collate raw samples, apply `transform`, and return a tensor batch."""
    return torch.from_numpy(np.asarray(transform.apply(torch.stack([batch[i] for i in range(len(batch))]))))


def tensor_dataset_collate_fn(batch):
    """Collate TensorDataset items (1-tuples) into a stacked tensor."""
    return torch.stack([batch[i][0] for i in range(len(batch))])


def get_dataset(config, transform, source):
    """Create the (dataset, collate_fn) pair configured for one side.

    Args:
        config: global configuration object.
        transform: transform with an `apply` method run on every sample.
        source: True for the source ('in_') side, False for the target ('out_').

    Returns:
        Tuple of (torch Dataset, collate function).

    Raises:
        NotImplementedError: for an unrecognized distribution name.
    """
    def config_attr(attr):
        return getattr(config, prefix + attr, None)
    prefix = 'in_' if source else 'out_'
    nb_samples = config_attr('nb_samples')
    assert nb_samples >= 0
    assert ((config_attr('batch_size') > 0 and config.gen_nb_z > 0) or config_attr('nb_samples') > 0)
    if config_attr('distribution') in sampled_distributions:
        distribution = distribution_factory(config, config_attr, source)
        if nb_samples == 0:
            # nb_samples == 0 means "stream": draw fresh samples forever.
            return SampleDataset(distribution, transform), functools.partial(transform_collate_fn, transform)
        else:
            transformed_samples = torch.from_numpy(np.asarray(transform.apply(distribution.sample([nb_samples]))))
            return TensorDataset(transformed_samples), tensor_dataset_collate_fn
    elif config_attr('distribution') in fixed_datasets:
        data = create_fixed_dataset(config, config_attr, source)
        transformed_samples = torch.from_numpy(np.asarray(transform.apply(data)))
        return TensorDataset(transformed_samples), tensor_dataset_collate_fn
    raise NotImplementedError(config_attr('distribution'))


def create_fixed_dataset(config, config_attr, source):
    """Return the hard-coded point set for one of the fixed toy datasets.

    Raises:
        NotImplementedError: for an unrecognized fixed-dataset name.
    """
    if config_attr('distribution') == 'two_couples_2d':
        if source:
            return np.array([[1., 1], [1, -1.]])
        else:
            return np.array([[-1., -1], [-1, 1]])
    if config_attr('distribution') == 'two_singles_1d':
        if source:
            return np.array([[-1.,],])
        else:
            return np.array([[1.,],])
    if config_attr('distribution') == 'modes_1d':
        if source:
            return np.array([[-1.2,], [-1.34,], [-0.8,], [-0.7,], [-0.3], ]) * 1.5
        else:
            return np.array([[0.6,], [1.,], [1.23,], [1.32,], [0.2,]]) * 1.5
    if config_attr('distribution') == 'modes_1d_overlap':
        if source:
            return np.array([[-1.2,], [-1.34,], [-0.8,], [-0.7,], [-0.2], ]) * 1.5
        else:
            return np.array([[0.6,], [1.,], [1.23,], [1.32,], [-1.1,], [-1.,], [-1.8,]]) * 1.5
    raise NotImplementedError(config_attr('distribution'))
import numpy as np
from src.functions import sigmoid, softmax, relu
from src.estimators import mse, cross_entropy
from src.optimizers import adam_default, momentum_default
from src.progressive import Progressive
from random import randint
from utilities import get_device_data, scale_output_0_1, get_accuracy
import pandas as pd
#from multiprocessing import freeze_support
#from celosia import Celosia
from sklearn.preprocessing import MinMaxScaler


def get_error_progressive(name, inputs, outputs):
    """Train a small progressive network on (inputs, outputs), print its
    accuracy, and return the list of training errors (one per epoch batch)."""
    n_in = inputs.shape[1]    # number of columns in the input
    n_out = outputs.shape[1]  # number of columns in the output
    weights = None            # None means randomly initialize weights

    net = Progressive(name, mse, 1, None)
    net.add_layer(4, relu, 0.0, weights, n_in)   # input layer
    net.add_layer(6, relu, 0.0, weights)         # hidden layers
    net.add_layer(6, relu, 0.0, weights)
    net.add_layer(n_out, sigmoid, 0.0, weights)  # output layer

    errors = [net.train(inputs, outputs, n_epochs, 4000, debug=True)
              for n_epochs in [100]]

    predictions = scale_output_0_1(net.output(inputs))
    (accuracy, fp, fn) = get_accuracy(outputs, predictions)
    print ('name={}, accuracy={}, false-positive={}, false-negative={}'.format(name, accuracy, fp, fn))
    return errors


def evaluate(name, device):
    """Fetch one device's data and run the progressive-network evaluation."""
    (X, Y) = get_device_data(device, 2000, 2000, anomaly_label=0)
    print (get_error_progressive(name, X, Y))


devices = [
    ('Danmini', 'Danmini_Doorbell'),
    ('Ecobee', 'Ecobee_Thermostat'),
    ('Ennio', 'Ennio_Doorbell'),
    ('Philips B120N10', 'Philips_B120N10_Baby_Monitor'),
    ('Provision PT737E', 'Provision_PT_737E_Security_Camera'),
    ('Provision PT838', 'Provision_PT_838_Security_Camera'),
    ('Samsung SNH1011', 'Samsung_SNH_1011_N_Webcam'),
    ('SimpleHome XCS71002', 'SimpleHome_XCS7_1002_WHT_Security_Camera'),
    ('SimpleHome XCS71003', 'SimpleHome_XCS7_1003_WHT_Security_Camera'),
]
# Deliberately narrowed to a single device for this run.
devices = [('Danmini', 'Danmini_Doorbell')]


def main():
    for label, dataset_name in devices:
        evaluate(label, dataset_name)


if __name__ == '__main__':
    #freeze_support()
    main()
from transformers import Trainer
from transformers.trainer_callback import TrainerState
import datasets
import os
import torch
from torch.utils.data import RandomSampler, Sampler, Dataset, DataLoader
from typing import Iterator, Optional, Sequence, List, TypeVar, Generic, Sized
import numpy as np
import math
from curriculum_utils import CurriculumSamplerHyperbole, CurriculumSamplerDifficultyBiased
import matplotlib.pyplot as plt
from collections import Counter


class MyDataset(Dataset):
    """Toy dataset whose i-th item is simply the index i (0..n-1)."""

    def __init__(self, n):
        self.n = n
        self.x = np.arange(n)

    def __len__(self):
        return self.n

    def __getitem__(self, i):
        return self.x[i]


def show_hist_hyperbole(dataset_size: int, num_train_epochs: int, n_see: int, n_bins: int, window_width: int, ro, id=-1):
    """Visualize how CurriculumSamplerHyperbole covers a sorted dataset.

    Saves one histogram per sampling chunk under movie/, then shows the
    overall view-count histogram and prints min/max/mean views per sample.
    """
    state = TrainerState(num_train_epochs=num_train_epochs, epoch=0)
    dataset = MyDataset(dataset_size)
    sampler = CurriculumSamplerHyperbole(dataset, state, n_bins, window_width, n_see, ro)
    samples = list(sampler)

    chunk = len(dataset) // n_bins
    bin_edges = list(range(0, dataset_size, math.ceil(dataset_size / n_bins))) + [dataset_size]
    step = 0
    for i in range(0, len(samples), chunk):
        plt.cla()
        plt.clf()
        plt.title(f'Number of views. step #{i}')
        plt.xlabel(f'samples (indices in sorted dataset). n_bins={n_bins}, window_width={window_width}, ro={ro}')
        plt.ylabel('number')
        plt.hist(samples[i: i + chunk], bin_edges)
        plt.savefig(f'movie/hist_{step:03d}.png')
        step += 1

    plt.cla()
    plt.clf()
    plt.title(f'Final number of views. ro = {ro}')
    plt.hist(samples, bins=n_bins)
    plt.show()

    views = Counter(samples).values()
    print(min(views), max(views), sum(views) / dataset_size)


def show_hist_difficulty_biased(dataset_size: int, num_train_epochs: int, n_see: int, n_bins: int):
    """Show the overall view-count histogram of CurriculumSamplerDifficultyBiased
    and print min/max/mean views per sample."""
    state = TrainerState(num_train_epochs=num_train_epochs, epoch=0)
    dataset = MyDataset(dataset_size)
    sampler = CurriculumSamplerDifficultyBiased(dataset, state, n_bins, n_see)
    samples = list(sampler)

    plt.cla()
    plt.clf()
    plt.title(f'Final number of views')
    plt.hist(samples, bins=100)
    plt.show()

    views = Counter(samples).values()
    print(min(views), max(views), sum(views) / dataset_size)


if __name__ == '__main__':
    show_hist_hyperbole(
        dataset_size=100000,
        n_see=3,
        num_train_epochs=1,
        n_bins=10,
        window_width=3,
        ro=0.5
    )
import pyqtgraph as pg from pyqtgraph.Qt import QtCore, QtGui import numpy as np import time import serial import threading sample_amount = 2000 time_buffer = [0 for x in range(sample_amount)] data_buffer = [0 for x in range(sample_amount)] trigger_buffer = [0 for x in range(sample_amount)] full_samples = [] ser = serial.Serial() ser.baudrate = 2000000 ser.port = 'COM8' ser.open() stop_plot = False current_sample = 0 def get_data(): global sample_amount, stop_plot, time_buffer, data_buffer, trigger_buffer, current_sample time.sleep(3) ser.flushInput() full_samples.append([]) full_samples[current_sample-1].extend(([], [])) while 1: try: val = ser.readline() val = val[:-2].decode('utf-8').split(',') if len(data_buffer) > sample_amount: data_buffer.pop(0) time_buffer.pop(0) trigger_buffer.pop(0) time_sample = int(val[0]) / 1000. data_sample = int(val[1]) * 5. / 1023. trigger_sample = int(val[2]) time_buffer.append(time_sample) data_buffer.append(data_sample) trigger_buffer.append(trigger_sample) full_samples[current_sample][0].append(time_sample) full_samples[current_sample][1].append(data_sample) full_samples[current_sample][2].append(trigger_sample) except: pass if stop_plot: ser.close() break pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'b') win = pg.GraphicsWindow() win.setWindowTitle('test') graph = win.addPlot() curve = graph.plot(time_buffer, data_buffer) def update(): curve.setData(time_buffer, data_buffer) data = threading.Thread(target=get_data) data.start() timer = pg.QtCore.QTimer() timer.timeout.connect(update) timer.start(5) if __name__ == '__main__': import sys if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): QtGui.QApplication.instance().exec_()
import tempfile import unittest from pathlib import Path import torch import numpy as np import SimpleITK as sitk from ..utils import TorchioTestCase from torchio.data import io class TestIO(TorchioTestCase): """Tests for `io` module.""" def setUp(self): super().setUp() self.write_dicom() string = ( '1.5 0.18088 -0.124887 0.65072 ' '-0.20025 0.965639 -0.165653 -11.6452 ' '0.0906326 0.18661 0.978245 11.4002 ' '0 0 0 1 ' ) tensor = torch.from_numpy(np.fromstring(string, sep=' ').reshape(4, 4)) self.matrix = tensor def write_dicom(self): self.dicom_dir = self.dir / 'dicom' self.dicom_dir.mkdir(exist_ok=True) self.dicom_path = self.dicom_dir / 'dicom.dcm' self.nii_path = self.get_image_path('read_image') writer = sitk.ImageFileWriter() writer.SetFileName(str(self.dicom_path)) image = sitk.ReadImage(str(self.nii_path)) image = sitk.Cast(image, sitk.sitkUInt16) image = image[0] # dicom reader supports 2D only writer.Execute(image) def test_read_image(self): # I need to find something readable by nib but not sitk io.read_image(self.nii_path) io.read_image(self.nii_path, itk_first=True) def test_read_dicom_file(self): io.read_image(self.dicom_path) def test_read_dicom_dir(self): io.read_image(self.dicom_dir) def test_dicom_dir_missing(self): with self.assertRaises(FileNotFoundError): io._read_dicom('missing') def test_dicom_dir_no_files(self): empty = self.dir / 'empty' empty.mkdir() with self.assertRaises(FileNotFoundError): io._read_dicom(empty) def write_read_matrix(self, suffix): out_path = self.dir / f'matrix{suffix}' io.write_matrix(self.matrix, out_path) matrix = io.read_matrix(out_path) assert torch.allclose(matrix, self.matrix) def test_matrix_itk(self): self.write_read_matrix('.tfm') self.write_read_matrix('.h5') def test_matrix_txt(self): self.write_read_matrix('.txt')
""" from __future__ import print_function import torch from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn import torch.optim as optim import os,argparse import numpy as np class EvoCNNModel(nn.Module): def __init__(self): super(EvoCNNModel, self).__init__() #ANCHOR-generated_init def forward(self, x): #ANCHOR-generate_forward out = out.view(out.size(0), -1) out = self.linear(out) return out """
import numpy as np
from copy import deepcopy
from matchingmarkets.algorithms.basic import arbitraryMatch

"""
Meta Algorithms define the time of matching
They also dictate who gets passed into a matching algorithm
Inputs are a Market object
output is a dict of directed matches
"""


def meta_always(Market, match=arbitraryMatch, verbose=False, **kwargs):
    """Run the matching algorithm on every agent, every period."""
    if verbose:
        print("meta_always")
    return match(Market, Market.Agents, verbose=verbose, **kwargs)


def meta_periodic(Market, match=arbitraryMatch, period=1, verbose=False, **kwargs):
    """Run the matching algorithm on every agent once every `period` periods."""
    if verbose:
        print("meta_periodic")
    # Normalize: values in (-1, 1) mean "every period"; negative periods are
    # interpreted by their magnitude.  (Conditions are mutually exclusive.)
    if -1 < period < 1:
        period = 1
    elif period < -1:
        period = -period
    if Market.time % int(period) != 0:
        return dict()
    return match(Market, Market.Agents, verbose=verbose, **kwargs)


def meta_patient(Market, match=arbitraryMatch, a=np.inf, verbose=False, **kwargs):
    """Patient(a) algorithm from Akbarpour et al. (2014).

    With the default a=inf this is the fully patient rule: only agents that
    are critical (or whose sojourn equals `a`) are offered for matching.
    """
    if verbose:
        print("meta_patient")
    candidates = [ag for ag in Market.Agents
                  if ag.is_critical or ag.sojourn == a]
    return match(Market, candidates, verbose=verbose, **kwargs)


def meta_greedy(Market, match=arbitraryMatch, verbose=False, **kwargs):
    """Run the matching algorithm only on agents entering the market now."""
    if verbose:
        print("meta_greedy")
    newcomers = [ag for ag in Market.Agents if ag.sojourn == 0]
    return match(Market, newcomers, verbose=verbose, **kwargs)


def meta_agents_critical(Market, match=arbitraryMatch, agents=5, num_critical=5, verbose=False, **kwargs):
    """Match everyone when the market holds exactly `agents` agents; otherwise
    match the critical agents when more than `num_critical` are critical.

    NOTE(review): the population test uses `==`, so a market that jumps past
    `agents` agents in one period is never matched by that branch -- confirm
    whether `>=` was intended before changing it.
    """
    if verbose:
        print("meta_agents_critical")
    if len(Market.Agents) == agents:
        return match(Market, Market.Agents, verbose=verbose, **kwargs)
    if Market.critical() > num_critical:
        critical_agents = [ag for ag in Market.Agents if ag.is_critical]
        return match(Market, critical_agents, verbose=verbose, **kwargs)
    return dict()
''' testing hysteretic_q learning on the boutilier ''' from matplotlib import pyplot as plt import numpy as np from environments.env_boutilier import Boutilier from learning_algorithms.hysteretic_q_boutilier import HystereticAgentBoutilier episodes = 1000 epochs = 300 exp_rate = 0.01 exp_rate_decay = 0.999 def run_episode(): env = Boutilier() learning = HystereticAgentBoutilier(environment=env, exploration_rate=exp_rate) for i in range(epochs): learning.step() reward_1, reward_2 = learning.get_rewards() rewards_1, rewards_2 = learning.get_averaged_rewards() rewards_1 = np.asarray(rewards_1) rewards_2 = np.asarray(rewards_2) return rewards_2 + rewards_1 if __name__ == "__main__": overall = np.zeros(shape=(epochs - 1,)) for episode in range(episodes): overall += run_episode() print("Episode ", episode) plt.plot(overall / episodes) plt.xlabel("Epochs") plt.ylabel("Averaged Rewards (Averaged over all episodes)") plt.show()
__author__ = 'mangalbhaskar' __version__ = '2.0' """ ## Description: # -------------------------------------------------------- # Utility functions # - Uses 3rd paty lib `arrow` for timezone and timestamp handling # - http://zetcode.com/python/arrow/ # -------------------------------------------------------- # Copyright (c) 2020 mangalbhaskar # Licensed under [see LICENSE for details] # Written by mangalbhaskar # -------------------------------------------------------- """ import os import errno import sys import json import uuid import random import colorsys import string import numpy as np import pandas as pd import yaml import arrow from easydict import EasyDict as edict import logging log = logging.getLogger('__main__.'+__name__) # print("common::log.info:{}".format(log.info)) # print("common::log.parent:{}".format(log.parent)) _date_format_ = 'YYYY-MM-DD HH:mm:ss ZZ' _timestamp_format_ = "{:%d%m%y_%H%M%S}" class NumpyEncoder(json.JSONEncoder): """Special json encoder for numpy types Ref: * https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable """ def default(self, obj): if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64)): return int(obj) elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)): return float(obj) elif isinstance(obj,(np.ndarray,)): #### This is the fix return obj.tolist() return json.JSONEncoder.default(self, obj) def numpy_to_json(json_input): """As Numpy Array Is Not Json serializable, it converts the numpy array to serialiazable json string It uses the custom class: NumpyEncoder Ref: * https://stackoverflow.com/questions/17043860/python-dump-dict-to-json-file * https://stackoverflow.com/questions/32468278/list-as-an-entry-in-a-dict-not-json-serializable * https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable """ json_str = json.dumps(json_input, cls=NumpyEncoder) return json_str def now(): 
"""returns the date with timezone in consistent way. This to be used specifically when creating serializable objects with dates to store in the database in particular. """ now = arrow.now() date_time_zone = now.format(_date_format_) return date_time_zone def timestamp(): """returns the timestamp in the `_timestamp_format_` format """ import datetime ts = (_timestamp_format_).format(datetime.datetime.now()) return ts def modified_on(filepath, ts=False): """returns the last modified timestamp with timezone. Ref: * https://stackoverflow.com/questions/237079/how-to-get-file-creation-modification-date-times-in-python """ modified_on = arrow.Arrow.fromtimestamp(os.stat(filepath).st_mtime).format(_date_format_) if ts: modified_on = timestamp_from_datestring(modified_on) return modified_on def date_from_timestamp(ts): """returns the date object from the given string date in the `_date_format_` TODO: some warning to the call to get function for api change in the future release """ ar = arrow.get(ts, _date_format_) dt = ar.date() return dt def timestamp_from_datestring(dt): """returns the timestamp in the `_timestamp_format_` given the date string in the `_date_format_` format TODO: some warning to the call to get function for api change in the future release """ ar = arrow.get(dt, _date_format_) ts = (_timestamp_format_).format(ar.datetime) return ts def log(text, array=None): """Prints a text message. And, optionally, if a Numpy array is provided it prints it's shape, min, and max values. Copyright (c) 2017 Matterport, Inc. 
Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ if array is not None: text = text.ljust(25) text += ("shape: {:20} ".format(str(array.shape))) if array.size: text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max())) else: text += ("min: {:10} max: {:10}".format("","")) text += " {}".format(array.dtype) log.info(text) def add_path(path): if path not in sys.path: sys.path.insert(0, path) def yaml_load(filepath): """Safe load YAML file as easy dictionary object """ fc = None with open(filepath, 'r') as f: # fc = edict(yaml.load(f)) fc = edict(yaml.safe_load(f)) return fc def yaml_safe_dump(filepath, o): """Create yaml file from python dictionary object """ with open(filepath,'w') as f: yaml.safe_dump(o, f, default_flow_style=False) def json_dump(filepath, o): """Create json file from python dictionary object """ with open(filepath,'w') as f: f.write(json.dumps(o)) def get_write_to_file_fn(file_ext): """Returns the appropriate write to file function based on the file extension """ if file_ext == '.json': writefn = json_dump elif file_ext == '.yml' or file_ext == '.yaml': writefn = yaml_safe_dump else: writefn = None return writefn def loadcfg(cfgfile): ## Configurations datacfg = yaml_load(cfgfile) # log.info("datacfg: {}".format(datacfg)) return datacfg def mkdir_p(path): """ mkdir -p` linux command functionality References: * https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python """ try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def read_csv_line(filepath): """Read CSV Line as a generator. Efficiently handles for large csv files. 
    """
    delimiter = ','
    with open(filepath, 'r') as f:
        gen = (i for i in f)
        # next(gen)
        # the first line (header) is yielded like every other row; the
        # commented next(gen) above would have skipped it instead
        yield next(gen).rstrip('\n').split(delimiter)
        for line in gen:
            # log.info(line)
            yield line.rstrip('\n').split(delimiter)


def createUUID(prefix='uid'):
    """Create uuid4 specific UUID which uses pseudo random generators.
    Further, uuid is prefixed using 3 letter acronym to visually differentiate
    among them

    Some prefix used when generating UUIDs are:
    ant - annotation
    img - image
    lbl - label / cat - category
    train - for training
    predict - for prediction
    evaluate - for evaluation

    :param prefix: short tag prepended as '<prefix>-<uuid4>'
    :return: prefixed uuid string

    References:
    * https://pynative.com/python-uuid-module-to-generate-universally-unique-identifiers/
    * https://stackoverflow.com/questions/703035/when-are-you-truly-forced-to-use-uuid-as-part-of-the-design/786541
    """
    # earlier time+random hex scheme kept for reference:
    # return prefix+'-'+str(format(int(time.time()),'02x') + format(math.floor(1e7*random.random()),'02x'))
    return prefix+'-'+str(uuid.uuid4())


def get_hash(dictionary):
    """Takes a dictionary as input and provides a unique hash value based on the values in the dictionary.
All the values in the dictionary after converstion to string are concatenated and then the HEX hash is generated :param dictionary: A python dictionary :return: A HEX hash Credit: https://gitlab.com/calledbymountains/cvdatasetmanagement/blob/master/utils/gen_utils.py """ if not isinstance(dictionary, dict): raise ValueError('The argument must be ap ython dictionary.') str_input = reduce(lambda x, y: str(x) + str(y), list(dictionary.values())) str_input = ''.join(random.sample(str_input, len(str_input))) hash_object = hashlib.shake_128(str_input.encode()) output = hash_object.hexdigest(12) return output def merge_csv(files): """Utility function to concatenate multiple `.csv` files into single `.csv` file References: * https://stackoverflow.com/questions/2512386/how-to-merge-200-csv-files-in-python """ merged_files = {} if len(files) > 0: clist = [ pd.read_csv(f) for f in files ] merged_files = pd.concat(clist) ## use only for quick testing # merged_files.to_csv( "merged_files-STATS.csv", index=False ) return merged_files def merge_dict(o, keys_to_uppercase=False): """Utility function to assist in merging python dictionary objects. 
It uses python way which is tricky to do so, by separating keys and values of dictionary objection into separate data structure """ print("log: {}".format(log)) # log.info("-------") K = [] V = [] if len(o) > 0: for d in o: k = list(d.keys()) if keys_to_uppercase: k = [ key.upper() for key in k ] v = list(d.values()) # log.info("len(k), len(v): {}, {}".format(len(k), len(v))) K += k V += v # log.info("K: {}".format(K)) # log.info("K length: {}".format(len(K))) # log.info("V length: {}".format(len(V))) ## mergedjson is not util later point qw K,V provides for greater flexibility ## and let the caller take care of merging using zip # mergedjson = dict(zip(K,V)) return dict(zip(K,V)) def merge_dicts(*dict_args): """ Credits: https://stackoverflow.com/a/26853961 Ref: https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression Given any number of dicts, shallow copy and merge into a new dict, precedence goes to key value pairs in latter dicts. Performance Analysis: import timeit min(timeit.repeat(lambda: merge_dicts(x, y))) """ result = {} for dictionary in dict_args: # result.update(dictionary) @@ original result.update(dictionary.copy()) return result def merge_json(files): """Merge the json files. 
    It uses python way which is tricky to do so, by separating keys and values
    of json file into separate data structure

    :param files: iterable of json file paths
    :return: tuple ``(K, V)`` of parallel key/value lists (NOT a dict --
        unlike merge_dict, the caller zips/merges these itself)
    """
    K = []
    V = []
    if len(files) > 0:
        for f in files:
            with open(f,'r') as fr:
                d = json.load(fr)
                k = list(d.keys())
                v = list(d.values())
                # NOTE(review): `log` here is presumably a module-level logger;
                # verify it is not shadowed by the `def log` helper in this file
                log.debug("len(k), len(v): {}, {}".format(len(k), len(v)))
                K += k
                V += v
    # log.info("K: {}".format(K))
    log.debug("K length: {}".format(len(K)))
    log.debug("V length: {}".format(len(V)))
    ## mergedjson is not util later point qw K,V provides for greater flexibility
    ## and let the caller take care of merging using zip
    # mergedjson = dict(zip(K,V))
    return K,V


def get_only_files_in_dir(path):
    """returns file in a director as a generator

    Yields absolute-joined paths of regular files only (directories skipped).

    Usage:
    list( get_only_files_in_dir(path) )
    """
    for file in os.listdir(path):
        if os.path.isfile(os.path.join(path, file)):
            yield os.path.join(path, file)


def getBasePath(path):
    """Ensures the last Directory of a path in a consistent ways

    Base path is returned for a file or path
    It takes care of trailing slash for a file or a directory

    Test Cases and Expected Results:
    >>> p='$HOME/Documents/ai-ml-dl-gaze/AIML_Annotation/ods_job_230119/annotations/hmddb/140219_140856/ANNOTATIONS_140219_140856.json'
    >>> p1='$HOME/Documents/ai-ml-dl-gaze/AIML_Annotation/ods_job_230119/annotations/hmddb/140219_140856/'
    >>> p2='$HOME/Documents/ai-ml-dl-gaze/AIML_Annotation/ods_job_230119/annotations/hmddb/140219_140856'

    p3: if file actually exists
    >>> p3='$HOME/Documents/ai-ml-dl-gaze/AIML_Annotation/ods_job_230119/annotations/hmddb/140219_140856/ANNOTATIONS_140219_140856.json/'

    All of the above cases should return same base path:
    >>> $HOME/Documents/ai-ml-dl-gaze/AIML_Annotation/ods_job_230119/annotations/hmddb/140219_140856

    p4: if file actually does NOT exists
    >>> p4='$HOME/Documents/ai-ml-dl-gaze/AIML_Annotation/ods_job_230119/annotations/hmddb/140219_140856/ANNOTATIONS_140219_140856.jsonasdas/'
    >>>
$HOME/Documents/ai-ml-dl-gaze/AIML_Annotation/ods_job_230119/annotations/hmddb/140219_140856/ANNOTATIONS_140219_140856.jsonasdas """ if os.path.isdir(path): base_path = os.path.join(path,'') else: base_path = os.path.join(os.path.dirname(path),'') ## _bp = base_path.rstrip(os.path.sep) if os.path.isfile(_bp): _bp = getBasePath(_bp) return _bp def random_colors(N, bright=True): """Generate random colors. To get visually distinct colors, generate them in HSV space then convert to RGB. """ brightness = 1.0 if bright else 0.7 hsv = [(i / N, 1, brightness) for i in range(N)] colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) random.shuffle(colors) return colors def dict_keys_to_lowercase_with_filter(d, fltr=None): """Converts all the dictionary keys to lowercase in recursive function call expect for the specific fltr key TODO: fltr as an list """ d_mod = { k if fltr==k else k.lower() :d[k] if not isinstance(d[k],dict) else dict_keys_to_lowercase_with_filter(d[k], fltr) for k in d.keys() } return d_mod def dict_keys_to_lowercase(d): """Converts all the dictionary keys to lowercase in recursive function call """ d_mod = { k.lower():d[k] if not isinstance(d[k],dict) else dict_keys_to_lowercase(d[k]) for k in d.keys() } return d_mod def str2list(word, dl=','): """ Test cases: 'train' => ['train'] 'train,val,test' => ['train', 'val', 'test'] 'train, val, test' => ['train', 'val', 'test'] 'train, val, ,test' => ['train', 'val', 'test'] 'train, val, ,' => ['train', 'val'] """ import re regexp = re.compile(r'['+dl+']') if regexp.search(word): x = word.split(',') x = [i.strip() for i in x if i.strip() ] else: x = [word] return x def id_generator(size=5, chars=string.ascii_uppercase + string.digits): """ generator expression Credit: https://stackoverflow.com/users/20862/ignacio-vazquez-abrams https://stackoverflow.com/a/2257449 """ # s = ''.join(random.choices(chars, k=size)) s = ''.join(random.SystemRandom().choice(chars) for _ in range(size)) return s def chain(_cb, 
          *args, **kwargs):
    """
    The proper way to check properties of duck-typed objects is to ask them
    if they quack, not to see if they fit in a duck-sized container.

    :param _cb: candidate callback; invoked only when truthy and callable
    :return: whatever ``_cb(*args, **kwargs)`` returns, or None when ``_cb``
        is not callable

    Ref:
    https://realpython.com/python-kwargs-and-args/
    https://stackoverflow.com/questions/624926/how-do-i-detect-whether-a-python-variable-is-a-function
    """
    if _cb and callable(_cb):
        return _cb(*args, **kwargs)


def raise_error(error_type, msg):
    """TODO: custom error handler

    NOTE(review): despite its name this only logs the error type -- it never
    raises, and the ``msg`` argument is unused. Confirm the intended contract
    before relying on it to abort control flow.
    """
    log.info("raise_error: {}".format(error_type))
from __future__ import print_function, division from sympy.core import S, Add, Mul, sympify, Symbol, Dummy from sympy.core.compatibility import u from sympy.core.exprtools import factor_terms from sympy.core.function import (Function, Derivative, ArgumentIndexError, AppliedUndef) from sympy.core.numbers import pi from sympy.functions.elementary.miscellaneous import sqrt from sympy.functions.elementary.piecewise import Piecewise from sympy.core.expr import Expr from sympy.core.relational import Eq from sympy.core.logic import fuzzy_not from sympy.functions.elementary.exponential import exp, exp_polar from sympy.functions.elementary.trigonometric import atan2 ############################################################################### ######################### REAL and IMAGINARY PARTS ############################ ############################################################################### class re(Function): """ Returns real part of expression. This function performs only elementary analysis and so it will fail to decompose properly more complicated expressions. If completely simplified result is needed then use Basic.as_real_imag() or perform complex expansion on instance of this function. 
Examples ======== >>> from sympy import re, im, I, E >>> from sympy.abc import x, y >>> re(2*E) 2*E >>> re(2*I + 17) 17 >>> re(2*I) 0 >>> re(im(x) + x*I + 2) 2 See Also ======== im """ is_real = True unbranched = True # implicitely works on the projection to C @classmethod def eval(cls, arg): if arg is S.NaN: return S.NaN elif arg.is_real: return arg elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real: return S.Zero elif arg.is_Function and arg.func is conjugate: return re(arg.args[0]) else: included, reverted, excluded = [], [], [] args = Add.make_args(arg) for term in args: coeff = term.as_coefficient(S.ImaginaryUnit) if coeff is not None: if not coeff.is_real: reverted.append(coeff) elif not term.has(S.ImaginaryUnit) and term.is_real: excluded.append(term) else: # Try to do some advanced expansion. If # impossible, don't try to do re(arg) again # (because this is what we are trying to do now). real_imag = term.as_real_imag(ignore=arg) if real_imag: excluded.append(real_imag[0]) else: included.append(term) if len(args) != len(included): a, b, c = (Add(*xs) for xs in [included, reverted, excluded]) return cls(a) - im(b) + c def as_real_imag(self, deep=True, **hints): """ Returns the real number with a zero imaginary part. """ return (self, S.Zero) def _eval_derivative(self, x): if x.is_real or self.args[0].is_real: return re(Derivative(self.args[0], x, evaluate=True)) if x.is_imaginary or self.args[0].is_imaginary: return -S.ImaginaryUnit \ * im(Derivative(self.args[0], x, evaluate=True)) def _eval_rewrite_as_im(self, arg): return self.args[0] - im(self.args[0]) def _eval_is_algebraic(self): return self.args[0].is_algebraic def _sage_(self): import sage.all as sage return sage.real_part(self.args[0]._sage_()) class im(Function): """ Returns imaginary part of expression. This function performs only elementary analysis and so it will fail to decompose properly more complicated expressions. 
If completely simplified result is needed then use Basic.as_real_imag() or perform complex expansion on instance of this function. Examples ======== >>> from sympy import re, im, E, I >>> from sympy.abc import x, y >>> im(2*E) 0 >>> re(2*I + 17) 17 >>> im(x*I) re(x) >>> im(re(x) + y) im(y) See Also ======== re """ is_real = True unbranched = True # implicitely works on the projection to C @classmethod def eval(cls, arg): if arg is S.NaN: return S.NaN elif arg.is_real: return S.Zero elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real: return -S.ImaginaryUnit * arg elif arg.is_Function and arg.func is conjugate: return -im(arg.args[0]) else: included, reverted, excluded = [], [], [] args = Add.make_args(arg) for term in args: coeff = term.as_coefficient(S.ImaginaryUnit) if coeff is not None: if not coeff.is_real: reverted.append(coeff) else: excluded.append(coeff) elif term.has(S.ImaginaryUnit) or not term.is_real: # Try to do some advanced expansion. If # impossible, don't try to do im(arg) again # (because this is what we are trying to do now). real_imag = term.as_real_imag(ignore=arg) if real_imag: excluded.append(real_imag[1]) else: included.append(term) if len(args) != len(included): a, b, c = (Add(*xs) for xs in [included, reverted, excluded]) return cls(a) + re(b) + c def as_real_imag(self, deep=True, **hints): """ Return the imaginary part with a zero real part. 
Examples ======== >>> from sympy.functions import im >>> from sympy import I >>> im(2 + 3*I).as_real_imag() (3, 0) """ return (self, S.Zero) def _eval_derivative(self, x): if x.is_real or self.args[0].is_real: return im(Derivative(self.args[0], x, evaluate=True)) if x.is_imaginary or self.args[0].is_imaginary: return -S.ImaginaryUnit \ * re(Derivative(self.args[0], x, evaluate=True)) def _sage_(self): import sage.all as sage return sage.imag_part(self.args[0]._sage_()) def _eval_rewrite_as_re(self, arg): return self.args[0] - re(self.args[0]) def _eval_is_algebraic(self): return self.args[0].is_algebraic ############################################################################### ############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################ ############################################################################### class sign(Function): """ Returns the complex sign of an expression: If the expresssion is real the sign will be: * 1 if expression is positive * 0 if expression is equal to zero * -1 if expression is negative If the expresssion is imaginary the sign will be: * I if im(expression) is positive * -I if im(expression) is negative Otherwise an unevaluated expression will be returned. When evaluated, the result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``. Examples ======== >>> from sympy.functions import sign >>> from sympy.core.numbers import I >>> sign(-1) -1 >>> sign(0) 0 >>> sign(-3*I) -I >>> sign(1 + I) sign(1 + I) >>> _.evalf() 0.707106781186548 + 0.707106781186548*I See Also ======== Abs, conjugate """ is_finite = True is_complex = True def doit(self, **hints): if self.args[0].is_zero is False: return self.args[0] / Abs(self.args[0]) return self @classmethod def eval(cls, arg): # handle what we can if arg.is_Mul: c, args = arg.as_coeff_mul() unk = [] s = sign(c) for a in args: if a.is_negative: s = -s elif a.is_positive: pass else: ai = im(a) if a.is_imaginary and ai.is_comparable: # i.e. 
a = I*real s *= S.ImaginaryUnit if ai.is_negative: # can't use sign(ai) here since ai might not be # a Number s = -s else: unk.append(a) if c is S.One and len(unk) == len(args): return None return s * cls(arg._new_rawargs(*unk)) if arg is S.NaN: return S.NaN if arg.is_zero: # it may be an Expr that is zero return S.Zero if arg.is_positive: return S.One if arg.is_negative: return S.NegativeOne if arg.is_Function: if arg.func is sign: return arg if arg.is_imaginary: if arg.is_Pow and arg.exp is S.Half: # we catch this because non-trivial sqrt args are not expanded # e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1) return S.ImaginaryUnit arg2 = -S.ImaginaryUnit * arg if arg2.is_positive: return S.ImaginaryUnit if arg2.is_negative: return -S.ImaginaryUnit def _eval_Abs(self): if fuzzy_not(self.args[0].is_zero): return S.One def _eval_conjugate(self): return sign(conjugate(self.args[0])) def _eval_derivative(self, x): if self.args[0].is_real: from sympy.functions.special.delta_functions import DiracDelta return 2 * Derivative(self.args[0], x, evaluate=True) \ * DiracDelta(self.args[0]) elif self.args[0].is_imaginary: from sympy.functions.special.delta_functions import DiracDelta return 2 * Derivative(self.args[0], x, evaluate=True) \ * DiracDelta(-S.ImaginaryUnit * self.args[0]) def _eval_is_nonnegative(self): if self.args[0].is_nonnegative: return True def _eval_is_nonpositive(self): if self.args[0].is_nonpositive: return True def _eval_is_imaginary(self): return self.args[0].is_imaginary def _eval_is_integer(self): return self.args[0].is_real def _eval_is_zero(self): return self.args[0].is_zero def _eval_power(self, other): if ( fuzzy_not(self.args[0].is_zero) and other.is_integer and other.is_even ): return S.One def _sage_(self): import sage.all as sage return sage.sgn(self.args[0]._sage_()) def _eval_rewrite_as_Piecewise(self, arg): if arg.is_real: return Piecewise((1, arg > 0), (-1, arg < 0), (0, True)) def _eval_rewrite_as_Heaviside(self, arg): from sympy 
import Heaviside if arg.is_real: return Heaviside(arg)*2-1 def _eval_simplify(self, ratio, measure): return self.func(self.args[0].factor()) class Abs(Function): """ Return the absolute value of the argument. This is an extension of the built-in function abs() to accept symbolic values. If you pass a SymPy expression to the built-in abs(), it will pass it automatically to Abs(). Examples ======== >>> from sympy import Abs, Symbol, S >>> Abs(-1) 1 >>> x = Symbol('x', real=True) >>> Abs(-x) Abs(x) >>> Abs(x**2) x**2 >>> abs(-x) # The Python built-in Abs(x) Note that the Python built-in will return either an Expr or int depending on the argument:: >>> type(abs(-1)) <... 'int'> >>> type(abs(S.NegativeOne)) <class 'sympy.core.numbers.One'> Abs will always return a sympy object. See Also ======== sign, conjugate """ is_real = True is_negative = False unbranched = True def fdiff(self, argindex=1): """ Get the first derivative of the argument to Abs(). Examples ======== >>> from sympy.abc import x >>> from sympy.functions import Abs >>> Abs(-x).fdiff() sign(x) """ if argindex == 1: return sign(self.args[0]) else: raise ArgumentIndexError(self, argindex) def _eval_refine(self): arg = self.args[0] if arg.is_zero: return S.Zero if arg.is_nonnegative: return arg if arg.is_nonpositive: return -arg if arg.is_Add: expr_list = [] for _arg in Add.make_args(arg): if _arg.is_negative or _arg.is_negative is None: return None if _arg.is_zero: expr_list.append(S.Zero) elif _arg.is_nonnegative: expr_list.append(_arg) elif _arg.is_nonpositive: expr_list.append(-_arg) if expr_list: return Add(*expr_list) return arg @classmethod def eval(cls, arg): from sympy.simplify.simplify import signsimp from sympy.core.basic import Atom from sympy.core.function import expand_mul if hasattr(arg, '_eval_Abs'): obj = arg._eval_Abs() if obj is not None: return obj if not isinstance(arg, Expr): raise TypeError("Bad argument type for Abs(): %s" % type(arg)) # handle what we can arg = signsimp(arg, 
evaluate=False) if arg.is_Mul: known = [] unk = [] for t in Mul.make_args(arg): tnew = cls(t) if tnew.func is cls: unk.append(tnew.args[0]) else: known.append(tnew) known = Mul(*known) unk = cls(Mul(*unk), evaluate=False) if unk else S.One return known*unk if arg is S.NaN: return S.NaN if arg.is_Pow: base, exponent = arg.as_base_exp() if base.is_real: if exponent.is_integer: if exponent.is_even: return arg if base is S.NegativeOne: return S.One if base.func is cls and exponent is S.NegativeOne: return arg return Abs(base)**exponent if base.is_positive == True: return base**re(exponent) return (-base)**re(exponent)*exp(-S.Pi*im(exponent)) if isinstance(arg, exp): return exp(re(arg.args[0])) if isinstance(arg, AppliedUndef): return if arg.is_Add and arg.has(S.Infinity, S.NegativeInfinity): if any(a.is_infinite for a in arg.as_real_imag()): return S.Infinity if arg.is_zero: return S.Zero if arg.is_nonnegative: return arg if arg.is_nonpositive: return -arg if arg.is_imaginary: arg2 = -S.ImaginaryUnit * arg if arg2.is_nonnegative: return arg2 # reject result if all new conjugates are just wrappers around # an expression that was already in the arg conj = arg.conjugate() new_conj = conj.atoms(conjugate) - arg.atoms(conjugate) if new_conj and all(arg.has(i.args[0]) for i in new_conj): return if arg != conj and arg != -conj: ignore = arg.atoms(Abs) abs_free_arg = arg.xreplace(dict([(i, Dummy(real=True)) for i in ignore])) unk = [a for a in abs_free_arg.free_symbols if a.is_real is None] if not unk or not all(conj.has(conjugate(u)) for u in unk): return sqrt(expand_mul(arg*conj)) def _eval_is_integer(self): if self.args[0].is_real: return self.args[0].is_integer def _eval_is_nonzero(self): return fuzzy_not(self._args[0].is_zero) def _eval_is_zero(self): return self._args[0].is_zero def _eval_is_positive(self): is_z = self.is_zero if is_z is not None: return not is_z def _eval_is_rational(self): if self.args[0].is_real: return self.args[0].is_rational def 
_eval_is_even(self): if self.args[0].is_real: return self.args[0].is_even def _eval_is_odd(self): if self.args[0].is_real: return self.args[0].is_odd def _eval_is_algebraic(self): return self.args[0].is_algebraic def _eval_power(self, exponent): if self.args[0].is_real and exponent.is_integer: if exponent.is_even: return self.args[0]**exponent elif exponent is not S.NegativeOne and exponent.is_Integer: return self.args[0]**(exponent - 1)*self return def _eval_nseries(self, x, n, logx): direction = self.args[0].leadterm(x)[0] s = self.args[0]._eval_nseries(x, n=n, logx=logx) when = Eq(direction, 0) return Piecewise( ((s.subs(direction, 0)), when), (sign(direction)*s, True), ) def _sage_(self): import sage.all as sage return sage.abs_symbolic(self.args[0]._sage_()) def _eval_derivative(self, x): if self.args[0].is_real or self.args[0].is_imaginary: return Derivative(self.args[0], x, evaluate=True) \ * sign(conjugate(self.args[0])) return (re(self.args[0]) * Derivative(re(self.args[0]), x, evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]), x, evaluate=True)) / Abs(self.args[0]) def _eval_rewrite_as_Heaviside(self, arg): # Note this only holds for real arg (since Heaviside is not defined # for complex arguments). from sympy import Heaviside if arg.is_real: return arg*(Heaviside(arg) - Heaviside(-arg)) def _eval_rewrite_as_Piecewise(self, arg): if arg.is_real: return Piecewise((arg, arg >= 0), (-arg, True)) def _eval_rewrite_as_sign(self, arg): from sympy import sign return arg/sign(arg) class arg(Function): """ Returns the argument (in radians) of a complex number. For a real number, the argument is always 0. 
Examples ======== >>> from sympy.functions import arg >>> from sympy import I, sqrt >>> arg(2.0) 0 >>> arg(I) pi/2 >>> arg(sqrt(2) + I*sqrt(2)) pi/4 """ is_real = True is_finite = True @classmethod def eval(cls, arg): if not arg.is_Atom: c, arg_ = factor_terms(arg).as_coeff_Mul() if arg_.is_Mul: arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else sign(a) for a in arg_.args]) arg_ = sign(c)*arg_ else: arg_ = arg if arg_.atoms(AppliedUndef): return x, y = re(arg_), im(arg_) rv = atan2(y, x) if rv.is_number: return rv if arg_ != arg: return cls(arg_, evaluate=False) def _eval_derivative(self, t): x, y = re(self.args[0]), im(self.args[0]) return (x * Derivative(y, t, evaluate=True) - y * Derivative(x, t, evaluate=True)) / (x**2 + y**2) def _eval_rewrite_as_atan2(self, arg): x, y = re(self.args[0]), im(self.args[0]) return atan2(y, x) class conjugate(Function): """ Returns the `complex conjugate` Ref[1] of an argument. In mathematics, the complex conjugate of a complex number is given by changing the sign of the imaginary part. Thus, the conjugate of the complex number :math:`a + ib` (where a and b are real numbers) is :math:`a - ib` Examples ======== >>> from sympy import conjugate, I >>> conjugate(2) 2 >>> conjugate(I) -I See Also ======== sign, Abs References ========== .. [1] http://en.wikipedia.org/wiki/Complex_conjugation """ @classmethod def eval(cls, arg): obj = arg._eval_conjugate() if obj is not None: return obj def _eval_Abs(self): return Abs(self.args[0], evaluate=True) def _eval_adjoint(self): return transpose(self.args[0]) def _eval_conjugate(self): return self.args[0] def _eval_derivative(self, x): if x.is_real: return conjugate(Derivative(self.args[0], x, evaluate=True)) elif x.is_imaginary: return -conjugate(Derivative(self.args[0], x, evaluate=True)) def _eval_transpose(self): return adjoint(self.args[0]) def _eval_is_algebraic(self): return self.args[0].is_algebraic class transpose(Function): """ Linear map transposition. 
    """

    @classmethod
    def eval(cls, arg):
        # delegate to the argument's own transpose rule, if it has one
        obj = arg._eval_transpose()
        if obj is not None:
            return obj

    def _eval_adjoint(self):
        # adjoint(transpose(x)) == conjugate(x)
        return conjugate(self.args[0])

    def _eval_conjugate(self):
        # conjugate(transpose(x)) == adjoint(x)
        return adjoint(self.args[0])

    def _eval_transpose(self):
        # transpose is an involution
        return self.args[0]


class adjoint(Function):
    """
    Conjugate transpose or Hermite conjugation.
    """

    @classmethod
    def eval(cls, arg):
        # Prefer a dedicated adjoint rule; otherwise fall back to
        # conjugate(transpose(arg)), the definition of the Hermite conjugate.
        obj = arg._eval_adjoint()
        if obj is not None:
            return obj
        obj = arg._eval_transpose()
        if obj is not None:
            return conjugate(obj)

    def _eval_adjoint(self):
        # adjoint is an involution
        return self.args[0]

    def _eval_conjugate(self):
        # conjugate(adjoint(x)) == transpose(x)
        return transpose(self.args[0])

    def _eval_transpose(self):
        # transpose(adjoint(x)) == conjugate(x)
        return conjugate(self.args[0])

    def _latex(self, printer, exp=None, *args):
        # render as arg^{\dag}, parenthesized when raised to a power
        arg = printer._print(self.args[0])
        tex = r'%s^{\dag}' % arg
        if exp:
            tex = r'\left(%s\right)^{%s}' % (tex, printer._print(exp))
        return tex

    def _pretty(self, printer, *args):
        from sympy.printing.pretty.stringpict import prettyForm
        pform = printer._print(self.args[0], *args)
        if printer._use_unicode:
            # dagger superscript on unicode-capable terminals
            pform = pform**prettyForm(u('\N{DAGGER}'))
        else:
            # ASCII fallback: superscript '+'
            pform = pform**prettyForm('+')
        return pform

###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################


class polar_lift(Function):
    """
    Lift argument to the Riemann surface of the logarithm, using the
    standard branch.

    >>> from sympy import Symbol, polar_lift, I
    >>> p = Symbol('p', polar=True)
    >>> x = Symbol('x')
    >>> polar_lift(4)
    4*exp_polar(0)
    >>> polar_lift(-4)
    4*exp_polar(I*pi)
    >>> polar_lift(-I)
    exp_polar(-I*pi/2)
    >>> polar_lift(I + 2)
    polar_lift(2 + I)

    >>> polar_lift(4*x)
    4*polar_lift(x)
    >>> polar_lift(4*p)
    4*p

    See Also
    ========

    sympy.functions.elementary.exponential.exp_polar
    periodic_argument
    """

    is_polar = True
    is_comparable = False  # Cannot be evalf'd.
# --- tail of class polar_lift (class header is above this chunk) ---------

    @classmethod
    def eval(cls, arg):
        # Evaluate polar_lift(arg): returns exp_polar(I*ar)*|arg| for known
        # arguments, or distributes the lift over a Mul of polar/positive
        # factors.  Returns None (implicitly) to leave the call unevaluated.
        from sympy import exp_polar, pi, I, arg as argument
        if arg.is_number:
            ar = argument(arg)
            # In general we want to affirm that something is known,
            # e.g. `not ar.has(argument) and not ar.has(atan)`
            # but for now we will just be more restrictive and
            # see that it has evaluated to one of the known values.
            if ar in (0, pi/2, -pi/2, pi):
                return exp_polar(I*ar)*abs(arg)

        if arg.is_Mul:
            args = arg.args
        else:
            args = [arg]
        # Partition the factors: already-polar, provably positive, the rest.
        included = []
        excluded = []
        positive = []
        for arg in args:
            if arg.is_polar:
                included += [arg]
            elif arg.is_positive:
                positive += [arg]
            else:
                excluded += [arg]
        if len(excluded) < len(args):
            # At least one factor is polar or positive, so we can pull those
            # outside the lift.  Positive reals are their own polar lift.
            if excluded:
                return Mul(*(included + positive))*polar_lift(Mul(*excluded))
            elif included:
                return Mul(*(included + positive))
            else:
                # All factors positive: attach exp_polar(0) to mark the
                # result as living on the principal branch.
                return Mul(*positive)*exp_polar(0)

    def _eval_evalf(self, prec):
        """ Careful! any evalf of polar numbers is flaky """
        return self.args[0]._eval_evalf(prec)

    def _eval_Abs(self):
        # |polar_lift(z)| == |z|.
        return Abs(self.args[0], evaluate=True)


class periodic_argument(Function):
    """
    Represent the argument on a quotient of the Riemann surface of the
    logarithm. That is, given a period P, always return a value in
    (-P/2, P/2], by using exp(P*I) == 1.

    >>> from sympy import exp, exp_polar, periodic_argument, unbranched_argument
    >>> from sympy import I, pi
    >>> unbranched_argument(exp(5*I*pi))
    pi
    >>> unbranched_argument(exp_polar(5*I*pi))
    5*pi
    >>> periodic_argument(exp_polar(5*I*pi), 2*pi)
    pi
    >>> periodic_argument(exp_polar(5*I*pi), 3*pi)
    -pi
    >>> periodic_argument(exp_polar(5*I*pi), pi)
    0

    See Also
    ========

    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    principal_branch
    """

    @classmethod
    def _getunbranched(cls, ar):
        # Compute the unbranched (surface-of-the-logarithm) argument of `ar`
        # by summing contributions of the factors of a Mul.  Returns None if
        # any factor's contribution cannot be determined.
        from sympy import exp_polar, log, polar_lift
        if ar.is_Mul:
            args = ar.args
        else:
            args = [ar]
        unbranched = 0
        for a in args:
            if not a.is_polar:
                # Ordinary (branched) argument for non-polar factors.
                unbranched += arg(a)
            elif a.func is exp_polar:
                # arg(exp_polar(x)) is the imaginary part of the exponent.
                unbranched += a.exp.as_real_imag()[1]
            elif a.is_Pow:
                # arg(b**(re + I*im)) = re*arg(b) + im*log|b|, unbranched.
                re, im = a.exp.as_real_imag()
                unbranched += re*unbranched_argument(
                    a.base) + im*log(abs(a.base))
            elif a.func is polar_lift:
                unbranched += arg(a.args[0])
            else:
                return None
        return unbranched

    @classmethod
    def eval(cls, ar, period):
        # Our strategy is to evaluate the argument on the Riemann surface of the
        # logarithm, and then reduce.
        # NOTE evidently this means it is a rather bad idea to use this with
        # period != 2*pi and non-polar numbers.
        from sympy import ceiling, oo, atan2, atan, polar_lift, pi, Mul
        if not period.is_positive:
            return None
        if period == oo and isinstance(ar, principal_branch):
            # principal_branch already reduced its argument; recurse on it.
            return periodic_argument(*ar.args)
        if ar.func is polar_lift and period >= 2*pi:
            # polar_lift cannot change the argument modulo >= 2*pi.
            return periodic_argument(ar.args[0], period)
        if ar.is_Mul:
            # Positive real factors contribute nothing to the argument.
            newargs = [x for x in ar.args if not x.is_positive]
            if len(newargs) != len(ar.args):
                return periodic_argument(Mul(*newargs), period)
        unbranched = cls._getunbranched(ar)
        if unbranched is None:
            return None
        if unbranched.has(periodic_argument, atan2, arg, atan):
            # Could not fully evaluate; leave unevaluated.
            return None
        if period == oo:
            return unbranched
        if period != oo:
            # Reduce into (-period/2, period/2].
            n = ceiling(unbranched/period - S(1)/2)*period
            if not n.has(ceiling):
                return unbranched - n

    def _eval_evalf(self, prec):
        from sympy import ceiling, oo
        z, period = self.args
        if period == oo:
            unbranched = periodic_argument._getunbranched(z)
            if unbranched is None:
                return self
            return unbranched._eval_evalf(prec)
        # Evaluate the unbranched argument numerically, then reduce mod period.
        ub = periodic_argument(z, oo)._eval_evalf(prec)
        return (ub - ceiling(ub/period - S(1)/2)*period)._eval_evalf(prec)


def unbranched_argument(arg):
    # Convenience wrapper: argument on the full Riemann surface (period oo).
    from sympy import oo
    return periodic_argument(arg, oo)


class principal_branch(Function):
    """
    Represent a polar number reduced to its principal branch on a quotient
    of the Riemann surface of the logarithm.

    This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number of infinity, `p`.
    The result is "z mod exp_polar(I*p)".

    >>> from sympy import exp_polar, principal_branch, oo, I, pi
    >>> from sympy.abc import z
    >>> principal_branch(z, oo)
    z
    >>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
    3*exp_polar(0)
    >>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
    3*principal_branch(z, 2*pi)

    See Also
    ========

    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    periodic_argument
    """

    is_polar = True
    is_comparable = False  # cannot always be evalf'd

    @classmethod
    def eval(self, x, period):
        # NOTE(review): the first parameter of this classmethod is named
        # `self` rather than the conventional `cls` — harmless, but worth
        # normalising in a follow-up.
        from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
        if isinstance(x, polar_lift):
            return principal_branch(x.args[0], period)
        if period == oo:
            # Infinite period: nothing to reduce.
            return x
        ub = periodic_argument(x, oo)
        barg = periodic_argument(x, period)
        if ub != barg and not ub.has(periodic_argument) \
                and not barg.has(periodic_argument):
            # The argument actually gets reduced; multiply by the phase
            # correction exp_polar(I*(barg - ub)).
            pl = polar_lift(x)

            def mr(expr):
                if not isinstance(expr, Symbol):
                    return polar_lift(expr)
                return expr
            pl = pl.replace(polar_lift, mr)
            if not pl.has(polar_lift):
                res = exp_polar(I*(barg - ub))*pl
                if not res.is_polar and not res.has(exp_polar):
                    res *= exp_polar(0)
                return res

        # Split x into a constant coefficient c and remaining symbolic
        # factors m; positive factors are folded into c.
        if not x.free_symbols:
            c, m = x, ()
        else:
            c, m = x.as_coeff_mul(*x.free_symbols)
        others = []
        for y in m:
            if y.is_positive:
                c *= y
            else:
                others += [y]
        m = tuple(others)
        arg = periodic_argument(c, period)
        if arg.has(periodic_argument):
            return None
        if arg.is_number and (unbranched_argument(c) != arg or
                              (arg == 0 and m != () and c != 1)):
            # The coefficient's argument was reduced: pull |c| out and
            # recurse on the remaining factors with the corrected phase.
            if arg == 0:
                return abs(c)*principal_branch(Mul(*m), period)
            return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
        if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
                and m == ():
            # Pure number already inside the principal branch.
            return exp_polar(arg*I)*abs(c)

    def _eval_evalf(self, prec):
        from sympy import exp, pi, I
        z, period = self.args
        p = periodic_argument(z, period)._eval_evalf(prec)
        if abs(p) > pi or p == -pi:
            return self  # Cannot evalf for this argument.
        return (abs(z)*exp(I*p))._eval_evalf(prec)


def _polarify(eq, lift, pause=False):
    # Recursive worker for polarify().  `pause` suppresses lifting at the
    # current level (used inside Adds so sums are not torn apart).
    from sympy import Integral
    if eq.is_polar:
        return eq
    if eq.is_number and not pause:
        return polar_lift(eq)
    if isinstance(eq, Symbol) and not pause and lift:
        return polar_lift(eq)
    elif eq.is_Atom:
        return eq
    elif eq.is_Add:
        r = eq.func(*[_polarify(arg, lift, pause=True) for arg in eq.args])
        if lift:
            return polar_lift(r)
        return r
    elif eq.is_Function:
        return eq.func(*[_polarify(arg, lift, pause=False) for arg in eq.args])
    elif isinstance(eq, Integral):
        # Don't lift the integration variable
        func = _polarify(eq.function, lift, pause=pause)
        limits = []
        for limit in eq.args[1:]:
            var = _polarify(limit[0], lift=False, pause=pause)
            rest = _polarify(limit[1:], lift=lift, pause=pause)
            limits.append((var,) + rest)
        return Integral(*((func,) + tuple(limits)))
    else:
        # Generic container: recurse only into Expr arguments.
        return eq.func(*[_polarify(arg, lift, pause=pause)
                         if isinstance(arg, Expr) else arg
                         for arg in eq.args])


def polarify(eq, subs=True, lift=False):
    """
    Turn all numbers in eq into their polar equivalents (under the standard
    choice of argument).

    Note that no attempt is made to guess a formal convention of adding
    polar numbers, expressions like 1 + x will generally not be altered.

    Note also that this function does not promote exp(x) to exp_polar(x).

    If ``subs`` is True, all symbols which are not already polar will be
    substituted for polar dummies; in this case the function behaves much
    like posify.

    If ``lift`` is True, both addition statements and non-polar symbols are
    changed to their polar_lift()ed versions.
    Note that lift=True implies subs=False.

    >>> from sympy import polarify, sin, I
    >>> from sympy.abc import x, y
    >>> expr = (-x)**y
    >>> expr.expand()
    (-x)**y
    >>> polarify(expr)
    ((_x*exp_polar(I*pi))**_y, {_x: x, _y: y})
    >>> polarify(expr)[0].expand()
    _x**_y*exp_polar(_y*I*pi)
    >>> polarify(x, lift=True)
    polar_lift(x)
    >>> polarify(x*(1+y), lift=True)
    polar_lift(x)*polar_lift(y + 1)

    Adds are treated carefully:

    >>> polarify(1 + sin((1 + I)*x))
    (sin(_x*polar_lift(1 + I)) + 1, {_x: x})
    """
    if lift:
        subs = False
    eq = _polarify(sympify(eq), lift)
    if not subs:
        return eq
    # Replace each free symbol with a polar Dummy and return the mapping so
    # the caller can substitute back.
    reps = dict([(s, Dummy(s.name, polar=True)) for s in eq.free_symbols])
    eq = eq.subs(reps)
    return eq, dict([(r, s) for s, r in reps.items()])


def _unpolarify(eq, exponents_only, pause=False):
    # Recursive worker for unpolarify().  `pause` suppresses the projection
    # at the current level (e.g. inside non-integer powers), so only one
    # rewriting pass is performed per call; unpolarify() loops to a fixpoint.
    if isinstance(eq, bool) or eq.is_Atom:
        return eq

    if not pause:
        if eq.func is exp_polar:
            return exp(_unpolarify(eq.exp, exponents_only))
        if eq.func is principal_branch and eq.args[1] == 2*pi:
            return _unpolarify(eq.args[0], exponents_only)
        if (
            eq.is_Add or eq.is_Mul or eq.is_Boolean or
            eq.is_Relational and (
                eq.rel_op in ('==', '!=') and 0 in eq.args or
                eq.rel_op not in ('==', '!='))
        ):
            return eq.func(*[_unpolarify(x, exponents_only) for x in eq.args])
        if eq.func is polar_lift:
            return _unpolarify(eq.args[0], exponents_only)

    if eq.is_Pow:
        expo = _unpolarify(eq.exp, exponents_only)
        # Only unpolarify the base freely when the exponent is an integer
        # (the branch choice is irrelevant then).
        base = _unpolarify(eq.base, exponents_only,
            not (expo.is_integer and not pause))
        return base**expo

    if eq.is_Function and getattr(eq.func, 'unbranched', False):
        # Unbranched functions are insensitive to the branch of their args.
        return eq.func(*[_unpolarify(x, exponents_only, exponents_only)
            for x in eq.args])

    return eq.func(*[_unpolarify(x, exponents_only, True) for x in eq.args])


def unpolarify(eq, subs={}, exponents_only=False):
    """
    If p denotes the projection from the Riemann surface of the logarithm to
    the complex line, return a simplified version eq' of `eq` such that
    p(eq') == p(eq).
    Also apply the substitution subs in the end. (This is a convenience, since
    ``unpolarify``, in a certain sense, undoes polarify.)

    >>> from sympy import unpolarify, polar_lift, sin, I
    >>> unpolarify(polar_lift(I + 2))
    2 + I
    >>> unpolarify(sin(polar_lift(I + 7)))
    sin(7 + I)
    """
    if isinstance(eq, bool):
        return eq

    eq = sympify(eq)
    if subs != {}:
        return unpolarify(eq.subs(subs))
    changed = True
    pause = False
    if exponents_only:
        pause = True
    # Iterate _unpolarify to a fixpoint.
    while changed:
        changed = False
        res = _unpolarify(eq, exponents_only, pause)
        if res != eq:
            changed = True
            eq = res
        if isinstance(res, bool):
            return res
    # Finally, replacing Exp(0) by 1 is always correct.
    # So is polar_lift(0) -> 0.
    return res.subs({exp_polar(0): 1, polar_lift(0): 0})


# /cyclic/
# Register Abs on the core to break an import cycle.
from sympy.core import basic as _
_.abs_ = Abs
del _
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
##                                                                                   ##
##  This file forms part of the Underworld geophysics modelling application.         ##
##                                                                                   ##
##  For full license and copyright information, please refer to the LICENSE.md file ##
##  located at the project root, or contact the authors.                             ##
##                                                                                   ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This module provides different drawing objects for visualisation in Underworld.
"""
import underworld as _underworld
import underworld._stgermain as _stgermain
import underworld.swarm as _swarmMod
import underworld.mesh as _uwmesh
from underworld.function import Function as _Function
import underworld.libUnderworld as _libUnderworld
import json as _json

#TODO: Drawing Objects to implement
# HistoricalSwarmTrajectory
#
# Maybe later...
# SwarmShapes, SwarmRGB, SwarmVectors
# EigenVectors, EigenVectorCrossSection

class ColourMap(_stgermain.StgCompoundComponent):
    """
    The ColourMap class provides functionality for mapping colours to numerical values.

    Parameters
    ----------
    colours: str, list
        List of colours to use for drawing object colour map. Provided as a string
        or as a list of strings. Example, "red blue", or ["red", "blue"]
    valueRange: tuple, list
        User defined value range to apply to colour map. Provided as a
        tuple of floats (minValue, maxValue). If none is provided, the
        value range will be determined automatically.
    logScale: bool
        Bool to determine if the colourMap should use a logarithmic scale.
    discrete: bool
        Bool to determine if a discrete colour map should be used.
        Discrete colour maps do not interpolate between colours and instead
        use nearest neighbour for colouring.
    """
    _selfObjectName = "_cm"
    _objectsDict = { "_cm": "lucColourMap" }

    def __init__(self, colours="diverge", valueRange=None, logScale=False, discrete=False, **kwargs):
        if not hasattr(self, "properties"):
            self.properties = {}

        if not isinstance(colours,(str,list,tuple)):
            raise TypeError("'colours' object passed in must be of python type 'str', 'list' or 'tuple'")
        self.properties.update({"colours" : _json.dumps(colours)})

        #User-defined props in kwargs
        self.properties.update(kwargs)
        # NOTE(review): the expression below builds a lower-cased dict and
        # discards it — self.properties is never lower-cased.  Looks like a
        # latent bug (probably intended `self.properties = dict(...)`); confirm.
        dict((k.lower(), v) for k, v in self.properties.items())

        if valueRange != None:
            # is valueRange correctly defined, ie list of length 2 made of numbers
            if not isinstance( valueRange, (list,tuple)):
                raise TypeError("'valueRange' must be of type 'list' or 'tuple'")
            if len(valueRange) != 2:
                raise ValueError("'valueRange' must have 2 real values")
            for item in valueRange:
                if not isinstance( item, (int, float) ):
                    raise TypeError("'valueRange' must contain real numbers")
            if not valueRange[0] < valueRange[1]:
                raise ValueError("The first number of the valueRange list must be smaller than the second number")

            # valueRange arg is good
            self.properties.update({"range" : [valueRange[0], valueRange[1]]})
        else:
            self.properties.update({"range" : [0.0, 0.0]}) # ignored

        if not isinstance(logScale, bool):
            raise TypeError("'logScale' parameter must be of 'bool' type.")
        self._logScale = logScale
        self.properties.update({"logscale" : logScale})

        if not isinstance(discrete, bool):
            raise TypeError("'discrete' parameter must be of 'bool' type.")
        self.properties.update({"discrete" : discrete})

        # build parent
        super(ColourMap,self).__init__()

    def _add_to_stg_dict(self,componentDictionary):
        # call parents method
        super(ColourMap,self)._add_to_stg_dict(componentDictionary)

    #dict methods
    def update(self, newdict):
        # Merge a dict of properties into this colour map.
        self.properties.update(newdict)
    def __getitem__(self, key):
        return self.properties[key]
    def __setitem__(self, key, item):
        self.properties[key] = item

    def _getProperties(self):
        #Convert properties to "key=value" lines for the StGermain dictionary.
        return '\n'.join(['%s=%s' % (k,v) for k,v in self.properties.items()]);


class Drawing(_stgermain.StgCompoundComponent):
    """
    This is the base class for all drawing objects but can also be instantiated
    as is for direct/custom drawing.

    Note that the defaults here are often overridden by the child objects.

    Parameters
    ----------
    colours: str, list.
        See ColourMap class docstring for further information
    colourMap: visualisation.objects.ColourMap
        A ColourMap object for the object to use.
        This should not be specified if 'colours' is specified.
    opacity: float
        Opacity of object. If provided, must take values from 0. to 1.
    colourBar: bool
        Bool to determine if a colour bar should be rendered.
    valueRange: tuple, list
        See ColourMap class docstring for further information
    logScale: bool
        See ColourMap class docstring for further information
    discrete: bool
        See ColourMap class docstring for further information
    """
    _selfObjectName = "_dr"
    _objectsDict = { "_dr": "lucDrawingObject" } # child should replace _dr with own derived type

    def __init__(self, name=None, colours=None, colourMap="", colourBar=False,
                       valueRange=None, logScale=False, discrete=False,
                       *args, **kwargs):
        if not hasattr(self, "properties"):
            self.properties = {}

        if colours and colourMap:
            raise RuntimeError("You should specify 'colours' or a 'colourMap', but not both.")
        if colourMap:
            self._colourMap = colourMap
        elif colours:
            self._colourMap = ColourMap(colours=colours, valueRange=valueRange, logScale=logScale)
        elif colourMap is not None:
            # colourMap defaulted to "" (falsy, not None): build a default map.
            self._colourMap = ColourMap(valueRange=valueRange, logScale=logScale)
        else:
            # Caller explicitly passed colourMap=None: no colour map at all.
            self._colourMap = None

        if not isinstance(discrete, bool):
            raise TypeError("'discrete' parameter must be of 'bool' type.")
        if discrete and self._colourMap:
            self._colourMap["discrete"] = True

        #User-defined props in kwargs
        self.properties.update(kwargs)
        # NOTE(review): as in ColourMap.__init__, this result is discarded —
        # keys are never actually lower-cased.  Confirm intent.
        dict((k.lower(), v) for k, v in self.properties.items())

        if not isinstance(colourBar, bool):
            raise TypeError("'colourBar' parameter must be of 'bool' type.")
        self._colourBar = None
        if colourBar and self._colourMap:
            #Create the associated colour bar
            self._colourBar = ColourBar(colourMap=self._colourMap)

        if name:
            self.properties["name"] = str(name)

        self.resetDrawing()

        # build parent
        super(Drawing,self).__init__(*args)

    def _add_to_stg_dict(self,componentDictionary):
        # call parents method
        super(Drawing,self)._add_to_stg_dict(componentDictionary)
        # add an empty(ish) drawing object. children should fill it out.
        componentDictionary[self._dr.name].update( {
            "properties" :self._getProperties(),
            "ColourMap"  :self._colourMap._cm.name if self._colourMap else None
        } )

    #dict methods
    def update(self, newdict):
        # Merge a dict of properties into this drawing object.
        self.properties.update(newdict)
    def __getitem__(self, key):
        return self.properties[key]
    def __setitem__(self, key, item):
        self.properties[key] = item

    def _getProperties(self):
        #Convert properties to "key=value" lines for the StGermain dictionary.
        return '\n'.join(['%s=%s' % (k,v) for k,v in self.properties.items()]);

    def render(self, viewer):
        #Place any custom geometry output in this method, called after database creation
        #General purpose plotting via LavaVu
        #Plot all custom data drawn on provided object
        try:
            obj = viewer.objects[self.properties["name"]]
            if not obj:
                raise KeyError("Object not found")
        except KeyError as e:
            print(self.properties["name"] + " Object lookup error: " + str(e))
            return
        obj["renderer"] = self.geomType

        # Push any direct-drawing data accumulated via label()/point()/line()/vector().
        output = False
        if len(self.vertices):
            obj.vertices(self.vertices)
            output = True
        if len(self.vectors):
            obj.vectors(self.vectors)
            output = True
        if len(self.scalars):
            obj.values(self.scalars)
            output = True
        if len(self.labels):
            obj.labels(self.labels)
            output = True

        #Update the database
        if output:
            viewer.app.update(obj.ref, True)

    def resetDrawing(self):
        #Clear direct drawing data
        self.vertices = []
        self.vectors = []
        self.scalars = []
        self.labels = []
        self.geomType = None

    #Direct drawing methods
    def label(self, text, pos=(0.,0.,0.), font="sans", scaling=1):
        """
        Writes a label string

        Parameters
        ----------
        text: str
            label text.
        pos: tuple
            X,Y,Z position to place the label.
        font : str
            label font (small/fixed/sans/serif/vector).
        scaling : float
            label font scaling (for "vector" font only).
        """
        self.geomType = "labels"
        self.vertices.append(pos)
        self.labels.append(text)
        self.properties.update({"font" : font, "fontscale" : scaling}) #Merge

    def point(self, pos=(0.,0.,0.)):
        """
        Draws a point

        Parameters
        ----------
        pos : tuple
            X,Y,Z position to place the point
        """
        self.geomType = "points"
        self.vertices.append(pos)

    def line(self, start=(0.,0.,0.), end=(0.,0.,0.)):
        """
        Draws a line

        Parameters
        ----------
        start : tuple
            X,Y,Z position to start line
        end : tuple
            X,Y,Z position to end line
        """
        self.geomType = "lines"
        self.vertices.append(start)
        self.vertices.append(end)

    def vector(self, position=(0.,0.,0.), vector=(0.,0.,0.)):
        """
        Draws a vector

        Parameters
        ----------
        position : tuple
            X,Y,Z position to centre vector on
        vector : tuple
            X,Y,Z vector value
        """
        self.geomType = "vectors"
        self.vertices.append(position)
        self.vectors.append(vector)

    @property
    def colourBar(self):
        """
        colourBar (object): return colour bar of drawing object, create if doesn't yet exist.
        """
        if not self._colourBar:
            self._colourBar = ColourBar(colourMap=self._colourMap)
        return self._colourBar

    @property
    def colourMap(self):
        """
        colourMap (object): return colour map of drawing object
        """
        return self._colourMap

    def getdata(self, viewer, typename):
        #Experimental: grabbing data from a LavaVu object
        alldata = []
        obj = viewer.objects[self.properties["name"]]
        if obj:
            #Force viewer open to trigger surface optimisation etc
            viewer.app.resetViews()
            #Get data elements list
            dataset = obj.data()
            for geom in dataset:
                #Grab a copy of the data
                data = geom.copy(typename)
                alldata.append(data)
        return alldata


class ColourBar(Drawing):
    """
    The ColourBar drawing object draws a colour bar for the provided colour map.

    Parameters
    ----------
    colourMap: visualisation.objects.ColourMap
        Colour map for which the colour bar will be drawn.
    """
    def __init__(self, colourMap, *args, **kwargs):
        #Default properties
        self.properties = {"colourbar" : 1}

        # build parent
        super(ColourBar,self).__init__(colourMap=colourMap, *args, **kwargs)


class CrossSection(Drawing):
    """
    This drawing object class defines a cross-section plane, derived classes
    plot data over this cross section

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ---------
    mesh : underworld.mesh.FeMesh
        Mesh over which cross section is rendered.
    fn : underworld.function.Function
        Function used to determine values to render.
    crossSection : str
        Cross Section definition, eg. z=0.
    resolution : list(unsigned)
        Surface sampling resolution.
    onMesh : boolean
        Sample the mesh nodes directly, as opposed to sampling across a regular
        grid. This flag should be used in particular where a mesh has been
        deformed.
    """
    _objectsDict = { "_dr": "lucCrossSection" }

    def __init__(self, mesh, fn, crossSection="",
                       resolution=[100,100,1],
                       colourBar=True,
                       offsetEdges=None,
                       onMesh=False,
                       *args, **kwargs):
        self._onMesh = onMesh
        #Check the mesh has a valid vertGridId, if not then we can't use onMesh
        #(Invalid should be -1, but is read as unsigned (4294967295) from python for some reason
        # valid value should be small, so just treat any large integer as invalid)
        if mesh._mesh.vertGridId > 1000 or mesh._mesh.vertGridId < 0:
            self._onMesh = False

        self._fn = _underworld.function.Function.convert(fn)

        if not isinstance(mesh,_uwmesh.FeMesh):
            raise TypeError("'mesh' object passed in must be of type 'FeMesh'")
        self._mesh = mesh

        if not isinstance(crossSection,str):
            raise ValueError("'crossSection' parameter must be of python type 'str'")
        self._crossSection = crossSection
        self._offsetEdges = offsetEdges

        # A bare int resolution means an NxNx1 sampling grid.
        if not isinstance( resolution, (list,tuple)):
            if isinstance(resolution,int):
                resolution = [resolution,resolution,1]
            else:
                raise TypeError("'resolution' passed in must be of type 'int', 'list' or 'tuple'")
        for el in resolution:
            if not isinstance(el,int):
                raise TypeError("'resolution' elements must be of python type 'int'")
            if el < 1:
                raise ValueError("'resolution' elements must be greater than zero")
        self._resolution = resolution

        # build parent
        super(CrossSection,self).__init__(colourBar=colourBar, *args, **kwargs)

    def _setup(self):
        # Bind the underworld function to the native cross-section object.
        _libUnderworld.gLucifer._lucCrossSection_SetFn( self._cself, self._fn._fncself )
        if self._offsetEdges != None:
            self._dr.offsetEdges = self._offsetEdges

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # call parents method
        super(CrossSection,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[self._dr.name].update( {
            "Mesh": self._mesh._cself.name,
            "crossSection": self._crossSection,
            "resolutionA" : self._resolution[0],
            "resolutionB" : self._resolution[1],
            "onMesh"      : self._onMesh
        } )

    @property
    def crossSection(self):
        """
        crossSection (str): Cross Section definition, eg;: z=0.
        """
        return self._crossSection


class Surface(CrossSection):
    """
    This drawing object class draws a surface using the provided scalar field.

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ---------
    mesh : underworld.mesh.FeMesh
        Mesh over which cross section is rendered.
    fn : underworld.function.Function
        Function used to determine values to render.
    drawSides : str
        Sides (x,y,z,X,Y,Z) for which the surface should be drawn.
        For example, "xyzXYZ" would render the provided function across
        all surfaces of the domain in 3D. In 2D, this object always renders
        across the entire domain.
    """
    _objectsDict = { "_dr" : "lucScalarField" }

    def __init__(self, mesh, fn, drawSides="xyzXYZ",
                       colourBar=True, onMesh=None,
                       *args, **kwargs):
        if onMesh is None:
            #Default onMesh=True for faster drawing and accuracy
            #(Will be disabled in CrossSection if mesh does not support it)
            onMesh = True
            #Default onMesh=False if less than 64 nodes
            #(Smooth/better output on interpolated mesh for low res meshes)
            if mesh.nodesGlobal < 64 or "resolution" in kwargs:
                onMesh = False

        if not isinstance(drawSides,str):
            raise ValueError("'drawSides' parameter must be of python type 'str'")
        self._drawSides = drawSides

        # build parent
        super(Surface,self).__init__( mesh=mesh, fn=fn, colourBar=colourBar, onMesh=onMesh, *args, **kwargs )

        #Merge with default properties
        is3d = len(self._crossSection) == 0
        defaults = {"cullface" : is3d, "lit" : is3d}
        defaults.update(self.properties)
        self.properties = defaults

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # append random string to provided name to ensure unique component names
        # call parents method
        super(Surface,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[self._dr.name]["drawSides"] = self._drawSides

    def _setup(self):
        _libUnderworld.gLucifer._lucCrossSection_SetFn( self._cself, self._fn._fncself )

    def __del__(self):
        super(Surface,self).__del__()


class Contours(CrossSection):
    """
    This drawing object class draws contour lines in a cross section using the provided scalar field.

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ---------
    mesh : underworld.mesh.FeMesh
        Mesh over which cross section is rendered.
    fn : underworld.function.Function
        Function used to determine values to render.
    labelFormat: str
        Format string (printf style) used to print a contour label, eg: " %g K"
    unitScaling:
        Scaling factor to apply to value when printing labels
    interval: float
        Interval between contour lines
    limits: tuple, list
        User defined minimum and maximum limits for the contours. Provided as a
        tuple/list of floats (minValue, maxValue). If none is provided, the
        limits will be determined automatically.
    """
    _objectsDict = { "_dr" : "lucContourCrossSection" }

    def __init__(self, mesh, fn, labelFormat="", unitScaling=1.0, interval=0.33, limits=(0.0, 0.0), *args, **kwargs):
        if not isinstance(labelFormat,str):
            raise ValueError("'labelFormat' parameter must be of python type 'str'")
        self._labelFormat = labelFormat

        if not isinstance( unitScaling, (int, float) ):
            raise TypeError("'unitScaling' must contain a number")
        self._unitScaling = unitScaling

        if not isinstance( interval, (int, float) ):
            raise TypeError("'interval' must contain a number")
        self._interval = interval

        # is limits correctly defined, ie list of length 2 made of numbers
        if not isinstance( limits, (list,tuple)):
            raise TypeError("'limits' must be of type 'list' or 'tuple'")
        if len(limits) != 2:
            raise ValueError("'limits' must have 2 real values")
        for item in limits:
            if not isinstance( item, (int, float) ):
                raise TypeError("'limits' must contain real numbers")
        if not limits[0] <= limits[1]:
            raise ValueError("The first number of the limits list must be smaller than the second number")
        self._limits = limits

        # build parent
        super(Contours,self).__init__( mesh=mesh, fn=fn, *args, **kwargs)

        #Default properties
        self.properties.update({"lit" : False})

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # append random string to provided name to ensure unique component names
        # call parents method
        super(Contours,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[self._dr.name]["unitScaling"] = self._unitScaling
        componentDictionary[self._dr.name][   "interval"] = self._interval
        componentDictionary[self._dr.name]["minIsovalue"] = self._limits[0]
        componentDictionary[self._dr.name]["maxIsovalue"] = self._limits[1]

    def _setup(self):
        #Override dictionary setting, it seems to left-trim strings
        self._dr.labelFormat = self._labelFormat
        _libUnderworld.gLucifer._lucCrossSection_SetFn( self._cself, self._fn._fncself )

    def __del__(self):
        super(Contours,self).__del__()


class Points(Drawing):
    """
    This drawing object class draws a swarm of points.

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ---------
    swarm : underworld.swarm.Swarm
        Swarm which provides locations for point rendering.
    fn_colour : underworld.function.Function
        Function used to determine colour to render particle.
        This function should return float/double values.
    fn_mask : underworld.function.Function
        Function used to determine if a particle should be rendered.
        This function should return bool values.
    fn_size : underworld.function.Function
        Function used to determine size to render particle.
        This function should return float/double values.
    """
    _objectsDict = { "_dr": "lucSwarmViewer" }

    def __init__(self, swarm, fn_colour=None, fn_mask=None, fn_size=None, colourVariable=None,
                       colourBar=True, *args, **kwargs):
        if not isinstance(swarm,_swarmMod.Swarm):
            raise TypeError("'swarm' object passed in must be of type 'Swarm'")
        self._swarm = swarm

        self._fn_colour = None
        if fn_colour != None:
            self._fn_colour = _underworld.function.Function.convert(fn_colour)
        else:
            # No colour function: a colour bar would be meaningless.
            colourBar = False
        self._fn_mask = None
        if fn_mask != None:
            self._fn_mask = _underworld.function.Function.convert(fn_mask)
        self._fn_size = None
        if fn_size != None:
            self._fn_size = _underworld.function.Function.convert(fn_size)

        # build parent
        super(Points,self).__init__(colourBar=colourBar, *args, **kwargs)

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        super(Points,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[ self._cself.name ][ "Swarm" ] = self._swarm._cself.name

    def _setup(self):
        # Bind the optional colour/mask/size functions to the native viewer.
        fnc_ptr = None
        if self._fn_colour:
            fnc_ptr = self._fn_colour._fncself
        fnm_ptr = None
        if self._fn_mask:
            fnm_ptr = self._fn_mask._fncself
        fns_ptr = None
        if self._fn_size:
            fns_ptr = self._fn_size._fncself

        _libUnderworld.gLucifer._lucSwarmViewer_SetFn( self._cself, fnc_ptr, fnm_ptr, fns_ptr, None )


class _GridSampler3D(CrossSection):
    """
    This drawing object class samples a regular grid in 3D.

    resolution : list(unsigned)
        Number of samples in the I,J,K directions.
    """
    _objectsDict = { "_dr": None } #Abstract class, Set by child

    def __init__(self, resolution=[16,16,16], *args, **kwargs):
        self._resolution = resolution

        # build parent
        super(_GridSampler3D,self).__init__(resolution=resolution, *args, **kwargs)

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # call parents method
        super(_GridSampler3D,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[self._dr.name].update( {
            "resolutionX": self._resolution[0],
            "resolutionY": self._resolution[1],
            "resolutionZ": self._resolution[2]
        } )


class VectorArrows(_GridSampler3D):
    """
    This drawing object class draws vector arrows corresponding to the provided vector field.

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ---------
    mesh : underworld.mesh.FeMesh
        Mesh over which vector arrows are rendered.
    fn : underworld.function.Function
        Function used to determine vectors to render.
        Function should return a vector of floats/doubles of appropriate
        dimensionality.
    arrowHead : float
         The size of the head of the arrow.
         If > 1.0 is ratio to arrow radius
         If in range [0.,1.] is ratio to arrow length
    scaling : float
        Scaling for entire arrow.
    autoscale : bool
        Scaling based on field min/max
    glyphs : int
        Type of glyph to render for vector arrow.
        0: Line, 1 or more: 3d arrow, higher number => better quality.
    resolution : list(unsigned)
        Number of samples in the I,J,K directions.
    """
    _objectsDict = { "_dr": "lucVectorArrows" }

    def __init__(self, mesh, fn, resolution=[16, 16, 16], autoscale=True, *args, **kwargs):
        self._autoscale = autoscale
        # build parent
        super(VectorArrows,self).__init__( mesh=mesh, fn=fn, resolution=resolution, colours=None, colourMap=None, colourBar=False, autoscale=autoscale, *args, **kwargs)

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # call parents method
        super(VectorArrows,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[self._dr.name].update( {
            "dynamicRange": self._autoscale
        } )


class Volume(_GridSampler3D):
    """
    This drawing object class draws a volume using the provided scalar field.

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ---------
    mesh : underworld.mesh.FeMesh
        Mesh over which object is rendered.
    fn : underworld.function.Function
        Function used to determine colour values.
        Function should return a vector of floats/doubles of appropriate
        dimensionality.
    resolution : list(unsigned)
        Number of samples in the I,J,K directions.
    """
    _objectsDict = { "_dr": "lucFieldSampler" }

    def __init__(self, mesh, fn, resolution=[64,64,64], colourBar=True, *args, **kwargs):
        # build parent
        if mesh.dim == 2:
            raise ValueError("Volume rendering requires a three dimensional mesh.")
        super(Volume,self).__init__( mesh=mesh, fn=fn, resolution=resolution, colourBar=colourBar, *args, **kwargs)

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # call parents method
        super(Volume,self)._add_to_stg_dict(componentDictionary)


class Sampler(Drawing):
    """
    The Sampler class provides functionality for sampling a field at
    a number of provided vertices.

    Parameters
    ----------
    vertices: list,array
        List of vertices to sample the field at, either a list or numpy array
    mesh : underworld.mesh.FeMesh
        Mesh over which the values are sampled
    fn : underworld.function.Function
        Function used to get the sampled values.
    """
    _objectsDict = { "_dr": "lucSampler" }

    def __init__(self, mesh, fn, *args, **kwargs):
        if not isinstance(mesh,_uwmesh.FeMesh):
            raise TypeError("'mesh' object passed in must be of type 'FeMesh'")
        self._mesh = mesh
        self._fn = None
        if fn != None:
            self._fn = _underworld.function.Function.convert(fn)

        # build parent
        super(Sampler,self).__init__(*args, **kwargs)

    def _add_to_stg_dict(self,componentDictionary):
        # call parents method
        super(Sampler,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[self._dr.name]["Mesh"] = self._mesh._cself.name

    def _setup(self):
        # NOTE(review): `fnc_ptr` is assigned but `fn_ptr` is the name read
        # below — if self._fn is None, `fn_ptr` is unbound and this raises
        # NameError.  Likely a typo (fnc_ptr vs fn_ptr); confirm and fix.
        fnc_ptr = None
        if self._fn:
            fn_ptr = self._fn._fncself

        _libUnderworld.gLucifer._lucSampler_SetFn( self._cself, fn_ptr )

    def sample(self, vertices):
        # NOTE(review): true division yields a float size here; numpy.zeros
        # rejects non-integer shapes — presumably `len(vertices)//3` was
        # intended.  Confirm against callers (e.g. IsoSurface.parallel_render).
        sz = len(vertices)/3*self._cself.fieldComponentCount
        import numpy
        values = numpy.zeros(sz, dtype='float32')
        _libUnderworld.gLucifer.lucSampler_SampleField( self._cself, vertices, values)
        return values


class IsoSurface(Volume):
    """
    This drawing object class draws an isosurface using the provided scalar field.

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ---------
    mesh : underworld.mesh.FeMesh
        Mesh over which object is rendered.
    fn : underworld.function.Function
        Function used to determine surface position.
        Function should return a vector of floats/doubles.
    fn_colour : underworld.function.Function
        Function used to determine colour of surface.
    resolution : list(unsigned)
        Number of samples in the I,J,K directions.
    isovalue : float
        Isovalue to plot.
    isovalues : list of float
        List of multiple isovalues to plot.
    """
    def __init__(self, mesh, fn, fn_colour=None, resolution=[64,64,64], colourBar=True, isovalue=None, *args, **kwargs):
        # build parent
        if mesh.dim == 2:
            raise ValueError("Isosurface requires a three dimensional mesh.")
        # Validate isovalue(s) params
        if not "isovalues" in kwargs:
            if isovalue is None:
                raise ValueError("Isosurface requires either 'isovalue' value or 'isovalues' list parameter.")
            kwargs["isovalues"] = [isovalue]
        self._sampler = None
        if fn_colour != None:
            self._sampler = Sampler(mesh, fn_colour)
        super(IsoSurface,self).__init__( mesh=mesh, fn=fn, resolution=resolution, colourBar=colourBar, *args, **kwargs)
        self.geomType = "triangles"

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # call parents method
        super(IsoSurface,self)._add_to_stg_dict(componentDictionary)

    def _setup(self):
        if self._sampler:
            self._sampler._setup()

    def render(self, viewer):
        # FieldSampler has exported a 3d volume to the database,
        # now we can use LavaVu to generate isosurface triangles
        isobj = viewer.objects[self.properties["name"]]
        if isobj:
            #Force viewer open to trigger surface optimisation
            viewer.app.resetViews()
            #Generate isosurface in same object, convert and delete volume, update db
            isobj.isosurface(name=None, convert=True, updatedb=True)
        else:
            print("Object not found: " + self.properties["name"])

    def parallel_render(self, viewer, rank):
        #If this method defined, is run by all procs to process
        # any render output that must be done in parallel
        isobj = viewer.objects[self.properties["name"]]
        if isobj and self._sampler:
            #If coloured by another field, get the vertices, sample and load values
            #Clear existing values
            isobj.cleardata()
            #Get data elements list
            dataset = isobj.data()
            for geom in dataset:
                #Grab a view of the vertex data
                verts = geom.get("vertices")
                if len(verts):
                    #Sample over tri vertices
                    values = self._sampler.sample(verts)
                    #Update data on root
                    if rank == 0:
                        #Update element with the sampled data values
                        geom.set("sampledfield", values)
            #Write the colour data back to db on root
            if rank == 0:
                isobj.update("triangles")


class Mesh(Drawing):
    """
    This drawing object class draws a mesh.

    See parent class for further parameter details. Also see property docstrings.

    Parameters
    ----------
    mesh : underworld.mesh.FeMesh
        Mesh to render.
    nodeNumbers : bool
        Bool to determine whether global node numbers should be rendered.
    segmentsPerEdge : unsigned
        Number of segments to render per cell/element edge. For higher
        order mesh, more segments are useful to render mesh curvature
        correctly.
    """
    _objectsDict = { "_dr": "lucMeshViewer" }

    def __init__( self, mesh, nodeNumbers=False, segmentsPerEdge=1, *args, **kwargs ):
        if not isinstance(mesh,_uwmesh.FeMesh):
            raise TypeError("'mesh' object passed in must be of type 'FeMesh'")
        self._mesh = mesh

        if not isinstance(nodeNumbers,bool):
            raise TypeError("'nodeNumbers' flag must be of type 'bool'")
        self._nodeNumbers = nodeNumbers

        if not isinstance(segmentsPerEdge,int) or segmentsPerEdge < 1:
            raise TypeError("'segmentsPerEdge' must be a positive 'int'")
        self._segmentsPerEdge = segmentsPerEdge

        #Default properties
        self.properties = {"lit" : False, "font" : "small", "fontscale" : 0.5,
                           "pointsize" : 5 if self._nodeNumbers else 1,
                           "pointtype" : 2 if self._nodeNumbers else 4}

        # build parent
        super(Mesh,self).__init__( colourMap=None, colourBar=False, *args, **kwargs )

    def _add_to_stg_dict(self,componentDictionary):
        # lets build up component dictionary
        # append random string to provided name to ensure unique component names
        # call parents method
        super(Mesh,self)._add_to_stg_dict(componentDictionary)
        componentDictionary[self._dr.name][       "Mesh"] = self._mesh._cself.name
        componentDictionary[self._dr.name]["nodeNumbers"] = self._nodeNumbers
        componentDictionary[self._dr.name][   "segments"] = self._segmentsPerEdge
"""Tests for VadService: a scripted audio source drives the service and the
emitted VAD events are checked for the expected speech segment boundaries."""
import threading
import unittest
from queue import Queue, Empty
from typing import Iterable

import numpy as np
from cltl.combot.infra.event import Event
from cltl.combot.infra.event.memory import SynchronousEventBus

from cltl.backend.api.microphone import AudioParameters
from cltl.backend.spi.audio import AudioSource
from cltl.vad.api import VAD
from cltl_service.backend.schema import AudioSignalStarted
from cltl_service.vad.service import VadService


def wait(lock: threading.Event):
    # Bounded wait so a broken test fails fast instead of hanging forever.
    if not lock.wait(1):
        raise unittest.TestCase.failureException("Latch timed out")


def test_source(start, speech_started, speech_ended):
    """Build an AudioSource class whose frames are released step-by-step.

    The returned class is scripted against three events shared with the test:
    `start` gates each advance of the stream, `speech_started`/`speech_ended`
    signal the test when the scripted speech frames have been emitted.
    """
    class TestSource(AudioSource):
        # One frame = 16 samples; "ones" frames count as speech for DummyVad.
        zeros = np.zeros((16, 1), dtype=np.int16)
        ones = np.ones((16, 1), dtype=np.int16)

        def __init__(self, url, offset, length):
            # Offset arrives in samples; convert to a frame index.
            self.offset = offset // 16

        @property
        def audio(self) -> Iterable[np.array]:
            """
            Frames: [0,0,1,1,0,0,0,1,1,1,0]
            Expected start offsets: 0, 5
            Expected speech offsets: 2, 7
            """
            if self.offset > 9:
                return
            # The service is expected to (re)open the source only at frame
            # offsets 0 and 5; anything else is a test failure.
            if self.offset != 0 and self.offset != 5:
                print("Failure:", self.offset)
                raise unittest.TestCase.failureException(self.offset)

            if self.offset == 0:
                # Frames 0-4: silence, then a 2-frame speech burst, then silence.
                yield from [self.zeros, self.zeros]
                wait(start)
                start.clear()
                yield self.ones
                speech_started.set()
                yield self.ones
                wait(start)
                speech_started.clear()
                speech_ended.set()
                start.clear()
                yield self.zeros
            # Frames 5-10 (second read starts here): silence, a 3-frame
            # speech burst, then trailing silence.
            # NOTE(review): flattened source is ambiguous here; this block is
            # placed at method level (runs for the offset-5 read) — confirm
            # against the original file's indentation.
            wait(start)
            start.clear()
            yield from [self.zeros, self.zeros]
            yield self.ones
            speech_started.set()
            wait(start)
            yield from [self.ones, self.ones]
            yield self.zeros

        @property
        def rate(self):
            return 16000

        @property
        def channels(self):
            return 1

        @property
        def frame_size(self):
            return 16

        @property
        def depth(self):
            return 2

    return TestSource


class DummyVad(VAD):
    """Trivial VAD: any frame with a positive sum counts as speech."""

    def is_vad(self, audio_frame: np.array, sampling_rate: int) -> bool:
        return audio_frame.sum() > 0

    def detect_vad(self, audio_frames: Iterable[np.array], sampling_rate: int,
                   blocking: bool = True, timeout: int = 0) -> [Iterable[np.array], int, int]:
        """Return (speech_frames, speech_start_offset, frames_consumed).

        Consumes frames until the first speech run ends.
        NOTE(review): raises NameError if `audio_frames` is empty ('last'
        is never bound before the final return).
        """
        is_vad = False
        offset = 0
        speech = []
        for last, frame in enumerate(audio_frames):
            # Speech just ended: report the collected run.
            if is_vad and not self.is_vad(frame, sampling_rate):
                return speech, offset, last + 1
            # Speech just started: remember where.
            if not is_vad and self.is_vad(frame, sampling_rate):
                is_vad = True
                offset = last
            if is_vad:
                speech.append(frame)
        return speech, offset, last + 1


class TestVAD(unittest.TestCase):
    def setUp(self) -> None:
        self.event_bus = SynchronousEventBus()
        self.vad_service = None

    def tearDown(self) -> None:
        if self.vad_service:
            self.vad_service.stop()

    def test_events_from_vad_service(self):
        # Latches shared with the scripted audio source.
        start = threading.Event()
        speech_started = threading.Event()
        speech_ended = threading.Event()
        self.vad_service = VadService("mic_topic", "vad_topic", DummyVad(),
                                      test_source(start, speech_started, speech_ended),
                                      self.event_bus, None)
        self.vad_service.start()

        # Announce an audio signal so the service starts reading the source.
        audio_started = AudioSignalStarted.create("1", 0, ["cltl-storage:audio/1"],
                                                  AudioParameters(16000, 1, 16, 2))
        self.event_bus.publish("mic_topic", Event.for_payload(audio_started))

        events = Queue()

        def receive_event(event):
            events.put(event)

        self.event_bus.subscribe("vad_topic", receive_event)

        # First burst: speech has started but not ended -> no event yet.
        start.set()
        wait(speech_started)
        with self.assertRaises(Empty):
            events.get(block=True, timeout=0.1)

        # Speech ends -> one event for frames [2, 4) in samples.
        start.set()
        wait(speech_ended)
        event = events.get(block=True, timeout=0.1)
        self.assertEqual(2 * 16, event.payload.mention.segment[0].start)
        self.assertEqual(4 * 16, event.payload.mention.segment[0].stop)

        # Second burst: again no event until it ends.
        start.set()
        wait(speech_started)
        with self.assertRaises(Empty):
            events.get(block=True, timeout=0.1)

        # Second segment covers frames [7, 10) in samples.
        start.set()
        event = events.get(block=True, timeout=0.1)
        self.assertEqual(7 * 16, event.payload.mention.segment[0].start)
        self.assertEqual(10 * 16, event.payload.mention.segment[0].stop)
import SocketServer
import threading
import numpy as np
import cv2
import sys
import serial

from keras.models import load_model
from self_driver_helper import SelfDriver

# Latest ultrasonic distance reading (cm), shared between the sensor thread
# (writer: UltrasonicHandler) and the video thread (reader: VideoStreamHandler).
ultrasonic_data = None


# BaseRequestHandler is used to process incoming requests
class UltrasonicHandler(SocketServer.BaseRequestHandler):
    """Receives distance readings from the ultrasonic sensor client."""

    data = " "

    def handle(self):
        # BUG FIX: without this declaration the assignment below created a
        # local variable, so the video handler never saw a sensor reading.
        global ultrasonic_data

        while self.data:
            self.data = self.request.recv(1024)
            # recv() returns an empty payload when the client disconnects;
            # bail out instead of crashing on float('').
            if not self.data:
                break
            ultrasonic_data = float(self.data.split('.')[0])
            print(ultrasonic_data)


# VideoStreamHandler uses streams which are file-like objects for communication
class VideoStreamHandler(SocketServer.StreamRequestHandler):
    """Decodes the MJPEG camera stream and steers the car from NN predictions."""

    # Include port and baudrate, with timeout of 1 second
    ser = serial.Serial("/dev/ttyUSB0", 115200, timeout=1)
    model = load_model("saved_model/nn_model.h5")

    def handle(self):
        global ultrasonic_data

        stream_bytes = b''
        driver = SelfDriver(self.ser)
        try:
            # stream video frames one by one
            while True:
                stream_bytes += self.rfile.read(1024)
                # JPEG frames are delimited by the SOI (\xff\xd8) and
                # EOI (\xff\xd9) markers.
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    gray = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
                    #image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)

                    # lower half of the image (road region of interest)
                    height, width = gray.shape
                    roi = gray[int(height / 2):height, :]
                    cv2.imshow('image', roi)

                    image_array = roi.flatten().astype(np.float32)
                    # NOTE(review): predict_classes is fed a 1-D array; Keras
                    # models usually expect a batch dimension — confirm
                    # against the training pipeline.
                    prediction = self.model.predict_classes(image_array)
                    print("Prediction is", prediction)

                    # Emergency stop when an obstacle is closer than 40 cm.
                    # BUG FIX: was 'self.driver.stop()' (no such attribute) and
                    # crashed when no reading had arrived yet (None at start-up).
                    if ultrasonic_data is not None and int(ultrasonic_data) < 40:
                        print("Stopping car because of obstacle.")
                        driver.stop()
                        # Consume the reading so we only react to fresh data.
                        ultrasonic_data = None

                    # get prediction and then steer
                    driver.steer(prediction)
        finally:
            cv2.destroyAllWindows()
            sys.exit()


class SelfDriverServer(object):
    """Runs the ultrasonic server on a daemon thread and the video server on
    the main thread."""

    def __init__(self, host, portUS, portCam):
        self.host = host
        self.portUS = portUS
        self.portCam = portCam

    def startUltrasonicServer(self):
        # Create the Ultrasonic server, binding to localhost on port 50001
        server = SocketServer.TCPServer((self.host, self.portUS), UltrasonicHandler)
        server.serve_forever()

    def startVideoServer(self):
        # Create the video server, binding to localhost on port 50002
        server = SocketServer.TCPServer((self.host, self.portCam), VideoStreamHandler)
        server.serve_forever()

    def start(self):
        # Sensor server runs in the background; video server blocks here.
        ultrasonic_thread = threading.Thread(target=self.startUltrasonicServer)
        ultrasonic_thread.daemon = True
        ultrasonic_thread.start()
        self.startVideoServer()


if __name__ == "__main__":
    # From SocketServer documentation
    HOST, PORTUS, PORTCAM = '192.168.0.15', 50001, 50002
    sdc = SelfDriverServer(HOST, PORTUS, PORTCAM)
    sdc.start()
import matplotlib.pyplot as plt
import numpy as np
import quantecon as qe
import seaborn as sb

from wald_class import *

# Problem parameters: per-period observation cost and the losses attached to
# each of the two possible wrong decisions.
c = 1.25
L0 = 25
L1 = 25
# Beta-distribution shape parameters for the two candidate densities.
a0, b0 = 2.5, 2.0
a1, b1 = 2.0, 2.5
m = 25

# Discretized beta densities on m points, clipped away from zero and
# renormalized so each sums to 1.
# NOTE(review): `st` is assumed to come from the wildcard import of
# wald_class (presumably scipy.stats) — confirm.
f0 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a0, b=b0), 1e-6, np.inf)
f0 = f0 / np.sum(f0)
f1 = np.clip(st.beta.pdf(np.linspace(0, 1, m), a=a1, b=b1), 1e-6, np.inf)
f1 = f1 / np.sum(f1)  # Make sure sums to 1

# Create an instance of our WaldFriedman class
wf = WaldFriedman(c, L0, L1, f0, f1, m=m)

# Solve using qe's `compute_fixed_point` function to obtain the value function J
J = qe.compute_fixed_point(wf.bellman_operator, np.zeros(m),
                           error_tol=1e-7, verbose=False,
                           print_skip=10, max_iter=500)
# Cutoff beliefs (lb, ub) of the optimal stopping rule.
lb, ub = wf.find_cutoff_rule(J)

# Get draws: simulated correct-decision indicators and stopping times.
ndraws = 500
cdist, tdist = wf.stopping_dist(ndraws=ndraws)

fig, ax = plt.subplots(2, 2, figsize=(12, 9))

# Upper-left: the two candidate distributions.
ax[0, 0].plot(f0, label=r"$f_0$")
ax[0, 0].plot(f1, label=r"$f_1$")
ax[0, 0].set_ylabel(r"probability of $z_k$", size=14)
ax[0, 0].set_xlabel(r"$k$", size=14)
ax[0, 0].set_title("Distributions", size=14)
ax[0, 0].legend(fontsize=14)

# Upper-right: value function with the two decision cutoffs marked.
ax[0, 1].plot(wf.pgrid, J)
ax[0, 1].annotate(r"$\beta$", xy=(lb + 0.025, 0.5), size=14)
ax[0, 1].annotate(r"$\alpha$", xy=(ub + 0.025, 0.5), size=14)
ax[0, 1].vlines(lb, 0.0, wf.payoff_choose_f1(lb), linestyle="--")
ax[0, 1].vlines(ub, 0.0, wf.payoff_choose_f0(ub), linestyle="--")
ax[0, 1].set_ylim(0, 0.5 * max(L0, L1))
ax[0, 1].set_ylabel("cost", size=14)
ax[0, 1].set_xlabel(r"$p_k$", size=14)
ax[0, 1].set_title(r"Value function $J$", size=14)

# Histogram the stopping times
ax[1, 0].hist(tdist, bins=np.max(tdist))
ax[1, 0].set_title("Stopping times over {} replications".format(ndraws), size=14)
ax[1, 0].set_xlabel("time", size=14)
ax[1, 0].set_ylabel("number of stops", size=14)
ax[1, 0].annotate("mean = {}".format(np.mean(tdist)),
                  xy=(max(tdist) / 2, max(np.histogram(tdist, bins=max(tdist))[0]) / 2),
                  size=16)

# Lower-right: fraction of correct decisions across replications.
ax[1, 1].hist(cdist, bins=2)
ax[1, 1].set_title("Correct decisions over {} replications".format(ndraws), size=14)
ax[1, 1].annotate("% correct = {}".format(np.mean(cdist)),
                  xy=(0.05, ndraws / 2), size=16)

fig.tight_layout()
plt.show()
# A simple convolutional layer
# @Time: 12/5/21
# @Author: lnblanke
# @Email: fjh314.84@gmail.com
# @File: conv.py.py

from .layer import Layer
import numpy as np


class Conv(Layer):
    """A minimal 2-D convolution layer over a single-channel input.

    Holds ``filters`` square kernels of side ``kernal_size``. With
    padding="same" the output keeps the input height/width (positions the
    kernel cannot fully cover stay zero — the input itself is never padded);
    with padding="valid" the output shrinks by kernal_size - 1 per dimension.
    """

    def __init__(self, kernal_size: int, filters: int, padding: str, name=None):
        super().__init__(name)

        self.kernel_size = kernal_size
        self.filters = filters
        self.padding = padding
        # One (k x k) kernel per filter, randomly initialized.
        self.weights = np.random.randn(filters, kernal_size, kernal_size)
        # Cached forward input, required by backprop.
        self.input = None

        if padding not in ("same", "valid"):
            raise NameError("The name of padding is not found!")

    def feedforward(self, input_vector: np.ndarray):
        """Convolve the (h x w) input with every kernel.

        Returns an (h, w, filters) array for "same" padding, or an
        (h-k+1, w-k+1, filters) array for "valid" padding.
        """
        height, width = input_vector.shape
        self.input = input_vector

        if self.kernel_size > height or self.kernel_size > width:
            raise ArithmeticError("Negative dimension encountered!")

        k = self.kernel_size
        if self.padding == "same":
            # Same-size output; uncovered border cells remain zero.
            output = np.zeros((height, width, self.filters))
        else:
            output = np.empty((height - k + 1, width - k + 1, self.filters))

        for row in range(height - k + 1):
            for col in range(width - k + 1):
                window = input_vector[row: row + k, col: col + k]
                # Broadcast the (k, k) window against all (filters, k, k)
                # kernels at once.
                output[row, col] = np.sum(window * self.weights, axis=(1, 2))

        return output

    def backprop(self, dy_dx: np.ndarray, learning_rate):
        """SGD update of the kernels from the upstream gradient ``dy_dx``.

        NOTE(review): no gradient w.r.t. the layer input is returned, so this
        layer cannot sit below other trainable layers.
        """
        k = self.kernel_size
        grad = np.zeros((self.filters, k, k))
        height, width = self.input.shape

        for row in range(height - k + 1):
            for col in range(width - k + 1):
                window = self.input[row: row + k, col: col + k]
                for f in range(self.filters):
                    grad[f] += dy_dx[row, col, f] * window

        self.weights -= learning_rate * grad
import numpy as np
from pypropack import svdp
from scipy.sparse import csr_matrix

# Seed so the demo output is reproducible.
np.random.seed(0)

# A random dense 10 x 20 test matrix.
dense = np.random.random((10, 20))

# Truncated SVD (3 singular triplets) via PROPACK on a sparse copy,
# and a full thin SVD via LAPACK for comparison.
u_pp, sigma_pp, v_pp = svdp(csr_matrix(dense), 3)
u_lp, sigma_lp, v_lp = np.linalg.svd(dense, full_matrices=False)

# If the methods agree, u_pp.T . u_lp is close to +/- identity in its
# leading 3 x 3 block, and the singular values should match.
np.set_printoptions(suppress=True, precision=8)
print np.dot(u_pp.T, u_lp)
print
print sigma_pp
print sigma_lp
print
print np.dot(v_pp, v_lp.T)
'''
Orthogonal polynomials
'''
import numpy as np


def evaluate_orthonormal_polynomials(X, max_degree, measure, interval=(0, 1), derivative=0):
    r'''
    Evaluate orthonormal polynomials in :math:`X`.

    The endpoints of `interval` can be specified when `measure` is uniform
    or Chebyshev.

    :param X: Locations of desired evaluations
    :type X: One dimensional np.array
    :param max_degree: Maximal degree of polynomial basis.
    :param measure: Orthogonality measure. `u` for uniform, `c` for Chebyshev,
        `h` for Gauss/Hermite, `t` for Taylor (monomials)
    :param interval: Domain
    :type interval: tuple or list
    :param derivative: Order of derivative to evaluate (0 = plain values)
    :rtype: numpy.array of size :code:`X.shape[0] x (max_degree+1)`
    '''
    max_degree = int(max_degree)
    if derivative and measure not in ['t', 'u', 'h']:
        raise ValueError('Derivative not implemented for Chebyshev polynomials')
    if derivative > 1 and measure not in ['h', 't']:
        raise ValueError('Second derivative only implemented for Taylor and Hermite polynomials')

    if measure in ['u', 'c']:
        # Map X affinely from `interval` onto the reference interval [-1, 1].
        Xtilde = (X - (interval[1] + interval[0]) / 2.) / ((interval[1] - interval[0]) / 2.)
        if measure == 'u':
            vals = legendre_polynomials(Xtilde, max_degree + 1)
            if derivative == 0:
                return vals
            # Differentiate repeatedly via the recurrence for derivatives of
            # orthonormal Legendre polynomials.
            for _ in range(derivative):
                dvals = np.zeros(vals.shape)
                dvals[:, 1] = np.sqrt(3) * np.ones(vals.shape[0])
                for j in range(2, max_degree + 1):
                    dvals[:, j] = np.sqrt(2 * j + 1) * (
                        (2 * j - 1) * vals[:, j - 1] / np.sqrt(2 * j - 1)
                        + dvals[:, j - 2] / np.sqrt(2 * j - 3)
                    )
                vals = dvals
            # Chain-rule factor from rescaling `interval` to [-1, 1].
            return (2 / (interval[1] - interval[0])) ** derivative * vals
        elif measure == 'c':
            return chebyshev_polynomials(Xtilde, max_degree + 1)
    elif measure == 'h':
        # For the Hermite family, interval[1] acts as a scale parameter.
        Xtilde = X / interval[1]
        vals = hermite_polynomials(Xtilde, max_degree + 1)
        # Normalization constants 1/sqrt(n!).
        factorial = np.ones((1, max_degree + 1))
        for i in range(1, max_degree + 1):
            factorial[0, i:] *= i
        orthonormalizer = 1 / np.sqrt(factorial)
        if derivative > 0:
            # Work on the unnormalized polynomials, apply He_n' = n He_{n-1}
            # repeatedly (column shift + scale), rescale, then re-normalize.
            vals /= orthonormalizer
            for _ in range(derivative):
                vals = np.concatenate(
                    [np.zeros([X.shape[0], 1]), np.arange(1, vals.shape[1]) * vals[:, :-1]],
                    axis=1,
                )
            vals = 1 / interval[1] ** derivative * vals
        vals *= orthonormalizer
        return vals
    elif measure == 't':
        vals = taylor_polynomials(X, max_degree + 1)
        if derivative > 0:
            # d/dx x^n = n x^(n-1): shift columns right and scale.
            for _ in range(derivative):
                vals = np.concatenate(
                    [np.zeros([X.shape[0], 1]), np.arange(1, vals.shape[1]) * vals[:, :-1]],
                    axis=1,
                )
        return vals


def taylor_polynomials(X, N):
    '''Evaluate the monomials :math:`1, x, \\dots, x^{N-1}` in `X`.'''
    return X.reshape(-1, 1) ** np.arange(N)


def chebyshev_polynomials(X, N):
    r'''
    Evaluate the orthonormal Chebyshev polynomials on :math:`([-1,1],dx/2)`
    in :math:`X\subset [-1,1]`

    :param X: Locations of desired evaluations
    :type X: One dimensional np.array
    :param N: Number of polynomials
    :rtype: numpy.array of shape :code:`X.shape[0]xN`
    '''
    deg = N - 1
    # T_0 has unit norm already; all higher degrees scale by sqrt(2).
    orthonormalizer = np.concatenate(
        (np.array([1]).reshape(1, 1), np.sqrt(2) * np.ones((1, deg))), axis=1)
    if deg < 1:
        return np.ones((X.shape[0], 1)) * orthonormalizer
    T = np.zeros((X.shape[0], N))
    T[:, 0] = 1.0
    T[:, 1] = X
    # Three-term recurrence T_{n+1} = 2 x T_n - T_{n-1}.
    for n in range(1, deg):
        T[:, n + 1] = 2 * X * T[:, n] - T[:, n - 1]
    return T * orthonormalizer


def legendre_polynomials(X, N):
    r'''
    Evaluate the orthonormal Legendre polynomials on :math:`([-1,1],dx/2)`
    in :math:`X\subset [-1,1]`

    :param X: Locations of desired evaluations
    :type X: One dimensional np.array
    :param N: Number of polynomials
    :rtype: numpy.array of shape :code:`X.shape[0]xN`
    '''
    deg = N - 1
    # L^2-normalization constants sqrt(2n+1) w.r.t. the measure dx/2.
    orthonormalizer = np.sqrt(2 * (np.arange(deg + 1)) + 1)
    if deg < 1:
        return np.ones((X.shape[0], 1)) * orthonormalizer
    P = np.zeros((X.shape[0], N))
    P[:, 0] = 1.0
    P[:, 1] = X
    # Bonnet recurrence (n+1) P_{n+1} = (2n+1) x P_n - n P_{n-1}.
    for n in range(1, deg):
        P[:, n + 1] = 1. / (n + 1) * ((2 * n + 1) * X * P[:, n] - n * P[:, n - 1])
    return P * orthonormalizer


def hermite_polynomials(X, N):
    r'''
    Evaluate the orthonormal Hermite polynomials on
    :math:`(\mathbb{R},\frac{1}{\sqrt{2\pi}}\exp(-x^2/2)dx)` in
    :math:`X\subset\mathbb{R}`

    :param X: Locations of desired evaluations
    :type X: One dimensional np.array
    :param N: Number of polynomials
    :rtype: numpy.array of shape :code:`X.shape[0] x N`
    '''
    deg = N - 1
    # Normalization constants 1/sqrt(n!).
    factorial = np.ones((1, N))
    for i in range(1, N):
        factorial[0, i:] *= i
    orthonormalizer = 1 / np.sqrt(factorial)
    if deg < 1:
        return np.ones((X.shape[0], 1)) * orthonormalizer
    He = np.zeros((X.shape[0], N))
    He[:, 0] = 1.0
    He[:, 1] = X
    # Probabilists' recurrence He_{n+1} = x He_n - n He_{n-1}.
    for n in range(1, deg):
        He[:, n + 1] = X * He[:, n] - n * He[:, n - 1]
    return He * orthonormalizer
from CHECLabPy.core.io import HDF5Reader, HDF5Writer
from sstcam_sandbox import get_data
from os.path import dirname, abspath
import numpy as np
import pandas as pd
from IPython import embed

DIR = abspath(dirname(__file__))


def process(path, output):
    """Summarise per-extractor charge statistics and derive S/N ratios.

    Reads the "data" table from `path`, computes mean/std of the charge per
    (extractor, key) group, and writes one summary row per extractor to
    `output`.
    """
    with HDF5Reader(path) as reader:
        df = reader.read("data")

    rows = []
    for extractor, extractor_df in df.groupby("extractor"):
        row = dict(extractor=extractor)
        for key, key_df in extractor_df.groupby("key"):
            values = key_df['charge'].values
            row[f'mean_{key}'] = np.mean(values)
            row[f'std_{key}'] = np.std(values)
        rows.append(row)
    summary = pd.DataFrame(rows)

    # Signal-to-noise: on-source mean over off-source spread.
    summary['sn_on_50'] = summary['mean_on_50'] / summary['std_off']
    summary['sn_on_3'] = summary['mean_on_3'] / summary['std_off']

    with HDF5Writer(output) as writer:
        writer.write(data=summary)


def main():
    """Build the analysis summary from the extracted-charge file."""
    charge_path = get_data("d190520_charge_extraction/data/charge.h5")
    analysis_path = get_data("d190520_charge_extraction/data/analysis.h5")
    process(charge_path, analysis_path)


if __name__ == '__main__':
    main()
"""Module for handling operations on both databases: media and clusters.""" import itertools import logging import multiprocessing from pathlib import Path import pandas as pd from filecluster.configuration import Config, CLUSTER_DF_COLUMNS from filecluster.filecluster_types import ClustersDataFrame from filecluster.update_clusters import get_or_create_library_cluster_ini_as_dataframe from numpy import int64 from pandas.core.frame import DataFrame from typing import Union, List, Tuple log_fmt = "%(levelname).1s %(message)s" logging.basicConfig(format=log_fmt) logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) def get_existing_clusters_info( config: Config, ) -> Tuple[ClustersDataFrame, List[Path], List[str]]: """Scan library, find existing clusters and empty or non-compliant folders.""" # TODO: Any non-empty subfolder of year folder should contain .cluster.ini # file (see: Runmageddon example). Non-empty means - contains media files n_cpu = multiprocessing.cpu_count() logger.debug(f"Setting-up multiprocessing pool with {n_cpu} processes") pool = multiprocessing.Pool(processes=n_cpu) logger.debug("Pool ready to use") watch_folders = config.watch_folders # NOTE: these requires refactoring in scan_library_dir() # TODO: KS: 2020-12-26: folders than contains no media files but subfolders non_compliant_folders = [] # totally empty folders (no files, no dirs) empty_folder_list = [] # is there a reason for using watch folders (library folders)? 
# do we have enabled duplicates or existing cluster functionalities use_watch_folders = ( config.skip_duplicated_existing_in_libs or config.assign_to_clusters_existing_in_libs ) # Start scanning watch folders to get cluster information if use_watch_folders and len(watch_folders): tuples = [ get_or_create_library_cluster_ini_as_dataframe( lib, pool, config.force_deep_scan ) for lib in watch_folders ] dfs, empty_folder_list = map(list, zip(*tuples)) df = pd.concat(dfs, axis=0) df.index = range(len(df)) df = df.reset_index() df = df.rename(columns={"index": "cluster_id"}) # Flatten the list of empty directories: empty_folder_list = list(itertools.chain(*empty_folder_list)) else: df = pd.DataFrame(columns=CLUSTER_DF_COLUMNS) return ClustersDataFrame(df), empty_folder_list, non_compliant_folders def get_new_cluster_id_from_dataframe(df_clusters: DataFrame) -> Union[int64, int]: """Return cluster id value that is greater than all already used cluster ids. If there are gaps, there will be no first not-used returned. """ cluster_ids = df_clusters.cluster_id.dropna().values if len(cluster_ids) > 0: last_cluster = max(cluster_ids) new_cluster_id = last_cluster + 1 else: new_cluster_id = 1 return new_cluster_id
'''
This is based on efficientdet's evaluator.py
https://github.com/rwightman/efficientdet-pytorch/blob/678bae1597eb083e05b033ee3eb585877282279a/effdet/evaluator.py

This altered version removes the required distributed code because Determined's
custom reducer will handle all distributed training.
'''
import torch
import abc
import json
import logging
import time
import numpy as np

from pycocotools.cocoeval import COCOeval

from .utils import FakeParser

# FIXME experimenting with speedups for OpenImages eval, it's slow
#import pyximport; py_importer, pyx_importer = pyximport.install(pyimport=True)
import effdet.evaluation.detection_evaluator as tfm_eval
#pyximport.uninstall(py_importer, pyx_importer)


class Evaluator:
    """Base class accumulating per-image detections for later evaluation.

    Detections are stored as produced by the model: one array of rows
    [x0, y0, x1, y1, score, class] per image (yxyx order when
    pred_yxyx=True), with scores in descending order.
    """

    def __init__(self, pred_yxyx=False):
        # True when boxes arrive as yxyx instead of xyxy.
        self.pred_yxyx = pred_yxyx
        self.img_indices = []
        self.predictions = []

    def add_predictions(self, detections, target):
        """Append a batch of detections keyed by target['img_idx']."""
        img_indices = target['img_idx']
        detections = detections.cpu().numpy()
        img_indices = img_indices.cpu().numpy()
        for img_idx, img_dets in zip(img_indices, detections):
            self.img_indices.append(img_idx)
            self.predictions.append(img_dets)

    def _coco_predictions(self):
        # generate coco-style predictions: xywh boxes, one dict per detection.
        # NOTE(review): mutates the stored prediction arrays in place, so this
        # must only run once per accumulation cycle.
        coco_predictions = []
        coco_ids = []
        for img_idx, img_dets in zip(self.img_indices, self.predictions):
            img_id = self._dataset.img_ids[img_idx]
            coco_ids.append(img_id)
            if self.pred_yxyx:
                # to xyxy
                img_dets[:, 0:4] = img_dets[:, [1, 0, 3, 2]]
            # to xywh
            img_dets[:, 2] -= img_dets[:, 0]
            img_dets[:, 3] -= img_dets[:, 1]
            for det in img_dets:
                score = float(det[4])
                if score < .001:  # stop when below this threshold, scores in descending order
                    break
                coco_det = dict(
                    image_id=int(img_id),
                    bbox=det[0:4].tolist(),
                    score=score,
                    category_id=int(det[5]))
                coco_predictions.append(coco_det)
        return coco_predictions, coco_ids

    @abc.abstractmethod
    def evaluate(self):
        pass


class CocoEvaluator(Evaluator):
    """Evaluator backed by pycocotools' COCOeval (reports mAP 0.5-0.95)."""

    def __init__(self, dataset, pred_yxyx=False):
        super().__init__(pred_yxyx=pred_yxyx)
        self._dataset = dataset.parser
        self.coco_api = dataset.parser.coco

    def reset(self):
        self.img_indices = []
        self.predictions = []

    def evaluate(self):
        coco_predictions, coco_ids = self._coco_predictions()
        # COCO's loadRes expects a file, so round-trip through a temp json.
        json.dump(coco_predictions, open('./temp.json', 'w'), indent=4)
        results = self.coco_api.loadRes('./temp.json')
        coco_eval = COCOeval(self.coco_api, results, 'bbox')
        coco_eval.params.imgIds = coco_ids  # score only ids we've used
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        metric = coco_eval.stats[0]  # mAP 0.5-0.95
        print ("metric res: ", metric)
        return metric


class TfmEvaluator(Evaluator):
    """ Tensorflow Models Evaluator Wrapper """

    def __init__(
            self, dataset, distributed=False, pred_yxyx=False,
            evaluator_cls=tfm_eval.ObjectDetectionEvaluator):
        # 'distributed' is kept for interface compatibility; unused here.
        super().__init__(pred_yxyx=pred_yxyx)
        self._evaluator = evaluator_cls(categories=dataset.parser.cat_dicts)
        # Name of the primary metric reported by the tfm evaluator.
        self._eval_metric_name = self._evaluator._metric_names[0]
        self._dataset = dataset.parser

    def reset(self):
        self._evaluator.clear()
        self.img_indices = []
        self.predictions = []

    def evaluate(self):
        for img_idx, img_dets in zip(self.img_indices, self.predictions):
            gt = self._dataset.get_ann_info(img_idx)
            self._evaluator.add_single_ground_truth_image_info(img_idx, gt)

            # tfm evaluators expect yxyx boxes; convert when needed.
            bbox = img_dets[:, 0:4] if self.pred_yxyx else img_dets[:, [1, 0, 3, 2]]
            det = dict(bbox=bbox, score=img_dets[:, 4], cls=img_dets[:, 5])
            self._evaluator.add_single_detected_image_info(img_idx, det)

        metrics = self._evaluator.evaluate()
        map_metric = metrics[self._eval_metric_name]
        self.reset()
        return map_metric


class PascalEvaluator(TfmEvaluator):
    """Pascal VOC detection metric."""

    def __init__(self, dataset, pred_yxyx=False):
        super().__init__(
            dataset, pred_yxyx=pred_yxyx, evaluator_cls=tfm_eval.PascalDetectionEvaluator)


class OpenImagesEvaluator(TfmEvaluator):
    """OpenImages detection metric."""

    def __init__(self, dataset, pred_yxyx=False):
        super().__init__(
            dataset, pred_yxyx=pred_yxyx, evaluator_cls=tfm_eval.OpenImagesDetectionEvaluator)


class FakeEvaluator(Evaluator):
    """Evaluator for synthetic data runs; returns a constant metric."""

    def __init__(self, dataset, pred_yxyx=False):
        super().__init__(pred_yxyx=pred_yxyx)
        self._dataset = FakeParser()

    def reset(self):
        self.img_indices = []
        self.predictions = []

    def evaluate(self):
        self._dataset.create_fake_img_ids(len(self.img_indices))
        # Confirm coco_preds still works.
        # Skip the rest in CocoEvaluator because there isn't an ann_file
        coco_predictions, coco_ids = self._coco_predictions()
        return .001  # return fake metric value


def create_evaluator(name, dataset, pred_yxyx=False, context=None):
    """Select an evaluator implementation from the dataset name (or hparams)."""
    # FIXME support OpenImages Challenge2019 metric w/ image level label consideration
    if context.get_hparam("fake_data"):
        return FakeEvaluator(dataset, pred_yxyx=pred_yxyx)
    elif 'coco' in name:
        return CocoEvaluator(dataset, pred_yxyx=pred_yxyx)
    elif 'openimages' in name:
        return OpenImagesEvaluator(dataset, pred_yxyx=pred_yxyx)
    else:
        return PascalEvaluator(dataset, pred_yxyx=pred_yxyx)
#!python
# -*- coding: utf-8 -*-
"""
Plot elevation and azimuth or a star for a given time range, e.g. an
observation night.

You may have to run "pip install astroplan astropy" to install required
libraries.
"""
from astroplan import Observer
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
from astropy import units as u
from matplotlib import pyplot as plt
from matplotlib.dates import AutoDateFormatter, AutoDateLocator

# Observing site (lon/lat in degrees, height in metres).
observer = Observer(name='Filipe?',
                    location=EarthLocation(lon=-8.365, lat=+37.132, height=100.0))

# algol
star = SkyCoord(47.04221855, 40.95564667, unit=u.deg, frame='icrs')

# start and end time of observation
t0 = Time('2017-12-06 20:00')
t1 = Time('2017-12-07 08:00')

# Sample the observation window on an evenly spaced time grid.
points = 200
span = t1 - t0
ts = Time([t0 + span * step / points for step in range(points + 1)])

# Apparent altitude/azimuth of the star at each sampled instant.
aa = observer.altaz(ts, star)

axes = plt.axes()
axes.plot(ts.plot_date, aa.az.to(u.deg), label="azimuth (degree)")
axes.plot(ts.plot_date, aa.alt.to(u.deg), label="elevation (degree)")
axes.legend()
axes.grid()

# Human-friendly date ticks on the x axis.
tick_locator = AutoDateLocator()
axes.xaxis.set_major_locator(tick_locator)
axes.xaxis.set_major_formatter(AutoDateFormatter(tick_locator))

plt.show()
"""Fitting peaks data with theoretical curve: 'A_0 + A · (t - t_0) · exp(- k · (t - t_0))'. Fitting peaks data with theoretical curve: 'A_0 + A · (t - t_0) · exp(- k · (t - t_0))' using Levenberg–Marquardt (LM) algorithm (see. https://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm). Typical usage example: import lmfit as lm # Data selection data_path = 'some/data/path' track = 24 df = lm.read_data(data_path) data = df[track] # Select required series # Fitting settings expansion = 2 fp = 200 # Theoretical curve parameters A = 7 k = 1 # Fitting curves theoretical_curves, parameters, sds, baseline = lm.fit_peaks( data, baseline=None, expansion=expansion, fp=fp, A=A, k=k) # Showing results print('Parameters: ', parameters, 'standard deviations: ', sds, 'baseline: ', baseline) # Plotting data.plot() for tc in theoretical_curves: tc.plot() """ from typing import List from typing import Tuple import numpy as np import pandas as pd from scipy import optimize from . import data_extraction as de def _theoretical_curve(solution: List[float], t: List[float]) -> List[float]: """Builds theoretical curve with the found parameters. Builds theoretical curve 'A_0 + A · (t - t_0) · exp(- k · (t - t_0))' with the found parameters `A_0`, `A`, `t_0`, `k`. Args: solution: List of an arguments `A_0`, `A`, `t_0`, `k`. t: List of times corresponding to peak times. Returns: list: Theoretical curve data. """ return solution[0] + solution[1] * (t - solution[2]) * np.exp(- solution[3] * (t - solution[2])) def _vec_curve(Y: List[float], t: List[float], roots: List[float]) -> List[float]: """Prepares curve for LM algorithm. Prepares curve 'A_0 + A · (t - t_0) · exp(- k · (t - t_0))' for LM algorithm for finding `A_0`, `A`, `t_0`, `k` parameters. Args: Y: List of an experimental data. t: List of times. roots: Initial parameters data. Returns: list: Prepared curve for LM algorithm with vectorized parameters. 
""" return Y - (roots[0] + roots[1] * (t - roots[2]) * np.exp(- roots[3] * (t - roots[2]))) def _auto_baseline(data: pd.Series): """Automatically builds a baseline as `mean(data) + std(data)`. Args: data: Initial data. Returns: float: Baseline value. """ numeric_values = list(filter(lambda value: not np.isnan(value), data)) return np.mean(numeric_values) + np.std(numeric_values) def _check_peak(A_0, A, t_0, sds, peak): """Checks peak on some conditions. Check peak with conditions: 1. A_0 / A * exp > 1. 2. t_0 > 0 3. std(sds) < 100 4. peak amplitude > 5 Args: A_0: `A_0` parameter. A: `A` parameter. t_0: `t_0` parameter. qtf: Error. peak: Peak data. Returns: bool: `True` if conditions are done, else `False`. """ peak_amplitude = np.max(peak) - np.min(peak) if (A_0 / A * np.exp(1) > 1) and (t_0 > 0) and (np.std(sds) < 100) and (peak_amplitude > 5): return True else: return False def fit_peaks( data: pd.Series, baseline: float = None, expansion: int = 2, fp: int = 200, A: float = 7, k: float = 1) -> Tuple[List[pd.Series], List[List[float]], List[List[float]], float]: """Calculates theoretical parameters from an experimental curve with help of LM algorithm. Calculates theoretical parameters from an experimental curve with help of LM algorithm and returns theoretical curves, these parameters and their standard deviations. Args: data: Initial data. baseline: Data separator line. Will be automatically calculated if `None` with std(data) + mean(data). Default: None expansion: Value at which the peaks will expand (on both directions). fp: Number of fitting points. A: `A` parameter. k: `k` parameter. Returns: List[pd.Series]: List of theoretical curves, List[List[float]]: List of calculated parameters `A_0`, `A`, `t_0`, `k`, List[List[float]]: List of standard deviations of `A_0`, `A`, `t_0`, `k`, float: Baseline value. 
""" baseline = _auto_baseline(data) if baseline is None else baseline data_above_baseline = de.baseline_cut(data, baseline) peaks = de.separate_peaks(data_above_baseline) embedded_peaks = de.add_expansions(data, peaks, expansion) theoretical_curves = [] parameters = [] sds = [] for peak in embedded_peaks: # Initial parameters A_0 = peak.iloc[0] t_0 = peak.index[0] sol = optimize.root( lambda x: _vec_curve(peak.values, peak.index, x), [A_0, A, t_0, k], method='lm') if sol.success: sol_parameters = np.abs(sol.x) sol_variances = np.diag(sol.cov_x) # variances = diag(covariance) sol_sds = np.sqrt(sol_variances) # standard_deviations = sqrt(variances) if _check_peak(A_0, A, t_0, sol_sds, peak): fitting_times = np.linspace(peak.index[0], peak.index[-1], fp) theoretical_curve_data = _theoretical_curve(sol_parameters, fitting_times) theoretical_curve_series = pd.Series(data=theoretical_curve_data, index=fitting_times) theoretical_curves.append(theoretical_curve_series) parameters.append(sol_parameters) sds.append(sol_sds) return theoretical_curves, parameters, sds, baseline
import numpy as np

from bokeh.plotting import figure, output_file, show

# Render the plot to a standalone HTML file.
output_file("image.html", title="image.py example")

# Sample sin(x)*cos(y) on a 250 x 250 grid over [0, 10] x [0, 10].
axis_x = np.linspace(0, 10, 250)
axis_y = np.linspace(0, 10, 250)
grid_x, grid_y = np.meshgrid(axis_x, axis_y)
values = np.sin(grid_x) * np.cos(grid_y)

plot = figure(width=400, height=400)
plot.x_range.range_padding = 0
plot.y_range.range_padding = 0

# Draw the scalar field as an image glyph mapped through a palette.
plot.image(image=[values], x=0, y=0, dw=10, dh=10, palette="Sunset11", level="image")
plot.grid.grid_line_width = 0.5

show(plot)
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau
import tensorflow.keras.backend as K
import numpy as np
import pickle
import json

from data.data import load_data
from modeling.model import build_model
from modeling.helpers import load_config, make_dir
from modeling.prediction import make_predict
from modeling.key2video import create_img_video

"""
Set callbacks and execute training process.
"""


class MyCallback(tf.keras.callbacks.Callback):
    """
    Set the custom callback class to print learning rate every 5 epochs
    and save model and prediction every 10 epochs.
    (Given periods are just for sample data(train, dev, test, each set has 5).
    Freely change values as you want.)

    :method __init__: call required parameter
    :method on_epoch_end: execute at the end of each epoch
    """

    def __init__(self, name, cfg, X, y, decoder_input_array, output_file,
                 output_gloss, output_skels):
        """
        Call required parameters.

        :param name: label for this callback (kept for bookkeeping, not used later)
        :param cfg: configuration dictionary
        :param X: X_dev from data.py
        :param y: y_dev from data.py
        :param decoder_input_array: decoder_input_array_dev from data.py
        :param output_file: output_file_dev
        :param output_gloss: output_gloss_dev
        :param output_skels: output_skels_dev
        """
        super().__init__()  # inheritance from tf.keras.callbacks.Callback
        # additional params
        self.cfg = cfg
        self.X = X
        self.y = y
        self.decoder_input_array = decoder_input_array
        self.output_file = output_file
        self.output_gloss = output_gloss
        self.output_skels = output_skels

    def on_epoch_end(self, epoch, logs=None):
        """
        Executed at the end of every epoch: logs the learning rate every 5
        epochs, and saves the model plus a dev-set prediction every 10 epochs.
        """
        # Print learning rate every 5 epochs
        if epoch > 0 and epoch % 5 == 0:
            lr = float(tf.keras.backend.get_value(self.model.optimizer.lr))
            print('learning rate : ', lr)
        # Save model and prediction every 10 epochs
        if epoch > 0 and epoch % 10 == 0:
            model_path = self.cfg["model_path"]
            self.model.save(model_path+"model.h5")
            result_path = self.cfg["result_path"]
            make_predict(self.cfg, self.model, self.X, self.y,
                         self.decoder_input_array, self.output_file,
                         self.output_gloss, self.output_skels,
                         result_path, epoch, best=False)


def Train(cfg: dict) -> None:
    """
    Execute train process with the base configs.

    :param cfg: configuration dictionary (Base.yaml)
    """
    # Load train, dev data
    X_train, y_train, decoder_input_array_train, mel_spectro_data_array_train, max_X, vocab_size_source = load_data(cfg=cfg, mode="train")
    X_dev, y_dev, decoder_input_array_dev, mel_spectro_data_array_dev = load_data(cfg=cfg, mode="dev")
    print("---------------------------------------------------")
    print("Complete: Load train, dev data")
    print("---------------------------------------------------")

    # Make result directories
    model_path = cfg["model_path"]
    make_dir(model_path)  # "./Models/"
    result_path = cfg["result_path"]
    make_dir(result_path)  # "./Models/result/"
    print("---------------------------------------------------")
    print("Complete: Make result directories")
    print("---------------------------------------------------")

    # Save real json, img, video before training (ground-truth references)
    json_path = result_path + "json/"
    make_dir(json_path)  # "./Models/result/json/"
    img_path = result_path + "img_video/"
    make_dir(img_path)  # "./Models/result/img_video/"
    data_path = cfg["data_path"]
    with open(data_path + 'out_files_dev' +'.pickle', 'rb') as f:
        output_file = pickle.load(f)
    with open(data_path + 'out_gloss_dev' +'.pickle', 'rb') as f:
        output_gloss = pickle.load(f)
    with open(data_path + 'out_skels_dev' +'.pickle', 'rb') as f:
        output_skels = pickle.load(f)
    real_json_path = json_path + 'real/'
    make_dir(real_json_path)
    real_img_path = img_path + 'real/'
    make_dir(real_img_path)
    for i in range(len(X_dev)):
        # output_skels[i] holds the real sequence length; truncate padding.
        leng = output_skels[i]
        real = y_dev[i].tolist()[:leng]
        filename = str(output_file[i]) + '_' + str(output_gloss[i]) + '_real' + '.json'
        with open(real_json_path + filename, 'w', encoding='utf-8') as make_file:
            json.dump(real, make_file, indent="\t")
        # make img & video
        create_img_video(real_json_path, real_img_path, filename)
    print("---------------------------------------------------")
    print("Complete: Save real json, img and video files")
    print("---------------------------------------------------")

    # Build the tacotron model
    model = build_model(cfg=cfg, max_X=max_X, vocab_size_source=vocab_size_source)
    print("---------------------------------------------------")
    print("Complete: Build model")
    print("---------------------------------------------------")

    # Set Optimizer(Adam) and Loss(MSE)
    opt = Adam()
    model.compile(optimizer=opt, loss=['mean_squared_error', 'mean_squared_error'])  # original was 'mean_absolute_error'

    # Set Callback options
    ### callback1: customized callback (periodic model save + prediction; see MyCallback)
    first_callback = MyCallback('save_jsonfile', cfg, X_dev, y_dev,
                                decoder_input_array_dev, output_file,
                                output_gloss, output_skels)
    ### callback2: best model save (update best_model.h5 every 10 epochs)
    best_path = model_path + "best_model.h5"
    best_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=best_path, monitor='val_loss', save_best_only=True,
        save_weights_only=False, verbose=1, period=10)
    ### callback3: learning rate scheduler (reduce LR by `decrease_factor`
    ### when val_loss has not improved for `patience` epochs)
    patience = cfg["training"].get("patience", 10)
    decrease_factor = cfg["training"].get("decrease_factor", 0.2)
    min_LR = cfg["training"].get("min_LR", 0.00001)
    reduceLR = ReduceLROnPlateau(
        monitor='val_loss', factor=decrease_factor, patience=patience, min_lr=min_LR)
    ### (optional callback)
    # 1. early stopping
    #early_stopping = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta=0, patience = 20)

    print("---------------------------------------------------")
    print("Start training!")
    print("---------------------------------------------------")

    # Fit Model
    batch_size = cfg["training"].get("batch_size", 2)
    epochs = cfg["training"].get("epoch", 100)
    train_history = model.fit([X_train, decoder_input_array_train],
                              mel_spectro_data_array_train,
                              epochs=epochs,
                              batch_size=batch_size,
                              shuffle=False,
                              verbose=1,
                              validation_data=([X_dev, decoder_input_array_dev],
                                               mel_spectro_data_array_dev),
                              callbacks = [first_callback, best_callback, reduceLR])  # total 3 callbacks

    print("---------------------------------------------------")
    print("Finish Training! Save the last model and prediction.")
    print("---------------------------------------------------")

    # Save the last Model and prediction
    model.save(model_path + 'model.h5')
    make_predict(cfg, model, X_dev, y_dev, decoder_input_array_dev,
                 output_file, output_gloss, output_skels,
                 result_path, epochs, best=False)
    print("---------------------------------------------------")
    print("Congrats! All works well~!")
    print("---------------------------------------------------")
import pandas as pd
import numpy as np

# Demo of pandas display options: max_rows, expand_frame_repr, max_colwidth,
# precision and chop_threshold.
# NOTE: full option paths ("display.max_rows") are used throughout — the
# partial shorthand ("max_rows") is deprecated in modern pandas.

print(pd.options.display.max_rows)  # by default it is 60
pd.options.display.max_rows = 5
print(pd.options.display.max_rows)  # now it is 5

df = pd.read_csv('/media/nahid/New Volume/GitHub/Pandas/sample.csv')
print(df)
'''
     company  numEmps category  ... state fundedDate  raisedAmt
0   LifeLock      NaN      web  ...    AZ   1-May-07    6850000
1   LifeLock      NaN      web  ...    AZ   1-Oct-06    6000000
..       ...      ...      ...  ...   ...        ...        ...
97    MeeVee      NaN      web  ...    CA   1-Feb-06    6500000
98    MeeVee      NaN      web  ...    CA   1-Aug-06    8000000
'''
pd.options.display.max_rows = 100
print(df)  # prints all the rows in the sample.csv data file

# *************pd.get_option(arg), set_option(arg, val)*******************
a = pd.get_option("display.max_rows")
print(a)  # 100
pd.set_option("display.max_rows", 20)
a = pd.get_option("display.max_rows")
print(a)  # 20

# *******************Example********************************************
df = pd.DataFrame(np.random.randn(10, 5))
print(df)
print(df.shape)
pd.set_option("display.max_rows", 5)
print(df)
'''
          0         1         2         3         4
0 -0.957296  0.779242 -1.625559  2.116592 -0.269248
1  0.109035 -0.003971 -0.746726 -1.271288 -0.643878
..      ...       ...       ...       ...       ...
8 -0.550164  0.972242  2.426685  0.408818 -0.136869
9 -0.472941 -0.624765  0.228406 -0.368229  0.101187
'''
# Only 4 data rows are shown because the rows are spread evenly here.

# ***************Stretching the DataFrame across pages*****************
df = pd.DataFrame(np.random.randn(5, 7))
print(df)
'''
          0         1         2  ...         9        10        11
0  0.017573  0.533890 -1.039920  ...  1.055588  0.230688 -1.185961
1  0.994916  1.730381 -0.265662  ... -0.637594 -0.468516 -1.197159
2 -0.470152 -0.702236 -0.249671  ...  0.956581 -1.167124 -0.775214
3 -0.113243  0.110624  0.822606  ...  1.375379 -0.564423  0.292864
4 -0.681015 -0.001743  0.170444  ...  0.387591 -0.009591 -0.263648
'''
'''display.expand_frame_repr allows the representation of dataframes to
stretch across pages, wrapped over the full columns vs row-wise'''
# This is a boolean option, so set it with True/False (it was previously
# assigned the integer 15, which only worked by truthiness).
pd.set_option("display.expand_frame_repr", True)
print(df)

# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.set_option.html
pd.set_option('display.max_colwidth', 6)
people = {"Name": pd.Series(["Nahid Hassan", "Rafi", "Meem"]),
          "Age": pd.Series([21, 22, 21]),
          "Weight": pd.Series([48, 75, 76]),
          "Height": pd.Series([5.3, 5.8, 5.6])}
df = pd.DataFrame(people)
print(df, "\n")
'''
    Name  Age  Weight  Height
0  Na...   21      48     5.3
1   Rafi   22      75     5.8
2   Meem   21      76     5.6
'''
pd.set_option('display.max_colwidth', 13)
print(df, "\n")
'''
           Name  Age  Weight  Height
0  Nahid Hassan   21      48     5.3
1          Rafi   22      75     5.8
2          Meem   21      76     5.6
'''

# *****************use reset method********************
pd.reset_option('display.max_rows')
pd.reset_option('display.expand_frame_repr')
pd.reset_option('display.max_colwidth')

pd.set_option('display.precision', 3)
df = pd.DataFrame(np.random.randn(5, 4))
print(df)
'''
       0      1      2      3
0 -0.213 -0.518  0.305  0.462
1 -0.812  1.295 -0.891 -1.596
2 -0.511  0.602 -0.174 -0.617
3  0.438 -0.863 -0.318  0.494
4 -0.348  0.584  0.083  0.365
'''

# ***********Setting the threshold value below which numbers will be set to zero***********
pd.set_option('display.chop_threshold', 2)
print(df)
'''
     0    1    2    3
0  0.0  0.0  0.0  0.0
1  0.0  0.0  0.0  0.0
2  0.0  0.0  0.0  0.0
3  0.0  0.0  0.0  0.0
4  0.0  0.0  0.0  0.0
'''
pd.reset_option('display.chop_threshold')
print(df)
'''
       0      1      2      3
0  1.195  1.842  0.851 -0.949
1 -1.089 -1.170  1.841 -0.593
2  0.735 -0.322 -1.021 -0.079
3  0.721  1.008  1.689 -0.579
4 -0.858 -0.244  0.758  0.304
'''
""" Pre-training Bidirectional Encoder Representations from Transformers ========================================================================================= This example shows how to pre-train a BERT model with Gluon NLP Toolkit. @article{devlin2018bert, title={BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding}, author={Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina}, journal={arXiv preprint arXiv:1810.04805}, year={2018} } """ # coding: utf-8 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# pylint:disable=redefined-outer-name,logging-format-interpolation

import os
import argparse
import random
import logging
import glob
import time

import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.data import DataLoader
import gluonnlp as nlp
from gluonnlp.utils import Parallelizable, Parallel
from gluonnlp.metric import MaskedAccuracy
from gluonnlp.model import get_model
from gluonnlp.data.batchify import Tuple, Stack, Pad
from gluonnlp.data import SimpleDatasetStream, FixedBucketSampler, NumpyDataset

from utils import profile
from fp16_utils import FP16Trainer

# Command-line interface for the pretraining run.
parser = argparse.ArgumentParser(description='BERT pretraining example.')
parser.add_argument('--num_steps', type=int, default=20,
                    help='Number of optimization steps')
parser.add_argument('--num_buckets', type=int, default=1,
                    help='Number of buckets for variable length sequence sampling')
parser.add_argument('--dtype', type=str, default='float32', help='data dtype')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size per GPU.')
parser.add_argument('--accumulate', type=int, default=1,
                    help='Number of batches for gradient accumulation.')
parser.add_argument('--batch_size_eval', type=int, default=8,
                    help='Batch size per GPU for evaluation.')
parser.add_argument('--dataset_name', type=str, default='book_corpus_wiki_en_uncased',
                    help='The dataset from which the vocabulary is created. '
                         'Options include book_corpus_wiki_en_uncased, book_corpus_wiki_en_cased. '
                         'Default is book_corpus_wiki_en_uncased')
parser.add_argument('--model', type=str, default='bert_12_768_12',
                    help='Pre-trained model to run fine-tuning on.')
parser.add_argument('--pretrained', action='store_true',
                    help='Load the a pre-trained BERT model.')
parser.add_argument('--data', type=str, default=None, help='Path to training data.')
parser.add_argument('--data_eval', type=str, default=None,
                    help='Path to evaluation data.')
parser.add_argument('--ckpt_dir', type=str, required=True,
                    help='Path to checkpoint directory')
parser.add_argument('--start_step', type=int, default=0,
                    help='Start optimization step from the checkpoint.')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
parser.add_argument('--warmup_ratio', type=float, default=0.1,
                    help='ratio of warmup steps used in NOAM\'s stepsize schedule')
parser.add_argument('--log_interval', type=int, default=10, help='Report interval')
parser.add_argument('--ckpt_interval', type=int, default=250000,
                    help='Checkpoint interval')
parser.add_argument('--gpus', type=str, default='0',
                    help='List of GPUs to use. e.g. 1,3')
parser.add_argument('--kvstore', type=str, default='device', help='KVStore type')
parser.add_argument('--seed', type=int, default=0, help='Random seed')
parser.add_argument('--verbose', action='store_true', help='verbose logging')
parser.add_argument('--profile', action='store_true', help='profile the program')
parser.add_argument('--by-token', action='store_true',
                    help='set batch size by the number of tokens in the batch')
parser.add_argument('--eval_only', action='store_true',
                    help='Only run the evaluation')
args = parser.parse_args()

# Use a tree topology for multi-GPU kvstore communication.
os.environ['MXNET_KVSTORE_USETREE'] = '1'

# logging
level = logging.DEBUG if args.verbose else logging.INFO
logging.getLogger().setLevel(level)
logging.info(args)


def load_model(ctx):
    """get model

    Builds (or loads) the BERT model on the given context(s), optionally
    restoring parameters from the checkpoint at --start_step, and returns
    the model together with the NSP/MLM losses and the vocabulary.
    """
    # model
    pretrained = args.pretrained
    dataset = args.dataset_name
    model, vocabulary = get_model(args.model, dataset_name=dataset,
                                  pretrained=pretrained, ctx=ctx)
    if not pretrained:
        model.initialize(init=mx.init.Normal(0.02), ctx=ctx)
    if args.ckpt_dir and args.start_step:
        # Cast the model in case we're loading a fine-tuned float16 model.
        model.cast(args.dtype)
        param_path = os.path.join(args.ckpt_dir, '%07d.params'%args.start_step)
        model.load_parameters(param_path, ctx=ctx)
        logging.info('Loading step %d checkpoints from %s.', args.start_step, param_path)
    model.cast(args.dtype)
    model.hybridize(static_alloc=True)
    # losses: next-sentence prediction and masked language model
    nsp_loss = gluon.loss.SoftmaxCELoss()
    mlm_loss = gluon.loss.SoftmaxCELoss()
    nsp_loss.hybridize(static_alloc=True)
    mlm_loss.hybridize(static_alloc=True)
    return model, nsp_loss, mlm_loss, vocabulary


def get_dataset(data, batch_size, num_ctxes, is_train, store):
    """create dataset

    Returns a stream of DataLoaders, one per dataset shard. Shards are
    partitioned across distributed workers via SplitSampler.
    """
    data = data
    split_sampler = nlp.data.SplitSampler(len(glob.glob(data)),
                                          num_parts=store.num_workers,
                                          part_index=store.rank)
    stream = nlp.data.PrefetchingStream(SimpleDatasetStream(NumpyDataset, data, split_sampler))

    def get_dataloader(dataset):
        """create data loader based on the dataset chunk"""
        t0 = time.time()
        lengths = dataset.get_field('valid_lengths')
        logging.debug('Num samples = %d', len(lengths))
        # A batch includes: input_id, masked_id, masked_position, masked_weight,
        # next_sentence_label, segment_id, valid_length
        batchify_fn = Tuple(Pad(), Pad(), Pad(), Pad(), Stack(), Pad(), Stack())
        if args.by_token:
            # sharded data loader
            sampler = nlp.data.FixedBucketSampler(lengths=lengths,
                                                  # batch_size per shard
                                                  batch_size=batch_size,
                                                  num_buckets=args.num_buckets,
                                                  shuffle=is_train,
                                                  use_average_length=True,
                                                  num_shards=num_ctxes)
            dataloader = nlp.data.ShardedDataLoader(dataset,
                                                    batch_sampler=sampler,
                                                    batchify_fn=batchify_fn,
                                                    num_workers=num_ctxes)
            logging.debug('Batch Sampler:\n%s', sampler.stats())
        else:
            sampler = FixedBucketSampler(lengths,
                                         batch_size=batch_size * num_ctxes,
                                         num_buckets=args.num_buckets,
                                         ratio=0,
                                         shuffle=is_train)
            dataloader = DataLoader(dataset=dataset,
                                    batch_sampler=sampler,
                                    batchify_fn=batchify_fn,
                                    num_workers=1)
            logging.debug('Batch Sampler:\n%s', sampler.stats())
        t1 = time.time()
        logging.debug('Dataloader creation cost = %.2f s', t1 - t0)
        return dataloader

    stream = stream.transform(get_dataloader)
    return stream


def split_and_load(arrs, ctx):
    """split and load arrays to a list of contexts"""
    assert isinstance(arrs, (list, tuple))
    if len(ctx) == 1:
        return [[arr.as_in_context(ctx[0]) for arr in arrs]]
    else:
        # split and load
        loaded_arrs = [gluon.utils.split_and_load(arr, ctx, even_split=False)
                       for arr in arrs]
        return zip(*loaded_arrs)


def forward(data, model, mlm_loss, nsp_loss, vocab_size):
    """forward computation for evaluation

    Computes the combined MLM + NSP loss for one batch shard and returns the
    intermediate tensors needed for the accuracy metrics.
    """
    (input_id, masked_id, masked_position, masked_weight, \
     next_sentence_label, segment_id, valid_length) = data
    # +1e-8 avoids division by zero when a batch happens to have no masks.
    num_masks = masked_weight.sum() + 1e-8
    valid_length = valid_length.reshape(-1)
    masked_id = masked_id.reshape(-1)
    valid_length_typed = valid_length.astype(args.dtype, copy=False)
    _, _, classified, decoded = model(input_id, segment_id, valid_length_typed,
                                      masked_position)
    decoded = decoded.reshape((-1, vocab_size))
    # Losses are computed in float32 regardless of the model dtype.
    ls1 = mlm_loss(decoded.astype('float32', copy=False),
                   masked_id, masked_weight.reshape((-1, 1)))
    ls2 = nsp_loss(classified.astype('float32', copy=False), next_sentence_label)
    ls1 = ls1.sum() / num_masks
    ls2 = ls2.mean()
    ls = ls1 + ls2
    return ls, next_sentence_label, classified, masked_id, decoded, \
           masked_weight, ls1, ls2, valid_length.astype('float32', copy=False)


class ParallelBERT(Parallelizable):
    """Data parallel BERT model.

    Parameters
    ----------
    model : Block
        The BERT model.
    """
    def __init__(self, model, mlm_loss, nsp_loss, vocab_size, rescale_factor,
                 trainer=None):
        self._model = model
        self._mlm_loss = mlm_loss
        self._nsp_loss = nsp_loss
        self._vocab_size = vocab_size
        # Loss is divided by this factor (workers * accumulation steps) so
        # accumulated gradients average correctly.
        self._rescale_factor = rescale_factor
        self._trainer = trainer

    def forward_backward(self, x):
        """forward backward implementation"""
        with mx.autograd.record():
            (ls, next_sentence_label, classified, masked_id, decoded, \
             masked_weight, ls1, ls2, valid_length) = forward(x, self._model,
                                                              self._mlm_loss,
                                                              self._nsp_loss,
                                                              self._vocab_size)
            ls = ls / self._rescale_factor
        if args.dtype == 'float16':
            # FP16Trainer handles loss scaling for the backward pass.
            self._trainer.backward(ls)
        else:
            ls.backward()
        return ls, next_sentence_label, classified, masked_id, decoded, \
               masked_weight, ls1, ls2, valid_length


def evaluate(data_eval, model, nsp_loss, mlm_loss, vocab_size, ctx):
    """Evaluation function."""
    mlm_metric = MaskedAccuracy()
    nsp_metric = MaskedAccuracy()
    mlm_metric.reset()
    nsp_metric.reset()

    eval_begin_time = time.time()
    begin_time = time.time()
    step_num = 0
    # Total loss for the whole dataset
    total_mlm_loss = total_nsp_loss = 0
    # Running loss, reset when a log is emitted
    running_mlm_loss = running_nsp_loss = 0
    running_num_tks = 0
    for _, dataloader in enumerate(data_eval):
        for _, data in enumerate(dataloader):
            step_num += 1

            data_list = split_and_load(data, ctx)
            loss_list = []
            ns_label_list, ns_pred_list = [], []
            mask_label_list, mask_pred_list, mask_weight_list = [], [], []

            # Run inference on the batch, collect the predictions and losses
            batch_mlm_loss = batch_nsp_loss = 0
            for data in data_list:
                out = forward(data, model, mlm_loss, nsp_loss, vocab_size)
                (ls, next_sentence_label, classified, masked_id,
                 decoded, masked_weight, ls1, ls2, valid_length) = out
                loss_list.append(ls)
                ns_label_list.append(next_sentence_label)
                ns_pred_list.append(classified)
                mask_label_list.append(masked_id)
                mask_pred_list.append(decoded)
                mask_weight_list.append(masked_weight)
                batch_mlm_loss += ls1.as_in_context(mx.cpu())
                batch_nsp_loss += ls2.as_in_context(mx.cpu())
                running_num_tks += valid_length.sum().as_in_context(mx.cpu())
            running_mlm_loss += batch_mlm_loss
            running_nsp_loss += batch_nsp_loss
            total_mlm_loss += batch_mlm_loss
            total_nsp_loss += batch_nsp_loss
            nsp_metric.update(ns_label_list, ns_pred_list)
            mlm_metric.update(mask_label_list, mask_pred_list, mask_weight_list)

            # Log and reset running loss
            if (step_num + 1) % (args.log_interval) == 0:
                log(begin_time, running_num_tks, running_mlm_loss,
                    running_nsp_loss, step_num, mlm_metric, nsp_metric, None)
                begin_time = time.time()
                running_mlm_loss = running_nsp_loss = running_num_tks = 0
                mlm_metric.reset_local()
                nsp_metric.reset_local()
    mx.nd.waitall()
    eval_end_time = time.time()
    total_mlm_loss /= step_num
    total_nsp_loss /= step_num
    logging.info('mlm_loss={:.3f}\tmlm_acc={:.1f}\tnsp_loss={:.3f}\tnsp_acc={:.1f}\t'
                 .format(total_mlm_loss.asscalar(), mlm_metric.get_global()[1] * 100,
                         total_nsp_loss.asscalar(), nsp_metric.get_global()[1] * 100))
    logging.info('Eval cost={:.1f}s'.format(eval_end_time - eval_begin_time))


def log(begin_time, running_num_tks, running_mlm_loss, running_nsp_loss,
        step_num, mlm_metric, nsp_metric, trainer):
    # Emit a progress line with losses, accuracies, token throughput and LR.
    # trainer may be None (evaluation), in which case LR is reported as 0.
    end_time = time.time()
    duration = end_time - begin_time
    throughput = running_num_tks / duration / 1000.0
    running_mlm_loss = running_mlm_loss / args.log_interval
    running_nsp_loss = running_nsp_loss / args.log_interval
    lr = trainer.learning_rate if trainer else 0
    # pylint: disable=line-too-long
    logging.info('[step {}]\tmlm_loss={:.5f}\tmlm_acc={:.5f}\tnsp_loss={:.5f}\tnsp_acc={:.3f}\tthroughput={:.1f}K tks/s\tlr={:.7f} time={:.2f}'
                 .format(step_num, running_mlm_loss.asscalar(), mlm_metric.get()[1] * 100,
                         running_nsp_loss.asscalar(), nsp_metric.get()[1] * 100,
                         throughput.asscalar(), lr, duration))
    # pylint: enable=line-too-long


def save_params(step_num, args, model, trainer):
    # Persist model parameters and optimizer state for this step.
    param_path = os.path.join(args.ckpt_dir, '%07d.params'%step_num)
    trainer_path = os.path.join(args.ckpt_dir, '%07d.states'%step_num)
    logging.info('[step %d] Saving checkpoints to %s, %s.',
                 step_num, param_path, trainer_path)
    model.save_parameters(param_path)
    trainer.save_states(trainer_path)


def train(data_train, model, nsp_loss, mlm_loss, vocab_size, ctx, store):
    """Training function."""
    mlm_metric = MaskedAccuracy()
    nsp_metric = MaskedAccuracy()
    mlm_metric.reset()
    nsp_metric.reset()

    lr = args.lr
    optim_params = {'learning_rate': lr, 'epsilon': 1e-6, 'wd': 0.01}
    if args.dtype == 'float16':
        optim_params['multi_precision'] = True

    trainer = gluon.Trainer(model.collect_params(), 'bertadam', optim_params,
                            update_on_kvstore=False, kvstore=store)
    dynamic_loss_scale = args.dtype == 'float16'
    fp16_trainer = FP16Trainer(trainer, dynamic_loss_scale=dynamic_loss_scale)

    if args.ckpt_dir and args.start_step:
        state_path = os.path.join(args.ckpt_dir, '%07d.states' % args.start_step)
        logging.info('Loading trainer state from %s', state_path)
        trainer.load_states(state_path)

    accumulate = args.accumulate
    num_train_steps = args.num_steps
    warmup_ratio = args.warmup_ratio
    num_warmup_steps = int(num_train_steps * warmup_ratio)
    params = [p for p in model.collect_params().values() if p.grad_req != 'null']

    # Do not apply weight decay on LayerNorm and bias terms
    for _, v in model.collect_params('.*beta|.*gamma|.*bias').items():
        v.wd_mult = 0.0
    # Accumulate gradients across batches instead of overwriting them.
    for p in params:
        p.grad_req = 'add'

    train_begin_time = time.time()
    begin_time = time.time()
    local_mlm_loss = 0
    local_nsp_loss = 0
    local_num_tks = 0
    batch_num = 0
    step_num = args.start_step

    parallel_model = ParallelBERT(model, mlm_loss, nsp_loss, vocab_size,
                                  store.num_workers * accumulate,
                                  trainer=fp16_trainer)
    num_ctxes = len(ctx)
    parallel = Parallel(num_ctxes, parallel_model)

    while step_num < num_train_steps:
        for _, dataloader in enumerate(data_train):
            if step_num >= num_train_steps:
                break
            for _, data_batch in enumerate(dataloader):
                if step_num >= num_train_steps:
                    break
                if batch_num % accumulate == 0:
                    step_num += 1
                    # zero grad
                    model.collect_params().zero_grad()
                    # update learning rate: linear warmup then linear decay
                    if step_num <= num_warmup_steps:
                        new_lr = lr * step_num / num_warmup_steps
                    else:
                        offset = lr * step_num / num_train_steps
                        new_lr = lr - offset
                    trainer.set_learning_rate(new_lr)
                    if args.profile:
                        profile(step_num, 10, 12)
                if args.by_token:
                    # Sharded loader already split the batch per context.
                    data_list = [[seq.as_in_context(context) for seq in shard]
                                 for context, shard in zip(ctx, data_batch)]
                else:
                    # Skip batches too small to split across all contexts.
                    if data_batch[0].shape[0] < len(ctx):
                        continue
                    data_list = split_and_load(data_batch, ctx)

                ns_label_list, ns_pred_list = [], []
                mask_label_list, mask_pred_list, mask_weight_list = [], [], []

                # parallel forward / backward
                for data in data_list:
                    parallel.put(data)
                for _ in range(len(ctx)):
                    (_, next_sentence_label, classified, masked_id,
                     decoded, masked_weight, ls1, ls2, valid_length) = parallel.get()
                    ns_label_list.append(next_sentence_label)
                    ns_pred_list.append(classified)
                    mask_label_list.append(masked_id)
                    mask_pred_list.append(decoded)
                    mask_weight_list.append(masked_weight)
                    local_mlm_loss += ls1.as_in_context(mx.cpu()) / num_ctxes
                    local_nsp_loss += ls2.as_in_context(mx.cpu()) / num_ctxes
                    local_num_tks += valid_length.sum().as_in_context(mx.cpu())

                # update: step only at accumulation boundaries
                if (batch_num + 1) % accumulate == 0:
                    fp16_trainer.step(1, max_norm=1)
                nsp_metric.update(ns_label_list, ns_pred_list)
                mlm_metric.update(mask_label_list, mask_pred_list, mask_weight_list)

                # logging
                if (step_num + 1) % (args.log_interval) == 0 and (batch_num + 1) % accumulate == 0:
                    log(begin_time, local_num_tks, local_mlm_loss / accumulate,
                        local_nsp_loss / accumulate, step_num,
                        mlm_metric, nsp_metric, trainer)
                    begin_time = time.time()
                    local_mlm_loss = local_nsp_loss = local_num_tks = 0
                    mlm_metric.reset_local()
                    nsp_metric.reset_local()

                # saving checkpoints
                if args.ckpt_dir and (step_num + 1) % (args.ckpt_interval) == 0 \
                   and (batch_num + 1) % accumulate == 0:
                    save_params(step_num, args, model, trainer)

                batch_num += 1
    save_params(step_num, args, model, trainer)
    mx.nd.waitall()
    train_end_time = time.time()
    logging.info('Train cost={:.1f}s'.format(train_end_time - train_begin_time))


if __name__ == '__main__':
    # random seed
    seed = args.seed
    np.random.seed(seed)
    random.seed(seed)
    mx.random.seed(seed)

    ctx = [mx.cpu()] if args.gpus is None or args.gpus == '' else \
          [mx.gpu(int(x)) for x in args.gpus.split(',')]

    model, nsp_loss, mlm_loss, vocabulary = load_model(ctx)

    store = mx.kv.create(args.kvstore)

    if args.ckpt_dir:
        ckpt_dir = os.path.expanduser(args.ckpt_dir)
        if not os.path.exists(ckpt_dir):
            os.makedirs(ckpt_dir)

    if not args.eval_only:
        if args.data:
            logging.info('Using training data at {}'.format(args.data))
            data_train = get_dataset(args.data, args.batch_size, len(ctx), True, store)
            train(data_train, model, nsp_loss, mlm_loss, len(vocabulary), ctx, store)
    if args.data_eval:
        logging.info('Using evaluation data at {}'.format(args.data_eval))
        data_eval = get_dataset(args.data_eval, args.batch_size_eval, len(ctx), False, store)
        evaluate(data_eval, model, nsp_loss, mlm_loss, len(vocabulary), ctx)
import networkx as nx
import itertools
import math
import random


def empty_graph(num_nodes):
    """Build a graph with nodes 0..num_nodes-1 and no edges."""
    graph = nx.Graph()
    graph.add_nodes_from(range(num_nodes))
    return graph


def complete_graph(num_nodes):
    """Build the complete graph on num_nodes nodes."""
    graph = empty_graph(num_nodes)
    all_pairs = itertools.combinations(range(num_nodes), 2)
    graph.add_edges_from(all_pairs)
    return graph


def random_graph(num_nodes, p):
    """Sample a G(n, p) random graph using geometric edge skipping.

    Each possible edge is present independently with probability p; the
    geometric-jump formulation makes the runtime O(n + m) rather than O(n^2).
    """
    graph = empty_graph(num_nodes)
    if p <= 0:
        return graph
    if p >= 1:
        return complete_graph(num_nodes)

    node_count = num_nodes
    log_q = math.log(1.0 - p)  # log of the per-edge "absent" probability
    candidate = -1             # lower endpoint of the next candidate edge
    # Nodes in graph are from 0..n-1 (start with the second node index).
    current = 1
    while current < node_count:
        # Draw a geometric skip length over the candidate-edge sequence.
        log_r = math.log(1.0 - random.random())
        candidate = candidate + 1 + int(log_r / log_q)
        # Carry the overflow into subsequent rows of the candidate matrix.
        while candidate >= current and current < node_count:
            candidate = candidate - current
            current = current + 1
        if current < node_count:
            graph.add_edge(current, candidate)
    return graph


def e_r(in_graph, config):
    """
    E-R graph generator
    :param in_graph: referenced graph, type: nx.Graph
    :param config: configure object
    :return: generated graphs, type: list of nx.Graph
    """
    num_edges = in_graph.number_of_edges()
    num_nodes = in_graph.number_of_nodes()
    # Match the reference graph's edge density.
    edge_probability = num_edges / (num_nodes * (num_nodes - 1) / 2)
    return [random_graph(num_nodes, edge_probability)
            for _ in range(config.num_gen)]
import sys

import numpy as np

from .__about__ import __copyright__, __version__
from .main import Mapper


def main(argv=None):
    """CLI entry point: map a field from a source mesh onto a target mesh.

    Reads both meshes with meshio, runs the Mapper transfer, and writes the
    result in a format chosen from the output file name.
    """
    # Parse command line arguments.
    parser = _get_parser()
    args = parser.parse_args(argv)

    # Imported lazily so `--help`/`--version` work without meshio installed.
    import meshio

    mapper = Mapper(verbose=args.verbose)
    mesh_source = meshio.read(args.mesh_source)
    mesh_target = meshio.read(args.mesh_target)
    mapper.prepare(mesh_source, mesh_target, args.method, args.intersection_type)
    res = mapper.transfer(args.field_name, args.nature)

    # Dispatch on the output name. NOTE(review): these are substring checks,
    # not extension checks — "out.txt.bak" would match ".txt"; confirm intended.
    if ".txt" in args.outfile:
        np.savetxt(args.outfile, res.array())
    elif ".npy" in args.outfile:
        np.save(args.outfile, res.array())
    elif ".vtu" in args.outfile:
        res.export_vtk(args.outfile)
    else:
        # Fall back to writing a full mesh in any meshio-supported format.
        mesh_target = res.mesh_meshio()
        meshio.write(args.outfile, mesh_target)
    return


def _get_parser():
    """Build the argparse parser for the pymapping command line."""
    import argparse

    parser = argparse.ArgumentParser(
        description=("Mapping finite element data between meshes"),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "mesh_source", type=str, help="meshio-compatible source mesh file"
    )
    parser.add_argument(
        "mesh_target", type=str, help="meshio-compatible target mesh file"
    )
    parser.add_argument(
        "field_name",
        type=str,
        help="field defined in the source mesh to transfer to the target mesh",
    )
    parser.add_argument(
        "outfile",
        type=str,
        help="file to store mapped data: .txt, .npy or meshio-compatible mesh",
    )
    parser.add_argument(
        "--method",
        type=str,
        choices=["P1P1", "P1P0", "P0P1", "P0P0"],
        default="P1P1",
        help="mapping method",
    )
    parser.add_argument("--intersection_type", type=str, help="intersection algorithm")
    parser.add_argument(
        "--nature",
        type=str,
        default="IntensiveMaximum",
        help="physical nature of the field",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        default=False,
        help="increase output verbosity",
    )
    version_text = "\n".join(
        [
            "pymapping {} [Python {}.{}.{}]".format(
                __version__,
                sys.version_info.major,
                sys.version_info.minor,
                sys.version_info.micro,
            ),
            __copyright__,
        ]
    )
    parser.add_argument(
        "--version",
        "-v",
        action="version",
        version=version_text,
        help="display version information",
    )
    return parser
import gym
import grid_game_env
import numpy as np
import os
import sys

sys.path.append('../../core/q_learning/')
import q_table_learning

# Environment selection: CliffWalking is active; the FrozenLake variants below
# are kept as ready-to-swap alternatives (each pairs an env with a model file).
env = gym.make("CliffWalking-v0")  # 0 up, 1 right, 2 down, 3 left
env = grid_game_env.CliffWalkingWapper(env)
model_path = 'model/cliff_walking.csv'
'''
env = gym.make("FrozenLake-v0", is_slippery=False)
env = grid_game_env.FrozenLakeWapper(env)
model_path = 'model/frozen_lake.csv'
'''
'''
env = gym.make("FrozenLake-v0", is_slippery=True)
env = grid_game_env.FrozenLakeWapper(env)
model_path = 'model/frozen_lake_slippery.csv'
'''


def main(mode, max_episodes):
    """Run tabular Q-learning on the selected grid environment.

    mode: "train" updates the Q-table after every step; any other value
          (e.g. "inference") only selects actions.
    max_episodes: number of episodes to run before saving the Q-table.
    """
    ACTIONS = [0, 1, 2, 3]
    rl = q_table_learning.QTableLearning(ACTIONS, learning_rate=0.1,
                                         reward_decay=0.9, e_greedy=0.9)
    # Resume from a previously saved Q-table if one exists.
    rl.load_model_from_file(model_path)
    for episode in range(max_episodes):
        state = env.reset()
        env.render()
        done = False
        step = 0
        while not done:
            action = rl.choose_action(state, mode)
            # x is the gym `info` dict — unused here.
            new_state, reward, done, x = env.step(action)
            env.render()
            if (mode == "train"):
                rl.learn(state, action, reward, new_state, done)
            state = new_state
            step += 1
        print(("done", episode, step))
    rl.show()
    rl.save_model_to_file(model_path)
    print("finished")


if __name__ == "__main__":
    mode = sys.argv[1]  # "inference" or "train"
    max_episodes = int(sys.argv[2])
    main(mode, max_episodes)
from MeshProcess.ReadOBJ import *
from MeshProcess.ReadPLY import *
from MeshProcess.WriteOBJ import *
from MeshProcess.WriteOBJ_WithVT import *
from MeshProcess.WritePLY import *
import numpy as np


def _center_unit_cube(vertices):
    """Translate and scale vertices so the mesh fits centered in a unit cube.

    Each coordinate is shifted by its axis minimum, divided by the largest
    bounding-box extent, and recentred so the bounding box is symmetric
    around the origin along every axis.  This is a vectorized version of
    the per-vertex loop that was previously triplicated across the three
    MoveToCenter* functions.

    Parameters
    ----------
    vertices : (n, 3) array-like of float

    Returns
    -------
    (n, 3) np.ndarray of float
    """
    vertices = np.asarray(vertices, dtype=float)
    vmin = vertices.min(axis=0)
    vmax = vertices.max(axis=0)
    extent = vmax - vmin
    zoom = extent.max()  # largest bounding-box side length
    # (v - vmin)/zoom maps into [0, extent/zoom]; subtracting half of that
    # per-axis range centers the box around the origin.
    return (vertices - vmin) / zoom - extent / (2.0 * zoom)


def MoveToCenterOBJ(filename):
    """Center the OBJ mesh in *filename* into a unit cube and overwrite it.

    Texture coordinates (vt) are read but not written back.
    """
    [vertrices, faces, vt] = ReadOBJ(filename)
    vertrices = _center_unit_cube(vertrices)
    WriteOBJ(filename, vertrices, faces)


def MoveToCenterOBJ_WithVT(filename):
    """Center the OBJ mesh in *filename*, preserving texture coordinates."""
    [vertrices, faces, vt] = ReadOBJ(filename)
    vertrices = _center_unit_cube(vertrices)
    WriteOBJ_WithVT(filename, vertrices, faces, vt)


def MoveToCenterPLY(filename):
    """Center the PLY mesh in *filename* into a unit cube and overwrite it."""
    [vertrices, faces] = ReadPLY(filename)
    vertrices = _center_unit_cube(vertrices)
    WritePLY(filename, np.array(vertrices), np.array(faces))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Liver CT dataset and augmentation transforms for segmentation training.

Created on Wed Nov 22 14:10:33 2017

@author: yanrpi
"""

# %%
import glob
import numpy as np
import nibabel as nib
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from os import path
from scipy import ndimage


# %%
class LiverCTDataset(Dataset):
    """Liver CT image dataset.

    Pairs each ``volume-*.nii`` file in ``root_dir`` with its matching
    ``segmentation-*.nii`` file (same numbering).
    """

    def __init__(self, root_dir, transform=None, verbose=False):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
            verbose (bool): Print file names and shapes when loading.
        """
        if not path.isdir(root_dir):
            raise ValueError("\"{}\" is not a valid directory path!".format(root_dir))

        self.root_dir = root_dir
        self.transform = transform
        self.verbose = verbose

        res = glob.glob(path.join(root_dir, 'volume-*.nii'))
        self.num_images = len(res)
        self.ct_filenames = res

    def __len__(self):
        return self.num_images

    def __getitem__(self, idx):
        img_name = self.ct_filenames[idx]
        # Segmentation files share the volume's numbering.
        seg_name = img_name.replace('volume', 'segmentation')

        image = nib.load(img_name)
        segmentation = nib.load(seg_name)

        if self.verbose:
            print('{} -> {}'.format(idx, img_name))
            print('Image shape: {}'.format(image.shape))
            print('Segmentation shape: {}'.format(segmentation.shape))

        # Samples carry the nibabel image objects; transforms extract arrays.
        sample = {'image': image, 'label': segmentation}
        if self.transform:
            sample = self.transform(sample)
        return sample


# %%
class RandomCrop(object):
    """Crop randomly the image in a sample.

    For segmentation training, only crop sections with non-zero label.

    Args:
        output_size (tuple or int): Desired output size. If int, a cubic
            crop is made.
        view (str): 'axial', 'coronal', or anything else for sagittal;
            selects the axis permutation applied before cropping.
    """

    def __init__(self, output_size, view):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) == 3
            self.output_size = output_size
        self.view = view

    def __call__(self, sample):
        image, segmentation = sample['image'], sample['label']

        h, w, d = image.shape
        new_h, new_w, new_d = self.output_size
        view = self.view
        new_d_half = new_d >> 1

        # nibabel deprecated and removed Img.get_data();
        # np.asanyarray(img.dataobj) is the documented equivalent.
        seg_data = np.asanyarray(segmentation.dataobj)
        img_data = np.asanyarray(image.dataobj)

        if view == 'axial':
            pass  # data already oriented (h, w, slices)
        elif view == 'coronal':
            img_data = img_data.transpose((2, 0, 1))
            seg_data = seg_data.transpose((2, 0, 1))
        else:
            img_data = img_data.transpose((2, 1, 0))
            seg_data = seg_data.transpose((2, 1, 0))

        # Find slices containing segmentation object (summed over h and w;
        # a slice counts when its label mass exceeds 10 voxels).
        summed = np.sum(seg_data.sum(axis=0), axis=0)
        non0_list = np.asarray([i for i in range(summed.size)])
        non0_list = non0_list[summed > 10]
        seg_start = max(np.min(non0_list) - new_d_half, 0)
        seg_end = min(np.max(non0_list) + new_d_half, d)

        # NOTE(review): h, w, d come from the *untransposed* image shape even
        # for coronal/sagittal views -- confirm this is intended.
        if new_h == h:
            top = 0
            left = 0
        else:
            top = np.random.randint(0, h - new_h)
            left = np.random.randint(0, w - new_w)
        ant = np.random.randint(seg_start, seg_end - new_d)

        img_data = img_data[top: top + new_h,
                            left: left + new_w,
                            ant: ant + new_d]
        img_data = img_data.astype(np.float32)

        # The label is the single central slice of the cropped stack.
        ant_seg = ant + new_d_half
        seg_data = seg_data[top: top + new_h,
                            left: left + new_w,
                            ant_seg: ant_seg + 1]
        seg_data = seg_data.astype(np.float32)

        return {'image': img_data, 'label': seg_data}


class RandomHorizontalFlip(object):
    """Randomly flip image and label along axis 0 with probability 0.5."""

    def __call__(self, sample):
        if random.uniform(0, 1) < 0.5:
            return sample
        # else return flipped sample
        image, label = sample['image'], sample['label']
        image = np.flip(image, axis=0).copy()
        label = np.flip(label, axis=0).copy()
        return {'image': image, 'label': label}


class RandomVerticalFlip(object):
    """Randomly flip image and label along axis 1 with probability 0.5."""

    def __call__(self, sample):
        if random.uniform(0, 1) < 0.5:
            return sample
        # else return flipped sample
        image, label = sample['image'], sample['label']
        image = np.flip(image, axis=1).copy()
        label = np.flip(label, axis=1).copy()
        return {'image': image, 'label': label}


class Clip(object):
    """Clip the intensity values to [lower_bound, upper_bound]."""

    def __init__(self, lower_bound, upper_bound):
        # Make sure upper bound is larger than the lower bound
        self.LB = min(lower_bound, upper_bound)
        self.UB = max(lower_bound, upper_bound)

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        image[image > self.UB] = self.UB
        image[image < self.LB] = self.LB
        return {'image': image, 'label': label}


class Normalize(object):
    """Linearly rescale intensities from [LB, UB] to approximately [-1, 1]."""

    def __init__(self, lower_bound, upper_bound):
        self.LB = min(lower_bound, upper_bound)
        self.UB = max(lower_bound, upper_bound)

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        mid_point = (self.LB + self.UB) / 2.0
        image -= mid_point
        half_range = (self.UB - self.LB) / 2.0
        # Small epsilon guards against division by zero when LB == UB.
        image /= (half_range + 0.000001)
        return {'image': image, 'label': label}


class ToTensor(object):
    """Convert ndarrays in sample to Tensors (W x H x C -> C x H x W)."""

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # swap color axis because
        # numpy image: W x H x C, torch image: C x H x W
        image = image.transpose((2, 1, 0))
        label = label.transpose((2, 1, 0))
        return {'image': torch.from_numpy(image),
                'label': torch.from_numpy(label)}


def get_composed_transform(hw, slices, view):
    """Standard augmentation pipeline: crop, clip, normalize, flips, tensor."""
    composed = transforms.Compose([RandomCrop((hw, hw, slices), view),
                                   Clip(-200, 200),
                                   Normalize(-200, 200),
                                   RandomHorizontalFlip(),
                                   RandomVerticalFlip(),
                                   ToTensor()])
    return composed


# %% Tester
if __name__ == '__main__':
    img_folder = '/zion/fangx2/BTCV/training_256'
    log_dir = path.expanduser('/zion/fangx2/mu_or/train/logs/')

    composed = get_composed_transform(224, 3, 'axial')
    dataset = LiverCTDataset(img_folder, transform=composed, verbose=True)

    # num_workers = 4 to use more processes
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=0)
    batch_it = iter(dataloader)
    sample_batched = next(batch_it)

    image_batch = sample_batched['image']
    label_batch = sample_batched['label']
    print('Batch size: {}, image size: {}, label size: {}'.format(
        len(image_batch), image_batch.size(2), label_batch.size(2)))

    img_data = image_batch[0, 0, :, :].numpy()
    v_min = img_data.min()
    v_max = img_data.max()
    print('Img -> max: {}, min: {}'.format(v_max, v_min))
    img_data = (img_data - v_min) / (v_max - v_min) * 255
    img_data = img_data.astype(np.uint8)

    label_data = label_batch[0, 0, :, :].numpy()
    v_min = label_data.min()
    v_max = label_data.max()
    print('Label -> max: {}, min: {}'.format(v_max, v_min))
    label_data *= 255
    # Bug fix: the astype() result was assigned to a typo'd name
    # ("lable_data") and silently discarded.
    label_data = label_data.astype(np.uint8)

    # Bug fix: the original called imsave() whose import was commented out
    # (NameError at runtime) and which was removed from scipy.misc; save
    # raw arrays instead.
    np.save(path.join(log_dir, 'image_sample.npy'), img_data)
    np.save(path.join(log_dir, 'label_sample.npy'), label_data)
from __future__ import print_function from __future__ import division from __future__ import absolute_import import pandas as pd import numpy as np class KineticTrajectory(object): """A trajectory is a list of x,y,z and time coordinates for a single atom in a kinetic Monte Carlo simulation, which has the values of that atom after every hop that happens in the simulation. When dealing with data for several atoms, do not use this class. Instead use KineticData.""" def __init__(self, x, y, z, t, copy=False): """Initialize with a list of coordinates Parameters ---------- x : x component of coordinate y : y component of coordinate z : z component of coordinate t : time elapsed for the current coordinate copy : if True, creates copy of the data passed """ #Not convinced this is managing the memory the way you think, but "not copying" appears to be faster if copy: self._data = pd.DataFrame(data={"x": x.copy(), "y": y.copy(), "z": z.copy(), "t": t.copy()}) else: self._data = pd.DataFrame(data={"x": x, "y": y, "z": z, "t": t}) #Add the norm of the distances self._data["r"]=np.sqrt(np.square(self._data[["x","y","z"]]).sum(axis=1)) def x(self): return self._data["x"] def y(self): return self._data["y"] def z(self): return self._data["z"] def t(self): return self._data["t"] def r(self): return self._data["r"] def data(self): return self._data def size(self): return len(self.t()) def as_matrix(self): return self._data[["x","y","z","t"]].as_matrix() def segment(self, n): """Split the trajectory into n independent looking trajectories. If the number of samples is not divisible by n, the remainder will be discarded. 
Parameters ---------- n : int Returns ------- list[KineticTrajectory] """ block_size=self.size()//n data_blocks=[self._data.loc[i*block_size:(i+1)*block_size,["x","y","z","t"]] for i in xrange(n)] for ix,d in enumerate(data_blocks[1::]): d-=self._data.loc[block_size*(ix+1)-1] return [KineticTrajectory(**d) for d in data_blocks] class KineticData(object): """Store and retrieve kinetic Monte Carlo data by type of species, and other conveniences. This is meant to store a single KMC simulation from start to finish""" def _input_sanity_raise(self,trajectories, time, occ_species): if(trajectories.shape[0]!=len(occ_species)): raise ValueError("There must be an xyz trajectory for each species to name") if(trajectories.shape[1]!=len(time)): raise ValueError("There must be as many time data points as there are coordinates for each atom") if(trajectories.shape[2]!=3): raise ValueError("The trajectories arrays must hold only values for the x, y, and z coordinates") return def _master_dataframe(self, trajectories, time, occ_species): """Given the constructor data, create the master DataFrame that holds all the information about the trajectories of each atom, including what species each one is and where it was sitting at the beginning of the KMC simulation cell. 
Parameters ---------- trajectories : list of tx3 arrays of length s as np.array time : array of float of length t occ_species list of str of length s Returns ------- pd.DataFrame """ #Create the labels for each atom, with the species name and the index into the starting configdof occ_labels=[o+"({})".format(ix) for o,ix in zip(occ_species,xrange(len(occ_species)))] #Calculate the norm of the displacements for every atom at every time step norms=np.linalg.norm(trajectories,axis=2) assert(len(occ_labels)==len(trajectories)) assert(len(norms)==len(trajectories)) #The concatenated numpy array now has shape[2]==4 with the norm travelled as a new value full_trajectory_data=np.concatenate((trajectories,np.expand_dims(norms,2)),axis=2) assert(full_trajectory_data.shape[2]==4) #Create MultiIndex for columns, which will group x,y,z,r by atom doing the trajectory labels0=[ix for ix,_ in enumerate(occ_labels) for i in xrange(4)] assert(labels0[0]==labels0[3] and labels0[-1]==labels0[-4]) labels1=[i for ix,_ in enumerate(occ_labels) for i in xrange(4)] assert(labels0[1]==labels1[-4]) col_mix=pd.MultiIndex(levels=[occ_labels,["x","y","z","r"]],labels=[labels0,labels1],names=["atomic","cart"]) #Reshape the trajectory data so that it's 2 dimensional, with the xyzr columns side by side nats,ntime,ndim=full_trajectory_data.shape data_digest=full_trajectory_data.transpose(0,2,1).reshape(nats*ndim,ntime).T #Include the time into the set of data as an additional Index time_ix=np.arange(ntime) timed_mix=pd.MultiIndex(levels=[time_ix,time],labels=[time_ix,time_ix],names=["index","time"]) #Create the master DataFrame, this has all the things and has columns at two levels: #by species and by trajectory. 
There are two index levels, sample index and time master_frame=pd.DataFrame(data_digest,index=timed_mix,columns=col_mix) return master_frame def __init__(self, trajectories, time, occ_species,direct=None): """Initialize with a list of trajectories, the elapsed time per step, and a list of the occupation name for each atom. Assumes all data comes in incremental time (will not sort anything). Internally this is a multi-indexed Pandas array, where one level deals with the atoms, naming each "column" things like "Ni(0)", "Al(1)", etc, to indicate the species and the index into the unrolled configuration of the starting config, as well as the elapsed time, which is common across every atom. The other level deals with columns of type "x", "y", or "z" to keep track of the trajectory of each atom. The master data should always remain in a state where Level 0 refers to the atom labels and Level 1 refers to the trajectories Parameters ---------- trajectories : list of 3xt arrays of length s as np.array time : array of float of length t occ_species : list of str of length s direct : pd.DataFrame, bypasses the normal construction """ if(direct is None): self._input_sanity_raise(trajectories, time, occ_species) self._master=self._master_dataframe(trajectories,time,occ_species) else: self._master=direct return def atom_cols(self, va_as_specie=False): """Return array of the column names for every atom. If specified, include the vacancies as a specie. 
Parameters ---------- va_as_specie : bool Returns ------- list """ everything=self._master.columns.get_level_values("atomic").unique() if va_as_specie: return everything else: return [x for x in everything if "Va" not in x] def specie_cols(self, specie): """Return an array of column names that can be used to index into every trajectory of a particular specie Parameters ---------- specie : str Returns ------- list of str """ return [s for s in self.atom_cols() if specie in s] def num_atoms(self,va_as_specie=False): """Returns total number of sites that there is data for If specified, include the vacancies as a specie. Parameters ---------- va_as_specie : bool Returns ------- int """ return len(self.atom_cols(va_as_specie)) def composition(self, specie, va_as_specie=False): """Returns the ratio of number of specie to total number of atoms (not including vacancies unless specified) Parameters ---------- specie : str Returns ------- float """ return len(self.specie_cols(specie))/self.num_atoms(va_as_specie) def index_trajectory(self, index): """Return the x, y, z, and t values of a particular atom throughout the simulation, specifying only the index and not the specie Parameters ---------- atom : int Returns ------- pd.DataFrame with x,y,z columns and t as secondary index """ for a in self.atom_cols(): if "({})".format(index) in a: return self.atomic_trajectory(a) def atomic_trajectory(self, atom): """Return the x, y, z, and t values of a particular atom throughout the simulation Parameters ---------- atom : str (e.g. 
Ni(9)) Returns ------- pd.DataFrame with x,y,z columns and t as secondary index """ return self._master[atom] def specie_data(self, specie): """Return only the data for a particular species Parameters ---------- specie : str Returns ------- pd.DataFrame """ return self._master[self.specie_cols(specie)] def specie_names(self): """Returns the names of all species present Returns ------- set of str """ all_cols=self.atom_cols(va_as_specie=True) return set([col.split("(")[0] for col in all_cols]) def _column_swap(self): """return the master data with cart over atomic Returns ------- DataFrame """ return self._master.swaplevel("atomic","cart",axis=1) def x(self): return self._column_swap["x"] def y(self): return self._column_swap["y"] def z(self): return self._column_swap["z"] def r(self): return self._column_swap["r"] def t(self): return self._master.index.get_level_values("time").values def _index_at_time(self, time): """Return the index (row) corresponding to the data for the instant just after (or equal to) the specified time Parameters ---------- time : float Returns ------- int """ return self._master[self.t()>=time].index.get_level_values("index")[0] def values_at_time(self, time): """Return the values of everything just below the value of the time specified. Parameters ---------- time : float Returns ------- pd.DataFrame """ return self._master.loc[self._index_at_time(time)] def specie_values_at_time(self, time, specie): """Return the values of everything just below the value of the time specified, but only for the desired specie Parameters ---------- time : float specie : str Returns ------- pd.DataFrame """ specie_dump=self.specie_data(specie) return specie_dump.loc[self._index_at_time(time)] def independized_measurements(self): """Similar to segmenting the data into multiple apparently independent run, this routine will make every point appear to have started at t=0 and r=0. 
This can be useful for data you collect where you don't sample every step, and you'd like to keep all the "final" data points in the same array. Returns ------- KineticData """ #create copy of data and subtract out values indep=self._master.copy() indep.iloc[1::]=indep.iloc[1::].values-indep.iloc[0:-1] #fix the distance stacked=indep.stack("atomic") stacked["r"]=np.linalg.norm(stacked[["x","y","z"]],axis=1) indep=stacked.unstack("atomic").stack("cart").unstack("cart") #set the time reset_time=self._master.index.get_level_values("time").values reset_time[1::]=reset_time[1::]-reset_time[0:-1] indep.index.set_levels(reset_time,"time",inplace=True) return KineticData(None,None,None,direct=indep) def _indexed_segmentation(self, end_inds): """Given indexes into the sampled data, split the master DataFrame into the specified chunks, and reset the elapsed time and coordinates such that each segment appears to be an independent run Parameters ---------- end_inds : list of int, each int is the "up to" index of each segment Returns ------- list of KineticData """ start_inds=[0]+end_inds[0:-1] raw_segments=[self._master.iloc[ix:nx] for ix,nx in zip(start_inds,end_inds)] # raw_segments=[self._master.iloc[seg_length*s:seg_length*(s+1)] for s in xrange(n)] n=len(raw_segments) #We will subtract the values of the "previous simulation", starting with #the final segment #These are indexes in reverse that exclude zero rev_seg_ix=np.arange(n-1)[::-1]+1 for rix in rev_seg_ix: raw_segments[rix]=raw_segments[rix]-raw_segments[rix-1].iloc[-1] #The norm (r) needs to be recalculated raw_segments[rix]=raw_segments[rix].stack("atomic") raw_segments[rix]["r"]=np.linalg.norm(raw_segments[rix][["x","y","z"]],axis=1) raw_segments[rix]=raw_segments[rix].unstack("atomic").stack("cart").unstack("cart") #The time also needs to be reset reset_time=self._master.index.get_level_values("time")-raw_segments[rix-1].index.get_level_values("time")[-1] 
raw_segments[rix].index.set_levels(reset_time,"time",inplace=True) return [KineticData(None,None,None,direct=raw) for raw in raw_segments] def sampled_segmentation(self, n): """Split the data into n KineticData as if the data had been run independently, subtracting out time and coordinates so that they start at zero. Remainder data is discarded. Parameters ---------- n : int Returns ------- list of KineticData """ seg_length=len(self._master)//n seg_inds=[seg_length*(i+1) for i in xrange(n)] return self._indexed_segmentation(seg_inds) def timed_segmentation(self, n): """Return segments of data in which equal sets of time have elapsed Parameters ---------- time : int Returns ------- list of KineticData """ time_length=self.total_time()/n time_inds=[self._index_at_time(time_length*(i+1)) for i in xrange(n)] return self._indexed_segmentation(time_inds) def values(self): """Return all the data ever Returns ------- pd.DataFrame """ return self._master def total_time(self): """Returns the most amount of time elapsed Returns ------- float """ return self._master.index.get_level_values("time")[-1]
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch

from .lr_scheduler import WarmupMultiStepLR

# add by kevin.cao at 20.01.08
import torch.optim as optim
import numpy as np


def make_optimizer(cfg, model):
    """Build an SGD optimizer with per-parameter lr/weight-decay overrides.

    Bias parameters get BASE_LR * BIAS_LR_FACTOR and WEIGHT_DECAY_BIAS,
    following the usual detectron-style configuration.
    """
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "bias" in key:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]

    optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
    return optimizer


def make_lr_scheduler(cfg, optimizer):
    """Build the warmup multi-step LR scheduler from the solver config."""
    return WarmupMultiStepLR(
        optimizer,
        cfg.SOLVER.STEPS,
        cfg.SOLVER.GAMMA,
        warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
        warmup_iters=cfg.SOLVER.WARMUP_ITERS,
        warmup_method=cfg.SOLVER.WARMUP_METHOD,
    )


# add by kevin.cao at 20.01.08 =======
class Optimizers(object):
    """Handles a list of optimizers."""

    def __init__(self):
        self.optimizers = []
        self.lrs = []
        self.decay_every = []

    def add(self, optimizer, lr, decay_every):
        """Adds optimizer to list."""
        self.optimizers.append(optimizer)
        self.lrs.append(lr)
        self.decay_every.append(decay_every)

    def zero_grad(self):
        for optimizer in self.optimizers:
            optimizer.zero_grad()

    def step(self):
        """Makes all optimizers update their params."""
        for optimizer in self.optimizers:
            optimizer.step()

    def update_lr(self, epoch_idx):
        """Update learning rate of every optimizer."""
        for optimizer, init_lr, decay_every in zip(
                self.optimizers, self.lrs, self.decay_every):
            optimizer = self.step_lr(
                epoch_idx, init_lr, decay_every, 0.1, optimizer)

    # Bug fix: step_lr was defined without `self` yet invoked as a bound
    # method (self.step_lr(...)), which shifted every argument by one
    # position. A staticmethod keeps the call site's argument order intact.
    @staticmethod
    def step_lr(epoch, base_lr, lr_decay_every, lr_decay_factor, optimizer):
        """Handles step decay of learning rate."""
        factor = np.power(lr_decay_factor, np.floor((epoch - 1) / lr_decay_every))
        new_lr = base_lr * factor
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr
        print('Set lr to ', new_lr)
        return optimizer


def make_PG_optimizer(cfg, model):
    """Build mask and classifier optimizers/schedulers for piggyback training.

    Returns
    -------
    ([optimizer_masks, optimizer_classifier],
     [scheduler_masks, scheduler_classifier])
    """
    print(model)
    print('Performing masking.')

    # Adam over the backbone (mask) parameters.
    optimizer_masks = optim.Adam(model.backbone.parameters(),
                                 lr=cfg.SOLVER.PG_MASK_LR)

    params = []
    lr = cfg.SOLVER.PG_HEAD_LR
    weight_decay = cfg.SOLVER.WEIGHT_DECAY
    # NOTE(review): weight_decay is never reset after the first bias is
    # seen, so every later parameter also gets 0 -- confirm intended.
    for key, value in model.rpn.named_parameters():
        if not value.requires_grad:
            continue
        if "bias" in key:
            weight_decay = 0
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    for key, value in model.roi_heads.named_parameters():
        if not value.requires_grad:
            continue
        if "bias" in key:
            weight_decay = 0
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    optimizer_classifier = optim.Adam(params, lr=cfg.SOLVER.PG_HEAD_LR)

    optimizers = [optimizer_masks, optimizer_classifier]

    scheduler_masks = optim.lr_scheduler.MultiStepLR(
        optimizer_masks, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA)
    scheduler_classifier = optim.lr_scheduler.MultiStepLR(
        optimizer_classifier, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA)
    schedulers = [scheduler_masks, scheduler_classifier]
    return optimizers, schedulers
import numpy as np
from scipy.spatial.distance import pdist, squareform, cdist
import theano
import theano.tensor as T
from theano_utils import floatX, sharedX


def comm_func_eval(samples, ground_truth):
    """Moment-matching comparison of drawn samples against ground truth.

    Returns a dict with:
        'ex'    -- mean squared gap between first moments (per-dimension means)
        'exsqr' -- mean squared gap between second moments (means of squares)
    """
    drawn = np.copy(samples)
    target = np.copy(ground_truth)

    def moment_gap(power):
        # Average over the sample axis, then squared error averaged over dims.
        m_drawn = np.mean(drawn ** power, axis=0)
        m_target = np.mean(target ** power, axis=0)
        return np.mean((m_drawn - m_target) ** 2)

    return {'ex': moment_gap(1), 'exsqr': moment_gap(2)}
########################################################################### # Created by: CASIA IVA # Email: jliu@nlpr.ia.ac.cn # Copyright (c) 2018 ########################################################################### import numpy as np import torch import math from torch.nn import Module, Sequential, Conv2d, ReLU,AdaptiveMaxPool2d, AdaptiveAvgPool2d, \ NLLLoss, BCELoss, CrossEntropyLoss, AvgPool2d, MaxPool2d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding from torch.nn import functional as F from torch.autograd import Variable torch_ver = torch.__version__[:3] __all__ = ['CAM_Module', 'TAM_Module'] class CAM_Module(Module): def __init__(self): super(CAM_Module, self).__init__() self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self,x): m_batchsize, C, height, width = x.size() proj_query = x.view(m_batchsize, C, -1) proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy attention = self.softmax(energy_new) proj_value = x.view(m_batchsize, C, -1) out = torch.bmm(attention, proj_value) out = out.view(m_batchsize, C, height, width) out = self.gamma*out + x return out class TAM_Module(Module): def __init__(self): super(TAM_Module, self).__init__() self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self,x): m_batchsize, N, height, width = x.size() proj_query = x.view(m_batchsize, N, -1) proj_key = x.view(m_batchsize, N, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy attention = self.softmax(energy_new) proj_value = x.view(m_batchsize, N, -1) out = torch.bmm(attention, proj_value) out = out.view(m_batchsize, N, height, width) out = self.gamma*out + x return out
# -*- coding: utf-8 -*-
"""Regression test for the optical theorem with Smuthi.

Three dielectric spheres inside a planar layer system are illuminated by a
plane wave; the test asserts that the integrated scattering cross section
equals the extinction cross section (top + bottom) to within 1e-4 relative
error.  NOTE: the simulation runs at module import time.
"""
import sys
import numpy as np
import smuthi.particles as part
import smuthi.layers as lay
import smuthi.initial_field as init
import smuthi.simulation as simul
import smuthi.postprocessing.far_field as farf
import smuthi.utility.automatic_parameter_selection as autoparam
import smuthi.fields as flds

# Parameter input ----------------------------
vacuum_wavelength = 550
plane_wave_polar_angle = np.pi * 7/8
plane_wave_azimuthal_angle = np.pi * 1/3
plane_wave_polarization = 0
plane_wave_amplitude = 1
# --------------------------------------------

# initialize particle objects
sphere1 = part.Sphere(position=[100, 100, 150], refractive_index=2.4 + 0.0j, radius=110, l_max=1, m_max=1)
sphere2 = part.Sphere(position=[-100, -100, 250], refractive_index=1.9 + 0.0j, radius=120, l_max=1, m_max=1)
sphere3 = part.Sphere(position=[-200, 100, 300], refractive_index=1.7 + 0.0j, radius=90, l_max=1, m_max=1)
particle_list = [sphere1, sphere2, sphere3]

# initialize layer system object
# thicknesses [0, 400, 0] with refractive indices [2, 1.3, 2]
lay_sys = lay.LayerSystem([0, 400, 0], [2, 1.3, 2])

# initialize initial field object
init_fld = init.PlaneWave(vacuum_wavelength=vacuum_wavelength, polar_angle=plane_wave_polar_angle,
                          azimuthal_angle=plane_wave_azimuthal_angle, polarization=plane_wave_polarization,
                          amplitude=plane_wave_amplitude, reference_point=[0, 0, 400])

# initialize simulation object
simulation = simul.Simulation(layer_system=lay_sys, particle_list=particle_list, initial_field=init_fld,
                              log_to_terminal=(not sys.argv[0].endswith('nose2')))  # suppress output if called by nose

# Let Smuthi tune its numerical parameters (multipole cutoff, neff contour)
# until the extinction cross section is converged to the given tolerance.
autoparam.select_numerical_parameters(simulation,
                                      detector="extinction cross section",
                                      tolerance=1e-5,
                                      max_iter=20,
                                      neff_imag=1e-2,
                                      neff_step=1e-2,
                                      select_neff_max=True,
                                      neff_max_increment=0.5,
                                      neff_max=None,
                                      select_neff_step=True,
                                      select_multipole_cutoff=True,
                                      relative_convergence=True,
                                      suppress_simulation_output=True)

simulation.run()

# Far-field cross sections of the converged solution.
scs = farf.scattering_cross_section(initial_field=simulation.initial_field,
                                    particle_list=simulation.particle_list,
                                    layer_system=simulation.layer_system)

ecs = farf.extinction_cross_section(initial_field=simulation.initial_field,
                                    particle_list=simulation.particle_list,
                                    layer_system=simulation.layer_system)


def test_optical_theorem():
    """Total scattered power must match extinction to <1e-4 relative error."""
    relerr = abs((sum(scs.integral()) - ecs['top'] - ecs['bottom']) / sum(scs.integral()))
    print('error: ', relerr)
    assert relerr < 1e-4


if __name__ == '__main__':
    test_optical_theorem()
################################################################################
# Copyright (c) 2009-2020, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
#   https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

"""Flux density model."""

import warnings

import numpy as np
import astropy.units as u


class FluxError(ValueError):
    """Exception for a flux parsing error."""


class FluxDensityModel:
    """Spectral flux density model.

    This models the spectral flux density (or spectral energy distribtion - SED)
    of a radio source as::

        log10(S) = a + b*log10(v) + c*log10(v)**2 + d*log10(v)**3 + e*exp(f*log10(v))

    where *S* is the flux density in janskies (Jy) and *v* is the frequency
    in MHz. The model is based on the Baars polynomial [BGP1977]_ (up to a
    third-order term) and extended with an exponential term from the 1Jy
    catalogue [KWP+1981]_. It is considered valid for a specified frequency
    range only. For any frequencies outside this range a value of NaN is
    returned.

    It also models polarisation: an optional (I, Q, U, V) vector may be given
    to specify fractional Stokes parameters, which scale *S*. If not
    specified, the default is unpolarised (I = 1, Q = U = V = 0). It is
    recommended that I is left at 1, but it can be changed to model
    non-physical sources e.g. negative CLEAN components.

    The object can be instantiated directly with the minimum and maximum
    frequencies of the valid frequency range and the model coefficients, or
    indirectly via a description string. This string contains the minimum
    frequency, maximum frequency and model coefficients as space-separated
    values (optionally with parentheses enclosing the entire string). Some
    examples::

        '1000.0 2000.0 0.34 -0.85 -0.02'
        '(1000.0 2000.0 0.34 -0.85 0.0 0.0 2.3 -1.0)'
        '1000.0 2000.0 0.34 -0.85 0.0 0.0 2.3 -1.0 1.0 0.2 -0.1 0.0'

    If less than the expected number of coefficients are provided, the rest
    are assumed to be zero, except that *I* is assumed to be one. If more
    than the expected number are provided, the extra coefficients are
    ignored, but a warning is shown.

    Parameters
    ----------
    min_frequency, max_frequency : :class:`~astropy.units.Quantity`
        Minimum and maximum frequency for which model is valid
    coefs : sequence of floats, optional
        Model coefficients (a, b, c, d, e, f, I, Q, U, V), where missing
        coefficients at the end of the sequence are assumed to be zero
        (except for I, assumed to be one), and extra coefficients are ignored.

    References
    ----------
    .. [BGP1977] J.W.M. Baars, R. Genzel, I.I.K. Pauliny-Toth, A. Witzel,
       "The Absolute Spectrum of Cas A; An Accurate Flux Density Scale and a
       Set of Secondary Calibrators," Astron. Astrophys., 61, 99-106, 1977.
    .. [KWP+1981] H. Kuehr, A. Witzel, I.I.K. Pauliny-Toth, U. Nauber,
       "A catalogue of extragalactic radio sources having flux densities
       greater than 1 Jy at 5 GHz," Astron. Astrophys. Suppl. Ser., 45,
       367-430, 1981.
    """

    # Coefficients are zero by default, except for I
    _DEFAULT_COEFS = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0,  # a, b, c, d, e, f
                               1.0, 0.0, 0.0, 0.0])  # I, Q, U, V

    @u.quantity_input(equivalencies=u.spectral())
    def __init__(self, min_frequency: u.Hz, max_frequency: u.Hz, coefs):
        # Store the valid range in MHz, the unit the model is defined in
        self.min_frequency = min_frequency << u.MHz
        self.max_frequency = max_frequency << u.MHz
        self.coefs = self._DEFAULT_COEFS.copy()
        # Extract up to the maximum number of coefficients from given sequence
        if len(coefs) > len(self.coefs):
            warnings.warn(f'Received {len(coefs)} coefficients but only expected {len(self.coefs)} - '
                          'ignoring the rest', FutureWarning)
        self.coefs[:min(len(self.coefs), len(coefs))] = coefs[:min(len(self.coefs), len(coefs))]

    def __str__(self):
        """Complete string representation of object, sufficient to reconstruct it."""
        return self.description

    def __repr__(self):
        """Short human-friendly string representation."""
        min_freq = self.min_frequency.to_value(u.MHz)
        max_freq = self.max_frequency.to_value(u.MHz)
        freq_range = f'{min_freq:.0f}-{max_freq:.0f} MHz'
        # List only the coefficient letters that differ from their defaults
        param_str = ','.join(np.array(list('abcdefIQUV'))[self.coefs != self._DEFAULT_COEFS])
        return f"<katpoint.FluxDensityModel {freq_range} params={param_str} at {id(self):#x}>"

    def __eq__(self, other):
        """Equality comparison operator (based on description string)."""
        return self.description == \
            (other.description if isinstance(other, self.__class__) else other)

    def __hash__(self):
        """Base hash on description string, just like equality operator."""
        return hash(self.description)

    @property
    def description(self):
        """Complete string representation of object, sufficient to reconstruct it."""
        min_freq = self.min_frequency.to_value(u.MHz)
        max_freq = self.max_frequency.to_value(u.MHz)
        # Prune defaults at the end of coefficient list for the description string
        nondefault_coefs = np.nonzero(self.coefs != self._DEFAULT_COEFS)[0]
        last_nondefault_coef = nondefault_coefs[-1] if len(nondefault_coefs) > 0 else 0
        pruned_coefs = self.coefs[:last_nondefault_coef + 1]
        coefs_str = ' '.join([repr(c) for c in pruned_coefs])
        return f'({min_freq} {max_freq} {coefs_str})'

    @classmethod
    def from_description(cls, description):
        """Construct flux density model object from description string.

        Parameters
        ----------
        description : str
            String of space-separated parameters (optionally in parentheses)

        Returns
        -------
        flux_model : :class:`FluxDensityModel`
            Constructed flux density model object

        Raises
        ------
        FluxError
            If `description` has the wrong format
        """
        # Split description string on spaces and turn into numbers (discarding any parentheses)
        prefix = f"Flux density description string '{description}'"
        try:
            flux_info = [float(num) for num in description.strip(' ()').split()]
        except ValueError as err:
            raise FluxError(f"{prefix} contains invalid floats") from err
        if len(flux_info) < 2:
            raise FluxError(f"{prefix} should have at least two parameters")
        return cls(flux_info[0] * u.MHz, flux_info[1] * u.MHz, flux_info[2:])

    @property
    def iquv_scale(self):
        """Fractional Stokes parameters which scale the flux density."""
        return self.coefs[6:10]

    @u.quantity_input
    def _flux_density_raw(self, frequency: u.Hz) -> u.Jy:
        # Evaluate the log-polynomial + exponential model without range checks
        a, b, c, d, e, f = self.coefs[:6]
        log10_v = np.log10(frequency.to_value(u.MHz))
        log10_S = a + b * log10_v + c * log10_v ** 2 + d * log10_v ** 3 + e * np.exp(f * log10_v)
        return 10 ** log10_S * u.Jy

    @u.quantity_input(equivalencies=u.spectral())
    def flux_density(self, frequency: u.Hz) -> u.Jy:
        """Calculate Stokes I flux density for given observation frequency.

        Parameters
        ----------
        frequency : :class:`~astropy.units.Quantity`, optional
            Frequency at which to evaluate flux density

        Returns
        -------
        flux_density : :class:`~astropy.units.Quantity`
            Flux density, or NaN Jy if frequency is out of range.
            The shape matches the input.
        """
        frequency <<= u.MHz
        flux = self._flux_density_raw(frequency) * self.iquv_scale[0]
        # Out-of-range frequencies yield NaN Jy instead of extrapolating
        flux[frequency < self.min_frequency] = np.nan * u.Jy
        flux[frequency > self.max_frequency] = np.nan * u.Jy
        return flux

    @u.quantity_input(equivalencies=u.spectral())
    def flux_density_stokes(self, frequency: u.Hz) -> u.Jy:
        """Calculate full-Stokes flux density for given observation frequency.

        Parameters
        ----------
        frequency : :class:`~astropy.units.Quantity`, optional
            Frequency at which to evaluate flux density

        Returns
        -------
        flux_density : :class:`~astropy.units.Quantity`
            Flux density, or NaN Jy if frequency is out of range.
            The shape matches the input with an extra trailing dimension
            of size 4 containing Stokes I, Q, U, V.
        """
        frequency <<= u.MHz
        flux = self._flux_density_raw(frequency)
        flux[frequency < self.min_frequency] = np.nan * u.Jy
        flux[frequency > self.max_frequency] = np.nan * u.Jy
        # Trailing axis of length 4 carries the Stokes I, Q, U, V components
        return np.multiply.outer(flux, self.iquv_scale)
import numpy as np


class MockRandomState():
    """Cheap stand-in for numpy's RandomState when reproducibility is not needed.

    Numpy RandomState is actually extremely slow, requiring about 300
    microseconds for any operation involving state.  When reproducibility is
    unnecessary, this class avoids all state handling and so immensely
    improves efficiency.

    Benchmarks for Player:
        %timeit _ = Player.from_state(pstate)
        ORIGINAL:        26.8 ms ± 788 µs per loop
        MockRandomState:  1.15 ms ± 107 µs per loop
    i.e. roughly a 10x speedup, which matters a lot.
    """

    def __init__(self):
        pass

    def set_state(self, *args, **kwargs):
        """Accepted for API compatibility; does nothing."""
        return None

    def get_state(self, *args, **kwargs):
        """There is no state to report, so this always returns None."""
        return None

    def choice(self, *args, **kwargs):
        """Delegate directly to the module-level numpy generator."""
        return np.random.choice(*args, **kwargs)
""" Created on Thu Aug 22 19:18:53 2019 @authors: Dr. M. S. Ramkarthik and Dr. Pranay Barkataki """ import numpy as np import math from QuantumInformation import RecurNum from QuantumInformation import LinearAlgebra as LA from QuantumInformation import QuantumMechanics as QM import scipy.linalg.lapack as la import re qobj=QM() class PartialTr: def __init__(self): """It is a class dealing with partial trace and transpose it primarily intrinsic functions of uses numpy, math, cmath. """ # partial trace operation subroutine for a real pure state #entry is in the column form def partial_trace_vec(self,state,sub_tr): """ Partial trace operation on a quantum state Input: state: real state vector sub_tr: details of the subsystems not to be traced out Output: red_den: reduced density matrix """ typestate=str(state.dtype) N=int(math.log2(state.shape[0])) length=len(sub_tr) # count=length, and count0= N-length assert set(sub_tr).issubset(set(np.arange(1,N+1))),\ "Invalid subsystems to be traced out" if re.findall("^complex",typestate): red_den=np.zeros([2**(length),2**(length)],dtype=np.complex_) vec=np.zeros([(N-length),1]) im=0 for ii in range(1,N+1): if ii not in sub_tr: vec[im]=2**(N-ii) im=im+1 mylist=[] icount=0 sum2=0 RecurNum.recur_comb_add(mylist,vec,icount,sum2) irow=np.zeros([N,1]) icol=np.zeros([N,1]) mylist=np.array(mylist) len_mylist=len(mylist) for i1 in range(0,2**length): col1=self.__dectobin(i1,length) for i2 in range(0,2**length): col2=self.__dectobin(i2,length) i3=0 for k in range(0,N): if k+1 not in sub_tr: irow[k]=0 else: irow[k]=col1[i3] i3=i3+1 ic=0 for k2 in range(0,N): if k2+1 not in sub_tr: icol[k2]=0 else: icol[k2]=col2[ic] ic=ic+1 icc=self.__bintodec(irow) jcc=self.__bintodec(icol) red_den[i1,i2]=red_den[i1,i2]+(state[icc]*\ np.conjugate(state[jcc])) for jj in range(0,len_mylist): icc2=icc+mylist[jj] jcc2=jcc+mylist[jj] red_den[i1,i2]=red_den[i1,i2]+(state[icc2]*\ np.conjugate(state[jcc2])) else: 
red_den=np.zeros([2**(length),2**(length)],dtype='float64') vec=np.zeros([(N-length),1]) im=0 for ii in range(1,N+1): if ii not in sub_tr: vec[im]=2**(N-ii) im=im+1 mylist=[] icount=0 sum2=0 RecurNum.recur_comb_add(mylist,vec,icount,sum2) irow=np.zeros([N,1]) icol=np.zeros([N,1]) mylist=np.array(mylist) len_mylist=len(mylist) for i1 in range(0,2**length): col1=self.__dectobin(i1,length) for i2 in range(0,2**length): col2=self.__dectobin(i2,length) i3=0 for k in range(0,N): if k+1 not in sub_tr: irow[k]=0 else: irow[k]=col1[i3] i3=i3+1 ic=0 for k2 in range(0,N): if k2+1 not in sub_tr: icol[k2]=0 else: icol[k2]=col2[ic] ic=ic+1 icc=self.__bintodec(irow) jcc=self.__bintodec(icol) red_den[i1,i2]=red_den[i1,i2]+(state[icc]*state[jcc]) for jj in range(0,len_mylist): icc2=icc+mylist[jj] jcc2=jcc+mylist[jj] red_den[i1,i2]=red_den[i1,i2]+(state[icc2]*state[jcc2]) return(red_den) # partial trace operation for a real state density matrix def partial_trace_den(self,state,sub_tr): """ Partial trace operation on a density matrix Input: state: input real density matrix sub_tr: details of the subsystem not to be traced out Output: red_den: reduced density matrix """ typestate=str(state.dtype) N=int(math.log2(state.shape[0])) length=len(sub_tr) # count=length, and count0= N-length assert set(sub_tr).issubset(set(np.arange(1,N+1))),\ "Invalid subsystems to be traced out" if re.findall("^complex",typestate): red_den=np.zeros([2**(length),2**(length)],dtype=np.complex_) vec=np.zeros([(N-length),1]) im=0 for ii in range(1,N+1): if ii not in sub_tr: vec[im]=2**(N-ii) im=im+1 mylist=[] icount=0 sum2=0 RecurNum.recur_comb_add(mylist,vec,icount,sum2) irow=np.zeros([N,1]) icol=np.zeros([N,1]) mylist=np.array(mylist) len_mylist=len(mylist) for i1 in range(0,2**length): col1=self.__dectobin(i1,length) for i2 in range(0,2**length): col2=self.__dectobin(i2,length) i3=0 for k in range(0,N): if k+1 not in sub_tr: irow[k]=0 else: irow[k]=col1[i3] i3=i3+1 ic=0 for k2 in range(0,N): if k2+1 not in 
sub_tr: icol[k2]=0 else: icol[k2]=col2[ic] ic=ic+1 icc=self.__bintodec(irow) jcc=self.__bintodec(icol) red_den[i1,i2]=red_den[i1,i2]+(state[icc,jcc]) for jj in range(0,len_mylist): icc2=icc+mylist[jj] jcc2=jcc+mylist[jj] red_den[i1,i2]=red_den[i1,i2]+(state[icc2,jcc2]) else: red_den=np.zeros([2**(length),2**(length)],dtype='float64') vec=np.zeros([(N-length),1]) im=0 for ii in range(1,N+1): if ii not in sub_tr: vec[im]=2**(N-ii) im=im+1 mylist=[] icount=0 sum2=0 RecurNum.recur_comb_add(mylist,vec,icount,sum2) irow=np.zeros([N,1]) icol=np.zeros([N,1]) mylist=np.array(mylist) len_mylist=len(mylist) for i1 in range(0,2**length): col1=self.__dectobin(i1,length) for i2 in range(0,2**length): col2=self.__dectobin(i2,length) i3=0 for k in range(0,N): if k+1 not in sub_tr: irow[k]=0 else: irow[k]=col1[i3] i3=i3+1 ic=0 for k2 in range(0,N): if k2+1 not in sub_tr: icol[k2]=0 else: icol[k2]=col2[ic] ic=ic+1 icc=self.__bintodec(irow) jcc=self.__bintodec(icol) red_den[i1,i2]=red_den[i1,i2]+(state[icc,jcc]) for jj in range(0,len_mylist): icc2=icc+mylist[jj] jcc2=jcc+mylist[jj] red_den[i1,i2]=red_den[i1,i2]+(state[icc2,jcc2]) return(red_den) # Partial Transpose of real pure state def ptranspose_vec(self,state,sub_tr): """ Partial transpose operation on a quantum state Parameters state : It is a real or complex state. sub_tr : List of number designating the subsystems to be partially transposed. 
Returns denc2: It is partially transposed density matrix """ N=int(math.log2(state.shape[0])) assert set(sub_tr).issubset(set(np.arange(1,N+1))),\ "Invalid subsystems to be traced out" typestate=str(state.dtype) if re.findall("^complex",typestate): denc2=np.zeros([2**N,2**N],dtype=np.complex_) for i in range(state.shape[0]): vec_row=qobj.decimal_binary(i,N) for j in range(state.shape[0]): vec_col=qobj.decimal_binary(j,N) vec_row2=vec_row.copy() for k in sub_tr: temp=vec_row2[k-1] vec_row2[k-1]=vec_col[k-1] vec_col[k-1]=temp row=qobj.binary_decimal(vec_row2) col=qobj.binary_decimal(vec_col) denc2[row,col]=state[i]*np.conjugate(state[j]) else: denc2=np.zeros([2**N,2**N],dtype='float64') for i in range(state.shape[0]): vec_row=qobj.decimal_binary(i,N) for j in range(state.shape[0]): vec_col=qobj.decimal_binary(j,N) vec_row2=vec_row.copy() for k in sub_tr: temp=vec_row2[k-1] vec_row2[k-1]=vec_col[k-1] vec_col[k-1]=temp row=qobj.binary_decimal(vec_row2) col=qobj.binary_decimal(vec_col) denc2[row,col]=state[i]*state[j] return(denc2) # Partial Transpose of real density matrix def ptranspose_den(self,denc,sub_tr): """ Partial transpose operation on density matrix Parameters denc : It is a real or complex density matrix. sub_tr : List of number designating the subsystems to be partially transposed. 
Returns denc2: It is partially transposed density matrix """ N=int(math.log2(denc.shape[0])) assert set(sub_tr).issubset(set(np.arange(1,N+1))),\ "Invalid subsystems to be traced out" typestate=str(denc.dtype) if re.findall("^complex",typestate): denc2=np.zeros([2**N,2**N],dtype=np.complex_) for i in range(denc.shape[0]): vec_row=qobj.decimal_binary(i,N) for j in range(denc.shape[1]): vec_col=qobj.decimal_binary(j,N) vec_row2=vec_row.copy() for k in sub_tr: temp=vec_row2[k-1] vec_row2[k-1]=vec_col[k-1] vec_col[k-1]=temp row=qobj.binary_decimal(vec_row2) col=qobj.binary_decimal(vec_col) denc2[row,col]=denc[i,j] else: denc2=np.zeros([2**N,2**N],dtype='float64') for i in range(denc.shape[0]): vec_row=qobj.decimal_binary(i,N) for j in range(denc.shape[1]): vec_col=qobj.decimal_binary(j,N) vec_row2=vec_row.copy() for k in sub_tr: temp=vec_row2[k-1] vec_row2[k-1]=vec_col[k-1] vec_col[k-1]=temp row=qobj.binary_decimal(vec_row2) col=qobj.binary_decimal(vec_col) denc2[row,col]=denc[i,j] return(denc2) def __dectobin(self,n,l): """It converts decimal to binary. Attributes: n: entry of the decimal number l: length of the binary output Returns: dtb: a numpy array containing the binary equivalent of number n """ import numpy as np p=n dtb=np.empty([l,1]) for i in range(0,l): dtb[l-1-i]=int(p % 2) p=int(p/2) #print(dtb) return(dtb) # Binary to decimal conversion def __bintodec(self,vec): """ It converts biinary to decimal Attributes: vec: entry of 1D array of binary numbers {0,1} Returns: t: decimal equivalent of the vec """ t=0 for i in range(0,len(vec)): t=t+vec[len(vec)-1-i]*(2**i) #print(dtb) return(int(t)) class Entanglement(PartialTr): # Concurrence calculation for a real pure state def concurrence_vec(self,state,i,j,eps=10**(-13)): """ Calculation of concurrence for a quantum state Parameters state : Real or complex state i : It stores the place values of the qubits. j : It stores the place values of the qubits. 
eps : Below the eps value the eigenvalues will be considered zero. The default is 10**(-13). Returns conc: concurrence value """ sigmay=np.zeros([4,4],dtype='float64') typestate=str(state.dtype) if re.findall("^complex",typestate): sigmay[0,3]=-1 sigmay[1,2]=1 sigmay[2,1]=1 sigmay[3,0]=-1 sub_tr=[i,j] rdm= self.partial_trace_vec(state,sub_tr) rhot3=rdm@sigmay@np.conjugate(rdm)@sigmay w,vl,vr,info =la.zgeev(rhot3) wc=[] for i in range(0,4): if abs(w.item(i))<eps: wc.append(0.000000000000000) else: wc.append(abs(w.item(i))) wc.sort(reverse=True) wc=np.array(wc,dtype='float64') conc=math.sqrt(wc.item(0))-math.sqrt(wc.item(1))-\ math.sqrt(wc.item(2))-math.sqrt(wc.item(3)) if conc<0: conc=0 else: sigmay[0,3]=-1 sigmay[1,2]=1 sigmay[2,1]=1 sigmay[3,0]=-1 sub_tr=[i,j] rdm= self.partial_trace_vec(state,sub_tr) rhot3=rdm@sigmay@rdm@sigmay wr,wi,vl,vr,info =la.dgeev(rhot3) w=[] for i in range(0,4): if wr[i] < eps: w.append(0.000000000000000) else: w.append(np.float64(wr.item(i))) w.sort(reverse=True) w=np.array(w,dtype='float64') conc=math.sqrt(w.item(0))-math.sqrt(w.item(1))-\ math.sqrt(w.item(2))-math.sqrt(w.item(3)) if conc<0: conc=0.0 return(np.float64(conc)) # Concurrence calculation for real state density matrix def concurrence_den(self,state,i,j,eps=10**(-13)): """ Calculation of concurrence for a density matrix Parameters state : Real or complex density matrix i : It stores the place values of the qubits. j : It stores the place values of the qubits. eps : Below the eps value the eigenvalues will be considered zero. The default is 10**(-13). 
Returns conc: concurrence value """ sigmay=np.zeros([4,4],dtype='float64') typestate=str(state.dtype) if re.findall("^complex",typestate): sigmay[0,3]=-1 sigmay[1,2]=1 sigmay[2,1]=1 sigmay[3,0]=-1 sub_tr=[i,j] rdm= self.partial_trace_den(state,sub_tr) rhot3=rdm@sigmay@np.conjugate(rdm)@sigmay w,vl,vr,info =la.zgeev(rhot3) wc=[] for i in range(0,4): if abs(w.item(i))<eps: wc.append(0.000000000000000) else: wc.append(abs(w.item(i))) wc.sort(reverse=True) wc=np.array(wc,dtype='float64') conc=math.sqrt(wc.item(0))-math.sqrt(wc.item(1))-\ math.sqrt(wc.item(2))-math.sqrt(wc.item(3)) if conc<0: conc=0 else: sigmay[0,3]=-1 sigmay[1,2]=1 sigmay[2,1]=1 sigmay[3,0]=-1 sub_tr=[i,j] rdm= self.partial_trace_den(state,sub_tr) rhot3=rdm@sigmay@rdm@sigmay wr,wi,vl,vr,info =la.dgeev(rhot3) w=[] for i in range(0,4): if wr[i] < eps: w.append(0.000000000000000) else: w.append(np.float64(wr.item(i))) w.sort(reverse=True) w=np.array(w,dtype='float64') conc=math.sqrt(w.item(0))-math.sqrt(w.item(1))-\ math.sqrt(w.item(2))-math.sqrt(w.item(3)) if conc<0: conc=0.0 return(np.float64(conc)) # Block entropy for a pure real state def block_entropy_vec(self,state,sub_tr,eps=10**(-13)): """ Calculation of block entropy for a quantum state Parameters state : Real or complex state sub_tr: List of numbers designating the particular subsystems not to be traced out. eps : Below the eps value the eigenvalues will be considered zero. The default is 10**(-13). 
Returns Bent: Block entropy value """ typestate=str(state.dtype) rdm= self.partial_trace_vec(state,sub_tr) if re.findall("^complex",typestate): w,v,info=la.zheev(rdm) else: w,v,info=la.dsyev(rdm) wlen=len(w) Bent=0.0 for x in range(0,wlen): if abs(w.item(x))<eps: w[x]=0.000000000000000 else: assert w.item(x) > 0.0,\ "The density matrix entered is not correct as the eigenvalues are negative" Bent=Bent-(w.item(x)*math.log(w.item(x),2)) return(Bent) # Block entropy for a pure real density matrix def block_entropy_den(self,state,sub_tr,eps=10**(-13)): """ Calculation of block entropy for a density matrix Parameters state : Real or complex density matrix sub_tr: List of numbers designating the particular subsystems not to be traced out. eps : Below the eps value the eigenvalues will be considered zero. The default is 10**(-13). Returns Bent: Block entropy value """ typestate=str(state.dtype) rdm= self.partial_trace_den(state,sub_tr) if re.findall("^complex",typestate): w,v,info=la.zheev(rdm) else: w,v,info=la.dsyev(rdm) wlen=len(w) Bent=0.0 for x in range(0,wlen): if abs(w.item(x))<eps: w[x]=0.000000000000000 else: assert w.item(x) > 0.0,\ "The density matrix entered is not correct as the eigenvalues are negative" Bent=Bent-(w.item(x)*math.log(w.item(x),2)) return(Bent) # Q measure for pure real state def QMeasure_vec(self,state): """ Calculation of Q measure for a quantum state Parameters state : Real or complex state Returns Qmeas: Q measure value """ NN=math.log2(state.shape[0])/math.log2(2) NN=int(NN) sub_tr=np.zeros([NN,1]) sum3=0.0 for x in range(0,NN): sub_tr=[] sub_tr.append(x+1) rho=self.partial_trace_vec(state,sub_tr) rho=np.matmul(rho,rho) tr2=np.trace(rho) sum3=sum3+tr2 Qmeas=2*(1-(sum3/NN)) return abs(Qmeas) # Q measure for real density matrix def QMeasure_den(self,den): """ Calculation of Q measure for a density matrix Parameters den : Real or complex density matrix Returns Qmeas: Q measure value """ NN=math.log2(den.shape[0])/math.log2(2) NN=int(NN) 
sub_tr=np.zeros([NN,1]) sum3=0.0 for x in range(0,NN): sub_tr=[] sub_tr.append(x+1) rho=self.partial_trace_den(den,sub_tr) rho=np.matmul(rho,rho) tr2=np.trace(rho) sum3=sum3+tr2 Qmeas=2*(1-(sum3/NN)) return abs(Qmeas) # Negativity of real pure state def negativity_log_vec(self,state,sub_tr,eps=10**(-13)): """ Calculation of negativity and logarithmic negativity for a quantum state Parameters state : Real or complex state sub_tr: List of numbers designating the particular subsystems to be transposed. eps : Below the eps value the eigenvalues will be considered zero. The default is 10**(-13). Returns negv,lognegv : negativity and log negativity values, respectively """ laobj=LA() typestate=str(state.dtype) rhoa=self.ptranspose_vec(state,sub_tr) if re.findall("^complex",typestate): negv=laobj.trace_norm_cmatrix(rhoa,precision=eps) else: negv=laobj.trace_norm_rmatrix(rhoa,precision=eps) assert negv > 0.0,\ "The density matrix entered is not correct as the negativity is negative" lognegv=math.log2(negv) negv=(negv-1)/2 return(negv,lognegv) # Negativity of real pure state def negativity_log_den(self,den,sub_tr,eps=10**(-13)): """ Calculation of negativity and logarithmic negativity for a density matrix Parameters state : Real or complex density matrix sub_tr: List of numbers designating the particular subsystems to be transposed. eps : Below the eps value the eigenvalues will be considered zero. The default is 10**(-13). 
Returns negv,lognegv : negativity and log negativity values, respectively """ laobj=LA() typestate=str(den.dtype) rhoa=self.ptranspose_den(den,sub_tr) if re.findall("^complex",typestate): negv=laobj.trace_norm_cmatrix(rhoa,precision=eps) else: negv=laobj.trace_norm_rmatrix(rhoa,precision=eps) assert negv > 0.0,\ "The density matrix entered is not correct as the negativity is negative" lognegv=math.log2(negv) negv=(negv-1)/2 return(negv,lognegv) def renyi_entropy(self,rho,alpha): """ Calculation of Renyi entropy Parameters rho : Real or complex density matrix alpha : It is the value of Renyi index Returns renyi : Renyi Entropy value """ assert alpha != 1.0, "alpha should not be equal to 1" typerho=str(rho.dtype) laobj=LA() if re.findall('^complex',typerho): renyi=math.log(abs(np.trace(laobj.power_hmatrix(rho,alpha))))/(1-alpha) else: renyi=math.log(np.trace(laobj.power_smatrix(rho,alpha)))/(1-alpha) return renyi def entanglement_spectrum(self,rho): """ Calculation of entanglement spectrum of a density matrix Parameters rho : Real or complex density matrix Returns eigenvalues : List containing the eigenvalues of rho logeigenvalues : List containing the negative logarithmic eigenvalues of rho """ typerho=str(rho.dtype) if re.findall('^complex',typerho): eigenvalues,eigenvectors,info=la.zheev(rho) else: eigenvalues,eigenvectors,info=la.dsyev(rho) logeigenvalues=np.zeros([eigenvalues.shape[0]],dtype='float64') for i in range(0,eigenvalues.shape[0]): assert eigenvalues[i]>0.0,\ "The eigenvalues of the matrix is coming less than equal to zero" logeigenvalues[i]=(-1)*math.log(eigenvalues[i]) return (eigenvalues,logeigenvalues) def residual_entanglement_vec(self,state): """ Calculation of residual entanglement for a three-qubit quantum state Parameters state : Real or complex 3-qubit state Returns res_tang : Residual entanglement value """ assert state.shape[0]==8,"It is not a three qubit quantum system" det=np.linalg.det(self.partial_trace_vec(state,[1])) det=4*det 
res_tang=det-(self.concurrence_vec(state,1,2)**2)-\ (self.concurrence_vec(state,1,3)**2) res_tang=abs(res_tang) return res_tang def residual_entanglement_den(self,den): """ Calculation of residual entanglement for a three-qubit density matrix Parameters den : Real or complex 3-qubit density matrix Returns res_tang : Residual entanglement value """ assert den.shape[0]==8,"It is not a three qubit quantum system" det=np.linalg.det(self.partial_trace_den(den,[1])) det=4*det res_tang=det-(self.concurrence_den(den,1,2)**2)-\ (self.concurrence_den(den,1,3)**2) res_tang=abs(res_tang) return res_tang
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing the random horizontal flip with bounding boxes op in DE
"""
import numpy as np
import mindspore.log as logger
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision

from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
    config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5

# When True, regenerate the golden md5 reference files instead of checking.
GENERATE_GOLDEN = False

# updated VOC dataset with correct annotations
DATA_DIR = "../data/dataset/testVOC2012_2"
DATA_DIR_2 = ["../data/dataset/testCOCO/train/",
              "../data/dataset/testCOCO/annotations/train.json"]  # DATA_DIR, ANNOTATION_DIR


def test_random_horizontal_flip_with_bbox_op_c(plot_vis=False):
    """
    Prints images and bboxes side by side with and without
    RandomHorizontalFlipWithBBox Op applied
    """
    logger.info("test_random_horizontal_flip_with_bbox_op_c")
    # Load dataset twice: dataVoc1 stays unaugmented, dataVoc2 gets the op.
    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)

    # prob=1 -> the flip is applied deterministically to every sample
    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    dataVoc2 = dataVoc2.map(operations=[test_op],
                            input_columns=["image", "bbox"],
                            output_columns=["image", "bbox"],
                            column_order=["image", "bbox"])

    unaugSamp, augSamp = [], []
    # Iterate both pipelines in lockstep and collect samples for comparison.
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),
                          dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)


def test_random_horizontal_flip_with_bbox_op_coco_c(plot_vis=False):
    """
    Prints images and bboxes side by side with and without
    RandomHorizontalFlipWithBBox Op applied,
    Testing with COCO dataset
    """
    logger.info("test_random_horizontal_flip_with_bbox_op_coco_c")

    dataCoco1 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1],
                               task="Detection", decode=True, shuffle=False)
    dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1],
                               task="Detection", decode=True, shuffle=False)

    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    dataCoco2 = dataCoco2.map(operations=[test_op],
                              input_columns=["image", "bbox"],
                              output_columns=["image", "bbox"],
                              column_order=["image", "bbox"])

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataCoco1.create_dict_iterator(num_epochs=1, output_numpy=True),
                          dataCoco2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        # COCO annotations use the "bbox" key for the box column.
        visualize_with_bounding_boxes(unaugSamp, augSamp, "bbox")


def test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False):
    """
    Uses a valid non-default input, expect to pass
    Prints images side by side with and without Aug applied + bboxes to
    compare and test
    """
    logger.info("test_random_horizontal_bbox_valid_rand_c")

    # Fix seed and worker count so the random flips (and the md5 golden
    # comparison below) are reproducible; restored at the end.
    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Load dataset
    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)

    test_op = c_vision.RandomHorizontalFlipWithBBox(0.6)

    # map to apply ops
    dataVoc2 = dataVoc2.map(operations=[test_op],
                            input_columns=["image", "bbox"],
                            output_columns=["image", "bbox"],
                            column_order=["image", "bbox"])

    filename = "random_horizontal_flip_with_bbox_01_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),
                          dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)


def test_random_horizontal_flip_with_bbox_valid_edge_c(plot_vis=False):
    """
    Test RandomHorizontalFlipWithBBox op (testing with valid edge case,
    box covering full image).
    Prints images side by side with and without Aug applied + bboxes to
    compare and test
    """
    logger.info("test_horizontal_flip_with_bbox_valid_edge_c")

    dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)

    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    # map to apply ops
    # Add column for "bbox": replace every bbox by a single box that covers
    # the full image (x, y, w, h, plus three trailing annotation fields).
    dataVoc1 = dataVoc1.map(
        operations=lambda img, bbox: (img, np.array(
            [[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)),
        input_columns=["image", "bbox"],
        output_columns=["image", "bbox"],
        column_order=["image", "bbox"])
    dataVoc2 = dataVoc2.map(
        operations=lambda img, bbox: (img, np.array(
            [[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)),
        input_columns=["image", "bbox"],
        output_columns=["image", "bbox"],
        column_order=["image", "bbox"])
    dataVoc2 = dataVoc2.map(operations=[test_op],
                            input_columns=["image", "bbox"],
                            output_columns=["image", "bbox"],
                            column_order=["image", "bbox"])

    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),
                          dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        unaugSamp.append(unAug)
        augSamp.append(Aug)

    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)


def test_random_horizontal_flip_with_bbox_invalid_prob_c():
    """
    Test RandomHorizontalFlipWithBBox op with invalid input probability
    """
    logger.info("test_random_horizontal_bbox_invalid_prob_c")

    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)

    try:
        # Note: Valid range of prob should be [0.0, 1.0]
        test_op = c_vision.RandomHorizontalFlipWithBBox(1.5)
        # map to apply ops
        dataVoc2 = dataVoc2.map(operations=[test_op],
                                input_columns=["image", "bbox"],
                                output_columns=["image", "bbox"],
                                column_order=["image", "bbox"])  # Add column for "bbox"
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input prob is not within the required interval of (0.0 to 1.0)." in str(error)


def test_random_horizontal_flip_with_bbox_invalid_bounds_c():
    """
    Test RandomHorizontalFlipWithBBox op with invalid bounding boxes
    """
    logger.info("test_random_horizontal_bbox_invalid_bounds_c")

    test_op = c_vision.RandomHorizontalFlipWithBBox(1)

    # Each check needs a fresh dataset: check_bad_bbox corrupts the boxes
    # in a specific way and asserts the op raises a matching error.
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WidthOverflow,
                   "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.HeightOverflow,
                   "bounding boxes is out of bounds of the image")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.NegativeXY, "min_x")
    dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train",
                             shuffle=False, decode=True)
    check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WrongShape, "4 features")


if __name__ == "__main__":
    # set to false to not show plots
    test_random_horizontal_flip_with_bbox_op_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_op_coco_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_valid_edge_c(plot_vis=False)
    test_random_horizontal_flip_with_bbox_invalid_prob_c()
    test_random_horizontal_flip_with_bbox_invalid_bounds_c()
# NumPy indexing/broadcasting practice script plus loading of an artificial
# seismic record from Excel files.  (Comments translated from Portuguese.)
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import pandas as pd

F0 = np.zeros((4,3))
F0[0,2] = 10
F0[3,2] = 6
Fa = np.c_[F0, np.zeros((4,6-2))]  # np.c_ appends columns
Fa = np.r_[Fa, [np.ones(np.shape(Fa)[1])]]  # np.r_ appends rows
print(np.shape(F0)[0])
print(F0, F0[:,1])
print(Fa)
T = 10
# Pad F0 with zero columns up to T columns in total.
F = np.c_[F0, np.zeros((np.shape(F0)[0],T - np.shape(F0)[1]))]
print(np.zeros((3,1)))
A = np.array([[2,3], [1,4]])
B = np.array([[1,4], [1,1]]).T
C = np.array([[1,4], [1,1]])
D = B[:,1] - np.dot(A[0,:],B)
E = np.array([[2,3]]).T
F = np.add(A[:,1],B)
G = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
print(D)
print(np.dot(C,E))
print(G[:,1])
# The operation below broadcasts: a (2,) slice minus a (2,1) product
# duplicates the result and subtracts element-wise.
print(G[:,1] - np.dot(C,E))
H = np.copy(G[:,1])
print(H - np.dot(C,E))
H = np.zeros((2,1))
H[:,0] = G[:,1]  # OR H[:,0] = np.copy(G[:,1])  # problem solved!
print(H)
print(H-np.dot(C,E))
# For multiplication there is no problem extracting a column directly.
# For addition and subtraction, however, a variable must be declared as a
# matrix and the column assigned into it (to keep the (n,1) shape).
print(np.dot(C[1,:],E))
I = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
print(I[::2,:])  # take every other row
print(I[::2,1])  # take the second element of every other row
print(np.ones(3))  # a 3-component vector with 1 in each component
print(np.ones((2,3)))
x = np.linspace(0,40,num=2000)
xd = len(x)
print(xd)
y = np.zeros((xd))
alpha = 200
# Ramp over the first 5% of the samples.
step1 = int(0.05*xd)
print(step1)
y[0:step1] = 0.5*(1+(alpha-1)*x[0:step1]/x[step1])
plt.plot(x,y,'r--')
plt.figure(1,figsize=(8,4))
plt.xlim(0,40)
plt.grid(True)
plt.show()
# Load the three components of the artificial seismic record and stack
# them into a single (3, n_samples) array.
comp_0 = pd.read_excel('sismo_artificial_comp0.xlsx').to_numpy().T
comp_90 = pd.read_excel('sismo_artificial_comp90.xlsx').to_numpy().T
up = pd.read_excel('sismo_artificial_up.xlsx').to_numpy().T
col = np.shape(comp_0)[1]
sismo_artificial = np.zeros((3,col))
sismo_artificial[0,:] = np.copy(comp_0)
sismo_artificial[1,:] = np.copy(comp_90)
sismo_artificial[2,:] = np.copy(up)
print(sismo_artificial)
""" Copyright StrangeAI Authors @2019 original forked from deepfakes repo edit and promoted by StrangeAI authors """ from __future__ import print_function import argparse import os import cv2 import numpy as np import torch import torch.utils.data from torch import nn, optim from torch.autograd import Variable from torch.nn import functional as F import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader from models.swapnet import SwapNet, toTensor, var_to_np from utils.util import get_image_paths, load_images, stack_images from dataset.training_data import get_training_data from alfred.dl.torch.common import device from shutil import copyfile from loguru import logger from dataset.face_pair_dataset import FacePairDataset, FacePairDataset64x64 from torchvision import transforms import sys logger.remove() # Remove the pre-configured handler logger.start(sys.stderr, format="<lvl>{level}</lvl> {time:MM-DD HH:mm:ss} {file}:{line} - {message}") batch_size = 64 epochs = 100000 save_per_epoch = 300 a_dir = './data/galgadot_fbb/fanbingbing_faces' b_dir = './data/galgadot_fbb/galgadot_faces' # we start to train on bigger size target_size = 64 dataset_name = 'galgadot_fbb' log_img_dir = './checkpoint/results_{}_{}x{}'.format(dataset_name, target_size, target_size) log_model_dir = './checkpoint/{}_{}x{}'.format(dataset_name, target_size, target_size) check_point_save_path = os.path.join( log_model_dir, 'faceswap_{}_{}x{}.pth'.format(dataset_name, target_size, target_size)) def main(): os.makedirs(log_img_dir, exist_ok=True) os.makedirs(log_model_dir, exist_ok=True) transform = transforms.Compose([ # transforms.Resize((target_size, target_size)), transforms.RandomHorizontalFlip(), # transforms.RandomVerticalFlip(), # transforms.ToTensor(), ]) ds = FacePairDataset64x64(a_dir=a_dir, b_dir=b_dir, target_size=target_size, transform=transform) dataloader = DataLoader(ds, batch_size, shuffle=True) model = SwapNet() model.to(device) start_epoch = 0 logger.info('try 
resume from checkpoint') if os.path.isdir('checkpoint'): try: if torch.cuda.is_available(): checkpoint = torch.load(check_point_save_path) else: checkpoint = torch.load( check_point_save_path, map_location={'cuda:0': 'cpu'}) model.load_state_dict(checkpoint['state']) start_epoch = checkpoint['epoch'] logger.info('checkpoint loaded.') except FileNotFoundError: print('Can\'t found faceswap_trump_cage.pth') criterion = nn.L1Loss() optimizer_1 = optim.Adam([{'params': model.encoder.parameters()}, {'params': model.decoder_A.parameters()}], lr=5e-5, betas=(0.5, 0.999)) optimizer_2 = optim.Adam([{'params': model.encoder.parameters()}, {'params': model.decoder_B.parameters()}], lr=5e-5, betas=(0.5, 0.999)) logger.info('Start training, from epoch {} '.format(start_epoch)) try: for epoch in range(start_epoch, epochs): iter = 0 for data in dataloader: iter += 1 img_a_target, img_a_input, img_b_target, img_b_input = data img_a_target = img_a_target.to(device) img_a_input = img_a_input.to(device) img_b_target = img_b_target.to(device) img_b_input = img_b_input.to(device) # print(img_a.size()) # print(img_b.size()) optimizer_1.zero_grad() optimizer_2.zero_grad() predict_a = model(img_a_input, select='A') predict_b = model(img_b_input, select='B') loss1 = criterion(predict_a, img_a_target) loss2 = criterion(predict_b, img_b_target) loss1.backward() loss2.backward() optimizer_1.step() optimizer_2.step() logger.info('Epoch: {}, iter: {}, lossA: {}, lossB: {}'.format( epoch, iter, loss1.item(), loss2.item())) if epoch % save_per_epoch == 0 and epoch != 0: logger.info('Saving models...') state = { 'state': model.state_dict(), 'epoch': epoch } torch.save(state, os.path.join(os.path.dirname( check_point_save_path), 'faceswap_trump_cage_128x128_{}.pth'.format(epoch))) copyfile(os.path.join(os.path.dirname(check_point_save_path), 'faceswap_trump_cage_128x128_{}.pth'.format(epoch)), check_point_save_path) if epoch % 10 == 0 and epoch != 0 and iter == 1: img_a_original = 
np.array(img_a_target.detach().cpu().numpy()[0].transpose(2, 1, 0)*255, dtype=np.uint8) img_b_original = np.array(img_b_target.detach().cpu().numpy()[0].transpose(2, 1, 0)*255, dtype=np.uint8) a_predict_a = np.array(predict_a.detach().cpu().numpy()[0].transpose(2, 1, 0)*255, dtype=np.uint8) b_predict_b = np.array(predict_b.detach().cpu().numpy()[0].transpose(2, 1, 0)*255, dtype=np.uint8) a_predict_b = model(img_a_input, select='B') b_predict_a = model(img_b_input, select='A') a_predict_b = np.array(a_predict_b.detach().cpu().numpy()[0].transpose(2, 1, 0)*255, dtype=np.uint8) b_predict_a = np.array(b_predict_a.detach().cpu().numpy()[0].transpose(2, 1, 0)*255, dtype=np.uint8) cv2.imwrite(os.path.join(log_img_dir, '{}_0.png'.format(epoch)), cv2.cvtColor(img_a_original, cv2.COLOR_BGR2RGB)) cv2.imwrite(os.path.join(log_img_dir, '{}_3.png'.format(epoch)), cv2.cvtColor(img_b_original, cv2.COLOR_BGR2RGB)) cv2.imwrite(os.path.join(log_img_dir, '{}_1.png'.format(epoch)), cv2.cvtColor(a_predict_a, cv2.COLOR_BGR2RGB)) cv2.imwrite(os.path.join(log_img_dir, '{}_4.png'.format(epoch)), cv2.cvtColor(b_predict_b, cv2.COLOR_BGR2RGB)) cv2.imwrite(os.path.join(log_img_dir, '{}_2.png'.format(epoch)), cv2.cvtColor(a_predict_b, cv2.COLOR_BGR2RGB)) cv2.imwrite(os.path.join(log_img_dir, '{}_5.png'.format(epoch)), cv2.cvtColor(b_predict_a, cv2.COLOR_BGR2RGB)) logger.info('Record a result') except KeyboardInterrupt: logger.warning('try saving models...do not interrupt') state = { 'state': model.state_dict(), 'epoch': epoch } torch.save(state, os.path.join(os.path.dirname( check_point_save_path), 'faceswap_trump_cage_256x256_{}.pth'.format(epoch))) copyfile(os.path.join(os.path.dirname(check_point_save_path), 'faceswap_trump_cage_256x256_{}.pth'.format(epoch)), check_point_save_path) if __name__ == "__main__": main()
# required python version: 3.6+
# Trains the same 784-100-10 classifier four times: initialized from RBM,
# AE, and DAE pre-trained weights (pickled dumps), then from random init,
# for comparison of pre-training strategies.
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
from src.network import NeuralNetwork_Dumpable as NN
import src.network as network
import matplotlib.pyplot as plt
import numpy
import os
import pickle

# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)

# x range [0, 1]
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)

#l1 = Layer(784, 100, 10)
print("start initiliazing...")

# SET UP GLOBAL PARAMETERS
lr = 0.05
momentum = 0.0
regularizer = 0.0
numpy.random.seed(1099)

# Run 1: first layer initialized from RBM pre-trained weights.
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)

full_path = os.path.realpath(__file__)
path, _ = os.path.split(full_path)
data_filepath = '../output/dump'
filepath = os.path.join(path, data_filepath,
                        'script-2-1-naive-autostop-rbm-whx-2639.dump')
with open(filepath, 'rb') as f:
    w, h_bias, x_bias = pickle.load(f)
# Dump stores (hidden, visible) weights; transpose into the Linear layer.
myNN.layers[0].w = w.T
myNN.layers[0].b = h_bias
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32)

# Run 2: first layer initialized from autoencoder (AE) weights.
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)
filepath = os.path.join(path, data_filepath,
                        'script-2-5-AE-autostop-rbm-whx-1579.dump')
with open(filepath, 'rb') as f:
    w, h_bias, x_bias = pickle.load(f)
myNN.layers[0].w = w.T
myNN.layers[0].b = h_bias
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32)

# Run 3: first layer initialized from denoising autoencoder (DAE) weights.
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)
filepath = os.path.join(path, data_filepath,
                        'script-2-6-DAE-autostop-rbm-whx-1299.dump')
with open(filepath, 'rb') as f:
    w, h_bias, x_bias = pickle.load(f)
myNN.layers[0].w = w.T
myNN.layers[0].b = h_bias
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32)

# Run 4: baseline with random initialization (no pre-training).
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32)
# Train and cross-validate vCGPDM (variational Coupled Gaussian Process
# Dynamical Model) models on mocap data, then aggregate error statistics.
import os
import sys
import time
import logging
import pickle
import numpy as np
import matplotlib
if "DISPLAY" not in os.environ:
    # Headless environment: fall back to a non-interactive backend before
    # pyplot is imported.
    print("No DISPLAY found. Switching to noninteractive matplotlib backend...")
    print("Old backend is: {}".format(matplotlib.get_backend()))
    matplotlib.use('Agg')
    print("New backend is: {}".format(matplotlib.get_backend()))
import matplotlib.pyplot as plt
import ml.gptheano.vecgpdm.model as mdl
import numerical.numpytheano.theanopool as tp
import ml.gptheano.vecgpdm.kernels as krn
import numerical.numpyext.logger as npl
from ml.gptheano.vecgpdm.enums import *
import dataset.mocap as ds
from validation.common import *
import argparse


def train_CGPDM(y, settings):
    """Build a vCGPDM from the trajectories in `y`, optimize it (unless a
    saved state exists in settings["directory"]), and return the model."""
    directory = settings["directory"]
    print(directory)
    if not os.path.exists(directory):
        os.makedirs(directory)
    statefilename = "{}/CGPDM_learned.pkl".format(directory)
    if not os.path.exists(statefilename):
        # Only set up file logging when we are actually going to train.
        npl.setup_root_logger(rootlogfilename="{}/rootlog.txt".format(directory))
        npl.setup_numpy_logger("ml.gptheano.vecgpdm.optimization",
                               nplogfilename="{}/numpylog.pkl".format(directory))
    ns = tp.TheanoVarPool()
    data = mdl.ModelData(y, ns=ns)
    params = mdl.ModelParam(data, Qs=settings["Qs"], parts_IDs=settings["parts_IDs"],
                            dyn_Ms=settings["dyn_Ms"], lvm_Ms=settings["lvm_Ms"],
                            lvm_kern_type=krn.RBF_Kernel,
                            estimation_mode=settings["estimation_mode"], ns=ns)
    model = mdl.VECGPDM(params, ns=ns)
    if not os.path.exists(statefilename):
        # Fresh model: plot the initial state, optimize, then persist.
        model.precalc_posterior_predictive()
        mdl.save_plot_latent_space(model, directory, prefix="initial")
        mdl.save_plot_latent_vs_generated(model, directory, prefix="initial")
        mdl.save_plot_training_vs_generated(model, directory, prefix="initial")
        if not settings["dry_run"]:
            if settings["optimize_joint"]:
                mdl.optimize_joint(model, maxiter=settings["maxiter"],
                                   save_directory=directory)
            else:
                mdl.optimize_blocked(model, maxrun=settings["maxrun"],
                                     maxiter=settings["maxiter"], print_vars=True,
                                     save_directory=directory)
        model.save_state_to(statefilename)
    else:
        # Reuse the previously learned state.
        model.load_state_from(statefilename)
        model.precalc_posterior_predictive()
    return model


def run_CGPDM_crossvalidation(training, validation, settings, bvhpartitioner=None):
    """Train on `training` (+ a short seed from `validation`), generate the
    held-out sequence, and write prediction errors to errors.pkl.  Skips
    work entirely if errors.pkl already exists."""
    errorsfilename = settings["directory"] + "/errors.pkl"
    if not os.path.exists(errorsfilename):
        if not os.path.exists(settings["directory"]):
            os.makedirs(settings["directory"])
        y = [t.copy() for t in training]
        # Validation seed is at the end
        y.append(validation[:settings["validation_seed_size"], :])
        t0 = time.time()
        model = train_CGPDM(y, settings=settings)
        t1 = time.time()
        # Second-order dynamics consume dyn_order frames of the seed.
        dyn_order = 2
        validation_skip = settings["validation_seed_size"] - dyn_order
        T_validation = validation.shape[0] - validation_skip
        datasize = model.param.data.N
        # Held-out data primer is at the end
        x0 = model.get_dynamics_start_point(datasize - dyn_order)
        x_generated = model.run_generative_dynamics(T_validation, x0)
        y_generated = model.lvm_map_to_observed(x_generated)
        errors = compute_errors(observed=validation[validation_skip:, :],
                                predicted=np.hstack(y_generated))
        errors["ELBO"] = model.get_elbo_value()
        errors["timing"] = t1-t0
        errors["settings"] = settings
        # Make a BVH
        if bvhpartitioner is not None:
            nframes = settings["bvh_nframes"]
            x_generated = model.run_generative_dynamics(nframes, x0)
            y_generated = model.lvm_map_to_observed(x_generated)
            bvhpartitioner.set_all_parts_data(np.hstack(y_generated))
            bvhpartitioner.motiondata.write_BVH(settings["directory"] + "/final.bvh")
        # Write the errors
        with open(errorsfilename, "wb") as filehandle:
            pickle.dump(errors, filehandle)


def print_settings(training, validation, settings, bvhpartitioner=None):
    """Debug callback with the same signature as run_CGPDM_crossvalidation:
    just prints the target directory of each settings combination."""
    print(settings["directory"])


def create_model_iterator(id=1):
    """Create a ModelIterator preloaded with recording `id` (1: walk,
    2: protest walk) and the hyper-parameter grid to sweep."""
    if id == 1:
        miter = ModelIterator()
        miter.load_recording(
            ds.Recordings.exp3_walk,
            bodypart_motiontypes=[(ds.BodyParts.Upper, ds.MotionType.WALK_PHASE_ALIGNED),
                                  (ds.BodyParts.Lower, ds.MotionType.WALK_PHASE_ALIGNED)],
            max_chunks=10)
    elif id == 2:
        miter = ModelIterator()
        miter.load_recording(
            ds.Recordings.exp3_protest_walk_2arms,
            bodypart_motiontypes=[(ds.BodyParts.Upper, ds.MotionType.WALK_SHAKE_ARMS),
                                  (ds.BodyParts.Lower, ds.MotionType.WALK_SHAKE_ARMS)],
            max_chunks=10)
    miter.settings = {"recording_info": miter.recording.info,
                      "recording_filename": miter.recording.filename,
                      "directory": None,
                      "estimation_mode": EstimationMode.ELBO,
                      "validation_seed_size": 4,  # number of frames to be included into the training set
                      "Qs": [3] * miter.nparts,  # latent space dimensionality
                      "parts_IDs": miter.parts_IDs,
                      "dyn_Ms": None,
                      "lvm_Ms": None,
                      "optimize_joint": False,
                      "maxrun": 3,
                      "maxiter": 300,
                      "bvh_nframes": 300,  # number of frames to generate
                      "dry_run": False,  # skip optimization completely
                      "hold": None,}
    # Grid of parameter values; "hold" enumerates the held-out chunk for
    # leave-one-out crossvalidation.
    miter.params_range = [("estimation_mode", [EstimationMode.ELBO]),
                          ("dyn_Ms", [(i,)*miter.nparts for i in [8]]),
                          ("lvm_Ms", [(i,)*miter.nparts for i in [10]]),
                          ("hold", range(miter.trial.nchunks()))]
    miter.directory = "./cgpdm_{}".format(id)
    return miter


def analyze_stats():
    """Collect learned errors for both datasets and write per-key summary
    plots (ELBO/MSE/... vs dyn_Ms) plus a stats-info.txt file."""
    def firts_values(llst):
        # First element of each sub-list (e.g. first part's M value).
        return [lst[0] for lst in llst]

    for dataset_id in (1, 2):
        stats = ErrorStatsReader()
        miter = create_model_iterator(dataset_id)
        # Analysis
        save_dir = miter.directory
        miter.iterate_all_settings(stats.read_learned_errors)
        stats.params_range = miter.params_range
        plot_dir = "{}/statistics".format(save_dir)
        if not os.path.exists(plot_dir):
            os.makedirs(plot_dir)
        statsfile = open(plot_dir + "/stats-info.txt", "w")
        # vCGPDM ELBO and MSE plots
        for key in ["ELBO", "MSE", "WRAP_DYN", "WRAP_PATH", "timing"]:
            errs_axes = stats.params_range
            errs_value = stats.to_tensor(key=key, filter=None)
            errs_axes, errs_value = select_by(errs_axes, errs_value,
                                             [("estimation_mode", EstimationMode.ELBO)])
            for iter_comb, axes, data in iterate_by(errs_axes, errs_value,
                                                    iter_params_keys=["lvm_Ms"]):
                # Average over the crossvalidation folds ("hold" axis).
                means, std = mean_std(axes, data, alongs=["hold"])
                fig = plt.figure(figsize=(6, 5))
                plt.plot(firts_values(values_by_name(axes, "dyn_Ms")), data,
                         "x", markersize=10)
                plt.errorbar(firts_values(values_by_name(axes, "dyn_Ms")),
                             means, std, fmt='--o', capsize=2)
                plt.xlabel("dyn_Ms")
                plt.ylabel(key)
                plt.title("vCGPDM, {} \n Model parameters: {}".format(key, iter_comb))
                plt.savefig("{}/vCGPDM-{}-parameters({}).pdf".format(plot_dir, key, iter_comb))
                plt.close(fig)
                statsfile.write("Key: {}, means: {}, std: {}\n".format(key, means, std))
        statsfile.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog=__file__,
        description="""\
Train vCGPDM crossvalidation models""",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("--v", action='version', version='%(prog)s 0.1')
    parser.add_argument("--i", type=int, default=None,
                        help="""\
run only i-th model. i in 1..IMAX. None to run all models. 0 to collect statistics""")
    args = parser.parse_args()
    i_model = args.i
    if i_model == None:
        # Run the full sweep over both datasets.
        for dataset_id in [1, 2]:
            miter = create_model_iterator(dataset_id)
            miter.iterate_all_settings(run_CGPDM_crossvalidation)
    elif i_model == 0:
        analyze_stats()
    elif i_model <= 10:
        # Models 1..10 map to dataset 1.
        miter = create_model_iterator(1)
        miter.iterate_all_settings(run_CGPDM_crossvalidation, i_model=i_model-1)
    elif i_model <= 20:
        # Models 11..20 map to dataset 2.
        miter = create_model_iterator(2)
        miter.iterate_all_settings(run_CGPDM_crossvalidation, i_model=i_model-10-1)
""" Convolution module: gathers functions that define a convolutional operator. """ # Authors: Hamza Cherkaoui <hamza.cherkaoui@inria.fr> # License: BSD (3-clause) import numpy as np import numba from scipy import linalg from .atlas import get_indices_from_roi @numba.jit((numba.float64[:, :], numba.float64[:, :], numba.float64[:, :], numba.int64[:, :]), nopython=True, cache=True, fastmath=True) def adjconv_uv(residual_i, u, v, rois_idx): # pragma: no cover """ Pre-compute the convolution residual_i with the transpose for each atom k. Parameters ---------- residual_i : array, shape (n_voxels, n_times) residual term in the gradient u : array, shape (n_atoms, n_voxels) spatial maps v : array, shape (n_hrf_rois, n_times_atom) HRFs rois_idx: array, shape (n_hrf_rois, max_indices_per_rois), HRF ROIs Return ------ uvtX : array, shape (n_atoms, n_times_valid) computed operator image """ # use to compute uvtX for grad_z function _, n_times_atom = v.shape n_voxels, n_time = residual_i.shape vtX = np.empty((n_voxels, n_time - n_times_atom + 1)) # double for-loop that cycle on all the voxels for m in range(rois_idx.shape[0]): for j in get_indices_from_roi(m, rois_idx): vtX[j, :] = np.correlate(residual_i[j, :], v[m, :]) return np.dot(u, vtX) def adjconv_uH(residual, u, H, rois_idx): """ Pre-compute the convolution residual with the transpose for each atom k. Parameters ---------- residual : array, shape (n_voxels, n_times) residual term in the gradient u : array, shape (n_atoms, n_voxels) spatial maps H : array, shape (n_hrf_rois, n_times_valid, n_times), Toeplitz matrices rois_idx: array, shape (n_hrf_rois, max_indices_per_rois), HRF ROIs Return ------ uvtX : array, shape (n_atoms, n_times_valid) computed operator image """ # use to compute uvtX for grad_z function # no need to Numba this function the dot product is already on C. 
This # function is not faster than adjconv_uv since residual[indices, :] is not # adjacent is the memory n_hrf_rois, _, n_times_valid = H.shape n_voxels, _ = residual.shape vtX = np.empty((n_voxels, n_times_valid)) for m in range(n_hrf_rois): indices = get_indices_from_roi(m, rois_idx) vtX[indices, :] = residual[indices, :].dot(H[m, :, :]) return np.dot(u, vtX) def make_toeplitz(v, n_times_valid): """ Make Toeplitz matrix from given kernel to perform valid convolution. Parameters ---------- v : array, shape (n_times_atom), HRF n_times_valid : int, length of the temporal components Return ------ H : array, shape (n_times, n_times_valid), Toeplitz matrix, recall that n_times = n_times_valid + n_times_atom -1 """ padd = np.zeros((1, n_times_valid - 1)) return linalg.toeplitz(np.c_[v[None, :], padd], np.c_[1.0, padd])
# Unit tests for cf's date-time helpers (cf.dt, rt2dt, dt2rt, dt_vector,
# st2rt) across calendars and masked-array inputs.
import datetime
import faulthandler
import unittest

import numpy as np

faulthandler.enable()  # to debug seg faults and timeouts

import cf
from cf import Units


class DatetimeTest(unittest.TestCase):
    def test_Datetime(self):
        """cf.dt accepts positional/keyword components and non-standard
        calendars (e.g. 2003-02-30 is valid in the 360_day calendar)."""
        cf.dt(2003)
        cf.dt(2003, 2)
        cf.dt(2003, 2, 30, calendar="360_day")
        cf.dt(2003, 2, 30, 0, 0, calendar="360_day")
        cf.dt(2003, 2, 30, 0, 0, 0, calendar="360_day")
        cf.dt(2003, 4, 5, 12, 30, 15)
        cf.dt(2003, month=4, day=5, hour=12, minute=30, second=15)

    def test_Datetime_rt2dt(self):
        """Reference-time-to-datetime conversion for scalars, arrays and
        masked arrays."""
        for a in (1, np.array(1), np.ma.array(1)):
            self.assertEqual(
                cf.cfdatetime.rt2dt(a, Units("days since 2004-2-28")),
                np.array(cf.dt(2004, 2, 29, calendar="standard"), dtype="O"),
            )

        # Fully masked inputs stay masked after conversion.
        for a in (np.ma.array(1, mask=True), np.ma.array([1], mask=True)):
            b = cf.cfdatetime.rt2dt(a, Units("days since 2004-2-28"))
            self.assertIsInstance(b, np.ndarray)
            self.assertEqual(b.mask, True)

        self.assertTrue(
            (
                cf.cfdatetime.rt2dt([1, 3], Units("days since 2004-2-28"))
                == np.array(
                    [
                        datetime.datetime(2004, 2, 29),
                        datetime.datetime(2004, 3, 2),
                    ]
                )
            ).all()
        )

        # calendar=None and "gregorian" compare equal to the converted values.
        a = np.array(
            [
                cf.dt(2004, 2, 29, calendar=None),
                cf.dt(2004, 3, 2, calendar="gregorian"),
            ],
            dtype="O",
        )
        b = cf.cfdatetime.rt2dt([1, 3], Units("days since 2004-2-28"))
        self.assertTrue((a == b).all())

        for a in (
            np.ma.array(3),
            np.ma.array([3]),
        ):
            b = cf.cfdatetime.rt2dt(a, Units("days since 1970-01-01"))
            self.assertEqual(b, cf.dt(1970, 1, 4, calendar="gregorian"))

        for a in (
            np.ma.array(3, mask=True),
            np.ma.array([3], mask=True),
        ):
            b = cf.cfdatetime.rt2dt(a, Units("days since 1970-01-01"))
            self.assertEqual(b.mask, True)

    def test_Datetime_dt2rt(self):
        """Datetime-to-reference-time conversion in several units/calendars."""
        units = Units("days since 2004-2-28")
        self.assertEqual(
            cf.cfdatetime.dt2rt(datetime.datetime(2004, 2, 29), None, units),
            np.array(1.0),
        )
        self.assertTrue(
            (
                cf.cfdatetime.dt2rt(
                    [
                        datetime.datetime(2004, 2, 29),
                        datetime.datetime(2004, 3, 2),
                    ],
                    None,
                    units,
                )
                == np.array([1.0, 3.0])
            ).all()
        )
        units = Units("days since 2004-2-28", "360_day")
        self.assertTrue(
            (
                cf.cfdatetime.dt2rt(
                    [cf.dt(2004, 2, 29), cf.dt(2004, 3, 1)], None, units
                )
                == np.array([1.0, 3.0])
            ).all()
        )
        units = Units("seconds since 2004-2-28")
        self.assertEqual(
            cf.cfdatetime.dt2rt(datetime.datetime(2004, 2, 29), None, units),
            np.array(86400.0),
        )

    def test_Datetime_Data(self):
        """Comparison of cf.Data against cf.dt respects the calendar: days
        that do not exist in the data's calendar raise."""
        d = cf.Data([1, 2, 3], "days since 2004-02-28")
        self.assertTrue((d < cf.dt(2005, 2, 28)).all())
        with self.assertRaises(Exception):
            d < cf.dt(2005, 2, 29)  # no Feb 29 in a non-leap 2005

        with self.assertRaises(Exception):
            d < cf.dt(2005, 2, 29, calendar="360_day")  # calendar mismatch

        d = cf.Data([1, 2, 3], "days since 2004-02-28", calendar="360_day")
        self.assertTrue((d < cf.dt(2005, 2, 28)).all())
        self.assertTrue((d < cf.dt(2005, 2, 29)).all())
        self.assertTrue((d < cf.dt(2005, 2, 30)).all())
        with self.assertRaises(Exception):
            d < cf.dt(2005, 2, 31)  # 360_day months have exactly 30 days

        with self.assertRaises(Exception):
            d < cf.dt(2005, 2, 29, calendar="noleap")

    def test_Datetime_dt_vector(self):
        """cf.dt_vector builds object arrays of cf.dt from scalars, nested
        lists, and ISO strings."""
        for v in (2000, [2000], [[2000]], "2000-01-1", ["2000-01-1"]):
            x = cf.dt_vector(v)
            self.assertIsInstance(x, np.ndarray)
            self.assertEqual(x[0], cf.dt(2000, 1, 1))

        for v in ([2000, 2001], [[2000], [2001]]):
            x = cf.dt_vector(v)
            self.assertIsInstance(x, np.ndarray)
            self.assertEqual(
                x.tolist(), [cf.dt(2000, 1, 1), cf.dt(2001, 1, 1)]
            )

        for v in ([[2000, 1], [2001, 2]], ["2000-01-1", "2001-02-1"]):
            x = cf.dt_vector(v)
            self.assertIsInstance(x, np.ndarray)
            self.assertEqual(
                x.tolist(), [cf.dt(2000, 1, 1), cf.dt(2001, 2, 1)]
            )

    def test_Datetime_st2dt(self):
        """String-to-reference-time conversion (st2rt) for str and array
        inputs."""
        for a in (
            "1970-01-04",
            np.array("1970-01-04"),
            np.ma.array(["1970-01-04"]),
        ):
            b = cf.cfdatetime.st2rt(
                a,
                Units("days since 1970-01-01"),
                Units("days since 1970-01-01"),
            )
            self.assertIsInstance(b, np.ndarray)
            self.assertEqual(b, 3)


if __name__ == "__main__":
    print("Run date:", datetime.datetime.now())
    cf.environment()
    print()
    unittest.main(verbosity=2)
import statsmodels.api as sm
import itertools
from dowhy.causal_estimators.regression_estimator import RegressionEstimator


class GeneralizedLinearModelEstimator(RegressionEstimator):
    """Compute effect of treatment using a generalized linear model such as logistic regression.

    Implementation uses statsmodels.api.GLM.
    Needs an additional parameter, "glm_family" to be specified in
    method_params. The value of this parameter can be any valid
    statsmodels.api families object. For example, to use logistic
    regression, specify "glm_family" as statsmodels.api.families.Binomial().
    """

    def __init__(self, *args, glm_family=None, predict_score=True, **kwargs):
        """For a list of args and kwargs, see documentation for
        :class:`~dowhy.causal_estimator.CausalEstimator`.

        :param glm_family: statsmodels family for the generalized linear model.
            For example, use statsmodels.api.families.Binomial() for logistic
            regression or statsmodels.api.families.Poisson() for count data.
        :param predict_score: For models that have a binary output, whether
            to output the model's score or the binary output based on the score.
        :raises ValueError: if ``glm_family`` is not provided.
        """
        # Required to ensure that self.method_params contains all the
        # parameters needed to create an object of this class.  locals()
        # at this point holds args/kwargs/glm_family/predict_score (and
        # self); _STD_INIT_ARGS on the base class filters the standard ones.
        args_dict = {k: v for k, v in locals().items() if k not in type(self)._STD_INIT_ARGS}
        args_dict.update(kwargs)
        super().__init__(*args, **args_dict)
        self.logger.info("INFO: Using Generalized Linear Model Estimator")
        if glm_family is not None:
            self.family = glm_family
        else:
            raise ValueError("Need to specify the family for the generalized linear model. Provide a 'glm_family' parameter in method_params, such as statsmodels.api.families.Binomial() for logistic regression.")
        self.predict_score = predict_score

        # Checking if Y is binary.  NOTE(review): astype(int) assumes the
        # outcome column is numeric-castable — confirm against callers.
        outcome_values = self._data[self._outcome_name].astype(int).unique()
        self.outcome_is_binary = all([v in [0,1] for v in outcome_values])

    def _build_model(self):
        # Fit a GLM of the outcome on the feature matrix assembled by the
        # base class (treatment + backdoor variables + interactions).
        features = self._build_features()
        model = sm.GLM(self._outcome, features, family=self.family).fit()
        return (features, model)

    def predict_fn(self, model, features):
        # For binary outcomes, either return the raw predicted score
        # (probability) or threshold it at 0.5, per ``predict_score``.
        if self.outcome_is_binary:
            if self.predict_score:
                return model.predict(features)
            else:
                return (model.predict(features) > 0.5).astype(int)
        else:
            return model.predict(features)

    def construct_symbolic_estimator(self, estimand):
        # Build a human-readable formula string for the estimator,
        # e.g. "b: y~Sigmoid(v0+w0+v0*x1)".
        expr = "b: " + ",".join(estimand.outcome_variable) + "~" + "Sigmoid("
        var_list = estimand.treatment_variable + estimand.get_backdoor_variables()
        expr += "+".join(var_list)
        if self._effect_modifier_names:
            # Treatment x effect-modifier interaction terms.
            interaction_terms = ["{0}*{1}".format(x[0], x[1]) for x in itertools.product(estimand.treatment_variable, self._effect_modifier_names)]
            expr += "+" + "+".join(interaction_terms)
        expr += ")"
        return expr
import numpy as np __doc__ = """ https://math.stackexchange.com/questions/351913/probability-that-a-stick-randomly-broken-in-five-places-can-form-a-tetrahedron Choose 5 locations on a stick to break it into 6 pieces. What is the probability that these 6 pieces can be edge-lengths of a tetrahedron (3D symplex). """ __all__ = ['mc_three_piece_stick_triangle_prob', 'mc_six_piece_stick_tetrahedron_prob'] def triangle_inequality_(x1, x2, x3): """Efficiently finds `np.less(x1,x2+x3)*np.less(x2,x1+x3)*np.less(x3,x1+x2)`""" tmp_sum = x2 + x3 res = np.less(x1, tmp_sum) # x1 < x2 + x3 np.add(x1, x3, out=tmp_sum) buf = np.less(x2, tmp_sum) # x2 < x1 + x3 np.logical_and(res, buf, out=res) np.add(x1, x2, out=tmp_sum) np.less(x3, tmp_sum, out=buf) # x3 < x1 + x2 np.logical_and(res, buf, out=res) return res def triangle_inequality(x1, x2, x3, out=None): """Efficiently finds `np.less(x1,x2+x3)*np.less(x2,x1+x3)*np.less(x3,x1+x2)`, logically ending this on top of out array, if any""" if out is None: return triangle_inequality_(x1, x2, x3) res = out tmp_sum = x2 + x3 buf = np.less(x1, tmp_sum) # x1 < x2 + x3 np.logical_and(res, buf, out=res) np.add(x1, x3, out=tmp_sum) np.less(x2, tmp_sum, out=buf) # x2 < x1 + x3 np.logical_and(res, buf, out=res) np.add(x1, x2, out=tmp_sum) np.less(x3, tmp_sum, out=buf) # x3 < x1 + x2 np.logical_and(res, buf, out=res) return res def facial_tetrahedron(x, y, z, xb, yb, zb): """ Computes boolean mask for facial tetrahedron condition for six side-lengths This condition is necessary, but not sufficient for 3 sticks to form a tetrahedon yet, it needs to be supplemented with positivity of Cayley-Manger determinant. """ success_mask = triangle_inequality(x, y, zb) # x, y, zb triangle_inequality(x, y, zb, out = success_mask) # x, yb, z triangle_inequality(xb, y, z, out = success_mask) # xb, y, z triangle_inequality(xb, yb, zb, out = success_mask) # xb, yb, zb return success_mask def cayley_menger_mat(x2, y2, z2, xb2, yb2, zb2): """ Menger's determinant. 
If positive, there exist 4 points in R^3, with pair-wise distances squared equal to given 6 arguments. K. Wirth, A.S. Dreiding, Edge lengths determining tetrahedrons, Elemente der Mathematic, vol. 64 (2009) pp. 160-170. """ one = np.ones_like(x2) zero = np.zeros_like(x2) mat = np.array([[zero, x2, y2, z2, one], [x2, zero, zb2, yb2, one], [y2, zb2, zero, xb2, one], [z2, yb2, xb2, zero, one], [one, one, one, one, zero] ]).T return mat def cayley_menger_det_no_linalg(x2, y2, z2, xb2, yb2, zb2): """ D(S) = 2 * x2 * xb2 * (y2 + yb2 + z2 + zb2 - x2 - xb2) + 2 * y2 * yb2 * (z2 + zb2 + x2 + xb2 - y2 - yb2) + 2 * z2 * zb2 * (x2 + xb2 + y2 + yb2 - z2 - zb2) + (x2 - xb2) * (y2 - yb2) * (z2 - zb2) - (x2 + xb2) * (x2 + xb2) * (z2 + zb2) """ xs = x2 + xb2 ys = y2 + yb2 zs = z2 + zb2 buf1 = ys + zs buf1 -= xs buf2 = x2 * xb2 buf1 *= buf2 # buf1 has first term, halved np.multiply(y2, yb2, out=buf2) buf3 = xs + zs buf3 -= ys buf2 *= buf3 # buf2 has second term buf1 += buf2 # buf1 is sum of two terms, halved np.multiply(z2, zb2, out=buf3) np.add(xs, ys, out=buf2) # reuse buf2 buf2 -= zs buf3 *= buf2 # buf3 has third term buf1 += buf3 # buf1 is sum of 3 first terms, halved buf1 *= 2 np.subtract(x2, xb2, out=buf2) np.subtract(y2, yb2, out=buf3) buf2 *= buf3 np.subtract(z2, zb2, out=buf3) buf2 *= buf3 buf1 += buf2 # buf1 is sum of 4 first terms np.multiply(xs, ys, out=buf3) buf3 *= zs buf1 -= buf3 return buf1 def cayley_menger_cond(x2, y2, z2, xb2, yb2, zb2): # return np.linalg.det(cayley_menger_mat(x2, y2, z2, xb2, yb2, zb2)) > 0 return cayley_menger_det_no_linalg(x2, y2, z2, xb2, yb2, zb2) > 0 def mc_six_piece_stick_tetrahedron_prob(rs, n): """ Monte-Carlo estimate of the probability that a unit stick, randomly broken in 5 places (making 6 pieces), can form a tetrahedron. Using provided random state instance `rs` routine generates `n` samples, and outputs the number of tetrahedral 6-tuples. 
""" u = rs.rand(6,n) u[0, :] = 1 np.log(u[1], out=u[1]) u[1] /= 5 np.exp(u[1], out=u[1]) # np.power(u[1], 1/5, out=u[1]) np.sqrt(u[2], out=u[2]) np.sqrt(u[2], out=u[2]) np.cbrt(u[3], out=u[3]) np.sqrt(u[4], out=u[4]) np.cumprod(u, axis=0, out=u) u[0] -= u[1] u[1] -= u[2] u[2] -= u[3] u[3] -= u[4] u[4] -= u[5] success_mask = facial_tetrahedron(u[0], u[1], u[2], u[3], u[4], u[5]) np.square(u, out=u) # only squares enter Cayler-Manger determinant cm_mask = cayley_menger_cond(u[0], u[1], u[2], u[3], u[4], u[5]) np.logical_and(success_mask, cm_mask, out=success_mask) return success_mask.sum() def mc_three_piece_stick_triangle_prob(rs, n): """ Monte-Carlo estimate of probability that a unit stick, randomly broken in 2 places (making 3 pieces), corresponds to a triple of sides of a triangle. Using provided random state instance `rs` routine generates `n` samples, and outputs the number of triangular 3-tuples.""" ws = np.sort(rs.rand(2,n), axis=0) x2 = np.empty(n, dtype=np.double) x3 = np.empty(n, dtype=np.double) x1 = ws[0] np.subtract(ws[1], ws[0], out=x2) np.subtract(1, ws[1], out=x3) return triangle_inequality_(x1, x2, x3).sum()
import cfpq_data
import networkx as nx

from project import write_graph_to_dot


def test_graph_isomorphism(tmpdir):
    """Writing a two-cycles graph to DOT must match pydot's own serialisation."""
    first_cycle, second_cycle = 52, 48
    labels = ("a", "b")
    target = tmpdir.mkdir("test_dir").join("two_cycles.dot")

    # Graph that goes through our writer.
    written = cfpq_data.labeled_two_cycles_graph(
        first_cycle, second_cycle, edge_labels=labels, verbose=False
    )
    write_graph_to_dot(written, target)

    # An identical graph serialised directly by pydot is the reference.
    reference = cfpq_data.labeled_two_cycles_graph(
        first_cycle, second_cycle, edge_labels=labels, verbose=False
    )
    expected = nx.drawing.nx_pydot.to_pydot(reference).to_string()

    with open(target, "r") as dot_file:
        actual = dot_file.read()

    assert actual == expected
""" Reader for the hashtable, in combination with the :class:`SpatialRegion` objects from ``regions.py``. Use the :class:`SpatialLoader` class to set up and read from the hashtables. Note that all large data is actually contained in the region objects, and the loader class is really just a convenience object. """ from pathlib import Path from typing import Dict, List import attr import h5py import numpy as np from sparepo.particle_types import ParticleType from sparepo.regions import SpatialRegion @attr.s class ChunkFileHashtable: """ Hashtable for a single chunk file and particle type. """ filename: Path = attr.ib(converter=Path) file_number: int = attr.ib(converter=int) hashtable: np.ndarray = attr.ib() @attr.s class SpatialLoader: """ Spatially load data from files based on the pre-generated hashtable. If you need to create a hashtable, see the ``build_hashtable.py``. Note that there is no built-in periodic wrapping. Parameters ---------- hashtable: Path Path to the hashtable hdf5 file. snapshot: Path Path to the first snapshot (the one including ``.0.hdf5``) """ hashtable: Path = attr.ib(converter=Path) snapshot: Path = attr.ib(converter=Path) box_size: float number_of_chunks: int unit: str hubble_param: float hubble_param_scaling: int available_part_types: List[ParticleType] centers: np.ndarray counts: Dict[ParticleType, np.ndarray] cell_size: float number_of_cells: int cells_per_axis: int def __attrs_post_init__(self): """ Loads in metadata from the hashtable. """ with h5py.File(self.hashtable, "r") as handle: header_attrs = handle["Header"].attrs cell_centers = handle["Cells/Centers"][...] 
cell_counts = { ParticleType(int(name[-1])): value[:] for name, value in handle["Cells/Counts"].items() } cell_attrs = handle["Cells"].attrs self.box_size = header_attrs["BoxSize"] self.number_of_chunks = header_attrs["NumberOfChunks"] self.unit = header_attrs["Units"] self.hubble_param = header_attrs["HubbleParam"] self.hubble_param_scaling = header_attrs["HubbleParamScaling"] self.centers = cell_centers self.counts = cell_counts self.available_part_types = list(cell_counts.keys()) self.cell_size = cell_attrs["Size"] self.number_of_cells = cell_attrs["NumberOfCells"] self.cells_per_axis = cell_attrs["CellsPerAxis"] def snapshot_filename_for_chunk(self, chunk: int): """ Gets the snapshot filename for a given chunk. """ return self.snapshot.parent / ( self.snapshot.stem.split(".")[0] + f".{chunk}.hdf5" ) def read_dataset( self, part_type: ParticleType, field_name: str, region: SpatialRegion, ) -> np.ndarray: """ Reads a dataset in a given spatial region. Parameters ---------- part_type: ParticleType Particle type to read. Example: ParticleType.Gas field_name: str Particle field to read. Example: Coordinates region: SpatialRegion Spatial region to load data within. Returns ------- dataset: np.ndarray Particle dataset within the specified spatial region. """ if not region.mask_calculated: region.set_cell_mask( centers=self.centers, cell_size=self.cell_size, ) # First, read out the cell data from the hashtable file. # This is one contiguous read so doesn't need to be cached, # as relative to the particle data reading it is very fast. 
file_mask, file_count = region.get_file_mask( hashtable=self.hashtable, part_type=part_type ) particles_to_read = sum(file_count.values()) dataset_path = f"PartType{part_type.value}/{field_name}" with h5py.File(self.snapshot, "r") as handle: dataset = handle[dataset_path] shape = list(dataset.shape) dtype = dataset.dtype # Truncate the shape shape[0] = particles_to_read output = np.empty(shape, dtype=dtype) already_read = 0 for file_number, ranges in file_mask.items(): with h5py.File( self.snapshot_filename_for_chunk(chunk=file_number), "r" ) as handle: dataset = handle[dataset_path] for read_start, read_end in ranges: if read_end == read_start: continue # Because we read inclusively size_of_range = read_end - read_start # Construct selectors so we can use read_direct to prevent creating # copies of data from the hdf5 file. hdf5_read_sel = np.s_[read_start:read_end] output_dest_sel = np.s_[already_read : size_of_range + already_read] dataset.read_direct( output, source_sel=hdf5_read_sel, dest_sel=output_dest_sel ) already_read += size_of_range return output
import tensorflow as tf
import numpy as np
import time
import os
import random
from datetime import datetime

from model import AudioWord2Vec
# NOTE(review): `Counter`, `batch_pair_data` and `load_subsampled_data`
# are presumably provided by this star import — verify against utils.py.
from utils import *
import operator
from tqdm import tqdm


class Solver(object):
    """Training/testing driver for the skip-gram style AudioWord2Vec model.

    Builds a TF1 graph from ``model.AudioWord2Vec``, streams batches of
    positive / negative / context acoustic features produced by
    ``batch_pair_data``, checkpoints to ``model_dir`` and logs summaries
    to ``log_dir``.
    """

    def __init__(self, examples, labels, utters, batch_size, feat_dim, gram_num, memory_dim,
                 init_lr, log_dir, model_dir, n_epochs, neg_sample_num, min_count, sampling_factor):
        # Directories holding the sharded example_<i>/word_<i>/utter_<i> files.
        self.example_dir = examples
        self.label_dir = labels
        self.utter_dir = utters
        self.batch_size = batch_size
        self.feat_dim = feat_dim          # dimensionality of one acoustic feature
        self.gram_num = gram_num          # context window: gram_num words each side
        self.memory_dim = memory_dim      # embedding size
        self.init_lr = init_lr
        self.log_dir = log_dir
        self.model_dir = model_dir
        self.n_epochs = n_epochs
        self.neg_sample_num = neg_sample_num
        self.min_count = min_count
        self.sampling_factor = sampling_factor
        self.model = AudioWord2Vec(memory_dim, feat_dim, gram_num, neg_sample_num)
        self.generate_op = None
        self.discriminate_op = None
        # Per-shard data; (re)populated by load_subsampled_data in train()/test().
        self.n_feats = None
        self.feats = None
        self.feat_idx = None
        self.skip_feat_idx = None
        self.labels = None
        self.spk2idx = None
        self.idx2spk = None
        self.n_batches = None
        self.masks = None

    def generate_opt(self, loss, learning_rate, momentum, var_list):
        ### Optimizer building for the generator variables ###
        ### variable: generate_op ###
        # `momentum` is used as Adam's beta2, not classical momentum.
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5, beta2=momentum)
        train_op = optimizer.minimize(loss, var_list=var_list)
        return train_op

    def discriminate_opt(self, loss, learning_rate, momentum, var_list):
        ### Optimizer building for the discriminator variables ###
        ### variable: discriminate_op ###
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5, beta2=momentum)
        # NOTE(review): unlike generate_opt, var_list is NOT passed to
        # minimize here, so ALL trainable variables would be updated —
        # confirm this is intentional (the op is currently unused).
        train_op = optimizer.minimize(loss)
        return train_op

    def save_embedding(self, embedding_file, embedding_vectors, global_step, batch_labels):
        """Append embedding vectors, one '<label> v0 v1 ...' line each,
        to ``<embedding_file>_<global_step>``."""
        with open(embedding_file+'_'+str(global_step), 'a') as fout:
            for e, l in zip(embedding_vectors, batch_labels):
                fout.write(l + ' ')
                for i in e[:-1]:
                    fout.write(str(i) + ' ')
                fout.write(str(e[-1]) + '\n')

    def compute_train_loss(self, sess, summary_writer, summary_op, epoch, file_num, part,
                           loss, first_products, enc, neighbor_encs, target_encs):
        """Run one training pass over the currently loaded shard.

        Iterates the shard in shuffled mini-batches, runs the train op,
        periodically prints diagnostics, and writes one summary per part.
        """
        feat_order = list(range(self.n_feats))
        random.shuffle(feat_order)
        # Bind instance state to locals for the hot loop.
        n_batches = self.n_batches
        feats = self.feats
        feat_idx = self.feat_idx
        skip_feat_idx = self.skip_feat_idx
        labels = self.labels
        spk2idx = self.spk2idx
        idx2spk = self.idx2spk
        masks = self.masks
        total_loss_value = 0.
        # n_batches+1 iterations: the extra one drains the remainder batch.
        for step in tqdm(range(n_batches+1)):
            start_time = time.time()
            start_idx = step * self.batch_size
            end_idx = start_idx + self.batch_size
            feat_indices = feat_order[start_idx:end_idx]
            if step == n_batches:
                feat_indices = feat_order[step * self.batch_size:]
            batch_size = len(feat_indices)
            if batch_size == 0:
                continue
            batch_pos_feat, batch_neg_feats, batch_skip_feats, batch_labels, batch_masks \
                = batch_pair_data(feats, feat_idx, skip_feat_idx, labels, spk2idx, idx2spk, masks,
                                  feat_indices, self.neg_sample_num)
            batch_neg_feats = batch_neg_feats.reshape((batch_size, self.neg_sample_num, self.feat_dim))
            batch_skip_feats = batch_skip_feats.reshape((batch_size, 2*self.gram_num, self.feat_dim))
            batch_masks = batch_masks.reshape((batch_size, 2*self.gram_num))
            _, summary, loss_value, dot_value, enc_value, neighbor_encs_value, target_encs_value = \
                sess.run([self.generate_op, summary_op, loss, first_products, enc, neighbor_encs, target_encs],
                         feed_dict={self.model.pos_feat: batch_pos_feat,
                                    self.model.neg_feats: batch_neg_feats,
                                    self.model.neighbors: batch_skip_feats,
                                    self.model.masks: batch_masks})
            total_loss_value += loss_value
            print_step = 1000
            if step % print_step == 0 and step != 0:
                duration = time.time() - start_time
                example_per_sec = batch_size / duration
                # Diagnostic dump of a single example from the batch.
                print ('enc value:')
                print (enc_value[0][:10])
                print ('mask:')
                print (batch_masks[0])
                print ('context vector:')
                print (neighbor_encs_value[0][0][:10])
                print ('neg vector:')
                print (target_encs_value[0][0][:10])
                print ('cosine similarities:')
                print (dot_value[0][:2*self.gram_num])
                print (dot_value[0][2*self.gram_num:])
                format_str = ('%s: epoch %d, part %d, step %d, loss=%.5f')
                print (format_str % (datetime.now(), epoch, part, step, total_loss_value/(print_step)))
                total_loss_value = 0.
        # One summary per (epoch, part); uses the last batch's `summary`.
        summary_writer.add_summary(summary, (epoch-1)*file_num+part)
        summary_writer.flush()

    def compute_test_loss(self, sess, summary_writer, summary_op, epoch, loss,
                          embedding_vectors, embedding_file, global_step):
        """Evaluate the current shard and dump embeddings to disk.

        NOTE(review): loss_value/summary are only assigned inside the
        ``summary_writer == None`` branch, so calling this with a real
        summary_writer would raise NameError — confirm only the
        writer-less path is exercised (as in test()).
        """
        feat_order = list(range(self.n_feats))
        random.shuffle(feat_order)
        n_batches = self.n_batches
        feats = self.feats
        feat_idx = self.feat_idx
        skip_feat_idx = self.skip_feat_idx
        labels = self.labels
        spk2idx = self.spk2idx
        idx2spk = self.idx2spk
        masks = self.masks
        total_loss_value = 0.
        for step in tqdm(range(n_batches+1)):
            start_idx = step * self.batch_size
            end_idx = start_idx + self.batch_size
            feat_indices = feat_order[start_idx:end_idx]
            if step == n_batches:
                feat_indices = feat_order[step * self.batch_size:]
            batch_size = len(feat_indices)
            if batch_size == 0:
                continue
            batch_pos_feat, batch_neg_feats, batch_skip_feats, batch_labels, batch_masks \
                = batch_pair_data(feats, feat_idx, skip_feat_idx, labels, spk2idx, idx2spk, masks,
                                  feat_indices, self.neg_sample_num)
            batch_neg_feats = batch_neg_feats.reshape((-1, self.neg_sample_num, self.feat_dim))
            batch_skip_feats = batch_skip_feats.reshape((-1, 2*self.gram_num, self.feat_dim))
            batch_masks = batch_masks.reshape((-1, 2*self.gram_num))
            if summary_writer == None:
                loss_value, e_v = \
                    sess.run([loss, embedding_vectors],
                             feed_dict={self.model.pos_feat: batch_pos_feat,
                                        self.model.neg_feats: batch_neg_feats,
                                        self.model.neighbors: batch_skip_feats,
                                        self.model.masks: batch_masks})
                self.save_embedding(embedding_file, e_v, global_step, batch_labels)
            total_loss_value += loss_value
        avg_loss = total_loss_value / n_batches
        if summary_writer != None:
            summary_writer.add_summary(summary, epoch)
            summary_writer.flush()
        print ('%s: average loss for eval = %.5f' % (datetime.now(), avg_loss))

    def train(self):
        """ Training for Audio-WordVec."""
        ### Count words over every label shard (for subsampling) ###
        label_dir = self.label_dir
        label_files = os.listdir(label_dir)
        word_freq = Counter()
        total_count = 0
        for label_file in label_files:
            with open(os.path.join(label_dir, label_file), 'r') as f_l:
                for line in f_l:
                    word = line[:-1]  # strip trailing newline
                    word_freq[word] += 1
                    total_count += 1
        sorted_words = sorted(word_freq.items(), key=operator.itemgetter(1), reverse=True)
        with open(os.path.join(self.label_dir, '../word_count'), 'w') as fout:
            for word in sorted_words:
                fout.write(word[0] + ', ' + str(word[1]) + '\n')
        print ('number of total words: '+ str(total_count))

        enc, loss, first_products, neighbor_encs, target_encs = self.model.build_model()

        # Split trainable variables by scope name prefix.
        t_vars = tf.trainable_variables()
        g_vars = [var for var in t_vars if 'generator' in var.name]
        d_vars = [var for var in t_vars if 'discriminator' in var.name]
        self.generate_op = self.generate_opt(loss, self.init_lr, 0.9, g_vars)

        # Build and initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(init)

        # Create a saver.  NOTE(review): tf.all_variables is the
        # deprecated alias of tf.global_variables.
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=100)
        summary_train = [tf.summary.scalar("loss", loss)]
        summary_op_train = tf.summary.merge(summary_train)
        summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

        ### Restore the model ###
        ckpt = tf.train.get_checkpoint_state(self.model_dir)
        global_step = 0
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Checkpoint paths end in "-<step>"; resume epoch numbering.
            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print ("Model restored.")
        else:
            print ('No checkpoint file found.')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        ### Start training ###
        example_files = os.listdir(self.example_dir)
        label_files = os.listdir(self.label_dir)
        utter_files = os.listdir(self.utter_dir)
        print (example_files)
        print (label_files)
        print (utter_files)
        print ("Start batch training.")
        for epoch in range(self.n_epochs):
            e = epoch + global_step + 1
            print ("Start of Epoch: " + str(e) + "!")
            file_num = len(label_files)
            file_order = list(range(file_num))
            random.shuffle(file_order)
            count = 0
            for i in file_order:
                # Shards are named by convention: example_<i>/word_<i>/utter_<i>.
                example_file = 'example_'+str(i)
                label_file = 'word_'+str(i)
                utter_file = 'utter_'+str(i)
                self.n_feats, self.feats, self.feat_idx, self.skip_feat_idx, \
                    self.labels, self.spk2idx, self.idx2spk, self.masks \
                    = load_subsampled_data(os.path.join(self.example_dir, example_file),
                                           os.path.join(self.label_dir, label_file),
                                           os.path.join(self.utter_dir, utter_file),
                                           word_freq, total_count, self.min_count,
                                           self.gram_num, self.sampling_factor)
                self.n_batches = self.n_feats // self.batch_size
                print ('Part: '+str(count))
                print ('# of batches: ' + str(self.n_batches))
                # NOTE(review): len(label_file) is the LENGTH OF THE
                # FILENAME STRING, not the shard count — probably meant
                # file_num; only affects the summary step index.
                self.compute_train_loss(sess, summary_writer, summary_op_train, e, len(label_file),
                                        count, loss, first_products, enc, neighbor_encs, target_encs)
                count += 1
            ckpt = self.model_dir + '/model.ckpt'
            saver.save(sess, ckpt, global_step=e)
            print ("End of Epoch: " + str(e) + "!")
            summary_writer.flush()

    def test(self, embedding_file):
        """ Testing for Audio-Word2Vec."""
        ### Count words (same statistics as train(), needed for subsampling) ###
        label_dir = self.label_dir
        label_files = os.listdir(label_dir)
        word_freq = Counter()
        total_count = 0
        for label_file in label_files:
            with open(os.path.join(label_dir, label_file), 'r') as f_l:
                for line in f_l:
                    word = line[:-1]
                    word_freq[word] += 1
                    total_count += 1
        sorted_words = sorted(word_freq.items(), key=operator.itemgetter(1), reverse=True)
        with open(os.path.join(self.label_dir, '../word_count'), 'w') as fout:
            for word in sorted_words:
                fout.write(word[0] + ', ' + str(word[1]) + '\n')
        print ('number of total words: '+ str(total_count))

        enc, loss, first_products, neighbor_encs, target_encs = self.model.build_model()

        # Build and initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(init)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=100)

        ### Restore the model; a checkpoint is mandatory for testing ###
        ckpt = tf.train.get_checkpoint_state(self.model_dir)
        global_step = 0
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print ("Model restored.")
        else:
            print ('No checkpoint file found.')
            exit()

        ### Load data ###
        print ("Start testing.")
        example_files = os.listdir(self.example_dir)
        label_files = os.listdir(self.label_dir)
        utter_files = os.listdir(self.utter_dir)
        print (example_files)
        print (label_files)
        print (utter_files)
        file_num = len(label_files)
        file_order = list(range(file_num))
        count = 0
        for i in file_order:
            example_file = 'example_'+str(i)
            label_file = 'word_'+str(i)
            utter_file = 'utter_'+str(i)
            self.n_feats, self.feats, self.feat_idx, self.skip_feat_idx, \
                self.labels, self.spk2idx, self.idx2spk, self.masks \
                = load_subsampled_data(os.path.join(self.example_dir, example_file),
                                       os.path.join(self.label_dir, label_file),
                                       os.path.join(self.utter_dir, utter_file),
                                       word_freq, total_count, self.min_count,
                                       self.gram_num, self.sampling_factor)
            self.n_batches = self.n_feats // self.batch_size
            print ('Part :'+str(count))
            print ('# of testing batches: ' + str(self.n_batches))
            count += 1
            ### Start testing (writer-less path: dumps embeddings to file) ###
            self.compute_test_loss(sess, None, None, None, loss, enc, embedding_file, global_step)
"""Module containing low-level functions to classify gridded radar / lidar measurements. """ from collections import namedtuple import numpy as np import numpy.ma as ma from cloudnetpy import utils from cloudnetpy.categorize import droplet from cloudnetpy.categorize import melting, insects, falling, freezing def classify_measurements(radar, lidar, model, mwr): """Classifies radar/lidar observations. This function classifies atmospheric scatterers from the input data. The input data needs to be averaged or interpolated to the common time / height grid before calling this function. Args: radar (Radar): The :class:`Radar` instance. lidar (Lidar): The :class:`Lidar` instance. model (Model): The :class:`Model` instance. mwr (Mwr): The :class:`Mwr` instance. Returns: ClassificationResult: The :class:`ClassificationResult` instance. References: The Cloudnet classification scheme is based on methodology proposed by Hogan R. and O'Connor E., 2004, https://bit.ly/2Yjz9DZ and its proprietary Matlab implementation. Notes: Some of the individual classification methods are changed in this Python implementation compared to the original Cloudnet methodology. Especially methods classifying insects, melting layer and liquid droplets. """ obs = ClassData(radar, lidar, model, mwr) bits = [None] * 6 liquid = droplet.find_liquid(obs) bits[3] = melting.find_melting_layer(obs) bits[2] = freezing.find_freezing_region(obs, bits[3]) bits[0] = droplet.correct_liquid_top(obs, liquid, bits[2], limit=500) bits[5], insect_prob = insects.find_insects(obs, bits[3], bits[0]) bits[1] = falling.find_falling_hydrometeors(obs, bits[0], bits[5]) bits[4] = _find_aerosols(obs, bits[1], bits[0]) return ClassificationResult(_bits_to_integer(bits), obs.is_rain, obs.is_clutter, insect_prob, liquid['bases'], _find_profiles_with_undetected_melting(bits)) def fetch_quality(radar, lidar, classification, attenuations): """Returns Cloudnet quality bits. Args: radar (Radar): The :class:`Radar` instance. 
lidar (Lidar): The :class:`Lidar` instance. classification (ClassificationResult): The :class:`ClassificationResult` instance. attenuations (dict): Dictionary containing keys `liquid_corrected`, `liquid_uncorrected`. Returns: dict: Dictionary containing `quality_bits`, an integer array with the bits: - bit 0: Pixel contains radar data - bit 1: Pixel contains lidar data - bit 2: Pixel contaminated by radar clutter - bit 3: Molecular scattering present (currently not implemented!) - bit 4: Pixel was affected by liquid attenuation - bit 5: Liquid attenuation was corrected """ bits = [None]*6 bits[0] = ~radar.data['Z'][:].mask bits[1] = ~lidar.data['beta'][:].mask bits[2] = classification.is_clutter bits[4] = attenuations['liquid_corrected'] | attenuations['liquid_uncorrected'] bits[5] = attenuations['liquid_corrected'] qbits = _bits_to_integer(bits) return {'quality_bits': qbits} def _find_aerosols(obs, is_falling, is_liquid): """Estimates aerosols from lidar backscattering. Aerosols are lidar signals that are: a) not falling, b) not liquid droplets. Args: obs (ClassData): The :class:`ClassData` instance. is_falling (ndarray): 2-D boolean array of falling hydrometeors. is_liquid (ndarray): 2-D boolean array of liquid droplets. Returns: ndarray: 2-D boolean array containing aerosols. """ is_beta = ~obs.beta.mask return is_beta & ~is_falling & ~is_liquid def _find_profiles_with_undetected_melting(bits): drizzle_and_falling = _find_drizzle_and_falling(*bits[:3]) transition = ma.diff(drizzle_and_falling, axis=1) is_transition = ma.any(transition, axis=1) is_melting_layer = ma.any(bits[3], axis=1) is_undetected_melting = is_transition & ~is_melting_layer is_undetected_melting[is_undetected_melting == 0] = ma.masked return is_undetected_melting.astype(int) def _find_drizzle_and_falling(is_liquid, is_falling, is_freezing): """Classifies pixels as falling, drizzle and others. Args: is_liquid (ndarray): 2D boolean array denoting liquid layers. 
is_falling (ndarray): 2D boolean array denoting falling pixels. is_freezing (ndarray): 2D boolean array denoting subzero temperatures. Returns: MaskedArray: 2D array where values are 1 (falling), 2 (drizzle), and masked (all others). """ falling_dry = is_falling & ~is_liquid drizzle = falling_dry & ~is_freezing drizzle_and_falling = falling_dry.astype(int) + drizzle.astype(int) drizzle_and_falling = ma.copy(drizzle_and_falling) drizzle_and_falling[drizzle_and_falling == 0] = ma.masked return drizzle_and_falling def _bits_to_integer(bits): """Creates array of integers from individual boolean arrays. Args: bits (list): List of bit fields (of similar sizes) to be saved in the resulting array of integers. bits[0] is saved as bit 0, bits[1] as bit 1, etc. Returns: ndarray: Array of integers containing the information of the individual boolean arrays. """ int_array = np.zeros_like(bits[0], dtype=int) for n, bit in enumerate(bits): ind = np.where(bit) # works also if bit is None int_array[ind] = utils.setbit(int_array[ind].astype(int), n) return int_array class ClassData: """ Container for observations that are used in the classification. Args: radar (Radar): The :class:`Radar` instance. lidar (Lidar): The :class:`Lidar` instance. model (Model): The :class:`Model` instance. mwr (Mwr): The :class:`Mwr` instance. Attributes: z (ndarray): 2D radar echo. ldr (ndarray): 2D linear depolarization ratio. v (ndarray): 2D radar velocity. width (ndarray): 2D radar width. v_sigma (ndarray): 2D standard deviation of the velocity. tw (ndarray): 2D wet bulb temperature. beta (ndarray): 2D lidar backscatter. lwp (ndarray): 1D liquid water path. time (ndarray): 1D fraction hour. height (ndarray): 1D height vector (m). model_type (str): Model identifier. radar_type (str): Radar identifier. is_rain (ndarray): 2D boolean array denoting rain. is_clutter (ndarray): 2D boolean array denoting clutter. 
""" def __init__(self, radar, lidar, model, mwr): self.z = radar.data['Z'][:] self.ldr = radar.data['ldr'][:] self.v = radar.data['v'][:] self.width = radar.data['width'][:] self.v_sigma = radar.data['v_sigma'][:] self.tw = model.data['Tw'][:] self.beta = lidar.data['beta'][:] self.lwp = mwr.data['lwp'][:] self.time = radar.time self.height = radar.height self.model_type = model.type self.radar_type = radar.type self.is_rain = _find_rain(self.z, self.time) self.is_clutter = _find_clutter(self.v, self.is_rain) def _find_rain(z, time, time_buffer=5): """Find profiles affected by rain. Rain is present in such profiles where the radar echo in the third range gate is > 0 dB. To make sure we do not include any rainy profiles, we also flag a few profiles before and after detections as raining. Args: z (ndarray): Radar echo. time (ndarray): Time vector. time_buffer (int): Time in minutes. """ is_rain = ma.array(z[:, 3] > 0, dtype=bool).filled(False) n_profiles = len(time) n_steps = utils.n_elements(time, time_buffer, 'time') for ind in np.where(is_rain)[0]: ind1 = max(0, ind - n_steps) ind2 = min(ind + n_steps, n_profiles) is_rain[ind1:ind2 + 1] = True return is_rain def _find_clutter(v, is_rain, n_gates=10, v_lim=0.05): """Estimates clutter from doppler velocity. Args: n_gates (int, optional): Number of range gates from the ground where clutter is expected to be found. Default is 10. v_lim (float, optional): Velocity threshold. Smaller values are classified as clutter. Default is 0.05 (m/s). Returns: ndarray: 2-D boolean array denoting pixels contaminated by clutter. 
""" is_clutter = np.zeros(v.shape, dtype=bool) tiny_velocity = (np.abs(v[:, :n_gates]) < v_lim).filled(False) is_clutter[:, :n_gates] = tiny_velocity * utils.transpose(~is_rain) return is_clutter class ClassificationResult(namedtuple('ClassificationResult', ['category_bits', 'is_rain', 'is_clutter', 'insect_prob', 'liquid_bases', 'is_undetected_melting'])): """ Result of classification Attributes: category_bits (ndarray): Array of integers concatenating all the individual boolean bit arrays. is_rain (ndarray): 1D array denoting presence of rain. is_clutter (ndarray): 2D array denoting presence of clutter. insect_prob (ndarray): 2D array denoting 0-1 probability of insects. liquid_bases (ndarray): 2D array denoting bases of liquid clouds. is_undetected_melting (ndarray): 1D array denoting profiles that should contain melting layer but was not detected from the data. """
import collections
import logging
from time import sleep

import numpy as np
from tqdm import tqdm

from oscml.utils.util import smiles2mol, concat


def get_atoms_BFS(graph):
    """Return the atom indices of *graph* (an RDKit ``Mol``) in breadth-first
    order, starting from the atom with index 0.

    Uses a ``deque`` for O(1) pops from the front and a ``set`` for O(1)
    membership tests (the original list-based queue/lookup was quadratic).
    The visit order is unchanged: neighbours are enqueued in the order
    returned by ``GetNeighbors``.
    """
    start_node = graph.GetAtomWithIdx(0)
    order = [start_node.GetIdx()]
    seen = set(order)
    queue = collections.deque([start_node])
    while queue:
        atom = queue.popleft()
        for neighbour in atom.GetNeighbors():
            nidx = neighbour.GetIdx()
            if nidx not in seen:
                seen.add(nidx)
                order.append(nidx)
                queue.append(neighbour)
    return order


def get_atoms_and_bonds(mol, atom_dict, bond_dict):
    """Map the atoms and bonds of RDKit ``Mol`` *mol* to integer ids.

    *atom_dict* and *bond_dict* are auto-extending ``defaultdict``s, so
    unseen symbols/bond types are assigned fresh ids on first access.

    Returns:
        tuple: (list of atom ids, dict atom-index -> list of (neighbour, bond-id)).
    """
    atoms = [a.GetSymbol() for a in mol.GetAtoms()]
    # Distinguish aromatic atoms from their plain counterparts.
    for a in mol.GetAromaticAtoms():
        i = a.GetIdx()
        atoms[i] = (atoms[i], 'aromatic')
    atoms = [atom_dict[a] for a in atoms]

    i_jbond_dict = collections.defaultdict(lambda: [])
    for b in mol.GetBonds():
        i, j = b.GetBeginAtomIdx(), b.GetEndAtomIdx()
        bond = bond_dict[str(b.GetBondType())]
        # Store the bond in both directions (undirected graph).
        i_jbond_dict[i].append((j, bond))
        i_jbond_dict[j].append((i, bond))
    return atoms, i_jbond_dict


def extract_fragments(radius, atoms, i_jbond_dict, fingerprint_dict, edge_dict):
    """Extract the fragments from a molecular graph
    based on Weisfeiler-Lehman algorithm.

    Args:
        radius (int): number of WL relabelling iterations.
        atoms (list): atom ids as produced by :func:`get_atoms_and_bonds`.
        i_jbond_dict (dict): adjacency with bond ids.
        fingerprint_dict, edge_dict: auto-extending id dictionaries.

    Returns:
        ndarray: one fingerprint id per atom.
    """
    if (len(atoms) == 1) or (radius == 0):
        nodes = [fingerprint_dict[a] for a in atoms]
    else:
        nodes = atoms
        i_jedge_dict = i_jbond_dict

        for _ in range(radius):
            # Update each node ID considering its neighboring nodes and
            # edges; the updated node IDs are the fingerprint IDs.
            nodes_ = []
            for i, j_edge in i_jedge_dict.items():
                neighbors = [(nodes[j], edge) for j, edge in j_edge]
                fingerprint = (nodes[i], tuple(sorted(neighbors)))
                nodes_.append(fingerprint_dict[fingerprint])

            # Also update each edge ID considering its two nodes on both sides.
            i_jedge_dict_ = collections.defaultdict(lambda: [])
            for i, j_edge in i_jedge_dict.items():
                for j, edge in j_edge:
                    both_side = tuple(sorted((nodes[i], nodes[j])))
                    edge = edge_dict[(both_side, edge)]
                    i_jedge_dict_[i].append((j, edge))

            nodes = nodes_
            i_jedge_dict = i_jedge_dict_

    return np.array(nodes)


class Mol2seq_WL():
    """Callable that turns an RDKit molecule into a sequence of WL
    fingerprint ids, ordered by BFS over the atom graph.

    The id dictionaries grow as new atoms/bonds/fragments are encountered,
    so the same instance must be reused across a dataset for a consistent
    vocabulary.
    """

    def __init__(self, radius):
        self.atom_dict = collections.defaultdict(lambda: len(self.atom_dict))
        self.bond_dict = collections.defaultdict(lambda: len(self.bond_dict))
        self.fragment_dict = collections.defaultdict(lambda: len(self.fragment_dict))
        self.edge_dict = collections.defaultdict(lambda: len(self.edge_dict))
        self.radius = radius

    def __call__(self, m):
        atoms, i_jbond_dict = get_atoms_and_bonds(m, self.atom_dict, self.bond_dict)
        descriptor = extract_fragments(self.radius, atoms, i_jbond_dict,
                                       self.fragment_dict, self.edge_dict)
        atoms_BFS_order = get_atoms_BFS(m)
        descriptor_BFS = [descriptor[i] for i in atoms_BFS_order]
        return descriptor_BFS


def mol2seq(radius, df, column='SMILES_str'):
    """Fit a :class:`Mol2seq_WL` over every SMILES string in *df* and return it.

    Args:
        radius (int): WL radius.
        df (DataFrame): frame holding SMILES strings.
        column (str): column with the SMILES strings.
    """
    logging.info('filling mol2seq according to Weisfeiler Lehman algorithm with radius=' + str(radius))
    sleep(1)  # let the log line flush before tqdm starts drawing
    mol2seq = Mol2seq_WL(radius)
    for i in tqdm(range(len(df))):
        smiles = df.iloc[i][column]
        m = smiles2mol(smiles)
        mol2seq(m)
    logging.info(concat('atom dict:', len(mol2seq.atom_dict), mol2seq.atom_dict))
    logging.info(concat('bond dict:', len(mol2seq.bond_dict), mol2seq.bond_dict))
    logging.info(concat('fragment dict:', len(mol2seq.fragment_dict), mol2seq.fragment_dict))
    logging.info(concat('edge dict:', len(mol2seq.edge_dict), mol2seq.edge_dict))
    return mol2seq
from mysorts import * from numpy import random from pygame.locals import ( #for tracking specific keypresses K_ESCAPE, KEYDOWN, ) from pygame import time #START LOGIC #print the menu sortType, arrSize = printMenu() #start pygame pygame.init() #set the popup screen up screen = pygame.display.set_mode([SCREEN_WIDTH,SCREEN_HEIGHT]) #fill the screen with white screen.fill(background) #the arary to be sorted arr = [0] * arrSize #fill the array with random numbers for j in range(arrSize): arr[j] = random.randint(SCREEN_HEIGHT) if arrSize == SMALL: interval = 80 elif arrSize == MEDIUM: interval = 50 elif arrSize == LARGE: interval = 20 #draw the array bars before it's sorted drawBarArr(screen, arr, red, interval) #perform selection sort sort(arr, sortType) #let the sorted array sit for some time pygame.time.delay(100) #clear and print the final sorted array screen.fill(background) #draw the whole array as bars after it's sorted drawBarArr(screen, arr, green, 0) #final view loop displaying = True #REMOVE THIS SECTION LATER while displaying: for event in pygame.event.get(): if event.type == KEYDOWN: #While the user hasn't quit out if event.key == K_ESCAPE: displaying = False #quit pygame pygame.quit()
# ___________________________________________________________________________
#
#  EGRET: Electrical Grid Research and Engineering Tools
#  Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
#  (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
#  Government retains certain rights in this software.
#  This software is distributed under the Revised BSD License.
# ___________________________________________________________________________

## functions for adding the basic status variables

from pyomo.environ import *
import math

from .uc_utils import add_model_attr

component_name = 'status_vars'


def _is_relaxed(model):
    """Return True when the model requests relaxed (continuous) binaries."""
    return bool(getattr(model, 'relax_binaries', False))


def _add_unit_on_vars(model, relaxed=False):
    """Add the on/off commitment indicator per generator and time period."""
    domain = UnitInterval if relaxed else Binary
    model.UnitOn = Var(model.ThermalGenerators, model.TimePeriods, within=domain)


def _add_unit_start_vars(model, relaxed=False):
    """Add the startup indicator per generator and time period."""
    domain = UnitInterval if relaxed else Binary
    model.UnitStart = Var(model.ThermalGenerators, model.TimePeriods, within=domain)


def _add_unit_stop_vars(model, relaxed=False):
    """Add the shutdown indicator per generator and time period."""
    domain = UnitInterval if relaxed else Binary
    model.UnitStop = Var(model.ThermalGenerators, model.TimePeriods, within=domain)


@add_model_attr(component_name, requires = {'data_loader': None} )
def CA_1bin_vars(model):
    '''
    This adds only a binary variable for unit-on, as in

    Carrion, M. and Arroyo, J. (2006) A Computationally Efficient Mixed-Integer
    Liner Formulation for the Thermal Unit Commitment Problem. IEEE Transactions
    on Power Systems, Vol. 21, No. 3, Aug 2006.
    '''
    _add_unit_on_vars(model, _is_relaxed(model))


@add_model_attr(component_name, requires = {'data_loader': None} )
def garver_3bin_vars(model):
    '''
    This add the common 3-binary variables per generator per time period.
    One for start, one for stop, and one for on, as originally proposed in

    L. L. Garver. Power generation scheduling by integer programming-development
    of theory. Power Apparatus and Systems, Part III. Transactions of the
    American Institute of Electrical Engineers, 81(3): 730-734, April 1962. ISSN
    0097-2460.
    '''
    relaxed = _is_relaxed(model)
    _add_unit_on_vars(model, relaxed)
    _add_unit_start_vars(model, relaxed)
    _add_unit_stop_vars(model, relaxed)
    return


@add_model_attr(component_name, requires = {'data_loader': None} )
def garver_2bin_vars(model):
    '''
    This adds the unit start and unit on variables, and causes the unit stop
    variable to be projected out.
    '''
    relaxed = _is_relaxed(model)
    _add_unit_on_vars(model, relaxed)
    _add_unit_start_vars(model, relaxed)

    # the stop "variable" is recovered from on/start via the state equation:
    # stop[t] = on[t-1] - on[t] + start[t]  (with UnitOnT0 at the horizon start)
    def unit_stop_expr_rule(m, g, t):
        prev_on = m.UnitOnT0[g] if t == value(m.InitialTime) else m.UnitOn[g, t-1]
        return prev_on - m.UnitOn[g, t] + m.UnitStart[g, t]
    model.UnitStop = Expression(model.ThermalGenerators, model.TimePeriods,
                                rule=unit_stop_expr_rule)
    return


@add_model_attr(component_name, requires = {'data_loader': None} )
def garver_3bin_relaxed_stop_vars(model):
    '''
    This adds the 3-binary variables, but relaxes the integrality on the stop
    variable, like the "MILP-3R" formulation from

    Carrion, M. and Arroyo, J. (2006) A Computationally Efficient Mixed-Integer
    Liner Formulation for the Thermal Unit Commitment Problem. IEEE Transactions
    on Power Systems, Vol. 21, No. 3, Aug 2006.
    '''
    relaxed = _is_relaxed(model)
    _add_unit_on_vars(model, relaxed)
    _add_unit_start_vars(model, relaxed)
    # stop is always relaxed in this formulation
    _add_unit_stop_vars(model, True)
    return


@add_model_attr(component_name, requires = {'data_loader': None} )
def ALS_state_transition_vars(model):
    '''
    These are the state-transition variables proposed in

    Atakan, Semih, Guglielmo Lulli, and Suvrajeet Sen. "A State Transition
    MIP Formulation for the Unit Commitment Problem." IEEE Transactions on
    Power Systems 33.1 (2018): 736-748.
    '''
    relaxed = _is_relaxed(model)
    domain = UnitInterval if relaxed else Binary
    model.UnitStayOn = Var(model.ThermalGenerators, model.TimePeriods, within=domain)
    _add_unit_start_vars(model, relaxed)
    _add_unit_stop_vars(model, relaxed)

    # on = stay-on + start (UnitOn becomes an expression over the real vars)
    def unit_on_expr_rule(m, g, t):
        return m.UnitStayOn[g, t] + m.UnitStart[g, t]
    model.UnitOn = Expression(model.ThermalGenerators, model.TimePeriods,
                              rule=unit_on_expr_rule)
#! /usr/bin/env python3
#
# Author: Martin Schreiber
# Email: schreiberx@gmail.com
# Date: 2017-06-18
#

import sys
import math

import mule_local.rexi.EFloat as ef


#
# Supported Functions to approximate
#
class Functions:
    # Evaluates phi_N / upsilon_N exponential-integrator functions.
    # All arithmetic goes through self.efloat so the same code works for
    # plain floats and (presumably) mpmath multi-precision numbers.

    def phiNDirect(
            self,
            n: int,
            z: float
    ):
        """
        Direct formulation of phiN functions.

        WARNING: There's a singularity close to 0!
        """
        if n == 0:
            return self.efloat.exp(z)
        elif n == 1:
            return (self.efloat.exp(z)-1)/z
        elif n == 2:
            return (self.efloat.exp(z)-1-z)/(z*z)
        elif n == 3:
            return (2*self.efloat.exp(z)-2-2*z-z*z)/(2*z*z*z)
        elif n == 4:
            return (6*self.efloat.exp(z)-6-6*z-3*z*z-z*z*z)/(6*z*z*z*z)
        elif n == 5:
            return (24*self.efloat.exp(z) -24 - 24*z - 12*z*z - 4*z*z*z - z*z*z*z)/(24*z*z*z*z*z)
        else:
            raise Exception("Not yet implemented")

    def factorial(self, N):
        """
        Helper function to support (N-1) factorials

        Returns 1 for negative N so that factorial(n-1) is safe at n == 0.
        """
        if N < 0:
            return 1
        return math.factorial(N)

    def phiNDirectFormula(
            self,
            n: int,
            z
    ):
        """
        retval = self.factorial(n-1)*self.efloat.exp(z)

        for i in range(n):
            retval -= (self.factorial(n-1)/self.factorial(i))*self.efloat.pow(z, i)

        retval /= self.factorial(n-1)*self.efloat.pow(z, n)
        """
        # self.factorial(n-1) is cancelled out
        retval = self.efloat.exp(z)
        for i in range(n):
            retval -= self.efloat.pow(z, i)/self.factorial(i)
        retval /= self.efloat.pow(z, n)
        return retval

    def phiNRec(
            self,
            n: int,
            z
    ):
        """
        Recursive calculation of phiN functions.

        Uses the standard recurrence phi_n(z) = (phi_{n-1}(z) - 1/(n-1)!)/z,
        which like the direct form is ill-conditioned near z == 0.
        """
        if n == 0:
            return self.efloat.exp(z)
        return (self.phiN(n-1, z) - self.efloat.to(1.0)/self.efloat.to(math.factorial(n-1)))/z;

    def phiNSeries(
            self,
            n: int,
            z
    ):
        """
        It takes less than 20 iterations for cases (abs(z) < 0.5) to converge
        """
        niters = 20

        #for i in range(niters):
        #    retval += self.efloat.pow(z, i)/math.factorial(i+n)

        # Avoid repeated factorial and pow computations by updating the
        # power of z and the running factorial incrementally per term.
        powz = self.efloat.to(1.0)
        facn = math.factorial(n)
        retval = powz/facn
        for i in range(1, niters):
            powz *= z
            facn *= (n+i)
            retval += powz/facn
        return retval

    def phiN(
            self,
            n: int,
            z
    ):
        # Use Series if z < 0.2 since this converges relatively fast
        # (and avoids the near-zero singularity of the recursive form).
        if self.efloat.abs(z) < 0.2:
            return self.phiNSeries(n, z)
        return self.phiNRec(n, z)

    def upsNDirect(
            self,
            n: int,
            z
    ):
        # Closed-form upsilon_n used by ETDRK4; singular near z == 0.
        if n == 1:
            return (-4-z+self.efloat.exp(z)*(4-3*z+z*z)) / (z*z*z)
        if n == 2:
            return (2+z+self.efloat.exp(z)*(-2+z)) / (z*z*z)
        if n == 3:
            return (-4-3*z-z*z+self.efloat.exp(z)*(4-z)) / (z*z*z)

        raise Exception("ups number "+str(n)+" is not supported!")

    def upsNSeries(
            self,
            n: int,
            z
    ):
        """
        It takes less than 20 iterations for cases (abs(z) < 0.5) to converge
        """
        niters = 20

        if n == 1:
            #retval = 0
            #for l in range(niters):
            #    retval += self.efloat.pow(z, l)*(l+1)*(l+1)/math.factorial(l+3)
            #return retval

            # avoid repeated pow and factorial computations
            powz = self.efloat.to(1.0)
            facn = math.factorial(3)
            retval = powz/facn
            for l in range(1, niters):
                powz *= z
                facn *= (l+3)
                retval += powz*(l+1)*(l+1)/facn
            return retval

        if n == 2:
            retval = self.efloat.to(1.0)/self.efloat.to(2.0)
            #for l in range(niters):
            #    retval += (z-2)*self.efloat.pow(z, l)/math.factorial(l+3)

            powz = self.efloat.to(1.0)
            facn = math.factorial(3)
            retval += (z-2)*powz/facn
            for l in range(1, niters):
                powz *= z
                facn *= (l+3)
                retval += (z-2)*powz/facn
            return retval

        if n == 3:
            retval = -self.efloat.to(1.0)/self.efloat.to(2.0)
            #for l in range(niters):
            #    retval += (4-z)*self.efloat.pow(z, l)/math.factorial(l+3)
            #return retval

            powz = self.efloat.to(1.0)
            facn = math.factorial(3)
            retval += (4-z)*powz/facn
            for l in range(1, niters):
                powz *= z
                facn *= (l+3)
                retval += (4-z)*powz/facn
            return retval

        raise Exception("ups number "+str(n)+" is not supported!")

    def upsN(
            self,
            n: int,
            z
    ):
        # Use Series if z < 0.2 since this converges relatively fast
        if self.efloat.abs(z) < 0.2:
            return self.upsNSeries(n, z)
        return self.upsNDirect(n, z)

    def __init__(
            self,
            function_name = "phi0",
            efloat_mode = None
    ):
        # function_name selects the evaluated function: 'phiN' or 'upsN'
        # with N encoded in the suffix (e.g. "phi1", "ups3").
        self.efloat = ef.EFloat(efloat_mode)
        self.function_name = function_name
        self.function_complex = True

        if self.efloat.floatmode == 'mpfloat':
            import mpmath as mp
            # Set numerical threshold to half of precision
            self.epsthreshold = 1e-15
        else:
            self.epsthreshold = 1e-10

        # Exponential integrator: phi0
        if self.function_name[0:3] == 'phi':
            N = int(self.function_name[3:])
            def fun(x):
                return self.phiN(N, x)
            self.eval = fun

            if self.function_complex:
                self.is_real_symmetric = True
                self.is_complex_conjugate_symmetric = True
            else:
                self.is_real_symmetric = True
                self.is_complex_conjugate_symmetric = False

        elif self.function_name[0:3] == 'ups':
            N = int(self.function_name[3:])

            if self.efloat.floatmode == 'mpfloat':
                import mpmath as mp
                # Set numerical threshold to half of precision
                self.epsthreshold = 1e-10
            else:
                self.epsthreshold = 1e-10

            if N == 1:
                #
                # Setup \upsilon_1 for EDTRK4
                # See document notes_on_time_splitting_methods.lyx
                #
                def fun(x):
                    K = x
                    # near zero, fall back to the series limit ups1(0) = 1/6
                    if abs(x) < self.epsthreshold:
                        return self.efloat.to(1.0)/self.efloat.to(2.0*3.0)
                    else:
                        return (-self.efloat.to(4.0)-K+self.efloat.exp(K)*(self.efloat.to(4.0)-self.efloat.to(3.0)*K+K*K))/(K*K*K)
                self.eval = fun

                if self.function_complex:
                    self.is_real_symmetric = True
                    self.is_complex_conjugate_symmetric = True
                else:
                    self.is_real_symmetric = True
                    self.is_complex_conjugate_symmetric = False

            elif N == 2:
                #
                # Setup \upsilon_2 for EDTRK4
                # See document notes_on_time_splitting_methods.lyx
                #
                def fun(x):
                    K = x
                    if abs(x) < self.epsthreshold:
                        return self.efloat.to(1.0)/self.efloat.to(2.0*3.0)
                    else:
                        return (self.efloat.to(2.0)+1.0*K+self.efloat.exp(K)*(self.efloat.to(-2.0)+K))/(K*K*K)
                self.eval = fun

                if self.function_complex:
                    self.is_real_symmetric = True
                    self.is_complex_conjugate_symmetric = True
                else:
                    self.is_real_symmetric = True
                    self.is_complex_conjugate_symmetric = False

            elif N == 3:
                #
                # Setup \upsilon_3 for EDTRK4
                # See document notes_on_time_splitting_methods.lyx
                #
                def fun(x):
                    K = x
                    if abs(x) < self.epsthreshold:
                        return self.efloat.to(1.0)/self.efloat.to(2.0*3.0)
                    else:
                        return (-self.efloat.to(4.0) - 3.0*K - K*K + self.efloat.exp(K)*(self.efloat.to(4.0)-K))/(K*K*K)
                self.eval = fun

                if self.function_complex:
                    self.is_real_symmetric = True
                    self.is_complex_conjugate_symmetric = True
                else:
                    self.is_real_symmetric = True
                    self.is_complex_conjugate_symmetric = False

            else:
                print("Unknown ups function "+str(N))
                sys.exit(1)

        else:
            print("Unknown basis function "+str(self.function_name))
            sys.exit(1)
"""Sanity check the EEG data. This script should run without giving any errors. """ # %% # Imports import mne import numpy as np import pandas as pd from config import DATA_DIR_EXTERNAL, STREAMS from utils import get_sourcedata # %% # Load data for sub in range(1, 33): for stream in STREAMS: print(f"Checking {sub}-{stream}") vhdr, tsv = get_sourcedata(sub, stream, DATA_DIR_EXTERNAL) with mne.utils.use_log_level(0): raw = mne.io.read_raw_brainvision(vhdr) events, event_id = mne.events_from_annotations(raw) # %% # Check amount of triggers are exactly as expected events_series = pd.Series(events[:, -1]) vcounts = events_series.value_counts() # Check misc ttl codes, see: # https://github.com/sappelhoff/ecomp_experiment/blob/main/ecomp_experiment/define_ttl.py occur = { 80: 1, # experiment started 90: 1, # experiment stopped 1: 300, # new trial 2: 300, # fixstim offset in each trial 3: 300, # response prompt onset 7: 6, # block break started 8: 6, # block break stopped } # adjust codes by stream (were offset by 100 in dual) # also add some code that should have never occurred # depending on the stream if stream == "dual": occur = {key + 100: val for key, val in occur.items()} occur[131] = 0 # lower ... meaningless in "dual" occur[132] = 0 # higher ... meaningless in "dual" else: assert stream == "single" occur[33] = 0 # blue ... meaningless in "single" occur[34] = 0 # red ... meaningless in "single" # for explanation of codes 10001 and 99999, see: # https://mne.tools/stable/generated/mne.events_from_annotations.html#mne.events_from_annotations special = { 10001: 2, # e.g., "Comment,ControlBox ...", "Comment,actiCAP Data On", etc. 
99999: 2, # "New Segment/" } occur.update(special) for value, expected in occur.items(): # skip this check for a few subjs and specific markers, # these are cases where deviations are known and in order toskip = { "02-dual": "Recording immediately started (not stopped again).", "04-single": "Paused twice, instead of once.", "10-dual": "Control box was still connected via USB.", "19-single": "Recording immediately started (not stopped again).", } if f"{sub:02}-{stream}" in toskip and value in special: continue occurrences = vcounts.get(value, 0) msg = f"{value} is not as expected: {occurrences} != {expected}" assert occurrences == expected, msg # %% # Check number of digit ttl codes # These should be 300 (trials) * 10 (digits per trial) digit_vals = list(range(11, 20)) + list(range(21, 30)) if stream == "dual": digit_vals = [val + 100 for val in digit_vals] n_occurrences = 0 for value in digit_vals: n_occurrences += vcounts.get(value, 0) assert n_occurrences == 3000, n_occurrences # %% # Check number of choice ttl codes # these should be 300 - vcounts.get(30, 0) choice_vals = [31, 32, 33, 34] n_timeouts = vcounts.get(30, 0) if stream == "dual": choice_vals = [val + 100 for val in choice_vals] n_timeouts = vcounts.get(130, 0) n_occurrences = 0 for value in choice_vals: n_occurrences += vcounts.get(value, 0) assert n_occurrences == (300 - n_timeouts), n_occurrences # %% # Check number of feedback ttl codes fdbk_timeout = 6 if stream == "single" else 106 assert n_timeouts == vcounts.get(fdbk_timeout, 0) # these should be 300 fdbk_vals = [4, 5, 6] if stream == "dual": fdbk_vals = [val + 100 for val in fdbk_vals] n_occurrences = 0 for value in fdbk_vals: n_occurrences += vcounts.get(value, 0) assert n_occurrences == 300, n_occurrences # %% # Crosscheck with behavioral data df = pd.read_csv(tsv, sep="\t") # %% # Check that timeouts fit assert df["validity"].sum() == (300 - n_timeouts) # Make sure not too many are ambiguous (if there are: could be chance) assert 
df["ambiguous"].sum() < 30 # Inter-trial-interval should be within bounds assert df["iti"].min() >= 500 assert df["iti"].max() <= 1500 # %% # Ensure there is no NaN in the data data = raw.get_data() assert not np.isnan(data).any() print(" done!") # %%
#!/usr/bin/env python import numpy as np import matplotlib.pyplot as plt import argparse import pandas as pd import yaml import os params = {'axes.labelsize': 14, 'axes.titlesize': 16, 'xtick.labelsize': 12, 'ytick.labelsize': 12, 'legend.fontsize': 14} plt.rcParams.update(params) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('data_name') parser.add_argument('-c', '--config', default='config_inv.yml') parser.add_argument('--out', default=None, help='figure name of output') args = parser.parse_args() data_name = args.data_name file_config = args.config file_out = args.out with open(file_config, 'r') as fp: config = yaml.safe_load(fp) dir_lcurve = config['l-curve']['dir_out'] path = os.path.join(dir_lcurve, data_name + '.txt') curve = pd.read_fwf(path) ind = np.argsort(curve['factor']) plt.figure(figsize=(10, 10)) plt.plot(np.log(curve['f_residual'][ind]), np.log(curve['f_reg'][ind]), 'k.-') for x, y, a in zip(curve['f_residual'][ind], curve['f_reg'][ind], curve['factor'][ind]): plt.annotate('{:9.6f}'.format(a), (np.log(x), np.log(y))) plt.xlabel('Residual norm $\log\|Ax - b\|_2$') plt.ylabel('Regularized norm $\log\| L (x - x_0)\|_2$') plt.title('L-curve') plt.tight_layout() if file_out: plt.savefig(file_out, dpi=300) plt.show()
"""Train a small dense classifier on grayscale digit images and optionally
export it as a TensorFlow Lite model.

Expects --path to point at a folder whose top-level subdirectories are named
after numeric class labels and contain the image files for that class.
"""
import argparse
import os

parser = argparse.ArgumentParser(description='Model Trainer')
parser.add_argument('--path', help='Path to data folder.', required=True)
parser.add_argument('--lite', help='Generate lite Model.', action='store_true')
args = parser.parse_args()

if args.path:
    import cv2
    import numpy as np
    import tensorflow as tf
    from sklearn.model_selection import train_test_split

    def load_dataset(input_path):
        """Load every image under the top-level class directories of
        *input_path* as a flattened float32 grayscale vector; the directory
        name (a number) is used as the label.
        """
        features_list = []
        features_label = []
        for root, dirs, files in os.walk(input_path):
            # NOTE: only top-level class dirs are handled correctly -- paths
            # are built from input_path, not root, as in the original layout.
            for label_dir in dirs:
                dir_path = os.path.join(input_path, label_dir)
                for filename in os.listdir(dir_path):
                    training_digit_image = cv2.imread(os.path.join(dir_path, filename))
                    gray = cv2.cvtColor(training_digit_image, cv2.COLOR_BGR2GRAY)
                    gray = np.array(gray, dtype='f').ravel()
                    features_list.append(np.array(gray))
                    # BUGFIX: np.float was removed in NumPy 1.24 -- it was
                    # only ever an alias for the builtin float.
                    features_label.append(float(label_dir))
        features_list = np.array(features_list)
        features_label = np.array(features_label)
        return features_list, features_label

    class myCallback(tf.keras.callbacks.Callback):
        """Log the learning rate each epoch and stop training once the loss
        and accuracy targets are reached."""

        # BUGFIX: avoid the mutable default argument (logs={}); Keras passes
        # the dict, but a None default is the safe convention.
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            print('\nLearning rate for epoch {} is {}'.format(epoch + 1,
                                                              model.optimizer.lr.numpy()))
            # Guard against missing metrics instead of comparing with None.
            if logs.get('loss', 1.0) < 0.01 and logs.get('accuracy', 0.0) > .999:
                print("\nReached 99.8% accuracy so cancelling training!")
                self.model.stop_training = True

    def scheduler(epoch):
        # constant LR for the first 10 epochs, then exponential decay
        return 0.001 if epoch < 10 else float(0.001 * tf.math.exp(0.1 * (10 - epoch)))

    train, labels = load_dataset(args.path)

    # Split into train/test (stratified so class balance is preserved)
    X_train, X_test, y_train, y_test = train_test_split(
        train, labels, test_size=0.3, stratify=labels, random_state=0)

    # scale pixel values into [0, 1]
    X_train /= 255.0
    X_test /= 255.0

    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(1700).batch(64)
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).shuffle(1700).batch(64)

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(38, activation='softmax')
    ])

    callbacks = myCallback()

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(train_ds, validation_data=test_ds, epochs=100,
              callbacks=[callbacks, tf.keras.callbacks.LearningRateScheduler(scheduler)])
    model.save('model.h5')

    if args.lite:
        # Convert the model to TensorFlow Lite.
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        # BUGFIX: close the output file deterministically.
        with open('Model.tflite', 'wb') as f:
            f.write(tflite_model)
""" name: interpolation.py Goal: resume all interpolation functions author: HOUNSI Madouvi antoine-sebastien date: 14/03/2022 """ import sys from os.path import dirname, join import matplotlib.pyplot as plt import numpy as np from interpolation.polynom import Polynom from interpolation.polynome import Polynome from linearEq.utils.gaussForVal import gauss from math import pow, exp class Interpolation: def __init__(self, file): self.file = file self.polyN = None self.polyL = None self.polyM = None try: sys.stdin = open(join(dirname(__file__), self.file)) self.X = [float(i) for i in sys.stdin.readline().split()] self.Y = [float(i) for i in sys.stdin.readline().split()] if len(self.X) != len(self.Y): print("Vos tableaux ne sont pas de meme taille") else: self.X = [k for k in self.X] self.Y = [k for k in self.Y] self.dim = len(self.X) Xval = np.arange(-3, 3, 0.1) vals = list() for i in range(self.dim): vals.append([self.X[i], self.Y[i]]) # self.polynomeLagrange(vals) self.polyN = self.newton() self.polyL = self.lagrange() self.polyM = self.moindreCarre(4) print("\n\t-****** Interpolation ******-\n") print("Newton:",self.polyN) print("Lagrange:",self.polyL) print("Moindre carre:",self.polyM) plt.plot(np.arange(-3, 3, 0.1), self.calcLagrange(np.arange(-3, 3, 0.1)), label='Courbe lagrange (C1)', c='blue') plt.plot(Xval, self.calcNewton(Xval), label='Courbe newton (C2)', c='green') plt.plot(Xval, self.calcMoindreCarre(Xval), label='Courbe moindreCarre (C3)', c='red') plt.plot(np.arange(-3, 3, 0.1), self.givenFunc(np.arange(-3, 3, 0.1), "(x**3 - 1)/(x**2 + 1)"), label='Courbe', c='black') # plt.scatter(self.X, self.Y, c='coral', label='Points') plt.title("Interpolation:\nPx1 = {}\nPx2 = {}\nPx3 = {}".format(self.polyL, self.polyN, self.polyM)) plt.xlabel("X") plt.ylabel("Y") # plt.xticks(np.arange(-30, 30, 2)) plt.legend() plt.show() except ValueError: print("Valeurs mal définies") """ ===================================================================== """ def 
newton(self): matrix = np.zeros((self.dim, self.dim)) for i in range(self.dim): for j in range(i + 1): matrix[i][j] = self.calcN(j, self.X[i]) Coefs = gauss(matrix, self.Y).showResult() f = [0] for i in range(self.dim): f1 = [Coefs[i]] for j in range(i): f1 = Polynom().mult(f1, [ -self.X[j], 1]) f = Polynom().add(f, f1) # print("zzzzz: ",Polynom().build(f)) # Coefs[0] = -1 polyN = Polynom().build(f) return polyN def calcN(self, dim, x): val = 1 for i in range(dim): val = val * (x - self.X[i]) return val def calcNewton(self, x): return eval(self.polyN) """ ===================================================================== """ def lagrange(self): Px = [0] for i in range(self.dim): fi = [1] for j in range(self.dim): if i != j: Dnmteur = self.X[i] - self.X[j] fi = Polynom().mult(P1=fi, P2=[-self.X[j]/Dnmteur, 1 / Dnmteur]) # fi * (x - self.X[j]) / (self.X[i] - self.X[j]) Px = Polynom().add(Px, Polynom().mult([self.Y[i]], fi)) # print(Polynom().build(Px)) # Px.reverse() """ for i in range(len(Px)): if i != 0: Px[i] = Px[i] - Px[i]/5""" return Polynom().build(Px) def polynomeLagrange(self, listPoint) -> Polynome: n = len(listPoint) - 1 P = Polynome(n, 0) X = [el[0] for el in listPoint] fX = [el[1] for el in listPoint] for i in range(n + 1): Oi = Polynome(0, 1) for j in range(n + 1): if (j != i): Oi *= Polynome([-X[j] / (X[i] - X[j]), 1 / (X[i] - X[j])]) P += fX[i] * Oi print(P) return P def calcLagrange(self, x): return eval(self.polyL) def givenFunc(self, x, poly): return eval(poly) # return exp(x-1)/exp(x+1) """ ===================================================================== """ def moindreCarre(self, deg): deg = deg matrix = np.zeros((deg + 1, deg + 1)) vect = list() maxDeg = 2 * deg # matrix for i in range(maxDeg, deg - 1, -1): for j in range(deg + 1): temp = [pow(self.X[k], i - j) for k in range(self.dim)] matrix[maxDeg - i][j] = sum(temp) # vector for i in range(deg, -1, -1): temp = [pow(self.X[k], i) * self.Y[k] for k in range(self.dim)] 
vect.append(sum(temp)) Coefs = gauss(matrix, vect).showResult() Coefs.reverse() return Polynom().build(Coefs) def calcMoindreCarre(self, x): return eval(self.polyM)
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
import PIL
import itertools
import datetime
import random
import skimage
from skimage import filters


def noise_permute(datapoint):
    """Permutes the pixels of an img and assigns the label (label, 'permuted').

    The input should be an image (PIL, others like numpy arrays might work,
    too) with a label. The returned image is a PIL image.
    It is assumed that img has 3 dimensions, the last of which is the color
    channels.

    NOTE(review): despite the docstring, the code calls img.numpy() and
    returns a torch tensor -- it assumes a CHW torch tensor input; confirm.
    """
    img, label = datapoint
    # CHW -> HWC for numpy processing
    imgn = np.transpose(img.numpy(), (1,2,0))
    assert len(imgn.shape) == 3 and imgn.shape[2] <=4, 'Unexpected image dimensions.'
    imgn_flat = imgn.reshape(imgn.shape[0]*imgn.shape[1], imgn.shape[2])
    imgn_flat_permuted = np.random.permutation(imgn_flat) #this function shuffles the first axis
    imgn_permuted = imgn_flat_permuted.reshape(imgn.shape)
    # back to CHW torch tensor
    return torch.from_numpy(np.transpose(imgn_permuted, (2,0,1))), label #(label, 'permuted')


def filter_gauss(datapoint, srange=[1,1]):
    # Apply a Gaussian blur with a sigma drawn uniformly from srange.
    # NOTE(review): mutable default argument srange=[1,1] -- harmless here
    # since it is never mutated, but worth confirming.
    img, label = datapoint
    imgn = np.transpose(img.numpy(), (1,2,0))
    sigma = srange[0] + np.random.random_sample()*(srange[1]-srange[0])
    imgn_gaussed = skimage.filters.gaussian(imgn, sigma=sigma, multichannel=3)
    return torch.from_numpy(np.transpose(imgn_gaussed, (2,0,1))), label #+ ('gauss', sigma)


def gaussed_noise_perm(x):
    # Pixel-permutation noise followed by a mild Gaussian blur.
    x = noise_permute(x)
    x = filter_gauss(x, srange=[0.25,1.25])
    return x


def scale_full_range(datapoint):
    # Linearly rescale the image so its values span [0, 1].
    img_in = datapoint[0]
    img_0_based = img_in - img_in.min()
    img_scaled = img_0_based/(img_0_based.max())
    return img_scaled, datapoint[1]


def noise_uniform(datapoint):
    """Returns uniform noise with the same shape as the input.

    The input should be an image (PIL, others like numpy arrays might work,
    too) with a label. The returned image is a PIL image.
    It is assumed that img has 3 dimensions, the last of which is the color
    channels.

    NOTE(review): as above, the implementation actually takes and returns
    CHW torch tensors.
    """
    img, label = datapoint
    assert len(img.shape) == 3, 'Unexpected image dimensions:' + str(img.shape)
    imgn = np.transpose(img.numpy(), (1,2,0))
    if imgn.shape[2] != 1:
        assert imgn.shape[2] == 3, 'Unexpected last image dimensions:' + str(imgn.shape)
        imgn_random = np.float32(np.random.uniform(size=imgn.shape))
        return torch.from_numpy(np.transpose(imgn_random, (2,0,1))), label
    else:
        imgn_random = np.float32(np.random.uniform(size=imgn.shape))
        assert torch.from_numpy(np.transpose(imgn_random, (2,0,1))).shape == img.shape, 'torch.from_numpy(np.transpose(imgn_random, (2,0,1))).shape wrong: ' + str(torch.from_numpy(np.transpose(imgn_random, (2,0,1))).shape)
        return torch.from_numpy(np.transpose(imgn_random, (2,0,1))), label


def noise_low_freq(datapoint):
    # Low-frequency noise: uniform noise, blurred, then rescaled to [0, 1].
    uniform = noise_uniform(datapoint)
    gaussed = filter_gauss(uniform, srange=[1,2.5])
    low_freq = scale_full_range(gaussed)
    return low_freq


def identity(datapoint):
    # Pass-through transform.
    return datapoint


class monochrome:
    # Callable transform replacing the image by a constant-color image.

    def __init__(self, color):
        super().__init__()
        self.color = color

    def __call__(self, datapoint):
        img, label = datapoint
        assert len(img.shape) == 3, 'Unexpected image dimensions:' + str(img.shape)
        imgn = np.transpose(img.numpy(), (1,2,0))
        imgn_monochrome = np.float32(self.color*np.ones(imgn.shape))
        return torch.from_numpy(np.transpose(imgn_monochrome, (2,0,1))), label


class uniform_on_sphere:
    # Draws image-shaped points uniformly on a sphere of given radius and
    # center (direction from an isotropic Gaussian, then rescaled).

    def __init__(self, radius, center):
        super().__init__()
        self.radius = radius
        self.center = center

    def draw(self, datapoint):
        img, label = datapoint
        normal_rand_img = torch.randn(img.shape)
        scaling = self.radius / normal_rand_img.norm(2)
        return normal_rand_img * scaling + self.center, label


class rectangles:
    # Zeroes the image except for one randomly chosen tile of an
    # h_sections x v_sections grid, which is set to 1.
    # NOTE(review): mutates the input tensor in place (img *= 0).

    def __init__(self, sections):
        super().__init__()
        self.h_sections, self.v_sections = sections

    def draw(self, datapoint):
        img, label = datapoint
        assert img.shape[1] % self.h_sections == 0, 'horizontal tiling invalid'
        assert img.shape[2] % self.v_sections == 0, 'vertical tiling invalid'
        h_section_length = img.shape[1] // self.h_sections
        v_section_length = img.shape[2] // self.v_sections
        h = random.randint(0, self.h_sections-1)
        v = random.randint(0, self.v_sections-1)
        img *= 0
        img[:,h*h_section_length:(h+1)*h_section_length,v*v_section_length:(v+1)*v_section_length] = 1
        return img, label
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copied from https://github.com/facebookresearch/detectron2 and modified
import logging
import numpy as np
import cv2
import torch

Image = np.ndarray
Boxes = torch.Tensor


class MatrixVisualizer(object):
    """
    Base visualizer for matrix data
    """

    def __init__(
        self,
        inplace=True,
        cmap=cv2.COLORMAP_PARULA,
        val_scale=1.0,
        alpha=0.7,
        interp_method_matrix=cv2.INTER_LINEAR,
        interp_method_mask=cv2.INTER_NEAREST,
    ):
        self.inplace = inplace
        self.cmap = cmap
        self.val_scale = val_scale
        self.alpha = alpha
        self.interp_method_matrix = interp_method_matrix
        self.interp_method_mask = interp_method_mask

    def visualize(self, image_bgr, mask, matrix, bbox_xywh):
        """Overlay a color-mapped matrix onto image_bgr inside bbox_xywh,
        keeping the original pixels where mask == 0."""
        self._check_image(image_bgr)
        self._check_mask_matrix(mask, matrix)
        if self.inplace:
            image_target_bgr = image_bgr
        else:
            image_target_bgr = image_bgr * 0
        x, y, w, h = [int(v) for v in bbox_xywh]
        if w <= 0 or h <= 0:
            # degenerate box: nothing to draw
            return image_bgr
        mask, matrix = self._resize(mask, matrix, w, h)
        mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3])
        matrix_scaled = matrix.astype(np.float32) * self.val_scale
        _EPSILON = 1e-6
        if np.any(matrix_scaled > 255 + _EPSILON):
            logger = logging.getLogger(__name__)
            logger.warning(
                f"Matrix has values > {255 + _EPSILON} after " f"scaling, clipping to [0..255]"
            )
        matrix_scaled_8u = matrix_scaled.clip(0, 255).astype(np.uint8)
        matrix_vis = cv2.applyColorMap(matrix_scaled_8u, self.cmap)
        # restore original pixels outside the mask
        matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg]
        image_target_bgr[y : y + h, x : x + w, :] = (
            image_target_bgr[y : y + h, x : x + w, :] * (1.0 - self.alpha) + matrix_vis * self.alpha
        )
        return image_target_bgr.astype(np.uint8)

    def _resize(self, mask, matrix, w, h):
        # BUGFIX: the interpolation method must be passed as the keyword
        # `interpolation`; as the third positional argument of cv2.resize it
        # would be interpreted as the `dst` output buffer.
        if (w != mask.shape[1]) or (h != mask.shape[0]):
            mask = cv2.resize(mask, (w, h), interpolation=self.interp_method_mask)
        if (w != matrix.shape[1]) or (h != matrix.shape[0]):
            matrix = cv2.resize(matrix, (w, h), interpolation=self.interp_method_matrix)
        return mask, matrix

    def _check_image(self, image_rgb):
        # expects an 8-bit 3-channel image
        assert len(image_rgb.shape) == 3
        assert image_rgb.shape[2] == 3
        assert image_rgb.dtype == np.uint8

    def _check_mask_matrix(self, mask, matrix):
        # expects 2-D mask (uint8) and 2-D matrix
        assert len(matrix.shape) == 2
        assert len(mask.shape) == 2
        assert mask.dtype == np.uint8


class RectangleVisualizer(object):
    """Draws a rectangle outline for an xywh box."""

    _COLOR_GREEN = (18, 127, 15)

    def __init__(self, color=_COLOR_GREEN, thickness=1):
        self.color = color
        self.thickness = thickness

    def visualize(self, image_bgr, bbox_xywh, color=None, thickness=None):
        x, y, w, h = bbox_xywh
        color = color or self.color
        thickness = thickness or self.thickness
        cv2.rectangle(image_bgr, (int(x), int(y)), (int(x + w), int(y + h)), color, thickness)
        return image_bgr


class PointsVisualizer(object):
    """Draws filled circles at the given (x, y) points."""

    _COLOR_GREEN = (18, 127, 15)

    def __init__(self, color_bgr=_COLOR_GREEN, r=5):
        self.color_bgr = color_bgr
        self.r = r

    def visualize(self, image_bgr, pts_xy, colors_bgr=None, rs=None):
        for j, pt_xy in enumerate(pts_xy):
            x, y = pt_xy
            # per-point color/radius override falls back to the defaults
            color_bgr = colors_bgr[j] if colors_bgr is not None else self.color_bgr
            r = rs[j] if rs is not None else self.r
            cv2.circle(image_bgr, (x, y), r, color_bgr, -1)
        return image_bgr


class TextVisualizer(object):
    """Draws text with an optional translucent fill and frame behind it."""

    _COLOR_GRAY = (218, 227, 218)
    _COLOR_WHITE = (255, 255, 255)

    def __init__(
        self,
        font_face=cv2.FONT_HERSHEY_SIMPLEX,
        font_color_bgr=_COLOR_GRAY,
        font_scale=0.35,
        font_line_type=cv2.LINE_AA,
        font_line_thickness=1,
        fill_color_bgr=_COLOR_WHITE,
        fill_color_transparency=1.0,
        frame_color_bgr=_COLOR_WHITE,
        frame_color_transparency=1.0,
        frame_thickness=1,
    ):
        self.font_face = font_face
        self.font_color_bgr = font_color_bgr
        self.font_scale = font_scale
        self.font_line_type = font_line_type
        self.font_line_thickness = font_line_thickness
        self.fill_color_bgr = fill_color_bgr
        self.fill_color_transparency = fill_color_transparency
        self.frame_color_bgr = frame_color_bgr
        self.frame_color_transparency = frame_color_transparency
        self.frame_thickness = frame_thickness

    def visualize(self, image_bgr, txt, topleft_xy):
        """Render *txt* with its top-left corner at topleft_xy."""
        txt_w, txt_h = self.get_text_size_wh(txt)
        topleft_xy = tuple(map(int, topleft_xy))
        # cv2.putText anchors at the text baseline (bottom-left)
        bottomleft_xy = (topleft_xy[0], topleft_xy[1] + txt_h)
        x, y = topleft_xy
        if self.frame_color_transparency < 1.0:
            t = self.frame_thickness
            # BUGFIX (here and below): np.float was removed in NumPy 1.24;
            # it was only an alias for the builtin float.
            image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] = (
                image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :]
                * self.frame_color_transparency
                + np.array(self.frame_color_bgr) * (1.0 - self.frame_color_transparency)
            ).astype(float)
        if self.fill_color_transparency < 1.0:
            image_bgr[y : y + txt_h, x : x + txt_w, :] = (
                image_bgr[y : y + txt_h, x : x + txt_w, :] * self.fill_color_transparency
                + np.array(self.fill_color_bgr) * (1.0 - self.fill_color_transparency)
            ).astype(float)
        cv2.putText(
            image_bgr,
            txt,
            bottomleft_xy,
            self.font_face,
            self.font_scale,
            self.font_color_bgr,
            self.font_line_thickness,
            self.font_line_type,
        )
        return image_bgr

    def get_text_size_wh(self, txt):
        """Return the (width, height) of *txt* with the current font settings."""
        ((txt_w, txt_h), _) = cv2.getTextSize(
            txt, self.font_face, self.font_scale, self.font_line_thickness
        )
        return txt_w, txt_h


class CompoundVisualizer(object):
    """Applies a sequence of visualizers, one datum each, to the same image."""

    def __init__(self, visualizers):
        self.visualizers = visualizers

    def visualize(self, image_bgr, data):
        assert len(data) == len(self.visualizers), (
            "The number of datas {} should match the number of visualizers"
            " {}".format(len(data), len(self.visualizers))
        )
        image = image_bgr
        for i, visualizer in enumerate(self.visualizers):
            image = visualizer.visualize(image, data[i])
        return image

    def __str__(self):
        visualizer_str = ", ".join([str(v) for v in self.visualizers])
        return "Compound Visualizer [{}]".format(visualizer_str)
import numpy as np


class AdalineGD(object):
    """ADAptive LInear NEuron classifier.

    Supports full-batch gradient descent (`fit`) and stochastic gradient
    descent (`Stochastic_fit`) on a sum-of-squared-errors cost.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes over the training data.
    random_state : int
        Seed for reproducible weight initialization and shuffling.
    """

    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state
        # The SGD path shuffles the data only once, before the first epoch.
        self._DataShuffled = False
        # Per-epoch cost history; appended to by fit()/Stochastic_fit().
        self.cost_track = []

    def initialize_weights(self, m):
        """Initialize weights to small random numbers; w_[0] is the bias."""
        self.rgen = np.random.RandomState(self.random_state)
        self.w_ = self.rgen.normal(loc=0.01, scale=0.01, size=m + 1)

    def net_input(self, X):
        """Return the weighted sum of inputs plus the bias unit."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Identity activation (Adaline is a linear neuron)."""
        return X

    def predict(self, X):
        """Return the class label (+1 / -1) after the unit-step threshold."""
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

    def fit(self, X, y):
        """Fit weights by full-batch gradient descent.

        Appends each epoch's sum-of-squared-errors to `cost_track`.
        Returns self.
        """
        self.initialize_weights(X.shape[1])
        for _ in range(self.n_iter):
            output = self.activation(self.net_input(X))
            errors = y - output
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            # NOTE: batch cost is the full SSE while the SGD path tracks the
            # mean of 0.5 * error^2; the two histories are not comparable.
            cost = np.dot(errors, errors.T)
            self.cost_track.append(cost)
        return self

    def _shuffle(self, X, y):
        """Return X, y reordered by a random permutation."""
        r = self.rgen.permutation(y.size)
        return X[r], y[r]

    def _update_weights(self, xi, target):
        """Apply one Adaline SGD update for a single sample; return its cost."""
        output = self.activation(self.net_input(xi))
        error = target - output
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error**2
        return cost

    def Stochastic_fit(self, X, y):
        """Fit weights by stochastic gradient descent.

        Appends each epoch's mean per-sample cost to `cost_track`.
        Returns self.
        """
        self.initialize_weights(X.shape[1])
        for _ in range(self.n_iter):
            # Shuffle only before the first epoch (tracked by _DataShuffled).
            if self._DataShuffled != True:
                X, y = self._shuffle(X, y)
                self._DataShuffled = True
            costs = [self._update_weights(xi, yi) for xi, yi in zip(X, y)]
            self.cost_track.append(sum(costs) / len(costs))
        return self


def main():
    """Reproduce the Adaline learning-rate and standardization experiments."""
    # Plotting/IO imports are deferred so the classifier can be imported
    # without pandas or matplotlib installed.
    import pandas as pd
    import matplotlib.pyplot as plt

    dataset = pd.read_csv('iris.data', header=None)
    print(dataset.tail())
    y = dataset.iloc[:100, 4].values
    y = np.where(y == 'Iris-setosa', 1, -1)
    X = dataset.iloc[:100, [0, 2]].values

    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 4))

    ada1 = AdalineGD(eta=0.01, n_iter=10, random_state=1).fit(X, y)
    ax[0].plot(range(1, len(ada1.cost_track) + 1), np.log10(ada1.cost_track), marker='o')
    ax[0].set_xlabel('Epochs')
    # FIX: ylabel previously missed its closing parenthesis.
    ax[0].set_ylabel('log(Sum-squared-error)')
    ax[0].set_title('Adaline, learning rate = 0.01')

    ada2 = AdalineGD(eta=0.0001, n_iter=10, random_state=1).fit(X, y)
    ax[1].plot(range(1, len(ada2.cost_track) + 1), np.log10(ada2.cost_track), marker='x')
    ax[1].set_xlabel('Epochs')
    ax[1].set_ylabel('log(Sum-squared-error)')
    # FIX: title previously reported the wrong learning rate (0.1).
    ax[1].set_title('Adaline, learning rate = 0.0001')

    # Standardization: zero mean, unit variance per feature.
    X_s = np.copy(X)
    X_mean, X_std = np.mean(X_s, axis=0), np.std(X_s, axis=0)
    X_s -= X_mean
    X_s /= X_std
    print(X, X_s)

    ada3 = AdalineGD(eta=0.01, n_iter=10, random_state=1).fit(X_s, y)
    ax[2].plot(range(1, len(ada3.cost_track) + 1), np.log10(ada3.cost_track), marker='o')
    ax[2].set_xlabel('Epochs')
    ax[2].set_ylabel('log(Sum-squared-error)')
    # FIX: title previously reported the wrong learning rate (0.1).
    ax[2].set_title('Adaline (standardized), learning rate = 0.01')
    plt.show()

    ada4 = AdalineGD(eta=0.1, n_iter=10, random_state=1).Stochastic_fit(X_s, y)
    fig, ax = plt.subplots()
    ax.plot(range(1, len(ada4.cost_track) + 1), np.log10(ada4.cost_track), marker='x')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('log(Sum-squared-error)')
    ax.set_title('Adaline SGD, learning rate = 0.1')
    plt.show()


if __name__ == '__main__':
    main()
# Bayes Best Subset Regression (BBSR) for the inferelator pipeline.
import pandas as pd
import numpy as np
import scipy.stats
from inferelator import utils
from inferelator.regression import bayes_stats
from inferelator.regression import base_regression
from inferelator.regression import mi
from inferelator.distributed.inferelator_mp import MPControl

# Default number of predictors to include in the model
DEFAULT_nS = 10

# Default weight for priors & Non-priors
# If prior_weight is the same as no_prior_weight:
# Priors will be included in the pp matrix before the number of predictors is reduced to nS
# They won't get special treatment in the model though
DEFAULT_prior_weight = 1
DEFAULT_no_prior_weight = 1

# Throw away the priors which have a CLR that is 0 before the number of predictors is reduced by BIC
DEFAULT_filter_priors_for_clr = False


class BBSR(base_regression.BaseRegression):
    # Bayseian correlation measurements

    # Priors Data
    prior_mat = None  # [G x K] # numeric
    filter_priors_for_clr = DEFAULT_filter_priors_for_clr  # bool

    # Weights for Predictors (weights_mat is set with _calc_weight_matrix)
    weights_mat = None  # [G x K] numeric
    prior_weight = DEFAULT_prior_weight  # numeric
    no_prior_weight = DEFAULT_no_prior_weight  # numeric

    # Predictors to include in modeling (pp is set with _build_pp_matrix)
    pp = None  # [G x K] bool
    nS = DEFAULT_nS  # int

    # If True, fit with ordinary least squares instead of the Bayesian model
    ols_only = False

    def __init__(self, X, Y, clr_mat, prior_mat, nS=DEFAULT_nS, prior_weight=DEFAULT_prior_weight,
                 no_prior_weight=DEFAULT_no_prior_weight, ordinary_least_squares=False):
        """
        Create a Regression object for Bayes Best Subset Regression

        :param X: Expression or Activity data [N x K]
        :type X: InferelatorData
        :param Y: Response expression data [N x G]
        :type Y: InferelatorData
        :param clr_mat: Calculated CLR between features of X & Y [G x K]
        :type clr_mat: pd.DataFrame
        :param prior_mat: Prior data between features of X & Y [G x K]
        :type prior_mat: pd.DataFrame
        :param nS: int
            Number of predictors to retain
        :param prior_weight: int
            Weight of a predictor which does have a prior
        :param no_prior_weight: int
            Weight of a predictor which doesn't have a prior
        :param ordinary_least_squares: bool
            If True, use OLS instead of Bayesian best subset regression
        """
        super(BBSR, self).__init__(X, Y)
        self.nS = nS
        self.ols_only = ordinary_least_squares

        # Calculate the weight matrix
        self.prior_weight = prior_weight
        self.no_prior_weight = no_prior_weight
        weights_mat = self._calculate_weight_matrix(prior_mat, p_weight=prior_weight, no_p_weight=no_prior_weight)
        utils.Debug.vprint("Weight matrix {} construction complete".format(weights_mat.shape))

        # Rebuild weights, priors, and the CLR matrix for the features that are in this bootstrap
        # NOTE(review): assumes self.genes / self.tfs are set by the BaseRegression
        # constructor from X and Y — confirm against base_regression.
        self.weights_mat = weights_mat.loc[self.genes, self.tfs]
        self.prior_mat = prior_mat.loc[self.genes, self.tfs]
        self.clr_mat = clr_mat.loc[self.genes, self.tfs]

        # Build a boolean matrix indicating which tfs should be used as predictors for regression for each gene
        self.pp = self._build_pp_matrix()

    def regress(self):
        """
        Execute BBSR

        :return: pd.DataFrame [G x K], pd.DataFrame [G x K]
            Returns the regression betas and beta error reductions for all threads
            if this is the master thread (rank 0)
            Returns None, None if it's a subordinate thread
        """
        # Dask gets its own whole-matrix code path; all other backends map a
        # per-gene worker function over the gene indices.
        if MPControl.is_dask():
            from inferelator.distributed.dask_functions import bbsr_regress_dask
            return bbsr_regress_dask(self.X, self.Y, self.pp, self.weights_mat, self.G, self.genes, self.nS)

        def regression_maker(j):
            # Fit one gene: regress gene j's (scaled) expression on the
            # predictors selected in row j of the pp matrix.
            level = 0 if j % 100 == 0 else 2
            utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=self.genes[j], i=j, total=self.G),
                                 level=level)
            data = bayes_stats.bbsr(self.X.values,
                                    utils.scale_vector(self.Y.get_gene_data(j, force_dense=True).flatten()),
                                    self.pp.iloc[j, :].values.flatten(),
                                    self.weights_mat.iloc[j, :].values.flatten(),
                                    self.nS,
                                    ordinary_least_squares=self.ols_only)
            data['ind'] = j
            return data

        return MPControl.map(regression_maker, range(self.G), tell_children=False)

    def _build_pp_matrix(self):
        """
        From priors and context likelihood of relatedness, determine which predictors should be included in the model

        :return pp: pd.DataFrame [G x K]
            Boolean matrix indicating which predictor variables should be included in BBSR for each response variable
        """

        # Create a predictor boolean array from priors
        # A predictor is pre-selected if it has a nonzero prior OR a non-default weight.
        pp = np.logical_or(self.prior_mat != 0, self.weights_mat != self.no_prior_weight)

        pp_idx = pp.index
        pp_col = pp.columns

        if self.filter_priors_for_clr:
            # Set priors which have a CLR of 0 to FALSE
            pp = np.logical_and(pp, self.clr_mat != 0).values
        else:
            pp = pp.values

        # Mark the nS predictors with the highest CLR true (Do not include anything with a CLR of 0)
        mask = np.logical_or(self.clr_mat == 0, ~np.isfinite(self.clr_mat)).values
        masked_clr = np.ma.array(self.clr_mat.values, mask=mask)
        for i in range(self.G):
            # Never request more predictors than there are valid (unmasked) CLR values.
            n_to_keep = min(self.nS, self.K, mask.shape[1] - np.sum(mask[i, :]))
            if n_to_keep == 0:
                continue
            # endwith=False sorts masked entries to the front, so the slice
            # from the tail picks the n_to_keep largest valid CLR values.
            clrs = np.ma.argsort(masked_clr[i, :], endwith=False)[-1 * n_to_keep:]
            pp[i, clrs] = True

        # Rebuild into a DataFrame and set autoregulation to 0
        pp = pd.DataFrame(pp, index=pp_idx, columns=pp_col, dtype=np.dtype(bool))
        pp = utils.df_set_diag(pp, False)

        return pp

    @staticmethod
    def _calculate_weight_matrix(p_matrix, no_p_weight=DEFAULT_no_prior_weight,
                                 p_weight=DEFAULT_prior_weight):
        """
        Create a weights matrix. Everywhere p_matrix is not set to 0, the weights matrix will have p_weight.
        Everywhere p_matrix is set to 0, the weights matrix will have no_p_weight

        :param p_matrix: pd.DataFrame [G x K]
        :param no_p_weight: int
            Weight of something which doesn't have a prior
        :param p_weight: int
            Weight of something which does have a prior
        :return weights_mat: pd.DataFrame [G x K]
        """
        weights_mat = p_matrix * 0 + no_p_weight
        return weights_mat.mask(p_matrix != 0, other=p_weight)


class BBSRRegressionWorkflow(base_regression.RegressionWorkflow):
    """
    Add BBSR regression into a workflow object
    """
    mi_driver = mi.MIDriver
    mi_sync_path = None

    # Regression parameters; overridden via set_regression_parameters().
    prior_weight = DEFAULT_prior_weight
    no_prior_weight = DEFAULT_no_prior_weight
    bsr_feature_num = DEFAULT_nS
    clr_only = False
    ols_only = False

    def set_regression_parameters(self, prior_weight=None, no_prior_weight=None, bsr_feature_num=None,
                                  clr_only=False, ordinary_least_squares_only=None):
        """
        Set regression parameters for BBSR

        :param prior_weight: Weight for predictors that have a prior
        :param no_prior_weight: Weight for predictors without a prior
        :param bsr_feature_num: Number of predictors to retain per gene
        :param clr_only: If True, ignore priors and use only the CLR matrix
        :param ordinary_least_squares_only: If True, fit with OLS instead of BBSR
        """
        # NOTE(review): _set_with_warning / _set_without_warning semantics come
        # from the workflow base class — presumably no-ops on None; confirm.
        self._set_with_warning("prior_weight", prior_weight)
        self._set_with_warning("no_prior_weight", no_prior_weight)
        self._set_with_warning("bsr_feature_num", bsr_feature_num)
        self._set_without_warning("clr_only", clr_only)
        self._set_without_warning("ols_only", ordinary_least_squares_only)

    def run_bootstrap(self, bootstrap):
        """Run MI/CLR calculation and BBSR for one bootstrap resample."""
        X = self.design.get_bootstrap(bootstrap)
        Y = self.response.get_bootstrap(bootstrap)
        utils.Debug.vprint('Calculating MI, Background MI, and CLR Matrix', level=0)
        clr_matrix, _ = self.mi_driver().run(Y, X, return_mi=False)
        utils.Debug.vprint('Calculating betas using BBSR', level=0)

        # Create a mock prior with no information if clr_only is set
        if self.clr_only:
            priors = pd.DataFrame(0, index=self.priors_data.index, columns=self.priors_data.columns)
        else:
            priors = self.priors_data

        return BBSR(X, Y, clr_matrix, priors, prior_weight=self.prior_weight, no_prior_weight=self.no_prior_weight,
                    nS=self.bsr_feature_num, ordinary_least_squares=self.ols_only).run()
# Load dependencies
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from matplotlib import *
import matplotlib.pyplot as plt
from matplotlib.cm import register_cmap
from scipy import stats
from sklearn.decomposition import PCA
import seaborn
import os
import glob


def getPCAEigenPair(dataFrame):
    """Compute PCA eigenpairs for a 4-feature data frame.

    The frame's five columns are renamed in place to ['Index','A','B','C','D'];
    rows sharing an 'Index' are averaged by a pivot table, NaNs are replaced
    with 0, and the features are standardized before the covariance
    eigendecomposition.

    :param dataFrame: pd.DataFrame with exactly five columns; note that the
        caller's column names are overwritten.
    :return: dict with 'eigen_value' (eigenvalues of the covariance matrix of
        the standardized data) and 'eigen_vector' (matching eigenvectors,
        one per column)
    """
    dataFrame.columns = ['Index', 'A', 'B', 'C', 'D']
    pivoted = pd.pivot_table(dataFrame, index=['Index'])
    df = pivoted.replace(np.nan, 0, regex=True)
    X_std = StandardScaler().fit_transform(df)
    print('NumPy covariance matrix: \n%s' % np.cov(X_std.T))
    # The PCA fit is reported for reference only; the returned eigenpairs come
    # from the explicit covariance decomposition below.
    pca = PCA(n_components=4)
    print(pca)
    pca.fit_transform(df)
    print(pca.explained_variance_ratio_)
    cov_mat = np.cov(X_std.T)
    eig_vals, eig_vecs = np.linalg.eig(cov_mat)
    print('Eigenvectors \n%s' % eig_vecs)
    print('\nEigenvalues \n%s' % eig_vals)
    return {"eigen_value": eig_vals, "eigen_vector": eig_vecs}


def main():
    """Run the eigendecomposition over the p1.csv .. p8.csv input files."""
    file_list = ["p" + str(i + 1) + ".csv" for i in range(8)]
    for key in file_list:
        print("key\n%s" % key)
        dfp = pd.read_csv(key)
        eigen_data = getPCAEigenPair(dfp)
        # FIX: reuse the computed result — the original called
        # getPCAEigenPair a second time here, repeating all the work
        # (and its printed output) for every file.
        print(np.array(eigen_data))


if __name__ == '__main__':
    main()
# Copyright 2022 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for model checkpoints.""" from unittest import mock from absl.testing import absltest from absl.testing import parameterized import chex import jax import jax.numpy as jnp import numpy as np from vmoe.checkpoints import base from vmoe.checkpoints import partitioned from vmoe.checkpoints import types ArrayChunks = types.ArrayChunks Device = jax.xla.Device Mesh = partitioned.Mesh PartitionSpec = partitioned.PartitionSpec Slice = partitioned.Slice SliceNd = partitioned.SliceNd SliceNdArray = partitioned.SliceNdArray class MakeSliceNdArrayTest(absltest.TestCase): """Tests the function creating SliceNdArrays from a mesh and ShapedArrays.""" def test_make_slice_nd_arrays(self): # (4, 2) mesh with 4 processes, each handling two devices. 
devices = np.asarray([ [_make_device(process_index=0, id=0), _make_device(process_index=2, id=4)], [_make_device(process_index=0, id=1), _make_device(process_index=2, id=5)], [_make_device(process_index=1, id=2), _make_device(process_index=3, id=6)], [_make_device(process_index=1, id=3), _make_device(process_index=3, id=7)], ], dtype=np.object) mesh = Mesh(devices, ('a', 'b')) aval = jax.ShapedArray((16, 8, 3), dtype=jnp.float32) partition_spec = partitioned.ParsedPartitionSpec.from_user_input( PartitionSpec('a', 'b'), 'input') slice_nd_arrays = partitioned._make_slice_nd_arrays( [aval], [partition_spec], mesh) expected_slice_nd_array = SliceNdArray.create([ SliceNd(Slice(0, 4), Slice(0, 4), Slice(0, 3)), SliceNd(Slice(0, 4), Slice(4, 8), Slice(0, 3)), SliceNd(Slice(4, 8), Slice(0, 4), Slice(0, 3)), SliceNd(Slice(4, 8), Slice(4, 8), Slice(0, 3)), SliceNd(Slice(8, 12), Slice(0, 4), Slice(0, 3)), SliceNd(Slice(8, 12), Slice(4, 8), Slice(0, 3)), SliceNd(Slice(12, 16), Slice(0, 4), Slice(0, 3)), SliceNd(Slice(12, 16), Slice(4, 8), Slice(0, 3)), ], shape=(4, 2)) self.assertLen(slice_nd_arrays, 1) np.testing.assert_array_equal(slice_nd_arrays[0], expected_slice_nd_array) class MatchCheckpointToLocalSlices(absltest.TestCase): def test_match_checkpoint_to_local_slices(self): local_global_slices = [ (SliceNd(Slice(0, 4)), SliceNd(Slice(4, 8))), (SliceNd(Slice(4, 8)), SliceNd(Slice(0, 4))), ] ckpt_slices_and_shards = [ (SliceNd(Slice(6, 12)), 1), (SliceNd(Slice(0, 6)), 2), ] output = list( partitioned._match_checkpoint_to_local_slices(local_global_slices, ckpt_slices_and_shards)) expected_output = [ (1, SliceNd(Slice(6, 12)), SliceNd(Slice(0, 2)), SliceNd(Slice(2, 4))), (2, SliceNd(Slice(0, 6)), SliceNd(Slice(4, 6)), SliceNd(Slice(0, 2))), (2, SliceNd(Slice(0, 6)), SliceNd(Slice(0, 4)), SliceNd(Slice(4, 8))), ] self.assertCountEqual(expected_output, output) def test_match_checkpoint_to_local_slices_raises(self): local_global_slices = [(SliceNd(Slice(0, 4)), SliceNd(Slice(4, 
8)))] ckpt_slices_and_shards = [] with self.assertRaises(ValueError): _ = list( partitioned._match_checkpoint_to_local_slices(local_global_slices, ckpt_slices_and_shards)) class PairLocalAndGlobalSlicesTest(parameterized.TestCase): """Tests the function pairing local SliceNds and global SliceNds. A (local/global) SliceNdArray is an array of SliceNd objects denoting which chunk of a particular array each device in the (local/global) mesh holds. """ def _make_partitioned_across_process_data(self): # pylint: disable=g-unreachable-test-method # 2x2 mesh, with two processes handling two devices each. # devices | processes # [0, 1] | [0, 0] # [2, 3] | [1, 1] devices = [ _make_device(process_index=0, id=0), _make_device(process_index=0, id=1), _make_device(process_index=1, id=2), _make_device(process_index=1, id=3), ] mesh = Mesh(np.asarray(devices).reshape(2, 2), ('a', 'b')) # The global shape of the data is (8, ?), which is chunked in 2 partitions, # each one is handled by a different process. The two devices of a given # process store the same data, thus the local shape is (4, ?). global_slices_array = SliceNdArray.create( [SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(4, 8), Slice()), SliceNd(Slice(4, 8), Slice())], shape=(2, 2)) local_slices_array = SliceNdArray.create( [SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(0, 4), Slice())], shape=(1, 2)) return mesh, local_slices_array, global_slices_array def _make_partitioned_within_process_data(self): # pylint: disable=g-unreachable-test-method # 2x2 mesh, with two processes handling two devices each. 
# devices | processes # [0, 2] | [0, 1] # [1, 3] | [0, 1] devices = [ _make_device(process_index=0, id=0), _make_device(process_index=1, id=2), _make_device(process_index=0, id=1), _make_device(process_index=1, id=3), ] mesh = Mesh(np.asarray(devices).reshape(2, 2), ('a', 'b')) # The global shape of the data is (8, ?), which is chunked in 2 partitions, # each one is handled by a different device within the same process. # The two processes actually hold the same data. Thus, they have local shape # of (8, ?). global_slices_array = SliceNdArray.create( [SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(4, 8), Slice()), SliceNd(Slice(4, 8), Slice())], shape=(2, 2)) local_slices_array = SliceNdArray.create( [SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(4, 8), Slice())], shape=(2, 1)) return mesh, local_slices_array, global_slices_array @parameterized.named_parameters( ('process_0_across_process', 0, '_make_partitioned_across_process_data', {(SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(0, 4), Slice()))}), ('process_1_across_process', 1, '_make_partitioned_across_process_data', {(SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(4, 8), Slice()))}), ('process_0_within_process', 0, '_make_partitioned_within_process_data', {(SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(0, 4), Slice())), (SliceNd(Slice(4, 8), Slice()), SliceNd(Slice(4, 8), Slice()))}), ('process_1_within_process', 1, '_make_partitioned_within_process_data', {(SliceNd(Slice(0, 4), Slice()), SliceNd(Slice(0, 4), Slice())), (SliceNd(Slice(4, 8), Slice()), SliceNd(Slice(4, 8), Slice()))}), ) def test_pair_local_and_global_slices(self, process_index, make_data, expected_pairs): mesh, local_slices_array, global_slices_array = getattr(self, make_data)() with mock.patch.object( jax._src.lib.xla_bridge, 'process_index', return_value=process_index): pairs = list(partitioned._pair_local_and_global_slices( [local_slices_array], [global_slices_array], mesh, local_mesh=None)) self.assertLen(pairs, 1) 
self.assertEqual(pairs[0], expected_pairs) class RemoveUnusedShardsTest(absltest.TestCase): """Tests that shards that don't handle any slice are removed.""" def test_remove_unused_shards(self): shards_per_slice = [[1, 3], [2]] process_per_shard = [5, 4, 3, 2, 1] output = partitioned._remove_unused_shards(shards_per_slice, process_per_shard) expected_shards_per_slice = [[0, 1], [2]] expected_process_per_shard = (4, 2, 3) self.assertEqual(output[0], expected_shards_per_slice) self.assertTupleEqual(output[1], expected_process_per_shard) class RestoreArrayChunks(parameterized.TestCase): def test_restore_array_chunks(self): array_chunks = types.ArrayChunks( chunks={ 0: [1 * np.ones((5, 4)), 2 * np.ones((4, 5))], 1: [3 * np.ones((6,)), 4 * np.ones((3,))], 2: [np.arange(6).reshape((3, 2))], }, global_slices={ 0: [SliceNd(Slice(0, 5), Slice(4, 8)), SliceNd(Slice(10, 14), Slice(0, 5))], 1: [SliceNd(Slice(0, 6)), SliceNd(Slice(6, 9))], 2: [SliceNd(Slice(7, 10), Slice(4, 6))], }) array_slices_to_restore = { 1: [(SliceNd(Slice(0, 6)), SliceNd(Slice(1, 3)), SliceNd(Slice(0, 2))), (SliceNd(Slice(6, 9)), SliceNd(Slice(0, 2)), SliceNd(Slice(3, 5)))], 2: [(SliceNd(Slice(7, 10), Slice(4, 6)), SliceNd(Slice(0, 3), Slice(0, 1)), SliceNd(Slice(0, 3), Slice(2, 3))), (SliceNd(Slice(7, 10), Slice(4, 6)), SliceNd(Slice(1, 3), Slice(0, 2)), SliceNd(Slice(2, 4), Slice(0, 2)))], } local_arrays = [None, np.zeros((8,)), np.zeros((4, 4))] with mock.patch.object( base, 'restore_checkpoint', return_value=array_chunks): partitioned._restore_array_chunks('foo', local_arrays, array_slices_to_restore) np.testing.assert_array_almost_equal( local_arrays[1], [3, 3, 0, 4, 4, 0, 0, 0]) np.testing.assert_array_almost_equal( local_arrays[2], [[0, 0, 0, 0], [0, 0, 2, 0], [2, 3, 4, 0], [4, 5, 0, 0]]) @parameterized.parameters( # Checkpoint has slice [0:5], process holds global slice [5:10] in a # local slice [0:5] of an array. # Checkpoint and global slices do not intersect. 
(SliceNd(Slice(0, 5)), SliceNd(Slice(5, 10)), SliceNd(Slice(0, 5)), None), # Checkpoint has slice [3:8], process holds global slice [3:8] in a # local slice [0:5] of an array. # Checkpoint chunk[0:5] must be copied to local array[0:5]. (SliceNd(Slice(3, 8)), SliceNd(Slice(3, 8)), SliceNd(Slice(0, 5)), (SliceNd(Slice(0, 5)), SliceNd(Slice(0, 5)))), # Checkpoint has slice [2:5, 0:4], process holds global slice [1:4, 1:3] # in local slice [4:7, 4:6] of an array. # Checkpoint chunk[0:2, 1:3] must be copied to local array[5:7, 4:6]. (SliceNd(Slice(2, 5), Slice(0, 4)), SliceNd(Slice(1, 4), Slice(1, 3)), SliceNd(Slice(4, 7), Slice(4, 6)), (SliceNd(Slice(0, 2), Slice(1, 3)), SliceNd(Slice(5, 7), Slice(4, 6)))), ) def test_intersect_slice_nd(self, ckpt_slice_nd, global_slice_nd, local_slice_nd, expected_output): output = partitioned._intersect_slice_nd(ckpt_slice_nd, global_slice_nd, local_slice_nd) self.assertEqual(output, expected_output) class RestoreAndSaveCheckpointTest(parameterized.TestCase): @parameterized.named_parameters( ('process_0_of_2', 0, None, [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3]]), ('process_1_of_2', 1, None, [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3]]), ('process_0_of_2_axis_resources', 0, {'x': None, 'y': None, 'z': PartitionSpec('a')}, [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]), 
('process_1_of_2_axis_resources', 1, {'x': None, 'y': None, 'z': PartitionSpec('a')}, [[2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2, 3, 3, 3, 3, 3]]), ) @mock.patch.object(partitioned.jax, 'process_count', return_value=2) def test_restore_checkpoint(self, process_index, axis_resources, expected_z, _): devices = np.asarray( [_make_device(process_index=i // 2, id=i) for i in range(4)]) mesh = partitioned.Mesh(devices.reshape((2, 2)), ('a', 'b')) prefix = self.create_tempfile().full_path def side_effect(filepath, *unused_args, **unused_kwargs): return { prefix + '.index': self._get_expected_index(), prefix + '.data-00000-of-00004': self._get_expected_shard_content(0), prefix + '.data-00001-of-00004': self._get_expected_shard_content(1), prefix + '.data-00002-of-00004': self._get_expected_shard_content(2), prefix + '.data-00003-of-00004': self._get_expected_shard_content(3), }[filepath] with mock.patch.object(partitioned.vmoe.checkpoints.base, 'restore_checkpoint', side_effect=side_effect): with mock.patch.object(jax, 'process_index', return_value=process_index): with mock.patch.object(jax._src.lib.xla_bridge, 'process_index', return_value=process_index): restored = partitioned.restore_checkpoint( prefix=prefix, tree=None, axis_resources=axis_resources, mesh=mesh) np.testing.assert_array_almost_equal( restored['x'], np.zeros((5, 5), dtype=np.float32)) np.testing.assert_array_almost_equal( restored['y'], [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]]) np.testing.assert_array_almost_equal(restored['z'], expected_z) def test_restore_checkpoint_empty_mesh(self): prefix = self.create_tempfile().full_path with self.assertRaisesRegex(ValueError, 'You must pass a non-empty mesh'): partitioned.restore_checkpoint( prefix=prefix, tree=None, axis_resources=None) 
@parameterized.named_parameters( ('process_0', 0, 2, 0), ('process_1', 1, 1, 2), ('process_2', 2, 1, 1), ('process_3', 3, 1, 3), ) @mock.patch.object(partitioned.jax, 'process_count', return_value=4) @mock.patch.object( partitioned.vmoe.multihost_utils, 'sync_devices', return_value=None) def test_save_checkpoint(self, process_index, num_written_files, shard, unused_1, unused_2): devices = np.asarray( [_make_device(process_index=i, id=i) for i in range(4)]).reshape((2, 2)) mesh = partitioned.Mesh(devices, ('a', 'b')) tree = { # 'x' has global_shape = (5, 5), it's written by process 0 in shard 0. 'x': np.ones((5, 5), dtype=np.float32) * process_index, # 'y' has global_shape = (10, 5), first half is written by process 0 # (shard 0), second half is written by process 2 (shard 1). 'y': np.ones((5, 5), dtype=np.float32) * process_index, # 'z' has global_shape = (10, 10), first quarter is written by process 0 # (shard 0), second quarter is written by process 1 (shard 2), # third quarter is written by process 2 (shard 1), fourth quarter is # written by process 3 (shard 3). 'z': np.ones((5, 5), dtype=np.float32) * process_index, } axis_resources = { 'x': None, 'y': PartitionSpec('a'), 'z': PartitionSpec('a', 'b'), } prefix = self.create_tempfile().full_path # Note: we need to mock both jax.process_index AND # jax._src.lib.process_index. with mock.patch.object(jax, 'process_index', return_value=process_index): with mock.patch.object(jax._src.lib.xla_bridge, 'process_index', return_value=process_index): async_result = partitioned.save_checkpoint( prefix=prefix, tree=tree, axis_resources=axis_resources, mesh=mesh) written_files = async_result.get() # Check that the process writes the expected number of files. self.assertLen(written_files, num_written_files) # If the process writes the index, load the index and check its icontent. 
if num_written_files == 2: index_content = base.restore_checkpoint(prefix + '.index') expected_index_content = self._get_expected_index() chex.assert_trees_all_equal_comparator( lambda x, y: x == y, lambda x, y: f'IndexInfos do not match:\n{x}\n{y}', index_content, expected_index_content) # Check that the process has written the expected sharded file. expected_ckpt_shard = prefix + f'.data-{shard:05d}-of-00004' self.assertIn(expected_ckpt_shard, written_files) array_chunks = base.restore_checkpoint(expected_ckpt_shard) expected_array_chunks = self._get_expected_shard_content(shard) chex.assert_trees_all_equal_comparator( self._compare_array_chunks, lambda x, y: f'ArrayChunks do not match:\n{x}\n{y}', array_chunks, expected_array_chunks, ) def test_save_checkpoint_empty_mesh(self): prefix = self.create_tempfile().full_path with self.assertRaisesRegex(ValueError, 'You must pass a non-empty mesh'): partitioned.save_checkpoint(prefix=prefix, tree=mock.MagicMock(), axis_resources=mock.MagicMock()) def _compare_array_chunks(self, a, b): """Compares two ArrayChunks objects.""" if a.global_slices != b.global_slices: return False a, sa = jax.tree_flatten(dict(a.chunks)) b, sb = jax.tree_flatten(dict(b.chunks)) if sa != sb: return False return all(map(lambda x, y: (x == y).all, a, b)) def _get_expected_index(self): return { 'shard_count': 4, 'index': { 'x': partitioned.IndexInfo( global_shape=jax.ShapedArray((5, 5), dtype=jnp.float32), global_slices=((Slice(0, 5), Slice(0, 5)),), shards=(0,)), 'y': partitioned.IndexInfo( global_shape=jax.ShapedArray((10, 5), dtype=jnp.float32), global_slices=((Slice(0, 5), Slice(0, 5)), (Slice(5, 10), Slice(0, 5))), shards=(0, 1)), 'z': partitioned.IndexInfo( global_shape=jax.ShapedArray((10, 10), dtype=jnp.float32), global_slices=((Slice(0, 5), Slice(0, 5)), (Slice(0, 5), Slice(5, 10)), (Slice(5, 10), Slice(0, 5)), (Slice(5, 10), Slice(5, 10))), shards=(0, 2, 1, 3)), }, } def _get_expected_shard_content(self, shard): """Returns the 
ArrayChunks data stored in each shard.""" return { # shard 0 is written by process 0. 0: ArrayChunks( chunks={ 0: [np.zeros((5, 5), dtype=np.float32)], # x[:, :] 1: [np.zeros((5, 5), dtype=np.float32)], # y[0:5, :] 2: [np.zeros((5, 5), dtype=np.float32)], # z[0:5, 0:5] }, global_slices={ 0: [(Slice(0, 5), Slice(0, 5))], 1: [(Slice(0, 5), Slice(0, 5))], 2: [(Slice(0, 5), Slice(0, 5))], }), # shard 1 is written by process 2. 1: ArrayChunks( chunks={ 1: [2 * np.ones((5, 5), dtype=np.float32)], # y[5:10, :] 2: [2 * np.ones((5, 5), dtype=np.float32)], # z[5:10, 0:5] }, global_slices={ 1: [(Slice(5, 10), Slice(0, 5))], 2: [(Slice(5, 10), Slice(0, 5))], }), # shard 2 is written by process 1. 2: ArrayChunks( chunks={ 2: [np.ones((5, 5), dtype=np.float32)], # z[0:5, 5:10] }, global_slices={ 2: [(Slice(0, 5), Slice(5, 10))], }), # shard 2 is written by process 3. 3: ArrayChunks( chunks={ 2: [3 * np.ones((5, 5), dtype=np.float32)], # z[5:10, 5:10] }, global_slices={ 2: [(Slice(5, 10), Slice(5, 10))], }), }[shard] class SliceNdArraysToShardsTest(absltest.TestCase): def _create_test_data(self): # PyTree used in several tests. return [ # Array 'x' has two axis, none of which is partitioned. SliceNdArray.create([SliceNd(Slice(), Slice())] * 6, shape=(3, 2)), # Array 'y' is also not partitioned, but only has one axis. SliceNdArray.create([SliceNd(Slice(),)] * 6, shape=(3, 2)), # Array 'z' is partitioned on its second axis, across the second logical # axis in two chunks. SliceNdArray.create( [ SliceNd(Slice(None), Slice(0, 3)), # Processes {0, 1, 2}. SliceNd(Slice(None), Slice(3, 6)), # Processes {3, 4, 5}. ], shape=(1, 2), tile=(3, 1)), ] @mock.patch.object(partitioned.jax, 'process_count', return_value=6) def test_slice_nd_arrays_to_shards(self, _): # Assume there's only one device per process to simplify calculations. 
devices = np.asarray([ [_make_device(process_index=0), _make_device(process_index=3)], [_make_device(process_index=1), _make_device(process_index=4)], [_make_device(process_index=2), _make_device(process_index=5)], ]) output = partitioned._slice_nd_arrays_to_shards( self._create_test_data(), devices, num_shards=6) expected_shards_per_slice = [[0], [1], [2, 3]] self.assertEqual(output[0], expected_shards_per_slice) self.assertTupleEqual(output[1], (0, 1, 2, 3, 4, 5)) @mock.patch.object(partitioned.jax, 'process_count', return_value=6) def test_slice_nd_arrays_to_shards_minimum(self, _): # Assume there's only one device per process to simplify calculations. # Notice that the process_indices are not contiguous. This affects the slice # that each process handles (for additional info, check the 'z' array in # _create_slice_axes_array_to_shards_test_data). devices = np.asarray([ [_make_device(process_index=0), _make_device(process_index=3)], [_make_device(process_index=1), _make_device(process_index=4)], [_make_device(process_index=2), _make_device(process_index=5)], ]) devices = devices.reshape(3, 2) output = partitioned._slice_nd_arrays_to_shards( self._create_test_data(), devices, num_shards=0) expected_shards_per_slice = [ [0], # Process 0. [0], # Process 0. [0, 1], # Process {0, 3}. ] self.assertEqual(output[0], expected_shards_per_slice) self.assertTupleEqual(output[1], (0, 3)) def _make_device(**kwargs): """Returns a new mocked device.""" device = mock.MagicMock(Device) for key, value in kwargs.items(): setattr(device, key, value) return device if __name__ == '__main__': absltest.main()
import colorsys
import random
import matplotlib.colors as mplc
import numpy as np
from numpy.core.shape_base import block
from skimage import measure
from matplotlib.patches import Polygon, Rectangle
import matplotlib
import matplotlib.pyplot as plt
from typing import List, Dict, Tuple, Union
from enum import Enum
from PIL import Image


def change_color_brightness(color: Tuple, brightness_factor: float):
    """
    Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
    less or more saturation than the original color.

    Args:
        color: color of the polygon. Refer to `matplotlib.colors` for a full list of
            formats that are accepted.
        brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0
            will correspond to no change, a factor in [-1.0, 0) range will result in
            a darker color and a factor in (0, 1.0] range will result in a lighter color.

    Returns:
        modified_color (tuple[double]): a tuple containing the RGB values of the
            modified color. Each value in the tuple is in the [0.0, 1.0] range.
    """
    assert brightness_factor >= -1.0 and brightness_factor <= 1.0
    color = mplc.to_rgb(color)
    polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
    # Scale lightness multiplicatively by the factor, then clamp into [0, 1].
    modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
    modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
    modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
    modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness,
                                         polygon_color[2])
    return modified_color


def draw_text(ax: plt.Axes,
              text: str,
              position: Tuple,
              font_size: float,
              color: str = "g",
              horizontal_alignment: str = "center",
              rotation: int = 0):
    """
    Draw a text label on the given axes over a dark background box.

    Args:
        ax (plt.Axes): axes to draw on.
        text (str): class label
        position (tuple): a tuple of the x and y coordinates to place text on image.
        font_size (int, optional): font of the text. If not provided, a font size
            proportional to the image width is calculated and used.
        color: color of the text. Refer to `matplotlib.colors` for full list of
            formats that are accepted.
        horizontal_alignment (str): see `matplotlib.text.Text`
        rotation: rotation angle in degrees CCW
    """
    # since the text background is dark, we don't want the text to be dark
    color = np.maximum(list(mplc.to_rgb(color)), 0.2)
    color[np.argmax(color)] = max(0.8, np.max(color))
    x, y = position
    ax.text(
        x,
        y,
        text,
        size=font_size * 1,
        family="sans-serif",
        bbox={
            "facecolor": "black",
            "alpha": 0.8,
            "pad": 0.7,
            "edgecolor": "none"
        },
        verticalalignment="top",
        horizontalalignment=horizontal_alignment,
        color=color,
        zorder=10,
        rotation=rotation,
    )


class BoxType(Enum):
    # Box encoded as (x_min, y_min, x_max, y_max).
    xyxy = 1
    # Box encoded as (x, y, width, height).
    xywh = 2


def draw_instances(img: Union[Image.Image, np.ndarray],
                   boxes: Union[np.ndarray, List],
                   labels: Union[np.ndarray, List],
                   scores: Union[np.ndarray, List],
                   masks: Union[np.ndarray, List],
                   idx_class_dict: Dict[int, str],
                   title: str = '',
                   figsize: Tuple = (16, 8),
                   show_boxes: bool = False,
                   show_masks: bool = True,
                   min_score: float = 0.5,
                   min_area: int = 0,
                   colors: List = None,
                   ax: plt.Axes = None,
                   box_type: BoxType = BoxType.xyxy,
                   only_class_idxs: List[int] = None):
    """draw the instances from a object detector or an instance segmentation model

    Args:
        img (np.ndarray): an image with shape (width, height, channels)
        boxes (np.ndarray): an array of shape (nboxes, 4)
        labels (np.ndarray): an array of shape (nlabels,)
        scores (np.ndarray): an array of shape (nscores,)
        masks (np.ndarray): an array of shape [nmasks, 1, width, height]
        idx_class_dict (Dict[int, str]): a dictionary that maps class id to class name
        title (str, optional): plot title. Defaults to ''.
        figsize (Tuple, optional): size of the created figure when `ax` is None.
        show_boxes (bool, optional): draw bounding boxes. Defaults to False.
        show_masks (bool, optional): draw mask contours. Defaults to True.
        min_score (float, optional): instances below this score are skipped.
        min_area (int, optional): boxes below this area are skipped.
        colors (List, optional): one color per class id; generated if None.
        ax (plt.Axes, optional): axes to draw on; a new figure is made if None.
        box_type (BoxType, optional): box encoding. Defaults to BoxType.xyxy.
        only_class_idxs (List[int], optional): restrict drawing to these class ids.

    Returns:
        plt.Axes: the axes the instances were drawn on.
    """
    if boxes is not None and len(boxes) > 0 and len(np.array(boxes).shape) != 2:
        raise ValueError(
            f'the shape of the boxes should be (N_BOXES, 4) while shape is {np.array(boxes).shape}'
        )
    if masks is not None and len(masks) > 0 and len(np.array(masks).shape) < 3:
        raise ValueError(
            f'the shape of the masks should be (N_MASKS, HEIGHT, WIDTH) while shape is {np.array(masks).shape}'
        )
    labels_names = create_text_labels(labels, scores, idx_class_dict)
    if colors is None:
        colors = generate_colormap(len(idx_class_dict) + 1)
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    if only_class_idxs is None:
        only_class_idxs = list(idx_class_dict.keys())
    if isinstance(img, Image.Image):
        width, height = img.size
    else:
        height, width = img.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)
    out_image = np.array(img).astype(np.uint8)
    for idx in range(len(labels)):
        label_id = labels[idx]
        if label_id not in only_class_idxs:
            continue
        label_name = labels_names[idx]
        score = scores[idx]
        if score < min_score:
            continue
        # BUGFIX: `color` was previously assigned only inside `if show_masks:`,
        # which raised NameError for boxes-only usage (show_masks=False).
        color = colors[label_id]
        if show_masks:
            mask = np.squeeze(masks[idx, ...])
        if show_boxes:
            box = boxes[idx]
            if box_type.value == BoxType.xyxy.value:
                x0, y0, x1, y1 = box
                x, y, w, h = x0, y0, x1 - x0, y1 - y0
            else:
                x, y, w, h = box
            area = w * h
            if area < min_area:
                continue
            p = Rectangle((x, y),
                          w,
                          h,
                          linewidth=2,
                          alpha=0.7,
                          linestyle="dashed",
                          edgecolor=color,
                          facecolor='none')
            ax.add_patch(p)
        # add the caption
        # draw text in the center (defined by median) when box is not drawn
        # median is less sensitive to outliers.
        if show_masks:
            text_pos = np.median(mask.nonzero(), axis=1)[::-1]
        elif show_boxes:
            text_pos = (x + 5, y + 5)
        else:
            # BUGFIX: the original read `x`/`y` here even when no box was
            # computed. With neither masks nor boxes there is no anchor,
            # so skip the caption entirely.
            text_pos = None
        if text_pos is not None:
            horiz_align = "left"
            lighter_color = change_color_brightness(color, brightness_factor=0.7)
            font_size = 10
            draw_text(ax, label_name, text_pos, font_size, lighter_color,
                      horiz_align)
        if show_masks:
            padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                                   dtype=np.float32)
            padded_mask[1:-1, 1:-1] = mask
            contours = measure.find_contours(padded_mask, 0.5)
            for verts in contours:
                # Subtract the padding and flip (y, x) to (x, y)
                verts = np.fliplr(verts) - 1
                p = Polygon(verts,
                            facecolor=color,
                            edgecolor=color,
                            fill=True,
                            alpha=.5)
                ax.add_patch(p)
    ax.imshow(out_image)
    return ax


def generate_colormap(nelems: int, scaled: bool = False, bright: bool = True):
    """Generate `nelems` distinct colors (shuffled HSV samples).

    Uses the global `random` state, so shuffling order depends on any prior
    seeding done by the caller.
    """
    # Generate colors for drawing bounding boxes.
    brightness = 1. if bright else .7
    hsv_tuples = [(x / nelems, 1., brightness) for x in range(nelems)]
    colors = [colorsys.hsv_to_rgb(*x) for x in hsv_tuples]
    if scaled:
        colors = [(int(x[0] * 255), int(x[1] * 255), int(x[2] * 255))
                  for x in colors]
    random.shuffle(colors)
    return colors


def create_text_labels(classes: List[int], scores: List[float],
                       idx_class_dict: Dict[int, str]):
    """
    Build "<class name> <score>%" caption strings.

    Args:
        classes (list[int]):
        scores (list[float]):
        idx_class_dict (Dict[int, str]):

    Returns:
        list[str]
    """
    labels = [idx_class_dict[i] for i in classes]
    labels = [
        "{} {:.0f}%".format(label, score * 100)
        for label, score in zip(labels, scores)
    ]
    return labels


def draw_segmentation_map(mask: np.ndarray, colors: List[str]):
    """draw a segmentation map based on a class mask. The mask has to contain
    numbers that represent the class.

    Args:
        mask (np.ndarray): 2-D array of integer class ids; class 0 is treated
            as background and left black.
        colors (List[str]): one matplotlib color per class id; must be at
            least as long as the largest class id in `mask`.

    Returns:
        np.ndarray: RGB uint8 image of the same spatial size as `mask`.
    """
    colors_rgb = np.array([matplotlib.colors.to_rgb(c) for c in colors]) * 255
    r = np.zeros_like(mask).astype(np.uint8)
    g = np.zeros_like(mask).astype(np.uint8)
    b = np.zeros_like(mask).astype(np.uint8)
    # Skip the lowest class id (background) and paint each remaining class.
    for c in sorted(np.unique(mask))[1:]:
        idx = mask == c
        r[idx] = colors_rgb[c, 0]
        g[idx] = colors_rgb[c, 1]
        b[idx] = colors_rgb[c, 2]
    rgb = np.stack([r, g, b], axis=2)
    return rgb


def draw_segmentation(
    img: np.ndarray,
    mask: np.ndarray,
    idx_name_dict: Dict[int, str],
    colors: List = None,
    title: str = '',
    ax: plt.Axes = None,
    figsize: Tuple[int, int] = (16, 8),
):
    """Overlay per-class contours and labels of `mask` on top of `img`.

    NOTE(review): `img.size` is unpacked as (width, height), so `img` is
    expected to be a PIL Image here (unlike `draw_instances`, which also
    accepts arrays) — confirm against callers.
    """
    if colors is None:
        colors = generate_colormap(len(idx_name_dict) + 1)
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    width, height = img.size
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)
    out_image = np.array(img).astype(np.uint8)
    for cat in np.unique(mask)[1:]:
        mask_cat = (mask == cat)
        cat_name = idx_name_dict[cat]
        color = colors[cat]
        # draw text in the center (defined by median) when box is not drawn
        # median is less sensitive to outliers.
        text_pos = np.median(mask_cat.nonzero(), axis=1)[::-1] - 20
        # horiz_align = "left"
        lighter_color = change_color_brightness(color, brightness_factor=0.7)
        font_size = 10
        draw_text(ax, cat_name, text_pos, font_size, horizontal_alignment='left')
        padded_mask = np.zeros((mask_cat.shape[0] + 2, mask_cat.shape[1] + 2),
                               dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask_cat
        contours = measure.find_contours(padded_mask, 0.5)
        for verts in contours:
            verts = np.fliplr(verts) - 1
            p = Polygon(
                verts,
                facecolor=color,
                edgecolor=lighter_color,  # 'black',
                fill=True,
                alpha=.5)
            ax.add_patch(p)
    ax.imshow(out_image)
    return ax
import numpy as np
from scipy import *
from scipy.sparse import *
import operator
# NOTE: `from itertools import izip` was removed — izip does not exist in
# Python 3 (zip is lazy) and the name was never used in this module.


def sort_dic_by_value(dic, reverse=False):
    """Return the dict's items as a list of (key, value) pairs sorted by value."""
    # Python 3 port: dict.iteritems() no longer exists; items() is equivalent.
    return sorted(dic.items(), key=operator.itemgetter(1), reverse=reverse)


# Maximum value of a dictionary
def dict_max(dic):
    """Return (max_value, key_of_max_value) for `dic`, or 0 if `dic` is empty.

    Note: if several keys share the maximum value, an arbitrary one of them is
    returned, because the inverted value->key mapping keeps only one key per
    value.
    """
    aux = dict(map(lambda item: (item[1], item[0]), dic.items()))
    if not aux.keys():
        return 0
    max_value = max(aux.keys())
    return max_value, aux[max_value]


# ----------
# Dot products that works for sparse matrix as well
# Taken from:
# http://old.nabble.com/Sparse-matrices-and-dot-product-td30315992.html
# ----------
def spdot(A, B):
    """The same as np.dot(A, B), except it works even if A or B or both
    might be sparse."""
    if issparse(A) and issparse(B):
        return A * B
    elif issparse(A) and not issparse(B):
        return (A * B).view(type=B.__class__)
    elif not issparse(A) and issparse(B):
        return (B.T * A.T).T.view(type=A.__class__)
    else:
        return np.dot(A, B)


# ----------
# Gets a perpendicular line in 2D
# ----------
def perp_2d(a):
    """Return a vector perpendicular to the 2-D vector `a` (up to scale):
    (1/a0, 1/a1) * (-1, 1), whose dot product with `a` is zero."""
    res = 1. / a
    res = res[:, ] * [-1, 1]
    return res


def l2norm(a):
    """L2 (Frobenius-style) norm of a 2-D array, accumulated column by column."""
    value = 0
    # Python 3 port: xrange -> range.
    for i in range(a.shape[1]):
        value += np.dot(a[:, i], a[:, i])
    return np.sqrt(value)


def l2norm_squared(a):
    """Squared L2 norm of a 2-D array (sum of squared entries)."""
    value = 0
    for i in range(a.shape[1]):
        value += np.dot(a[:, i], a[:, i])
    return value


# ----------
# Normalizes an array to sum to one, either column wize, or row wize or the full array.
# Column wize - 0 default
# Rown wize - 1 default
# All - 2 default
# ----------
def normalize_array(a, direction="column"):
    """Normalize `a` so that each column ("column"), each row ("row"), or the
    whole array ("all") sums to one. NaNs produced by zero sums are mapped to
    0 via np.nan_to_num. An unknown `direction` prints an error and returns an
    unmodified copy."""
    b = a.copy()
    if direction == "column":
        sums = np.sum(b, 0)
        return np.nan_to_num(b / sums)
    elif direction == "row":
        sums = np.sum(b, 1)
        return np.nan_to_num((b.transpose() / sums).transpose())
    elif direction == "all":
        sums = np.sum(b)
        return np.nan_to_num(b / sums)
    else:
        # Python 3 port: print statement -> print() function.
        print("Error non existing normalization")
        return b
import os
import torch
import torch.nn as nn
import torchvision.transforms.functional as tvf
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader
import gdown
from PIL import Image
import json
from .unet import Unet
from .dataset import NoisyDataset
from PIL import Image
import numpy as np
import random
from PIL import Image, ImageDraw, ImageFont
from string import ascii_letters

root_dir = os.path.dirname(os.path.realpath(__file__))
__modes__ = ["train", "test", "inference"]
__noises__ = ["gaussian", "text"]


class Noise2Noise:
    '''
    Noise2Noise class.

    Wraps a U-Net denoiser: depending on `mode` it trains on a folder of
    images, runs a batch test over a folder, or stays idle for single-image
    `inference` calls.
    '''

    def __init__(self, noise, data_path="", show=False, mode='inference'):
        '''Initialise class.

        :param noise: noise type, one of __noises__ ("gaussian" or "text")
        :param data_path: folder of images (used for "train"/"test" modes)
        :param show: if True, display source/denoised images in "test" mode
        :param mode: one of __modes__ ("train", "test", "inference")
        '''
        if noise not in __noises__:
            raise ValueError("{} not supported".format(noise))
        if mode not in __modes__:
            raise ValueError("{} not supported".format(mode))
        print("Initialising Noise2Noise Model")
        self.show = show
        self.noise = noise
        self.data_path = data_path
        self.mode = mode
        self.crop_size = 320
        if torch.cuda.is_available():
            self.map_location = 'cuda'
        else:
            self.map_location = 'cpu'
        try:
            self.model = Unet(in_channels=3)
            self.load_model()
        except Exception as err:
            # NOTE(review): exit() here kills the whole process on any model
            # load failure; consider re-raising instead.
            print("Error at {}".format(err))
            exit()
        if mode == 'test':
            imgs = self.format_data(data_path)
            # Note: this rebinds self.save_path from a method to the string it
            # returns; test() below relies on the string value.
            self.save_path = self.save_path()
            self.check_weights()
            self.model.eval()
            self.test(imgs)
        elif mode == "inference":
            self.model.eval()
        else:
            self.loss = nn.MSELoss()
            self.optim = Adam(self.model.parameters(), lr=1e-3)
            self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim,
                                                            factor=0.5,
                                                            verbose=True)
            train_loader = self.load_dataset(data_path)
            self.train(train_loader)

    def format_data(self, data_path):
        '''Load every .jpg/.png in `data_path`, square-crop to the shorter
        side and resize to (crop_size, crop_size). Returns a list of PIL
        images.'''
        imgs_path = []
        imgs = []
        for file in os.listdir(data_path):
            if file.endswith(".jpg") or file.endswith(".png"):
                imgs_path.append(os.path.join(data_path, file))
        # Cropping Images
        for file in imgs_path:
            img = Image.open(file)
            w, h = img.size
            m = min(w, h)
            img = tvf.crop(img, 0, 0, m, m)
            img = tvf.resize(img, (self.crop_size, self.crop_size))
            imgs.append(img)
        return imgs

    def check_weights(self):
        '''Download pretrained weights for the current noise type if missing.'''
        if os.path.exists(root_dir + "/weights/n2n-{}.pt".format(self.noise)):
            print("Found weights")
        else:
            print("Downloading weights")
            self.download_weights()

    def download_weights(self):
        '''Fetch the weight file from Google Drive (id from the config json).'''
        with open(root_dir + "/config/weights_download.json") as fp:
            json_file = json.load(fp)
        if not os.path.exists(root_dir + "/weights/"):
            os.mkdir(root_dir + "/weights/")
        url = 'https://drive.google.com/uc?id={}'.format(
            json_file['n2n-{}.pt'.format(self.noise)])
        gdown.download(url, root_dir + "/weights/n2n-{}.pt".format(self.noise),
                       quiet=False)

    def load_model(self):
        '''Load the pretrained state dict into self.model.'''
        self.check_weights()
        ckpt_dir = root_dir + "/weights/n2n-{}.pt".format(self.noise)
        self.model.load_state_dict(torch.load(ckpt_dir, self.map_location))

    def load_dataset(self, img):
        '''Wrap the image folder in a NoisyDataset and return its DataLoader.'''
        dataset = NoisyDataset(img, self.noise, crop_size=256)
        train_loader = DataLoader(dataset, batch_size=5)
        return train_loader

    def save_path(self):
        '''
        Directory for output of model
        '''
        save_path = os.path.join(root_dir, 'Output')
        if not os.path.isdir(save_path):
            print("Making dir for denoised images")
            os.mkdir(save_path)
        print("Saving at {}".format(save_path))
        return save_path

    def crop_image(self, img):
        '''
        Crops the image to a square of size (crop_size, crop_size)
        Input: img of type PIL.Image
        Output: Cropped image of type PIL.Image
        '''
        w, h = img.size
        m = min(w, h)
        img = tvf.crop(img, 0, 0, m, m)
        img = tvf.resize(img, (320, 320))
        return img

    def gaussian_noise(self, img):
        '''Add Gaussian noise (random sigma in [20, 50)) to a PIL image.

        NOTE(review): the noise mean is 10, not 0 — confirm this offset is
        intentional.'''
        w, h = img.size
        c = len(img.getbands())
        sigma = np.random.uniform(20, 50)
        gauss = np.random.normal(10, sigma, (h, w, c))
        noisy = np.array(img) + gauss
        # Values less than 0 become 0 and more than 255 become 255
        noisy = np.clip(noisy, 0, 255).astype(np.uint8)
        img = Image.fromarray(noisy)
        return img

    def add_text(self, img):
        '''Overlay 5-15 random strings at random positions/colors on a copy
        of the image.

        NOTE(review): ImageFont.truetype('Arial.ttf') assumes the font is
        resolvable on this system — fails on most Linux setups.'''
        w, h = img.size
        c = len(img.getbands())
        im = img.copy()
        draw = ImageDraw.Draw(im)
        for i in range(random.randint(5, 15)):
            font_type = ImageFont.truetype(font='Arial.ttf',
                                           size=np.random.randint(10, 20))
            len_text = np.random.randint(4, 20)
            text = ''.join(
                random.choice(ascii_letters) for i in range(len_text))
            x = np.random.randint(0, w)
            y = np.random.randint(0, h)
            col = tuple(np.random.randint(0, 255, c))
            draw.text((x, y), text, fill=col, font=font_type)
        return im

    def test(self, imgs):
        '''
        Input: List of images

        Noises each image, denoises it with the model, and saves both the
        noisy source and the result under self.save_path.
        '''
        source_imgs = []
        denoised_imgs = []
        for source in imgs:
            if self.noise == 'gaussian':
                source = self.gaussian_noise(source)
            else:
                source = self.add_text(source)
            source_imgs.append(source)
            source = torch.unsqueeze(tvf.to_tensor(source), dim=0)
            output = self.model(source)
            denoised = tvf.to_pil_image(torch.squeeze(output))
            denoised_imgs.append(denoised)
        # Save images to directory
        for i in range(len(source_imgs)):
            source = source_imgs[i]
            denoised = denoised_imgs[i]
            source.save(os.path.join(self.save_path, 'source_{}.png'.format(i + 1)))
            denoised.save(
                os.path.join(self.save_path, 'denoised_{}.png'.format(i + 1)))
            if self.show == True:
                source.show()
                denoised.show()

    def train(self, train_loader):
        '''Run a short (2-epoch) MSE training loop over the loader.'''
        for epoch in range(2):
            print("Epoch {}/{}".format(epoch + 1, 2))
            for batch, (source, target) in enumerate(train_loader):
                denoised = self.model(source)
                loss = self.loss(denoised, target)
                print("Loss = ", loss.item())
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()

    def inference(self, img, save=None, show=None):
        '''Noise a single image (path, BGR ndarray, or PIL image), denoise it
        and optionally save/show the result. Returns the denoised image (PIL
        if shown/saved, else a CHW tensor).'''
        print("Running inference")
        if isinstance(img, str):
            if os.path.exists(img):
                img_name = os.path.basename(img)
                img = Image.open(img)
            else:
                raise FileNotFoundError("2", img)
        elif isinstance(img, np.ndarray):
            # BUGFIX: the original called cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            # but cv2 was never imported (NameError). BGR -> RGB is simply a
            # channel reversal, which needs no extra dependency.
            img = Image.fromarray(img[:, :, ::-1])
        elif isinstance(img, Image.Image):
            pass
        if self.noise == 'gaussian':
            img = self.gaussian_noise(img)
        else:
            img = self.add_text(img)
        img = img.resize((320, 320))
        img.save("Noised.png")
        noisy_source = torch.unsqueeze(tvf.to_tensor(img), dim=0)
        denoised_source = self.model(noisy_source)
        denoised_source = torch.squeeze(denoised_source, dim=0)
        # The original condition was `show or save is not None and not False`;
        # `and not False` is a no-op, so this is the equivalent simplification.
        if show or save is not None:
            denoised_source = tvf.to_pil_image(denoised_source)
        if save is not None and save is not False:
            denoised_source.save(save)
        if show:
            print("Show: ", show)
            denoised_source.show()
        return denoised_source
import pathlib
import numpy as np
import pytest
from neatmesh.analyzer import Analyzer3D
from neatmesh.reader import assign_reader

h5py = pytest.importorskip("h5py")


def test_hex_one_cell():
    """A single unit-cube hex cell: total volume is 1 and all six faces are square."""
    mesh_file = pathlib.Path(__file__).resolve().parent / "meshes" / "one_hex_cell.med"
    analyzer = Analyzer3D(assign_reader(mesh_file))

    # Run the full analysis pipeline before inspecting derived quantities.
    for step in (analyzer.count_cell_types, analyzer.analyze_cells,
                 analyzer.analyze_faces):
        step()

    assert np.isclose(np.sum(analyzer.cells_volumes), 1.0)
    assert np.allclose(analyzer.face_aspect_ratios, np.ones(shape=(6,)))
import dask.dataframe as ddf
import dask.multiprocessing
import numpy as np
#import os, psutil

# Read every affiliation column as plain object (string) dtype.
_OBJECT_COLUMNS = (
    'Email', 'Affiliation', 'Department', 'Institution', 'ZipCode',
    'Location', 'Country', 'City', 'State', 'AffiliationType',
)
types = {name: object for name in _OBJECT_COLUMNS}

df = ddf.read_csv('OA04_Affiliations.csv', dtype=types)
print(df.head())

# Keep only the columns of interest.
chunk_df = df[["PMID", "Email", "Affiliation", "Department", "Institution"]]
print(chunk_df.head())

# Treat empty e-mail strings as missing, then drop those rows.
chunk_df = chunk_df.mask(chunk_df['Email'] == '', np.nan)
print(chunk_df.head())

chunk_df = chunk_df.dropna(subset=['Email'])
print(chunk_df.head())

chunk_df.to_csv("OA04_Affiliations_With_Emails.csv", single_file=True)
#process = psutil.Process(os.getpid())
#print(process.memory_info().vms)
from typing import List, Tuple, Any
from six import int2byte
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import gym
import sys
import copy
from collections import deque
import random
import pandas as pd


def construct_model(input_shape=(5,)) -> tf.keras.Model:
    """Build a tiny MLP: Dense(16, relu) followed by a single linear output."""
    net_in = tf.keras.Input(shape=input_shape)
    hidden = layers.Dense(16, activation="relu")(net_in)
    net_out = layers.Dense(1)(hidden)
    return tf.keras.Model(inputs=net_in, outputs=net_out)


def sample1():
    """Compile the model, print its summary, and run it on a pickled input."""
    q_net = construct_model()
    q_net.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss=tf.keras.losses.Huber())
    q_net.summary()
    x = pd.read_pickle("x.pkl")
    print(x)
    y = q_net(x)
    print(y)


def sample():
    """Print an array in its original, flat, and column-vector shapes."""
    arr = np.array([1, 2, 3, 4])
    print(arr)
    for new_shape in ((4,), (4, 1)):
        arr = arr.reshape(new_shape)
        print(arr)


def play():
    """Roll out 10 random-action CartPole episodes with rendering."""
    env = gym.make("CartPole-v0")
    for i in range(10):
        env.reset()
        for t in range(1000):
            env.render()
            result = env.step(env.action_space.sample())
            print(result)
            observation, reward, done, info = result
            if done:
                print(f"Episode {i} finished after {t} timesteps")
                break
    env.close()


def inputs():
    """Block forever (placeholder busy-wait)."""
    while True:
        pass


def test2():
    """Print the CartPole action space."""
    print(gym.make("CartPole-v0").action_space)


if __name__ == "__main__":
    test2()
import socket
from BD_2 import BancoDeDados
from datetime import datetime
from datetime import timedelta
import numpy as np


def dataHora():
    '''
    :return: the PC's current date and time as a ["dd/mm/YYYY", "HH:MM:SS"] pair
    '''
    data_e_hora_atuais = datetime.now()
    return data_e_hora_atuais.strftime("%d/%m/%Y %H:%M:%S").split()


def recebe_msg(con):
    '''
    :param con: the currently open socket connection
    :return: the received message, as a list of whitespace-separated words,
        or None if the peer disconnects before sending the b'fim' terminator
    '''
    m = ''
    msg = con.recv(1024)
    m = str(msg, 'cp437').split()
    # Keep reading until a chunk containing the b'fim' terminator arrives.
    # NOTE: `m` is overwritten on each read, so only the LAST chunk is kept.
    while not (b'fim' in msg):
        msg = con.recv(1024)
        if not msg :
            return
        m = str(msg, 'cp437').split()  # decode the message (cp437)
    con.close()
    return m


def envia_pro_BD(tipo,registros):
    '''
    :param tipo: record type - truthy (1): air-controller record, falsy (0): sensor records
        (NOTE(review): the docstring originally said "1: Control, 2: Sensors",
        but callers pass 0/1 — verify against controlador())
    :param registros: the data to insert into the database; a single message
        list for controller records, or the whole {ip: [messages]} dict for
        sensor records
    :return:
    '''
    bd = BancoDeDados()
    if(tipo):
        # Controller record: registros[5] holds the previous (date, time) pair.
        aux = registros[5]
        ultimo = aux[1].split(':')
        #import pdb; pdb.set_trace()
        ultimo = timedelta(days = 0, hours = int(ultimo[0]), minutes = int(ultimo[1]),seconds=int(ultimo[2]))
        novo = registros[4].split(':')
        novo = timedelta(days = 0, hours = int(novo[0]), minutes = int(novo[1]),seconds=int(novo[2]))
        result = novo - ultimo
        # Within 20 seconds of the previous record: update it; otherwise insert.
        if np.abs(result.total_seconds()) < 20:
            bd.alteraDados_Controle(registros[0], aux[0], aux[1], registros[4], registros[1])
        else:
            bd.insereDados_Controle(registros[0],registros[3],registros[4],registros[1],registros[2])
    else:
        for i in registros:
            if len(registros[i]) == 6: # condition to send to the server (6 buffered readings)
                med_temp = 0
                med_umi = 0
                sum_corrente = 0
                #import pdb; pdb.set_trace()
                for j in registros[i]:
                    # Abort if any reading is missing temperature/current/humidity.
                    if not (j[1] or j[2] or j[3]):
                        return
                    med_temp += float(j[1])
                    med_umi += float(j[3])
                    sum_corrente += float(j[2])
                # Average temperature/humidity over the 6 readings; current is summed.
                med_temp = round(med_temp /6)
                med_umi = round(med_umi /6)
                # `j` is the last message of the batch; j[0] is the node id,
                # j[4]/j[5] the date and time stamped by controlador().
                if bd.buscaNo(j[0]):
                    bd.insereDados_Sensores(j[0], j[4], j[5], str(med_temp), str(sum_corrente), str(med_umi))
                else:
                    bd.insereNodes(j[0], tipo)
                    bd.insereDados_Sensores(j[0], j[4], j[5], str(med_temp), str(sum_corrente), str(med_umi))
                registros[i] = []


def controlador(msg):
    """
    Responsible for filtering incoming messages: strips the control words and
    timestamps the message. Returns (type, message).
    """
    msg.pop(-1) # drop the last word of the message, which is a control word
    t = msg.pop(0) # drop the first word, which is the message-type code
    if t == '0': # nodes collecting temperature/current
        data, horario = dataHora() # timestamp
        msg.append(data)
        msg.append(horario)
        #print(msg)
    elif t == '1': # air-conditioner controller node
        data, horario = dataHora() # timestamp
        msg.append(data)
        msg.append(horario)
    return int(t),msg


#-------------------------main---------------------------------
conectados = [] # list of currently connected nodes
HOST = '10.94.15.69' # server IP address
PORT = 9999 # port the server listens on
sensores = {}  # per-client-IP buffer of sensor messages
controle = ''  # address of the controller client
tempo_controle = dataHora()  # timestamp of the last controller record
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
orig = (HOST, PORT)
tcp.bind(orig)
tcp.listen(7)
print("Servidor On")
print(dataHora())
while True:
    try:
        try:
            con, cliente = tcp.accept()
            a = recebe_msg(con)
        except Exception as err:
            # Accept/receive failed: close the connection (best effort) and
            # append the error to the log file, then keep serving.
            try:
                con.close()
            except Exception as err:
                print("Não conseguiu fechar a conexao")
            d, h = dataHora()
            e = " Error: {0} no dia".format(err) + d + " as " + h
            arq = open('log.txt', 'a+')
            arq.write(e)
            arq.close()
            continue
        if not a:
            continue
        t,msg = controlador(a) # t: message type; msg: the message itself
        print(msg[0])
        if t == 1:
            # Controller message: attach the previous timestamp, store, and
            # remember this record's (date, time) for the next comparison.
            msg.append(tempo_controle)
            controle = cliente
            envia_pro_BD(t, msg)
            tempo_controle = (msg[3] + " " + msg[4]).split()
        if cliente[0] in sensores and t == 0: # client IP already known
            sensores[cliente[0]].append(msg)
            #if len(sensores[cliente[0]]) == 6:
            #print("Sensor" , msg)
            envia_pro_BD(t, sensores)
        elif t == 0:
            # First message from this IP: start a new buffer.
            sensores.update({cliente[0]: []})
            sensores[cliente[0]].append(msg)
            envia_pro_BD(t, sensores)
    except Exception as err:
        # Any other failure: log it and keep the server loop alive.
        d,h = dataHora()
        e = " Error: {0} no dia".format(err) + d + " as " +h +"\n"
        arq = open('log.txt', 'a+')
        arq.write(e)
        arq.close()
        continue
r""" .. _conditional: Conditional Independence Testing ******************************** Conditional independence testing is similar to independence testing but introduces the presence of a third conditioning variable. Consider random variables :math:`X`, :math:`Y`, and :math:`Z` with distributions :math:`F_X`, :math:`F_Y`, and :math:`F_Z`. When performing conditional independence testing, we are evaluating whether :math:`F_{X, Y|Z} = F_{X|Z}F_{Y|Z}`. Specifically, we are testing .. math:: H_0 &: X \perp \!\!\! \perp Y \mid Z \\ H_A &: X \not\!\perp\!\!\!\perp Y \mid Z Like all the other tests within hyppo, each method has a :func:`statistic` and :func:`test` method. The :func:`test` method is the one that returns the test statistic and p-values, among other outputs, and is the one that is used most often in the examples, tutorials, etc. Specifics about how the test statistics are calculated for each in :class:`hyppo.conditional` can be found the docstring of the respective test. Here, we overview subsets of the types of conditional tests we offer in hyppo, and special parameters unique to those tests. Now, let's look at unique properties of some of the tests in :mod:`hyppo.conditional`: """ ######################################################################################## # Fast Conditional Independence Test (FCIT) # --------------------------------------------- # # The **Fast Conditional Independence Test (FCIT)** is a non-parametric conditional # independence test. The test is based on a weak assumption that if the conditional # independence alternative hypothesis is true, then prediction of the independent # variable with only the conditioning variable should be just as accurate as # prediction of the independent variable using the dependent variable conditioned on # the conditioning variable. # More details can be found in :class:`hyppo.conditional.FCIT` # # .. 
note:: # # :Pros: - Very fast due on high-dimensional data due to parallel processes # :Cons: - Heuristic method; above assumption, though weak, is not always true # # The test uses a regression model to construct predictors for the indendent variable. # By default, the regressor used is the decision tree regressor but the user can also # specify other forms of regressors to use along with a set of hyperparameters to be # tuned using cross-validation. Below is an example where the null hypothesis is true: import numpy as np from hyppo.conditional import FCIT from sklearn.tree import DecisionTreeRegressor np.random.seed(1234) dim = 2 n = 100000 z1 = np.random.multivariate_normal(mean=np.zeros(dim), cov=np.eye(dim), size=(n)) A1 = np.random.normal(loc=0, scale=1, size=dim * dim).reshape(dim, dim) B1 = np.random.normal(loc=0, scale=1, size=dim * dim).reshape(dim, dim) x1 = (A1 @ z1.T + np.random.multivariate_normal(mean=np.zeros(dim), cov=np.eye(dim), size=(n)).T) y1 = (B1 @ z1.T + np.random.multivariate_normal(mean=np.zeros(dim), cov=np.eye(dim), size=(n)).T) model = DecisionTreeRegressor() cv_grid = {"min_samples_split": [2, 8, 64, 512, 1e-2, 0.2, 0.4]} stat, pvalue = FCIT(model=model, cv_grid=cv_grid).test(x1.T, y1.T, z1) print("Statistic: ", stat) print("p-value: ", pvalue) ######################################################################################## # Kernel Conditional Independence Test (KCI) # --------------------------------------------- # # The Kernel Conditional Independence Test (KCI) is a conditional independence test # that works based on calculating the RBF kernels of distinct samples of data. # The respective kernels are then normalized and multiplied together to determine # the test statistic via the trace of the matrix product. The test then employs # a gamma approximation based on the mean and variance of the independent # sample kernel values to determine the p-value of the test. 
# More details can be found in :class:`hyppo.conditional.KCI`
#
# .. note::
#
#    :Pros: - Very fast on high-dimensional data due to simplicity and approximation
#    :Cons: - Dispute in literature as to ideal theta value, loss of accuracy on very large datasets
#
# Below is a linear example where we fail to reject the null hypothesis:

import numpy as np
from hyppo.conditional import KCI
from hyppo.tools import linear

np.random.seed(123456789)
x, y = linear(100, 1)
stat, pvalue = KCI().test(x, y)
print("Statistic: ", stat)
print("p-value: ", pvalue)
from abc import ABCMeta, abstractmethod

import numpy as np

from . import dataset


class ThreatModel(metaclass=ABCMeta):
    '''
    Abstract description of which perturbed images an attack is allowed to
    produce relative to an original image.
    '''

    @abstractmethod
    def check(self, original, perturbed):
        '''
        Returns whether the perturbed image is a valid perturbation of the
        original under the threat model. `original` and `perturbed` are
        numpy arrays of the same dtype and shape.
        '''
        raise NotImplementedError

    @property
    @abstractmethod
    def targeted(self):
        '''
        Returns whether the threat model only includes targeted attacks
        (requiring the attack to be capable of synthesizing targeted
        adversarial examples).
        '''
        raise NotImplementedError


class Or(ThreatModel):
    '''
    A union of threat models: a perturbation is valid if at least one member
    model accepts it.
    '''

    def __init__(self, *threat_models):
        self._threat_models = threat_models

    def check(self, original, perturbed):
        # Valid under the union as soon as one member accepts. An empty
        # union accepts nothing.
        for model in self._threat_models:
            if model.check(original, perturbed):
                return True
        return False

    @property
    def targeted(self):
        # The union is targeted only if every member is targeted. An empty
        # union is vacuously targeted.
        for model in self._threat_models:
            if not model.targeted:
                return False
        return True


class And(ThreatModel):
    '''
    An intersection of threat models: a perturbation is valid only if every
    member model accepts it.
    '''

    def __init__(self, *threat_models):
        self._threat_models = threat_models

    def check(self, original, perturbed):
        # Every member must accept. An empty intersection accepts everything.
        for model in self._threat_models:
            if not model.check(original, perturbed):
                return False
        return True

    @property
    def targeted(self):
        # Targeted as soon as any member is targeted.
        for model in self._threat_models:
            if model.targeted:
                return True
        return False


class Lp(ThreatModel):
    '''
    Bounded L_p perturbation.

    Given a `p` and `epsilon`, x' is a valid perturbation of x if the
    following holds:

    || x - x' ||_p <= \epsilon
    '''

    _SLOP = 0.0001  # to account for rounding errors

    def __init__(self, p, epsilon, targeted=False):
        self._p = p
        self._epsilon = epsilon
        self._targeted = targeted

    def check(self, original, perturbed):
        # Compare the images as flat vectors.
        flat_original = np.ndarray.flatten(original)
        flat_perturbed = np.ndarray.flatten(perturbed)

        # Reject anything that is not (approximately) a valid image, i.e.
        # with pixel values outside [0, 1] beyond rounding slop.
        if np.min(flat_perturbed) < -self._SLOP:
            return False
        if np.max(flat_perturbed) > 1 + self._SLOP:
            return False

        distance = np.linalg.norm(flat_original - flat_perturbed, ord=self._p)
        return distance <= self._epsilon + self._SLOP

    @property
    def targeted(self):
        return self._targeted

    @property
    def p(self):
        return self._p

    @property
    def epsilon(self):
        return self._epsilon


class L0(Lp):
    '''Bounded L_0 perturbation (number of changed components).'''

    def __init__(self, epsilon, targeted=False):
        super().__init__(p=0, epsilon=epsilon, targeted=targeted)


class L1(Lp):
    '''Bounded L_1 perturbation.'''

    def __init__(self, epsilon, targeted=False):
        super().__init__(p=1, epsilon=epsilon, targeted=targeted)


class L2(Lp):
    '''Bounded L_2 perturbation.'''

    def __init__(self, epsilon, targeted=False):
        super().__init__(p=2, epsilon=epsilon, targeted=targeted)


class Linf(Lp):
    '''
    Bounded L_inf perturbation.

    Given a `p` and `epsilon`, x' is a valid perturbation of x if the
    following holds:

    || x - x' ||_\infty <= \epsilon

    >>> model = Linf(0.1)
    >>> x = np.array([0.1, 0.2, 0.3])
    >>> model.check(x, x)
    True
    >>> model.targeted
    False
    >>> model = Linf(0.1, targeted=True)
    >>> model.targeted
    True
    >>> y = np.array([0.1, 0.25, 0.32])
    >>> model.check(x, y)
    True
    >>> z = np.array([0.3, 0.2, 0.3])
    >>> model.check(x, z)
    False
    '''

    def __init__(self, epsilon, targeted=False):
        super().__init__(p=np.inf, epsilon=epsilon, targeted=targeted)
#!/usr/bin/env python3
# Copyright 2020 Christian Henning, Maria Cervera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Controller for simulations (:mod:`main`)
----------------------------------------

The module :mod:`main` is an executable script that controls the simulations
(i.e., the training and testing of MNIST digit classification tasks).

For more usage information, check out:

.. code-block:: console

  $ python3 main.py --help

.. autosummary::

    main.train
    main.test
    main.run
"""
import argparse
import numpy as np
import random
import torch

# Printed at import time so logs record which torch version produced a run.
print(torch.__version__)

import lib.spiking_functions as sf
from lib.snn import SNN
from lib import utils


def train(args, device, x, y, net):
    r"""Trains the given network on the MNIST dataset.

    The :mod:`main.train` method takes data (x, y) and a spiking neural net,
    puts the net in training mode, and sets up the optimiser. Then, for each
    epoch, it runs through the whole MNIST dataset once, updating the weights
    once every mini-batch, after the images in this mini-batch have been
    converted to spike trains.

    Note, the ``Function`` :func:`lib.spiking_functions.loss_on_spikes` is
    used to compute the loss.

    Args:
        args (argparse.Namespace): The command-line arguments.
        device (torch.device): The PyTorch device to be used.
        x (torch.Tensor): The training inputs.
        y (torch.Tensor): The training targets.
        net (lib.snn.SNN): The spiking neural network.

    """
    print('Training network ...')

    net.train()  # Puts the SNN in training mode

    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr,
                                 betas=(0.9,0.99))

    for e in range(args.epochs):
        # utils.sparse_data_generator yields sparse spike-train mini-batches;
        # they are densified before being fed to the network.
        for inputs, targets in utils.sparse_data_generator(x, y, args):
            inputs, targets = inputs.to(device), targets.to(device)
            # net.forward returns per-layer voltages and spikes; only the
            # last (output) layer is used for the loss.
            voltage, spikes = net.forward(inputs.to_dense())
            output_voltage = voltage[-1]  # NOTE(review): unused below.
            output_spikes = spikes[-1]
            optimizer.zero_grad()
            loss = sf.loss_on_spikes(output_spikes, targets)
            # Compute spike regularization on hidden layers
            loss += args.spike_reg*sf.spike_regularizer(spikes[:-1])
            loss.backward()
            optimizer.step()
        # Note: the reported loss is that of the last mini-batch of the epoch.
        print('Epoch %i -- loss = %.3f.'%(e+1, loss))

    print('Training network ... Done')


def test(args, device, x, y, net):
    r"""Tests a trained network by computing the classification accuracy
    on the test set.

    Args:
        (....): See docstring of function :func:`train`.
        x (torch.Tensor): The testing inputs.
        y (torch.Tensor): The testing targets.

    Returns:
        (float): The classification accuracy for the test data (x, y) when
        using the network ``net``. Note, the ``Function``
        :func:`lib.spiking_functions.accuracy_on_spikes` is used to compute
        the accuracy.

    """
    net.eval()  # Evaluation mode; gradients are also disabled below.

    with torch.no_grad():
        num_samples = 0
        accu = 0.

        # Accumulate a sample-weighted accuracy so the final (possibly
        # smaller) batch does not bias the mean.
        for inputs, targets in utils.sparse_data_generator(x, y, args):
            inputs, targets = inputs.to(device), targets.to(device)
            batch_size = int(inputs.shape[0])
            num_samples += batch_size

            voltage, spikes = net.forward(inputs.to_dense())
            output_voltage = voltage[-1]  # NOTE(review): unused below.
            output_spikes = spikes[-1]

            batch_accu = sf.accuracy_on_spikes(output_spikes, targets)
            accu += batch_size * batch_accu

        accu /= num_samples

        print('Test accuracy: %.2f%%.'%(accu * 100))
        return float(accu.cpu().detach().numpy())


def run():
    r"""Runs the script.

    The :mod:`main.run` method performs the following actions:

    - Parses command-line arguments
    - Sets random seeds to ensure deterministic computation
    - Loads MNIST dataset
    - Initiates training process
    - Tests accuracy of final network
    - Plots weight histograms if required

    """
    ### Parse CLI arguments.
    parser = argparse.ArgumentParser(description='MNIST classification with ' +
                                     'spiking neural networks.')

    dgroup = parser.add_argument_group('Neuronal dynamics options')
    dgroup.add_argument('--tau_mem', type=float, default=10e-3,
                        help='Membrane time constant (in s). Default: ' +
                             '%(default)s.')
    dgroup.add_argument('--tau_syn', type=float, default=12e-3,
                        help='Synaptic time constant (in s). Default: ' +
                             '%(default)s.')
    dgroup.add_argument('--tau_rise', type=float, default=1e-3,
                        help='Synaptic rise time constant (in s). Default: ' +
                             '%(default)s.')
    dgroup.add_argument('--u_rest', type=float, default=0,
                        help='Resting membrane potential (in volts). ' +
                             'Default: %(default)s.')
    dgroup.add_argument('--u_threshold', type=float, default=1,
                        help='Threshold voltage for spike generation (in ' +
                             'volts). Default: %(default)s.')
    dgroup.add_argument('--R', type=float, default=1e4,
                        help='Membrane resistance (in ohms). ' +
                             'Default: %(default)s.')

    sgroup = parser.add_argument_group('Spiking options')
    sgroup.add_argument('--spike_reg', type=float, default=1e-5,
                        help='Strength of the spike rate regularization. ' +
                             'Default: %(default)s.')

    tgroup = parser.add_argument_group('Training options')
    tgroup.add_argument('--epochs', type=int, metavar='N', default=2,
                        help='Number of training epochs. ' +
                             'Default: %(default)s.')
    tgroup.add_argument('--batch_size', type=int, metavar='N', default=256,
                        help='Training batch size. Default: %(default)s.')
    tgroup.add_argument('--lr', type=float, default=1e-3,
                        help='Learning rate of optimizer. Default: ' +
                             '%(default)s.')

    ngroup = parser.add_argument_group('Network options')
    ngroup.add_argument('--num_hidden', type=int, default=1,
                        help='Number of hidden layers in the network. ' +
                             'Default: %(default)s.')
    ngroup.add_argument('--size_hidden', type=int, default=100,
                        help='Number of units in each hidden layer of the ' +
                             'network. Default: %(default)s.')
    ngroup.add_argument('--weight_scale', type=float, default=0.2,
                        help='Scale for the initialization of the weights. ' +
                             'Default: %(default)s.')

    mgroup = parser.add_argument_group('Miscellaneous options')
    mgroup.add_argument('--delta_t', type=float, default=1e-3,
                        help='Time step size (in s). Default: %(default)s.')
    mgroup.add_argument('--t_max', type=int, default=100,
                        help='Number of time steps used for each sample. ' +
                             'Default: %(default)s.')
    mgroup.add_argument('--use_cuda', action='store_true',
                        help='Flag to enable GPU usage.')
    mgroup.add_argument('--random_seed', type=int, metavar='N', default=42,
                        help='Random seed. Default: %(default)s.')
    mgroup.add_argument('--plot_weight_hist', action='store_true',
                        help='Whether histograms of the weights before and ' +
                             'after learning should be plotted.')

    args = parser.parse_args()

    ### Ensure deterministic computation.
    # All four RNG sources (torch CPU, torch CUDA, numpy, stdlib random) are
    # seeded from the same CLI seed.
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    # Ensure that runs are reproducible even on GPU. Note, this slows down
    # training!
    # https://pytorch.org/docs/stable/notes/randomness.html
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    use_cuda = args.use_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Using cuda: ' + str(use_cuda))

    ### Generate datasets and data handlers.
    print('### Learning to classify MNIST digits with a spiking network ###')
    n_in = 784   # 28x28 MNIST images, flattened.
    n_out = 10   # One output unit per digit class.
    train_x, test_x, train_y, test_y = utils.load_MNIST()

    ### Generate network.
    n_hidden = [args.size_hidden] * args.num_hidden

    net = SNN(args, n_in=n_in, n_out=n_out, n_hidden=n_hidden).to(device)

    ### Store initial weights.
    # Copied to host memory now so they can be compared against the trained
    # weights when plotting histograms.
    initial_weights = []
    for weights in net.parameters():
        initial_weights.append(weights.clone().data.cpu().numpy())

    ### Train network.
    train(args, device, train_x, train_y, net)

    ### Test network.
    accuracy = test(args, device, test_x, test_y, net)

    ### Plot weight histograms if asked to.
    if args.plot_weight_hist:
        utils.plot_weight_hist(net.parameters(), initial_weights)


if __name__ == '__main__':
    run()
#!/usr/bin/python # # XMLMessageVacuumAddExpenditureWorld.py # # Created on: 7 March, 2011 # Author: black # # Methods for the class that keeps track of the information # specific to the commander. This is information that the # commander sends to the planner to let the planner know what # order went to a vacuum. # # This material is based on research sponsored by DARPA under agreement # number FA8750-10-2-0165. The U.S. Government is authorized to # reproduce and distribute reprints for Governmental purposes # notwithstanding any copyright notation thereon. # # The views and conclusions contained herein are those of the authors # and should not be interpreted as necessarily representing the official # policies or endorsements, either expressed or implied, of DARPA or the # U.S. Government. # # ========================================================================== # # For use by entities other than the U.S. Government the following # additional limitations apply: # # Copyright (c) 2011, Clarkson University # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of the Clarkson University nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # (license copied from http://www.opensource.org/licenses/bsd-license) # # # # from numpy import * from numpy.linalg import * from xml.dom.minidom import Document from XMLMessageVacuumIDPosBase import XMLMessageVacuumIDPosBase from XMLParser import XMLParser class XMLMessageVacuumAddExpenditureWorld (XMLMessageVacuumIDPosBase) : def __init__(self,expenditure=0.0) : XMLMessageVacuumIDPosBase.__init__(self) self.setMyInformationType(self.MESSAGE_VACUUM_WORLD_ADD_EXPENDITURE) self.setExpenditure(expenditure) def __del__(self) : pass def getExpenditure(self) : return(self.expenditure) def setExpenditure(self,value) : self.expenditure = int(value) def createObjectClass(self) : # Creates the node that contains the object class definition # and all of its children. node = self.doc.createElement("objects") self.root_node.appendChild(node) self.objectClassNode = self.doc.createElement("objectClass") node.appendChild(self.objectClassNode) nameNode = self.doc.createElement("name") nameNode.appendChild(self.doc.createTextNode("World")) self.objectClassNode.appendChild(nameNode) typeNode = self.doc.createElement("type") typeNode.appendChild(self.doc.createTextNode("Add Expenditure")) self.objectClassNode.appendChild(typeNode) self.createDimensions() def setExpenditureNode(self) : # Method to set the value of the id for this vacuum. It # indicates which vacumm this structure is associated # with. 
The value is then added to the xml tree under the # dimensions node. self.expenditureNode = self.doc.createElement("dimension") self.dimensionsNode.appendChild(self.expenditureNode) dimension = self.doc.createElement("name") node = self.doc.createTextNode("expenditure") dimension.appendChild(node) self.expenditureNode.appendChild(dimension) dimension = self.doc.createElement("value") node = self.doc.createTextNode(str(self.getExpenditure())) dimension.appendChild(node) self.expenditureNode.appendChild(dimension) def createDimensions(self): # Creates the dimensions node in the xml tree. It adds the # objectClass node as a child of the dimensions node. Finally # a "name" node is added as a child of the dimensions node. # This overrides the one in the base class because the # positions are not needed. self.dimensionsNode = self.doc.createElement("dimensions") self.objectClassNode.appendChild(self.dimensionsNode) self.setVacuumIDNode() self.setExpenditureNode() if (__name__ =='__main__') : from XMLIncomingDIF import XMLIncomingDIF network = XMLMessageMoveOrderCommanderVacuum() network.setVacuumID(3) network.setPos(2,4) network.createRootNode() #print(network.xml2Char()) network.setVacuumID(1) network.setXPos(5) network.setYPos(2) #print(network.xml2Char()) #root_node = network.root_node.cloneNode(True) #network.copyXMLTree(root_node) dif = XMLIncomingDIF() xmlString = network.xml2Char() info = dif.determineXMLInformation(xmlString) info.createRootNode() print("theXML:\n{0}".format(info.xml2Char()))
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import seaborn as sns

from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
# NOTE(review): calinski_harabaz_score was renamed calinski_harabasz_score in
# newer scikit-learn releases; this import pins the script to old versions.
from sklearn.metrics import silhouette_score, calinski_harabaz_score
from sklearn.metrics.pairwise import pairwise_distances


# For reproducibility
np.random.seed(1000)

# Download the dataset from https://archive.ics.uci.edu/ml/machine-learning-databases/00445/
# unzip and set the path to .csv file
data_path = '<YOUR_PATH>/Absenteeism_at_work.csv'

# Number of perturbed copies of the dataset used for the instability analysis.
n_perturbed = 20
n_data = []


if __name__ == '__main__':
    # Load the dataset
    df = pd.read_csv(data_path, sep=';', header=0, index_col=0).fillna(0.0)
    print(df.count())

    # Preprocess the dataset: one-hot encode the categorical columns.
    cdf = pd.get_dummies(df, columns=['Reason for absence', 'Month of absence', 'Day of the week', 'Seasons', 'Disciplinary failure', 'Education', 'Social drinker', 'Social smoker'])
    # NOTE(review): get_dummies(columns=...) already removes the original
    # columns, and drop() without axis=1 targets the row index — this line
    # looks wrong/redundant; confirm against the intended pandas version.
    cdf = cdf.drop(labels=['Reason for absence', 'Month of absence', 'Day of the week', 'Seasons', 'Disciplinary failure', 'Education', 'Social drinker', 'Social smoker']).astype(np.float64)

    # Standardize the dataset (mean-centering only; variances kept).
    ss = StandardScaler(with_std=False)
    sdf = ss.fit_transform(cdf)

    # Perform the TSNE non-linear dimensionality reduction
    tsne = TSNE(n_components=2, perplexity=15, random_state=1000)
    data_tsne = tsne.fit_transform(sdf)

    df_tsne = pd.DataFrame(data_tsne, columns=['x', 'y'], index=cdf.index)
    dff = pd.concat([cdf, df_tsne], axis=1)

    # Show the dataset
    sns.set()

    fig, ax = plt.subplots(figsize=(18, 11))

    with sns.plotting_context("notebook", font_scale=1.5):
        sns.scatterplot(x='x', y='y', size='Age', sizes=(30, 400), palette=sns.color_palette("husl", 2), data=dff, ax=ax)

    ax.set_xlabel(r'$x$', fontsize=14)
    ax.set_ylabel(r'$y$', fontsize=14)

    plt.show()

    # Perform the preliminary analysis: grid over Minkowski order p and eps,
    # collecting cluster counts, noise counts and two validity scores.
    n_clusters = []
    n_noise_points = []
    silhouette_scores = []
    calinski_harabaz_scores = []

    for p in [2, 4, 8, 12]:
        n_clusters_p = []
        n_noise_points_p = []
        silhouette_scores_p = []
        calinski_harabaz_scores_p = []

        for eps in np.arange(15, 30, 0.5):
            dst = DBSCAN(eps=eps, min_samples=3, metric='minkowski', p=p)
            Y_pred_t = dst.fit_predict(sdf)

            # DBSCAN labels are 0..k-1 with -1 for noise, so max+1 = #clusters.
            n_clusters_p.append(np.max(Y_pred_t) + 1)
            n_noise_points_p.append(np.sum(Y_pred_t == -1))
            # NOTE(review): the scores are computed on dff, which includes the
            # two t-SNE columns, while clustering used sdf — confirm intended.
            silhouette_scores_p.append(silhouette_score(dff, Y_pred_t, metric='minkowski', p=p))
            calinski_harabaz_scores_p.append(calinski_harabaz_score(dff, Y_pred_t))

        n_clusters.append(n_clusters_p)
        n_noise_points.append(n_noise_points_p)
        silhouette_scores.append(silhouette_scores_p)
        calinski_harabaz_scores.append(calinski_harabaz_scores_p)

    # Show the results of the preliminary analysis
    fig, ax = plt.subplots(4, 4, figsize=(18, 12), sharex=True)

    for idx, p in enumerate([2, 4, 8, 12]):
        x = np.arange(15, 30, 0.5)
        ax[idx, 0].plot(x, n_clusters[idx], label='p={}'.format(p))
        ax[idx, 1].plot(x, n_noise_points[idx], label='p={}'.format(p))
        ax[idx, 2].plot(x, silhouette_scores[idx], label='p={}'.format(p))
        ax[idx, 3].plot(x, calinski_harabaz_scores[idx], label='p={}'.format(p))

    ax[0, 0].set_title('Number of clusters', fontsize=14)
    ax[0, 1].set_title('Number of noise points', fontsize=14)
    ax[0, 2].set_title('Silhouette score', fontsize=14)
    ax[0, 3].set_title('Calinski-Harabasz score', fontsize=14)

    for i in range(4):
        ax[i, 0].set_yticks(np.arange(5, 60, 7))
        ax[i, 0].legend()
        ax[3, i].set_xlabel(r'$\epsilon$')

    plt.show()

    # Perform the clustering with the parameters chosen from the grid above.
    ds = DBSCAN(eps=25, min_samples=3, metric='minkowski', p=12)
    Y_pred = ds.fit_predict(sdf)

    print('Number of clusters: {}'.format(np.max(Y_pred) + 1))
    print('Number of noise points: {}'.format(np.sum(Y_pred == -1)))
    print('Silhouette score: {:.3f}'.format(silhouette_score(dff, Y_pred, metric='minkowski', p=12)))
    print('Calinski-Harabaz score: {:.3f}'.format(calinski_harabaz_score(dff, Y_pred)))

    # Show the clustering results in the t-SNE plane.
    fig, ax = plt.subplots(figsize=(18, 11))

    for i in range(np.max(np.unique(Y_pred)) + 1):
        ax.scatter(data_tsne[Y_pred == i, 0], data_tsne[Y_pred == i, 1], s=100, c=cm.Paired(i), label='C{}'.format(i + 1))

    ax.scatter(data_tsne[Y_pred == -1, 0], data_tsne[Y_pred == -1, 1], marker='x', c='black', s=150, label='Noisy')

    ax.set_xlabel(r'$x$')
    ax.set_ylabel(r'$y$')
    ax.legend()

    plt.show()

    # Describe the region x < -45
    sdff = dff[(dff.x < -45.0)]
    print(sdff[sdff.columns[0:10]].describe())

    # Describe the region x > 20 and -20 < y < 20
    sdff = dff[(dff.x > 20.0) & (dff.y > -20.0) & (dff.y < 20.0)]
    print(sdff[sdff.columns[0:10]].describe())

    # Perform the instability analysis: build n_perturbed noisy copies of the
    # data and measure how much the DBSCAN labelings disagree between copies.
    data = sdf.copy()

    data_mean = np.mean(data, axis=0)
    data_cov = np.cov(data.T) / 4.0

    for i in range(n_perturbed):
        gaussian_noise = np.random.multivariate_normal(data_mean, data_cov, size=(data.shape[0],))
        noise = gaussian_noise * np.random.uniform(0.0, 1.0, size=(data.shape[0], data.shape[1]))
        n_data.append(data.copy() + noise)

    instabilities = []

    for eps in np.arange(5.0, 31.0, 1.5):
        Yn = []

        for nd in n_data:
            ds = DBSCAN(eps=eps, min_samples=3, metric='minkowski', p=12)
            Yn.append(ds.fit_predict(nd))

        distances = []

        # Pairwise Hamming distance between the label vectors of each pair of
        # perturbed runs (the j == i term contributes 0).
        for i in range(len(Yn) - 1):
            for j in range(i, len(Yn)):
                d = pairwise_distances(Yn[i].reshape(-1, 1), Yn[j].reshape(-1, 1), 'hamming')
                distances.append(d[0, 0])

        instability = (2.0 * np.sum(distances)) / float(n_perturbed ** 2)
        instabilities.append(instability)

    # Show the results
    fig, ax = plt.subplots(figsize=(18, 8))

    ax.plot(np.arange(5.0, 31.0, 1.5), instabilities)
    ax.set_xlabel(r'$\epsilon$', fontsize=14)
    ax.set_ylabel('Instability', fontsize=14)

    plt.show()
"""Lambdata is a collection of Data Science helper functions"""

# The imports below are not used in this module directly; presumably they are
# meant to be re-exported for users of the package — TODO confirm.
import pandas as pd
import numpy as np

# Import-time side effect: lets users confirm the package loaded correctly.
print("lambdata has been successfully imported!")
#!/usr/bin/env python
# coding: utf-8

# Baxter toy-retrieval notebook export (Python 2 / ROS).
#
# Fixes applied in this revision:
#   * BaxterDetect.__findToy previously had an EMPTY try block, which is a
#     SyntaxError; the detection call is now inside the try.
#   * BaxterDetect.__sensorCallback took an extra required `side` parameter,
#     but rospy invokes callbacks with the message only, so every sensor
#     update raised TypeError.
#   * A failed retrieval was still appended to `success` because the append
#     ran after the inner loop regardless of `break`; it now sits in the
#     loop's `else` clause.
#   * `except (...), e` / `print x` replaced with the py2/py3-compatible
#     `as e` / `print(x)` forms (identical behavior under Python 2).

# In[ ]:

import os
import sys
import math

sys.path.insert(0, '../libraries')

import pprint

import rospy
from copy import deepcopy
from baxter_interface import (RobotEnable, Gripper, CameraController, Limb)
from baxter_core_msgs.srv import (SolvePositionIK, SolvePositionIKRequest)
import cv2
import cv_bridge
from cv_bridge import (CvBridge, CvBridgeError)
from geometry_msgs.msg import (PoseStamped, Pose, Point, Quaternion)
from sensor_msgs.msg import (Image, Range)
from std_msgs.msg import Header
import requests
import matplotlib.pyplot as plt
import numpy as np

from uogbaxter.detect import detect_toy

pp = pprint.PrettyPrinter(indent=4)
bridge = CvBridge()


# In[ ]:

rospy.init_node('Group7Baxter')


# In[ ]:

baxter = RobotEnable()
baxter.enable()


# In[ ]:

right_hand_camera = CameraController('right_hand_camera')


# In[ ]:

right_hand_camera.resolution = (960, 600)
right_hand_camera.gain = 0
right_hand_camera.open()


# In[ ]:

right_arm = Limb('right')


# In[ ]:

fingers = Gripper('right')
fingers.calibrate()


# ### Baxter Detect Class

# In[ ]:

class BaxterDetect(object):
    """Subscribes to the hand range sensor and camera, overlays the measured
    distance on the camera image and republishes it to Baxter's display."""

    def __init__(self):
        self.distance = 0
        sensor_topic = "/robot/range/right_hand_range/state"
        right_hand_image_topic = '/cameras/right_hand_camera/image'
        display_topic = '/robot/xdisplay'
        self.__right_sensor = rospy.Subscriber(sensor_topic, Range,
                                               callback=self.__sensorCallback,
                                               queue_size=1)
        right_hand_image_subscriber = rospy.Subscriber(
            right_hand_image_topic, Image,
            callback=self.__processImageCallback, queue_size=1)
        self.__display_publisher = rospy.Publisher(display_topic, Image,
                                                   queue_size=1)

    def __sensorCallback(self, msg):
        # rospy calls the callback with the message only; the old extra
        # `side` parameter made every invocation fail with TypeError.
        self.distance = msg.range

    def __processImageCallback(self, message):
        # convert ROS image to OpenCV image
        cv2_image = bridge.imgmsg_to_cv2(message)

        # This is where we do our image processing. We just draw a red
        # rectangle and the distance we previously got from the range sensor.
        # Note: OpenCV colors are (blue, green, red), not RGB.
        cv2.rectangle(
            cv2_image,
            pt1=(280, 200),
            pt2=(680, 400),
            color=(0, 0, 255),
            thickness=5
        )
        cv2.putText(
            cv2_image,
            text='%.2f' % self.distance,
            org=(400, 500),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=2,
            color=(255, 255, 0)
        )

        # convert OpenCV image back to ROS image
        ros_image = bridge.cv2_to_imgmsg(cv2_image)

        # publish our modified image to the display
        self.__display_publisher.publish(ros_image)

    def __findToy(self, img):
        """Ask the remote detection service which toy is in `img`.

        Returns the detection result, or None when the server is unreachable
        or no animal is found.
        """
        # These exception types come from urllib3 (used by requests); they
        # were referenced but never imported before.
        from urllib3.exceptions import NewConnectionError, MaxRetryError
        try:
            # The original export lost the try body entirely (SyntaxError);
            # the detection call belongs here.
            return detect_toy(img)
        except (TimeoutError, NewConnectionError, MaxRetryError,
                ConnectionError):
            print("Detection not possible, the SOE detection server could not be reached....")
        except:
            print("No animals found")


# In[ ]:

baxter_detect = BaxterDetect()


# In[ ]:

right_namespace = "ExternalTools/right/PositionKinematicsNode/IKService"
right_ik_service = rospy.ServiceProxy(right_namespace, SolvePositionIK)
right_ik_request = SolvePositionIKRequest()
ik_header = Header(stamp=rospy.Time.now(), frame_id='base')


# In[ ]:

def adjust_pose(target_pose):
    """Return a copy of `target_pose` backed off 15 cm in x and raised 2 cm,
    used as an approach waypoint before/after grasping."""
    adj_p1 = deepcopy(target_pose)
    adj_p1.pose.position.x -= 0.15
    adj_p1.pose.position.z += 0.02
    return adj_p1


# In[ ]:

# Hard-coded workspace poses (recorded by moving the arm by hand).
zero_pose = PoseStamped(
    header=ik_header,
    pose=Pose(
        position=Point(
            x=0.5,
            y=-0.5,
            z=0.6,
        ),
        orientation=Quaternion(
            x=0.0,
            y=0.0,
            z=0.0,
            w=1.0,
        )
    )
)

starting_pose = PoseStamped(
    header=ik_header,
    pose=Pose(
        position=Point(
            x=0.575814771825969,
            y=-0.6921240261798756,
            z=0.132303617877802
        ),
        orientation=Quaternion(
            x=-0.035401679659970944,
            y=0.7351025065602724,
            z=-0.011401826130588908,
            w=0.6769350222044543
        )
    )
)

top_left = PoseStamped(
    header=ik_header,
    pose=Pose(
        position=Point(
            x=0.7157861729277576,
            y=-0.5944180233461482,
            z=0.14473098504048154
        ),
        orientation=Quaternion(
            x=-0.035401679659970944,
            y=0.7351025065602724,
            z=-0.011401826130588908,
            w=0.6769350222044543
        )
    )
)

top_right = PoseStamped(
    header=ik_header,
    pose=Pose(
        position=Point(
            x=0.7282098926508587,
            y=-0.8043508833787663,
            z=0.1451601697469714
        ),
        orientation=Quaternion(
            x=-0.035401679659970944,
            y=0.7351025065602724,
            z=-0.011401826130588908,
            w=0.6769350222044543
        )
    )
)

bottom_left = PoseStamped(
    header=ik_header,
    pose=Pose(
        position=Point(
            x=0.7134527406891848,
            y=-0.5891747770147832,
            z=-0.05405738045318162
        ),
        orientation=Quaternion(
            x=-0.051997633390138645,
            y=0.8097438412456411,
            z=-0.025234618335036367,
            w=0.5839301085952527
        )
    )
)

bottom_right = PoseStamped(
    header=ik_header,
    pose=Pose(
        position=Point(
            x=0.7249046758051918,
            y=-0.7983831869923568,
            z=-0.042637299029533254
        ),
        orientation=Quaternion(
            x=-0.051997633390138645,
            y=0.8097438412456411,
            z=-0.025234618335036367,
            w=0.5839301085952527
        )
    )
)

basket_pose = PoseStamped(
    header=ik_header,
    pose=Pose(
        position=Point(
            x=-0.18960557257166022,
            y=-0.8013329235692273,
            z=0.07650624118442936
        ),
        orientation=Quaternion(
            x=0.40813023278127375,
            y=1.9362437364362493,
            z=-0.2597175943373065,
            w=0.47320359766165804
        ),
    ),
)

# Per-animal waypoint sequences; the sentinels 'reached' and 'end' trigger
# gripper close/open in the execution loop below.
adj_pose = adjust_pose(top_left)
kangaroo = [starting_pose, adj_pose, top_left, 'reached', adj_pose,
            starting_pose, basket_pose, 'end']

adj_pose = adjust_pose(top_right)
hippo = [starting_pose, adj_pose, top_right, 'reached', adj_pose,
         starting_pose, basket_pose, 'end']

adj_pose = adjust_pose(bottom_left)
deer = [starting_pose, adj_pose, bottom_left, 'reached', adj_pose,
        starting_pose, basket_pose, 'end']

adj_pose = adjust_pose(bottom_right)
lion = [starting_pose, adj_pose, bottom_right, 'reached', adj_pose,
        starting_pose, basket_pose, 'end']

animals = {'kangaroo': kangaroo, 'hippo': hippo, 'deer': deer, 'lion': lion}


# In[ ]:

def find_solution(target_pose):
    """Query Baxter's IK service for `target_pose`.

    Returns a {joint name: position} dict when a valid solution exists,
    otherwise None (invalid pose or service failure).
    """
    right_ik_request.pose_stamp[:] = []
    right_ik_request.pose_stamp.append(target_pose)
    print(right_ik_request.pose_stamp)
    try:
        rospy.wait_for_service(right_namespace, 5.0)
        right_ik_response = right_ik_service(right_ik_request)
        if (right_ik_response.isValid[0]):
            right_limb_joints = dict(zip(right_ik_response.joints[0].name,
                                         right_ik_response.joints[0].position))
            pp.pprint(right_limb_joints)
            return right_limb_joints
        else:
            print("INVALID POSE - No Valid Joint Solution Found.")
    except (rospy.ServiceException, rospy.ROSException) as e:
        rospy.logerr("Service call failed: %s" % (e,))


# In[ ]:

def execute_trajectory(target_joints):
    """Move the right arm to the given joint-space target (blocking)."""
    right_arm.move_to_joint_positions(target_joints)


# In[ ]:

user_choices = []
choices = animals.keys()

print("Please select two animals of your choice")
for i in range(2):
    user_input = raw_input('Animal ' + str(i+1) + ': ').lower()
    if(user_input in choices):
        user_choices.append(user_input)
        print(str(user_input) + " successfully selected!")
    else:
        print("Invalid choice, please try again.")


# In[ ]:

success = []

for choice in user_choices:
    selected = animals[choice]
    for pose in selected:
        if(pose == 'reached'):
            fingers.close()
            print("Picking up the " + str(choice) + " now!")
        elif(pose == 'end'):
            fingers.open()
            print("The retrieval of the " + str(choice) + " has been completed!")
        else:
            solution = find_solution(pose)
            if(solution):
                execute_trajectory(solution)
            else:
                print("No valid pose was found, terminating retrieval of ....")
                print("Try homing the arm and try the retrieval again!")
                break
    else:
        # for/else: only reached when the loop was NOT broken, i.e. every
        # waypoint succeeded. Previously the append ran unconditionally, so
        # failed retrievals were reported as successes.
        success.append(choice)


# In[ ]:

failed = [choice for choice in user_choices if choice not in success]

print("============================\n    RETRIEVAL SUMMARY\n============================")
for animal in success:
    print("The retrieval of the " + animal + " was successful!")
for animal in failed:
    print("The retrieval of the " + animal + " failed....")


# In[ ]:
# from napari_segment_blobs_and_things_with_membranes import threshold, image_arithmetic
# add your tests here...

import numpy as np


def test_something():
    """Smoke test: run every exported operation on a tiny image and make sure
    nothing raises. No output values are asserted here."""
    from napari_segment_blobs_and_things_with_membranes import gaussian_blur, \
        subtract_background,\
        threshold_otsu,\
        threshold_yen,\
        threshold_isodata,\
        threshold_li,\
        threshold_mean,\
        threshold_minimum,\
        threshold_triangle,\
        binary_invert,\
        split_touching_objects,\
        connected_component_labeling,\
        seeded_watershed,\
        voronoi_otsu_labeling, \
        gauss_otsu_labeling,\
        gaussian_laplace,\
        median_filter,\
        maximum_filter,\
        minimum_filter,\
        percentile_filter,\
        black_tophat,\
        white_tophat,\
        morphological_gradient,\
        local_minima_seeded_watershed,\
        thresholded_local_minima_seeded_watershed,\
        sum_images,\
        multiply_images,\
        divide_images,\
        invert_image, \
        skeletonize

    import numpy as np

    # Small image with both low and near-255 values so thresholds have
    # something to separate.
    image = np.asarray([[0, 1, 2, 3], [2, 0, 1, 3], [2, 253, 1, 3], [255, 253, 1, 3]])

    # Single-input operations.
    for operation in [gaussian_blur,
                      subtract_background,
                      threshold_otsu,
                      threshold_yen,
                      threshold_isodata,
                      threshold_li,
                      threshold_mean,
                      threshold_minimum,
                      threshold_triangle,
                      binary_invert,
                      split_touching_objects,
                      connected_component_labeling,
                      voronoi_otsu_labeling,
                      gauss_otsu_labeling,
                      gaussian_laplace,
                      median_filter,
                      maximum_filter,
                      minimum_filter,
                      percentile_filter,
                      black_tophat,
                      white_tophat,
                      morphological_gradient,
                      local_minima_seeded_watershed,
                      invert_image]:
        print(operation)
        operation(image)

    # Two-input operations (called with the image as both arguments).
    for operation in [
                      seeded_watershed,
                      sum_images,
                      multiply_images,
                      divide_images]:
        print(operation)
        operation(image, image)

    # skeletonize expects a binary image.
    skeletonize(image > 0)


def test_remove_labels_on_edges_sequential_labeling():
    """Only the interior label should survive, relabeled sequentially to 1."""
    image = np.asarray([
        [1,2,3],
        [4,5,6],
        [7,7,7],
    ])

    reference = np.asarray([
        [0,0,0],
        [0,1,0],
        [0,0,0],
    ])

    from napari_segment_blobs_and_things_with_membranes import remove_labels_on_edges
    result = remove_labels_on_edges(image)

    print(result)
    print(reference)

    assert np.array_equal(result, reference)


def test_connected_component_labeling_sequential_labeling():
    """With exclude_on_edges=True only the central component remains and is
    relabeled to 1."""
    image = np.asarray([
        [1, 0, 1, 0, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ])

    reference = np.asarray([
        [0,0,0,0,0],
        [0,0,0,0,0],
        [0,0,1,0,0],
        [0,0,0,0,0],
        [0,0,0,0,0],
    ])

    from napari_segment_blobs_and_things_with_membranes import connected_component_labeling
    result = connected_component_labeling(image, exclude_on_edges=True)

    print(result)
    print(reference)

    assert np.array_equal(result, reference)
"""
Welcome to CS375! This is the starter file for assignment 2 in which you
will train unsupervised networks. Since you should be familiar with tfutils
by now from assignment 1 the only thing that we provide is the config for
the dataprovider and the dataproviders themselves as you will be also
training and testing on CIFAR 10 in this assignment. You should be able to
setup the rest of the code yourself. You can find the instructions in
assignment2.pdf in this folder.

Good luck with assigment 2!
"""

import os
import numpy as np
import tensorflow as tf
from tfutils import base, data, model, optimizer, utils
from dataprovider import CIFAR10DataProvider


class CIFAR10Experiment():
    """
    Defines the CIFAR10 training experiment
    """
    class Config():
        """
        Holds model hyperparams and data information.
           The config class is used to store various hyperparameters and dataset
           information parameters.
        """
        batch_size = 256
        data_path = '/datasets/cifar10/tfrecords'
        seed = 0
        crop_size = 24
        # Training aborts if the loss ever exceeds this threshold.
        thres_loss = 1000000000000000
        n_epochs = 60
        # NOTE(review): under Python 3 `/` yields a float here; if tfutils
        # expects an integer step count, `//` may be intended — confirm.
        train_steps = CIFAR10DataProvider.N_TRAIN / batch_size * n_epochs
        val_steps = np.ceil(CIFAR10DataProvider.N_VAL / batch_size).astype(int)

    def setup_params(self):
        """
        This function illustrates how to setup up the parameters for
        train_from_params.

        Returns the params dict consumed by tfutils.base.train_from_params;
        the model / loss / optimizer / save / load sections are left empty
        for the student to fill in.
        """
        params = {}

        """
        train_params defines the training parameters consisting of
            - the data provider that reads the data, preprocesses it and enqueues it into
              the data queue
            - the data queue that batches and if specified shuffles the data and provides
              the input to the model
            - other configuration parameters like the number of training steps
        It's arguments are
            data_params: defines how the data is read in.
            queue_params: defines how the data is presented to the model, i.e.
            if it is shuffled or not and how big of a batch size is used.
            targets: the targets to be extracted and evaluated in the tensorflow session
            num_steps: number of training steps
            thres_loss: if the loss exceeds thres_loss the training will be stopped
            validate_first: run validation before starting the training
        """
        params['train_params'] = {
            'data_params': {
                # Cifar 10 data provider arguments
                'func': CIFAR10DataProvider,
                'data_path': self.Config.data_path,
                'group': 'train',
                'crop_size': self.Config.crop_size,
                # TFRecords (super class) data provider arguments
                'file_pattern': 'train*.tfrecords',
                'batch_size': self.Config.batch_size,
                'shuffle': False,
                'shuffle_seed': self.Config.seed,
                'n_threads': 4,
            },
        }

        """
        validation_params similar to train_params defines the validation parameters.
        It has the same arguments as train_params and additionally
            agg_func: function that aggregates the validation results across batches,
                e.g. to calculate the mean of across batch losses
            online_agg_func: function that aggregates the validation results across
                batches in an online manner, e.g. to calculate the RUNNING mean across
                batch losses
        """
        params['validation_params'] = {
            'valid0': {
                'data_params': {
                    # Cifar 10 data provider arguments
                    'func': CIFAR10DataProvider,
                    'data_path': self.Config.data_path,
                    'group': 'val',
                    'crop_size': self.Config.crop_size,
                    # TFRecords (super class) data provider arguments
                    'file_pattern': 'test*.tfrecords',
                    'batch_size': self.Config.batch_size,
                    'shuffle': False,
                    'shuffle_seed': self.Config.seed,
                    'n_threads': 4,
                },
            }
        }

        """
        model_params defines the model i.e. the architecture that
        takes the output of the data provider as input and outputs
        the prediction of the model.
        """
        params['model_params'] = {
        }

        """
        loss_params defines your training loss.
        """
        params['loss_params'] = {
        }

        """
        learning_rate_params defines the learning rate, decay and learning function.
        """
        params['learning_rate_params'] = {
        }

        """
        optimizer_params defines the optimizer.
        """
        params['optimizer_params'] = {
        }

        """
        save_params defines how, where and when your training results are saved
        in the database.
        """
        params['save_params'] = {
        }

        """
        load_params defines how and if a model should be restored from the database.
        """
        params['load_params'] = {
        }

        return params


if __name__ == '__main__':
    """
    Illustrates how to run the configured model using tfutils
    """
    base.get_params()
    m = CIFAR10Experiment()
    params = m.setup_params()
    base.train_from_params(**params)
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score
import random


def generate_random_results(size):
    """Return a vector of `size` random 0/1 predictions."""
    return np.random.randint(2, size=size)


def get_baseline_performance(root_folder, test_file, target, no_trails=5):
    """Score a majority vote over `no_trails` random binary predictors.

    Reads the ground-truth labels from `root_folder/test_file`, draws
    `no_trails` random prediction vectors, takes the per-sample majority
    vote and reports AUC and accuracy against the `target` column.

    Returns a (accuracy, auc) tuple.
    """
    np.random.seed(123)

    # Ground-truth labels from the test file.
    test_data = pd.read_csv('{0}/{1}'.format(root_folder, test_file))
    test_Y = test_data[target].values.reshape(-1)

    # One random 0/1 prediction vector per trial, stacked row-wise.
    full_r_results = np.array(
        [generate_random_results(len(test_Y)) for _ in range(no_trails)]
    )

    # Per-sample majority vote across trials (argmax over the vote counts,
    # so ties resolve toward class 0, matching bincount/argmax semantics).
    pred = np.array(
        [np.argmax(np.bincount(column)) for column in full_r_results.T]
    )

    accuracy = accuracy_score(test_Y, pred)
    auc = roc_auc_score(test_Y, pred)
    print('****Baseline random results****')
    print('AUC: {0}'.format(auc))
    print('Accuracy: {0}\n'.format(accuracy))
    return (accuracy, auc)


def get_logistic_reg_performance(root_folder, train_file, test_file, predictors, target):
    """Fit a logistic regression baseline and report test AUC and accuracy."""
    train_data = pd.read_csv('{0}/{1}'.format(root_folder, train_file))
    test_data = pd.read_csv('{0}/{1}'.format(root_folder, test_file))

    logisticRegr = LogisticRegression()
    logisticRegr.fit(
        train_data[predictors].values,
        train_data[target].values.reshape(-1),
    )

    test_Y = test_data[target].values.reshape(-1)
    pred = logisticRegr.predict(test_data[predictors].values)

    print('****Baseline Logistic regression results****')
    print('AUC: {0}'.format(roc_auc_score(test_Y, pred)))
    print('Accuracy: {0}\n'.format(accuracy_score(test_Y, pred)))


if __name__ == '__main__':
    root_folder = '/home/vparambath/Desktop/iith/AML-Assignment'
    predictors = ["Temperature", "Humidity", "Light", "CO2", "HumidityRatio"]
    target_col = "Occupancy"
    train_file = 'train_data.txt'
    test_file = 'test_data.txt'

    get_logistic_reg_performance(root_folder, train_file, test_file,
                                 predictors, target_col)
    get_baseline_performance(root_folder, test_file, target_col)
#!/usr/bin/env python3
"""KMeans elbow-curve exploration with optional PCA dimensionality reduction."""
import math
import numpy as np
import argparse
import sys
import matplotlib.pyplot as plt

g_apply_scaling = False
g_apply_normalization = False

from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import silhouette_score
from metric_learn import SCML
from metric_learn import Covariance
import pickle


def read_file(filename: str) -> np.ndarray:
    """Load a comma-separated numeric matrix from `filename`."""
    return np.genfromtxt(filename, dtype=float, delimiter=',')


def restart_KMeans(filename: str, num_centroids: int, iterations: int, restarts: int):
    """Run `iterate_knn` `restarts` times, keeping the lowest-error result.

    Returns (best_error, best_assignment).

    NOTE(review): `iterate_knn` is not defined anywhere in this module, so
    calling this function raises NameError; it appears to be dead code left
    over from a hand-rolled KMeans.  Confirm before use.
    """
    data = read_file(filename)
    best_error = None
    best_assignment = None
    # Run for N restarts, tracking the best (lowest) error seen.
    for _ in range(restarts):
        error, assignments = iterate_knn(np.copy(data), num_centroids, iterations)
        if best_error is None or error < best_error:
            best_error = error
            best_assignment = assignments
    return best_error, best_assignment


def get_centroids(data: np.ndarray, assignments: np.ndarray):
    """Return the mean point (centroid) of each cluster label in `assignments`."""
    centroids = []
    for label in np.unique(assignments):
        members = data[assignments == label, :]
        centroids.append(np.mean(members, axis=0))
    return np.array(centroids)


def calculate_error(data: np.ndarray, centroids: np.ndarray,
                    assignments: np.ndarray) -> float:
    """Mean squared distance between each point and its assigned centroid.

    `assignments` holds, for every row of `data`, the index of its closest
    centroid; indexing `centroids` with it expands that to one centroid row
    per point.  The square root of the Euclidean distance cancels against
    the squaring in the mean-square error, so summing the squared per-axis
    differences directly is equivalent and cheaper.
    """
    closest_centroids = centroids[assignments, :]
    square_distances = np.square(np.subtract(data, closest_centroids))
    return np.sum(square_distances) / data.shape[0]


def plot_scatter(data: np.ndarray, assignments: np.ndarray):
    """Project `data` to 3 principal components and scatter-plot per cluster.

    The figure is also pickled to plot_scatter.pickle for later inspection.
    """
    newdata = PCA(n_components=3).fit_transform(data)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for label in np.unique(assignments):
        points = newdata[assignments == label]
        ax.scatter(points[:, 0], points[:, 1], points[:, 2])
    with open("plot_scatter.pickle", "wb") as f:
        pickle.dump(fig, f, pickle.HIGHEST_PROTOCOL)


# Best 4-cluster solution seen across all PCA settings, filled in by
# restart_and_elbow_plot_with_pca.  FIX: float('inf') replaces the magic
# sentinel 999999999, which would silently reject valid (large) errors.
g_best_error = float('inf')
g_best_assignment = None


def restart_and_elbow_plot_with_pca(filename: str, iterations: int, restarts: int,
                                    max_N: int, pca_value: int, fig):
    """Plot an elbow curve (error vs. cluster count 3..max_N) onto `fig`.

    The data is standardized first; if `pca_value` is non-zero the clustering
    runs on that many principal components, while the error is always
    evaluated in the original standardized space so curves for different
    `pca_value` settings are comparable.  The figure is pickled to
    pca_plot.pickle.

    NOTE(review): `iterations` and `restarts` are unused — KMeans handles its
    own iterations and the inner loop below is hard-coded to 5.  Moreover,
    random_state=0 makes every "restart" identical; vary the seed if real
    restarts are intended.
    """
    global g_best_error
    global g_best_assignment
    x = []
    y = []
    data = read_file(filename)

    scaler = StandardScaler().fit(data.copy())
    scaled_data = scaler.transform(data.copy())
    scaled_save = scaled_data.copy()

    # Debug output retained from the original exploration.
    cov = Covariance().fit_transform(scaled_data)
    print(cov, dir(cov), cov.shape)

    if pca_value != 0:
        pca = PCA(n_components=pca_value)
        scaled_data = pca.fit_transform(scaled_data)

    for i in range(3, max_N + 1):
        best_error = float('inf')  # FIX: was the magic number 99999999999
        best_assignment = None
        for j in range(5):  # restarts (see NOTE above: all 5 runs are identical)
            clf = KMeans(n_clusters=i, random_state=0).fit(scaled_data)
            assignments = clf.predict(scaled_data).copy()
            # Evaluate the error in the original standardized space.
            scaled_centroids = get_centroids(scaled_save, assignments)
            error = calculate_error(scaled_save, scaled_centroids, assignments)
            if error < best_error:
                best_error = error
                best_assignment = assignments
        x.append(i)
        y.append(best_error)
        print(i, best_error)
        # Remember the globally best 4-cluster solution for the final scatter.
        if i == 4 and best_error < g_best_error:
            g_best_error = best_error
            g_best_assignment = best_assignment.copy()

    fig.plot(x, y, label=f'pca = {pca_value}')
    with open("pca_plot.pickle", "wb") as f:
        pickle.dump(fig, f, pickle.HIGHEST_PROTOCOL)


if "__main__" == __name__:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", help="File name", type=str, required=True)
    args = parser.parse_args()

    fig, ax = plt.subplots(1, 1)
    # One elbow curve per PCA setting (0 = no PCA, then 1..5 components).
    for i in range(6):
        restart_and_elbow_plot_with_pca(args.file, 200, 10, 10, i, ax)

    data = read_file(args.file)
    plot_scatter(data, g_best_assignment)
    fig.legend()
    plt.legend()
    plt.show()
#!/usr/bin/env python3
"""Exact and numerical spin-L transition/drive operators for a qudit of
dimension `dim` (spin I = (dim-1)/2), built from Clebsch-Gordan coefficients
and Wigner 6-j symbols."""
import numpy as np
import sympy as sym
from sympy.physics.quantum.cg import CG as sym_cg
from sympy.physics.wigner import wigner_6j as sym_wigner_6j


# transition and drive operators, exact (symbolic)
def trans_op_exact(dim, L, M):
    """Symbolic spherical transition operator T_{L,M} as a dim x dim matrix."""
    # Out-of-range (L, M) yields the zero operator.
    if L >= dim or abs(M) > L:
        return np.zeros((dim, dim))
    L, M = sym.S(L), sym.S(M)
    I = sym.S(dim - 1) / 2
    # Only levels whose shifted index stays inside the matrix contribute.
    lo = max(-M, 0)
    hi = min(dim - M, dim)
    cg_column = []
    for mu in range(lo, hi):
        cg_column.append(sym_cg(I, -I + mu, L, M, I, -I + mu + M).doit())
    norm = sym.sqrt(sym.S(2 * L + 1) / sym.S(2 * I + 1))
    # The coefficients populate the -M-th diagonal.
    return norm * np.diag(cg_column, -M)


def drive_op_exact(dim, L, M):
    """Symbolic Hermitian drive operator built from T_{L,M} and its adjoint."""
    if L >= dim or abs(M) > L:
        return np.zeros((dim, dim))
    T = trans_op_exact(dim, L, M)
    if M == 0:
        return T
    # Phase convention: (-1)^M for positive M, imaginary unit for negative M.
    if M > 0:
        eta = (-1) ** M
    else:
        eta = sym.I
    return eta / sym.sqrt(2) * (T + np.sign(M) * T.conj().T)


# transition and drive operators, numerical
def trans_op(dim, L, M):
    """Float-valued transition operator."""
    return trans_op_exact(dim, L, M).astype(float)


def drive_op(dim, L, M):
    """Numerical drive operator (complex for M < 0, real otherwise)."""
    target_dtype = float if M >= 0 else complex
    return drive_op_exact(dim, L, M).astype(target_dtype)


# drive scale factor
def drive_scale_exact(dim, L):
    """Symbolic normalization factor for an order-L drive."""
    L = sym.S(L)
    prefactor = sym.sqrt(2 * L + 1) * sym.factorial(L) / sym.factorial(2 * L + 1)
    product = np.prod([dim + shift for shift in range(-L, L + 1)])
    return prefactor * sym.sqrt(product)


def drive_scale(dim, L):
    """Numerical drive scale factor."""
    return float(drive_scale_exact(dim, L))


# transition product expansion coefficient (structure factor)
def trans_prod_coef_exact(dim, l1, m1, l2, m2, L, M):
    """Symbolic coefficient of T_{L,M} in the product T_{l1,m1} T_{l2,m2}."""
    I = sym.S(dim - 1) / 2
    sign = (-1) ** (2 * I + L)
    weight = sym.sqrt((2 * l1 + 1) * (2 * l2 + 1))
    cg = sym_cg(l1, m1, l2, m2, L, M).doit()
    six_j = sym_wigner_6j(l1, l2, L, I, I, I)
    return sign * weight * cg * six_j


def trans_prod_coef(dim, l1, m1, l2, m2, L, M):
    """Numerical structure factor."""
    return float(trans_prod_coef_exact(dim, l1, m1, l2, m2, L, M))
import pytest
from tempfile import NamedTemporaryFile
from mlflow.store.artifact.artifact_repository_registry import get_artifact_repository
from mlflow.store.artifact.sftp_artifact_repo import SFTPArtifactRepository
from mlflow.utils.file_utils import TempDir
import os
import mlflow
import posixpath

# Every test in this module needs a working local SSH/SFTP setup.
pytestmark = pytest.mark.requires_ssh


def test_artifact_uri_factory(tmp_path):
    # The registry should resolve the sftp:// scheme to SFTPArtifactRepository.
    assert isinstance(get_artifact_repository(f"sftp://{tmp_path}"), SFTPArtifactRepository)


def test_list_artifacts_empty(tmp_path):
    # Listing an empty artifact root yields an empty list.
    repo = SFTPArtifactRepository(f"sftp://{tmp_path}")
    assert repo.list_artifacts() == []


@pytest.mark.parametrize("artifact_path", [None, "sub_dir", "very/nested/sub/dir"])
def test_list_artifacts(tmp_path, artifact_path):
    # One file and one directory under the (optionally nested) artifact_path.
    file_path = "file"
    dir_path = "model"
    tmp_path.joinpath(artifact_path or "").mkdir(parents=True, exist_ok=True)
    tmp_path.joinpath(artifact_path or "", file_path).write_text("test")
    tmp_path.joinpath(artifact_path or "", dir_path).mkdir()
    repo = SFTPArtifactRepository(f"sftp://{tmp_path}")
    artifacts = repo.list_artifacts(path=artifact_path)
    assert len(artifacts) == 2
    # NOTE(review): assumes list_artifacts returns the file before the
    # directory in a stable order — confirm against the repo implementation.
    assert artifacts[0].path == posixpath.join(artifact_path or "", file_path)
    assert artifacts[0].is_dir is False
    assert artifacts[0].file_size == 4  # len("test")
    assert artifacts[1].path == posixpath.join(artifact_path or "", dir_path)
    assert artifacts[1].is_dir is True
    assert artifacts[1].file_size is None


@pytest.mark.parametrize("artifact_path", [None, "sub_dir", "very/nested/sub/dir"])
def test_log_artifact(artifact_path):
    # Logging a single local file places it under artifact_path remotely.
    file_content = "A simple test artifact\nThe artifact is located in: " + str(artifact_path)
    with NamedTemporaryFile(mode="w") as local, TempDir() as remote:
        local.write(file_content)
        local.flush()
        sftp_path = "sftp://" + remote.path()
        store = SFTPArtifactRepository(sftp_path)
        store.log_artifact(local.name, artifact_path)

        # "." stands for the artifact root when no sub-path was given.
        remote_file = posixpath.join(
            remote.path(),
            "." if artifact_path is None else artifact_path,
            os.path.basename(local.name),
        )
        assert posixpath.isfile(remote_file)
        with open(remote_file, "r") as remote_content:
            assert remote_content.read() == file_content


@pytest.mark.parametrize("artifact_path", [None, "sub_dir", "very/nested/sub/dir"])
def test_log_artifacts(artifact_path):
    # Logging a directory tree: a text file plus a nested binary file.
    file_content_1 = "A simple test artifact\nThe artifact is located in: " + str(artifact_path)
    file_content_2 = os.urandom(300)

    file1 = "meta.yaml"
    directory = "saved_model"
    file2 = "sk_model.pickle"

    with TempDir() as local, TempDir() as remote:
        with open(os.path.join(local.path(), file1), "w") as f:
            f.write(file_content_1)
        os.mkdir(os.path.join(local.path(), directory))
        with open(os.path.join(local.path(), directory, file2), "wb") as f:
            f.write(file_content_2)

        sftp_path = "sftp://" + remote.path()
        store = SFTPArtifactRepository(sftp_path)
        store.log_artifacts(local.path(), artifact_path)

        remote_dir = posixpath.join(remote.path(), "." if artifact_path is None else artifact_path)
        assert posixpath.isdir(remote_dir)
        assert posixpath.isdir(posixpath.join(remote_dir, directory))
        assert posixpath.isfile(posixpath.join(remote_dir, file1))
        assert posixpath.isfile(posixpath.join(remote_dir, directory, file2))

        with open(posixpath.join(remote_dir, file1), "r") as remote_content:
            assert remote_content.read() == file_content_1
        with open(posixpath.join(remote_dir, directory, file2), "rb") as remote_content:
            assert remote_content.read() == file_content_2


@pytest.mark.parametrize("artifact_path", [None, "sub_dir", "very/nested/sub/dir"])
def test_delete_artifact(artifact_path):
    # Deleting the artifact root removes the logged file and the root itself.
    file_content = f"A simple test artifact\nThe artifact is located in: {artifact_path}"
    with NamedTemporaryFile(mode="w") as local, TempDir() as remote:
        local.write(file_content)
        local.flush()
        sftp_path = f"sftp://{remote.path()}"
        store = SFTPArtifactRepository(sftp_path)
        store.log_artifact(local.name, artifact_path)

        remote_file = posixpath.join(
            remote.path(),
            "." if artifact_path is None else artifact_path,
            os.path.basename(local.name),
        )
        assert posixpath.isfile(remote_file)
        with open(remote_file, "r") as remote_content:
            assert remote_content.read() == file_content

        store.delete_artifacts(remote.path())
        assert not posixpath.exists(remote_file)
        assert not posixpath.exists(remote.path())


@pytest.mark.parametrize("artifact_path", [None, "sub_dir", "very/nested/sub/dir"])
def test_delete_artifacts(artifact_path):
    # Deleting the root removes the whole logged tree.
    file_content_1 = f"A simple test artifact\nThe artifact is located in: {artifact_path}"
    file_content_2 = os.urandom(300)

    file1 = "meta.yaml"
    directory = "saved_model"
    file2 = "sk_model.pickle"

    with TempDir() as local, TempDir() as remote:
        with open(os.path.join(local.path(), file1), "w", encoding="utf8") as f:
            f.write(file_content_1)
        os.mkdir(os.path.join(local.path(), directory))
        with open(os.path.join(local.path(), directory, file2), "wb") as f:
            f.write(file_content_2)

        sftp_path = f"sftp://{remote.path()}"
        store = SFTPArtifactRepository(sftp_path)
        store.log_artifacts(local.path(), artifact_path)

        remote_dir = posixpath.join(remote.path(), "." if artifact_path is None else artifact_path)
        assert posixpath.isdir(remote_dir)
        assert posixpath.isdir(posixpath.join(remote_dir, directory))
        assert posixpath.isfile(posixpath.join(remote_dir, file1))
        assert posixpath.isfile(posixpath.join(remote_dir, directory, file2))

        with open(posixpath.join(remote_dir, file1), "r", encoding="utf8") as remote_content:
            assert remote_content.read() == file_content_1
        with open(posixpath.join(remote_dir, directory, file2), "rb") as remote_content:
            assert remote_content.read() == file_content_2

        store.delete_artifacts(remote.path())
        assert not posixpath.exists(posixpath.join(remote_dir, directory))
        assert not posixpath.exists(posixpath.join(remote_dir, file1))
        assert not posixpath.exists(posixpath.join(remote_dir, directory, file2))
        assert not posixpath.exists(remote_dir)
        assert not posixpath.exists(remote.path())


@pytest.mark.parametrize("artifact_path", [None, "sub_dir", "very/nested/sub/dir"])
def test_delete_selective_artifacts(artifact_path):
    # Deleting one file leaves the sibling directory and its contents intact.
    file_content_1 = f"A simple test artifact\nThe artifact is located in: {artifact_path}"
    file_content_2 = os.urandom(300)

    file1 = "meta.yaml"
    directory = "saved_model"
    file2 = "sk_model.pickle"

    with TempDir() as local, TempDir() as remote:
        with open(os.path.join(local.path(), file1), "w", encoding="utf8") as f:
            f.write(file_content_1)
        os.mkdir(os.path.join(local.path(), directory))
        with open(os.path.join(local.path(), directory, file2), "wb") as f:
            f.write(file_content_2)

        sftp_path = f"sftp://{remote.path()}"
        store = SFTPArtifactRepository(sftp_path)
        store.log_artifacts(local.path(), artifact_path)

        remote_dir = posixpath.join(remote.path(), "." if artifact_path is None else artifact_path)
        assert posixpath.isdir(remote_dir)
        assert posixpath.isdir(posixpath.join(remote_dir, directory))
        assert posixpath.isfile(posixpath.join(remote_dir, file1))
        assert posixpath.isfile(posixpath.join(remote_dir, directory, file2))

        with open(posixpath.join(remote_dir, file1), "r", encoding="utf8") as remote_content:
            assert remote_content.read() == file_content_1
        with open(posixpath.join(remote_dir, directory, file2), "rb") as remote_content:
            assert remote_content.read() == file_content_2

        store.delete_artifacts(posixpath.join(remote_dir, file1))
        assert posixpath.isdir(posixpath.join(remote_dir, directory))
        assert not posixpath.exists(posixpath.join(remote_dir, file1))
        assert posixpath.isfile(posixpath.join(remote_dir, directory, file2))
        assert posixpath.isdir(remote_dir)


def test_log_and_download_sklearn_model(tmp_path):
    # Round-trip an sklearn model through an sftp-backed artifact store.
    from sklearn.linear_model import LogisticRegression
    from sklearn.datasets import load_iris
    from numpy.testing import assert_allclose

    X, y = load_iris(return_X_y=True)
    original = LogisticRegression().fit(X, y)
    experiment_id = mlflow.create_experiment(
        name="sklearn-model-experiment",
        artifact_location=f"sftp://{tmp_path}",
    )
    with mlflow.start_run(experiment_id=experiment_id):
        model_uri = mlflow.sklearn.log_model(original, "model").model_uri
        downloaded = mlflow.sklearn.load_model(model_uri)
        assert_allclose(original.predict(X), downloaded.predict(X))
import numpy as np

"""
For conv2D methods:
Weights shape must be in form of (o, i, k_h, k_w), where 'o' stands for number
of outputs, 'i' number of inputs, 'k_h' is kernel height and 'k_w' is kernel
width. fMaps stands for Feature Maps, or input images, its shape must be in
form of (i, h, w), where 'i' is the number of inputs, 'h' is image height and
'w' is image width.

For dense method:
Weights shape must be in form of (o, i), where 'o' stands for number of
outputs and 'i' number of inputs. fMaps stands for Feature Maps, or input
images, its a flattened array.
"""


# Convolves feature maps and weights
def conv2D(fMaps, weights, bias, padding='same'):
    """3x3 2-D convolution of `fMaps` with `weights` plus per-output `bias`.

    padding='same' zero-pads so the output spatial size equals the input's;
    any other value behaves as 'valid' (output shrinks by 2 per spatial
    dimension).  Returns an array of shape (n_outputs, H', W').

    NOTE(review): the 2-D branch still indexes fMaps[i] per input kernel,
    which only works when fMaps has a leading channel axis — confirm whether
    plain (h, w) inputs are actually supported.
    """
    image_initial_addr = [0, 0]  # top-left corner of the (virtual) image

    if padding == 'same':
        ### PADDING == 'SAME': output keeps the input's spatial dimensions.
        if len(fMaps.shape) > 2:
            fMap_height = fMaps.shape[1]
            fMap_width = fMaps.shape[2]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[1], fMaps.shape[2]))
        else:
            fMap_height = fMaps.shape[0]
            fMap_width = fMaps.shape[1]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[0], fMaps.shape[1]))

        for j, w in enumerate(weights):  # one pass per output map
            convolved_rows = -1  # start one row above the image (zero padding)
            while convolved_rows < (fMap_height - 1):
                convolved_cols = -1
                while convolved_cols < (fMap_width - 1):
                    convolved = np.zeros((3, 3))  # per-window products
                    for i, kernel in enumerate(w):  # accumulate over input maps
                        # Convolve image and kernel
                        for col in range(0, 3):
                            for row in range(0, 3):
                                col_addr = convolved_cols + col
                                row_addr = convolved_rows + row
                                # Taps outside the image contribute zero (padding).
                                if (col_addr < image_initial_addr[1]
                                        or col_addr == fMap_width
                                        or row_addr < image_initial_addr[0]
                                        or row_addr == fMap_height):
                                    continue
                                convolved[row][col] += (fMaps[i][row_addr, col_addr]
                                                        * kernel[row][col])
                    summ = np.asarray(convolved).sum() + bias[j]
                    convolved_fMap[j][convolved_rows + 1, convolved_cols + 1] = summ
                    convolved_cols += 1
                convolved_rows += 1  # counts how many lines have been convolved
    else:
        ### PADDING == 'VALID': no padding, output shrinks by 2 per dimension.
        if len(fMaps.shape) > 2:
            fMap_height = fMaps.shape[1]
            fMap_width = fMaps.shape[2]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[1] - 2, fMaps.shape[2] - 2))
        else:
            fMap_height = fMaps.shape[0]
            fMap_width = fMaps.shape[1]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[0] - 2, fMaps.shape[1] - 2))

        for j, w in enumerate(weights):  # one pass per output map
            im_row_addr = image_initial_addr[0]
            while im_row_addr < (fMap_height - 2):
                im_col_addr = image_initial_addr[1]
                while im_col_addr < (fMap_width - 2):
                    convolved = np.zeros((3, 3))  # per-window products
                    # Creates a 3x3 kernel window fully inside the image.
                    for i, kernel in enumerate(w):  # accumulate over input maps
                        for k in range(0, 3):
                            convolved[0][k] += (fMaps[i][im_row_addr, k + im_col_addr] * kernel[0][k])
                            convolved[1][k] += (fMaps[i][1 + im_row_addr, k + im_col_addr] * kernel[1][k])
                            convolved[2][k] += (fMaps[i][2 + im_row_addr, k + im_col_addr] * kernel[2][k])
                    summ = np.asarray(convolved).sum() + bias[j]
                    convolved_fMap[j][im_row_addr, im_col_addr] += summ
                    im_col_addr += 1
                im_row_addr += 1
    return convolved_fMap


### DENSE
def dense(fMap, weights, bias):
    """Fully-connected layer: out[j] = w_j . fMap + bias[j]."""
    fMap = np.asarray(fMap)  # accept plain lists as well as arrays
    out = np.zeros((weights.shape[0],))  # output vector
    for j, w in enumerate(weights):
        # FIX: the original accumulated into a scalar and then called .sum()
        # on it, which crashes when fMap is a plain Python sequence.
        out[j] = np.dot(w, fMap) + bias[j]
    return out


def softmax(x):
    """Compute softmax values for each set of scores in x (along axis 0)."""
    e_x = np.exp(x - np.max(x))  # shift by the max for numerical stability
    return e_x / e_x.sum(axis=0)


def LeakyReLU(x, alpha):
    """Leaky Rectified Linear Unit activation.

    If `alpha` is equal to zero this becomes a standard ReLU.
    FIX: the original called the non-existent np.maximun (typo), so this
    function always raised AttributeError.
    """
    return np.maximum(x, alpha * x)
# -*- coding: ascii -*- """ Evolves the sun and earth where the sun will lose mass every 220th step. """ from __future__ import print_function import numpy from amuse.community.hermite.interface import Hermite # from amuse.community.sse.interface import SSE from amuse import datamodel from amuse.units import units from amuse.units import nbody_system from amuse.units.quantities import VectorQuantity from amuse.plot import ( plot, native_plot) def simulate_massloss(time): return units.MSun( 0.5 * (1.0 + 1.0 / (1.0 + numpy.exp((time.value_in(time.unit) - 70.0) / 15.))) ) if __name__ == "__main__": convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU) particles = datamodel.Particles(2) sun = particles[0] sun.mass = 1.0 | units.MSun sun.position = [0.0, 0.0, 0.0] | units.AU sun.velocity = [0.0, 0.0, 0.0] | units.AU / units.yr sun.radius = 1.0 | units.RSun earth = particles[1] earth.mass = 5.9736e24 | units.kg earth.radius = 6371.0 | units.km earth.position = [0.0, 1.0, 0.0] | units.AU earth.velocity = [2.0 * numpy.pi, -0.0001, 0.0] | units.AU / units.yr instance = Hermite(convert_nbody) instance.particles.add_particles(particles) channelp = instance.particles.new_channel_to(particles) start = 0 | units.yr end = 150 | units.yr step = 10 | units.day timerange = VectorQuantity.arange(start, end, step) masses = [] | units.MSun for i, time in enumerate(timerange): instance.evolve_model(time) channelp.copy() particles.savepoint(time) if (i % 220 == 0): instance.particles[0].mass = simulate_massloss(time) masses.append(instance.particles[0].mass) instance.stop() particle = particles[1] t, pos = particle.get_timeline_of_attribute_as_vector("position") distances = pos.lengths().as_quantity_in(units.AU) plot(timerange, distances, timerange, masses) native_plot.show()
# coding=utf-8 python3.6
# ================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
# license='MIT License'
# Author : haibingshuai
# Created date: 2019/10/29 18:05
# Description : Run a frozen detector graph (.pb) over every image in a
#               folder and write annotated copies to an output folder.
# ================================================================
import cv2
import numpy as np
import core.utils as utils
import tensorflow as tf
from PIL import Image
import os

# Tensor names to fetch from the frozen graph: the input placeholder plus the
# small/medium/large box prediction heads.
return_elements = ["input/input_data:0", "pred_smt_box/concat_2:0",
                   "pred_mid_box/concat_2:0", "pred_big_box/concat_2:0"]
pb_file = "./susong_header_5k_model_1.pb"
image_path_ = "./data_test/in"
image_out_path_ = "./data_test/out"
num_classes = 1
input_size = 608  # 416

graph = tf.Graph()
# (input path, output path) pair for every file in the input folder.
pic_list_path = [[os.path.join(image_path_, one), os.path.join(image_out_path_, one)]
                 for one in os.listdir(image_path_)]

with tf.Session(graph=graph) as sess:
    # FIX: load the frozen graph ONCE, outside the image loop.  The original
    # called read_pb_return_tensors() for every image, re-importing the
    # graph-def each time (duplicate graph nodes, steadily growing memory).
    return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)

    for image_path, image_out_path in pic_list_path:
        original_image = cv2.imread(image_path)
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        original_image_size = original_image.shape[:2]
        # Resize/normalize to the network input and add the batch axis.
        image_data = utils.image_pretreat_process(np.copy(original_image), input_size)
        image_data = image_data[np.newaxis, ...]

        pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
            [return_tensors[1], return_tensors[2], return_tensors[3]],
            feed_dict={return_tensors[0]: image_data})

        # Flatten all three heads into one (N, 5 + num_classes) array.
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)

        # Score threshold 0.2, then NMS at IoU 0.5.
        bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.2)
        bboxes = utils.nms(bboxes, 0.5, method='nms')
        print(len(bboxes))

        image = utils.draw_bbox(original_image, bboxes)
        image = Image.fromarray(image)
        image.show()  # NOTE(review): opens a viewer per image; drop for batch runs
        image = np.array(image)
        image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
        cv2.imwrite(image_out_path, image)
"""Unit tests for the tiledbvcf Python bindings.

Covers counting, attribute queries, partitioned/incomplete reads, and sample
ingestion against small fixture arrays and VCFs shipped with libtiledbvcf.
"""
import os

import numpy as np
import pandas as pd
import pytest
import tiledbvcf

# Directory containing this file
CONTAINING_DIR = os.path.abspath(os.path.dirname(__file__))

# Test inputs directory
TESTS_INPUT_DIR = os.path.abspath(
    os.path.join(CONTAINING_DIR, "../../../libtiledbvcf/test/inputs")
)


def _check_dfs(expected, actual):
    """Assert two dataframes are equal, comparing ndarray-valued cells element-wise."""

    def assert_series(s1, s2):
        # isinstance instead of `type(...) ==` (idiomatic, handles subclasses).
        if isinstance(s2.iloc[0], np.ndarray):
            assert len(s1) == len(s2)
            for i in range(0, len(s1)):
                assert np.array_equal(s1.iloc[i], s2.iloc[i])
        else:
            assert s1.equals(s2)

    # Check both directions so extra/missing columns are caught.
    for k in expected:
        assert_series(expected[k], actual[k])
    for k in actual:
        assert_series(expected[k], actual[k])


@pytest.fixture
def test_ds():
    return tiledbvcf.Dataset(
        os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")
    )


@pytest.fixture
def test_ds_attrs():
    return tiledbvcf.Dataset(
        os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples_GT_DP_PL")
    )


def test_basic_count(test_ds):
    assert test_ds.count() == 14


def test_read_must_specify_attrs(test_ds):
    with pytest.raises(Exception):
        df = test_ds.read()


def test_retrieve_attributes(test_ds):
    builtin_attrs = [
        "sample_name", "contig", "pos_start", "pos_end", "alleles", "id",
        "fmt", "info", "filters", "qual",
        "query_bed_end", "query_bed_start", "query_bed_line",
    ]
    assert sorted(test_ds.attributes(attr_type="builtin")) == sorted(builtin_attrs)

    info_attrs = [
        "info_BaseQRankSum", "info_ClippingRankSum", "info_DP", "info_DS",
        "info_END", "info_HaplotypeScore", "info_InbreedingCoeff",
        "info_MLEAC", "info_MLEAF", "info_MQ", "info_MQ0", "info_MQRankSum",
        "info_ReadPosRankSum",
    ]
    assert test_ds.attributes(attr_type="info") == info_attrs

    fmt_attrs = [
        "fmt_AD", "fmt_DP", "fmt_GQ", "fmt_GT", "fmt_MIN_DP", "fmt_PL", "fmt_SB",
    ]
    assert test_ds.attributes(attr_type="fmt") == fmt_attrs


def test_retrieve_samples(test_ds):
    assert test_ds.samples() == ["HG00280", "HG01762"]


def test_read_attrs(test_ds_attrs):
    # The returned columns must exactly mirror the requested attrs,
    # including when the attr set changes between reads.
    attrs = ["sample_name"]
    df = test_ds_attrs.read(attrs=attrs)
    assert df.columns.values.tolist() == attrs

    attrs = ["sample_name", "fmt_GT"]
    df = test_ds_attrs.read(attrs=attrs)
    assert df.columns.values.tolist() == attrs

    attrs = ["sample_name"]
    df = test_ds_attrs.read(attrs=attrs)
    assert df.columns.values.tolist() == attrs


def test_basic_reads(test_ds):
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(
                ["HG00280", "HG01762", "HG00280", "HG01762", "HG00280",
                 "HG01762", "HG00280", "HG00280", "HG00280", "HG00280",
                 "HG00280", "HG00280", "HG00280", "HG00280"]
            ),
            "pos_start": pd.Series(
                [12141, 12141, 12546, 12546, 13354, 13354, 13375, 13396,
                 13414, 13452, 13520, 13545, 17319, 17480],
                dtype=np.int32,
            ),
            "pos_end": pd.Series(
                [12277, 12277, 12771, 12771, 13374, 13389, 13395, 13413,
                 13451, 13519, 13544, 13689, 17479, 17486],
                dtype=np.int32,
            ),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])

    # Exercise both the pandas and Arrow read paths.
    for use_arrow in [False, True]:
        func = test_ds.read_arrow if use_arrow else test_ds.read
        df = func(attrs=["sample_name", "pos_start", "pos_end"])
        if use_arrow:
            df = df.to_pandas()
        _check_dfs(
            expected_df,
            df.sort_values(ignore_index=True, by=["sample_name", "pos_start"]),
        )

    # Region intersection
    df = test_ds.read(
        attrs=["sample_name", "pos_start", "pos_end"], regions=["1:12700-13400"]
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(
                ["HG00280", "HG01762", "HG00280", "HG01762", "HG00280", "HG00280"]
            ),
            "pos_start": pd.Series(
                [12546, 12546, 13354, 13354, 13375, 13396], dtype=np.int32
            ),
            "pos_end": pd.Series(
                [12771, 12771, 13374, 13389, 13395, 13413], dtype=np.int32
            ),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )

    # Region and sample intersection
    df = test_ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12700-13400"],
        samples=["HG01762"],
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(["HG01762", "HG01762"]),
            "pos_start": pd.Series([12546, 13354], dtype=np.int32),
            "pos_end": pd.Series([12771, 13389], dtype=np.int32),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )

    # Sample only
    df = test_ds.read(
        attrs=["sample_name", "pos_start", "pos_end"], samples=["HG01762"]
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(["HG01762", "HG01762", "HG01762"]),
            "pos_start": pd.Series([12141, 12546, 13354], dtype=np.int32),
            "pos_end": pd.Series([12277, 12771, 13389], dtype=np.int32),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )


def test_multiple_counts(test_ds):
    # Repeated counts with varying filters must not affect each other.
    assert test_ds.count() == 14
    assert test_ds.count() == 14
    assert test_ds.count(regions=["1:12700-13400"]) == 6
    assert test_ds.count(samples=["HG00280"], regions=["1:12700-13400"]) == 4
    assert test_ds.count() == 14
    assert test_ds.count(samples=["HG01762"]) == 3
    assert test_ds.count(samples=["HG00280"]) == 11


def test_empty_region(test_ds):
    assert test_ds.count(regions=["12:1-1000000"]) == 0


def test_missing_sample_raises_exception(test_ds):
    with pytest.raises(RuntimeError):
        test_ds.count(samples=["abcde"])


# TODO remove skip
@pytest.mark.skip
def test_bad_contig_raises_exception(test_ds):
    with pytest.raises(RuntimeError):
        test_ds.count(regions=["chr1:1-1000000"])
    with pytest.raises(RuntimeError):
        test_ds.count(regions=["1"])
    with pytest.raises(RuntimeError):
        test_ds.count(regions=["1:100-"])
    with pytest.raises(RuntimeError):
        test_ds.count(regions=["1:-100"])


def test_bad_attr_raises_exception(test_ds):
    with pytest.raises(RuntimeError):
        test_ds.read(attrs=["abcde"], regions=["1:12700-13400"])


def test_read_write_mode_exceptions():
    # Write-only operations fail on a read-mode dataset and vice versa.
    ds = tiledbvcf.Dataset(os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples"))
    samples = [os.path.join(TESTS_INPUT_DIR, s) for s in ["small.bcf", "small2.bcf"]]
    with pytest.raises(Exception):
        ds.create_dataset()
    with pytest.raises(Exception):
        ds.ingest_samples(samples)

    ds = tiledbvcf.Dataset(
        os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples"), mode="w"
    )
    with pytest.raises(Exception):
        ds.count()


def test_incomplete_reads():
    # Using undocumented "0 MB" budget to test incomplete reads.
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")
    cfg = tiledbvcf.ReadConfig(memory_budget_mb=0)
    test_ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)

    df = test_ds.read(attrs=["pos_end"], regions=["1:12700-13400"])
    assert not test_ds.read_completed()
    assert len(df) == 2
    _check_dfs(
        pd.DataFrame.from_dict({"pos_end": np.array([12771, 12771], dtype=np.int32)}),
        df,
    )

    df = test_ds.continue_read()
    assert not test_ds.read_completed()
    assert len(df) == 2
    _check_dfs(
        pd.DataFrame.from_dict({"pos_end": np.array([13374, 13389], dtype=np.int32)}),
        df,
    )

    df = test_ds.continue_read()
    assert test_ds.read_completed()
    assert len(df) == 2
    _check_dfs(
        pd.DataFrame.from_dict({"pos_end": np.array([13395, 13413], dtype=np.int32)}),
        df,
    )

    # test incomplete via read_arrow
    table = test_ds.read_arrow(attrs=["pos_end"], regions=["1:12700-13400"])
    assert not test_ds.read_completed()
    assert len(table) == 2
    _check_dfs(
        pd.DataFrame.from_dict({"pos_end": np.array([12771, 12771], dtype=np.int32)}),
        table.to_pandas(),
    )

    table = test_ds.continue_read_arrow()
    assert not test_ds.read_completed()
    assert len(table) == 2
    _check_dfs(
        pd.DataFrame.from_dict({"pos_end": np.array([13374, 13389], dtype=np.int32)}),
        table.to_pandas(),
    )

    table = test_ds.continue_read_arrow()
    assert test_ds.read_completed()
    assert len(table) == 2
    _check_dfs(
        pd.DataFrame.from_dict({"pos_end": np.array([13395, 13413], dtype=np.int32)}),
        table.to_pandas(),
    )


def test_incomplete_read_generator():
    # Using undocumented "0 MB" budget to test incomplete reads.
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")
    cfg = tiledbvcf.ReadConfig(memory_budget_mb=0)
    test_ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)

    overall_df = None
    for df in test_ds.read_iter(attrs=["pos_end"], regions=["1:12700-13400"]):
        if overall_df is None:
            overall_df = df
        else:
            # BUGFIX: DataFrame.append was deprecated in pandas 1.4 and
            # removed in pandas 2.0; use pd.concat instead.
            overall_df = pd.concat([overall_df, df], ignore_index=True)

    assert len(overall_df) == 6
    _check_dfs(
        pd.DataFrame.from_dict(
            {
                "pos_end": np.array(
                    [12771, 12771, 13374, 13389, 13395, 13413], dtype=np.int32
                )
            }
        ),
        overall_df,
    )


def test_read_filters(test_ds):
    df = test_ds.read(
        attrs=["sample_name", "pos_start", "pos_end", "filters"],
        regions=["1:12700-13400"],
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(
                ["HG00280", "HG01762", "HG00280", "HG01762", "HG00280", "HG00280"]
            ),
            "pos_start": pd.Series(
                [12546, 12546, 13354, 13354, 13375, 13396], dtype=np.int32
            ),
            "pos_end": pd.Series(
                [12771, 12771, 13374, 13389, 13395, 13413], dtype=np.int32
            ),
            # BUGFIX: np.object (deprecated alias of the builtin `object`)
            # was removed in NumPy 1.24.
            "filters": pd.Series(
                map(
                    lambda lst: np.array(lst, dtype=object),
                    [None, None, ["LowQual"], None, None, None],
                )
            ),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )


def test_read_var_length_filters(tmp_path):
    uri = os.path.join(tmp_path, "dataset")
    ds = tiledbvcf.Dataset(uri, mode="w")
    samples = [os.path.join(TESTS_INPUT_DIR, s) for s in ["varLenFilter.vcf.gz"]]
    ds.create_dataset()
    ds.ingest_samples(samples)

    ds = tiledbvcf.Dataset(uri, mode="r")
    df = ds.read(["pos_start", "filters"])

    expected_df = pd.DataFrame(
        {
            "pos_start": pd.Series(
                [12141, 12546, 13354, 13375, 13396, 13414, 13452, 13520,
                 13545, 17319, 17480],
                dtype=np.int32,
            ),
            "filters": pd.Series(
                map(
                    lambda lst: np.array(lst, dtype=object),
                    [
                        ["PASS"], ["PASS"], ["ANEUPLOID", "LowQual"], ["PASS"],
                        ["PASS"], ["ANEUPLOID", "LOWQ", "LowQual"], ["PASS"],
                        ["PASS"], ["PASS"], ["LowQual"], ["PASS"],
                    ],
                )
            ),
        }
    ).sort_values(ignore_index=True, by=["pos_start"])
    _check_dfs(expected_df, df.sort_values(ignore_index=True, by=["pos_start"]))


def test_read_alleles(test_ds):
    df = test_ds.read(
        attrs=["sample_name", "pos_start", "pos_end", "alleles"],
        regions=["1:12100-13360", "1:13500-17350"],
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(
                ["HG00280", "HG01762", "HG00280", "HG01762", "HG00280",
                 "HG01762", "HG00280", "HG00280", "HG00280", "HG00280"]
            ),
            "pos_start": pd.Series(
                [12141, 12141, 12546, 12546, 13354, 13354, 13452, 13520, 13545, 17319],
                dtype=np.int32,
            ),
            "pos_end": pd.Series(
                [12277, 12277, 12771, 12771, 13374, 13389, 13519, 13544, 13689, 17479],
                dtype=np.int32,
            ),
            "alleles": pd.Series(
                map(
                    lambda lst: np.array(lst, dtype=object),
                    [
                        ["C", "<NON_REF>"], ["C", "<NON_REF>"], ["G", "<NON_REF>"],
                        ["G", "<NON_REF>"], ["T", "<NON_REF>"], ["T", "<NON_REF>"],
                        ["G", "<NON_REF>"], ["G", "<NON_REF>"], ["G", "<NON_REF>"],
                        ["T", "<NON_REF>"],
                    ],
                )
            ),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )


def test_read_multiple_alleles(tmp_path):
    uri = os.path.join(tmp_path, "dataset")
    ds = tiledbvcf.Dataset(uri, mode="w")
    samples = [os.path.join(TESTS_INPUT_DIR, s) for s in ["small3.bcf", "small.bcf"]]
    ds.create_dataset()
    ds.ingest_samples(samples)

    ds = tiledbvcf.Dataset(uri, mode="r")
    df = ds.read(
        attrs=["sample_name", "pos_start", "alleles", "id", "filters"],
        regions=["1:70100-1300000"],
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(["HG00280", "HG00280"]),
            "pos_start": pd.Series([866511, 1289367], dtype=np.int32),
            "alleles": pd.Series(
                map(
                    lambda lst: np.array(lst, dtype=object),
                    [["T", "CCCCTCCCT", "C", "CCCCTCCCTCCCT", "CCCCT"], ["CTG", "C"]],
                )
            ),
            "id": pd.Series([".", "rs1497816"]),
            "filters": pd.Series(
                map(
                    lambda lst: np.array(lst, dtype=object),
                    [["LowQual"], ["LowQual"]],
                )
            ),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )


def test_read_var_len_attrs(test_ds):
    df = test_ds.read(
        attrs=["sample_name", "pos_start", "pos_end", "fmt_DP", "fmt_PL"],
        regions=["1:12100-13360", "1:13500-17350"],
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(
                ["HG00280", "HG01762", "HG00280", "HG01762", "HG00280",
                 "HG01762", "HG00280", "HG00280", "HG00280", "HG00280"]
            ),
            "pos_start": pd.Series(
                [12141, 12141, 12546, 12546, 13354, 13354, 13452, 13520, 13545, 17319],
                dtype=np.int32,
            ),
            "pos_end": pd.Series(
                [12277, 12277, 12771, 12771, 13374, 13389, 13519, 13544, 13689, 17479],
                dtype=np.int32,
            ),
            "fmt_DP": pd.Series([0, 0, 0, 0, 15, 64, 10, 6, 0, 0], dtype=np.int32),
            "fmt_PL": pd.Series(
                map(
                    lambda lst: np.array(lst, dtype=np.int32),
                    [
                        [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0],
                        [0, 24, 360], [0, 66, 990], [0, 21, 210], [0, 6, 90],
                        [0, 0, 0], [0, 0, 0],
                    ],
                )
            ),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )


def test_sample_args(test_ds, tmp_path):
    # `samples` list and `samples_file` must give identical results,
    # and passing both must raise.
    sample_file = os.path.join(tmp_path, "1_sample.txt")
    with open(sample_file, "w") as file:
        file.write("HG00280")
    region = ["1:12141-12141"]

    df1 = test_ds.read(["sample_name"], regions=region, samples=["HG00280"])
    df2 = test_ds.read(["sample_name"], regions=region, samples_file=sample_file)
    _check_dfs(df1, df2)

    with pytest.raises(TypeError):
        test_ds.read(
            attrs=["sample_name"],
            regions=region,
            samples=["HG00280"],
            samples_file=sample_file,
        )


def test_read_null_attrs(tmp_path):
    uri = os.path.join(tmp_path, "dataset")
    ds = tiledbvcf.Dataset(uri, mode="w")
    samples = [os.path.join(TESTS_INPUT_DIR, s) for s in ["small3.bcf", "small.bcf"]]
    ds.create_dataset()
    ds.ingest_samples(samples)

    ds = tiledbvcf.Dataset(uri, mode="r")
    df = ds.read(
        attrs=[
            "sample_name", "pos_start", "pos_end",
            "info_BaseQRankSum", "info_DP", "fmt_DP", "fmt_MIN_DP",
        ],
        regions=["1:12700-13400", "1:69500-69800"],
    )
    expected_df = pd.DataFrame(
        {
            "sample_name": pd.Series(
                ["HG00280", "HG00280", "HG00280", "HG00280", "HG01762",
                 "HG01762", "HG00280", "HG00280", "HG00280", "HG00280",
                 "HG00280", "HG00280"]
            ),
            "pos_start": pd.Series(
                [12546, 13354, 13375, 13396, 12546, 13354, 69371, 69511,
                 69512, 69761, 69762, 69771],
                dtype=np.int32,
            ),
            "pos_end": pd.Series(
                [12771, 13374, 13395, 13413, 12771, 13389, 69510, 69511,
                 69760, 69761, 69770, 69834],
                dtype=np.int32,
            ),
            # None marks records where the INFO field is absent.
            "info_BaseQRankSum": pd.Series(
                [None, None, None, None, None, None, None,
                 np.array([-0.787], dtype=np.float32), None,
                 np.array([1.97], dtype=np.float32), None, None]
            ),
            "info_DP": pd.Series(
                [None, None, None, None, None, None, None,
                 np.array([89], dtype=np.int32), None,
                 np.array([24], dtype=np.int32), None, None]
            ),
            "fmt_DP": pd.Series(
                [0, 15, 6, 2, 0, 64, 180, 88, 97, 24, 23, 21], dtype=np.int32
            ),
            "fmt_MIN_DP": pd.Series([0, 14, 3, 1, 0, 30, 20, None, 24, None, 23, 19]),
        }
    ).sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    _check_dfs(
        expected_df, df.sort_values(ignore_index=True, by=["sample_name", "pos_start"])
    )


def test_read_config():
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")
    cfg = tiledbvcf.ReadConfig()
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)

    cfg = tiledbvcf.ReadConfig(
        memory_budget_mb=512,
        region_partition=(0, 3),
        tiledb_config=["sm.tile_cache_size=0", "sm.compute_concurrency_level=1"],
    )
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)

    with pytest.raises(TypeError):
        cfg = tiledbvcf.ReadConfig(abc=123)


# This test is skipped because running it in the same process as all the normal
# tests will cause it to fail (the first context created in a process determines
# the number of TBB threads allowed).
@pytest.mark.skip
def test_tbb_threads_config():
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")
    cfg = tiledbvcf.ReadConfig(tiledb_config=["sm.num_tbb_threads=3"])
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)

    cfg = tiledbvcf.ReadConfig(tiledb_config=["sm.num_tbb_threads=4"])
    with pytest.raises(RuntimeError):
        ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)


def test_read_limit():
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")
    cfg = tiledbvcf.ReadConfig(limit=3)
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end", "fmt_DP", "fmt_PL"],
        regions=["1:12100-13360", "1:13500-17350"],
    )
    assert len(df) == 3


def test_region_partitioned_read():
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")

    cfg = tiledbvcf.ReadConfig(region_partition=(0, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12000-13000", "1:17000-18000"],
    )
    assert len(df) == 4

    cfg = tiledbvcf.ReadConfig(region_partition=(1, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12000-13000", "1:17000-18000"],
    )
    assert len(df) == 2

    # Too many partitions still produces results
    cfg = tiledbvcf.ReadConfig(region_partition=(1, 3))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12000-13000", "1:17000-18000"],
    )
    assert len(df) == 2

    # Error: index >= num partitions
    cfg = tiledbvcf.ReadConfig(region_partition=(2, 2))
    with pytest.raises(RuntimeError):
        ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)


def test_sample_partitioned_read():
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")

    cfg = tiledbvcf.ReadConfig(sample_partition=(0, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"], regions=["1:12000-18000"]
    )
    assert len(df) == 11
    assert (df.sample_name == "HG00280").all()

    cfg = tiledbvcf.ReadConfig(sample_partition=(1, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"], regions=["1:12000-18000"]
    )
    assert len(df) == 3
    assert (df.sample_name == "HG01762").all()

    # Error: too many partitions
    cfg = tiledbvcf.ReadConfig(sample_partition=(1, 3))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    with pytest.raises(RuntimeError):
        df = ds.read(
            attrs=["sample_name", "pos_start", "pos_end"], regions=["1:12000-18000"]
        )

    # Error: index >= num partitions
    cfg = tiledbvcf.ReadConfig(sample_partition=(2, 2))
    with pytest.raises(RuntimeError):
        ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)


def test_sample_and_region_partitioned_read():
    uri = os.path.join(TESTS_INPUT_DIR, "arrays/v3/ingested_2samples")

    cfg = tiledbvcf.ReadConfig(region_partition=(0, 2), sample_partition=(0, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12000-13000", "1:17000-18000"],
    )
    assert len(df) == 2
    assert (df.sample_name == "HG00280").all()

    cfg = tiledbvcf.ReadConfig(region_partition=(0, 2), sample_partition=(1, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12000-13000", "1:17000-18000"],
    )
    assert len(df) == 2
    assert (df.sample_name == "HG01762").all()

    cfg = tiledbvcf.ReadConfig(region_partition=(1, 2), sample_partition=(0, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12000-13000", "1:17000-18000"],
    )
    assert len(df) == 2
    assert (df.sample_name == "HG00280").all()

    cfg = tiledbvcf.ReadConfig(region_partition=(1, 2), sample_partition=(1, 2))
    ds = tiledbvcf.Dataset(uri, mode="r", cfg=cfg)
    df = ds.read(
        attrs=["sample_name", "pos_start", "pos_end"],
        regions=["1:12000-13000", "1:17000-18000"],
    )
    assert len(df) == 0


def test_large_export_correctness():
    # NOTE(review): requires network access to a public S3 demo bucket.
    uri = "s3://tiledb-inc-demo-data/tiledbvcf-arrays/v4/vcf-samples-20"
    ds = tiledbvcf.Dataset(uri, mode="r", verbose=True)
    df = ds.read(
        attrs=[
            "sample_name", "contig", "pos_start", "pos_end",
            "query_bed_start", "query_bed_end",
        ],
        samples=["v2-DjrIAzkP", "v2-YMaDHIoW", "v2-usVwJUmo", "v2-ZVudhauk"],
        bed_file=os.path.join(
            TESTS_INPUT_DIR, "E001_15_coreMarks_dense_filtered.bed.gz"
        ),
    )

    # total number of exported records
    assert df.shape[0] == 1172081

    # number of unique exported records
    record_index = ["sample_name", "contig", "pos_start"]
    assert df[record_index].drop_duplicates().shape[0] == 1168430


def test_basic_ingest(tmp_path):
    # Create the dataset
    uri = os.path.join(tmp_path, "dataset")
    ds = tiledbvcf.Dataset(uri, mode="w")
    samples = [os.path.join(TESTS_INPUT_DIR, s) for s in ["small.bcf", "small2.bcf"]]
    ds.create_dataset()
    ds.ingest_samples(samples)

    # Open it back in read mode and check some queries
    ds = tiledbvcf.Dataset(uri, mode="r")
    assert ds.count() == 14
    assert ds.count(regions=["1:12700-13400"]) == 6
    assert ds.count(samples=["HG00280"], regions=["1:12700-13400"]) == 4


def test_incremental_ingest(tmp_path):
    uri = os.path.join(tmp_path, "dataset")
    ds = tiledbvcf.Dataset(uri, mode="w")
    ds.create_dataset()
    ds.ingest_samples([os.path.join(TESTS_INPUT_DIR, "small.bcf")])
    ds.ingest_samples([os.path.join(TESTS_INPUT_DIR, "small2.bcf")])

    # Open it back in read mode and check some queries
    ds = tiledbvcf.Dataset(uri, mode="r")
    assert ds.count() == 14
    assert ds.count(regions=["1:12700-13400"]) == 6
    assert ds.count(samples=["HG00280"], regions=["1:12700-13400"]) == 4


def test_ingest_disable_merging(tmp_path):
    # Create the dataset
    uri = os.path.join(tmp_path, "dataset_disable_merging")

    cfg = tiledbvcf.ReadConfig(memory_budget_mb=1024)
    attrs = ["sample_name", "contig", "pos_start", "pos_end"]

    ds = tiledbvcf.Dataset(uri, mode="w")
    samples = [
        os.path.join(TESTS_INPUT_DIR, s) for s in ["v2-DjrIAzkP-downsampled.vcf.gz"]
    ]
    ds.create_dataset()
    ds.ingest_samples(samples, contig_fragment_merging=False)

    # Open it back in read mode and check some queries
    ds = tiledbvcf.Dataset(uri, cfg=cfg, mode="r", verbose=False)
    df = ds.read(attrs=attrs)
    assert ds.count() == 246
    assert ds.count(regions=["chrX:9032893-9032893"]) == 1

    # Create the dataset
    uri = os.path.join(tmp_path, "dataset_merging_separate")
    ds2 = tiledbvcf.Dataset(uri, mode="w", verbose=True)
    samples = [
        os.path.join(TESTS_INPUT_DIR, s) for s in ["v2-DjrIAzkP-downsampled.vcf.gz"]
    ]
    ds2.create_dataset()
    ds2.ingest_samples(samples, contigs_to_keep_separate=["chr1"])

    # Open it back in read mode and check some queries
    ds2 = tiledbvcf.Dataset(uri, cfg=cfg, mode="r", verbose=True)
    df2 = ds2.read(attrs=attrs)
    # Both ingestion modes must produce identical data.
    print(df.equals(df2))
    assert df.equals(df2)
    assert ds.count() == 246
    assert ds.count(regions=["chrX:9032893-9032893"]) == 1


def test_ingest_merging_separate(tmp_path):
    # Create the dataset
    uri = os.path.join(tmp_path, "dataset_merging_separate")
    ds = tiledbvcf.Dataset(uri, mode="w")
    samples = [
        os.path.join(TESTS_INPUT_DIR, s) for s in ["v2-DjrIAzkP-downsampled.vcf.gz"]
    ]
    ds.create_dataset()
    ds.ingest_samples(samples, contigs_to_keep_separate=["chr1"])

    # Open it back in read mode and check some queries
    ds = tiledbvcf.Dataset(uri, mode="r")
    assert ds.count() == 246
    assert ds.count(regions=["chrX:9032893-9032893"]) == 1


def test_ingest_merging(tmp_path):
    # Create the dataset
    uri = os.path.join(tmp_path, "dataset_merging")
    ds = tiledbvcf.Dataset(uri, mode="w")
    samples = [
        os.path.join(TESTS_INPUT_DIR, s) for s in ["v2-DjrIAzkP-downsampled.vcf.gz"]
    ]
    ds.create_dataset()
    ds.ingest_samples(samples, contigs_to_allow_merging=["chr1", "chr2"])

    # Open it back in read mode and check some queries
    ds = tiledbvcf.Dataset(uri, mode="r")
    assert ds.count() == 246
    assert ds.count(regions=["chrX:9032893-9032893"]) == 1


def test_vcf_attrs(tmp_path):
    # Create the dataset with vcf info and fmt attributes
    uri = os.path.join(tmp_path, "vcf_attrs_dataset")
    ds = tiledbvcf.Dataset(uri, mode="w")
    vcf_uri = os.path.join(TESTS_INPUT_DIR, "v2-DjrIAzkP-downsampled.vcf.gz")
    ds.create_dataset(vcf_attrs=vcf_uri)

    # Open it back in read mode and check attributes
    ds = tiledbvcf.Dataset(uri, mode="r")
    queryable_attrs = [
        "alleles", "contig", "filters", "fmt",
        "fmt_DP", "fmt_GQ", "fmt_GT", "fmt_MIN_DP", "fmt_PS", "fmt_SB",
        "fmt_STR_MAX_LEN", "fmt_STR_PERIOD", "fmt_STR_TIMES",
        "fmt_VAR_CONTEXT", "fmt_VAR_TYPE",
        "id", "info",
        "info_AC", "info_AC_AFR", "info_AC_AMR", "info_AC_Adj",
        "info_AC_CONSANGUINEOUS", "info_AC_EAS", "info_AC_FEMALE",
        "info_AC_FIN", "info_AC_Hemi", "info_AC_Het", "info_AC_Hom",
        "info_AC_MALE", "info_AC_NFE", "info_AC_OTH", "info_AC_POPMAX",
        "info_AC_SAS",
        "info_AF", "info_AF_AFR", "info_AF_AMR", "info_AF_Adj",
        "info_AF_EAS", "info_AF_FIN", "info_AF_NFE", "info_AF_OTH",
        "info_AF_SAS",
        "info_AGE_HISTOGRAM_HET", "info_AGE_HISTOGRAM_HOM",
        "info_AN", "info_AN_AFR", "info_AN_AMR", "info_AN_Adj",
        "info_AN_CONSANGUINEOUS", "info_AN_EAS", "info_AN_FEMALE",
        "info_AN_FIN", "info_AN_MALE", "info_AN_NFE", "info_AN_OTH",
        "info_AN_POPMAX", "info_AN_SAS",
        "info_BaseQRankSum", "info_CCC", "info_CSQ", "info_ClippingRankSum",
        "info_DB", "info_DOUBLETON_DIST", "info_DP", "info_DP_HIST",
        "info_DS", "info_END", "info_ESP_AC", "info_ESP_AF_GLOBAL",
        "info_ESP_AF_POPMAX", "info_FS", "info_GQ_HIST", "info_GQ_MEAN",
        "info_GQ_STDDEV", "info_HWP", "info_HaplotypeScore",
        "info_Hemi_AFR", "info_Hemi_AMR", "info_Hemi_EAS", "info_Hemi_FIN",
        "info_Hemi_NFE", "info_Hemi_OTH", "info_Hemi_SAS",
        "info_Het_AFR", "info_Het_AMR", "info_Het_EAS", "info_Het_FIN",
        "info_Het_NFE", "info_Het_OTH", "info_Het_SAS",
        "info_Hom_AFR", "info_Hom_AMR", "info_Hom_CONSANGUINEOUS",
        "info_Hom_EAS", "info_Hom_FIN", "info_Hom_NFE", "info_Hom_OTH",
        "info_Hom_SAS",
        "info_InbreedingCoeff", "info_K1_RUN", "info_K2_RUN", "info_K3_RUN",
        "info_KG_AC", "info_KG_AF_GLOBAL", "info_KG_AF_POPMAX",
        "info_MLEAC", "info_MLEAF", "info_MQ", "info_MQ0", "info_MQRankSum",
        "info_NCC", "info_NEGATIVE_TRAIN_SITE", "info_OLD_VARIANT",
        "info_POPMAX", "info_POSITIVE_TRAIN_SITE", "info_QD",
        "info_ReadPosRankSum", "info_VQSLOD",
        "info_clinvar_conflicted", "info_clinvar_measureset_id",
        "info_clinvar_mut", "info_clinvar_pathogenic", "info_culprit",
        "pos_end", "pos_start", "qual",
        "query_bed_end", "query_bed_line", "query_bed_start",
        "sample_name",
    ]
    # With vcf_attrs, info/fmt fields are materialized from the VCF header,
    # so the separate info/fmt attribute lists are empty.
    assert ds.attributes(attr_type="info") == []
    assert ds.attributes(attr_type="fmt") == []
    assert sorted(ds.attributes()) == sorted(queryable_attrs)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""XPU op tests for the ``unsqueeze2`` operator.

Covers axes given as an attribute, as a list of 1-element tensors
(``AxesTensorList``) and as a single tensor (``AxesTensor``), across the
dtypes reported by the XPU test-coverage registry.
"""

from __future__ import print_function

import unittest
import sys

sys.path.append("..")

import numpy as np
import paddle
import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestUnsqueeze2Op(XPUOpTestWrapper):
    """Wrapper class enumerated by create_test_class() per supported dtype."""

    def __init__(self):
        self.op_name = "unsqueeze2"
        self.use_dynamic_create_class = False

    class TestUnsqueeze2Op(XPUOpTest):
        """Base case: axes supplied via the op attribute."""

        def setUp(self):
            self.op_type = "unsqueeze2"
            self.use_mkldnn = False
            self.init_dtype()
            self.init_test_case()
            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype)
            }
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                # XShape content is not checked (no_check_set below);
                # only its presence/shape matters.
                "XShape": np.random.random(self.ori_shape).astype(self.dtype)
            }
            self.init_attrs()

        def init_dtype(self):
            # self.in_type is injected by the XPU test-coverage machinery.
            self.dtype = self.in_type

        def init_attrs(self):
            self.attrs = {"axes": self.axes}

        def init_test_case(self):
            self.ori_shape = (3, 40)
            self.axes = (1, 2)
            self.new_shape = (3, 1, 1, 40)

        def test_check_output(self):
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place, no_check_set=['XShape'])

        def test_check_grad(self):
            place = paddle.XPUPlace(0)
            if self.dtype in [np.float32, np.float64]:
                self.check_grad_with_place(place, ['X'], 'Out')
            elif self.dtype == np.bool_:
                # No gradient defined for bool inputs.
                return
            else:
                # Non-float dtypes need explicitly supplied output grads.
                user_defined_grad_outputs = np.random.random(
                    self.new_shape).astype(self.dtype)
                self.check_grad_with_place(
                    place, ['X'],
                    'Out',
                    user_defined_grad_outputs=user_defined_grad_outputs)

    # Correct: Single input index.
    class TestUnsqueeze2Op1(TestUnsqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (-1, )
            self.new_shape = (20, 5, 1)

    # Correct: Mixed input axis.
    class TestUnsqueeze2Op2(TestUnsqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (0, -1)
            self.new_shape = (1, 20, 5, 1)

    # Correct: There is duplicated axis.
    class TestUnsqueeze2Op3(TestUnsqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (10, 2, 5)
            self.axes = (0, 3, 3)
            self.new_shape = (1, 10, 2, 1, 1, 5)

    # Correct: Reversed axes.
    class TestUnsqueeze2Op4(TestUnsqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (10, 2, 5)
            self.axes = (3, 1, 1)
            self.new_shape = (10, 1, 1, 2, 5, 1)

    # axes is a list(with tensor)
    class TestUnsqueeze2Op_AxesTensorList(XPUOpTest):
        """Axes supplied as AxesTensorList: one int32 tensor per axis."""

        def setUp(self):
            self.op_type = "unsqueeze2"
            self.use_mkldnn = False
            self.init_dtype()
            self.init_test_case()

            axes_tensor_list = []
            for index, ele in enumerate(self.axes):
                axes_tensor_list.append(("axes" + str(index), np.ones(
                    (1)).astype('int32') * ele))

            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype),
                "AxesTensorList": axes_tensor_list
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                "XShape": np.random.random(self.ori_shape).astype(self.dtype)
            }

        def init_dtype(self):
            self.dtype = self.in_type

        def test_check_output(self):
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place, no_check_set=['XShape'])

        def test_check_grad(self):
            place = paddle.XPUPlace(0)
            if self.dtype in [np.float32, np.float64]:
                self.check_grad_with_place(place, ['X'], 'Out')
            else:
                return

        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (1, 2)
            self.new_shape = (20, 1, 1, 5)

        def init_attrs(self):
            # Axes come from the tensor list, so the attr dict stays empty.
            self.attrs = {}

    class TestUnsqueeze2Op1_AxesTensorList(TestUnsqueeze2Op_AxesTensorList):
        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (-1, )
            self.new_shape = (20, 5, 1)

    class TestUnsqueeze2Op2_AxesTensorList(TestUnsqueeze2Op_AxesTensorList):
        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (0, -1)
            self.new_shape = (1, 20, 5, 1)

    class TestUnsqueeze2Op3_AxesTensorList(TestUnsqueeze2Op_AxesTensorList):
        def init_test_case(self):
            self.ori_shape = (10, 2, 5)
            self.axes = (0, 3, 3)
            self.new_shape = (1, 10, 2, 1, 1, 5)

    class TestUnsqueeze2Op4_AxesTensorList(TestUnsqueeze2Op_AxesTensorList):
        def init_test_case(self):
            self.ori_shape = (10, 2, 5)
            self.axes = (3, 1, 1)
            self.new_shape = (10, 1, 1, 2, 5, 1)

    # axes is a Tensor
    class TestUnsqueeze2Op_AxesTensor(XPUOpTest):
        """Axes supplied as a single int32 AxesTensor input."""

        def setUp(self):
            self.op_type = "unsqueeze2"
            self.use_mkldnn = False
            self.init_test_case()
            self.init_dtype()

            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype),
                "AxesTensor": np.array(self.axes).astype("int32")
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                "XShape": np.random.random(self.ori_shape).astype(self.dtype)
            }

        def init_dtype(self):
            self.dtype = self.in_type

        def test_check_output(self):
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place, no_check_set=['XShape'])

        def test_check_grad(self):
            place = paddle.XPUPlace(0)
            if self.dtype in [np.float32, np.float64]:
                self.check_grad_with_place(place, ['X'], 'Out')
            else:
                return

        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (1, 2)
            self.new_shape = (20, 1, 1, 5)

        def init_attrs(self):
            self.attrs = {}

    class TestUnsqueeze2Op1_AxesTensor(TestUnsqueeze2Op_AxesTensor):
        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (-1, )
            self.new_shape = (20, 5, 1)

    class TestUnsqueeze2Op2_AxesTensor(TestUnsqueeze2Op_AxesTensor):
        def init_test_case(self):
            self.ori_shape = (20, 5)
            self.axes = (0, -1)
            self.new_shape = (1, 20, 5, 1)

    class TestUnsqueeze2Op3_AxesTensor(TestUnsqueeze2Op_AxesTensor):
        def init_test_case(self):
            self.ori_shape = (10, 2, 5)
            self.axes = (0, 3, 3)
            self.new_shape = (1, 10, 2, 1, 1, 5)

    class TestUnsqueeze2Op4_AxesTensor(TestUnsqueeze2Op_AxesTensor):
        def init_test_case(self):
            self.ori_shape = (10, 2, 5)
            self.axes = (3, 1, 1)
            self.new_shape = (10, 1, 1, 2, 5, 1)


# Instantiate one concrete test class per dtype the XPU kernel supports.
support_types = get_xpu_op_support_types("unsqueeze2")
for stype in support_types:
    create_test_class(globals(), XPUTestUnsqueeze2Op, stype)

if __name__ == "__main__":
    unittest.main()
from sigpipes.sources import SynergyLP
from sigpipes.sigoperator import Print, Sample, FeatureExtractor, ChannelSelect, Fft, \
    MVNormalization, RangeNormalization, FFtAsSignal
from sigpipes.plotting import Plot, FftPlot, GraphOpts
from sigpipes.sigoperator import CSVSaver, Hdf5
from glob import iglob
from pathlib import Path
from sigpipes.joiner import JoinChannels, CrossCorrelate
from sigpipes.pandas_support import FeatureFrame
import numpy as np


def rename(path, newname):
    """Return an absolute Path equal to *path* with its final component
    replaced by *newname*.

    Used to derive output-file names (CSV/HDF5/PNG) that live next to the
    input files selected by the glob pattern.
    """
    # Path.with_name is the stdlib idiom for "replace the last component";
    # the original hand-rolled this by mutating a list of path parts.
    return Path(path).with_name(newname).absolute()


# Glob pattern selecting the EMG recordings to process.
sources = "/home/fiser/data/emg/*.txt"

# Load every recording and clip it to the first 450 000 samples so all
# channels joined below have equal length.
signals = [SynergyLP(file).sigcontainer() | Sample(0, 450_000)
           for file in sorted(iglob(sources))]

gopts = GraphOpts(sharey=True)

# Full-length signals: persist as CSV + HDF5 and render an overview plot.
(JoinChannels(*signals).fromSources()
 | CSVSaver(rename(sources, "signals.csv"))
 | Hdf5(rename(sources, "signals.hdf5"))
 | Plot(file=rename(sources, "signals_full.png"), graph_opts=gopts))

# First three channels, 4.0-8.0 s window, mean/variance normalized, with
# EMG features extracted; the result is reused twice below.
eqsig = (JoinChannels(*signals).fromSources()
         | ChannelSelect([0, 1, 2])
         | Sample(4.0, 8.0)
         | MVNormalization()
         | CSVSaver(rename(sources, "processed_normalized_part.csv"))
         | Hdf5(rename(sources, "processed_normalized_part.hdf5"))
         | Plot(file=rename(sources, "signals_part.png"), graph_opts=gopts)
         | FeatureExtractor(dict(IEMG=True, MAV=True, SSI=True, VAR=True,
                                 RMS=True, WL=True, SC=[0.0001, 0.0002]))
         | Sample(5.0, 5.2)
         | Plot(file=rename(sources, "signals_detail.png"), graph_opts=gopts))

# Optional Excel export of the extracted features:
# (eqsig | FeatureFrame()).to_excel(rename(sources, "eqsignals.xls"))

# Spectrum of the normalized window (plotted up to 1200 Hz).
(eqsig
 | Fft()
 | FftPlot(file=rename(sources, "signals_part_spectrum.png"), frange=(1, 1200))
 | FFtAsSignal()
 | CSVSaver(rename(sources, "signals_part_spectre.csv")))

# All pairwise cross-correlations of channels 0/1/2 (including
# autocorrelations), clipped to 1.5 s and scaled into [-1, 1].
(eqsig | ChannelSelect([0, 0, 0, 1, 1, 2])
 | CrossCorrelate(eqsig | ChannelSelect([0, 1, 2, 1, 2, 2]), mode="full")
 | Sample(0, 1.5)
 | RangeNormalization(-1, 1)
 | CSVSaver(rename(sources, "signals_part_correlations.csv"))
 | Plot(file=rename(sources, "signals_part_correlation.png")))
import numpy as np import matplotlib.pyplot as plt import matplotlib import sqlite3 from datetime import datetime from matplotlib.dates import DateFormatter, HourLocator, MinuteLocator fs = 8 conn = sqlite3.connect('astrodek.sqlite') cur = conn.cursor() sql_script = ('''SELECT time, demand, ev_demand, pv_generation FROM results WHERE simulation_id = 1 and simulation_num = 1 ORDER BY id DESC''') sql = cur.execute(sql_script) x_values = [] y_values_houses = [] y_values_ev = [] y_values_pv = [] for row in sql: sim_hour = row[0] date_time = (datetime.strptime(sim_hour, '%H:%M:%S')) x_values.append(date_time) y_values_houses.append(row[1]) y_values_ev.append(row[2]) y_values_pv.append(row[3]) date_list = matplotlib.dates.date2num(x_values) fig, axes_array = plt.subplots(3, sharex=True, figsize=(9, 7)) axes_array[0].plot(date_list,y_values_houses,label="Houses") axes_array[0].set_ylabel("kWh", fontsize=fs) axes_array[0].tick_params(axis='both', which='major', labelsize=fs) axes_array[0].set_ylim([5,100]) axes_array[0].set_yticks(np.arange(0,100,20)) axes_array[0].legend(fontsize=fs) axes_array[1].plot(date_list, y_values_pv, label="PV") axes_array[1].set_ylabel("kWh", fontsize=fs) axes_array[1].tick_params(axis='both', which='major', labelsize=fs) axes_array[1].set_ylim([0,250]) axes_array[1].set_yticks(np.arange(0,250,50)) axes_array[1].legend(fontsize=fs) axes_array[2].plot(date_list, y_values_ev, label="EV") axes_array[2].set_ylabel("kWh", fontsize=fs) axes_array[2].tick_params(axis='both', which='major', labelsize=fs) axes_array[2].set_ylim([0,50]) axes_array[2].set_yticks(np.arange(0,50,10)) axes_array[2].legend(fontsize=fs) axes_array[2].xaxis.set_major_locator( MinuteLocator(np.arange(0,60,60)) ) axes_array[2].xaxis.set_major_formatter( DateFormatter('%H') ) axes_array[2].fmt_xdata = DateFormatter('%H') plt.tight_layout() plt.subplots_adjust(bottom=0.1,top=0.98, hspace=0.02) plt.show()