code
stringlengths
17
6.64M
class Cascading(Simulation):
    """
    This class simulates cascading failures on a network :cite:`crucitti2004model`.

    Each node starts with a random load and a capacity derived from its
    (sampled) betweenness centrality; when a node's load exceeds its capacity
    it sheds load onto its neighbors, which may overload and fail in turn.

    :param graph: an undirected NetworkX graph
    :param runs: an integer number of times to run the simulation
    :param steps: an integer number of steps to run a single simulation
    :param l: a float representing the maximum initial load for each node
    :param r: a float representing the amount of redundancy in the network
    :param **kwargs: see parent class Simulation for additional options
    """

    def __init__(self, graph, runs=10, steps=100, l=0.8, r=0.2, **kwargs):
        super().__init__(graph, runs, steps, **kwargs)
        # Defaults first, then caller-supplied kwargs override them.
        self.prm.update({'l': l, 'r': r, 'c': len(graph), 'robust_measure': 'largest_connected_component', 'k_a': 10, 'attack': 'id_node', 'attack_approx': None, 'k_d': None, 'defense': None})
        self.prm.update(kwargs)
        if (self.prm['plot_transition'] or self.prm['gif_animation']):
            (self.node_pos, self.edge_pos) = self.get_graph_coordinates()
        # Plots/animations are written under ./plots/<title>.
        self.save_dir = os.path.join(os.getcwd(), 'plots', self.get_plot_title(steps))
        os.makedirs(self.save_dir, exist_ok=True)
        # Baseline capacity: betweenness centrality approximated with 'c' pivots.
        self.capacity_og = nx.betweenness_centrality(self.graph, k=self.prm['c'], normalized=True, endpoints=True)
        # Largest capacity after the redundancy boost 'r' (used for scaling).
        self.max_val = (max(self.capacity_og.values()) * (1.0 + self.prm['r']))
        self.protected = set()
        self.failed = set()
        self.load = defaultdict()
        self.sim_info = defaultdict()
        self.reset_simulation()

    def reset_simulation(self):
        """
        Resets the simulation between each run
        """
        self.protected = set()
        self.failed = set()
        self.load = defaultdict()
        self.sim_info = defaultdict()
        self.capacity = self.capacity_og.copy()
        for n in self.graph.nodes:
            # Initial load: a random fraction (at most 'l') of baseline capacity;
            # capacity then receives the redundancy boost 'r'.
            self.load[n] = (self.capacity[n] * np.random.uniform(0, self.prm['l']))
            self.capacity[n] = (self.capacity[n] * (1.0 + self.prm['r']))
        self.track_simulation(step=0)
        if ((self.prm['attack'] is not None) and (self.prm['k_a'] > 0)):
            self.failed = set(run_attack_method(self.graph, self.prm['attack'], self.prm['k_a'], approx=self.prm['attack_approx'], seed=self.prm['seed']))
            if (get_attack_category(self.prm['attack']) == 'node'):
                # Attacked nodes get double load to push them over capacity.
                for n in self.failed:
                    self.load[n] = (2 * self.load[n])
            elif (get_attack_category(self.prm['attack']) == 'edge'):
                self.graph.remove_edges_from(self.failed)
        if ((self.prm['defense'] is not None) and (self.prm['k_d'] > 0)):
            if (get_defense_category(self.prm['defense']) == 'node'):
                self.protected = run_defense_method(self.graph, self.prm['defense'], self.prm['k_d'], seed=self.prm['seed'])
                # Defended nodes get double capacity.
                for n in self.protected:
                    self.capacity[n] = (2 * self.capacity[n])
            elif (get_defense_category(self.prm['defense']) == 'edge'):
                edge_info = run_defense_method(self.graph, self.prm['defense'], self.prm['k_d'], seed=self.prm['seed'])
                self.graph.add_edges_from(edge_info['added'])
                if ('removed' in edge_info):
                    self.graph.remove_edges_from(edge_info['removed'])
        elif (self.prm['defense'] is not None):
            print(self.prm['defense'], 'not available or k <= 0')
        self.track_simulation(step=1)

    def track_simulation(self, step):
        """
        Keeps track of important simulation information at each step of the simulation

        :param step: current simulation iteration
        """
        nodes_functioning = set(self.graph.nodes).difference(self.failed)
        measure = 0
        if (len(nodes_functioning) > 0):
            measure = run_measure(self.graph.subgraph(nodes_functioning), self.prm['robust_measure'])
        self.sim_info[step] = {'status': [self.load[n] for n in self.graph.nodes], 'failed': len(self.failed), 'measure': measure, 'protected': self.protected}

    def run_single_sim(self):
        """
        Run the attack simulation
        """
        for step in range(self.prm['steps']):
            # Steps 0 and 1 were recorded by reset_simulation(), hence +2.
            self.track_simulation((step + 2))
            failed_new = set()
            for n in self.failed:
                if (self.load[n] > self.capacity[n]):
                    # Overloaded node sheds its load evenly among its neighbors.
                    nbrs = list(self.graph.neighbors(n))
                    for nb in self.graph.neighbors(n):
                        if ((nb not in self.failed) and (nb not in failed_new)):
                            self.load[nb] += (self.load[n] / len(nbrs))
                            if (self.load[nb] > self.capacity[nb]):
                                failed_new.add(nb)
            self.failed = self.failed.union(failed_new)
        robustness = [(v['measure'] if (v['measure'] is not None) else 0) for (k, v) in self.sim_info.items()]
        return robustness
def main():
    """Demo: run a cascading-failure simulation on the US power-grid graph and plot the results."""
    graph = electrical()
    params = {'runs': 1, 'steps': 100, 'seed': 1, 'l': 0.8, 'r': 0.2, 'c': int((0.1 * len(graph))), 'k_a': 5, 'attack': 'id_node', 'attack_approx': None, 'k_d': 0, 'defense': None, 'robust_measure': 'largest_connected_component', 'plot_transition': False, 'gif_animation': True, 'gif_snaps': True, 'edge_style': None, 'node_style': 'spectral', 'fa_iter': 2000}
    sim = Cascading(graph, **params)
    outcome = sim.run_simulation()
    sim.plot_results(outcome)
def run_defense_method(graph, method, k=3, seed=None):
    """
    Runs a specified defense on an undirected graph, returning the nodes or edges to defend.

    :param graph: an undirected NetworkX graph
    :param method: a string naming one of the registered defense methods
    :param k: number of nodes or edges to defend
    :param seed: sets the seed in order to obtain reproducible defense runs
    :return: a list of nodes or edge tuples to defend (empty if the method is unknown or k <= 0)
    """
    # Guard clause: unknown method or non-positive budget yields nothing.
    if method not in methods or k <= 0:
        print('{} not implemented or k <= 0'.format(method))
        return []
    if seed is not None:
        np.random.seed(seed)
    return methods[method](graph, k)
def get_defense_methods():
    """
    Gets the available defense methods as a list.

    The original returned a live ``dict_keys`` view even though the docstring
    promised a list; materialize it so callers get a stable, indexable list.

    :return: a list of all defense method names
    """
    return list(methods.keys())
def get_defense_category(method):
    """
    Gets the defense category e.g., 'node', 'edge' defense.

    :param method: a string representing the defense method
    :return: 'node' or 'edge', or None when the method is unknown
    """
    # dict.get returns None for unknown methods, matching the explicit lookup.
    return categories.get(method)
def get_node_ns(graph, k=3):
    """
    Get k nodes to defend based on the Netshield algorithm :cite:`tong2010vulnerability`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    selection = get_node_ns_attack(graph, k)
    return selection
def get_node_pr(graph, k=3):
    """
    Get k nodes to defend based on top PageRank entries :cite:`page1999pagerank`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    selection = get_node_pr_attack(graph, k)
    return selection
def get_node_eig(graph, k=3):
    """
    Get k nodes to defend based on top eigenvector centrality entries.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    selection = get_node_eig_attack(graph, k)
    return selection
def get_node_ib(graph, k=3, approx=np.inf):
    """
    Get k nodes to defend based on Initial Betweenness (IB) Removal :cite:`holme2002attack`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :param approx: number of nodes used to approximate betweenness centrality;
        0.1n (n = number of nodes) is a good approximation
    :return: a list of nodes to defend
    """
    selection = get_node_ib_attack(graph, k, approx)
    return selection
def get_node_rb(graph, k=3, approx=np.inf):
    """
    Get k nodes to defend based on Recalculated Betweenness (RB) Removal :cite:`holme2002attack`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :param approx: number of nodes used to approximate betweenness centrality;
        0.1n (n = number of nodes) is a good approximation
    :return: a list of nodes to defend
    """
    selection = get_node_rb_attack(graph, k, approx)
    return selection
def get_node_id(graph, k=3):
    """
    Get k nodes to defend based on Initial Degree (ID) Removal :cite:`holme2002attack`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    selection = get_node_id_attack(graph, k)
    return selection
def get_node_rd(graph, k=3):
    """
    Get k nodes to defend based on Recalculated Degree (RD) Removal :cite:`holme2002attack`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    selection = get_node_rd_attack(graph, k)
    return selection
def get_node_rnd(graph, k=3):
    """
    Randomly select k distinct nodes to defend.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    selection = get_node_rnd_attack(graph, k)
    return selection
def get_central_edges(graph, k, method='eig'):
    """
    Internal helper that ranks candidate (currently missing) edges between
    high-degree nodes by the product of their endpoints' centralities.

    :param graph: undirected NetworkX graph
    :param k: int number of edges to return
    :param method: 'pr' (PageRank), 'eig' (eigenvector) or 'deg' (degree) centrality
    :return: list of the k highest-scoring edges to add
    :raises ValueError: if method is not one of 'pr', 'eig', 'deg'
    """
    # Candidate pool: enough top-degree nodes that non-edges must exist.
    max_deg = max(d[1] for d in graph.degree)
    top_nodes = get_node_id(graph, k=(max_deg + k))
    if method == 'pr':
        centrality = nx.pagerank(graph)
    elif method == 'eig':
        centrality = nx.eigenvector_centrality(graph)
    elif method == 'deg':
        centrality = dict(graph.degree)
    else:
        # BUG FIX: the original fell through with `centrality` unbound and
        # crashed later with a NameError; fail fast with a clear message.
        raise ValueError("method must be 'pr', 'eig' or 'deg', got {!r}".format(method))
    score = {}
    tried = set()
    for u in top_nodes:
        for v in top_nodes:
            # Score each non-existing, unordered pair exactly once.
            if u != v and not graph.has_edge(u, v) and (u, v) not in tried:
                tried.add((u, v))
                tried.add((v, u))
                score[(u, v)] = centrality[u] * centrality[v]
    # Renamed from the misleading `nodes`: these are edge tuples.
    best_edges = heapq.nlargest(k, score, key=score.get)
    return best_edges
def add_edge_pr(graph, k=3):
    """
    Get k edges to defend based on top edge PageRank entries :cite:`tong2012gelling`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to add
    :return: a dictionary of the edges to be 'added'
    """
    ranked = get_central_edges(graph, k, method='pr')
    info = defaultdict(list)
    info['added'] = ranked
    return info
def add_edge_eig(graph, k=3):
    """
    Get k edges to defend based on top edge eigenvector centrality entries :cite:`tong2012gelling`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to add
    :return: a dictionary of the edges to be 'added'
    """
    ranked = get_central_edges(graph, k, method='eig')
    info = defaultdict(list)
    info['added'] = ranked
    return info
def add_edge_degree(graph, k=3):
    """
    Add k edges to defend based on top edge degree centrality entries :cite:`tong2012gelling`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to add
    :return: a dictionary of the edges to be 'added'
    """
    ranked = get_central_edges(graph, k, method='deg')
    info = defaultdict(list)
    info['added'] = ranked
    return info
def add_edge_rnd(graph, k=3):
    """
    Add k random edges to the graph.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to add
    :return: a dictionary of the edges to be 'added'
    """
    graph_ = graph.copy()
    info = defaultdict(list)
    # BUG FIX: materialize the node list once. The original rebound the raw
    # NodeView inside the loop; np.random.choice requires an indexable 1-D
    # sequence, and re-deriving it every iteration was wasted work anyway.
    nodes = list(graph_.nodes)
    for _ in range(k):
        (u, v) = np.random.choice(nodes, 2, replace=False)
        # Resample until the pair is a genuinely new edge.
        while graph_.has_edge(u, v) or u == v:
            (u, v) = np.random.choice(nodes, 2, replace=False)
        graph_.add_edge(u, v)
        info['added'].append((u, v))
    return info
def add_edge_pref(graph, k=3):
    """
    Adds an edge connecting the two nodes with the lowest degrees :cite:`beygelzimer2005improving`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to add
    :return: a dictionary of the edges to be 'added'
    """
    proposals = defaultdict(list)
    degree_of = dict(graph.degree)
    seen = set()
    for _ in range(k):
        # Lowest-degree node, temporarily removed so the runner-up is found.
        low = min(degree_of, key=degree_of.get)
        bumped = degree_of.pop(low) + 1
        partner = min(degree_of, key=degree_of.get)
        # Account for the proposed edge in the working degree table.
        degree_of[partner] += 1
        degree_of[low] = bumped
        if (low, partner) not in seen and (partner, low) not in seen:
            proposals['added'].append((low, partner))
            seen.add((low, partner))
            seen.add((partner, low))
    return proposals
def rewire_edge_rnd(graph, k=3):
    """
    Removes a random edge and adds one randomly :cite:`beygelzimer2005improving`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to rewire
    :return: a dictionary of the edges to be 'removed' and edges to be 'added'
    """
    info = defaultdict(list)
    current = list(graph.edges)
    n_edges = len(current)
    # Never try to rewire more edges than exist.
    n_rewire = min(k, n_edges)
    chosen = np.random.choice(n_edges, n_rewire, replace=False)
    info['removed'] = [current[i] for i in chosen]
    info['added'] = add_edge_rnd(graph, k=n_rewire)['added']
    return info
def rewire_edge_rnd_neighbor(graph, k=3):
    """
    Randomly selects a neighbor of a node and removes the edge; then adds a random edge :cite:`beygelzimer2005improving`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to rewire
    :return: a dictionary of the edges to be 'removed' and edges to be 'added'
    """
    info = defaultdict(list)
    seen = set()
    # Only nodes that actually have a neighbor can donate an edge.
    candidates = [n for n in graph.nodes if len(list(graph.neighbors(n))) > 0]
    chosen = np.random.choice(candidates, min(k, len(candidates)), replace=False)
    for src in chosen:
        dst = np.random.choice(list(graph.neighbors(src)))
        dropped = (src, dst)
        created = add_edge_rnd(graph, k=1)['added'][0]
        if created not in seen and dropped not in seen:
            info['added'].append(created)
            info['removed'].append(dropped)
            seen.update([created, created[::(-1)], dropped, dropped[::(-1)]])
    return info
def rewire_edge_pref(graph, k=3):
    """
    Selects the node with highest degree, randomly removes one of its
    neighbors; adds an edge from that neighbor to a random node in the graph
    :cite:`beygelzimer2005improving`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to rewire
    :return: a dictionary of the edges to be 'removed' and edges to be 'added'
    """
    graph_ = graph.copy()
    info = defaultdict(list)
    for _ in range(k):
        # Build the degree table once per iteration (the original built it twice).
        degrees = dict(graph_.degree)
        hub = max(degrees, key=degrees.get)
        nbr = np.random.choice(list(graph_.neighbors(hub)))
        graph_.remove_edge(hub, nbr)
        info['removed'].append((hub, nbr))
        nodes = list(graph_.nodes)
        v = np.random.choice(nodes)
        # BUG FIX: the replacement edge attaches to `nbr`, so the duplicate /
        # self-loop test must be against `nbr`. The original checked
        # has_edge(u, v) and u == v while adding (nbr, v), which could add a
        # duplicate edge or a self-loop on nbr.
        while graph_.has_edge(nbr, v) or nbr == v:
            v = np.random.choice(nodes)
        graph_.add_edge(nbr, v)
        info['added'].append((nbr, v))
    return info
def rewire_edge_pref_rnd(graph, k=3):
    """
    Selects an edge, disconnects the higher degree node, and reconnects to a random one :cite:`beygelzimer2005improving`.

    :param graph: an undirected NetworkX graph
    :param k: number of edges to rewire
    :return: a dictionary of the edges to be 'removed' and edges to be 'added'
    """
    graph_ = graph.copy()
    info = defaultdict(list)
    all_edges = list(graph_.edges)
    count = min(len(all_edges), k)
    picks = np.random.choice(len(all_edges), count, replace=False)
    for i in picks:
        (u, v) = all_edges[i]
        info['removed'].append((u, v))
        rnd_node = np.random.choice(graph_.nodes)
        # Re-attach the replacement edge to the lower-degree endpoint.
        keep = v if graph_.degree(u) > graph_.degree(v) else u
        graph_.add_edge(keep, rnd_node)
        info['added'].append((keep, rnd_node))
    return info
class Defense(Simulation):
    """
    This class simulates a variety of defense techniques on an undirected NetworkX graph

    :param graph: an undirected NetworkX graph
    :param runs: an integer number of times to run the simulation
    :param steps: an integer number of steps to run a single simulation
    :param attack: a string representing the attack strategy to run
    :param defense: a string representing the defense strategy to run
    :param k_d: an integer number of nodes to defend
    :param **kwargs: see parent class Simulation for additional options
    """

    def __init__(self, graph, runs=10, steps=50, attack='id_node', defense=None, k_d=0, **kwargs):
        super().__init__(graph, runs, steps, **kwargs)
        self.graph = graph
        # Defaults first, then caller-supplied kwargs override them.
        self.prm.update({'attack': attack, 'attack_approx': None, 'k_d': k_d, 'defense': defense, 'robust_measure': 'largest_connected_component'})
        self.prm.update(kwargs)
        if (self.prm['plot_transition'] or self.prm['gif_animation']):
            (self.node_pos, self.edge_pos) = self.get_graph_coordinates()
        self.save_dir = os.path.join(os.getcwd(), 'plots', self.get_plot_title(steps))
        os.makedirs(self.save_dir, exist_ok=True)
        self.attacked = []
        self.protected = defaultdict(list)
        self.connectivity = []
        self.reset_simulation()

    def reset_simulation(self):
        """
        Resets the simulation between each run
        """
        self.graph_ = self.graph.copy()
        self.attacked = []
        self.protected = []
        self.connectivity = []
        if ((self.prm['attack'] is not None) and (self.prm['k_a'] > 0)):
            self.attacked = run_attack_method(self.graph_, self.prm['attack'], self.prm['k_a'], approx=self.prm['attack_approx'], seed=self.prm['seed'])
            if (get_attack_category(self.prm['attack']) == 'edge'):
                # NOTE(review): for an *edge* attack this removes the attacked
                # items as NODES; remove_edges_from looks intended — confirm.
                self.graph_.remove_nodes_from(self.attacked)
        elif (self.prm['attack'] is not None):
            print(self.prm['attack'], 'not available or k <= 0')
        if ((self.prm['defense'] is not None) and (self.prm['steps'] > 0)):
            # 'steps' doubles as the defense budget here (one defense move per step).
            if (get_defense_category(self.prm['defense']) == 'node'):
                self.protected = run_defense_method(self.graph_, self.prm['defense'], self.prm['steps'], seed=self.prm['seed'])
            elif (get_defense_category(self.prm['defense']) == 'edge'):
                self.protected = run_defense_method(self.graph_, self.prm['defense'], self.prm['steps'], seed=self.prm['seed'])
        elif (self.prm['defense'] is not None):
            print(self.prm['defense'], 'not available or k <= 0')
        if (get_attack_category(self.prm['attack']) == 'node'):
            if (get_defense_category(self.prm['defense']) == 'node'):
                # NOTE(review): this removes nodes that are protected but NOT
                # attacked; set(attacked) - set(protected) looks intended —
                # confirm against the paper/upstream before relying on it.
                diff = (set(self.protected) - set(self.attacked))
                self.graph_.remove_nodes_from(diff)
            else:
                self.graph_.remove_nodes_from(self.attacked)

    def track_simulation(self, step):
        """
        Keeps track of important simulation information at each step of the simulation

        :param step: current simulation iteration
        """
        measure = run_measure(self.graph_, self.prm['robust_measure'])
        # Components largest-first; a component's rank is mapped onto
        # [0.15, 1] to color nodes in the visualization.
        ccs = list(nx.connected_components(self.graph_))
        ccs.sort(key=len, reverse=True)
        m = interp1d([0, len(ccs)], [0.15, 1])
        status = {}
        for n in self.graph:
            for (idx, cc) in enumerate(ccs):
                if (n in self.attacked[0:step]):
                    # Already-attacked nodes are drawn at full intensity.
                    status[n] = 1
                    break
                elif (n in cc):
                    status[n] = float(m(idx))
                    break
                else:
                    status[n] = 0
        self.sim_info[step] = {'status': list(status.values()), 'failed': (len(self.graph_) - len(max(ccs))), 'measure': measure, 'protected': self.protected, 'edges_added': (self.protected['added'][0:step] if ('added' in self.protected) else []), 'edges_removed': (self.protected['removed'][0:step] if ('removed' in self.protected) else [])}

    def run_single_sim(self):
        """
        Run the defense simulation
        """
        for step in range(self.prm['steps']):
            if ((step < len(self.protected)) and (len(self.protected) > 0) and (get_defense_category(self.prm['defense']) == 'edge')):
                self.track_simulation(step)
                (u, v) = self.protected['added'][step]
                self.graph_.add_edge(u, v)
                # NOTE(review): self.protected[step] indexes the defaultdict by
                # the integer step (silently inserting an empty list);
                # `'removed' in self.protected` looks intended. Also note the
                # removal targets self.graph rather than self.graph_ — confirm.
                if ('removed' in self.protected[step]):
                    (u, v) = self.protected['removed'][step]
                    self.graph.remove_edge(u, v)
            else:
                self.track_simulation(step)
                print("Ending defense simulation early, not an 'edge' defense or out of {}s".format(get_attack_category(self.prm['defense'])))
        results = [(v['measure'] if (v['measure'] is not None) else 0) for (k, v) in self.sim_info.items()]
        return results
def main():
    """Demo: defend the KY-2 water network with random edge additions against an RB node attack."""
    graph = graph_loader(graph_type='ky2', seed=1)
    params = {'runs': 1, 'steps': 30, 'seed': 1, 'attack': 'rb_node', 'k_a': 30, 'attack_approx': int((0.1 * len(graph))), 'defense': 'add_edge_random', 'robust_measure': 'largest_connected_component', 'plot_transition': True, 'gif_animation': True, 'edge_style': None, 'node_style': 'spectral', 'fa_iter': 2000}
    sim = Defense(graph, **params)
    outcome = sim.run_simulation()
    sim.plot_results(outcome)
class Diffusion(Simulation):
    """
    Simulates the propagation of a virus using either the SIS or SIR model :cite:`kermack1927contribution`.

    :param graph: contact network
    :param model: a string to set the model type (i.e., SIS or SIR)
    :param runs: an integer number of times to run the simulation
    :param steps: an integer number of steps to run a single simulation
    :param b: float representing birth rate of virus (probability of transmitting disease to each neighbor)
    :param d: float representing death rate of virus (probability of each infected node healing)
    :param c: fraction of initially infected nodes
    :param **kwargs: see parent class Simulation for additional options
    """

    def __init__(self, graph, model='SIS', runs=10, steps=5000, b=0.00208, d=0.01, c=1, **kwargs):
        super().__init__(graph, runs, steps, **kwargs)
        # 'diffusion' ('min'/'max'), 'method' and 'k' configure an optional
        # pre-simulation intervention that hinders or helps the epidemic.
        self.prm.update({'model': model, 'b': b, 'd': d, 'c': c, 'diffusion': None, 'method': None, 'k': None})
        self.prm.update(kwargs)
        self.vaccinated = set()
        self.infected = set()
        if (self.prm['plot_transition'] or self.prm['gif_animation']):
            (self.node_pos, self.edge_pos) = self.get_graph_coordinates()
        self.save_dir = os.path.join(os.getcwd(), 'plots', self.get_plot_title(steps))
        os.makedirs(self.save_dir, exist_ok=True)
        self.reset_simulation()

    def get_effective_strength(self):
        """
        Gets the effective strength of the virus: a factor of the spectral radius
        (first eigenvalue) of the graph, the virus birth rate 'b' and the virus
        death rate 'd'.

        :return: a float for virus effective strength
        """
        return round(((spectral_radius(self.graph) * self.prm['b']) / self.prm['d']), 2)

    def reset_simulation(self):
        """
        Resets the simulation between each run
        """
        self.graph = self.graph_og.copy()
        self.vaccinated = set()
        self.sim_info = defaultdict()
        # Infect a uniformly random fraction 'c' of the nodes.
        self.infected = set(np.random.choice(list(self.graph.nodes), size=int((self.prm['c'] * len(self.graph))), replace=False).tolist())
        if ((self.prm['diffusion'] == 'min') and (self.prm['k'] > 0)):
            # 'min': hinder the virus by vaccinating nodes or cutting edges.
            if (get_attack_category(self.prm['method']) == 'node'):
                self.vaccinated = set(run_attack_method(self.graph, self.prm['method'], self.prm['k'], seed=self.prm['seed']))
                self.infected = self.infected.difference(self.vaccinated)
            elif (get_attack_category(self.prm['method']) == 'edge'):
                edge_info = run_attack_method(self.graph, self.prm['method'], self.prm['k'], seed=self.prm['seed'])
                self.graph.remove_edges_from(edge_info)
            else:
                print(self.prm['method'], 'not available')
        elif ((self.prm['diffusion'] == 'max') and (self.prm['k'] > 0)):
            # 'max': help the virus by rewiring edges via a defense method.
            if (get_defense_category(self.prm['method']) == 'edge'):
                edge_info = run_defense_method(self.graph, self.prm['method'], self.prm['k'], seed=self.prm['seed'])
                self.graph.add_edges_from(edge_info['added'])
                self.graph.remove_edges_from(edge_info['removed'])
            else:
                print(self.prm['method'], 'not available')
        elif (self.prm['diffusion'] is not None):
            print(self.prm['diffusion'], 'not available or k <= 0')

    def track_simulation(self, step):
        """
        Keeps track of important simulation information at each step of the simulation

        :param step: current simulation iteration
        """
        self.sim_info[step] = {'status': [(1 if (n in self.infected) else 0) for n in self.graph.nodes], 'failed': len(self.infected), 'recovered': len(self.vaccinated), 'protected': self.vaccinated}

    def run_single_sim(self):
        """
        The initially infected nodes are chosen uniformly at random. At each time step,
        every susceptible (i.e., non-infected) node has a probability 'b' of being
        infected by neighboring infected nodes. Every infected node has a probability 'd'
        of being cured and becoming susceptible again (or recovered for SIR model).
        """
        for step in range(self.prm['steps']):
            self.track_simulation(step)
            infected_new = set()
            for node in self.infected:
                # Each susceptible, unvaccinated neighbor is infected w.p. 'b'.
                nbrs = self.graph.neighbors(node)
                nbrs = set(nbrs).difference(self.infected).difference(self.vaccinated)
                nbrs_infected = set([n for n in nbrs if (random.random() <= self.prm['b'])])
                infected_new = infected_new.union(nbrs_infected)
            # Each infected node heals w.p. 'd'; under SIR healed nodes become immune.
            cured = set([n for n in self.infected if (random.random() <= self.prm['d'])])
            self.infected = self.infected.union(infected_new)
            self.infected = self.infected.difference(cured)
            if (self.prm['model'] == 'SIR'):
                self.vaccinated.update(cured)
        if (self.prm['model'] == 'SIS'):
            history = [v['failed'] for (k, v) in self.sim_info.items()]
        else:
            history = [v['recovered'] for (k, v) in self.sim_info.items()]
        return history
def main():
    """Demo: run an SIS epidemic on the AS-733 graph with a Netshield vaccination defense."""
    graph = as_733()
    sis_params = {'model': 'SIS', 'b': 0.001, 'd': 0.01, 'c': 1, 'runs': 1, 'steps': 5000, 'seed': 1, 'diffusion': 'min', 'method': 'ns_node', 'k': 5, 'plot_transition': True, 'gif_animation': True, 'edge_style': 'bundled', 'node_style': 'force_atlas', 'fa_iter': 20}
    sim = Diffusion(graph, **sis_params)
    outcome = sim.run_simulation()
    sim.plot_results(outcome)
def graph_loader(graph_type, **kwargs):
    """
    Loads any of the available graph models, supported user-downloaded datasets and toy graphs.
    In order to get a list of available graph options run 'get_graph_options()'.

    :param graph_type: a string representing the graph you want to load. For example, 'ER', 'WS', 'BA',
        'oregon_1' (must first download), 'electrical' (must first download)
    :param kwargs: allows user to specify specific graph model properties
    :return: an undirected NetworkX graph, or None when the name is unknown
    """
    # Early returns: generator models, downloadable datasets, then toy graphs.
    if graph_type in models.keys():
        return models[graph_type](**kwargs)
    if graph_type in datasets:
        download_dataset(graph_type)
        return datasets[graph_type]()
    if graph_type in custom.keys():
        return custom[graph_type]()
    print('Graph not supported. Select from one of the following graphs: {}'.format(get_graph_options()))
    return None
def download_dataset(dataset):
    """
    Reading the dataset from the web.

    :param dataset: a string representing the dataset to download
    """
    url_path = graph_urls[dataset][0]
    local_path = graph_dir + url_path.split('datasets/')[1]
    # Skip the download when a local copy already exists.
    if os.path.exists(local_path):
        return
    urllib.request.urlretrieve(url_path, local_path)
def get_graph_urls():
    """
    Returns the mapping from each TIGER dataset name to its original download link.

    :return: dictionary containing links to each dataset
    """
    # Simple accessor over the module-level registry.
    return graph_urls
def get_graph_options():
    """
    Returns a formatted string containing all of the generators, datasets and
    custom graphs implemented in TIGER.

    :return: JSON-formatted string of the available graph names
    """
    # BUG FIX: the original passed the `datasets` dict itself (name -> loader
    # function) to json.dumps, which cannot serialize function objects and
    # raises TypeError; list the names, matching 'models' and 'custom'.
    graph_options = {'models': list(models.keys()), 'datasets': list(datasets.keys()), 'custom': list(custom.keys())}
    return json.dumps(graph_options, indent=1)
def erdos_reyni(n, p=None, seed=None):
    """
    Returns a Erdos Reyni NetworkX graph

    :param n: number of nodes
    :param p: probability for edge creation; defaults to 1/n + 0.1 when omitted
    :param seed: fixes the graph generation process
    :return: a NetworkX graph
    """
    edge_prob = p if p is not None else (1.0 / n) + 0.1
    return nx.generators.erdos_renyi_graph(n=n, p=edge_prob, seed=seed)
def watts_strogatz(n, m=4, p=0.05, seed=None):
    """
    Returns a connected Watts Strogatz NetworkX graph

    :param n: number of nodes
    :param m: each node is joined with its m nearest neighbors in a ring topology
    :param p: probability of rewiring each edge
    :param seed: fixes the graph generation process
    :return: a NetworkX graph
    """
    graph = nx.generators.connected_watts_strogatz_graph(n=n, k=m, p=p, seed=seed)
    return graph
def barabasi_albert(n, m=3, seed=None):
    """
    Returns a Barabasi Albert NetworkX graph

    :param n: number of nodes
    :param m: number of edges to attach from a new node to existing nodes
    :param seed: fixes the graph generation process
    :return: a NetworkX graph
    """
    graph = nx.generators.barabasi_albert_graph(n=n, m=m, seed=seed)
    return graph
def clustered_scale_free(n, m=3, p=0.3, seed=None):
    """
    Returns a Clustered Scale-Free (powerlaw cluster) NetworkX graph

    :param n: number of nodes
    :param m: the number of random edges to add for each new node
    :param p: probability of adding a triangle after adding a random edge
    :param seed: fixes the graph generation process
    :return: a NetworkX graph
    """
    graph = nx.powerlaw_cluster_graph(n=n, m=m, p=p, seed=seed)
    return graph
def wdn_ky2():
    """
    Returns the graph from: https://uknowledge.uky.edu/wdst/4/,
    where we preprocess it to only keep the largest connected component

    :return: undirected NetworkX graph
    """
    graph = nx.Graph()
    with open((graph_dir + 'ky2.txt')) as f:
        lines = f.readlines()
        for line in lines:
            # 9-column rows are link records; columns 1-2 hold the endpoints.
            if (len(line.split('\t')) == 9):
                (u, v) = line.strip().split('\t')[1:3]
                u = u.strip()
                v = v.strip()
                # Only ids containing 'J' (presumably junctions) form edges —
                # TODO confirm against the dataset's format description.
                if (('J' in u) and ('J' in v)):
                    graph.add_edge(u, v)
            else:
                # Otherwise the row is a coordinate record: name, x, y.
                # NOTE(review): assumes the node was already added by an edge
                # row; graph.nodes[name] raises KeyError otherwise — confirm.
                (name, x_pos, y_pos) = line.strip().split('\t')
                name = name.strip()
                x_pos = float(x_pos.strip())
                y_pos = float(y_pos.strip())
                graph.nodes[name]['pos'] = [x_pos, y_pos]
    graph = nx.convert_node_labels_to_integers(graph)
    return graph.subgraph(max(nx.connected_components(graph), key=len))
def as_733():
    """
    Returns the 'as19971108' graph from: http://snap.stanford.edu/data/as-733.html,
    relabeled to integer node ids and restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'as19971108.txt')
    g = nx.convert_node_labels_to_integers(g)
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def p2p_gnuetella08():
    """
    Returns the graph from: https://snap.stanford.edu/data/p2p-Gnutella08.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'p2p-Gnutella08.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def ca_grqc():
    """
    Returns the graph from: https://snap.stanford.edu/data/ca-GrQc.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'ca-GrQc.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def cit_hep_th():
    """
    Returns the graph from: https://snap.stanford.edu/data/cit-HepTh.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'cit-HepTh.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def wiki_vote():
    """
    Returns the graph from: https://snap.stanford.edu/data/wiki-Vote.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'wiki-Vote.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def email_eu_all():
    """
    Returns the graph from: https://snap.stanford.edu/data/email-EuAll.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'email-EuAll.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def dblp():
    """
    Returns the graph from: https://snap.stanford.edu/data/com-DBLP.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'dblp.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def ca_astro_ph():
    """
    Returns the graph from: https://snap.stanford.edu/data/ca-AstroPh.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'ca-AstroPh.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def ca_hep_th():
    """
    Returns the ca-HepTh collaboration graph (see https://snap.stanford.edu/data/),
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'ca-HepTh.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def enron_email():
    """
    Returns the graph from: https://snap.stanford.edu/data/email-Enron.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'email-enron.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def karate():
    """
    Returns Zachary's karate club graph via
    https://networkx.org/documentation/stable/reference/generated/networkx.generators.social.karate_club_graph.html

    :return: undirected NetworkX graph
    """
    graph = nx.karate_club_graph()
    return graph
def oregeon_1():
    """
    Returns the graph from: https://snap.stanford.edu/data/oregon1_010331.html,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_edgelist(graph_dir + 'as-oregon1.txt')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def electrical():
    """
    Returns the graph from: http://konect.cc/networks/opsahl-powergrid/,
    restricted to its largest connected component.

    :return: undirected NetworkX graph
    """
    g = nx.read_gml(graph_dir + 'power.gml', label='id')
    biggest = max(nx.connected_components(g), key=len)
    return g.subgraph(biggest)
def o4_graph():
    """
    Returns the edgeless (disconnected) graph on 4 nodes.

    :return: undirected NetworkX graph
    """
    g = nx.Graph()
    g.add_nodes_from(range(4))
    return g
def p4_graph():
    """
    Returns the path graph on 4 nodes (0-1-2-3).

    :return: undirected NetworkX graph
    """
    return nx.Graph([(0, 1), (1, 2), (2, 3)])
def s4_graph():
    """
    Returns a 4 node star graph centered at node 1.

    :return: undirected NetworkX graph
    """
    return nx.Graph([(0, 1), (1, 2), (1, 3)])
def c4_graph():
    """
    Returns a 4 node cycle graph: 0 - 1 - 3 - 2 - 0.

    :return: undirected NetworkX graph
    """
    return nx.Graph([(0, 1), (0, 2), (1, 3), (2, 3)])
def k4_1_graph():
    """
    Returns a 4 node diamond graph (a 4-cycle plus 1 diagonal edge 0-3).

    :return: undirected NetworkX graph
    """
    return nx.Graph([(0, 1), (0, 2), (0, 3), (1, 3), (2, 3)])
def k4_2_graph():
    """
    Returns a 4 node graph with both diagonal edges, i.e. the complete graph K4.

    :return: undirected NetworkX graph
    """
    return nx.Graph([(0, 1), (0, 2), (0, 3), (1, 3), (1, 2), (2, 3)])
def two_c4_0_bridge():
    """
    Returns two disconnected 4 node cycle graphs (nodes 0-3 and 4-7).

    :return: undirected NetworkX graph
    """
    cycle_a = [(0, 1), (0, 2), (1, 3), (2, 3)]
    cycle_b = [(4, 5), (5, 6), (6, 7), (7, 4)]
    graph = nx.Graph()
    graph.add_edges_from(cycle_a + cycle_b)
    return graph
def two_c4_1_bridge():
    """
    Returns two 4 node cycle graphs connected by 1 bridge edge (2, 4).

    :return: undirected NetworkX graph
    """
    cycle_a = [(0, 1), (0, 2), (1, 3), (2, 3)]
    cycle_b = [(4, 5), (5, 6), (6, 7), (7, 4)]
    graph = nx.Graph()
    graph.add_edges_from(cycle_a + cycle_b + [(2, 4)])
    return graph
def two_c4_2_bridge():
    """
    Returns two 4 node cycle graphs connected by 2 parallel bridge edges
    (2, 4); a MultiGraph is required to keep the parallel edges.

    :return: undirected NetworkX MultiGraph
    """
    cycle_a = [(0, 1), (0, 2), (1, 3), (2, 3)]
    cycle_b = [(4, 5), (5, 6), (6, 7), (7, 4)]
    bridges = [(2, 4), (2, 4)]
    graph = nx.MultiGraph()
    graph.add_edges_from(cycle_a + cycle_b + bridges)
    return graph
def two_c4_3_bridge():
    """
    Returns two 4 node cycle graphs connected by 3 parallel bridge edges
    (2, 4); a MultiGraph is required to keep the parallel edges.

    :return: undirected NetworkX MultiGraph
    """
    cycle_a = [(0, 1), (0, 2), (1, 3), (2, 3)]
    cycle_b = [(4, 5), (5, 6), (6, 7), (7, 4)]
    bridges = [(2, 4), (2, 4), (2, 4)]
    graph = nx.MultiGraph()
    graph.add_edges_from(cycle_a + cycle_b + bridges)
    return graph
class Simulation():
    """
    The parent class for all simulation classes i.e., attack, defense, cascading failure and diffusion models.
    Provides a shared set of functions, largely for network visualization and plotting of results.

    :param graph: undirected NetworkX graph
    :param runs: number of times to run the simulation
    :param steps: number of time steps to run each simulation
    :param kwargs: optional parameters to change visualization settings
    """

    def __init__(self, graph, runs, steps, **kwargs):
        # keep an untouched copy so subclasses can restore the graph between runs
        self.graph_og = graph.copy()
        self.graph = graph
        # default simulation/visualization parameters.
        # NOTE(review): kwargs are accepted but not merged into self.prm here --
        # subclasses (e.g. Cascading in this file) call self.prm.update(kwargs)
        # themselves; confirm every subclass does so.
        self.prm = {'runs': runs, 'steps': steps, 'seed': 1, 'max_val': 1, 'gif_animation': False, 'gif_snaps': False, 'plot_transition': False, 'edge_style': None, 'node_style': None, 'fa_iter': 200}
        self.sim_info = defaultdict()
        self.sparse_graph = get_sparse_graph(self.graph)
        # seed both RNGs so repeated simulations are reproducible
        if (self.prm['seed'] is not None):
            random.seed(self.prm['seed'])
            np.random.seed(self.prm['seed'])

    def child_class(self):
        """
        Gets the child class name (used to branch on the concrete simulation type).

        :return: string
        """
        return self.__class__.__name__

    def get_graph_coordinates(self):
        """
        Gets the graph coordinates, which can be:
        (1) set in the graph itself with the 'pos' tag on the vertices,
        (2) positioned according to the force atlas2 algorithm,
        (3) positioned using a spectral layout.

        Then lays out the edges, which can be bundled or straight.

        :return: Tuple containing node and edge positions
        """
        edge_pos = None
        # use vertex-supplied positions only when EVERY vertex has one
        node_pos = {idx: v['pos'] for (idx, (k, v)) in enumerate(dict(self.graph.nodes).items()) if ('pos' in v)}
        node_pos = (node_pos if (len(node_pos) == len(self.graph)) else None)
        if ((self.prm['node_style'] == 'force_atlas') and (node_pos is None)):
            force = ForceAtlas2(outboundAttractionDistribution=True, edgeWeightInfluence=0, scalingRatio=6.0, verbose=False)
            node_pos = force.forceatlas2_networkx_layout(self.graph, pos=None, iterations=self.prm['fa_iter'])
        elif (node_pos is None):
            node_pos = nx.spectral_layout(self.graph)
        if (self.prm['edge_style'] == 'bundled'):
            # hammer_bundle expects a DataFrame of node positions indexed by name
            pos = pd.DataFrame.from_dict(node_pos, orient='index', columns=['x', 'y']).rename_axis('name').reset_index()
            edge_pos = hammer_bundle(pos, nx.to_pandas_edgelist(self.graph))
        return (node_pos, edge_pos)

    def plot_results(self, results):
        """
        Plots the compiled simulation results, normalized by graph size,
        and saves the figure as a PDF in self.save_dir.

        :param results: a list of floats representing each simulation output
        """
        results_norm = [(r / len(self.graph)) for r in results]
        plt.figure(figsize=(6.4, 4.8))
        if (self.child_class() == 'Diffusion'):
            plt.plot(results_norm, label='Effective strength: {}'.format(self.get_effective_strength()))
            if (self.prm['model'] == 'SIS'):
                plt.ylabel('Infected Nodes')
            else:
                plt.ylabel('Recovered Nodes')
            plt.legend()
            # epidemic curves are conventionally viewed on a log scale
            plt.yscale('log')
            plt.ylim(0.001, 1)
        elif ((self.child_class() == 'Cascading') or (self.child_class() == 'Attack') or (self.child_class() == 'Defense')):
            plt.plot(results_norm)
            plt.ylabel(self.prm['robust_measure'])
            plt.ylim(0, 1)
        plt.xlabel('Steps')
        plt.title(self.child_class())
        plt.savefig(os.path.join(self.save_dir, (self.get_plot_title(self.prm['steps']) + '_results.pdf')))
        plt.clf()

    def get_plot_title(self, step):
        """
        Gets the title for each plot, encoding the simulation parameters.

        :param step: the current simulation iteration
        :return: title string
        """
        if (self.child_class() == 'Diffusion'):
            title = '{}_epidemic--step={},diffusion={},method={},k={}'.format(self.prm['model'], step, self.prm['diffusion'], self.prm['method'], self.prm['k'])
        elif (self.child_class() == 'Cascading'):
            title = 'Cascading--step={},l={},r={},k_a={},attack={},k_d={},defense={}'.format(step, self.prm['l'], self.prm['r'], self.prm['k_a'], self.prm['attack'], self.prm['k_d'], self.prm['defense'])
        elif (self.child_class() == 'Attack'):
            title = 'Attack--step={},attack={},k_d={},defense={}'.format(step, self.prm['attack'], self.prm['k_d'], self.prm['defense'])
        elif (self.child_class() == 'Defense'):
            title = 'Defense--step={},attack={},k_a={},defense={}'.format(step, self.prm['attack'], self.prm['k_a'], self.prm['defense'])
        else:
            title = ''
        return title

    def plot_graph_transition(self, sim_info):
        """
        Helper function to decide which snapshots to take for network visualization:
        the first three steps, the step closest to the halfway point of the
        'failed' trajectory, and the last step.

        :param sim_info: the information stored at each step in the simulation
        """
        history = [info['failed'] for (step, info) in sim_info.items()]
        start = history[0]
        end = history[(- 1)]
        # the 'failed' value halfway between the first and last observation
        middle = (start - int(((start - end) / 2)))
        (mid_step, _) = min(enumerate(history), key=(lambda x: abs((x[1] - middle))))
        steps_to_plot = [0, 1, 2, mid_step, (self.prm['steps'] - 1)]
        for step in steps_to_plot:
            self.plot_network(step=step)

    def get_visual_settings(self, step):
        """
        Sets the visual settings for the network visualization at a given step.

        :param step: current iteration of the simulation
        :return: node colors, node sizes, edge color, edge width, and the
            matplotlib colormap used to render node colors
        """
        if (self.child_class() == 'Cascading'):
            (nc, ns) = ([], [])
            ew = 1
            ec = 'gray'
            for (idx, load) in enumerate(self.sim_info[step]['status']):
                # node size scales with capacity; color with load/capacity ratio
                cval = interp1d([0, self.prm['max_val']], [20, 1500])
                ns.append(float(cval(self.capacity[idx])))
                if (load <= self.capacity[idx]):
                    cval = interp1d([0, self.capacity[idx]], [0, 0.8])
                    nc.append(float(cval(load)))
                else:
                    # overloaded node: clamp to the top of the color range
                    nc.append(1)
            cmap = plt.get_cmap('jet', 5)
        elif (self.child_class() == 'Diffusion'):
            (nc, ns) = ([], [])
            ew = 0.1
            ec = '#1F76B4'
            for (idx, s) in enumerate(self.sim_info[step]['status']):
                if (idx in self.sim_info[0]['protected']):
                    # protected nodes are drawn large with a distinct color
                    nc.append(0.5)
                    ns.append(200)
                elif (s == 1):
                    nc.append(s)
                    ns.append(40)
                else:
                    nc.append(s)
                    ns.append(20)
            cmap = LinearSegmentedColormap.from_list('mycmap', ['#67CAFF', '#17255A', '#FF5964'])
        elif ((self.child_class() == 'Attack') or (self.child_class() == 'Defense')):
            ew = 5
            ec = 'gray'
            nc = self.sim_info[step]['status']
            # highlight nodes with status 1 by drawing them larger
            ns = [(120 if (status == 1) else 40) for status in self.sim_info[step]['status']]
            cmap = plt.get_cmap('gist_rainbow_r')
        nc = np.array(nc)
        ns = np.array(ns)
        return (nc, ns, ec, ew, cmap)

    def draw_graph(self, step):
        """
        Draws the graph at the given simulation step.

        :param step: current iteration of the simulation
        :return: matplotlib.collections.PathCollection of the nodes
        """
        (nc, ns, ec, ew, cmap) = self.get_visual_settings(step)
        if (self.prm['edge_style'] == 'bundled'):
            plt.plot(self.edge_pos.x, self.edge_pos.y, zorder=1, linewidth=ew, color=ec)
        else:
            nx.draw_networkx_edges(self.graph, pos=self.node_pos, width=ew, edge_color=ec)
        nodes = nx.draw_networkx_nodes(self.graph, pos=self.node_pos, cmap=cmap, vmin=0, vmax=self.prm['max_val'], node_size=ns, node_color=nc)
        return nodes

    def plot_network(self, step):
        """
        Renders the network at the given step and saves it as a PDF.

        :param step: current iteration of the simulation
        """
        fig = plt.figure(figsize=(20, 20))
        self.draw_graph(step)
        plt.axis('image')
        title = self.get_plot_title(step)
        plt.savefig(os.path.join(self.save_dir, (title + '.pdf')))
        plt.clf()

    def create_simulation_gif(self):
        """
        Draws and saves the network simulation to an MP4 file
        (and optionally per-frame PDF snapshots when 'gif_snaps' is set).
        """
        fig = plt.figure(figsize=(20, 20))
        nodes = self.draw_graph(step=0)

        def update(step):
            # per-frame update: only node colors and sizes change between steps
            (nc, ns, _, _, _) = self.get_visual_settings(step)
            nodes.set_array(nc)
            nodes.set_sizes(ns)
            if self.prm['gif_snaps']:
                snap_dir = os.path.join(self.save_dir, 'gif_snaps/')
                os.makedirs(snap_dir, exist_ok=True)
                plt.savefig((snap_dir + 'step_{}.pdf'.format(step)))
            return (nodes,)
        # frame cadence differs per simulation type
        if (self.child_class() == 'Diffusion'):
            frames = iter(list(range(0, self.prm['steps'], 10)))
            interval = 20
            fps = 5
        elif (self.child_class() == 'Cascading'):
            frames = self.prm['steps']
            interval = 20
            fps = 3
        else:
            frames = self.prm['steps']
            interval = 20
            fps = 1
        if (platform.system() != 'Windows'):
            anim = animation.FuncAnimation(fig, update, frames=frames, interval=interval, blit=(not self.prm['gif_snaps']), repeat=False)
            writer = animation.FFMpegWriter(fps=fps, extra_args=['-vcodec', 'libx264'])
            title = self.get_plot_title(self.prm['steps'])
            gif_path = os.path.join(self.save_dir, (title + '.mp4'))
            anim.save(gif_path, writer=writer)
        else:
            print('Warning: Animated video functionality not supported on Windows; snapshot images are available.')
        plt.clf()

    def run_simulation(self):
        """
        Averages the simulation over the number of 'runs'; plots/animates the
        first run when the corresponding options are enabled.

        :return: a list containing the average value at each 'step' of the simulation.
        """
        print('Running simulation {} times'.format(self.prm['runs']))
        sim_results = list(range(self.prm['runs']))
        for r in range(self.prm['runs']):
            sim_results[r] = self.run_single_sim()
            # visualization is only produced for the first run
            if (self.prm['plot_transition'] and (r == 0)):
                self.plot_graph_transition(self.sim_info)
            if (self.prm['gif_animation'] and (r == 0)):
                self.create_simulation_gif()
            self.reset_simulation()
        avg_results = []
        for t in range(self.prm['steps']):
            avg_results.append(np.mean([sim_results[r][t] for r in range(self.prm['runs'])]))
        return avg_results

    def reset_simulation(self):
        """
        Implemented by child class.
        """
        pass

    def run_single_sim(self):
        """
        Implemented by child class.
        """
        pass

    def get_effective_strength(self):
        """
        Implemented by child class.
        """
        pass
def gpu_available():
    """
    Checks whether the CuPy GPU library is installed.

    Fix: the original scanned pip's internal distribution list via
    pip._internal.utils.misc.get_installed_distributions, a private API that
    was removed in modern pip releases; importlib.util.find_spec is the
    supported way to probe for an installed package.

    :return: True if 'cupy' is importable, False otherwise
    """
    import importlib.util
    return importlib.util.find_spec('cupy') is not None
def get_sparse_graph(graph):
    """
    Returns a sparse adjacency matrix in CSR format.

    :param graph: undirected NetworkX graph
    :return: Scipy sparse adjacency matrix
    """
    # NOTE(review): nx.to_scipy_sparse_matrix was removed in NetworkX 3.0 in
    # favor of nx.to_scipy_sparse_array -- confirm the pinned networkx version.
    return nx.to_scipy_sparse_matrix(graph, format='csr', dtype=float, nodelist=graph.nodes)
def get_adjacency_spectrum(graph, k=np.inf, eigvals_only=False, which='LA', use_gpu=False):
    """
    Gets the top k eigenpairs of the adjacency matrix.

    Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin float is the documented drop-in replacement.

    :param graph: undirected NetworkX graph
    :param k: number of top k eigenpairs to obtain
    :param eigvals_only: get only the eigenvalues i.e., no eigenvectors
    :param which: the type of k eigenvectors and eigenvalues to find
    :param use_gpu: compute on the GPU via CuPy when it is installed
    :return: the eigenpair information
    """
    if (len(graph) < 100):
        # small graphs: a dense, exact eigendecomposition is cheap
        A = nx.adjacency_matrix(graph).todense()
        eigpairs = eigh(A, eigvals_only=eigvals_only)
    else:
        A = nx.to_scipy_sparse_matrix(graph, format='csr', dtype=float, nodelist=graph.nodes)
        if (gpu_available() and use_gpu):
            import cupy as cp
            import cupyx.scipy.sparse.linalg as cp_linalg
            A_gpu = cp.sparse.csr_matrix(A)
            eigpairs = cp_linalg.eigsh(A_gpu, k=min(k, (len(graph) - 3)), which=which, return_eigenvectors=(not eigvals_only))
            # move the results back to host memory
            if (type(eigpairs) is tuple):
                eigpairs = list(eigpairs)
                (eigpairs[0], eigpairs[1]) = (cp.asnumpy(eigpairs[0]), cp.asnumpy(eigpairs[1]))
            else:
                eigpairs = cp.asnumpy(eigpairs)
        else:
            if use_gpu:
                print('Warning: GPU requested, but not available')
            eigpairs = eigsh(A, k=min(k, (len(graph) - 1)), which=which, return_eigenvectors=(not eigvals_only))
    return eigpairs
def get_laplacian_spectrum(graph, k=np.inf, which='SM', tol=0.01, eigvals_only=True, use_gpu=False):
    """
    Gets the bottom k eigenpairs of the Laplacian matrix.

    :param graph: undirected NetworkX graph
    :param k: number of bottom k eigenpairs to obtain
    :param which: the type of k eigenvectors and eigenvalues to find
    :param tol: the precision at which to stop computing the eigenpairs
    :param eigvals_only: get only the eigenvalues i.e., no eigenvectors
    :param use_gpu: accepted for interface symmetry; GPU is not supported here
    :return: the eigenpair information, sorted ascending
    """
    if use_gpu:
        print('Warning: GPU requested, but not available for Laplacian measures')
    if len(graph) < 100:
        # small graphs: exact dense spectrum
        lam = nx.laplacian_spectrum(graph)
    else:
        num_pairs = min(k, len(graph) - 1)
        lam = eigsh(get_laplacian(graph), k=num_pairs, which=which, tol=tol, return_eigenvectors=not eigvals_only)
    return np.sort(lam)
def get_laplacian(graph):
    """
    Gets the Laplacian matrix L = D - A in sparse CSR format, where D is the
    diagonal degree matrix and A the adjacency matrix.

    Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin float is the documented drop-in replacement.

    :param graph: undirected NetworkX graph
    :return: Scipy sparse Laplacian matrix
    """
    A = nx.to_scipy_sparse_matrix(graph, format='csr', dtype=float, nodelist=graph.nodes)
    # D holds the row sums (vertex degrees) on the diagonal
    D = sparse.spdiags(data=A.sum(axis=1).flatten(), diags=[0], m=len(graph), n=len(graph), format='csr')
    L = (D - A)
    return L
def test_attack_strength():
    """
    Check that every attack method returns exactly k targets for k in 1..19.
    """
    graph = karate()
    for method in get_attack_methods():
        for k in range(1, 20):
            targets = run_attack_method(graph, method=method, k=k)
            assert len(targets) == k
def test_method_selection():
    """
    Check that each attack method selects the expected nodes/edges on the
    karate-club graph (seed=1). Dict entries holding a 2-tuple admit two valid
    orderings because the underlying method may break score ties either way.
    """
    ground_truth = {'ns_node': ([33, 0, 2, 32], [33, 2, 0, 32]), 'pr_node': [33, 0, 32, 2], 'eig_node': [33, 0, 2, 32], 'id_node': [33, 0, 32, 2], 'rd_node': [33, 0, 32, 1], 'ib_node': [0, 33, 32, 2], 'rb_node': [0, 33, 32, 2], 'ns_line_edge': [(32, 33), (8, 33), (31, 33), (13, 33)], 'pr_line_edge': [(32, 33), (0, 2), (0, 1), (0, 31)], 'eig_line_edge': [(32, 33), (8, 33), (31, 33), (13, 33)], 'deg_line_edge': [(32, 33), (0, 2), (0, 1), (31, 33)], 'id_edge': [(32, 33), (0, 2), (0, 1), (2, 32)], 'rd_edge': [(32, 33), (0, 2), (0, 1), (2, 32)], 'ib_edge': ([(0, 31), (0, 6), (0, 5), (0, 2)], [(0, 31), (0, 5), (0, 6), (0, 2)]), 'rb_edge': [(0, 31), (0, 2), (0, 8), (13, 33)]}
    graph = karate()
    k = 4
    methods = get_attack_methods()
    for method in methods:
        values = run_attack_method(graph, method=method, k=k, seed=1)
        # random ('rnd') methods have no deterministic ground truth and are skipped
        if (('rnd' not in method) and (method != 'ib_edge') and (method != 'ns_node')):
            assert (values == ground_truth[method])
        elif ((method == 'ib_edge') or (method == 'ns_node')):
            # tie-prone methods: accept either recorded ordering
            assert ((values == ground_truth[method][0]) or (values == ground_truth[method][1]))
def main():
    """Run the attack test suite."""
    for test in (test_method_selection, test_attack_strength):
        test()
def test_defense_strength():
    """
    Check that every node-level defense method returns exactly k nodes
    for k in 1..19.
    """
    graph = karate()
    for method in get_defense_methods():
        if get_defense_category(method) != 'node':
            continue
        for k in range(1, 20):
            nodes = run_defense_method(graph, method=method, k=k)
            assert len(nodes) == k
def test_method_selection():
    """
    Check that each defense method selects the expected nodes/edges on the
    karate-club graph (seed=1). Node methods return a node list; edge methods
    return a dict with 'added' (and, for rewiring, 'removed') edge lists.
    """
    ground_truth = {'ns_node': ([33, 0, 2, 32], [33, 2, 0, 32]), 'pr_node': [33, 0, 32, 2], 'eig_node': [33, 0, 2, 32], 'id_node': [33, 0, 32, 2], 'rd_node': [33, 0, 32, 1], 'ib_node': [0, 33, 32, 2], 'rb_node': [0, 33, 32, 2], 'rnd_node': [14, 19, 3, 27], 'add_edge_pr': {'added': [(33, 0), (0, 32), (33, 2), (33, 1)]}, 'add_edge_eig': {'added': [(33, 0), (33, 2), (0, 32), (33, 1)]}, 'add_edge_deg': {'added': [(33, 0), (0, 32), (33, 2), (33, 1)]}, 'add_edge_random': {'added': [(14, 19), (16, 22), (29, 20), (31, 15)]}, 'add_edge_preferential': {'added': [(11, 9), (12, 14), (15, 16), (17, 18)]}, 'rewire_edge_random': {'added': [(21, 26), (30, 31), (18, 26), (17, 29)], 'removed': [(27, 33), (32, 33), (9, 33), (2, 9)]}, 'rewire_edge_random_neighbor': {'added': [(16, 22), (9, 12), (3, 23), (16, 30)], 'removed': [(14, 33), (19, 1), (3, 12), (27, 2)]}, 'rewire_edge_preferential': {'added': [(18, 12), (10, 9), (28, 5), (1, 16)], 'removed': [(33, 18), (0, 10), (33, 28), (0, 1)]}, 'rewire_edge_preferential_random': {'added': [(27, 19), (32, 8), (9, 32), (9, 10)], 'removed': [(27, 33), (32, 33), (9, 33), (2, 9)]}}
    graph = karate()
    k = 4
    methods = get_defense_methods()
    for method in methods:
        values = run_defense_method(graph, method=method, k=k, seed=1)
        if (get_defense_category(method) == 'node'):
            if (method == 'ns_node'):
                # tie-prone method: accept either recorded ordering
                assert ((values == ground_truth[method][0]) or (values == ground_truth[method][1]))
            else:
                assert (values == ground_truth[method])
        else:
            # edge-level methods report added (and possibly removed) edges
            assert np.array_equal(values['added'], ground_truth[method]['added'])
            if ('removed' in values):
                assert np.array_equal(values['removed'], ground_truth[method]['removed'])
def main():
    """Run the defense test suite."""
    for test in (test_defense_strength, test_method_selection):
        test()
def test_measures():
    """
    Check each robustness measure against hand-computed ground truth on nine
    small graphs (values compared with a +/-0.1 tolerance after rounding).
    Measures whose dict entry is a 2-tuple admit two valid value lists because
    the underlying computation can resolve ties either way.
    """
    ground_truth = {'node_connectivity': [0, 1, 2, 2, 3, 0, 1, 1, 1], 'edge_connectivity': [0, 1, 2, 2, 3, 0, 1, 1, 1], 'diameter': [None, 3, 2, 2, 1, None, 5, 5, 5], 'average_distance': [None, 1.67, 1.33, 1.17, 1, None, 2.29, 2.29, 2.29], 'average_inverse_distance': [0, 0.72, 0.83, 0.92, 1.0, 0.36, 0.58, 0.58, 0.58], 'average_vertex_betweenness': ([0, 4, 3.5, 3.25, 3, 3.5, 11.5, 11.5, 11.5], [0, 4, 3.5, 3.25, 3, 3.5, 11.5, None, None]), 'average_edge_betweenness': ([0, 3.33, 2.0, 1.4, 1, 2, 7.11, 6.4, 5.82], [0, 3.33, 2.0, 1.4, 1, 2, 7.11, 7.11, 7.11]), 'average_clustering_coefficient': [0, 0, 0, 0.83, 1, 0, 0, None, None], 'largest_connected_component': [1, 4, 4, 4, 4, 4, 8, 8, 8], 'spectral_radius': [0, 1.62, 2, 2.56, 3, 2, 2.34, 2.9, 3.65], 'spectral_gap': [0, 1, 2, 2.56, 4, 0, 0.53, 1.19, 2], 'natural_connectivity': [0, 0.65, 0.87, 1.29, 1.67, 0.87, 0.97, 1.28, 1.81], 'spectral_scaling': ([None, 7.16, 7.26, 0.17, 0.09, None, None, 7.01, 6.9], [None, 7.18, 7.28, 0.17, 0.09, None, None, 7.04, 6.93]), 'generalized_robustness_index': ([None, 7.16, 7.26, 0.17, 0.09, None, None, 7.01, 6.9], [None, 7.18, 7.28, 0.17, 0.09, None, None, 7.04, 6.93]), 'algebraic_connectivity': [0, 0.59, 2, 2, 4, 0, 0.29, 0.4, 0.45], 'number_spanning_trees': [0, 1, 4, 8, 16, 0, 16, 32, 48], 'effective_resistance': [np.inf, 10, 5, 4, 3, np.inf, 46, 38, 35.33]}
    # index i of each ground-truth list corresponds to graphs[i] below
    graphs = [o4_graph(), p4_graph(), c4_graph(), k4_1_graph(), k4_2_graph(), two_c4_0_bridge(), two_c4_1_bridge(), two_c4_2_bridge(), two_c4_3_bridge()]
    for (measure, gt) in ground_truth.items():
        for (idx, graph) in enumerate(graphs):
            value = run_measure(graph, measure)
            if (value is not None):
                value = round(value, 2)
            if ((measure == 'average_vertex_betweenness') or (measure == 'average_edge_betweenness') or (measure == 'spectral_scaling') or (measure == 'generalized_robustness_index')):
                # tie-prone measures: value must match either alternative list
                if (value is None):
                    assert ((gt[0][idx] == value) or (gt[1][idx] == value))
                else:
                    assert (((gt[0][idx] - 0.1) <= value <= (gt[0][idx] + 0.1)) or ((gt[1][idx] - 0.1) <= value <= (gt[1][idx] + 0.1)))
            elif (value is None):
                assert (gt[idx] == value)
            else:
                assert ((gt[idx] - 0.1) <= value <= (gt[idx] + 0.1))
def main():
    """Run the robustness-measure test suite."""
    test_measures()
def test_sis_model():
    """
    SIS epidemic sanity check: adding random edges must increase total
    diffusion, while vaccinating (ns_node) must decrease it relative to the
    unmodified baseline graph.
    """
    params = {'model': 'SIS', 'b': 0.00208, 'd': 0.01, 'c': 1, 'runs': 10, 'steps': 5000, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 15, 'seed': 1, 'plot_transition': False, 'gif_animation': False}
    graph = karate()
    # (1) diffusion amplified by adding 15 random edges
    ds = Diffusion(graph, **params)
    increased_diffusion = ds.run_simulation()
    # (2) baseline: no graph modification
    params['diffusion'] = None
    params['method'] = None
    params['k'] = 0
    ds = Diffusion(graph, **params)
    baseline_diffusion = ds.run_simulation()
    # (3) diffusion suppressed by protecting the top-4 ns_node nodes
    params['diffusion'] = 'min'
    params['method'] = 'ns_node'
    params['k'] = 4
    ds = Diffusion(graph, **params)
    decreased_diffusion = ds.run_simulation()
    assert (sum(decreased_diffusion) < sum(baseline_diffusion) < sum(increased_diffusion))
def test_sir_model():
    """
    SIR epidemic sanity check: adding random edges must increase total
    diffusion, while vaccinating (ns_node) must decrease it relative to the
    unmodified baseline graph.
    """
    params = {'model': 'SIR', 'b': 0.00208, 'd': 0.01, 'c': 0.1, 'runs': 10, 'steps': 5000, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 40, 'seed': 1, 'plot_transition': False, 'gif_animation': False}
    graph = karate()
    # (1) diffusion amplified by adding 40 random edges
    ds = Diffusion(graph, **params)
    increased_diffusion = ds.run_simulation()
    # (2) baseline: no graph modification
    params['diffusion'] = None
    params['method'] = None
    params['k'] = 0
    ds = Diffusion(graph, **params)
    baseline_diffusion = ds.run_simulation()
    # (3) diffusion suppressed by protecting the top-4 ns_node nodes
    params['diffusion'] = 'min'
    params['method'] = 'ns_node'
    params['k'] = 4
    ds = Diffusion(graph, **params)
    decreased_diffusion = ds.run_simulation()
    assert (sum(decreased_diffusion) < sum(baseline_diffusion) < sum(increased_diffusion))
def test_cascading():
    """
    Cascading-failure sanity check: an attacked network must retain at most as
    much robustness as a defended one, which in turn retains at most as much
    as the untouched baseline.
    """
    params = {'runs': 10, 'steps': 30, 'l': 0.8, 'r': 0.5, 'capacity_approx': np.inf, 'k_a': 4, 'attack': 'rnd_node', 'k_d': 0, 'defense': None, 'robust_measure': 'largest_connected_component', 'seed': 1, 'plot_transition': False, 'gif_animation': False}
    graph = karate()
    # (1) attack 4 random nodes, no defense
    cf = Cascading(graph, **params)
    attacked = cf.run_simulation()
    # (2) baseline: no attack at all
    params['k_a'] = 0
    params['attack'] = None
    cf = Cascading(graph, **params)
    baseline = cf.run_simulation()
    # (3) same attack, but defend the top-4 PageRank nodes
    params['k_a'] = 4
    params['attack'] = 'rnd_node'
    params['k_d'] = 4
    params['defense'] = 'pr_node'
    cf = Cascading(graph, **params)
    defended = cf.run_simulation()
    assert (sum(attacked) <= sum(defended) <= sum(baseline))
def main():
    """Run the diffusion and cascading-failure test suite."""
    for test in (test_sis_model, test_sir_model, test_cascading):
        test()
def run_test(params):
    """
    Instantiate a Diffusion simulation on the karate-club graph with the given
    parameters and run it (exercises the visualization code paths).
    """
    Diffusion(karate(), **params).run_simulation()
def test_animation():
    """Smoke test: SIS diffusion with the MP4 animation output enabled."""
    params = {'model': 'SIS', 'b': 0.00208, 'd': 0.01, 'c': 1, 'runs': 10, 'steps': 500, 'seed': 1, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 15, 'plot_transition': False, 'gif_animation': True}
    run_test(params)
def test_transition():
    """Smoke test: SIS diffusion with transition snapshot plotting enabled."""
    params = {'model': 'SIS', 'b': 0.00208, 'd': 0.01, 'c': 1, 'runs': 10, 'steps': 500, 'seed': 1, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 15, 'plot_transition': True, 'gif_animation': False}
    run_test(params)
def test_gif_snaps():
    """Smoke test: animation plus per-frame PDF snapshots ('gif_snaps')."""
    params = {'model': 'SIS', 'b': 0.00208, 'd': 0.01, 'c': 1, 'runs': 10, 'steps': 500, 'seed': 1, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 15, 'plot_transition': False, 'gif_animation': True, 'gif_snaps': True}
    run_test(params)
def test_force_atlas():
    """Smoke test: SIR diffusion with the ForceAtlas2 node layout."""
    params = {'model': 'SIR', 'b': 0.00208, 'd': 0.01, 'c': 1, 'runs': 10, 'steps': 500, 'seed': 1, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 15, 'edge_style': None, 'node_style': 'force_atlas', 'fa_iter': 200, 'plot_transition': True, 'gif_animation': False}
    run_test(params)
def test_edge_bundling():
    """Smoke test: SIS diffusion with bundled edges and ForceAtlas2 layout."""
    params = {'model': 'SIS', 'b': 0.00208, 'd': 0.01, 'c': 1, 'runs': 10, 'steps': 500, 'seed': 1, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 15, 'edge_style': 'bundled', 'node_style': 'force_atlas', 'fa_iter': 200, 'plot_transition': True, 'gif_animation': False}
    run_test(params)
def main():
    """Run the visualization test suite."""
    for test in (test_animation, test_transition, test_gif_snaps, test_force_atlas, test_edge_bundling):
        test()
def gps2coordinate(p2, p1):
    """
    Convert the GPS point p2 into local planar (x, y) offsets in meters,
    measured from the origin point p1, with sign taken from the relative
    latitude/longitude ordering.

    NOTE(review): geopy's vincenty was removed in geopy 2.0 in favor of
    geodesic -- confirm the pinned geopy version.
    """
    dx = vincenty(p1, (p2[0], p1[1])).meters
    dy = vincenty(p1, (p1[0], p2[1])).meters
    if p2[0] < p1[0]:
        dx = -dx
    if p2[1] < p1[1]:
        dy = -dy
    return [round(dx, 6), round(dy, 6)]
def get_coordinates(records):
    """
    Convert each record's GPS (x, y) into local planar meters, using the first
    record ever seen (module-level start_location) as the shared origin.
    Mutates the records in place and returns them.

    Fix: the origin sentinel was compared with '== None'; identity comparison
    'is None' is the correct idiom (== can invoke arbitrary __eq__).

    :param records: structured array/sequence of records with 'x' and 'y' fields
    :return: the same records, with 'x'/'y' rewritten as planar offsets
    """
    global start_location
    if start_location is None:
        start_location = (records[0]['x'], records[0]['y'])
    for r in records:
        coord = gps2coordinate((r['x'], r['y']), start_location)
        r['x'] = coord[0]
        r['y'] = coord[1]
    return records
def read_records(input_records_file, gps_type):
    """
    Read a whitespace-separated records file (one header line, then
    'name x y' rows) into a NumPy structured array.

    Fix: the original opened the file without ever closing it; a context
    manager guarantees the handle is released.

    :param input_records_file: path to the records file
    :param gps_type: unused here; kept for interface compatibility with callers
    :return: structured array with fields ('name', 'x', 'y')
    """
    with open(input_records_file) as f:
        lines = f.readlines()[1:]  # skip the header line
    rows = [l.strip().split(' ') for l in lines]
    records = np.array([(r[0], float(r[1]), float(r[2])) for r in rows],
                       dtype=[('name', object), ('x', float), ('y', float)])
    return records
def main(args):
    """
    Evaluate top-k place recognition: match test (query) images against train
    (map) images by VLAD-descriptor similarity and report top-k accuracy under
    a GPS acceptance distance.

    Fix: the body used Python-2-only constructs (xrange, dict.has_key) that
    raise NameError/AttributeError on Python 3; replaced with range and 'in'.

    :param args: argparse-style namespace with train_list, test_list,
        vlad_features, gps_type, top_k, similarity_thresh,
        acceptance_distance_thresh
    """
    train_records = read_records(args.train_list, args.gps_type)
    train_names = [r[0] for r in train_records]
    test_records = read_records(args.test_list, args.gps_type)
    test_names = [r[0] for r in test_records]
    # NOTE(review): np.load may need allow_pickle=True on NumPy >= 1.16.3 -- confirm
    name2vlad = np.load(args.vlad_features)['name2vlad'].item()
    print(len(train_records), len(test_records), len(train_names), len(test_names))
    if (args.gps_type == 'global'):
        # convert global GPS to local planar coordinates; keep the raw test GPS
        train_records = get_coordinates(train_records)
        test_gps = copy.deepcopy(test_records)
        print('Before: test_gps[0]: {}'.format(test_gps[0]))
        test_records = get_coordinates(test_records)
        print('After: test_gps[0]: {}'.format(test_gps[0]))
        print('After: train_records[0]: {}'.format(test_records[0]))

    def get_records():
        # collect (index, descriptor) pairs for every record that has a VLAD
        map_idx = []
        loc_idx = []
        map_vlad = []
        loc_vlad = []
        for ith in range(len(train_records)):
            ith_name = train_names[ith]
            if ith_name not in name2vlad:
                continue
            map_idx.append(ith)
            map_vlad.append(name2vlad[ith_name])
        for ith in range(len(test_records)):
            ith_name = test_names[ith]
            if ith_name not in name2vlad:
                print('{} not found in name2vlad'.format(ith_name))
                continue
            loc_idx.append(ith)
            loc_vlad.append(name2vlad[ith_name])
        print(len(map_idx), len(loc_idx), len(map_vlad), len(loc_vlad))
        return (map_idx, loc_idx, map_vlad, loc_vlad)

    (map_idx, loc_idx, map_vlad, loc_vlad) = get_records()
    assert (len(map_idx) >= args.top_k)
    map_vlad_mat = np.array(map_vlad)
    loc_pairs = zip(loc_idx, loc_vlad)  # lazy in Python 3; iterated exactly once below

    def whether_should_found(r_query):
        # True when at least one map image lies within the acceptance distance
        xy_map = np.array([(train_records[ith]['x'], train_records[ith]['y']) for ith in map_idx])
        xy_query = np.tile(np.array([r_query['x'], r_query['y']]), (len(map_idx), 1))
        dxy = (xy_map - xy_query)
        d = np.sqrt((dxy * dxy).sum(axis=1))
        return np.any((d <= args.acceptance_distance_thresh))

    total_found = np.zeros((args.top_k,), dtype=int)
    ith_found = np.zeros(len(test_records))
    cnt = 0
    total_should_found = 0
    for (ith, ith_vlad) in loc_pairs:
        cnt = (cnt + 1)
        if ((cnt % 1000) == 0):
            print(('compute for query %d' % cnt))
        ith_name = test_names[ith]
        ith_rec = test_records[ith]
        ith_vlad = name2vlad[ith_name]
        # descriptor similarity = dot product; rank map images descending
        similarities = map_vlad_mat.dot(ith_vlad)
        results = np.argsort(similarities)[::(- 1)]
        results = results[:args.top_k]
        total_should_found += whether_should_found(ith_rec)
        found = np.zeros((args.top_k,), dtype=bool)
        for k in range(args.top_k):
            if (similarities[results[k]] < args.similarity_thresh):
                continue
            jth = map_idx[results[k]]
            jth_rec = train_records[jth]
            dij = np.sqrt((((ith_rec['x'] - jth_rec['x']) ** 2) + ((ith_rec['y'] - jth_rec['y']) ** 2)))
            found[k] = (dij <= args.acceptance_distance_thresh)
        # top-k hit when any of the first k candidates is within distance
        top_k_found = (np.cumsum(found) > 0)
        total_found += top_k_found
        ith_found[ith] = top_k_found[(args.top_k - 1)]
    print('for acceptance distance={:.1f}m:'.format(args.acceptance_distance_thresh))
    for k in range(args.top_k):
        print('\ttop-{:d}-accuracy={:d}/{:d}={:.2f}%'.format((k + 1), total_found[k], total_should_found, ((total_found[k] * 100.0) / total_should_found)))
    found_gps = []
    if (args.gps_type == 'global'):
        # dump the raw GPS of every query localized at top-k
        found_gps = [(test_gps[i]['x'], test_gps[i]['y']) for i in range(len(ith_found)) if ith_found[i]]
        fname_found = args.test_list.split('/')[(- 1)].replace('.csv', '_d={}_found.txt'.format(args.acceptance_distance_thresh))
        print('found GPSs are written into {}'.format(fname_found))
        with open(fname_found, 'w') as f:
            for (x, y) in found_gps:
                f.write('{} {}\n'.format(x, y))
class PGNetwork(nn.Module):
    """Two-layer MLP policy network mapping a state vector to action logits."""

    def __init__(self, state_dim, action_dim):
        """
        Initialize PGNetwork.
        :param state_dim: dimension of the state
        :param action_dim: dimension of the action
        """
        super(PGNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, 20)
        self.fc2 = nn.Linear(20, action_dim)

    def forward(self, x):
        """Return raw (unnormalized) action logits for state x."""
        out = F.relu(self.fc1(x))
        out = self.fc2(out)
        return out

    def initialize_weights(self):
        """
        Re-initialize all linear layers: weights ~ N(0, 0.1), biases = 0.01.

        Fix: the original iterated self.modules() unconditionally; that
        iterator also yields the PGNetwork container itself, which has no
        .weight attribute, so the call raised AttributeError. Only nn.Linear
        submodules are (re)initialized now.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
class Actor(object):
    """
    Policy-gradient actor: samples actions from a softmax policy produced by a
    PGNetwork and updates the policy with TD-error-weighted log-likelihood.
    """

    def __init__(self, state_dim, action_dim, device, LR):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.device = device
        self.LR = LR
        self.network = PGNetwork(state_dim=self.state_dim, action_dim=self.action_dim).to(self.device)
        self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.LR)
        self.time_step = 0

    def choose_action(self, observation):
        """Sample an action index from the current softmax policy."""
        state = torch.FloatTensor(observation).to(self.device)
        logits = self.network.forward(state)
        with torch.no_grad():
            probs = F.softmax(logits, dim=0).data.cpu().numpy()
        chosen = np.random.choice(range(probs.shape[0]), p=probs)
        return chosen

    def learn(self, state, action, td_error):
        """One policy-gradient step: loss = -log pi(action|state) * td_error."""
        self.time_step += 1
        logits = self.network.forward(torch.FloatTensor(state).to(self.device)).unsqueeze(0)
        target = torch.LongTensor([action]).to(self.device)
        # cross_entropy on a single sample equals the negative log-probability
        neg_log_prob = F.cross_entropy(input=logits, target=target, reduction='none')
        loss_a = (- neg_log_prob) * td_error
        self.optimizer.zero_grad()
        loss_a.backward()
        self.optimizer.step()
class QNetwork(nn.Module):
    """
    Two-layer MLP value network mapping a state vector to a scalar value.
    action_dim is accepted for interface symmetry with PGNetwork but unused
    (the critic outputs a single state value).
    """

    def __init__(self, state_dim, action_dim):
        super(QNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, 20)
        self.fc2 = nn.Linear(20, 1)

    def forward(self, x):
        """Return the scalar state-value estimate for state x."""
        out = F.relu(self.fc1(x))
        out = self.fc2(out)
        return out

    def initialize_weights(self):
        """
        Re-initialize all linear layers: weights ~ N(0, 0.1), biases = 0.01.

        Fix: the original iterated self.modules() unconditionally; that
        iterator also yields the QNetwork container itself, which has no
        .weight attribute, so the call raised AttributeError. Only nn.Linear
        submodules are (re)initialized now.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
class Critic(object):
    """
    State-value critic trained by one-step temporal difference: fits
    V(s) toward reward + GAMMA * V(s') and reports the TD error.
    """

    def __init__(self, state_dim, action_dim, device, LR, GAMMA):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.device = device
        self.LR = LR
        self.GAMMA = GAMMA
        self.network = QNetwork(state_dim=self.state_dim, action_dim=self.action_dim).to(self.device)
        self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.LR)
        self.loss_func = nn.MSELoss()

    def train_Q_network(self, state, reward, next_state):
        """One TD(0) update; returns the (detached) TD error for the actor."""
        current = torch.FloatTensor(state).to(self.device)
        following = torch.FloatTensor(next_state).to(self.device)
        v = self.network.forward(current)
        v_next = self.network.forward(following)
        # regress V(s) toward the bootstrapped target
        loss_q = self.loss_func((reward + (self.GAMMA * v_next)), v)
        self.optimizer.zero_grad()
        loss_q.backward()
        self.optimizer.step()
        with torch.no_grad():
            td_error = (reward + (self.GAMMA * v_next)) - v
        return td_error
class RLForest():

    def __init__(self, width_rl, height_rl, device, LR, GAMMA, stop_num, r_num):
        """
        Initialize the RL Forest: one tree (a stack of actor-critic pairs, one
        per depth level) per relation.

        :param width_rl: width of each relation tree
        :param height_rl: height of each relation tree
        :param device: "cuda" / "cpu"
        :param LR: Actor learning rate (hyper-parameters of AC)
        :param GAMMA: Actor discount factor (hyper-parameters of AC)
        :param stop_num: deep switching or termination conditions
        :param r_num: the number of relations
        """
        # actors[r][d] / critics[r][d]: the AC pair at depth d of relation r's tree
        self.actors = [[Actor(1, width_rl[r], device, LR) for j in range(height_rl[r])] for r in range(r_num)]
        self.critics = [[Critic(1, width_rl[r], device, LR, GAMMA) for j in range(height_rl[r])] for r in range(r_num)]
        self.r_num = r_num
        # per-relation bookkeeping: current tree depth, consecutive-stable
        # counter, and the left endpoint of the current action interval
        self.init_rl = [0 for r in range(r_num)]
        self.init_termination = [0 for r in range(r_num)]
        self.init_action = [0 for r in range(r_num)]
        # best AUC seen so far and the thresholds that produced it
        self.max_auc = 0
        self.max_thresholds = [0 for r in range(r_num)]
        self.width = list(width_rl)
        self.stop_num = stop_num
        # per-epoch histories of the RL trajectory
        self.thresholds_log = []
        self.actions_log = []
        self.states_log = []
        self.scores_log = []
        self.rewards_log = []

    def get_threshold(self, scores, labels, previous_thresholds, batch_num, auc):
        """
        The reinforcement learning module.
        It updates the neighbor filtering threshold for each relation based
        on the average neighbor distances between two consecutive epochs.

        :param scores: the neighbor nodes label-aware scores for each relation
        :param labels: the batch node labels used to select positive nodes
        :param previous_thresholds: the current neighbor filtering thresholds for each relation
        :param batch_num: numbers batches in an epoch
        :param auc: the auc of the previous filter thresholds for each relation
        :returns: (new thresholds, rl_flag) where rl_flag is False once every
            relation's tree has terminated
        """
        new_scores = get_scores(scores, labels)
        # counts how many relations have fully terminated this call
        rl_flag0 = 0
        # only act at epoch boundaries; mid-epoch batches keep the thresholds
        if (((len(self.scores_log) % batch_num) != 0) or (len(self.scores_log) < batch_num)):
            new_thresholds = list(previous_thresholds)
        else:
            # per-relation state = mean batch score over the last epoch
            current_epoch_states = [(sum(s) / batch_num) for s in zip(*self.scores_log[(- batch_num):])]
            new_states = [np.array([s], float) for (i, s) in enumerate(current_epoch_states)]
            # remember the best thresholds seen so far
            if (auc >= self.max_auc):
                self.max_auc = auc
                self.max_thresholds = list(previous_thresholds)
            new_actions = [0 for r in range(self.r_num)]
            new_thresholds = [0 for r in range(self.r_num)]
            if (len(self.states_log) == 0):
                # first epoch: no previous transition to learn from, just act
                self.init_termination = [(i + 1) for i in self.init_termination]
                for r_num in range(self.r_num):
                    (new_actions[r_num], new_thresholds[r_num]) = self.get_action(new_states, r_num)
            else:
                previous_states = self.states_log[(- 1)]
                previous_actions = self.actions_log[(- 1)]
                # penalize thresholds that left the valid (0, 1] interval
                new_rewards = [(s if ((0 < previous_thresholds[i]) and (previous_thresholds[i] <= 1)) else (- 100)) for (i, s) in enumerate(current_epoch_states)]
                r_flag = self.adjust_depth()
                for r_num in range(self.r_num):
                    if (r_flag[r_num] == 1):
                        # actions stabilized: descend a level or terminate
                        if (len(self.actors[r_num]) == (self.init_rl[r_num] + 1)):
                            # deepest level reached: freeze at the best thresholds
                            self.init_termination[r_num] = self.init_termination[r_num]
                            new_actions[r_num] = previous_actions[r_num]
                            new_thresholds[r_num] = self.max_thresholds[r_num]
                            rl_flag0 += 1
                            print('Relation {0} is complete !!!!!'.format(str((r_num + 1))), flush=True)
                        else:
                            # descend: narrow the action interval around the best threshold
                            self.init_termination[r_num] = 0
                            self.init_rl[r_num] = (self.init_rl[r_num] + 1)
                            self.init_action[r_num] = (self.max_thresholds[r_num] - ((self.width[r_num] / 2) * pow((1 / self.width[r_num]), (self.init_rl[r_num] + 1))))
                            (new_actions[r_num], new_thresholds[r_num]) = self.get_action(new_states, r_num)
                    else:
                        # keep exploring at the current depth: learn, then act
                        self.init_termination[r_num] = (self.init_termination[r_num] + 1)
                        self.learn(previous_states, previous_actions, new_states, new_rewards, r_num)
                        (new_actions[r_num], new_thresholds[r_num]) = self.get_action(new_states, r_num)
                self.rewards_log.append(new_rewards)
                print(('Rewards: ' + str(new_rewards)), flush=True)
            self.states_log.append(new_states)
            print(('States: ' + str(new_states)), flush=True)
            self.thresholds_log.append(new_thresholds)
            print(('Thresholds: ' + str(new_thresholds)), flush=True)
            self.actions_log.append(new_actions)
        self.scores_log.append(new_scores)
        print(('Historical maximum AUC: ' + str(self.max_auc)), flush=True)
        print(('Thresholds to obtain the historical maximum AUC: ' + str(self.max_thresholds)), flush=True)
        print(('Current depth of each RL Tree: ' + str(self.init_rl)), flush=True)
        rl_flag = (False if (rl_flag0 == self.r_num) else True)
        print(('Completion flag of the entire RL Forest: ' + str(rl_flag)), flush=True)
        return (new_thresholds, rl_flag)

    def learn(self, previous_states, previous_actions, new_states, new_rewards, r_num):
        """
        One actor-critic update for relation r_num at its current tree depth.

        :param previous_states: the previous states
        :param previous_actions: the previous actions
        :param new_states: the current states
        :param new_rewards: the current rewards
        :param r_num: the index of relation
        """
        td_error = self.critics[r_num][self.init_rl[r_num]].train_Q_network(previous_states[r_num], new_rewards[r_num], new_states[r_num])
        self.actors[r_num][self.init_rl[r_num]].learn(previous_states[r_num], previous_actions[r_num], td_error)
        return

    def get_action(self, new_states, r_num):
        """
        Sample an action at the current depth and map it to a threshold inside
        the current (depth-scaled) action interval, clamped at 1.

        :param new_states: the current states
        :param r_num: the index of relation
        :returns: new actions and thresholds for new_states under relation r_num
        """
        new_actions = self.actors[r_num][self.init_rl[r_num]].choose_action(new_states[r_num])
        new_thresholds = (self.init_action[r_num] + ((new_actions + 1) * pow((1 / self.width[r_num]), (self.init_rl[r_num] + 1))))
        new_thresholds = (1 if (new_thresholds >= 1) else new_thresholds)
        return (new_actions, new_thresholds)

    def adjust_depth(self):
        """
        A relation is flagged (1) when its counter exceeded stop_num AND its
        last stop_num-1 logged actions are all identical, i.e. the policy has
        stabilized at the current depth.

        :returns: the depth flag of each relation
        """
        r_flag = [1 for r in range(self.r_num)]
        for r_num in range(self.r_num):
            if (self.init_termination[r_num] > self.stop_num):
                for s in range((self.stop_num - 1)):
                    r_flag[r_num] = (r_flag[r_num] * (1 if (self.actions_log[((- 1) * (s + 1))][r_num] == self.actions_log[((- 1) * (s + 2))][r_num]) else 0))
            else:
                r_flag[r_num] = 0
        return r_flag
def get_scores(scores, labels):
    """Average label-aware neighbor score over the positive nodes, per relation.

    :param scores: per-relation sequences of node scores; each node score is
        either a single float or a list of per-neighbor floats
    :param labels: 1-D tensor of batch node labels; nodes with label 1 are
        treated as positive
    :returns: list with one mean positive score per relation (0.0 for a
        relation when the batch has no positive node / neighbor)
    """
    relation_scores = []
    pos_index = [i[0] for i in (labels == 1).nonzero().tolist()]
    for score in scores:
        # Plain indexing instead of itemgetter(*pos_index): itemgetter with a
        # single index returns the bare element (not a tuple), which made the
        # counting below iterate a lone float and raise TypeError, and with
        # zero indices itemgetter() itself raises.
        pos_scores = [score[i] for i in pos_index]
        neigh_count = sum((1 if isinstance(s, float) else len(s)) for s in pos_scores)
        pos_sum = [(s if isinstance(s, float) else sum(s)) for s in pos_scores]
        # Guard the empty-positive batch instead of dividing by zero.
        relation_scores.append(sum(pos_sum) / neigh_count if neigh_count else 0.0)
    return relation_scores
class GraphSage(nn.Module):
    """Vanilla GraphSAGE model.

    Code partially from https://github.com/williamleif/graphsage-simple/
    """

    def __init__(self, num_classes, enc):
        super(GraphSage, self).__init__()
        self.enc = enc
        self.xent = nn.CrossEntropyLoss()
        # One row of projection weights per output class.
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        """Return per-node class logits, shape (len(nodes), num_classes)."""
        return self.weight.mm(self.enc(nodes)).t()

    def to_prob(self, nodes):
        """Sigmoid-squashed logits for the given nodes."""
        return torch.sigmoid(self.forward(nodes))

    def loss(self, nodes, labels):
        """Cross-entropy of the logits against ``labels``."""
        logits = self.forward(nodes)
        return self.xent(logits, labels.squeeze())
class MeanAggregator(nn.Module):
    """Aggregates a node's embeddings using the mean of its neighbors' embeddings."""

    def __init__(self, features, cuda=False, gcn=False):
        """Initializes the aggregator for a specific graph.

        features -- function mapping LongTensor of node ids to FloatTensor of feature values.
        cuda -- whether to use GPU
        gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style
        """
        super(MeanAggregator, self).__init__()
        self.features = features
        self.cuda = cuda
        self.gcn = gcn

    def forward(self, nodes, to_neighs, num_sample=10):
        """
        nodes --- list of nodes in a batch
        to_neighs --- list of sets, each set is the set of neighbors for node in batch
        num_sample --- number of neighbors to sample. No sampling if None.
        """
        if num_sample is not None:
            _sample = random.sample
            # Bug fix: random.sample requires a sequence (sets raise TypeError
            # on Python >= 3.11), so materialize each neighbor set first.
            samp_neighs = [(set(_sample(list(to_neigh), num_sample)) if (len(to_neigh) >= num_sample) else to_neigh) for to_neigh in to_neighs]
        else:
            samp_neighs = to_neighs
        if self.gcn:
            # GCN-style: add a self-loop for each batch node.
            samp_neighs = [samp_neigh.union(set([int(nodes[i])])) for (i, samp_neigh) in enumerate(samp_neighs)]
        unique_nodes_list = list(set.union(*samp_neighs))
        unique_nodes = {n: i for (i, n) in enumerate(unique_nodes_list)}
        # Row-normalized adjacency mask: mask[i, j] = 1/deg(i) when unique node
        # j is a (sampled) neighbor of batch node i.  The deprecated Variable
        # wrapper is dropped; it has been a no-op since torch 0.4.
        mask = torch.zeros(len(samp_neighs), len(unique_nodes))
        column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
        row_indices = [i for i in range(len(samp_neighs)) for j in range(len(samp_neighs[i]))]
        mask[(row_indices, column_indices)] = 1
        if self.cuda:
            mask = mask.cuda()
        num_neigh = mask.sum(1, keepdim=True)
        mask = mask.div(num_neigh)
        if self.cuda:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())
        else:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list))
        # Mean of neighbor feature rows.
        to_feats = mask.mm(embed_matrix)
        return to_feats
class Encoder(nn.Module):
    """Vanilla GraphSAGE encoder.

    Encodes a node via the 'convolutional' GraphSAGE approach: aggregate
    neighbor features and (unless gcn) concatenate the node's own features.
    """

    def __init__(self, features, feature_dim, embed_dim, adj_lists, aggregator, num_sample=10, base_model=None, gcn=False, cuda=False, feature_transform=False):
        super(Encoder, self).__init__()
        self.features = features
        self.feat_dim = feature_dim
        self.adj_lists = adj_lists
        self.aggregator = aggregator
        self.num_sample = num_sample
        if base_model is not None:
            self.base_model = base_model
        self.gcn = gcn
        self.embed_dim = embed_dim
        self.cuda = cuda
        self.aggregator.cuda = cuda
        # GCN-style uses neighbor features only; GraphSAGE concatenates self
        # features, doubling the projection's input dimension.
        self.weight = nn.Parameter(torch.FloatTensor(embed_dim, (self.feat_dim if self.gcn else (2 * self.feat_dim))))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        """Generates embeddings for a batch of nodes.

        nodes -- list of nodes
        :returns: FloatTensor of shape (embed_dim, len(nodes))
        """
        neigh_feats = self.aggregator.forward(nodes, [self.adj_lists[int(node)] for node in nodes], self.num_sample)
        if isinstance(nodes, list):
            index = torch.LongTensor(nodes)
            # Bug fix: the index was unconditionally moved to the GPU, which
            # crashed CPU-only runs even with cuda=False; move it only when
            # CUDA is actually requested.
            if self.cuda:
                index = index.cuda()
        else:
            index = nodes
        if not self.gcn:
            # (The original had identical cuda/non-cuda branches here; the
            # dead duplicate branch is removed.)
            self_feats = self.features(index)
            combined = torch.cat((self_feats, neigh_feats), dim=1)
        else:
            combined = neigh_feats
        combined = F.relu(self.weight.mm(combined.t()))
        return combined
class OneLayerRio(nn.Module):
    """The Rio-GNN model with a single inter-relation aggregation layer."""

    def __init__(self, num_classes, inter1, lambda_1):
        """Initialize the Rio-GNN model.

        :param num_classes: number of classes (2 in our paper)
        :param inter1: the inter-relation aggregator that output the final embedding
        :param lambda_1: weight of the label-aware loss term
        """
        super(OneLayerRio, self).__init__()
        self.inter1 = inter1
        self.xent = nn.CrossEntropyLoss()
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, inter1.embed_dim))
        init.xavier_uniform_(self.weight)
        self.lambda_1 = lambda_1

    def forward(self, nodes, labels, train_flag=True):
        """Return (GNN logits, label-aware logits) for the batch."""
        embeds, label_scores = self.inter1(nodes, labels, train_flag)
        return self.weight.mm(embeds).t(), label_scores

    def to_prob(self, nodes, labels, train_flag=True):
        """Sigmoid probabilities for both the GNN and the label-aware scores."""
        gnn_logits, label_logits = self.forward(nodes, labels, train_flag)
        return torch.sigmoid(gnn_logits), torch.sigmoid(label_logits)

    def loss(self, nodes, labels, train_flag=True):
        """GNN cross-entropy plus the lambda_1-weighted label-aware loss."""
        gnn_scores, label_scores = self.forward(nodes, labels, train_flag)
        target = labels.squeeze()
        gnn_loss = self.xent(gnn_scores, target)
        label_loss = self.xent(label_scores, target)
        return gnn_loss + (self.lambda_1 * label_loss)
class TwoLayerRio(nn.Module):
    """The Rio-GNN model with two inter-relation aggregation layers."""

    def __init__(self, num_classes, inter1, inter2, lambda_1, last_label_scores):
        """Initialize the Rio-GNN model.

        :param num_classes: number of classes (2 in our paper)
        :param inter1: the first-layer inter-relation aggregator
        :param inter2: the inter-relation aggregator that output the final embedding
        :param lambda_1: weight of the label-aware loss term
        :param last_label_scores: label-aware scores kept from the first layer
        """
        super(TwoLayerRio, self).__init__()
        self.inter1 = inter1
        self.inter2 = inter2
        self.xent = nn.CrossEntropyLoss()
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, inter2.embed_dim))
        init.xavier_uniform_(self.weight)
        self.lambda_1 = lambda_1
        self.last_label_scores = last_label_scores

    def forward(self, nodes, labels, train_flag=True):
        """Return (final GNN logits, layer-1 label logits, layer-2 label logits)."""
        first_layer_scores = self.last_label_scores
        embeds, second_layer_scores = self.inter2(nodes, labels, train_flag)
        logits = self.weight.mm(embeds).t()
        return logits, first_layer_scores, second_layer_scores

    def to_prob(self, nodes, labels, train_flag=True):
        """Sigmoid probabilities for the GNN output and both label-score layers."""
        logits, scores_one, scores_two = self.forward(nodes, labels, train_flag)
        return torch.sigmoid(logits), torch.sigmoid(scores_one), torch.sigmoid(scores_two)

    def loss(self, nodes, labels, train_flag=True):
        """GNN cross-entropy plus the lambda_1-weighted layer-1 label-aware loss."""
        gnn_scores, scores_one, scores_two = self.forward(nodes, labels, train_flag)
        target = labels.squeeze()
        loss_one = self.xent(scores_one, target)
        # NOTE(review): the layer-2 label loss is computed but never added to
        # the total, matching the original implementation — confirm intended.
        loss_two = self.xent(scores_two, target)
        gnn_loss = self.xent(gnn_scores, target)
        return gnn_loss + (self.lambda_1 * loss_one)
class Vgg16(torch.nn.Module):
    """Frozen VGG-16 feature extractor returning four intermediate activations.

    Splits the pretrained torchvision VGG-16 ``features`` stack at the
    relu1_2, relu2_2, relu3_3 and relu4_3 boundaries; all weights are frozen.
    """

    def __init__(self, device='cpu'):
        super(Vgg16, self).__init__()
        # NOTE(review): downloads pretrained ImageNet weights on first use.
        vgg_pretrained_features = vgg16(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        # Layer-index boundaries of the four slices within vgg.features.
        boundaries = ((0, 4, self.slice1), (4, 9, self.slice2), (9, 16, self.slice3), (16, 23, self.slice4))
        for start, stop, slice_module in boundaries:
            for idx in range(start, stop):
                slice_module.add_module(str(idx), vgg_pretrained_features[idx].to(device))
        # Freeze every parameter: this module is a fixed feature extractor.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, X):
        """Run X through the four slices and return the named activations."""
        h_relu1_2 = self.slice1(X)
        h_relu2_2 = self.slice2(h_relu1_2)
        h_relu3_3 = self.slice3(h_relu2_2)
        h_relu4_3 = self.slice4(h_relu3_3)
        vgg_outputs = namedtuple('VggOutputs', ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
        return vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)