code
stringlengths
17
6.64M
def gen_graph(branches, g=None, init_root=0, pre=''):
    """Build (or extend) an igraph graph from a list of tree branches.

    Parameters
    ----------
    branches : list
        Branches as produced by `all_branches`; each is converted to node
        numbers via `branch2num`.
    g : igraph.Graph, optional
        Existing graph to extend; a fresh one is created when None.
    init_root : int
        Root node number passed through to `branch2num`.
    pre : str
        Prefix prepended to every vertex name.

    Returns
    -------
    tuple
        (graph, largest node number seen).
    """
    numbered = [branch2num(b, init_root) for b in branches]
    # Flatten, de-duplicate and sort all node ids appearing on any branch.
    node_ids = np.unique([n for path in numbered for n in path]).tolist()
    if g is None:
        g = ig.Graph()
    for node_id in node_ids:
        g.add_vertex(pre + str(node_id))
    seen_pairs = []
    for path in numbered:
        # Walk consecutive node pairs along the branch; add each edge once.
        for a, b in zip(path, path[1:]):
            if [a, b] not in seen_pairs:
                seen_pairs.append([a, b])
                g.add_edge(pre + str(a), pre + str(b))
    return (g, max(node_ids))
def c_factor(n):
    """Average path length of an unsuccessful search in a binary search tree
    built over n points.

    Parameters
    ----------
    n : int
        Number of data points for the BST.

    Returns
    -------
    float
        Average path length of an unsuccessful search in the BST.
    """
    # 2*H(n-1) - 2(n-1)/n, with the harmonic number approximated by
    # ln(n-1) + Euler-Mascheroni constant.
    harmonic = np.log(n - 1) + 0.5772156649
    return 2.0 * harmonic - (2.0 * (n - 1.0)) / (n * 1.0)
class iForest(object):
    """An isolation forest: holds the training data and the trained iTree objects.

    Attributes
    ----------
    X : list
        Data used for training (list of list of floats).
    nobjs : int
        Size of the dataset.
    sample : int
        Size of the subsample used to build each tree.
    Trees : list
        The trained iTree objects.
    limit : int
        Maximum depth a tree can have.
    exlevel : int
        Extension level used in the splitting criteria.
    c : float
        Normalization factor used when computing anomaly scores.

    Methods
    -------
    CheckExtensionLevel()
        Validate the user-supplied extension level against the data dimension.
    compute_paths(X_in)
        Compute the anomaly score for each point of X_in.
    """

    def __init__(self, X, ntrees, sample_size, limit=None, ExtensionLevel=0):
        """Initialize a forest from training data, tree count and subsample size.

        Parameters
        ----------
        X : list of list of floats
            Training data; list of [x1, x2, ..., xn] coordinate points.
        ntrees : int
            Number of trees to build.
        sample_size : int
            Subsample size used per tree; must be smaller than |X|.
        limit : int, optional
            Maximum allowed tree depth. Defaults to the average length of an
            unsuccessful BST search, ceil(log2(sample_size)).
        ExtensionLevel : int
            Degrees of freedom when choosing splitting hyperplanes; must be
            smaller than the data dimension.
        """
        self.ntrees = ntrees
        self.X = X
        self.nobjs = len(X)
        self.sample = sample_size
        self.Trees = []
        self.limit = limit
        self.exlevel = ExtensionLevel
        self.CheckExtensionLevel()
        if limit is None:
            # Default depth limit: average unsuccessful-search length.
            self.limit = int(np.ceil(np.log2(self.sample)))
        self.c = c_factor(self.sample)
        for _ in range(self.ntrees):
            sampled = rn.sample(range(self.nobjs), self.sample)
            self.Trees.append(iTree(X[sampled], 0, self.limit, exlevel=self.exlevel))

    def CheckExtensionLevel(self):
        """Raise if the extension level is negative or exceeds dim - 1."""
        dim = self.X.shape[1]
        if self.exlevel < 0:
            raise Exception(f'Extension level has to be an integer between 0 and {dim - 1}.')
        if self.exlevel > dim - 1:
            raise Exception(f"Your data has {dim} dimensions. Extension level can't be higher than {dim - 1}.")

    def compute_paths(self, X_in=None):
        """Compute anomaly scores for every point of a dataset.

        Parameters
        ----------
        X_in : list of list of floats, optional
            Data to score; defaults to the training data.

        Returns
        -------
        numpy.ndarray
            One anomaly score per data point.
        """
        if X_in is None:
            X_in = self.X
        scores = np.zeros(len(X_in))
        for idx in range(len(X_in)):
            # Average the depth reached by the point across all trees.
            depth_total = 0
            for tree in self.Trees:
                depth_total += PathFactor(X_in[idx], tree).path * 1.0
            mean_depth = depth_total / self.ntrees
            scores[idx] = 2.0 ** (-mean_depth / self.c)
        return scores
class Node(object):
    """A single node of an iTree. Stores the splitting hyperplane, the data
    present at the node, its children, and whether it is internal or external.

    Attributes
    ----------
    e : int
        Depth of the node within its tree.
    size : int
        Number of data points present at the node.
    X : list
        Data at the node.
    n : list
        Normal vector of the hyperplane that splits the data at this node.
    p : list
        Intercept point the splitting hyperplane passes through.
    left : Node object
        Left child node.
    right : Node object
        Right child node.
    ntype : str
        Node type: 'exNode' (external) or 'inNode' (internal).
    """

    def __init__(self, X, n, p, e, left, right, node_type=''):
        """Create a node of an iTree.

        Parameters
        ----------
        X : list of list of floats
            Data available at this node.
        n : list of floats
            Normal vector of the splitting hyperplane.
        p : list of floats
            Intercept point of the splitting hyperplane.
        e : int
            Depth of this node.
        left : Node object
            Left child node.
        right : Node object
            Right child node.
        node_type : str
            'exNode' or 'inNode'.
        """
        self.X = X
        self.size = len(X)
        self.n = n
        self.p = p
        self.e = e
        self.left = left
        self.right = right
        self.ntype = node_type
class iTree(object):
    """A single tree of the forest, built from one unique subsample.

    Attributes
    ----------
    exlevel : int
        Extension level used in the splitting criteria.
    e : int
        Current depth while the tree is being built.
    X : list
        Data present at the root node of this tree.
    size : int
        Size of the dataset.
    dim : int
        Dimension of the dataset.
    Q : list
        Ordered integers smaller than dim.
    l : int
        Maximum depth the tree may reach before creation is terminated.
    n : list
        Most recent normal vector used for a splitting hyperplane.
    p : list
        Most recent intercept point of a splitting hyperplane.
    exnodes : int
        Number of external nodes in this tree.
    root : Node object
        Root of the recursively built tree.

    Methods
    -------
    make_tree(X, e, l)
        Recursively build the tree from a node; returns a Node object.
    """

    def __init__(self, X, e, l, exlevel=0):
        """Create a tree.

        Parameters
        ----------
        X : list of list of floats
            Subsample of training data (|X| = iForest.sample_size).
        e : int
            Depth of the tree as it is traversed down; e <= l.
        l : int
            Maximum depth the tree may reach before creation is terminated.
        exlevel : int
            Degrees of freedom when choosing splitting hyperplanes; must be
            smaller than the data dimension.
        """
        self.exlevel = exlevel
        self.e = e
        self.X = X
        self.size = len(X)
        self.dim = self.X.shape[1]
        self.Q = np.arange(np.shape(X)[1], dtype='int')
        self.l = l
        self.p = None
        self.n = None
        self.exnodes = 0
        self.root = self.make_tree(X, e, l)

    def make_tree(self, X, e, l):
        """Recursively build the tree from a given node.

        Parameters
        ----------
        X : list of list of floats
            Data available at this node.
        e : int
            Current depth; e <= l.
        l : int
            Maximum allowed depth.

        Returns
        -------
        Node object
        """
        self.e = e
        # Stop splitting at the depth limit or when the node is (near) pure.
        if e >= l or len(X) <= 1:
            self.exnodes += 1
            return Node(X, self.n, self.p, e, None, None, node_type='exNode')
        mins = X.min(axis=0)
        maxs = X.max(axis=0)
        # Zero out all but (exlevel + 1) random components of the normal,
        # which controls the hyperplane's degrees of freedom.
        zeroed = np.random.choice(range(self.dim), self.dim - self.exlevel - 1, replace=False)
        self.n = np.random.normal(0, 1, self.dim)
        self.n[zeroed] = 0
        # Intercept drawn uniformly inside the node's bounding box.
        self.p = np.random.uniform(mins, maxs)
        below = (X - self.p).dot(self.n) < 0
        return Node(X, self.n, self.p, e,
                    left=self.make_tree(X[below], e + 1, l),
                    right=self.make_tree(X[~below], e + 1, l),
                    node_type='inNode')
class PathFactor(object):
    """Trace a single data point x = [x1, ..., xn] down one tree (iTree) and
    compute the length of the path it travels before reaching an external node.

    Attributes
    ----------
    path_list : list of str
        Sequence of 'L'/'R' moves taken down the tree.
    x : list of float
        The data point being traced.
    e : int
        Depth reached so far during the descent.
    path : float
        Final path length (depth, adjusted by c_factor at large leaves).

    Methods
    -------
    find_path(T)
        Given a tree node, find the path the data point takes from it.
    """

    def __init__(self, x, itree):
        """Compute the path length of `x` through `itree`.

        Parameters
        ----------
        x : list of floats
            A data point x = [x1, x2, ..., xn].
        itree : iTree object
            A single tree.
        """
        self.path_list = []
        self.x = x
        self.e = 0
        self.path = self.find_path(itree.root)

    def find_path(self, T):
        """Descend from node `T` using each node's splitting hyperplane.

        Parameters
        ----------
        T : Node object

        Returns
        -------
        float
            The depth reached by the data point.
        """
        if T.ntype == 'exNode':
            # Leaves holding more than one point get the average BST
            # unsuccessful-search length added to the depth.
            if T.size > 1:
                self.e = self.e + c_factor(T.size)
            return self.e
        self.e += 1
        # Negative side of the hyperplane goes left, otherwise right.
        side = 'L' if (self.x - T.p).dot(T.n) < 0 else 'R'
        self.path_list.append(side)
        return self.find_path(T.left if side == 'L' else T.right)
def all_branches(node, current=None, branches=None):
    """Collect every root-to-leaf branch of a tree as a list of 'L'/'R' moves.

    Utility function used in generating a graph visualization: it returns all
    the branches of a given tree so they can be visualized.

    Parameters
    ----------
    node : Node object
        Root of the (sub)tree to traverse.
    current : list of str, optional
        Path accumulated so far; used internally during recursion.
    branches : list, optional
        Accumulator for completed branches; used internally during recursion.

    Returns
    -------
    list
        List of branches (each a list of 'L'/'R' strings) that were reached.
    """
    # BUG FIX: the original used a mutable default argument (current=[]);
    # use None sentinels instead so call sites never share state.
    if current is None:
        current = []
    if branches is None:
        branches = []
    # Truncate the accumulated path to this node's depth before descending.
    current = current[:node.e]
    if node.ntype == 'inNode':
        current.append('L')
        all_branches(node.left, current=current, branches=branches)
        current = current[:-1]
        current.append('R')
        all_branches(node.right, current=current, branches=branches)
    else:
        branches.append(current)
    return branches
def read(filename):
    """Return the contents of `filename`, resolved relative to ``prjdir``.

    Parameters
    ----------
    filename : str
        Path relative to the module-level ``prjdir`` directory.

    Returns
    -------
    str
        The full text of the file.
    """
    # BUG FIX: the original never closed the file handle; use a context
    # manager so it is closed even if read() raises.
    with open(os.path.join(prjdir, filename)) as fh:
        return fh.read()
class FormanRicci:
    """A class to compute Forman-Ricci curvature for all nodes and edges in G."""

    def __init__(self, G: nx.Graph, weight='weight', method='augmented', verbose='ERROR'):
        """A class to compute Forman-Ricci curvature for all nodes and edges in G.

        Parameters
        ----------
        G : NetworkX graph
            A given NetworkX graph, unweighted graph only for now, edge weight will be ignored.
        weight : str
            The edge weight used to compute Ricci curvature. (Default value = "weight")
        method : {"1d", "augmented"}
            The method used to compute Forman-Ricci curvature. (Default value = "augmented")

            - "1d": Computed with 1-dimensional simplicial complex (vertex, edge).
            - "augmented": Computed with 2-dimensional simplicial complex, length <=3 (vertex, edge, face).
        verbose : {"INFO", "DEBUG", "ERROR"}
            Verbose level. (Default value = "ERROR")

            - "INFO": show only iteration process log.
            - "DEBUG": show all output logs.
            - "ERROR": only show log if error happened.
        """
        self.G = G.copy()
        self.weight = weight
        self.method = method

        # Default every edge weight to 1.0 when the graph carries none.
        if not nx.get_edge_attributes(self.G, self.weight):
            logger.info('Edge weight not detected in graph, use "weight" as default edge weight.')
            for (v1, v2) in self.G.edges():
                self.G[v1][v2][self.weight] = 1.0

        # Default every node weight to 1.0 when the graph carries none.
        if not nx.get_node_attributes(self.G, self.weight):
            logger.info('Node weight not detected in graph, use "weight" as default node weight.')
            for v in self.G.nodes():
                self.G.nodes[v][self.weight] = 1.0

        if self.G.is_directed():
            logger.info('Forman-Ricci curvature is not supported for directed graph yet, covert input graph to undirected.')
            self.G = self.G.to_undirected()

        set_verbose(verbose)

    def compute_ricci_curvature(self):
        """Compute Forman-ricci curvature for all nodes and edges in G.
        Node curvature is defined as the average of all it's adjacency edge.

        Returns
        -------
        G : NetworkX graph
            A NetworkX graph with "formanCurvature" on nodes and edges.

        Examples
        --------
        To compute the Forman-Ricci curvature for karate club graph:

        >>> G = nx.karate_club_graph()
        >>> frc = FormanRicci(G)
        >>> frc.compute_ricci_curvature()
        >>> frc.G[0][2]
        {'weight': 1.0, 'formanCurvature': -7.0}
        """
        if self.method == '1d':
            # 1-dimensional simplicial complex: only vertices and edges.
            for (v1, v2) in self.G.edges():
                v1_nbr = set(self.G.neighbors(v1))
                v1_nbr.remove(v2)
                v2_nbr = set(self.G.neighbors(v2))
                v2_nbr.remove(v1)
                w_e = self.G[v1][v2][self.weight]
                w_v1 = self.G.nodes[v1][self.weight]
                w_v2 = self.G.nodes[v2][self.weight]
                ev1_sum = sum([w_v1 / math.sqrt(w_e * self.G[v1][v][self.weight]) for v in v1_nbr])
                ev2_sum = sum([w_v2 / math.sqrt(w_e * self.G[v2][v][self.weight]) for v in v2_nbr])
                self.G[v1][v2]['formanCurvature'] = w_e * (((w_v1 / w_e) + (w_v2 / w_e)) - (ev1_sum + ev2_sum))
                # BUG FIX: use %s for both endpoints; %d raised TypeError for
                # graphs with non-integer node labels.
                logger.debug('Source: %s, target: %s, Forman-Ricci curvature = %f ' % (
                    v1, v2, self.G[v1][v2]['formanCurvature']))
        elif self.method == 'augmented':
            # 2-dimensional simplicial complex: vertices, edges and triangle faces.
            for (v1, v2) in self.G.edges():
                v1_nbr = set(self.G.neighbors(v1))
                v1_nbr.remove(v2)
                v2_nbr = set(self.G.neighbors(v2))
                v2_nbr.remove(v1)
                # Common neighbors form the triangles that contain edge (v1, v2).
                face = v1_nbr & v2_nbr
                w_e = self.G[v1][v2][self.weight]
                w_f = 1  # all triangle faces are weighted 1
                w_v1 = self.G.nodes[v1][self.weight]
                w_v2 = self.G.nodes[v2][self.weight]
                sum_ef = sum([w_e / w_f for _ in face])
                sum_ve = (w_v1 / w_e) + (w_v2 / w_e)
                sum_ehef = 0  # edge-to-face contribution; left at 0 here
                sum_veeh = sum([w_v1 / math.sqrt(w_e * self.G[v1][v][self.weight]) for v in (v1_nbr - face)]
                               + [w_v2 / math.sqrt(w_e * self.G[v2][v][self.weight]) for v in (v2_nbr - face)])
                self.G[v1][v2]['formanCurvature'] = w_e * ((sum_ef + sum_ve) - math.fabs(sum_ehef - sum_veeh))
                logger.debug('Source: %s, target: %s, Forman-Ricci curvature = %f ' % (
                    v1, v2, self.G[v1][v2]['formanCurvature']))
        else:
            # BUG FIX: the original used `assert True, ...`, which can never
            # fire, so an unknown method silently fell through. Raise instead.
            raise ValueError('Method %s not available. Support methods: {"1d","augmented"}' % self.method)

        # Node curvature: average of the curvatures of adjacent edges.
        for n in self.G.nodes():
            fcsum = 0
            if self.G.degree(n) != 0:
                for nbr in self.G.neighbors(n):
                    if 'formanCurvature' in self.G[n][nbr]:
                        fcsum += self.G[n][nbr]['formanCurvature']
                self.G.nodes[n]['formanCurvature'] = fcsum / self.G.degree(n)
            else:
                # Isolated node: curvature defaults to 0.
                self.G.nodes[n]['formanCurvature'] = fcsum
            # BUG FIX: %s instead of %d for the node label (see above).
            logger.debug('node %s, Forman Curvature = %f' % (n, self.G.nodes[n]['formanCurvature']))
        logger.debug('Forman curvature (%s) computation done.' % self.method)
@lru_cache(_cache_maxsize)
def _get_single_node_neighbors_distributions(node, direction='successors'):
    """Get the neighbor density distribution of given node `node`.

    Parameters
    ----------
    node : int
        Node index in Networkit graph `_Gk`.
    direction : {"predecessors", "successors"}
        Direction of neighbors in directed graph. (Default value: "successors")

    Returns
    -------
    distributions : lists of float
        Density distributions of neighbors up to top `_nbr_topk` nodes.
    nbrs : lists of int
        Neighbor index up to top `_nbr_topk` nodes.
    """
    if _Gk.isDirected():
        if direction == 'predecessors':
            neighbors = list(_Gk.iterInNeighbors(node))
        else:
            neighbors = list(_Gk.iterNeighbors(node))
    else:
        neighbors = list(_Gk.iterNeighbors(node))

    if not neighbors:
        # Isolated node: all probability mass stays on the node itself.
        return [1], [node]

    # Keep only the top `_nbr_topk` neighbors by transformed edge weight.
    heap_weight_node_pair = []
    for nbr in neighbors:
        if direction == 'predecessors':
            w = _base ** (-(_Gk.weight(nbr, node) ** _exp_power))
        else:
            w = _base ** (-(_Gk.weight(node, nbr) ** _exp_power))
        if len(heap_weight_node_pair) < _nbr_topk:
            heapq.heappush(heap_weight_node_pair, (w, nbr))
        else:
            heapq.heappushpop(heap_weight_node_pair, (w, nbr))

    nbr_edge_weight_sum = sum([x[0] for x in heap_weight_node_pair])
    if nbr_edge_weight_sum > EPSILON:
        # Distribute (1 - alpha) proportionally to the transformed weights.
        distributions = [(1.0 - _alpha) * w / nbr_edge_weight_sum for (w, _) in heap_weight_node_pair]
    else:
        # BUG FIX: the original passed the list as a stray positional argument
        # to logger.warning with no placeholder in the message, which raises a
        # logging formatting error; use a lazy %s argument instead.
        logger.warning('Neighbor weight sum too small, list: %s', heap_weight_node_pair)
        distributions = [(1.0 - _alpha) / len(heap_weight_node_pair)] * len(heap_weight_node_pair)

    nbr = [x[1] for x in heap_weight_node_pair]
    # The remaining alpha mass stays on the node itself.
    return (distributions + [_alpha]), (nbr + [node])
def _distribute_densities(source, target):
    """Get the density distributions of the source and target node, plus the
    cost matrix (pairwise shortest paths) between their neighborhoods. Only
    neighbors with top `_nbr_topk` edge weights are considered.

    Parameters
    ----------
    source : int
        Source node index in Networkit graph `_Gk`.
    target : int
        Target node index in Networkit graph `_Gk`.

    Returns
    -------
    x : (m,) numpy.ndarray
        Source's density distribution, including source and its neighbors.
    y : (n,) numpy.ndarray
        Target's density distribution, including target and its neighbors.
    d : (m, n) numpy.ndarray
        Shortest path matrix between the two neighborhoods.
    """
    t0 = time.time()

    # For directed graphs the source mass spreads over its predecessors.
    if _Gk.isDirected():
        x, source_topknbr = _get_single_node_neighbors_distributions(source, 'predecessors')
    else:
        x, source_topknbr = _get_single_node_neighbors_distributions(source, 'successors')
    y, target_topknbr = _get_single_node_neighbors_distributions(target, 'successors')
    logger.debug('%8f secs density distribution for edge.' % (time.time() - t0))

    t0 = time.time()
    if _shortest_path == 'pairwise':
        # Compute each pairwise distance on demand.
        d = np.array([[_source_target_shortest_path(src, tgt) for tgt in target_topknbr]
                      for src in source_topknbr])
    else:
        # Slice the precomputed all-pairs shortest path matrix.
        d = _apsp[np.ix_(source_topknbr, target_topknbr)]

    x = np.array(x)
    y = np.array(y)
    logger.debug('%8f secs density matrix construction for edge.' % (time.time() - t0))
    return x, y, d
@lru_cache(_cache_maxsize)
def _source_target_shortest_path(source, target):
    """Compute pairwise shortest path from `source` to `target` by
    BidirectionalDijkstra via Networkit.

    Parameters
    ----------
    source : int
        Source node index in Networkit graph `_Gk`.
    target : int
        Target node index in Networkit graph `_Gk`.

    Returns
    -------
    length : float
        Pairwise shortest path length.
    """
    dijkstra = nk.distance.BidirectionalDijkstra(_Gk, source, target).run()
    length = dijkstra.getDistance()
    # Networkit reports unreachable pairs with a huge sentinel distance.
    assert length < 1e+300, 'Shortest path between %d, %d is not found' % (source, target)
    return length
def _get_all_pairs_shortest_path():
    """Pre-compute all pairs shortest paths of the assigned graph `_Gk`."""
    logger.trace('Start to compute all pair shortest path.')

    global _Gk

    start = time.time()
    distances = nk.distance.APSP(_Gk).run().getDistances()
    logger.trace('%8f secs for all pair by NetworKit.' % (time.time() - start))

    return np.array(distances)
def _optimal_transportation_distance(x, y, d):
    """Compute the optimal transportation distance (OTD) of the given density
    distributions by CVXPY.

    Parameters
    ----------
    x : (m,) numpy.ndarray
        Source's density distributions, includes source and source's neighbors.
    y : (n,) numpy.ndarray
        Target's density distributions, includes source and source's neighbors.
    d : (m, n) numpy.ndarray
        Shortest path matrix.

    Returns
    -------
    m : float
        Optimal transportation distance.
    """
    start = time.time()
    # Exact earth-mover distance via POT.
    otd = ot.emd2(x, y, d)
    logger.debug('%8f secs for Wasserstein dist. \t#source_nbr: %d, #target_nbr: %d' % (
        time.time() - start, len(x), len(y)))
    return otd
def _sinkhorn_distance(x, y, d):
    """Compute the approximate optimal transportation distance (Sinkhorn
    distance) of the given density distributions.

    Parameters
    ----------
    x : (m,) numpy.ndarray
        Source's density distributions, includes source and source's neighbors.
    y : (n,) numpy.ndarray
        Target's density distributions, includes source and source's neighbors.
    d : (m, n) numpy.ndarray
        Shortest path matrix.

    Returns
    -------
    m : float
        Sinkhorn distance, an approximate optimal transportation distance.
    """
    start = time.time()
    # Entropy-regularized approximation (regularization 0.1) via POT.
    approx = ot.sinkhorn2(x, y, d, 0.1, method='sinkhorn')
    logger.debug('%8f secs for Sinkhorn dist. \t#source_nbr: %d, #target_nbr: %d' % (
        time.time() - start, len(x), len(y)))
    return approx
def _average_transportation_distance(source, target):
    """Compute the average transportation distance (ATD) of the given density
    distributions.

    Parameters
    ----------
    source : int
        Source node index in Networkit graph `_Gk`.
    target : int
        Target node index in Networkit graph `_Gk`.

    Returns
    -------
    m : float
        Average transportation distance.
    """
    start = time.time()

    if _Gk.isDirected():
        source_nbr = list(_Gk.iterInNeighbors(source))
    else:
        source_nbr = list(_Gk.iterNeighbors(source))
    target_nbr = list(_Gk.iterNeighbors(target))

    # (1 - alpha) mass spread uniformly over all neighbor pairs.
    share = (1.0 - _alpha) / (len(source_nbr) * len(target_nbr))
    # alpha mass stays on the endpoints themselves.
    cost_self = _alpha * _apsp[source][target]
    cost_nbr = 0
    for src in source_nbr:
        for tgt in target_nbr:
            cost_nbr += _apsp[src][tgt] * share

    avg_dist = cost_nbr + cost_self
    logger.debug('%8f secs for avg trans. dist. \t#source_nbr: %d, #target_nbr: %d' % (
        time.time() - start, len(source_nbr), len(target_nbr)))
    return avg_dist
def _compute_ricci_curvature_single_edge(source, target):
    """Ricci curvature computation for a given single edge.

    Parameters
    ----------
    source : int
        Source node index in Networkit graph `_Gk`.
    target : int
        Target node index in Networkit graph `_Gk`.

    Returns
    -------
    result : dict[(int,int), float]
        The Ricci curvature of given edge in dict format. E.g.: {(node1, node2): ricciCurvature}
    """
    assert source != target, 'Self loop is not allowed.'

    # A (near) zero-weight edge would blow up the m / weight division below;
    # report its curvature as 0 instead.
    if _Gk.weight(source, target) < EPSILON:
        logger.trace('Zero weight edge detected for edge (%s,%s), return Ricci Curvature as 0 instead.' % (source, target))
        return {(source, target): 0}

    # m: transportation distance between the neighborhood distributions.
    # Defensive default in case asserts are stripped under -O.
    m = 1

    # BUG FIX: the original message was missing the closing quote on
    # "OTDSinkhornMix".
    assert _method in ['OTD', 'ATD', 'Sinkhorn', 'OTDSinkhornMix'], \
        'Method %s not found, support method:["OTD", "ATD", "Sinkhorn", "OTDSinkhornMix"]' % _method

    if _method == 'OTD':
        x, y, d = _distribute_densities(source, target)
        m = _optimal_transportation_distance(x, y, d)
    elif _method == 'ATD':
        m = _average_transportation_distance(source, target)
    elif _method == 'Sinkhorn':
        x, y, d = _distribute_densities(source, target)
        m = _sinkhorn_distance(x, y, d)
    elif _method == 'OTDSinkhornMix':
        x, y, d = _distribute_densities(source, target)
        # OTD is faster for small neighborhoods; Sinkhorn scales better.
        if len(x) > _OTDSinkhorn_threshold and len(y) > _OTDSinkhorn_threshold:
            m = _sinkhorn_distance(x, y, d)
        else:
            m = _optimal_transportation_distance(x, y, d)

    # Ollivier-Ricci curvature: kappa = 1 - m / d(source, target).
    result = 1 - (m / _Gk.weight(source, target))
    logger.debug('Ricci curvature (%s,%s) = %f' % (source, target, result))
    return {(source, target): result}
def _wrap_compute_single_edge(edge_pair):
    """Wrapper for args in multiprocessing: unpack a (source, target) pair and
    delegate to `_compute_ricci_curvature_single_edge`."""
    return _compute_ricci_curvature_single_edge(*edge_pair)
def _compute_ricci_curvature_edges(G: nx.Graph, weight='weight', edge_list=None, alpha=0.5,
                                   method='OTDSinkhornMix', base=math.e, exp_power=2,
                                   proc=mp.cpu_count(), chunksize=None, cache_maxsize=1000000,
                                   shortest_path='all_pairs', nbr_topk=3000):
    """Compute Ricci curvature for edges in given edge lists.

    Parameters
    ----------
    G : NetworkX graph
        A given directional or undirectional NetworkX graph.
    weight : str
        The edge weight used to compute Ricci curvature. (Default value = "weight")
    edge_list : list of edges, optional
        The list of edges to compute Ricci curvature; leave as None (or pass
        an empty list) to run for all edges in G. (Default value = None)
    alpha : float
        The parameter for the discrete Ricci curvature, range from 0 ~ 1.
        It means the share of mass to leave on the original node.
        E.g. x -> y, alpha = 0.4 means 0.4 for x, 0.6 to evenly spread to x's nbr.
        (Default value = 0.5)
    method : {"OTD", "ATD", "Sinkhorn", "OTDSinkhornMix"}
        The optimal transportation distance computation method. (Default value = "OTDSinkhornMix")

        - "OTD" for Optimal Transportation Distance,
        - "ATD" for Average Transportation Distance.
        - "Sinkhorn" for OTD approximated Sinkhorn distance.
        - "OTDSinkhornMix" uses OTD for edges whose endpoints have fewer than
          _OTDSinkhorn_threshold (default 2000) neighbors, Sinkhorn otherwise.
    base : float
        Base variable for weight distribution. (Default value = `math.e`)
    exp_power : float
        Exponential power for weight distribution. (Default value = 2)
    proc : int
        Number of processor used for multiprocessing. (Default value = `cpu_count()`)
    chunksize : int
        Chunk size for multiprocessing, set None for auto decide. (Default value = `None`)
    cache_maxsize : int
        Max size for LRU cache for pairwise shortest path computation.
        Set this to `None` for unlimited cache. (Default value = 1000000)
    shortest_path : {"all_pairs","pairwise"}
        Method to compute shortest path. (Default value = `all_pairs`)
    nbr_topk : int
        Only take the top k edge weight neighbors for density distribution.
        Smaller k run faster but the result is less accurate. (Default value = 3000)

    Returns
    -------
    output : dict[(int,int), float]
        A dictionary of edge Ricci curvature. E.g.: {(node1, node2): ricciCurvature}.
    """
    logger.trace('Number of nodes: %d' % G.number_of_nodes())
    logger.trace('Number of edges: %d' % G.number_of_edges())

    if not nx.get_edge_attributes(G, weight):
        logger.info('Edge weight not detected in graph, use "weight" as default edge weight.')
        for (v1, v2) in G.edges():
            G[v1][v2][weight] = 1.0

    # Publish the computation settings as module globals so that forked
    # worker processes inherit them.
    global _Gk
    global _alpha
    global _weight
    global _method
    global _base
    global _exp_power
    global _proc
    global _cache_maxsize
    global _shortest_path
    global _nbr_topk
    global _apsp

    _Gk = nk.nxadapter.nx2nk(G, weightAttr=weight)
    _alpha = alpha
    _weight = weight
    _method = method
    _base = base
    _exp_power = exp_power
    _proc = proc
    _cache_maxsize = cache_maxsize
    _shortest_path = shortest_path
    _nbr_topk = nbr_topk

    # Map NetworkX node labels <-> Networkit integer indices.
    nx2nk_ndict, nk2nx_ndict = {}, {}
    for idx, n in enumerate(G.nodes()):
        nx2nk_ndict[n] = idx
        nk2nx_ndict[idx] = n

    if _shortest_path == 'all_pairs':
        # Pre-compute all pairs shortest path before multiprocessing.
        _apsp = _get_all_pairs_shortest_path()

    # BUG FIX: `edge_list` used a mutable default argument ([]); a None
    # sentinel is equivalent for callers (both are falsy) and safer.
    if edge_list:
        args = [(nx2nk_ndict[source], nx2nk_ndict[target]) for (source, target) in edge_list]
    else:
        args = [(nx2nk_ndict[source], nx2nk_ndict[target]) for (source, target) in G.edges()]

    # Start the curvature computation across worker processes.
    t0 = time.time()
    with mp.get_context('fork').Pool(processes=_proc) as pool:
        if chunksize is None:
            chunksize, extra = divmod(len(args), proc * 4)
            if extra:
                chunksize += 1
            # BUG FIX: guard against chunksize == 0 (e.g. empty edge list),
            # which imap_unordered rejects.
            chunksize = max(chunksize, 1)
        result = pool.imap_unordered(_wrap_compute_single_edge, args, chunksize=chunksize)
        pool.close()
        pool.join()

    # Translate the Networkit indices back to NetworkX node labels.
    output = {}
    for rc in result:
        for k in list(rc.keys()):
            output[(nk2nx_ndict[k[0]], nk2nx_ndict[k[1]])] = rc[k]

    logger.info('%8f secs for Ricci curvature computation.' % (time.time() - t0))
    return output
def _compute_ricci_curvature(G: nx.Graph, weight='weight', **kwargs):
    """Compute Ricci curvature of edges and nodes.
    The node Ricci curvature is defined as the average of node's adjacency edges.

    Parameters
    ----------
    G : NetworkX graph
        A given directional or undirectional NetworkX graph.
    weight : str
        The edge weight used to compute Ricci curvature. (Default value = "weight")
    **kwargs
        Additional keyword arguments passed to `_compute_ricci_curvature_edges`.

    Returns
    -------
    G : NetworkX graph
        A NetworkX graph with "ricciCurvature" on nodes and edges.
    """
    # Edge curvatures first; node curvature is derived from them.
    edge_ricci = _compute_ricci_curvature_edges(G, weight=weight, **kwargs)
    nx.set_edge_attributes(G, edge_ricci, 'ricciCurvature')

    for node in G.nodes():
        deg = G.degree(node)
        if deg != 0:
            total = 0
            for nbr in G.neighbors(node):
                if 'ricciCurvature' in G[node][nbr]:
                    total += G[node][nbr]['ricciCurvature']
            # Node curvature: average of its adjacent edge curvatures.
            G.nodes[node]['ricciCurvature'] = total / deg
            logger.debug('node %s, Ricci Curvature = %f' % (node, G.nodes[node]['ricciCurvature']))

    return G
def _compute_ricci_flow(G: nx.Graph, weight='weight', iterations=20, step=1, delta=0.0001,
                        surgery=((lambda G, *args, **kwargs: G), 100), **kwargs):
    """Compute the given Ricci flow metric of each edge of a given connected NetworkX graph.

    Parameters
    ----------
    G : NetworkX graph
        A given directional or undirectional NetworkX graph.
    weight : str
        The edge weight used to compute Ricci curvature. (Default value = "weight")
    iterations : int
        Iterations to require Ricci flow metric. (Default value = 20)
    step : float
        Step size for gradient descent process. (Default value = 1)
    delta : float
        Process stops when the spread of Ricci curvature is within delta. (Default value = 1e-4)
    surgery : (function, int)
        A tuple of a user-defined surgery function and the iteration interval
        at which it is executed. (Default value = (lambda G, *args, **kwargs: G, 100))
    **kwargs
        Additional keyword arguments passed to `_compute_ricci_curvature`.

    Returns
    -------
    G : NetworkX graph
        A NetworkX graph with ``weight`` as the Ricci flow metric.
    """
    if not nx.is_connected(G):
        logger.info('Not connected graph detected, compute on the largest connected component instead.')
        G = nx.Graph(G.subgraph(max(nx.connected_components(G), key=len)))

    # Total edge weight is re-normalized to the edge count each iteration.
    normalized_weight = float(G.number_of_edges())

    global _apsp

    t0 = time.time()

    if nx.get_edge_attributes(G, 'original_RC'):
        logger.info('original_RC detected, continue to refine the ricci flow.')
    else:
        logger.info('No ricciCurvature detected, compute original_RC...')
        _compute_ricci_curvature(G, weight=weight, **kwargs)
        for (u, v) in G.edges():
            G[u][v]['original_RC'] = G[u][v]['ricciCurvature']
        # Curvature computation may have cached distances; start flow fresh.
        _apsp = {}

    for it in range(iterations):
        # Gradient-descent step on every edge weight.
        for (u, v) in G.edges():
            G[u][v][weight] -= (step * G[u][v]['ricciCurvature']) * G[u][v][weight]

        # Re-normalize so the total edge weight stays at the edge count.
        w = nx.get_edge_attributes(G, weight)
        sumw = sum(w.values())
        for (k, v) in w.items():
            w[k] = w[k] * (normalized_weight / sumw)
        nx.set_edge_attributes(G, values=w, name=weight)
        logger.info(' === Ricci flow iteration %d === ' % it)

        _compute_ricci_curvature(G, weight=weight, **kwargs)

        rc = nx.get_edge_attributes(G, 'ricciCurvature')
        diff = max(rc.values()) - min(rc.values())
        logger.trace('Ricci curvature difference: %f' % diff)
        logger.trace('max:%f, min:%f | maxw:%f, minw:%f' % (
            max(rc.values()), min(rc.values()), max(w.values()), min(w.values())))
        if diff < delta:
            logger.trace('Ricci curvature converged, process terminated.')
            break

        # Periodically apply the user-supplied surgery function.
        surgery_func, do_surgery = surgery
        if it != 0 and it % do_surgery == 0:
            G = surgery_func(G, weight)
            normalized_weight = float(G.number_of_edges())

        for (n1, n2) in G.edges():
            logger.debug('%s %s %s' % (n1, n2, G[n1][n2]))

        # The metric changed; cached distances are stale.
        _apsp = {}

    logger.info('%8f secs for Ricci flow computation.' % (time.time() - t0))
    return G
class OllivierRicci():
    """A class to compute Ollivier-Ricci curvature for all nodes and edges in G.

    Node Ricci curvature is defined as the average of all its adjacency edges.
    """

    def __init__(self, G: nx.Graph, weight='weight', alpha=0.5, method='OTDSinkhornMix',
                 base=math.e, exp_power=2, proc=mp.cpu_count(), chunksize=None,
                 shortest_path='all_pairs', cache_maxsize=1000000, nbr_topk=3000,
                 verbose='ERROR'):
        """Initialize a container to compute Ollivier-Ricci curvature/flow.

        Parameters
        ----------
        G : NetworkX graph
            A given directional or undirectional NetworkX graph.
        weight : str
            The edge weight used to compute Ricci curvature. (Default value = "weight")
        alpha : float
            The parameter for the discrete Ricci curvature, range from 0 ~ 1.
            It means the share of mass to leave on the original node.
            E.g. x -> y, alpha = 0.4 means 0.4 for x, 0.6 to evenly spread to x's nbr.
            (Default value = 0.5)
        method : {"OTD", "ATD", "Sinkhorn", "OTDSinkhornMix"}
            The optimal transportation distance computation method.
            (Default value = "OTDSinkhornMix")
            - "OTD" for Optimal Transportation Distance,
            - "ATD" for Average Transportation Distance,
            - "Sinkhorn" for OTD approximated Sinkhorn distance,
            - "OTDSinkhornMix" uses OTD for low-degree edges and Sinkhorn for
              high-degree ones (OTD is faster for smaller cases).
        base : float
            Base variable for weight distribution. (Default value = `math.e`)
        exp_power : float
            Exponential power for weight distribution. (Default value = 2)
        proc : int
            Number of processors used for multiprocessing. (Default value = `cpu_count()`)
        chunksize : int
            Chunk size for multiprocessing, set None for auto decide. (Default value = `None`)
        shortest_path : {"all_pairs", "pairwise"}
            Method to compute shortest path. (Default value = `all_pairs`)
        cache_maxsize : int
            Max size for LRU cache for pairwise shortest path computation.
            Set this to `None` for unlimited cache. (Default value = 1000000)
        nbr_topk : int
            Only take the top k edge weight neighbors for density distribution.
            Smaller k runs faster but the result is less accurate. (Default value = 3000)
        verbose : {"INFO", "TRACE", "DEBUG", "ERROR"}
            Verbose level. (Default value = "ERROR")
            - "INFO": show only iteration process log.
            - "TRACE": show detailed iteration process log.
            - "DEBUG": show all output logs.
            - "ERROR": only show log if error happened.
        """
        self.G = G.copy()
        self.alpha = alpha
        self.weight = weight
        self.method = method
        self.base = base
        self.exp_power = exp_power
        self.proc = proc
        self.chunksize = chunksize
        self.cache_maxsize = cache_maxsize
        self.shortest_path = shortest_path
        self.nbr_topk = nbr_topk
        self.set_verbose(verbose)
        self.lengths = {}    # all-pairs shortest path cache
        self.densities = {}  # density distribution cache

        # FIX: POT is only needed by the Sinkhorn-based methods; previously it was
        # required unconditionally, breaking pure "OTD"/"ATD" use without POT.
        if self.method in ('Sinkhorn', 'OTDSinkhornMix'):
            assert util.find_spec('ot'), 'Package POT: Python Optimal Transport is required for Sinkhorn distance.'

        if (not nx.get_edge_attributes(self.G, weight)):
            logger.info('Edge weight not detected in graph, use "weight" as default edge weight.')
            for (v1, v2) in self.G.edges():
                self.G[v1][v2][weight] = 1.0

        # Self-loops contribute nothing to curvature and break the computation.
        self_loop_edges = list(nx.selfloop_edges(self.G))
        if self_loop_edges:
            logger.info(('Self-loop edge detected. Removing %d self-loop edges.' % len(self_loop_edges)))
            self.G.remove_edges_from(self_loop_edges)

    def set_verbose(self, verbose):
        """Set the verbose level for this process.

        Parameters
        ----------
        verbose : {"INFO", "TRACE", "DEBUG", "ERROR"}
            Verbose level. (Default value = "ERROR")
            - "INFO": show only iteration process log.
            - "TRACE": show detailed iteration process log.
            - "DEBUG": show all output logs.
            - "ERROR": only show log if error happened.
        """
        set_verbose(verbose)

    def compute_ricci_curvature_edges(self, edge_list=None):
        """Compute Ricci curvature for edges in given edge lists.

        Parameters
        ----------
        edge_list : list of edges
            The list of edges to compute Ricci curvature, set to [] to run for
            all edges in G. (Default value = [])

        Returns
        -------
        output : dict[(int,int), float]
            A dictionary of edge Ricci curvature. E.g.: {(node1, node2): ricciCurvature}.
        """
        return _compute_ricci_curvature_edges(G=self.G, weight=self.weight, edge_list=edge_list,
                                              alpha=self.alpha, method=self.method,
                                              base=self.base, exp_power=self.exp_power,
                                              proc=self.proc, chunksize=self.chunksize,
                                              cache_maxsize=self.cache_maxsize,
                                              shortest_path=self.shortest_path,
                                              nbr_topk=self.nbr_topk)

    def compute_ricci_curvature(self):
        """Compute Ricci curvature of edges and nodes.

        The node Ricci curvature is defined as the average of the node's adjacency edges.

        Returns
        -------
        G : NetworkX graph
            A NetworkX graph with "ricciCurvature" on nodes and edges.

        Examples
        --------
        To compute the Ollivier-Ricci curvature for karate club graph::

            >>> G = nx.karate_club_graph()
            >>> orc = OllivierRicci(G, alpha=0.5, verbose="INFO")
            >>> orc.compute_ricci_curvature()
            >>> orc.G[0][1]
            {'weight': 1.0, 'ricciCurvature': 0.11111111071683011}
        """
        self.G = _compute_ricci_curvature(G=self.G, weight=self.weight, alpha=self.alpha,
                                          method=self.method, base=self.base,
                                          exp_power=self.exp_power, proc=self.proc,
                                          chunksize=self.chunksize,
                                          cache_maxsize=self.cache_maxsize,
                                          shortest_path=self.shortest_path,
                                          nbr_topk=self.nbr_topk)
        return self.G

    def compute_ricci_flow(self, iterations=10, step=1, delta=0.0001,
                           surgery=((lambda G, *args, **kwargs: G), 100)):
        """Compute the given Ricci flow metric of each edge of a given connected NetworkX graph.

        Parameters
        ----------
        iterations : int
            Number of Ricci flow iterations to run. (Default value = 10)
        step : float
            Step size for the gradient descent process. (Default value = 1)
        delta : float
            Process stops when difference of Ricci curvature is within delta. (Default value = 1e-4)
        surgery : (function, int)
            A tuple of a user-defined surgery function that is executed every
            certain number of iterations.
            (Default value = (lambda G, *args, **kwargs: G, 100))

        Returns
        -------
        G : NetworkX graph
            A graph with ``weight`` as Ricci flow metric.

        Examples
        --------
        To compute the Ollivier-Ricci flow for karate club graph::

            >>> G = nx.karate_club_graph()
            >>> orc_OTD = OllivierRicci(G, alpha=0.5, method="OTD", verbose="INFO")
            >>> orc_OTD.compute_ricci_flow(iterations=10)
            >>> orc_OTD.G[0][1]
            {'weight': 0.06399135316908759,
             'ricciCurvature': 0.18608249978652802,
             'original_RC': 0.11111111071683011}
        """
        self.G = _compute_ricci_flow(G=self.G, weight=self.weight, iterations=iterations,
                                     step=step, delta=delta, surgery=surgery,
                                     alpha=self.alpha, method=self.method,
                                     base=self.base, exp_power=self.exp_power,
                                     proc=self.proc, chunksize=self.chunksize,
                                     cache_maxsize=self.cache_maxsize,
                                     shortest_path=self.shortest_path,
                                     nbr_topk=self.nbr_topk)
        return self.G

    def ricci_community(self, cutoff_step=0.025, drop_threshold=0.01):
        """Detect community clustering by Ricci flow metric.

        The communities are detected by the modularity drop while iteratively
        removing edge weight (Ricci flow metric) from large to small.

        Parameters
        ----------
        cutoff_step : float
            The step size to find the good cutoff points.
        drop_threshold : float
            At least drop this much to be considered as a drop for good_cut.

        Returns
        -------
        cutoff : float
            Ricci flow metric weight cutoff for detected community clustering.
        clustering : dict
            Detected community clustering.

        Examples
        --------
        To compute the Ricci community for karate club graph::

            >>> G = nx.karate_club_graph()
            >>> orc = OllivierRicci(G, alpha=0.5, verbose="INFO")
            >>> orc.compute_ricci_flow(iterations=50)
            >>> cc = orc.ricci_community()
            >>> print("The detected community label of node 0: %s" % cc[1][0])
            The detected community label of node 0: 0
        """
        cc = self.ricci_community_all_possible_clusterings(cutoff_step=cutoff_step,
                                                           drop_threshold=drop_threshold)
        assert cc, 'No clustering found!'

        # The last (smallest-cutoff) clustering is usually the best one.
        number_of_clustering = len(set(cc[-1][1].values()))
        logger.info(('Communities detected: %d' % number_of_clustering))

        return cc[-1]

    def ricci_community_all_possible_clusterings(self, cutoff_step=0.025, drop_threshold=0.01):
        """Detect community clustering by Ricci flow metric (all possible clustering guesses).

        The communities are detected by modularity drop while iteratively
        removing edge weight (Ricci flow metric) from large to small.

        Parameters
        ----------
        cutoff_step : float
            The step size to find the good cutoff points.
        drop_threshold : float
            At least drop this much to be considered as a drop for good_cut.

        Returns
        -------
        cc : list of (float, dict)
            All detected cutoff and community clustering pairs. Clusterings are
            detected from large to small cutoff; usually the last one is the
            best clustering result.

        Examples
        --------
        To compute the Ricci community for karate club graph::

            >>> G = nx.karate_club_graph()
            >>> orc = OllivierRicci(G, alpha=0.5, verbose="INFO")
            >>> orc.compute_ricci_flow(iterations=50)
            >>> cc = orc.ricci_community_all_possible_clusterings()
            >>> print("The number of possible clusterings: %d" % len(cc))
            The number of possible clusterings: 3
        """
        if (not nx.get_edge_attributes(self.G, 'original_RC')):
            logger.info('Ricci flow not detected yet, run Ricci flow with default setting first...')
            self.compute_ricci_flow()

        logger.info('Ricci flow detected, start cutting graph into community...')
        cut_guesses = get_rf_metric_cutoff(self.G, weight=self.weight,
                                           cutoff_step=cutoff_step,
                                           drop_threshold=drop_threshold)
        assert cut_guesses, 'No cutoff point found!'

        # Cut from the smallest guess upward; each pass only removes more edges.
        Gp = self.G.copy()
        cc = []
        for cut in cut_guesses[::-1]:
            Gp = cut_graph_by_cutoff(Gp, cutoff=cut, weight=self.weight)
            cc.append((cut, {c: idx for (idx, comp) in enumerate(nx.connected_components(Gp)) for c in comp}))

        return cc
def set_verbose(verbose='ERROR'): 'Set up the verbose level of the GraphRicciCurvature.\n\n Parameters\n ----------\n verbose : {"INFO", "TRACE","DEBUG","ERROR"}\n Verbose level. (Default value = "ERROR")\n - "INFO": show only iteration process log.\n - "TRACE": show detailed iteration process log.\n - "DEBUG": show all output logs.\n - "ERROR": only show log if error happened.\n ' if (verbose == 'INFO'): logger.setLevel(logging.INFO) elif (verbose == 'TRACE'): logger.setLevel(logging.TRACE) elif (verbose == 'DEBUG'): logger.setLevel(logging.DEBUG) elif (verbose == 'ERROR'): logger.setLevel(logging.ERROR) else: print('Incorrect verbose level, option:["INFO","DEBUG","ERROR"], use "ERROR instead."') logger.setLevel(logging.ERROR)
def cut_graph_by_cutoff(G_origin, cutoff, weight='weight'): 'Remove graph\'s edges with "weight" greater than "cutoff".\n\n Parameters\n ----------\n G_origin : NetworkX graph\n A graph with ``weight`` as Ricci flow metric to cut.\n cutoff : float\n A threshold to remove all edges with "weight" greater than it.\n weight : str\n The edge weight used as Ricci flow metric. (Default value = "weight")\n Returns\n -------\n\n G: NetworkX graph\n A graph with edges cut by given cutoff value.\n ' assert nx.get_edge_attributes(G_origin, weight), 'No edge weight detected, abort.' G = G_origin.copy() edge_trim_list = [] for (n1, n2) in G.edges(): if (G[n1][n2][weight] > cutoff): edge_trim_list.append((n1, n2)) G.remove_edges_from(edge_trim_list) return G
def get_rf_metric_cutoff(G_origin, weight='weight', cutoff_step=0.025, drop_threshold=0.01): 'Get good clustering cutoff points for Ricci flow metric by detect the change of modularity while removing edges.\n\n Parameters\n ----------\n G_origin : NetworkX graph\n A graph with "weight" as Ricci flow metric to cut.\n weight : str\n The edge weight used as Ricci flow metric. (Default value = "weight")\n cutoff_step : float\n The step size to find the good cutoff points.\n drop_threshold : float\n At least drop this much to considered as a drop for good_cut.\n\n Returns\n -------\n good_cuts : list of float\n A list of possible cutoff point, usually we use the first one as the best cut.\n ' G = G_origin.copy() (modularity, ari) = ([], []) maxw = max(nx.get_edge_attributes(G, weight).values()) cutoff_range = np.arange(maxw, 1, (- cutoff_step)) for cutoff in cutoff_range: G = cut_graph_by_cutoff(G, cutoff, weight=weight) clustering = {c: idx for (idx, comp) in enumerate(nx.connected_components(G)) for c in comp} modularity.append(community_louvain.modularity(clustering, G, weight)) good_cuts = [] mod_last = modularity[(- 1)] for i in range((len(modularity) - 1), 0, (- 1)): mod_now = modularity[i] if ((mod_last > mod_now > 0.0001) and ((abs((mod_last - mod_now)) / mod_last) > drop_threshold)): logger.trace(('Cut detected: cut:%f, diff:%f, mod_now:%f, mod_last:%f' % (cutoff_range[(i + 1)], (mod_last - mod_now), mod_now, mod_last))) good_cuts.append(cutoff_range[(i + 1)]) mod_last = mod_now return good_cuts
def ARI(G, clustering, clustering_label='club'): '\n Computer the Adjust Rand Index (clustering accuracy) of "clustering" with "clustering_label" as ground truth.\n\n Parameters\n ----------\n G : NetworkX graph\n A given NetworkX graph with node attribute "clustering_label" as ground truth.\n clustering : dict or list or list of set\n Predicted community clustering.\n clustering_label : str\n Node attribute name for ground truth.\n\n Returns\n -------\n ari : float\n Adjust Rand Index for predicted community.\n ' if (util.find_spec('sklearn') is not None): from sklearn import preprocessing, metrics else: print('scikit-learn not installed, skipped...') return (- 1) complex_list = nx.get_node_attributes(G, clustering_label) le = preprocessing.LabelEncoder() y_true = le.fit_transform(list(complex_list.values())) if isinstance(clustering, dict): y_pred = np.array([clustering[v] for v in complex_list.keys()]) elif isinstance(clustering[0], set): predict_dict = {c: idx for (idx, comp) in enumerate(clustering) for c in comp} y_pred = np.array([predict_dict[v] for v in complex_list.keys()]) elif isinstance(clustering, list): y_pred = clustering else: return (- 1) return metrics.adjusted_rand_score(y_true, y_pred)
def my_surgery(G_origin: nx.Graph(), weight='weight', cut=0): 'A simple surgery function that remove the edges with weight above a threshold\n\n Parameters\n ----------\n G_origin : NetworkX graph\n A graph with ``weight`` as Ricci flow metric to cut.\n weight:\n The edge weight used as Ricci flow metric. (Default value = "weight")\n cut:\n Manually assigned cutoff point.\n\n Returns\n -------\n G : NetworkX graph\n A graph after surgery.\n ' G = G_origin.copy() w = nx.get_edge_attributes(G, weight) assert (cut >= 0), 'Cut value should be greater than 0.' if (not cut): cut = (((max(w.values()) - 1.0) * 0.6) + 1.0) to_cut = [] for (n1, n2) in G.edges(): if (G[n1][n2][weight] > cut): to_cut.append((n1, n2)) print('*************** Surgery time ****************') print(('* Cut %d edges.' % len(to_cut))) G.remove_edges_from(to_cut) print(('* Number of nodes now: %d' % G.number_of_nodes())) print(('* Number of edges now: %d' % G.number_of_edges())) cc = list(nx.connected_components(G)) print(('* Modularity now: %f ' % nx.algorithms.community.quality.modularity(G, cc))) print(('* ARI now: %f ' % ARI(G, cc))) print('*********************************************') return G
def check_accuracy(G_origin, weight='weight', clustering_label='value', plot_cut=True):
    """Check the clustering quality while cutting the edges with different weight thresholds.

    Parameters
    ----------
    G_origin : NetworkX graph
        A graph with ``weight`` as Ricci flow metric to cut.
    weight : str
        The edge weight used as Ricci flow metric. (Default value = "weight")
        (FIX: docstring previously mislabeled this as float.)
    clustering_label : str
        Node attribute name for ground truth.
    plot_cut : bool
        Whether to plot the good guessed cut.
    """
    # Guard clause: matplotlib is an optional dependency here.
    if util.find_spec('matplotlib') is not None:
        import matplotlib.pyplot as plt
    else:
        print('matplotlib not installed, skipped to show the cut graph...')
        return -1

    G = G_origin.copy()
    modularity, ari = [], []
    maxw = max(nx.get_edge_attributes(G, weight).values())
    cutoff_range = np.arange(maxw, 1, -0.025)

    # Sweep cutoff from large to small, recording modularity and ARI at each step.
    for cutoff in cutoff_range:
        edge_trim_list = [(n1, n2) for (n1, n2) in G.edges() if G[n1][n2][weight] > cutoff]
        G.remove_edges_from(edge_trim_list)
        clustering = {c: idx for (idx, comp) in enumerate(nx.connected_components(G)) for c in comp}
        modularity.append(community_louvain.modularity(clustering, G, weight))
        ari.append(ARI(G, clustering, clustering_label=clustering_label))

    plt.xlim(maxw, 0)
    plt.xlabel('Edge weight cutoff')
    plt.plot(cutoff_range, modularity, alpha=0.8)
    plt.plot(cutoff_range, ari, alpha=0.8)

    if plot_cut:
        # Find the first significant modularity drop (walking from small cutoffs up).
        good_cut = -1
        mod_last = modularity[-1]
        drop_threshold = 0.01
        for i in range(len(modularity) - 1, 0, -1):
            mod_now = modularity[i]
            if mod_last > mod_now > 0.0001 and (abs(mod_last - mod_now) / mod_last) > drop_threshold:
                if good_cut != -1:
                    print('Other cut:%f, diff:%f, mod_now:%f, mod_last:%f, ari:%f' % (
                        cutoff_range[i + 1], mod_last - mod_now, mod_now, mod_last, ari[i + 1]))
                else:
                    good_cut = cutoff_range[i + 1]
                    print('*Good Cut:%f, diff:%f, mod_now:%f, mod_last:%f, ari:%f' % (
                        good_cut, mod_last - mod_now, mod_now, mod_last, ari[i + 1]))
            mod_last = mod_now

        # FIX: only mark the cut if one was actually found; previously the
        # sentinel -1 was drawn as a vertical line off-plot.
        if good_cut != -1:
            plt.axvline(x=good_cut, color='red')
            plt.legend(['Modularity', 'Adjust Rand Index', 'Good cut'])
        else:
            plt.legend(['Modularity', 'Adjust Rand Index'])
    else:
        plt.legend(['Modularity', 'Adjust Rand Index'])
def show_results(G, curvature='ricciCurvature'): print('Karate Club Graph, first 5 edges: ') for (n1, n2) in list(G.edges())[:5]: print(('Ricci curvature of edge (%s,%s) is %f' % (n1, n2, G[n1][n2][curvature]))) plt.subplot(2, 1, 1) ricci_curvtures = nx.get_edge_attributes(G, curvature).values() plt.hist(ricci_curvtures, bins=20) plt.xlabel('Ricci curvature') plt.title('Histogram of Ricci Curvatures (Karate Club)') plt.subplot(2, 1, 2) weights = nx.get_edge_attributes(G, 'weight').values() plt.hist(weights, bins=20) plt.xlabel('Edge weight') plt.title('Histogram of Edge weights (Karate Club)') plt.tight_layout()
def draw_graph(G, clustering_label='club'): '\n A helper function to draw a nx graph with community.\n ' complex_list = nx.get_node_attributes(G, clustering_label) le = preprocessing.LabelEncoder() node_color = le.fit_transform(list(complex_list.values())) nx.draw_spring(G, nodelist=G.nodes(), node_color=node_color, cmap=plt.cm.rainbow, alpha=0.8)
def ARI(G, clustering, clustering_label='club'): '\n Computer the Adjust Rand Index (clustering accuracy) of "clustering" with "clustering_label" as ground truth.\n\n Parameters\n ----------\n G : NetworkX graph\n A given NetworkX graph with node attribute "clustering_label" as ground truth.\n clustering : dict or list or list of set\n Predicted community clustering.\n clustering_label : str\n Node attribute name for ground truth.\n\n Returns\n -------\n ari : float\n Adjust Rand Index for predicted community.\n ' complex_list = nx.get_node_attributes(G, clustering_label) le = preprocessing.LabelEncoder() y_true = le.fit_transform(list(complex_list.values())) if isinstance(clustering, dict): y_pred = np.array([clustering[v] for v in complex_list.keys()]) elif isinstance(clustering[0], set): predict_dict = {c: idx for (idx, comp) in enumerate(clustering) for c in comp} y_pred = np.array([predict_dict[v] for v in complex_list.keys()]) elif isinstance(clustering, list): y_pred = clustering else: return (- 1) return metrics.adjusted_rand_score(y_true, y_pred)
def my_surgery(G_origin: nx.Graph(), weight='weight', cut=0): 'A simple surgery function that remove the edges with weight above a threshold\n\n Parameters\n ----------\n G_origin : NetworkX graph\n A graph with ``weight`` as Ricci flow metric to cut.\n weight: str\n The edge weight used as Ricci flow metric. (Default value = "weight")\n cut: float\n Manually assigned cutoff point.\n\n Returns\n -------\n G : NetworkX graph\n A graph after surgery.\n ' G = G_origin.copy() w = nx.get_edge_attributes(G, weight) assert (cut >= 0), 'Cut value should be greater than 0.' if (not cut): cut = (((max(w.values()) - 1.0) * 0.6) + 1.0) to_cut = [] for (n1, n2) in G.edges(): if (G[n1][n2][weight] > cut): to_cut.append((n1, n2)) print('*************** Surgery time ****************') print(('* Cut %d edges.' % len(to_cut))) G.remove_edges_from(to_cut) print(('* Number of nodes now: %d' % G.number_of_nodes())) print(('* Number of edges now: %d' % G.number_of_edges())) cc = list(nx.connected_components(G)) print(('* Modularity now: %f ' % nx.algorithms.community.quality.modularity(G, cc))) print(('* ARI now: %f ' % ARI(G, cc))) print('*********************************************') return G
def check_accuracy(G_origin, weight='weight', clustering_label='value', plot_cut=True): 'To check the clustering quality while cut the edges with weight using different threshold\n\n Parameters\n ----------\n G_origin : NetworkX graph\n A graph with ``weight`` as Ricci flow metric to cut.\n weight: float\n The edge weight used as Ricci flow metric. (Default value = "weight")\n clustering_label : str\n Node attribute name for ground truth.\n plot_cut: bool\n To plot the good guessed cut or not.\n\n ' G = G_origin.copy() (modularity, ari) = ([], []) maxw = max(nx.get_edge_attributes(G, weight).values()) cutoff_range = np.arange(maxw, 1, (- 0.025)) for cutoff in cutoff_range: edge_trim_list = [] for (n1, n2) in G.edges(): if (G[n1][n2][weight] > cutoff): edge_trim_list.append((n1, n2)) G.remove_edges_from(edge_trim_list) clustering = {c: idx for (idx, comp) in enumerate(nx.connected_components(G)) for c in comp} modularity.append(community_louvain.modularity(clustering, G, weight)) ari.append(ARI(G, clustering, clustering_label=clustering_label)) plt.xlim(maxw, 0) plt.xlabel('Edge weight cutoff') plt.plot(cutoff_range, modularity, alpha=0.8) plt.plot(cutoff_range, ari, alpha=0.8) if plot_cut: good_cut = (- 1) mod_last = modularity[(- 1)] drop_threshold = 0.01 for i in range((len(modularity) - 1), 0, (- 1)): mod_now = modularity[i] if ((mod_last > mod_now > 0.0001) and ((abs((mod_last - mod_now)) / mod_last) > drop_threshold)): if (good_cut != (- 1)): print(('Other cut:%f, diff:%f, mod_now:%f, mod_last:%f, ari:%f' % (cutoff_range[(i + 1)], (mod_last - mod_now), mod_now, mod_last, ari[(i + 1)]))) else: good_cut = cutoff_range[(i + 1)] print(('*Good Cut:%f, diff:%f, mod_now:%f, mod_last:%f, ari:%f' % (good_cut, (mod_last - mod_now), mod_now, mod_last, ari[(i + 1)]))) mod_last = mod_now plt.axvline(x=good_cut, color='red') plt.legend(['Modularity', 'Adjust Rand Index', 'Good cut']) else: plt.legend(['Modularity', 'Adjust Rand Index'])
def clean_graph(G): for (n1, n2) in G.edges(): del G[n1][n2]['ricciCurvature'] del G[n1][n2]['original_RC'] G[n1][n2]['weight'] = 1 for n in G.nodes(): del G.nodes[n]['ricciCurvature']
def test_compute_ricci_curvature(): G = nx.Graph() G.add_edges_from([(1, 2), (2, 3), (3, 4), (2, 4)]) G.add_node(5) frc = FormanRicci(G, method='1d') frc.compute_ricci_curvature() frc_edges = list(nx.get_edge_attributes(frc.G, 'formanCurvature').values()) frc_nodes = list(nx.get_node_attributes(frc.G, 'formanCurvature').values()) frc_edges_ans = [0.0, (- 1.0), (- 1.0), 0.0] frc_nodes_ans = [0.0, (- 0.6666666666666666), (- 0.5), (- 0.5), 0] npt.assert_array_almost_equal(frc_edges, frc_edges_ans) npt.assert_array_almost_equal(frc_nodes, frc_nodes_ans) frc_a = FormanRicci(G, method='augmented') frc_a.compute_ricci_curvature() frc_a_edges = list(nx.get_edge_attributes(frc_a.G, 'formanCurvature').values()) frc_a_nodes = list(nx.get_node_attributes(frc_a.G, 'formanCurvature').values()) frc_a_edges_ans = [0.0, 2.0, 2.0, 3.0] frc_a_nodes_ans = [0.0, 1.3333333333333333, 2.5, 2.5, 0] npt.assert_array_almost_equal(frc_a_edges, frc_a_edges_ans) npt.assert_array_almost_equal(frc_a_nodes, frc_a_nodes_ans)
def test_compute_ricci_curvature_edges(): G = nx.karate_club_graph() for (n1, n2, d) in G.edges(data=True): d.clear() orc = OllivierRicci(G, method='OTD', alpha=0.5) output = orc.compute_ricci_curvature_edges([(0, 1)]) npt.assert_almost_equal(output[(0, 1)], 0.111111)
def test_compute_ricci_curvature(): G = nx.karate_club_graph() for (n1, n2, d) in G.edges(data=True): d.clear() orc = OllivierRicci(G, method='OTD', alpha=0.5) Gout = orc.compute_ricci_curvature() rc = list(nx.get_edge_attributes(Gout, 'ricciCurvature').values()) ans = [0.111111, (- 0.14375), 0.041667, (- 0.114583), (- 0.28125), (- 0.28125), 0.0625, (- 0.2), (- 0.114583), 0.0625, (- 0.0), 0.0625, 0.0625, (- 0.03125), 0.0625, (- 0.427083), 0.044444, 0.166667, 0.194444, 0.244444, 0.166667, 0.111111, 0.166667, (- 0.041667), 0.05, 0.125, 0.1, 0.1, 0.2, (- 0.175), 0.033333, (- 0.233333), 0.416667, 0.25, 0.216667, 0.291667, 0.5, 0.5, 0.291667, 0.375, 0.375, 0.375, (- 0.025), 0.011765, (- 0.044118), (- 0.288235), 0.125, 0.088235, 0.125, 0.088235, 0.125, 0.088235, (- 0.254902), 0.125, 0.088235, 0.125, 0.088235, 0.1, 0.225, 0.2, (- 0.066667), (- 0.076471), 0.5, 0.125, 0.083333, 0.166667, 0.375, (- 0.073529), (- 0.147059), 0.166667, (- 0.068627), (- 0.041667), (- 0.014706), (- 0.041667), (- 0.044118), (- 0.166667), (- 0.122549), 0.267157] npt.assert_array_almost_equal(rc, ans)
def test_compute_ricci_curvature_directed(): Gd = nx.DiGraph() Gd.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 3), (3, 1)]) orc = OllivierRicci(Gd, method='OTD', alpha=0.5) Gout = orc.compute_ricci_curvature() rc = list(nx.get_edge_attributes(Gout, 'ricciCurvature').values()) ans = [(- 0.49999999999999956), (- 3.842615114990622e-11), 0.49999999996158007, 0.49999999992677135, 0.7499999999364129] npt.assert_array_almost_equal(rc, ans)
def test_compute_ricci_curvature_ATD(): G = nx.karate_club_graph() for (n1, n2, d) in G.edges(data=True): d.clear() orc = OllivierRicci(G, alpha=0.5, method='ATD', verbose='INFO') orc.compute_ricci_curvature() Gout = orc.compute_ricci_curvature() rc = list(nx.get_edge_attributes(Gout, 'ricciCurvature').values()) ans = [(- 0.34375), (- 0.4375), (- 0.265625), (- 0.25), (- 0.390625), (- 0.390625), (- 0.195312), (- 0.44375), (- 0.25), 0.0, (- 0.140625), (- 0.2875), (- 0.109375), (- 0.291667), (- 0.109375), (- 0.640625), (- 0.311111), (- 0.175926), (- 0.083333), (- 0.166667), 0.0, (- 0.166667), 0.0, (- 0.333333), (- 0.241667), (- 0.1375), (- 0.22), (- 0.125), (- 0.16), (- 0.4), (- 0.2), (- 0.479167), 0.020833, 0.041667, (- 0.1), (- 0.041667), 0.055556, (- 0.0625), (- 0.041667), 0.0, 0.0, (- 0.075), (- 0.275), (- 0.3), (- 0.176471), (- 0.464706), 0.0, (- 0.073529), 0.0, (- 0.073529), 0.0, (- 0.073529), (- 0.421569), 0.0, (- 0.073529), 0.0, (- 0.073529), (- 0.2), (- 0.2), (- 0.125), (- 0.291667), (- 0.335294), (- 0.055556), (- 0.208333), (- 0.194444), (- 0.194444), 0.0625, (- 0.176471), (- 0.375), (- 0.166667), (- 0.245098), (- 0.197917), (- 0.227941), (- 0.25), (- 0.294118), (- 0.430556), (- 0.455882), (- 0.355392)] npt.assert_array_almost_equal(rc, ans)
def test_compute_ricci_flow(): G = nx.karate_club_graph() for (n1, n2, d) in G.edges(data=True): d.clear() orc = OllivierRicci(G, method='OTD', alpha=0.5) Gout = orc.compute_ricci_flow(iterations=3) rf = list(nx.get_edge_attributes(Gout, 'weight').values()) ans = [0.584642, 1.222957, 0.828566, 1.893597, 2.179315, 2.179315, 0.814135, 1.647656, 1.893597, 0.90643, 0.916791, 0.798319, 0.760511, 0.829311, 0.760511, 2.477847, 0.937765, 0.681481, 0.612859, 0.568307, 0.675702, 0.702774, 0.675702, 1.484889, 0.843498, 0.753397, 1.098413, 0.868616, 0.646627, 2.061065, 1.425968, 1.924123, 0.292387, 0.487378, 0.446435, 0.509673, 0.101477, 0.108645, 0.509673, 0.246037, 0.246037, 0.228701, 1.309931, 1.213249, 1.317511, 2.149341, 0.712759, 0.811386, 0.712759, 0.811386, 0.712759, 0.811386, 2.245314, 0.712759, 0.811386, 0.712759, 0.811386, 0.94731, 0.518039, 0.857636, 1.52574, 1.429449, 0.180896, 0.692919, 0.724545, 0.639637, 0.281116, 1.427853, 1.622385, 0.807457, 1.386869, 1.372091, 1.320579, 1.324087, 1.276729, 1.843012, 1.721982, 0.412472] npt.assert_array_almost_equal(rf, ans)
def test_ricci_community_all_possible_clusterings():
    # Regression test: all cutoff/clustering pairs produced after 40 Ricci-flow
    # iterations on the karate club graph (exp_power=1, alpha=0.5).
    G = nx.karate_club_graph()
    # Strip pre-existing edge attributes so the flow starts from unit weights.
    for (n1, n2, d) in G.edges(data=True):
        d.clear()
    orc = OllivierRicci(G, exp_power=1, alpha=0.5)
    orc.compute_ricci_flow(iterations=40)
    cc = orc.ricci_community_all_possible_clusterings()
    cuts = [x[0] for x in cc]
    clusterings = [x[1] for x in cc]
    # Expected cutoff points, from largest to smallest.
    cuts_ans = [1.8364944935528884, 1.6114944935528852, 1.461494493552883, 1.2614944935528802, 1.1864944935528792, 1.111494493552878, 1.036494493552877]
    # Expected node->community maps: the first six cutoffs all yield the same
    # 3-community split; the last (smallest) cutoff splits community 2 in two.
    clusterings_ans = [{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 30: 2, 23: 3, 24: 3, 25: 3, 26: 3, 27: 3, 28: 3, 29: 3, 31: 3}]
    npt.assert_array_almost_equal(cuts, cuts_ans)
    assert (clusterings == clusterings_ans)
def test_ricci_community(): G = nx.karate_club_graph() for (n1, n2, d) in G.edges(data=True): d.clear() orc = OllivierRicci(G, exp_power=1, alpha=0.5) (cut, clustering) = orc.ricci_community() cut_ans = 1.2613588421005884 clustering_ans = {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 8: 2, 30: 2, 32: 3, 33: 3, 14: 3, 15: 3, 18: 3, 20: 3, 22: 3, 23: 4, 24: 4, 25: 4, 26: 4, 27: 4, 28: 4, 29: 4, 31: 4} npt.assert_array_almost_equal(cut, cut_ans) assert (clustering == clustering_ans)
def fix_bad_unicode(text, normalization='NFC'): return fix_text(text, normalization=normalization)
def fix_strange_quotes(text): '\n Replace strange quotes, i.e., 〞with a single quote \' or a double quote " if it fits better.\n ' text = constants.SINGLE_QUOTE_REGEX.sub("'", text) text = constants.DOUBLE_QUOTE_REGEX.sub('"', text) return text
def replace_urls(text, replace_with=''): '\n Replace all URLs in ``text`` str with ``replace_with`` str.\n ' return constants.URL_REGEX.sub(replace_with, text)
def replace_emails(text, replace_with=''): '\n Replace all emails in ``text`` str with ``replace_with`` str.\n ' return constants.EMAIL_REGEX.sub(replace_with, text)
def remove_substrings(text, to_replace, replace_with=''): '\n Remove (or replace) substrings from a text.\n Args:\n text (str): raw text to preprocess\n to_replace (iterable or str): substrings to remove/replace\n replace_with (str): defaults to an empty string but\n you replace substrings with a token.\n ' if isinstance(to_replace, str): to_replace = [to_replace] result = text for x in to_replace: result = result.replace(x, replace_with) return result
def remove_emoji(text): return remove_substrings(text, UNICODE_EMOJI['en'])
def remove_number_or_digit(text, replace_with=''): return re.sub(constants.BANGLA_DIGIT_REGEX, replace_with, text)
def remove_punctuations(text, replace_with=''): for punc in corpus.punctuations: print(punc) text = text.replace(punc, replace_with) return text
class CleanText(object):
    """Configurable text-cleaning pipeline.

    Each ``remove_*`` flag enables one cleaning step; the matching
    ``replace_with_*`` value is substituted where a replacement is requested.
    Call the instance with a string to run the enabled steps in order.
    """

    def __init__(self, fix_unicode=True, unicode_norm=True, unicode_norm_form='NFKC', remove_url=False, remove_email=False, remove_number=False, remove_digits=False, remove_emoji=False, remove_punct=False, replace_with_url='<URL>', replace_with_email='<EMAIL>', replace_with_number='<NUMBER>', replace_with_digit='<DIGIT>', replace_with_punct='<PUNC>'):
        # Flags selecting which cleaning steps run.
        self.fix_unicode = fix_unicode
        self.unicode_norm = unicode_norm
        self.unicode_norm_form = unicode_norm_form
        self.remove_url = remove_url
        self.remove_email = remove_email
        self.remove_number = remove_number
        self.remove_digits = remove_digits
        self.remove_emoji = remove_emoji
        self.remove_punct = remove_punct
        # Replacement tokens used by the corresponding remove_* steps.
        self.replace_with_url = replace_with_url
        self.replace_with_email = replace_with_email
        self.replace_with_number = replace_with_number
        self.replace_with_digit = replace_with_digit
        self.replace_with_punct = replace_with_punct

    def __call__(self, text: str) -> str:
        """Run the configured cleaning steps on ``text`` and return the result."""
        # None is tolerated and treated as the empty string.
        if (text is None):
            text = ''
        text = str(text)
        # Quote normalization always runs, regardless of flags.
        text = fix_strange_quotes(text)
        if self.fix_unicode:
            text = fix_bad_unicode(text)
        if self.unicode_norm:
            text = normalize(self.unicode_norm_form, text)
        # NOTE(review): punctuation removal runs BEFORE URL/email replacement,
        # which strips the punctuation URLs/emails need to match — confirm
        # this ordering is intentional.
        if self.remove_punct:
            text = remove_punctuations(text, replace_with=self.replace_with_punct)
        if self.remove_url:
            text = replace_urls(text, replace_with=self.replace_with_url)
        if self.remove_email:
            text = replace_emails(text, replace_with=self.replace_with_email)
        if self.remove_emoji:
            text = remove_emoji(text)
        # Digits and numbers share the same regex; only the token differs.
        if self.remove_digits:
            text = remove_number_or_digit(text, replace_with=self.replace_with_digit)
        if self.remove_number:
            text = remove_number_or_digit(text, replace_with=self.replace_with_number)
        return text
class BengaliCorpus():
    """Static character/word resources for Bengali text processing."""

    # ASCII punctuation plus Bengali danda '।' and visarga 'ঃ'.
    punctuations: str = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~।ঃ'
    # Bengali letters (independent vowels, consonants and signs).
    letters: str = 'অআইঈউঊঋএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহড়ঢ়য়ৎংঃঁ'
    # Bengali digit characters 0-9.
    digits: str = '০১২৩৪৫৬৭৮৯'
    # Dependent vowel signs, space separated.
    vowels: str = 'া ি ী ু ৃ ে ৈ ো ৌ'
    # Stopword list imported from bengali_stopwords.
    stopwords: List[str] = bengali_stopwords
def _read_corpus(files: List[str], tokenizer=None):
    """Yield one gensim TaggedDocument per input file for doc2vec training.

    Each file's full text is tokenized with ``tokenizer`` when given,
    otherwise with the module-level ``default_tokenizer``.
    """
    for doc_id, file_path in tqdm(enumerate(files)):
        with open(file_path) as handle:
            text = handle.read()
        tokens = tokenizer(text) if tokenizer else default_tokenizer.tokenize(text)
        yield gensim.models.doc2vec.TaggedDocument(tokens, [doc_id])
class BengaliDoc2vec():
    """Doc2vec inference wrapper for Bengali documents."""

    def __init__(self, model_path: str='', tokenizer: Callable=None):
        # '' or the NEWS key selects the pretrained news model; the WIKI key
        # selects the pretrained wikipedia model; anything else is a local path.
        if model_path in ('', ModelTypeEnum.NEWS_DOC2VEC):
            model_path = download_model(ModelTypeEnum.NEWS_DOC2VEC)
        if model_path == ModelTypeEnum.WIKI_DOC2VEC:
            model_path = download_model(ModelTypeEnum.WIKI_DOC2VEC)
        self.tokenizer = tokenizer
        self.model = Doc2Vec.load(model_path)

    def _tokens(self, document):
        # Shared tokenization step for both public methods.
        if self.tokenizer:
            return self.tokenizer(document)
        return default_tokenizer.tokenize(document)

    def get_document_vector(self, document: str) -> np.ndarray:
        """Infer and return the doc2vec vector for ``document``.

        Args:
            document (str): input document

        Returns:
            ndarray: inferred vector
        """
        return self.model.infer_vector(self._tokens(document))

    def get_document_similarity(self, document_1: str, document_2: str) -> float:
        """Return the cosine similarity of two documents, rounded to 2 places.

        Args:
            document_1 (str): input document
            document_2 (str): input document

        Returns:
            float: similarity score
        """
        vector_1 = self.model.infer_vector(self._tokens(document_1))
        vector_2 = self.model.infer_vector(self._tokens(document_2))
        return round((1 - spatial.distance.cosine(vector_1, vector_2)), 2)
class BengaliDoc2vecTrainer():
    """Trains a doc2vec model over a directory of ``.txt`` files."""

    def __init__(self, tokenizer: Callable=None):
        self.tokenizer = tokenizer

    def train(self, text_files, checkpoint_path='ckpt', vector_size=100, min_count=2, epochs=10):
        """Train doc2vec with custom text files.

        Args:
            text_files (str): directory containing the text files (extension .txt)
            checkpoint_path (str, optional): checkpoint save path. Defaults to 'ckpt'.
            vector_size (int, optional): size of the vector. Defaults to 100.
            min_count (int, optional): minimum word count. Defaults to 2.
            epochs (int, optional): training iteration number. Defaults to 10.
        """
        corpus_files = glob.glob(text_files + '/*.txt')
        if self.tokenizer:
            train_corpus = list(_read_corpus(corpus_files, self.tokenizer))
        else:
            train_corpus = list(_read_corpus(corpus_files))
        model = Doc2Vec(vector_size=vector_size, min_count=min_count, epochs=epochs)
        model.build_vocab(train_corpus)
        model.train(train_corpus, total_examples=model.corpus_count, epochs=model.epochs)
        os.makedirs(checkpoint_path, exist_ok=True)
        model.save(os.path.join(checkpoint_path, 'custom_doc2vec_model.model'))
class BengaliFasttext():
    """Wrapper around a pretrained Bengali fastText binary model."""

    def __init__(self, model_path: str=''):
        # Download the published fastText model when no path is given.
        if not model_path:
            model_path = download_model(ModelTypeEnum.FASTTEXT)
        self.model = fasttext.load_model(model_path)

    def get_word_vector(self, word: str) -> np.ndarray:
        """Generate a word vector for the given input word.

        Args:
            word (str): input word or token

        Returns:
            ndarray: word or token vector
        """
        return self.model[word]

    def bin2vec(self, vector_name: str):
        """Generate a text vector file from the fastText binary model.

        Args:
            vector_name (str): name of the output vector file with extension
        """
        words = self.model.get_words()
        # Fix: use a context manager so the output file is closed even when
        # an exception interrupts the dump (the original leaked the handle),
        # and join vector components once instead of quadratic str +=.
        with open(vector_name, 'w') as output_vector:
            output_vector.write(str(len(words)) + ' ' + str(self.model.get_dimension()) + '\n')
            for w in words:
                v = self.model.get_word_vector(w)
                vstr = ''.join(' ' + str(vi) for vi in v)
                output_vector.write(w + vstr + '\n')
class FasttextTrainer():
    """Trains an unsupervised (skipgram) fastText model from raw text."""

    def train(self, data, model_name, epoch, lr=0.05, dim=300, ws=5, minCount=5, minn=3, maxn=6, neg=5, wordNgrams=1, loss='ns', bucket=2000000, thread=(multiprocessing.cpu_count() - 1)):
        """Train fasttext with raw text data.

        Args:
            data (str): raw text data path
            model_name (str): name of output trained model with extension
            epoch (int): number of training iterations
            lr (float, optional): learning rate. Defaults to 0.05.
            dim (int, optional): vector size or dimension. Defaults to 300.
            ws (int, optional): window size. Defaults to 5.
            minCount (int, optional): minimum word count to include in training. Defaults to 5.
            minn (int, optional): Defaults to 3.
            maxn (int, optional): Defaults to 6.
            neg (int, optional): negative sampling. Defaults to 5.
            wordNgrams (int, optional): Defaults to 1.
            loss (str, optional): loss type. Defaults to "ns".
            bucket (int, optional): Defaults to 2000000.
            thread (int, optional): Defaults to multiprocessing.cpu_count()-1.
        """
        print('training started.....')
        trained_model = fasttext.train_unsupervised(data, model='skipgram', epoch=epoch, lr=lr, dim=dim, ws=ws, minCount=minCount, minn=minn, maxn=maxn, neg=neg, wordNgrams=wordNgrams, loss=loss, bucket=bucket, thread=thread)
        print(f'training done! saving as {model_name}')
        trained_model.save_model(model_name)
class BengaliGlove():
    """GloVe vector lookup backed by a plain-text vector file."""

    def __init__(self, glove_vector_path: str=''):
        if not glove_vector_path:
            glove_vector_path = download_model(ModelTypeEnum.GLOVE)
        self.embedding_dict = self._get_embedding_dict(glove_vector_path)

    def get_word_vector(self, word: str) -> np.ndarray:
        """Return the stored vector for ``word`` (KeyError if absent)."""
        return self.embedding_dict[word]

    def get_closest_word(self, word: str) -> List[str]:
        """Return the 10 vocabulary words nearest to ``word`` by euclidean
        distance (the query word itself is typically the first hit)."""
        query_vector = self.embedding_dict[word]

        def distance_to_query(candidate):
            return spatial.distance.euclidean(self.embedding_dict[candidate], query_vector)

        ranked = sorted(self.embedding_dict.keys(), key=distance_to_query)
        return ranked[:10]

    def _get_embedding_dict(self, glove_vector_path: str):
        # Parse "word v1 v2 ..." lines into a word -> float32 array mapping.
        embeddings = {}
        with open(glove_vector_path, 'r', encoding='utf-8') as handle:
            for line in handle:
                token, *coords = line.split()
                embeddings[token] = np.asarray(coords, 'float32')
        return embeddings
class BengaliWord2Vec():
    """Pretrained Bengali word2vec lookup wrapper."""

    def __init__(self, model_path: str=''):
        if not model_path:
            model_path = download_model(ModelTypeEnum.WORD2VEC)
        self.model = Word2Vec.load(model_path)

    def get_word_vector(self, word: str) -> np.ndarray:
        """Return the embedding vector stored for ``word``."""
        return self.model.wv[word]

    def get_most_similar_words(self, word: str, topn: int=10) -> List[Tuple[(str, float)]]:
        """Return the ``topn`` (word, similarity) pairs closest to ``word``."""
        return self.model.wv.most_similar(word, topn=topn)
class MyCorpus():
    """An iterator that yields sentences (lists of str).

    Each line of the file at ``data_path`` is split into sentences with
    bnlp's NLTKTokenizer, and each sentence is yielded as a word-token list.
    """

    def __init__(self, data_path):
        self.data_path = data_path
        self.bnltk = NLTKTokenizer()

    def __iter__(self):
        # Fix: the original opened the file without closing it, leaking a
        # file handle per iteration pass; the with-block closes it reliably.
        with open(self.data_path) as handle:
            for line in handle:
                for sentence in self.bnltk.sentence_tokenize(line):
                    yield self.bnltk.word_tokenize(sentence)
class Word2VecTraining():
    """Trains (or resumes training of) a gensim Word2Vec model on Bengali text."""

    def train(self, data_path, model_name, vector_name, vector_size=100, alpha=0.025, min_alpha=0.0001, sg=0, hs=0, negative=5, ns_exponent=0.75, window=5, min_count=5, max_vocab_size=None, workers=3, epochs=5, sample=0.001, cbow_mean=1, compute_loss=True, callbacks=()):
        """Train a Bengali word2vec model.

        Args:
            data_path (str/list): raw text data path as string with extension, or
                sentence token list. example: [[], []]
            model_name (str): output model name ex: mymodel.model
            vector_name (str): output vector name ex: myvector.txt
            vector_size (int, optional): vector dimension. Defaults to 100.
            alpha (float, optional): initial learning rate. Defaults to 0.025.
            min_alpha (float, optional): minimum learning rate. Defaults to 0.0001.
            sg (int, optional): skip-gram model or cbow model. if 1 then skip-gram. Defaults to 0.
            hs (int, optional): hierarchical softmax. Defaults to 0.
            negative (int, optional): negative sampling. Defaults to 5.
            ns_exponent (float, optional): the exponent used to shape the
                negative sampling distribution. Defaults to 0.75.
            window (int, optional): window size. Defaults to 5.
            min_count (int, optional): minimum word count to ignore. Defaults to 5.
            max_vocab_size (int, optional): maximum vocab size. Defaults to None.
            workers (int, optional): worker number. Defaults to 3.
            epochs (int, optional): number of training iterations. Defaults to 5.
            sample (float, optional): sampling rate. Defaults to 1e-3.
            cbow_mean (int, optional): cbow_mean or cbow_sum. Defaults to 1.
            compute_loss (bool, optional): compute training loss. Defaults to True.
            callbacks (tuple, optional): callback sequence. Defaults to ().
        """
        # A list is taken as pre-tokenized sentences; a string is treated as a
        # file path and streamed through MyCorpus.
        if isinstance(data_path, list):
            sentences = data_path
        else:
            sentences = MyCorpus(data_path)
        print('training started.......')
        print('please wait.....it will take time according to your data size and computation capability')
        model = Word2Vec(sentences=sentences, vector_size=vector_size, alpha=alpha, min_alpha=min_alpha, sg=sg, hs=hs, negative=negative, ns_exponent=ns_exponent, sample=sample, cbow_mean=cbow_mean, window=window, min_count=min_count, max_vocab_size=max_vocab_size, workers=workers, epochs=epochs, compute_loss=compute_loss, callbacks=callbacks)
        training_loss = model.get_latest_training_loss()
        print('train completed successfully')
        print(f'trianing loss: {training_loss}')
        print('model and vector saving...')
        # Save both the full model (resumable) and the plain-text vectors.
        model.save(model_name)
        model.wv.save_word2vec_format(vector_name, binary=False)
        print(f'model and vector saved as {model_name} and {vector_name}')

    def pretrain(self, model_path, new_sentences, output_model_name, output_vector_name, epochs=5):
        """Resume training from a saved word2vec model.

        Args:
            model_path (str): path of trained word2vec model
            new_sentences (list/str): list of new sentences, or a text file path
            output_model_name (str): output model name
            output_vector_name (str): output vector name
            epochs (int): number of training iterations
        """
        # A string is treated as a file path and streamed through MyCorpus.
        if isinstance(new_sentences, str):
            new_sentences = MyCorpus(new_sentences)
        print('model loading ....')
        model = Word2Vec.load(model_path)
        print('vocab building with new sentences')
        # update=True extends the existing vocabulary instead of rebuilding it.
        model.build_vocab(new_sentences, update=True)
        print('pre-training started.......')
        print('please wait.....it will take time according to your data size and computation capability')
        model.train(new_sentences, total_examples=model.corpus_count, epochs=epochs)
        training_loss = model.get_latest_training_loss()
        print('pre-train completed successfully')
        print(f'pre-trianing loss: {training_loss}')
        print('model and vector saving...')
        model.save(output_model_name)
        model.wv.save_word2vec_format(output_vector_name, binary=False)
        print(f'model and vector saved as {output_model_name} and {output_vector_name}')
class BengaliNER():
    """CRF-based named entity tagger for Bengali text."""

    def __init__(self, model_path: str='', tokenizer: Callable=None):
        if not model_path:
            model_path = download_model('NER')
        self.model = load_pickle_model(model_path)
        self.tokenizer = tokenizer if tokenizer else BasicTokenizer()

    def tag(self, text: str) -> List[Tuple[(str, str)]]:
        """Return (token, entity-tag) pairs for ``text``.

        Punctuation tokens (ASCII punctuation plus danda) are dropped before
        feature extraction.
        """
        punctuation_chars = string.punctuation + '।'
        tokens = [tok for tok in self.tokenizer(text) if tok not in punctuation_chars]
        feature_rows = [features(tokens, idx) for idx in range(len(tokens))]
        predictions = self.model.predict([feature_rows])[0]
        return list(zip(tokens, predictions))
class BengaliPOS():
    """CRF-based part-of-speech tagger for Bengali text."""

    def __init__(self, model_path: str='', tokenizer: Callable=None):
        if not model_path:
            model_path = download_model('POS')
        self.model = load_pickle_model(model_path)
        self.tokenizer = tokenizer if tokenizer else BasicTokenizer()

    def tag(self, text: str) -> List[Tuple[(str, str)]]:
        """Return (token, pos-tag) pairs for ``text``."""
        tokens = self.tokenizer(text)
        feature_rows = [features(tokens, idx) for idx in range(len(tokens))]
        predictions = self.model.predict([feature_rows])[0]
        return list(zip(tokens, predictions))
class CRFTaggerTrainer():
    """Trains and evaluates a CRF sequence tagger, then pickles it to disk."""

    def train(self, model_name, train_data, test_data, average='micro'):
        """Fit a CRF on ``train_data``, report accuracy/F1 on ``test_data``
        and save the fitted model to ``model_name``.

        Args:
            model_name (str): output pickle file path.
            train_data: tagged sentences, e.g. [[(word, tag), ...], ...].
            test_data: tagged sentences used for evaluation.
            average (str): averaging mode for the F1 score. Defaults to 'micro'.
        """
        (X_train, y_train) = transform_to_dataset(train_data)
        (X_test, y_test) = transform_to_dataset(test_data)
        print(len(X_train))
        print(len(X_test))
        print('Training Started........')
        print('It will take time according to your dataset size...')
        model = CRF()
        model.fit(X_train, y_train)
        print('Training Finished!')
        print('Evaluating with Test Data...')
        y_pred = model.predict(X_test)
        print('Accuracy is: ')
        print(metrics.flat_accuracy_score(y_test, y_pred))
        print(f'F1 Score({average}) is: ')
        print(metrics.flat_f1_score(y_test, y_pred, average=average))
        # Fix: the original passed an anonymous open() handle to pickle.dump
        # and never closed it; the with-block guarantees the file is closed.
        with open(model_name, 'wb') as model_file:
            pickle.dump(model, model_file)
        print('Model Saved!')
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            # Silently drop undecodable bytes rather than raising.
            return text.decode('utf-8', 'ignore')
        raise ValueError('Unsupported string type: %s' % type(text))
    if six.PY2:
        if isinstance(text, str):
            return text.decode('utf-8', 'ignore')
        if isinstance(text, unicode):
            return text
        raise ValueError('Unsupported string type: %s' % type(text))
    raise ValueError('Not running on Python2 or Python 3?')
def whitespace_tokenize(text: str) -> List[str]:
    """Runs basic whitespace cleaning and splitting on a piece of text.

    Returns an empty list for empty or all-whitespace input.
    """
    # str.split() with no argument already drops leading/trailing whitespace
    # and collapses runs, and returns [] for empty input — the original's
    # separate strip() and empty-string check were redundant.
    return text.split()
def _is_punctuation(char): 'Checks whether `chars` is a punctuation character.' cp = ord(char) if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))): return True cat = unicodedata.category(char) if cat.startswith('P'): return True return False
class BasicTokenizer():
    """Runs basic tokenization (punctuation splitting, lower casing, etc.).

    Literal '.' characters are protected with DUMMYTOKEN during tokenization
    so abbreviations like 'মো.' survive punctuation splitting.
    """

    def __call__(self, text: str) -> List[str]:
        return self.tokenize(text)

    def tokenize(self, text: str) -> List[str]:
        """Tokenize ``text`` into words and punctuation marks."""
        protected = convert_to_unicode(text).replace('.', DUMMYTOKEN)
        pieces = []
        for raw_token in whitespace_tokenize(protected):
            pieces.extend(self._run_split_on_punc(raw_token))
        cleaned = whitespace_tokenize(' '.join(pieces))
        # Restore the protected dots on each output token.
        return [piece.replace(DUMMYTOKEN, '.') for piece in cleaned]

    def _run_split_on_punc(self, text):
        """Split ``text`` on punctuation, each punctuation char its own token."""
        groups = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                groups.append([ch])
                word_open = False
            else:
                if not word_open:
                    groups.append([])
                    word_open = True
                groups[-1].append(ch)
        return [''.join(group) for group in groups]
class NLTKTokenizer():
    """Word and sentence tokenizers built on NLTK's English tokenizers.

    NLTK does not treat the Bengali danda '।' as a sentence terminator, so
    real '.' characters are hidden behind DUMMYTOKEN and danda is mapped to
    '.' before tokenizing; the mapping is reversed on every output token.
    """

    @staticmethod
    def _encode(text):
        # Hide literal dots, then let danda play the role of '.' for NLTK.
        return text.replace('.', DUMMYTOKEN).replace('।', '.')

    @staticmethod
    def _decode(token):
        # Reverse the encoding applied by _encode.
        return token.replace('.', '।').replace(DUMMYTOKEN, '.')

    def word_tokenize(self, text: str) -> List[str]:
        """Return the word tokens of ``text``."""
        return [self._decode(tok) for tok in nltk.word_tokenize(self._encode(text))]

    def sentence_tokenize(self, text: str) -> List[str]:
        """Return the sentence tokens of ``text``."""
        return [self._decode(tok) for tok in nltk.tokenize.sent_tokenize(self._encode(text))]
class SentencepieceTokenizer():
    """SentencePiece subword tokenizer wrapper for Bengali."""

    def __init__(self, model_path: str=''):
        if not model_path:
            model_path = download_model(ModelTypeEnum.SENTENCEPIECE)
        self.model = bsp.SentencePieceProcessor()
        self.model.Load(model_path)

    def tokenize(self, text: str) -> List[str]:
        """Return the subword pieces of ``text``."""
        return self.model.EncodeAsPieces(text)

    def text2id(self, text: str) -> List[int]:
        """Return the subword ids of ``text``."""
        return self.model.EncodeAsIds(text)

    def id2text(self, ids: List[int]) -> str:
        """Decode a list of subword ids back into text."""
        return self.model.DecodeIds(ids)
class SentencepieceTrainer():
    """Trains a SentencePiece model over a raw text corpus."""

    def __init__(self, data, vocab_size, model_prefix):
        self.data = data
        self.vocab_size = vocab_size
        self.model_prefix = model_prefix

    def train(self):
        """Run training; writes <prefix>.model and <prefix>.vocab to cwd."""
        train_args = (
            '--model_prefix=' + self.model_prefix
            + ' --input=' + self.data
            + ' --vocab_size=' + str(self.vocab_size)
        )
        bsp.SentencePieceTrainer.train(train_args)
        print('%s.model and %s.vocab is saved on your current directory' % (self.model_prefix, self.model_prefix))
class ModelTypeEnum():
    """String keys identifying each downloadable pretrained model.

    NOTE: a plain constants holder, not an ``enum.Enum``; the values are the
    lookup keys used by ``ModelInfo.get_model_info`` / ``download_model``.
    """
    NER = 'NER'
    POS = 'POS'
    SENTENCEPIECE = 'SPM'
    FASTTEXT = 'FASTTEXT'
    GLOVE = 'GLOVE'
    NEWS_DOC2VEC = 'NEWS_DOC2VEC'
    WIKI_DOC2VEC = 'WIKI_DOC2VEC'
    WORD2VEC = 'WORD2VEC'
class ModelInfo():
    """Registry of downloadable model names and their URLs.

    Each entry maps a model key to its on-disk file name, a download ``type``
    ('single' = direct file, 'zip' = archive to extract) and the source URL.
    """

    # Name-mangled (class-private) registry; accessed only via the
    # static methods below.
    __url_dict = {
        'NER': {'name': 'bn_ner.pkl', 'type': 'single', 'url': 'https://raw.githubusercontent.com/sagorbrur/bnlp/master/model/bn_ner.pkl'},
        'POS': {'name': 'bn_pos.pkl', 'type': 'single', 'url': 'https://raw.githubusercontent.com/sagorbrur/bnlp/master/model/bn_pos.pkl'},
        'SPM': {'name': 'bn_spm.model', 'type': 'single', 'url': 'https://raw.githubusercontent.com/sagorbrur/bnlp/master/model/bn_spm.model'},
        'FASTTEXT': {'name': 'bengali_fasttext_wiki.bin', 'type': 'zip', 'url': 'https://huggingface.co/sagorsarker/bangla-fasttext/resolve/main/bengali_fasttext_wiki.zip'},
        'GLOVE': {'name': 'bn_glove.39M.100d.txt', 'type': 'zip', 'url': 'https://huggingface.co/sagorsarker/bangla-glove-vectors/resolve/main/bn_glove.39M.100d.zip'},
        'NEWS_DOC2VEC': {'name': 'bangla_news_article_doc2vec.model', 'type': 'zip', 'url': 'https://huggingface.co/sagorsarker/news_article_doc2vec/resolve/main/news_article_doc2vec.zip'},
        'WIKI_DOC2VEC': {'name': 'bnwiki_doc2vec.model', 'type': 'zip', 'url': 'https://huggingface.co/sagorsarker/bnwiki_doc2vec_model/resolve/main/bnwiki_doc2vec_model.zip'},
        'WORD2VEC': {'name': 'bnwiki_word2vec.model', 'type': 'zip', 'url': 'https://huggingface.co/sagorsarker/bangla_word2vec/resolve/main/bangla_word2vec_gen4.zip'},
    }

    @staticmethod
    def get_model_info(name: str) -> tuple:
        """Get filename, type and URL of the named model.

        Args:
            name (str): Name of the model

        Raises:
            KeyError: if the model name is not in the registry

        Returns:
            tuple: (model name, model type, model URL)
        """
        try:
            model_info = ModelInfo.__url_dict[name]
            file_name = model_info['name']
            model_type = model_info['type']
            model_url = model_info['url']
            return (file_name, model_type, model_url)
        except KeyError as key_err:
            print(f'{name} model not found in the configuration')
            raise key_err

    @staticmethod
    def get_all_models() -> list:
        """Get the keys of all registered models.

        Returns:
            list: list of model keys
        """
        all_model_keys = list(ModelInfo.__url_dict.keys())
        return all_model_keys
def _create_dirs(model_name: str) -> str: 'Create directories for downloading models\n\n Args:\n model_name (str): Name of the model\n\n Returns:\n str: Absolute path where model can be downloaded\n ' model_dir = os.path.join(os.path.expanduser('~'), 'bnlp', 'models') os.makedirs(model_dir, exist_ok=True) model_path = os.path.join(model_dir, model_name) return model_path
def _unzip_file(zip_file_path: str, unzip_dir: str='') -> None:
    """Function to extract archives in .zip format.

    Members are extracted flattened (the archive's directory structure is
    discarded) into ``unzip_dir`` and the archive is deleted on success.
    On failure, any files in ``unzip_dir`` whose name contains the archive
    stem are removed and the error is re-raised.

    Args:
        zip_file_path (str): Path of archive to be extracted
        unzip_dir (str, optional): Directory where archive will be extracted. Defaults to "".

    Raises:
        zip_error: Error from ZipFile module
    """
    target_dir = unzip_dir if unzip_dir else os.path.dirname(zip_file_path)
    op_desc = f'Extracting: {os.path.basename(zip_file_path)}'
    try:
        with ZipFile(file=zip_file_path) as archive:
            for member_name in tqdm(archive.namelist(), desc=op_desc):
                base_name = os.path.basename(member_name)
                if not base_name:
                    # Directory entries have no basename; nothing to write.
                    continue
                destination_path = os.path.join(target_dir, base_name)
                source_file = archive.open(member_name)
                destination_file = open(destination_path, 'wb')
                with source_file, destination_file:
                    shutil.copyfileobj(source_file, destination_file)
        os.remove(zip_file_path)
    except Exception as zip_error:
        # Best-effort cleanup of partially extracted files before re-raising.
        archive_stem = os.path.splitext(os.path.basename(zip_file_path))[0]
        for existing_name in os.listdir(target_dir):
            if archive_stem in existing_name:
                os.remove(os.path.join(target_dir, existing_name))
        raise zip_error
def _download_file(file_url: str, file_path: str) -> str:
    """Download ``file_url`` to ``file_path`` with a progress bar.

    Returns immediately when the file already exists. On any failure the
    partial file is removed and the error re-raised.

    Args:
        file_url (str): URL of the file
        file_path (str): Path where the file will be downloaded

    Raises:
        network_error: Download related error

    Returns:
        str: Path where the file is downloaded
    """
    if os.path.exists(file_path):
        return file_path
    op_desc = f'Downloading {os.path.basename(file_path)}'
    try:
        with requests.Session() as req_sess:
            req_res = req_sess.get(file_url, stream=True)
            # Fail fast on HTTP errors instead of saving an error page as a model.
            req_res.raise_for_status()
            # Fix: Content-Length may be absent (e.g. chunked transfer);
            # the original int(None) raised TypeError in that case.
            total_length = int(req_res.headers.get('Content-Length', 0))
            with tqdm.wrapattr(req_res.raw, 'read', total=total_length, desc=op_desc) as raw:
                with open(file_path, 'wb') as file:
                    shutil.copyfileobj(raw, file)
        return file_path
    except Exception as network_error:
        if os.path.exists(file_path):
            os.remove(file_path)
        raise network_error
def _download_zip_model(model_url: str, model_path: str) -> str:
    """Download and extract a model archive, returning the extracted path.

    Args:
        model_url (str): URL of the model archive
        model_path (str): Expected path of the extracted model

    Returns:
        str: Path where the model is extracted after downloading
    """
    if os.path.exists(model_path):
        return model_path
    extract_dir = os.path.dirname(model_path)
    archive_name = os.path.basename(urlparse(model_url).path)
    archive_path = os.path.join(extract_dir, archive_name)
    _download_file(model_url, archive_path)
    _unzip_file(archive_path, extract_dir)
    return model_path
def download_model(name: str) -> str:
    """Ensure the named model exists locally and return its path.

    Args:
        name (str): model key (see ModelTypeEnum / ModelInfo)

    Returns:
        str: local model path, or '' for an unknown model type
    """
    model_name, model_type, model_url = ModelInfo.get_model_info(name)
    model_path = _create_dirs(model_name)
    if model_type == 'single':
        return _download_file(model_url, model_path)
    if model_type == 'zip':
        return _download_zip_model(model_url, model_path)
    print(f'model type {model_type} not yet implemented')
    return ''
def download_all_models() -> None:
    """Download and extract every model registered in ModelInfo."""
    for model_key in ModelInfo.get_all_models():
        download_model(model_key)
def features(sentence, index):
    """Build the CRF feature dict for the word at ``sentence[index]``.

    sentence: [w1, w2, ...], index: the index of the word.
    """
    word = sentence[index]
    last_index = len(sentence) - 1
    return {
        'word': word,
        'is_first': index == 0,
        'is_last': index == last_index,
        'is_capitalized': word[0].upper() == word[0],
        'is_all_caps': word.upper() == word,
        'is_all_lower': word.lower() == word,
        'prefix-1': word[0],
        'prefix-2': word[:2],
        'prefix-3': word[:3],
        'suffix-1': word[-1],
        'suffix-2': word[-2:],
        'suffix-3': word[-3:],
        'prev_word': '' if index == 0 else sentence[index - 1],
        'next_word': '' if index == last_index else sentence[index + 1],
        'has_hyphen': '-' in word,
        'is_numeric': word.isdigit(),
        'capitals_inside': word[1:].lower() != word[1:],
    }
def transform_to_dataset(tagged_sentences):
    """Convert [(word, tag), ...] sentences into parallel (features, labels) lists.

    Sentences that fail feature extraction are skipped, with the error printed.
    """
    X, y = [], []
    for tagged in tagged_sentences:
        try:
            X.append([features(untag(tagged), index) for index in range(len(tagged))])
            y.append([label for _, label in tagged])
        except Exception as err:
            print(err)
    return (X, y)
def load_pickle_model(model_path: str) -> CRF:
    """Deserialize and return the pickled CRF model stored at ``model_path``."""
    with open(model_path, 'rb') as pickle_file:
        return pickle.load(pickle_file)
class TestDocVec(unittest.TestCase):
    """Smoke test for BengaliDoc2vec document-vector inference."""

    def setUp(self):
        # Loads (downloading if needed) the pretrained news doc2vec model.
        self.doc2vec = BengaliDoc2vec()
        # A real Bengali news paragraph used as inference input.
        self.document = 'রাষ্ট্রবিরোধী ও উসকানিমূলক বক্তব্য দেওয়ার অভিযোগে গাজীপুরের গাছা থানায় ডিজিটাল নিরাপত্তা আইনে করা মামলায় আলোচিত ‘শিশুবক্তা’ রফিকুল ইসলামের বিরুদ্ধে অভিযোগ গঠন করেছেন আদালত। ফলে মামলার আনুষ্ঠানিক বিচার শুরু হলো। আজ বুধবার (২৬ জানুয়ারি) ঢাকার সাইবার ট্রাইব্যুনালের বিচারক আসসামছ জগলুল হোসেন এ অভিযোগ গঠন করেন। এর আগে, রফিকুল ইসলামকে কারাগার থেকে আদালতে হাজির করা হয়। এরপর তাকে নির্দোষ দাবি করে তার আইনজীবী শোহেল মো. ফজলে রাব্বি অব্যাহতি চেয়ে আবেদন করেন। অন্যদিকে, রাষ্ট্রপক্ষ অভিযোগ গঠনের পক্ষে শুনানি করেন। উভয় পক্ষের শুনানি শেষে আদালত অব্যাহতির আবেদন খারিজ করে অভিযোগ গঠনের মাধ্যমে বিচার শুরুর আদেশ দেন। একইসঙ্গে সাক্ষ্যগ্রহণের জন্য আগামী ২২ ফেব্রুয়ারি দিন ধার্য করেন আদালত।'

    def test_get_document_vector(self):
        vector = self.doc2vec.get_document_vector(self.document)
        # The pretrained model produces 100-dimensional vectors.
        self.assertEqual(vector.shape, (100,))
class TestBengaliFasttext(unittest.TestCase):
    """Smoke test for BengaliFasttext word-vector lookup."""

    def setUp(self):
        self.fasttext = BengaliFasttext()

    def test_generate_word_vector(self):
        word = 'আমি'
        # Fix: BengaliFasttext exposes get_word_vector; the original called
        # a nonexistent generate_word_vector, raising AttributeError.
        vector = self.fasttext.get_word_vector(word)
        self.assertEqual(vector.shape, (300,))
class TestBengaliGlove(unittest.TestCase):
    """Smoke test for BengaliGlove vector lookup."""

    def setUp(self):
        self.glove = BengaliGlove()

    def test_get_word_vector(self):
        # Pretrained GloVe vectors are 100-dimensional.
        vector = self.glove.get_word_vector('আমি')
        self.assertEqual(vector.shape, (100,))
class TestBengaliWord2Vec(unittest.TestCase):
    """Smoke tests for the pretrained Bengali word2vec wrapper."""

    def setUp(self):
        self.word2vec = BengaliWord2Vec()

    def test_get_word_vector(self):
        vector = self.word2vec.get_word_vector('আমি')
        self.assertEqual(vector.shape, (100,))

    def test_get_most_similar_words(self):
        topn = 5
        similar_words = self.word2vec.get_most_similar_words('আমি', topn=topn)
        self.assertEqual(len(similar_words), topn)
        # Each entry is a (word, similarity) pair.
        self.assertTrue(all((isinstance(candidate, str) and isinstance(score, float)) for (candidate, score) in similar_words))
class TestBengaliNER(unittest.TestCase):
    """End-to-end check of the pretrained Bengali NER tagger."""

    def setUp(self):
        self.ner = BengaliNER()

    def test_tag(self):
        result = self.ner.tag('সে ঢাকায় থাকে।')
        self.assertEqual(result, [('সে', 'O'), ('ঢাকায়', 'S-LOC'), ('থাকে', 'O')])
class TestBengaliPOS(unittest.TestCase):
    """End-to-end check of the pretrained Bengali POS tagger.

    Fix: this class was previously also named ``TestBengaliNER``, which
    shadowed the NER test class defined earlier in the module so those
    tests never ran under unittest discovery.
    """

    def setUp(self):
        self.pos = BengaliPOS()

    def test_tag(self):
        text = 'আমি ভাত খাই।'
        tags = self.pos.tag(text)
        self.assertEqual(tags, [('আমি', 'PPR'), ('ভাত', 'NC'), ('খাই', 'VM'), ('।', 'PU')])
class TestBasicTokenizer(unittest.TestCase):
    """Tests BasicTokenizer on short, long and abbreviation-bearing text."""

    def setUp(self):
        self.basic_tokenizer = BasicTokenizer()

    def test_basic_tokenizer_with_sample_bangla_text(self):
        text = 'আমি ভাত খাই।'
        tokens = self.basic_tokenizer(text)
        self.assertEqual(tokens, ['আমি', 'ভাত', 'খাই', '।'])

    def test_basic_tokenizer_with_long_bangla_text(self):
        text = '\n ভারত থেকে অনুপ্রবেশ ঠেকাতে বর্ডার গার্ড বাংলাদেশের (বিজিবি)\n সঙ্গে রাজশাহীর চরখানপুর সীমান্ত পাহারা দিচ্ছেন গ্রামবাসী।\n সীমান্তে নজরদারি জোরদার করার জন্য চরখানপুর গ্রামের প্রায় আড়াই শ\n বাসিন্দা রাত জেগে পালাক্রমে এই কাজ করছেন গত ২৮ নভেম্বর থেকে।\n '
        tokens = self.basic_tokenizer(text)
        gt_tokens = ['ভারত', 'থেকে', 'অনুপ্রবেশ', 'ঠেকাতে', 'বর্ডার', 'গার্ড', 'বাংলাদেশের', '(', 'বিজিবি', ')', 'সঙ্গে', 'রাজশাহীর', 'চরখানপুর', 'সীমান্ত', 'পাহারা', 'দিচ্ছেন', 'গ্রামবাসী', '।', 'সীমান্তে', 'নজরদারি', 'জোরদার', 'করার', 'জন্য', 'চরখানপুর', 'গ্রামের', 'প্রায়', 'আড়াই', 'শ', 'বাসিন্দা', 'রাত', 'জেগে', 'পালাক্রমে', 'এই', 'কাজ', 'করছেন', 'গত', '২৮', 'নভেম্বর', 'থেকে', '।']
        self.assertEqual(tokens, gt_tokens)

    def test_basic_tokenizer_with_dot_in_bangla_text(self):
        # 'মো.' is an abbreviation — the trailing '.' must stay attached.
        text = 'মো. রহিম বাজারে গিয়েছেন।'
        tokens = self.basic_tokenizer(text)
        gt_tokens = ['মো.', 'রহিম', 'বাজারে', 'গিয়েছেন', '।']
        self.assertEqual(tokens, gt_tokens)
class TestNLTKTokenizer(unittest.TestCase):
    """Tests NLTKTokenizer word and sentence tokenization.

    Fix: this class was previously named ``TestBasicTokenizer``, duplicating
    the BasicTokenizer test class defined earlier in the module and shadowing
    it under unittest discovery — those tests never ran.
    """

    def setUp(self):
        self.nltk_tokenizer = NLTKTokenizer()

    def test_nltk_word_tokenizer_with_sample_bangla_text(self):
        text = 'আমি ভাত খাই।'
        tokens = self.nltk_tokenizer.word_tokenize(text)
        self.assertEqual(tokens, ['আমি', 'ভাত', 'খাই', '।'])

    def test_nltk_word_tokenizer_with_long_bangla_text(self):
        text = '\n ভারত থেকে অনুপ্রবেশ ঠেকাতে বর্ডার গার্ড বাংলাদেশের (বিজিবি)\n সঙ্গে রাজশাহীর চরখানপুর সীমান্ত পাহারা দিচ্ছেন গ্রামবাসী।\n সীমান্তে নজরদারি জোরদার করার জন্য চরখানপুর গ্রামের প্রায় আড়াই শ\n বাসিন্দা রাত জেগে পালাক্রমে এই কাজ করছেন গত ২৮ নভেম্বর থেকে।\n '
        tokens = self.nltk_tokenizer.word_tokenize(text)
        gt_tokens = ['ভারত', 'থেকে', 'অনুপ্রবেশ', 'ঠেকাতে', 'বর্ডার', 'গার্ড', 'বাংলাদেশের', '(', 'বিজিবি', ')', 'সঙ্গে', 'রাজশাহীর', 'চরখানপুর', 'সীমান্ত', 'পাহারা', 'দিচ্ছেন', 'গ্রামবাসী', '।', 'সীমান্তে', 'নজরদারি', 'জোরদার', 'করার', 'জন্য', 'চরখানপুর', 'গ্রামের', 'প্রায়', 'আড়াই', 'শ', 'বাসিন্দা', 'রাত', 'জেগে', 'পালাক্রমে', 'এই', 'কাজ', 'করছেন', 'গত', '২৮', 'নভেম্বর', 'থেকে', '।']
        self.assertEqual(tokens, gt_tokens)

    def test_nltk_word_tokenizer_with_dot_in_bangla_text(self):
        # 'মো.' is an abbreviation — the trailing '.' must stay attached.
        text = 'মো. রহিম বাজারে গিয়েছেন।'
        tokens = self.nltk_tokenizer.word_tokenize(text)
        gt_tokens = ['মো.', 'রহিম', 'বাজারে', 'গিয়েছেন', '।']
        self.assertEqual(tokens, gt_tokens)

    def test_nltk_sentence_tokenizer(self):
        text = 'আমি ভাত খাই। সে বাজারে যায়। কী বলছো এসব?'
        gt_tokens = ['আমি ভাত খাই।', 'সে বাজারে যায়।', 'কী বলছো এসব?']
        sentence_tokens = self.nltk_tokenizer.sentence_tokenize(text)
        self.assertEqual(sentence_tokens, gt_tokens)
class TestSentencepieceTokenizer(unittest.TestCase):
    """Checks SentencePiece tokenization against known subword pieces."""

    def setUp(self):
        self.bsp = SentencepieceTokenizer()
        self.input_text = 'সে বাজারে যায়।'
        self.input_text_gt_tokens = ['▁সে', '▁বাজারে', '▁যায়', '।']

    def test_sentencepiece_tokenizer_with_input_bangla_text_and_trained_model(self):
        self.assertEqual(self.bsp.tokenize(self.input_text), self.input_text_gt_tokens)
def compute_intermediate_size(n):
    """Return ceil(n * 8/3) rounded up to the next multiple of 256."""
    raw = int(math.ceil((n * 8) / 3))
    return ((raw + 255) // 256) * 256
def read_json(path):
    """Load and return the JSON document stored at ``path``."""
    with open(path, 'r') as handle:
        return json.load(handle)
def write_json(text, path):
    """Serialize ``text`` as JSON into the file at ``path``."""
    with open(path, 'w') as handle:
        json.dump(text, handle)
def write_model(model_path, input_base_path, model_size):
    """Convert a native LLaMA checkpoint into the Hugging Face Transformers
    on-disk layout: one ``pytorch_model-*.bin`` file per transformer layer
    plus a final shard, an index JSON, ``config.json`` and
    ``generation_config.json``.

    Parameters
    ----------
    model_path : str
        Destination directory for the converted model (created if absent).
    input_base_path : str
        Directory containing ``params.json`` and the ``consolidated.*.pth``
        shard file(s) of the original checkpoint.
    model_size : str
        Checkpoint size key; must be a key of the module-level NUM_SHARDS map.
    """
    assert (model_size in NUM_SHARDS)
    os.makedirs(model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, 'params.json'))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['n_layers']
    n_heads = params['n_heads']
    n_heads_per_shard = (n_heads // num_shards)
    dim = params['dim']
    dims_per_head = (dim // n_heads)
    base = 10000.0
    # Rotary position embedding inverse frequencies, one per even head dim.
    inv_freq = (1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)))

    def permute(w):
        # Reorder rows of a (dim, dim) q/k projection weight from the
        # original interleaved rotary layout into the half-split layout the
        # HF LLaMA rotary implementation expects.
        return w.view(n_heads, ((dim // n_heads) // 2), 2, dim).transpose(1, 2).reshape(dim, dim)

    if (model_size == '7B'):
        # 7B ships as a single unsharded state dict.
        loaded = torch.load(os.path.join(input_base_path, 'consolidated.00.pth'), map_location='cpu')
    else:
        # Larger sizes are tensor-parallel sharded: one state dict per shard.
        loaded = [torch.load(os.path.join(input_base_path, f'consolidated.{i:02d}.pth'), map_location='cpu') for i in range(num_shards)]
    param_count = 0
    index_dict = {'weight_map': {}}
    # Emit one output file per transformer layer; the final (n_layers+1)-th
    # file written after this loop carries embeddings, norm and lm_head.
    for layer_i in range(n_layers):
        filename = 'pytorch_model-{:05d}-of-{:05d}.bin'.format((layer_i + 1), (n_layers + 1))
        if (model_size == '7B'):
            # Unsharded: rename keys directly, permuting q/k for rotary layout.
            state_dict = {
                f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(loaded[f'layers.{layer_i}.attention.wq.weight']),
                f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(loaded[f'layers.{layer_i}.attention.wk.weight']),
                f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
                f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
                f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
                f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
                f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
                f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
                f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight']}
        else:
            # Sharded: layernorm weights are replicated across shards, so
            # shard 0's copy suffices.
            state_dict = {
                f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][f'layers.{layer_i}.attention_norm.weight'],
                f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][f'layers.{layer_i}.ffn_norm.weight']}
            # q/k/v are row-sharded across heads (concatenate on dim 0);
            # q and k additionally need the rotary permutation.
            state_dict[f'model.layers.{layer_i}.self_attn.q_proj.weight'] = permute(torch.cat([loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards)], dim=0).reshape(dim, dim))
            state_dict[f'model.layers.{layer_i}.self_attn.k_proj.weight'] = permute(torch.cat([loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards)], dim=0).reshape(dim, dim))
            state_dict[f'model.layers.{layer_i}.self_attn.v_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards)], dim=0).reshape(dim, dim)
            # Output projection and down_proj are column-sharded (dim 1);
            # gate_proj/up_proj are row-sharded (dim 0).
            state_dict[f'model.layers.{layer_i}.self_attn.o_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(num_shards)], dim=1)
            state_dict[f'model.layers.{layer_i}.mlp.gate_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(num_shards)], dim=0)
            state_dict[f'model.layers.{layer_i}.mlp.down_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(num_shards)], dim=1)
            state_dict[f'model.layers.{layer_i}.mlp.up_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(num_shards)], dim=0)
        state_dict[f'model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'] = inv_freq
        # Record which file each tensor landed in, and count parameters.
        for (k, v) in state_dict.items():
            index_dict['weight_map'][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(model_path, filename))
    # Final shard: token embeddings, final norm and the LM head.
    filename = 'pytorch_model-{:05d}-of-{:05d}.bin'.format((n_layers + 1), (n_layers + 1))
    if (model_size == '7B'):
        state_dict = {
            'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
            'model.norm.weight': loaded['norm.weight'],
            'lm_head.weight': loaded['output.weight']}
    else:
        # Embeddings are sharded along the hidden dim (dim 1), the LM head
        # along the vocab dim (dim 0); the norm is replicated.
        state_dict = {
            'model.norm.weight': loaded[0]['norm.weight'],
            'model.embed_tokens.weight': torch.cat([loaded[i]['tok_embeddings.weight'] for i in range(num_shards)], dim=1),
            'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(num_shards)], dim=0)}
    for (k, v) in state_dict.items():
        index_dict['weight_map'][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(model_path, filename))
    # total_size assumes 2 bytes/parameter (float16 storage).
    index_dict['metadata'] = {'total_size': (param_count * 2)}
    write_json(index_dict, os.path.join(model_path, 'pytorch_model.bin.index.json'))
    # Transformers model config derived from params.json.
    config_out = {
        'architectures': ['LlamaForCausalLM'],
        'bos_token_id': 1,
        'eos_token_id': 2,
        'hidden_act': 'silu',
        'hidden_size': dim,
        'intermediate_size': compute_intermediate_size(dim),
        'initializer_range': 0.02,
        'max_sequence_length': 2048,
        'model_type': 'llama',
        'num_attention_heads': params['n_heads'],
        'num_hidden_layers': params['n_layers'],
        'pad_token_id': 0,
        'rms_norm_eps': params['norm_eps'],
        'torch_dtype': 'float16',
        'transformers_version': '4.27.0.dev0',
        'use_cache': True,
        'vocab_size': 32000}
    write_json(config_out, os.path.join(model_path, 'config.json'))
    generation_config = {
        '_from_model_config': True,
        'bos_token_id': 1,
        'eos_token_id': 2,
        'pad_token_id': 0,
        'transformers_version': '4.27.0.dev0'}
    write_json(generation_config, os.path.join(model_path, 'generation_config.json'))
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Materialize a LlamaTokenizer directory at *tokenizer_path*.

    Writes an empty special-tokens map, a minimal tokenizer config, and
    copies the SentencePiece model file from *input_tokenizer_path*.
    """
    os.makedirs(tokenizer_path, exist_ok=True)
    # No extra special tokens beyond what the model file itself defines.
    write_json({}, os.path.join(tokenizer_path, 'special_tokens_map.json'))
    tokenizer_config = {
        'bos_token': '',
        'eos_token': '',
        'model_max_length': int(1e+30),  # effectively unbounded
        'tokenizer_class': 'LlamaTokenizer',
        'unk_token': '',
    }
    write_json(tokenizer_config, os.path.join(tokenizer_path, 'tokenizer_config.json'))
    shutil.copyfile(input_tokenizer_path, os.path.join(tokenizer_path, 'tokenizer.model'))
def main():
    """CLI entry point: convert LLaMA weights and tokenizer to the HF layout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders')
    parser.add_argument('--model_size', choices=['7B', '13B', '30B', '65B', 'tokenizer_only'])
    parser.add_argument('--output_dir', help='Location to write HF model and tokenizer')
    args = parser.parse_args()
    # Skip weight conversion when only the tokenizer is requested.
    if (args.model_size != 'tokenizer_only'):
        out_model_dir = os.path.join(args.output_dir, 'llama-{}'.format(args.model_size).lower())
        write_model(
            model_path=out_model_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
        )
    write_tokenizer(
        tokenizer_path=os.path.join(args.output_dir, 'tokenizer'),
        input_tokenizer_path=os.path.join(args.input_dir, 'tokenizer.model'),
    )
def encode_prompt(prompt_instructions):
    """Encode multiple prompt instructions into a single string.

    Reads the fixed few-shot preamble from ./prompt.txt, then appends each
    task as a numbered Instruction/Input/Output triple, and finally opens
    the next numbered Instruction slot for the model to complete.

    Parameters
    ----------
    prompt_instructions : list of dict
        Each dict has 'instruction', 'input' and 'output' string fields.
    """
    # `with` ensures the handle is closed; the previous bare open().read()
    # leaked the file object.
    with open('./prompt.txt') as f:
        prompt = f.read() + '\n'
    for (idx, task_dict) in enumerate(prompt_instructions):
        (instruction, input, output) = (task_dict['instruction'], task_dict['input'], task_dict['output'])
        # Collapse internal whitespace and drop a trailing colon.
        instruction = re.sub('\\s+', ' ', instruction).strip().rstrip(':')
        # An empty input field is rendered as the explicit <noinput> marker.
        input = ('<noinput>' if (input.lower() == '') else input)
        prompt += f'###\n'
        prompt += f'{(idx + 1)}. Instruction: {instruction}\n'
        prompt += f'{(idx + 1)}. Input:\n{input}\n'
        prompt += f'{(idx + 1)}. Output:\n{output}\n'
    # Open the next slot for the model to continue generating from.
    prompt += f'###\n'
    prompt += f'{(idx + 2)}. Instruction:'
    return prompt
def post_process_gpt3_response(num_prompt_instructions, response):
    """Parse a completion-API response into clean instruction dicts.

    Splits the generated text back into numbered Instruction/Input/Output
    triples and applies a series of quality filters. Returns a list of
    dicts with 'instruction', 'input' and 'output' keys.

    Parameters
    ----------
    num_prompt_instructions : int
        Number of few-shot examples already in the prompt; generated items
        are numbered starting after these.
    response : dict or None
        Completion payload with 'text' and 'finish_reason' keys
        (assumes the OpenAI completion-choice shape — TODO confirm caller).
    """
    if (response is None):
        return []
    # Re-prepend the numbered header that the prompt ended with, so every
    # generated item (including the first) has the same "N. Instruction:" shape.
    raw_instructions = (f'{(num_prompt_instructions + 1)}. Instruction:' + response['text'])
    raw_instructions = re.split('###', raw_instructions)
    instructions = []
    for (idx, inst) in enumerate(raw_instructions):
        # If decoding stopped for length, the last item is likely truncated; drop it.
        if ((idx == (len(raw_instructions) - 1)) and (response['finish_reason'] == 'length')):
            continue
        idx += (num_prompt_instructions + 1)
        # Split on "N. Instruction:", "N. Input:", "N. Output:" markers;
        # a well-formed item yields exactly 7 parts (text + 3 label/value pairs).
        splitted_data = re.split(f'{idx}\.\s+(Instruction|Input|Output):', inst)
        if (len(splitted_data) != 7):
            continue
        else:
            inst = splitted_data[2].strip()
            input = splitted_data[4].strip()
            # Normalize the explicit <noinput> marker back to an empty string.
            input = ('' if (input.lower() == '<noinput>') else input)
            output = splitted_data[6].strip()
        # Filter out instructions that are too short or too long (word count).
        if ((len(inst.split()) <= 3) or (len(inst.split()) > 150)):
            continue
        # Filter keywords unsuitable for a text-only language model.
        blacklist = ['image', 'images', 'graph', 'graphs', 'picture', 'pictures', 'file', 'files', 'map', 'maps', 'draw', 'plot', 'go to', 'video', 'audio', 'music', 'flowchart', 'diagram']
        blacklist += []
        if any((find_word_in_string(word, inst) for word in blacklist)):
            continue
        # Filter programming tasks phrased as "Write a program ...".
        if inst.startswith('Write a program'):
            continue
        # Filter items starting with punctuation.
        if (inst[0] in string.punctuation):
            continue
        # Filter items starting with a non-ASCII (non-English) character.
        if (not inst[0].isascii()):
            continue
        instructions.append({'instruction': inst, 'input': input, 'output': output})
    return instructions
def find_word_in_string(w, s):
    """Return a regex match if *w* occurs in *s* as a whole word
    (case-insensitive), else None."""
    pattern = '\\b({0})\\b'.format(w)
    return re.search(pattern, s, flags=re.IGNORECASE)