_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q262100
bond_task
validation
def bond_task( perc_graph_result, seeds, ps, convolution_factors_tasks_iterator ): """ Perform a number of runs The number of runs is the number of seeds convolution_factors_tasks_iterator needs to be an iterator We shield the convolution factors tasks from jug value/result mechanism by supplying an iterator to the list of tasks for lazy evaluation http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L100 http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L455 """ # restore the list of convolution factors tasks convolution_factors_tasks = list(convolution_factors_tasks_iterator) return reduce( percolate.hpc.bond_reduce, map( bond_run, itertools.repeat(perc_graph_result), seeds, itertools.repeat(ps), itertools.repeat(convolution_factors_tasks), ) )
python
{ "resource": "" }
q262101
get_peripheral_successor_edges
validation
def get_peripheral_successor_edges(graph: BELGraph, subgraph: BELGraph) -> EdgeIterator:
    """Iterate over candidate successor edges peripheral to the sub-graph.

    Yields ``(source, target, key)`` triples whose source lies inside the
    sub-graph while the target lies outside of it.
    """
    for source in subgraph:
        for _, target, key in graph.out_edges(source, keys=True):
            if target in subgraph:
                continue
            yield source, target, key
python
{ "resource": "" }
q262102
get_peripheral_predecessor_edges
validation
def get_peripheral_predecessor_edges(graph: BELGraph, subgraph: BELGraph) -> EdgeIterator:
    """Iterate over candidate predecessor edges peripheral to the sub-graph.

    Yields ``(source, target, key)`` triples whose target lies inside the
    sub-graph while the source lies outside of it.
    """
    for target in subgraph:
        for source, _, key in graph.in_edges(target, keys=True):
            if source in subgraph:
                continue
            yield source, target, key
python
{ "resource": "" }
q262103
count_sources
validation
def count_sources(edge_iter: EdgeIterator) -> Counter:
    """Count the source nodes in an edge iterator with keys and data.

    :return: A counter of source nodes in the iterable
    """
    source_nodes = (source for source, _, _ in edge_iter)
    return Counter(source_nodes)
python
{ "resource": "" }
q262104
count_targets
validation
def count_targets(edge_iter: EdgeIterator) -> Counter:
    """Count the target nodes in an edge iterator with keys and data.

    :return: A counter of target nodes in the iterable
    """
    target_nodes = (target for _, target, _ in edge_iter)
    return Counter(target_nodes)
python
{ "resource": "" }
q262105
get_subgraph_edges
validation
def get_subgraph_edges(graph: BELGraph,
                       annotation: str,
                       value: str,
                       source_filter=None,
                       target_filter=None,
                       ):
    """Iterate over all edges in the sub-graph whose endpoints pass the given filters.

    :param pybel.BELGraph graph: A BEL graph
    :param str annotation: The annotation to search
    :param str value: The annotation value to search by
    :param source_filter: Optional filter for source nodes (graph, node) -> bool
    :param target_filter: Optional filter for target nodes (graph, node) -> bool
    :return: An iterable of (source node, target node, key, data) for all edges that match the annotation/value
             and node filters
    :rtype: iter[tuple]
    """
    # fall back to the permissive predicate when no filter is supplied
    source_filter = keep_node_permissive if source_filter is None else source_filter
    target_filter = keep_node_permissive if target_filter is None else target_filter

    for u, v, k, data in graph.edges(keys=True, data=True):
        if not edge_has_annotation(data, annotation):
            continue
        if data[ANNOTATIONS][annotation] == value and source_filter(graph, u) and target_filter(graph, v):
            yield u, v, k, data
python
{ "resource": "" }
q262106
get_subgraph_peripheral_nodes
validation
def get_subgraph_peripheral_nodes(graph: BELGraph,
                                  subgraph: Iterable[BaseEntity],
                                  node_predicates: NodePredicates = None,
                                  edge_predicates: EdgePredicates = None,
                                  ):
    """Get a summary dictionary of all peripheral nodes to a given sub-graph.

    :return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)},
                                              'predecessor': {internal node: list of (key, dict)}}}
    :rtype: dict

    For example, it might be useful to quantify the number of predecessors and successors:

    >>> from pybel.struct.filters import exclude_pathology_filter
    >>> value = 'Blood vessel dilation subgraph'
    >>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value)
    >>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter)
    >>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True):
    >>>     if 1 == len(p[value][node]['successor']) or 1 == len(p[value][node]['predecessor']):
    >>>         continue
    >>>     print(node,
    >>>           len(p[node]['successor']),
    >>>           len(p[node]['predecessor']),
    >>>           len(set(p[node]['successor']) | set(p[node]['predecessor'])))
    """
    node_filter = concatenate_node_predicates(node_predicates=node_predicates)
    edge_filter = and_edge_predicates(edge_predicates=edge_predicates)

    result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

    def _edge_passes(u, v, k):
        # same evaluation order as the predicates were originally applied:
        # target node, then source node, then the edge itself
        return node_filter(graph, v) and node_filter(graph, u) and edge_filter(graph, u, v, k)

    # successor edges leave the sub-graph: the external node v gains u as a predecessor
    for u, v, k, d in get_peripheral_successor_edges(graph, subgraph):
        if _edge_passes(u, v, k):
            result[v]['predecessor'][u].append((k, d))

    # predecessor edges enter the sub-graph: the external node u gains v as a successor
    for u, v, k, d in get_peripheral_predecessor_edges(graph, subgraph):
        if _edge_passes(u, v, k):
            result[u]['successor'][v].append((k, d))

    return result
python
{ "resource": "" }
q262107
enrich_complexes
validation
def enrich_complexes(graph: BELGraph) -> None:
    """Add all of the members of the complex abundances to the graph."""
    # snapshot the node list first, since adding members mutates the graph
    for complex_node in list(get_nodes_by_function(graph, COMPLEX)):
        for member in complex_node.members:
            graph.add_has_component(complex_node, member)
python
{ "resource": "" }
q262108
enrich_composites
validation
def enrich_composites(graph: BELGraph):
    """Add all of the members of the composite abundances to the graph."""
    # snapshot the node list first, since adding members mutates the graph
    for composite_node in list(get_nodes_by_function(graph, COMPOSITE)):
        for member in composite_node.members:
            graph.add_has_component(composite_node, member)
python
{ "resource": "" }
q262109
enrich_reactions
validation
def enrich_reactions(graph: BELGraph):
    """Add all of the reactants and products of reactions to the graph."""
    # snapshot the node list first, since adding participants mutates the graph
    for reaction in list(get_nodes_by_function(graph, REACTION)):
        for reactant in reaction.reactants:
            graph.add_has_reactant(reaction, reactant)
        for product in reaction.products:
            graph.add_has_product(reaction, product)
python
{ "resource": "" }
q262110
enrich_variants
validation
def enrich_variants(graph: BELGraph, func: Union[None, str, Iterable[str]] = None):
    """Add the reference nodes for all variants of the given function.

    :param graph: The target BEL graph to enrich
    :param func: The function by which the subject of each triple is filtered. Defaults to the set of
     protein, rna, mirna, and gene.
    """
    if func is None:
        func = {PROTEIN, RNA, MIRNA, GENE}

    for variant in list(get_nodes_by_function(graph, func)):
        parent = variant.get_parent()
        if parent is None:
            continue
        # NOTE(review): the edge is only added when the parent is *absent*
        # from the graph — confirm this guard is intentional upstream
        if parent not in graph:
            graph.add_has_variant(parent, variant)
python
{ "resource": "" }
q262111
enrich_unqualified
validation
def enrich_unqualified(graph: BELGraph):
    """Enrich the sub-graph with the unqualified edges from the graph.

    The reason you might want to do this is you induce a sub-graph from the original graph based on an annotation
    filter, but the unqualified edges that don't have annotations that most likely connect elements within your graph
    are not included.

    .. seealso:: This function thinly wraps the successive application of the following functions:

        - :func:`enrich_complexes`
        - :func:`enrich_composites`
        - :func:`enrich_reactions`
        - :func:`enrich_variants`

    Equivalent to:

    >>> enrich_complexes(graph)
    >>> enrich_composites(graph)
    >>> enrich_reactions(graph)
    >>> enrich_variants(graph)
    """
    # apply each enrichment in turn, in the documented order
    for enrich in (enrich_complexes, enrich_composites, enrich_reactions, enrich_variants):
        enrich(graph)
python
{ "resource": "" }
q262112
expand_internal
validation
def expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
    """Edges between entities in the sub-graph that pass the given filters.

    :param universe: The full graph
    :param graph: A sub-graph to find the upstream information
    :param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool
    """
    edge_filter = and_edge_predicates(edge_predicates)

    for source, target in itt.product(graph, repeat=2):
        # skip pairs already connected in the sub-graph, and pairs with no
        # edge in the universe to copy over
        if graph.has_edge(source, target) or not universe.has_edge(source, target):
            continue

        # bucket the universe's passing edges between this pair by relation type
        edges_by_relation = defaultdict(list)
        for key, data in universe[source][target].items():
            if edge_filter(universe, source, target, key):
                edges_by_relation[data[RELATION]].append((key, data))

        # only copy edges over when the relation type is unambiguous
        if len(edges_by_relation) != 1:
            log.debug('Multiple relationship types found between %s and %s', source, target)
            continue

        relation = next(iter(edges_by_relation))
        for key, data in edges_by_relation[relation]:
            graph.add_edge(source, target, key=key, **data)
python
{ "resource": "" }
q262113
expand_internal_causal
validation
def expand_internal_causal(universe: BELGraph, graph: BELGraph) -> None:
    """Add causal edges between entities in the sub-graph.

    Is an extremely thin wrapper around :func:`expand_internal`.

    :param universe: A BEL graph representing the universe of all knowledge
    :param graph: The target BEL graph to enrich with causal relations between contained nodes

    Equivalent to:

    >>> from pybel_tools.mutation import expand_internal
    >>> from pybel.struct.filters.edge_predicates import is_causal_relation
    >>> expand_internal(universe, graph, edge_predicates=is_causal_relation)
    """
    expand_internal(
        universe,
        graph,
        edge_predicates=is_causal_relation,
    )
python
{ "resource": "" }
q262114
get_namespaces_with_incorrect_names
validation
def get_namespaces_with_incorrect_names(graph: BELGraph) -> Set[str]:
    """Return the set of all namespaces with incorrect names in the graph."""
    namespaces = set()
    for _, exc, _ in graph.warnings:
        if isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):
            namespaces.add(exc.namespace)
    return namespaces
python
{ "resource": "" }
q262115
get_undefined_namespaces
validation
def get_undefined_namespaces(graph: BELGraph) -> Set[str]:
    """Get all namespaces that are used in the BEL graph but aren't actually defined."""
    undefined = set()
    for _, exc, _ in graph.warnings:
        if isinstance(exc, UndefinedNamespaceWarning):
            undefined.add(exc.namespace)
    return undefined
python
{ "resource": "" }
q262116
get_incorrect_names_by_namespace
validation
def get_incorrect_names_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:
    """Return the set of all incorrect names from the given namespace in the graph.

    :return: The set of all incorrect names from the given namespace in the graph
    """
    names = set()
    for _, exc, _ in graph.warnings:
        if not isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):
            continue
        if exc.namespace == namespace:
            names.add(exc.name)
    return names
python
{ "resource": "" }
q262117
get_undefined_namespace_names
validation
def get_undefined_namespace_names(graph: BELGraph, namespace: str) -> Set[str]:
    """Get the names from a namespace that wasn't actually defined.

    :return: The set of all names from the undefined namespace
    """
    names = set()
    for _, exc, _ in graph.warnings:
        if isinstance(exc, UndefinedNamespaceWarning) and exc.namespace == namespace:
            names.add(exc.name)
    return names
python
{ "resource": "" }
q262118
get_incorrect_names
validation
def get_incorrect_names(graph: BELGraph) -> Mapping[str, Set[str]]:
    """Return the dict of the sets of all incorrect names from each namespace in the graph.

    :return: A dictionary from namespace to the set of its incorrect names in the graph
    """
    result = {}
    for namespace in get_namespaces(graph):
        result[namespace] = get_incorrect_names_by_namespace(graph, namespace)
    return result
python
{ "resource": "" }
q262119
group_errors
validation
def group_errors(graph: BELGraph) -> Mapping[str, List[int]]:
    """Group the errors together for analysis of the most frequent error.

    :return: A dictionary of {error string: list of line numbers}
    """
    grouped = defaultdict(list)
    for _, exc, _ in graph.warnings:
        grouped[str(exc)].append(exc.line_number)
    # return a plain dict so callers don't accidentally auto-create entries
    return dict(grouped)
python
{ "resource": "" }
q262120
get_names_including_errors
validation
def get_names_including_errors(graph: BELGraph) -> Mapping[str, Set[str]]:
    """Get the union of the correct and erroneous names per namespace.

    :return: The dict of the sets of all correct and incorrect names from each namespace in the graph
    """
    result = {}
    for namespace in get_namespaces(graph):
        result[namespace] = get_names_including_errors_by_namespace(graph, namespace)
    return result
python
{ "resource": "" }
q262121
count_defaultdict
validation
def count_defaultdict(dict_of_lists: Mapping[X, List[Y]]) -> Mapping[X, typing.Counter[Y]]:
    """Count the number of elements in each value of the dictionary."""
    counted = {}
    for key, values in dict_of_lists.items():
        counted[key] = Counter(values)
    return counted
python
{ "resource": "" }
q262122
set_percentage
validation
def set_percentage(x: Iterable[X], y: Iterable[X]) -> float:
    """What percentage of x is contained within y?

    :param set x: A set
    :param set y: Another set
    :return: The percentage of x contained within y
    """
    x_set = set(x)
    if not x_set:
        # avoid division by zero on an empty left-hand set
        return 0.0
    return len(x_set.intersection(y)) / len(x_set)
python
{ "resource": "" }
q262123
tanimoto_set_similarity
validation
def tanimoto_set_similarity(x: Iterable[X], y: Iterable[X]) -> float:
    """Calculate the tanimoto set similarity."""
    first, second = set(x), set(y)
    if not first and not second:
        # both empty: the union is empty, so define similarity as zero
        return 0.0
    return len(first & second) / len(first | second)
python
{ "resource": "" }
q262124
min_tanimoto_set_similarity
validation
def min_tanimoto_set_similarity(x: Iterable[X], y: Iterable[X]) -> float:
    """Calculate the tanimoto set similarity using the minimum size.

    :param set x: A set
    :param set y: Another set
    :return: The similarity between
    """
    first, second = set(x), set(y)
    if not first or not second:
        # either set empty: the minimum size is zero, so define similarity as zero
        return 0.0
    return len(first & second) / min(len(first), len(second))
python
{ "resource": "" }
q262125
calculate_single_tanimoto_set_distances
validation
def calculate_single_tanimoto_set_distances(target: Iterable[X], dict_of_sets: Mapping[Y, Set[X]]) -> Mapping[Y, float]:
    """Return a dictionary of distances keyed by the keys in the given dict.

    Distances are calculated based on pairwise tanimoto similarity of the sets contained

    :param set target: A set
    :param dict_of_sets: A dict of {x: set of y}
    :type dict_of_sets: dict
    :return: A similarity dicationary based on the set overlap (tanimoto) score between the target set and the sets in
            dos
    :rtype: dict
    """
    # materialize the target once so it is not re-consumed per comparison
    base = set(target)

    result = {}
    for key, other in dict_of_sets.items():
        result[key] = tanimoto_set_similarity(base, other)
    return result
python
{ "resource": "" }
q262126
calculate_tanimoto_set_distances
validation
def calculate_tanimoto_set_distances(dict_of_sets: Mapping[X, Set]) -> Mapping[X, Mapping[X, float]]:
    """Return a distance matrix keyed by the keys in the given dict.

    Distances are calculated based on pairwise tanimoto similarity of the sets contained.

    :param dict_of_sets: A dict of {x: set of y}
    :return: A similarity matrix based on the set overlap (tanimoto) score between each x as a dict of dicts
    """
    result: Dict[X, Dict[X, float]] = defaultdict(dict)

    # off-diagonal entries: symmetric pairwise similarity
    for left, right in itt.combinations(dict_of_sets, 2):
        similarity = tanimoto_set_similarity(dict_of_sets[left], dict_of_sets[right])
        result[left][right] = similarity
        result[right][left] = similarity

    # every set is perfectly similar to itself
    for key in dict_of_sets:
        result[key][key] = 1.0

    return dict(result)
python
{ "resource": "" }
q262127
calculate_global_tanimoto_set_distances
validation
def calculate_global_tanimoto_set_distances(dict_of_sets: Mapping[X, Set]) -> Mapping[X, Mapping[X, float]]:
    r"""Calculate an alternative distance matrix based on the following equation.

    .. math:: distance(A, B)=1- \|A \cup B\| / \| \cup_{s \in S} s\|

    :param dict_of_sets: A dict of {x: set of y}
    :return: A similarity matrix based on the alternative tanimoto distance as a dict of dicts
    """
    # the universe is the union of every set in the mapping
    universe = set(itt.chain.from_iterable(dict_of_sets.values()))
    universe_size = len(universe)

    result: Dict[X, Dict[X, float]] = defaultdict(dict)

    # off-diagonal entries are symmetric
    for x, y in itt.combinations(dict_of_sets, 2):
        result[x][y] = result[y][x] = 1.0 - len(dict_of_sets[x] | dict_of_sets[y]) / universe_size

    for x in dict_of_sets:
        # BUG FIX: the diagonal previously used ``len(x)`` — the length of the
        # *key* — instead of the size of the set it maps to. Per the formula
        # above, distance(A, A) = 1 - |A| / |universe|.
        result[x][x] = 1.0 - len(dict_of_sets[x]) / universe_size

    return dict(result)
python
{ "resource": "" }
q262128
barh
validation
def barh(d, plt, title=None):
    """A convenience function for plotting a horizontal bar plot from a Counter"""
    # labels sorted by ascending count
    labels = sorted(d, key=d.get)
    positions = range(len(labels))

    plt.yticks(positions, labels)
    plt.barh(positions, [d[label] for label in labels])

    if title is not None:
        plt.title(title)
python
{ "resource": "" }
q262129
barv
validation
def barv(d, plt, title=None, rotation='vertical'):
    """A convenience function for plotting a vertical bar plot from a Counter"""
    # labels sorted by descending count
    labels = sorted(d, key=d.get, reverse=True)
    positions = range(len(labels))

    plt.xticks(positions, labels, rotation=rotation)
    plt.bar(positions, [d[label] for label in labels])

    if title is not None:
        plt.title(title)
python
{ "resource": "" }
q262130
safe_add_edge
validation
def safe_add_edge(graph, u, v, key, attr_dict, **attr):
    """Adds an edge while preserving negative keys, and paying no respect to positive ones

    :param pybel.BELGraph graph: A BEL Graph
    :param tuple u: The source BEL node
    :param tuple v: The target BEL node
    :param int key: The edge key. If less than zero, corresponds to an unqualified edge, else is disregarded
    :param dict attr_dict: The edge data dictionary
    :param dict attr: Edge data to assign via keyword arguments
    """
    if key >= 0:
        # qualified edge: let the graph assign its own key
        graph.add_edge(u, v, attr_dict=attr_dict, **attr)
    else:
        # unqualified edge: preserve the negative key
        graph.add_edge(u, v, key=key, attr_dict=attr_dict, **attr)
python
{ "resource": "" }
q262131
prepare_c3
validation
def prepare_c3(data: Union[List[Tuple[str, int]], Mapping[str, int]],
               y_axis_label: str = 'y',
               x_axis_label: str = 'x',
               ) -> str:
    """Prepare a C3 JSON string for making a bar chart from a Counter.

    :param data: A dictionary of {str: int} to display as bar chart
    :param y_axis_label: The Y axis label
    :param x_axis_label: X axis internal label. Should be left as default 'x')
    :return: A JSON dictionary for making a C3 bar chart
    """
    if not isinstance(data, list):
        # mappings are sorted by count, descending
        data = sorted(data.items(), key=itemgetter(1), reverse=True)

    try:
        labels, values = zip(*data)
    except ValueError:
        # empty input: emit a chart with no bars
        log.info(f'no values found for {x_axis_label}, {y_axis_label}')
        labels, values = [], []

    columns = [
        [x_axis_label, *labels],
        [y_axis_label, *values],
    ]
    return json.dumps(columns)
python
{ "resource": "" }
q262132
prepare_c3_time_series
validation
def prepare_c3_time_series(data: List[Tuple[int, int]], y_axis_label: str = 'y', x_axis_label: str = 'x') -> str:
    """Prepare C3 JSON string dump for a time series.

    :param data: A list of tuples [(year, count)]
    :param y_axis_label: The Y axis label
    :param x_axis_label: X axis internal label. Should be left as default 'x')
    """
    years, counts = zip(*data)

    # represent each year as an ISO-8601 date on January 1st
    iso_years = [datetime.date(year, 1, 1).isoformat() for year in years]

    return json.dumps([
        [x_axis_label, *iso_years],
        [y_axis_label, *counts],
    ])
python
{ "resource": "" }
q262133
calculate_betweenness_centality
validation
def calculate_betweenness_centality(graph: BELGraph, number_samples: int = CENTRALITY_SAMPLES) -> Counter:
    """Calculate the betweenness centrality over nodes in the graph.

    Tries to do it with a certain number of samples, but then tries a complete approach if it fails.

    (The name keeps the upstream spelling "centality" so existing callers keep working.)
    """
    try:
        centrality = nx.betweenness_centrality(graph, k=number_samples)
    except Exception:
        # sampling failed (e.g. fewer nodes than samples); fall back to exact
        centrality = nx.betweenness_centrality(graph)
    return Counter(centrality)
python
{ "resource": "" }
q262134
canonical_circulation
validation
def canonical_circulation(elements: T, key: Optional[Callable[[T], bool]] = None) -> T:
    """Get a canonical representation of the ordered collection by finding its minimum circulation with the
    given sort key
    """
    circulations = get_circulations(elements)
    return min(circulations, key=key)
python
{ "resource": "" }
q262135
pair_has_contradiction
validation
def pair_has_contradiction(graph: BELGraph, u: BaseEntity, v: BaseEntity) -> bool:
    """Check if a pair of nodes has any contradictions in their causal relationships.

    Assumes both nodes are in the graph.
    """
    relations = set()
    for data in graph[u][v].values():
        relations.add(data[RELATION])
    return relation_set_has_contradictions(relations)
python
{ "resource": "" }
q262136
relation_set_has_contradictions
validation
def relation_set_has_contradictions(relations: Set[str]) -> bool:
    """Return if the set of BEL relations contains a contradiction."""
    has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)
    has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)
    has_cnc = CAUSES_NO_CHANGE in relations
    # contradictory when two or more mutually exclusive relation classes co-occur
    return sum([has_cnc, has_decreases, has_increases]) > 1
python
{ "resource": "" }
q262137
single_run_arrays
validation
def single_run_arrays(spanning_cluster=True, **kwargs):
    r'''
    Generate statistics for a single run

    This is a stand-alone helper function to evolve a single sample state
    (realization) and return the cluster statistics.

    Parameters
    ----------
    spanning_cluster : bool, optional
        Whether to detect a spanning cluster or not.
        Defaults to ``True``.

    kwargs : keyword arguments
        Piped through to :func:`sample_states`

    Returns
    -------
    ret : dict
        Cluster statistics

    ret['N'] : int
        Total number of sites

    ret['M'] : int
        Total number of bonds

    ret['max_cluster_size'] : 1-D :py:class:`numpy.ndarray` of int, size ``ret['M'] + 1``
        Array of the sizes of the largest cluster (absolute number of sites)
        at the respective occupation number.

    ret['has_spanning_cluster'] : 1-D :py:class:`numpy.ndarray` of bool, size ``ret['M'] + 1``
        Array of booleans for each occupation number.
        The respective entry is ``True`` if there is a spanning cluster,
        ``False`` otherwise.
        Only exists if `spanning_cluster` argument is set to ``True``.

    ret['moments'] : 2-D :py:class:`numpy.ndarray` of int
        Array of shape ``(5, ret['M'] + 1)``.
        The ``(k, m)``-th entry is the ``k``-th raw moment of the (absolute)
        cluster size distribution, with ``k`` ranging from ``0`` to ``4``, at
        occupation number ``m``.

    See Also
    --------
    sample_states
    '''
    # initial iteration
    # we do not need a copy of the result dictionary since we copy the values
    # anyway
    kwargs['copy_result'] = False
    ret = dict()

    for n, state in enumerate(sample_states(
        spanning_cluster=spanning_cluster, **kwargs
    )):

        # merge cluster statistics
        if 'N' in ret:
            assert ret['N'] == state['N']
        else:
            ret['N'] = state['N']

        if 'M' in ret:
            assert ret['M'] == state['M']
        else:
            ret['M'] = state['M']
            number_of_states = state['M'] + 1

            # allocate the output arrays once, on the first iteration only —
            # re-allocating on every iteration would discard earlier samples
            max_cluster_size = np.empty(number_of_states)
            if spanning_cluster:
                # BUG FIX: ``np.bool`` was deprecated in NumPy 1.20 and
                # removed in 1.24; the builtin ``bool`` is the documented
                # replacement and yields the same dtype
                has_spanning_cluster = np.empty(number_of_states, dtype=bool)
            moments = np.empty((5, number_of_states))

        max_cluster_size[n] = state['max_cluster_size']
        for k in range(5):
            moments[k, n] = state['moments'][k]
        if spanning_cluster:
            has_spanning_cluster[n] = state['has_spanning_cluster']

    ret['max_cluster_size'] = max_cluster_size
    ret['moments'] = moments
    if spanning_cluster:
        ret['has_spanning_cluster'] = has_spanning_cluster

    return ret
python
{ "resource": "" }
q262138
_microcanonical_average_spanning_cluster
validation
def _microcanonical_average_spanning_cluster(has_spanning_cluster, alpha): r''' Compute the average number of runs that have a spanning cluster Helper function for :func:`microcanonical_averages` Parameters ---------- has_spanning_cluster : 1-D :py:class:`numpy.ndarray` of bool Each entry is the ``has_spanning_cluster`` field of the output of :func:`sample_states`: An entry is ``True`` if there is a spanning cluster in that respective run, and ``False`` otherwise. alpha : float Significance level. Returns ------- ret : dict Spanning cluster statistics ret['spanning_cluster'] : float The average relative number (Binomial proportion) of runs that have a spanning cluster. This is the Bayesian point estimate of the posterior mean, with a uniform prior. ret['spanning_cluster_ci'] : 1-D :py:class:`numpy.ndarray` of float, size 2 The lower and upper bounds of the Binomial proportion confidence interval with uniform prior. See Also -------- sample_states : spanning cluster detection microcanonical_averages : spanning cluster statistics Notes ----- Averages and confidence intervals for Binomial proportions As Cameron [8]_ puts it, the normal approximation to the confidence interval for a Binomial proportion :math:`p` "suffers a *systematic* decline in performance (...) towards extreme values of :math:`p` near :math:`0` and :math:`1`, generating binomial [confidence intervals] with effective coverage far below the desired level." (see also References [6]_ and [7]_). A different approach to quantifying uncertainty is Bayesian inference. [5]_ For :math:`n` independent Bernoulli trails with common success probability :math:`p`, the *likelihood* to have :math:`k` successes given :math:`p` is the binomial distribution .. math:: P(k|p) = \binom{n}{k} p^k (1-p)^{n-k} \equiv B(a,b), where :math:`B(a, b)` is the *Beta distribution* with parameters :math:`a = k + 1` and :math:`b = n - k + 1`. Assuming a uniform prior :math:`P(p) = 1`, the *posterior* is [5]_ .. 
math:: P(p|k) = P(k|p)=B(a,b). A point estimate is the posterior mean .. math:: \bar{p} = \frac{k+1}{n+2} with the :math:`1 - \alpha` credible interval :math:`(p_l, p_u)` given by .. math:: \int_0^{p_l} dp B(a,b) = \int_{p_u}^1 dp B(a,b) = \frac{\alpha}{2}. References ---------- .. [5] Wasserman, L. All of Statistics (Springer New York, 2004), `doi:10.1007/978-0-387-21736-9 <http://dx.doi.org/10.1007/978-0-387-21736-9>`_. .. [6] DasGupta, A., Cai, T. T. & Brown, L. D. Interval Estimation for a Binomial Proportion. Statistical Science 16, 101-133 (2001). `doi:10.1214/ss/1009213286 <http://dx.doi.org/10.1214/ss/1009213286>`_. .. [7] Agresti, A. & Coull, B. A. Approximate is Better than "Exact" for Interval Estimation of Binomial Proportions. The American Statistician 52, 119-126 (1998), `doi:10.2307/2685469 <http://dx.doi.org/10.2307/2685469>`_. .. [8] Cameron, E. On the Estimation of Confidence Intervals for Binomial Population Proportions in Astronomy: The Simplicity and Superiority of the Bayesian Approach. Publications of the Astronomical Society of Australia 28, 128-139 (2011), `doi:10.1071/as10046 <http://dx.doi.org/10.1071/as10046>`_. ''' ret = dict() runs = has_spanning_cluster.size # Bayesian posterior mean for Binomial proportion (uniform prior) k = has_spanning_cluster.sum(dtype=np.float) ret['spanning_cluster'] = ( (k + 1) / (runs + 2) ) # Bayesian credible interval for Binomial proportion (uniform # prior) ret['spanning_cluster_ci'] = scipy.stats.beta.ppf( [alpha / 2, 1 - alpha / 2], k + 1, runs - k + 1 ) return ret
python
{ "resource": "" }
q262139
_microcanonical_average_max_cluster_size
validation
def _microcanonical_average_max_cluster_size(max_cluster_size, alpha): """ Compute the average size of the largest cluster Helper function for :func:`microcanonical_averages` Parameters ---------- max_cluster_size : 1-D :py:class:`numpy.ndarray` of int Each entry is the ``max_cluster_size`` field of the output of :func:`sample_states`: The size of the largest cluster (absolute number of sites). alpha: float Significance level. Returns ------- ret : dict Largest cluster statistics ret['max_cluster_size'] : float Average size of the largest cluster (absolute number of sites) ret['max_cluster_size_ci'] : 1-D :py:class:`numpy.ndarray` of float, size 2 Lower and upper bounds of the normal confidence interval of the average size of the largest cluster (absolute number of sites) See Also -------- sample_states : largest cluster detection microcanonical_averages : largest cluster statistics """ ret = dict() runs = max_cluster_size.size sqrt_n = np.sqrt(runs) max_cluster_size_sample_mean = max_cluster_size.mean() ret['max_cluster_size'] = max_cluster_size_sample_mean max_cluster_size_sample_std = max_cluster_size.std(ddof=1) if max_cluster_size_sample_std: old_settings = np.seterr(all='raise') ret['max_cluster_size_ci'] = scipy.stats.t.interval( 1 - alpha, df=runs - 1, loc=max_cluster_size_sample_mean, scale=max_cluster_size_sample_std / sqrt_n ) np.seterr(**old_settings) else: ret['max_cluster_size_ci'] = ( max_cluster_size_sample_mean * np.ones(2) ) return ret
python
{ "resource": "" }
q262140
_microcanonical_average_moments
validation
def _microcanonical_average_moments(moments, alpha): """ Compute the average moments of the cluster size distributions Helper function for :func:`microcanonical_averages` Parameters ---------- moments : 2-D :py:class:`numpy.ndarray` of int ``moments.shape[1] == 5`. Each array ``moments[r, :]`` is the ``moments`` field of the output of :func:`sample_states`: The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution. alpha: float Significance level. Returns ------- ret : dict Moment statistics ret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5 The ``k``-th entry is the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. ret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2) ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal confidence interval of the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See Also -------- sample_states : computation of moments microcanonical_averages : moment statistics """ ret = dict() runs = moments.shape[0] sqrt_n = np.sqrt(runs) moments_sample_mean = moments.mean(axis=0) ret['moments'] = moments_sample_mean moments_sample_std = moments.std(axis=0, ddof=1) ret['moments_ci'] = np.empty((5, 2)) for k in range(5): if moments_sample_std[k]: old_settings = np.seterr(all='raise') ret['moments_ci'][k] = scipy.stats.t.interval( 1 - alpha, df=runs - 1, loc=moments_sample_mean[k], scale=moments_sample_std[k] / sqrt_n ) np.seterr(**old_settings) else: ret['moments_ci'][k] = ( moments_sample_mean[k] * np.ones(2) ) return ret
python
{ "resource": "" }
q262141
microcanonical_averages
validation
def microcanonical_averages(
    graph, runs=40, spanning_cluster=True, model='bond', alpha=alpha_1sigma,
    copy_result=True
):
    r'''
    Generate successive microcanonical percolation ensemble averages.

    This is a :ref:`generator function <python:tut-generators>` that adds one
    edge at a time from the graph to the percolation model, in ``runs``
    independent runs in parallel, and yields the averaged cluster statistics
    after each step (Newman-Ziff algorithm [9]_).  The first iteration yields
    the trivial microcanonical ensemble with :math:`n = 0` occupied bonds.

    Parameters
    ----------
    graph : networkx.Graph
        The substrate graph on which percolation is to take place.
    runs : int, optional
        Number of independent runs. Defaults to ``40``.
    spanning_cluster : bool, optional
        Whether to detect spanning clusters. Defaults to ``True``.
    model : str, optional
        The percolation model; only ``'bond'`` is supported yet.
        Defaults to ``'bond'``.
    alpha : float, optional
        Significance level; ``1 - alpha`` is the confidence level.
        Defaults to 1 sigma of the normal distribution.
    copy_result : bool, optional
        Whether to yield a copy or a reference to the result dictionary.
        Defaults to ``True``.

    Yields
    ------
    ret : dict
        Cluster statistics for the current number of occupied bonds:
        ``'n'``, ``'N'``, ``'M'``, ``'max_cluster_size'`` (+ ``_ci``),
        ``'moments'`` (+ ``_ci``), and — if `spanning_cluster` — the
        spanning cluster probability (+ ``_ci``).

    Raises
    ------
    ValueError
        If `runs` is not a positive integer.
    ValueError
        If `alpha` is not a float in the interval (0, 1).

    See Also
    --------
    sample_states
    percolate.percolate._microcanonical_average_spanning_cluster
    percolate.percolate._microcanonical_average_max_cluster_size

    References
    ----------
    .. [9] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site
       or bond percolation. Physical Review E 64, 016706+ (2001),
       `doi:10.1103/physreve.64.016706
       <http://dx.doi.org/10.1103/physreve.64.016706>`_.
    '''
    # FIX: the original used bare ``except:`` clauses here, which also
    # swallow SystemExit/KeyboardInterrupt; only conversion errors should
    # be translated into ValueError.
    try:
        runs = int(runs)
    except (TypeError, ValueError):
        raise ValueError("runs needs to be a positive integer")
    if runs <= 0:
        raise ValueError("runs needs to be a positive integer")

    try:
        alpha = float(alpha)
    except (TypeError, ValueError):
        raise ValueError("alpha needs to be a float in the interval (0, 1)")
    if alpha <= 0.0 or alpha >= 1.0:
        raise ValueError("alpha needs to be a float in the interval (0, 1)")

    # One independent Newman-Ziff run per iterator; copy_result=False since
    # the per-run values are copied into the aggregate arrays below anyway.
    run_iterators = [
        sample_states(
            graph,
            spanning_cluster=spanning_cluster,
            model=model,
            copy_result=False,
        )
        for _ in range(runs)
    ]

    ret = dict()
    for microcanonical_ensemble in zip(*run_iterators):
        # All runs advance in lock step: same number n of occupied bonds.
        ret['n'] = microcanonical_ensemble[0]['n']
        ret['N'] = microcanonical_ensemble[0]['N']
        ret['M'] = microcanonical_ensemble[0]['M']

        max_cluster_size = np.empty(runs)
        moments = np.empty((runs, 5))
        if spanning_cluster:
            has_spanning_cluster = np.empty(runs)

        for r, state in enumerate(microcanonical_ensemble):
            assert state['n'] == ret['n']
            assert state['N'] == ret['N']
            assert state['M'] == ret['M']
            max_cluster_size[r] = state['max_cluster_size']
            moments[r] = state['moments']
            if spanning_cluster:
                has_spanning_cluster[r] = state['has_spanning_cluster']

        ret.update(_microcanonical_average_max_cluster_size(
            max_cluster_size, alpha
        ))
        ret.update(_microcanonical_average_moments(moments, alpha))
        if spanning_cluster:
            ret.update(_microcanonical_average_spanning_cluster(
                has_spanning_cluster, alpha
            ))

        if copy_result:
            yield copy.deepcopy(ret)
        else:
            yield ret
python
{ "resource": "" }
q262142
spanning_1d_chain
validation
def spanning_1d_chain(length):
    """Build a linear chain decorated for spanning-cluster detection.

    Parameters
    ----------
    length : int
        Number of chain nodes, excluding the two auxiliary boundary nodes.

    Returns
    -------
    networkx.Graph
        Chain of ``length + 2`` nodes; the first and last node (and the
        edges attaching them) carry a ``'span'`` attribute marking the
        two boundary sides (``0`` and ``1``).

    See Also
    --------
    sample_states : spanning cluster detection
    """
    # NOTE(review): Graph.node was removed in networkx 2.4 — on newer
    # networkx this needs ``chain.nodes[...]`` instead; confirm the pinned
    # networkx version.
    chain = nx.grid_graph(dim=[int(length + 2)])

    # Left auxiliary node and its edge mark boundary side 0.
    chain.node[0]['span'] = 0
    chain[0][1]['span'] = 0

    # Right auxiliary node and its edge mark boundary side 1.
    chain.node[length + 1]['span'] = 1
    chain[length][length + 1]['span'] = 1

    return chain
python
{ "resource": "" }
q262143
spanning_2d_grid
validation
def spanning_2d_grid(length):
    """Build a square lattice decorated for spanning-cluster detection.

    Parameters
    ----------
    length : int
        Number of nodes along one dimension, excluding the two auxiliary
        boundary columns.

    Returns
    -------
    networkx.Graph
        ``(length + 2) x length`` grid; the outermost columns (and the
        edges attaching them) carry a ``'span'`` attribute marking the
        two boundary sides (``0`` and ``1``).

    See Also
    --------
    sample_states : spanning cluster detection
    """
    # NOTE(review): Graph.node was removed in networkx 2.4 — on newer
    # networkx this needs ``lattice.nodes[...]``; confirm the pinned version.
    lattice = nx.grid_2d_graph(length + 2, length)

    for row in range(length):
        # Column 0 is the auxiliary boundary on side 0.
        lattice.node[(0, row)]['span'] = 0
        lattice[(0, row)][(1, row)]['span'] = 0

        # Column length + 1 is the auxiliary boundary on side 1.
        lattice.node[(length + 1, row)]['span'] = 1
        lattice[(length + 1, row)][(length, row)]['span'] = 1

    return lattice
python
{ "resource": "" }
q262144
microcanonical_averages_arrays
validation
def microcanonical_averages_arrays(microcanonical_averages):
    """Aggregate microcanonical averages of all iteration steps into arrays.

    Parameters
    ----------
    microcanonical_averages : iterable
        Typically the :func:`microcanonical_averages` generator: one dict of
        cluster statistics per number ``n`` of occupied bonds.

    Returns
    -------
    dict
        ``'N'``/``'M'``: total number of sites and bonds;
        ``'max_cluster_size'`` (size M + 1) and ``'max_cluster_size_ci'``
        (shape (M + 1, 2)): percolation strength and its CI, normalized by
        the number of sites; ``'moments'`` (shape (5, M + 1)) and
        ``'moments_ci'`` (shape (5, M + 1, 2)): normalized raw moments and
        their CIs; plus ``'spanning_cluster'``/``'spanning_cluster_ci'`` if
        present in the input dicts.

    See Also
    --------
    microcanonical_averages
    """
    ret = dict()

    for n, avg in enumerate(microcanonical_averages):
        assert n == avg['n']

        if n == 0:
            # First step: read the problem sizes and allocate output arrays.
            num_edges = avg['M']
            num_sites = avg['N']
            spanning_cluster = 'spanning_cluster' in avg
            ret['max_cluster_size'] = np.empty(num_edges + 1)
            ret['max_cluster_size_ci'] = np.empty((num_edges + 1, 2))
            if spanning_cluster:
                ret['spanning_cluster'] = np.empty(num_edges + 1)
                ret['spanning_cluster_ci'] = np.empty((num_edges + 1, 2))
            ret['moments'] = np.empty((5, num_edges + 1))
            ret['moments_ci'] = np.empty((5, num_edges + 1, 2))

        ret['max_cluster_size'][n] = avg['max_cluster_size']
        ret['max_cluster_size_ci'][n] = avg['max_cluster_size_ci']
        if spanning_cluster:
            ret['spanning_cluster'][n] = avg['spanning_cluster']
            ret['spanning_cluster_ci'][n] = avg['spanning_cluster_ci']
        ret['moments'][:, n] = avg['moments']
        ret['moments_ci'][:, n] = avg['moments_ci']

    # Express cluster sizes and moments relative to the number of sites;
    # the spanning-cluster probability is already normalized.
    for key in ret:
        if 'spanning_cluster' not in key:
            ret[key] /= num_sites

    ret['M'] = num_edges
    ret['N'] = num_sites
    return ret
python
{ "resource": "" }
q262145
_binomial_pmf
validation
def _binomial_pmf(n, p): """ Compute the binomial PMF according to Newman and Ziff Helper function for :func:`canonical_averages` See Also -------- canonical_averages Notes ----- See Newman & Ziff, Equation (10) [10]_ References ---------- .. [10] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site or bond percolation. Physical Review E 64, 016706+ (2001), `doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_. """ n = int(n) ret = np.empty(n + 1) nmax = int(np.round(p * n)) ret[nmax] = 1.0 old_settings = np.seterr(under='ignore') # seterr to known value for i in range(nmax + 1, n + 1): ret[i] = ret[i - 1] * (n - i + 1.0) / i * p / (1.0 - p) for i in range(nmax - 1, -1, -1): ret[i] = ret[i + 1] * (i + 1.0) / (n - i) * (1.0 - p) / p np.seterr(**old_settings) # reset to default return ret / ret.sum()
python
{ "resource": "" }
q262146
canonical_averages
validation
def canonical_averages(ps, microcanonical_averages_arrays):
    """Compute canonical cluster statistics from microcanonical statistics.

    For every occupation probability in `ps`, forms the canonical
    (fixed-``p``) ensemble as the binomial convolution of the microcanonical
    (fixed-``n``) ensembles, following Newman & Ziff, Equation (2).  The
    bounds of the confidence intervals are simply averaged with the same
    binomial weights.

    Parameters
    ----------
    ps : iterable of float
        Occupation probabilities at which to evaluate the canonical ensemble.
    microcanonical_averages_arrays
        Typically the output of :func:`microcanonical_averages_arrays`.

    Returns
    -------
    ret : dict
        Canonical ensemble cluster statistics: ``'ps'``, ``'N'``, ``'M'``,
        ``'max_cluster_size'`` (+ ``_ci``), ``'moments'`` (+ ``_ci``), and
        ``'spanning_cluster'`` (+ ``_ci``) if present in the input.

    See Also
    --------
    microcanonical_averages
    microcanonical_averages_arrays
    """
    num_sites = microcanonical_averages_arrays['N']
    num_edges = microcanonical_averages_arrays['M']
    spanning_cluster = ('spanning_cluster' in microcanonical_averages_arrays)

    ret = dict()
    ret['ps'] = ps
    ret['N'] = num_sites
    ret['M'] = num_edges

    ret['max_cluster_size'] = np.empty(ps.size)
    ret['max_cluster_size_ci'] = np.empty((ps.size, 2))

    if spanning_cluster:
        ret['spanning_cluster'] = np.empty(ps.size)
        ret['spanning_cluster_ci'] = np.empty((ps.size, 2))

    ret['moments'] = np.empty((5, ps.size))
    ret['moments_ci'] = np.empty((5, ps.size, 2))

    for p_index, p in enumerate(ps):
        # Binomial weights B(M, n; p) of the microcanonical ensembles.
        binomials = _binomial_pmf(n=num_edges, p=p)

        for key, value in microcanonical_averages_arrays.items():
            # Skip the scalar entries 'N' and 'M' (single-character keys).
            if len(key) <= 1:
                continue

            if key in ['max_cluster_size', 'spanning_cluster']:
                ret[key][p_index] = np.sum(binomials * value)
            elif key in ['max_cluster_size_ci', 'spanning_cluster_ci']:
                # Weight both CI bounds with the same binomial factors.
                ret[key][p_index] = np.sum(
                    np.tile(binomials, (2, 1)).T * value, axis=0
                )
            elif key == 'moments':
                ret[key][:, p_index] = np.sum(
                    np.tile(binomials, (5, 1)) * value, axis=1
                )
            elif key == 'moments_ci':
                # Broadcast the weights over the (5, M + 1, 2) CI array.
                ret[key][:, p_index] = np.sum(
                    np.rollaxis(np.tile(binomials, (5, 2, 1)), 2, 1) * value,
                    axis=1
                )
            else:
                raise NotImplementedError(
                    '{}-dimensional array'.format(value.ndim)
                )

    return ret
python
{ "resource": "" }
q262147
statistics
validation
def statistics(
    graph, ps, spanning_cluster=True, model='bond', alpha=alpha_1sigma,
    runs=40
):
    """Convenience wrapper computing canonical percolation statistics.

    Chains :func:`microcanonical_averages`,
    :func:`microcanonical_averages_arrays` and :func:`canonical_averages`.

    See Also
    --------
    canonical_averages
    microcanonical_averages
    sample_states
    """
    averages = microcanonical_averages(
        graph=graph,
        runs=runs,
        spanning_cluster=spanning_cluster,
        model=model,
        alpha=alpha,
    )
    return canonical_averages(ps, microcanonical_averages_arrays(averages))
python
{ "resource": "" }
q262148
rank_causalr_hypothesis
validation
def rank_causalr_hypothesis(graph, node_to_regulation, regulator_node):
    """Test the regulator hypothesis of the given node on the input data using the algorithm.

    Note: this method returns both the positively and the negatively signed
    hypotheses.

    Algorithm:

    1. Calculate the shortest path between the regulator node and each node
       in ``node_to_regulation``.
    2. Calculate the concordance of the causal network and the observed
       regulation when there is a path between target node and regulator
       node.

    :param networkx.DiGraph graph: A causal graph
    :param dict node_to_regulation: Nodes to score (1, -1, 0)
    :return: Two dictionaries with hypothesis results
             (keys: score, correct, incorrect, ambiguous)
    :rtype: tuple[dict,dict]
    """
    up = {'correct': 0, 'incorrect': 0, 'ambiguous': 0}
    down = {'correct': 0, 'incorrect': 0, 'ambiguous': 0}

    targets = [node for node in node_to_regulation if node != regulator_node]

    for _, target, prediction in run_cna(graph, regulator_node, targets):
        if prediction is Effect.no_effect:
            continue
        if prediction is Effect.ambiguous:
            up['ambiguous'] += 1
            down['ambiguous'] += 1
        elif (
            (prediction is Effect.inhibition or prediction is Effect.activation)
            and prediction.value == node_to_regulation[target]
        ):
            # Prediction concordant with the observed regulation.
            up['correct'] += 1
            down['incorrect'] += 1
        else:
            # Causal prediction discordant with the observed regulation.
            up['incorrect'] += 1
            down['correct'] += 1

    up['score'] = up['correct'] - up['incorrect']
    down['score'] = down['correct'] - down['incorrect']

    return up, down
python
{ "resource": "" }
q262149
get_path_effect
validation
def get_path_effect(graph, path, relationship_dict):
    """Calculate the final effect of the root node to the sink node in the path.

    :param pybel.BELGraph graph: A BEL graph
    :param list path: Path from root to sink node
    :param dict relationship_dict: dictionary with relationship effects
    :rtype: Effect
    """
    effects = []

    for source, target in pairwise(path):
        # Contradictory evidence between adjacent nodes poisons the path.
        if pair_has_contradiction(graph, source, target):
            return Effect.ambiguous

        best_edge_key, _, _ = rank_edges(graph.get_edge_data(source, target))
        relation = graph[source][target][best_edge_key][RELATION]

        # A non-causal edge anywhere on the path means no overall effect.
        if relationship_dict.get(relation, 0) == 0:
            return Effect.no_effect

        effects.append(relationship_dict[relation])

    # Sign of the product of +1/-1 factors along the path.
    overall = reduce(lambda acc, factor: acc * factor, effects)
    return Effect.activation if overall == 1 else Effect.inhibition
python
{ "resource": "" }
q262150
rank_edges
validation
def rank_edges(edges, edge_ranking=None):
    """Return the highest ranked edge from a multiedge.

    :param dict edges: dictionary with all edges between two nodes
    :param dict edge_ranking: A dictionary of {relationship: score}
    :return: Highest ranked edge as a tuple (edge id, relation, score)
    :rtype: tuple
    """
    if edge_ranking is None:
        edge_ranking = default_edge_ranking

    def score(item):
        """Map an (edge id, edge data) pair to (edge id, relation, score)."""
        edge_id, edge_data = item
        relation = edge_data[RELATION]
        return edge_id, relation, edge_ranking[relation]

    return max(map(score, edges.items()), key=itemgetter(2))
python
{ "resource": "" }
q262151
group_nodes_by_annotation
validation
def group_nodes_by_annotation(graph: BELGraph, annotation: str = 'Subgraph') -> Mapping[str, Set[BaseEntity]]:
    """Group the nodes occurring in edges by the given annotation."""
    groups = defaultdict(set)

    for source, target, data in graph.edges(data=True):
        if edge_has_annotation(data, annotation):
            # Both endpoints belong to the annotated group.
            groups[data[ANNOTATIONS][annotation]].update((source, target))

    return dict(groups)
python
{ "resource": "" }
q262152
average_node_annotation
validation
def average_node_annotation(graph: BELGraph,
                            key: str,
                            annotation: str = 'Subgraph',
                            aggregator: Optional[Callable[[Iterable[X]], X]] = None,
                            ) -> Mapping[str, X]:
    """Group the graph by an annotation and aggregate one node-data key per group.

    :param graph: A BEL graph
    :param key: The key in the node data dictionary representing the experimental data
    :param annotation: A BEL annotation to use to group nodes
    :param aggregator: A function from list of values -> aggregate value.
     Defaults to the arithmetic mean of a list of floats.
    """
    if aggregator is None:
        def aggregator(values):
            """Calculate the average."""
            return sum(values) / len(values)

    return {
        subgraph: aggregator([
            graph.nodes[node][key]
            for node in nodes
            if key in graph.nodes[node]
        ])
        for subgraph, nodes in group_nodes_by_annotation(graph, annotation).items()
    }
python
{ "resource": "" }
q262153
group_nodes_by_annotation_filtered
validation
def group_nodes_by_annotation_filtered(graph: BELGraph,
                                       node_predicates: NodePredicates = None,
                                       annotation: str = 'Subgraph',
                                       ) -> Mapping[str, Set[BaseEntity]]:
    """Group the nodes occurring in edges by the given annotation, with a node filter applied.

    :param graph: A BEL graph
    :param node_predicates: A predicate or list of predicates (graph, node) -> bool
    :param annotation: The annotation to use for grouping
    :return: A dictionary of {annotation value: set of nodes}
    """
    passes = concatenate_node_predicates(node_predicates)

    return {
        value: {node for node in nodes if passes(graph, node)}
        for value, nodes in group_nodes_by_annotation(graph, annotation).items()
    }
python
{ "resource": "" }
q262154
build_expand_node_neighborhood_by_hash
validation
def build_expand_node_neighborhood_by_hash(manager: Manager) -> Callable[[BELGraph, BELGraph, str], None]:
    """Make an expand function that's bound to the manager.

    The returned function resolves a node by its hash through the given
    ``manager`` and expands ``graph`` with that node's neighborhood taken
    from ``universe``.
    """
    @uni_in_place_transformation
    def expand_node_neighborhood_by_hash(universe: BELGraph, graph: BELGraph, node_hash: str) -> None:
        """Expand around the neighborhoods of a node by identifier."""
        # Resolve the hash to a concrete node via the bound manager.
        node = manager.get_dsl_by_hash(node_hash)
        return expand_node_neighborhood(universe, graph, node)

    return expand_node_neighborhood_by_hash
python
{ "resource": "" }
q262155
build_delete_node_by_hash
validation
def build_delete_node_by_hash(manager: Manager) -> Callable[[BELGraph, str], None]:
    """Make a delete function that's bound to the manager.

    The returned function resolves a node by its hash through the given
    ``manager`` and removes it from the graph in place.
    """
    @in_place_transformation
    def delete_node_by_hash(graph: BELGraph, node_hash: str) -> None:
        """Remove a node by identifier."""
        # Resolve the hash to a concrete node via the bound manager.
        node = manager.get_dsl_by_hash(node_hash)
        graph.remove_node(node)

    return delete_node_by_hash
python
{ "resource": "" }
q262156
bel_to_spia_matrices
validation
def bel_to_spia_matrices(graph: BELGraph) -> Mapping[str, pd.DataFrame]:
    """Create an excel sheet ready to be used in SPIA software.

    :param graph: BELGraph
    :return: dictionary with matrices
    """
    index_nodes = get_matrix_index(graph)
    spia_matrices = build_spia_matrices(index_nodes)

    for u, v, edge_data in graph.edges(data=True):
        # Expand each endpoint to its CentralDogma representatives: the node
        # itself, or — for list abundances — its CentralDogma members.
        if isinstance(u, CentralDogma):
            sources = [u]
        elif isinstance(u, ListAbundance):
            sources = [member for member in u.members if isinstance(member, CentralDogma)]
        else:
            continue  # not a valid edge subject

        if isinstance(v, CentralDogma):
            targets = [v]
        elif isinstance(v, ListAbundance):
            targets = [member for member in v.members if isinstance(member, CentralDogma)]
        else:
            continue  # not a valid edge object

        # Record every (CentralDogma, CentralDogma) pair for this edge.
        for source, target in product(sources, targets):
            update_spia_matrices(spia_matrices, source, target, edge_data)

    return spia_matrices
python
{ "resource": "" }
q262157
build_spia_matrices
validation
def build_spia_matrices(nodes: Set[str]) -> Dict[str, pd.DataFrame]:
    """Build an adjacency matrix for each KEGG relationship and return in a dictionary.

    :param nodes: A set of HGNC gene symbols
    :return: Dictionary of adjacency matrix for each relationship
    """
    labels = sorted(nodes)

    # Keep insertion order so the sheets come out in KEGG_RELATIONS order.
    return OrderedDict(
        (relation, pd.DataFrame(0, index=labels, columns=labels))
        for relation in KEGG_RELATIONS
    )
python
{ "resource": "" }
q262158
update_spia_matrices
validation
def update_spia_matrices(spia_matrices: Dict[str, pd.DataFrame],
                         u: CentralDogma,
                         v: CentralDogma,
                         edge_data: EdgeData,
                         ) -> None:
    """Populate the adjacency matrix for one edge.

    Only HGNC-namespaced pairs are recorded.  The target matrix is chosen by
    the edge relation and, for causal relations, by any protein modification
    ("Ub"/"Ph") on the object node.

    NOTE(review): cells are written as ``matrix[u_name][v_name]``, i.e.
    column ``u_name``, row ``v_name`` — confirm this orientation matches
    what the SPIA R import expects.
    """
    # Only gene-symbol (HGNC) pairs participate in the SPIA matrices.
    if u.namespace.upper() != 'HGNC' or v.namespace.upper() != 'HGNC':
        return

    u_name = u.name
    v_name = v.name
    relation = edge_data[RELATION]

    if relation in CAUSAL_INCREASE_RELATIONS:
        # If it has pmod check which one and add it to the corresponding matrix
        if v.variants and any(isinstance(variant, ProteinModification) for variant in v.variants):
            for variant in v.variants:
                if not isinstance(variant, ProteinModification):
                    continue
                if variant[IDENTIFIER][NAME] == "Ub":
                    spia_matrices["activation_ubiquination"][u_name][v_name] = 1
                elif variant[IDENTIFIER][NAME] == "Ph":
                    spia_matrices["activation_phosphorylation"][u_name][v_name] = 1
        elif isinstance(v, (Gene, Rna)):
            # Normal increase of a gene/RNA object counts as expression
            spia_matrices['expression'][u_name][v_name] = 1
        else:
            spia_matrices['activation'][u_name][v_name] = 1

    elif relation in CAUSAL_DECREASE_RELATIONS:
        # If it has pmod check which one and add it to the corresponding matrix
        if v.variants and any(isinstance(variant, ProteinModification) for variant in v.variants):
            for variant in v.variants:
                if not isinstance(variant, ProteinModification):
                    continue
                if variant[IDENTIFIER][NAME] == "Ub":
                    spia_matrices['inhibition_ubiquination'][u_name][v_name] = 1
                elif variant[IDENTIFIER][NAME] == "Ph":
                    spia_matrices["inhibition_phosphorylation"][u_name][v_name] = 1
        elif isinstance(v, (Gene, Rna)):
            # Normal decrease of a gene/RNA object counts as repression
            spia_matrices["repression"][u_name][v_name] = 1
        else:
            spia_matrices["inhibition"][u_name][v_name] = 1

    elif relation == ASSOCIATION:
        spia_matrices["binding_association"][u_name][v_name] = 1
python
{ "resource": "" }
q262159
spia_matrices_to_excel
validation
def spia_matrices_to_excel(spia_matrices: Mapping[str, pd.DataFrame], path: str) -> None:
    """Export a SPIA data dictionary into an Excel sheet at the given path.

    .. note::

        # The R import should add the values:
        # ["nodes"] from the columns
        # ["title"] from the name of the file
        # ["NumberOfReactions"] set to "0"
    """
    writer = pd.ExcelWriter(path, engine='xlsxwriter')

    # One sheet per relationship, named after the relationship.
    for relation, matrix in spia_matrices.items():
        matrix.to_excel(writer, sheet_name=relation, index=False)

    writer.save()
python
{ "resource": "" }
q262160
spia_matrices_to_tsvs
validation
def spia_matrices_to_tsvs(spia_matrices: Mapping[str, pd.DataFrame], directory: str) -> None:
    """Export a SPIA data dictionary into a directory as several TSV documents.

    :param spia_matrices: Dictionary of {relationship: adjacency matrix}
    :param directory: Output directory; created if it does not exist.
    """
    os.makedirs(directory, exist_ok=True)
    for relation, matrix in spia_matrices.items():
        # FIX: files are named .tsv, so write them tab-separated; the
        # original relied on to_csv's default comma separator.
        destination = os.path.join(directory, f'{relation}.tsv')
        matrix.to_csv(destination, sep='\t', index=True)
python
{ "resource": "" }
q262161
main
validation
def main(graph: BELGraph, xlsx: str, tsvs: str):
    """Export the graph to a SPIA Excel sheet."""
    if not (xlsx or tsvs):
        click.secho('Specify at least one option --xlsx or --tsvs', fg='red')
        sys.exit(1)

    spia_matrices = bel_to_spia_matrices(graph)

    if xlsx:
        spia_matrices_to_excel(spia_matrices, xlsx)

    if tsvs:
        spia_matrices_to_tsvs(spia_matrices, tsvs)
python
{ "resource": "" }
q262162
overlay_data
validation
def overlay_data(graph: BELGraph,
                 data: Mapping[BaseEntity, Any],
                 label: Optional[str] = None,
                 overwrite: bool = False,
                 ) -> None:
    """Overlay tabular data on the network.

    :param graph: A BEL Graph
    :param data: A dictionary of {tuple node: data for that node}
    :param label: The annotation label to put in the node dictionary
    :param overwrite: Should old annotations be overwritten?
    """
    if label is None:
        label = WEIGHT

    for node, value in data.items():
        if node not in graph:
            log.debug('%s not in graph', node)
        elif label in graph.nodes[node] and not overwrite:
            # Keep the existing annotation unless overwriting was requested.
            log.debug('%s already on %s', label, node)
        else:
            graph.nodes[node][label] = value
python
{ "resource": "" }
q262163
overlay_type_data
validation
def overlay_type_data(graph: BELGraph,
                      data: Mapping[str, float],
                      func: str,
                      namespace: str,
                      label: Optional[str] = None,
                      overwrite: bool = False,
                      impute: Optional[float] = None,
                      ) -> None:
    """Overlay tabular data on the network for data that comes from a data set with identifiers that lack namespaces.

    For example, use this to overlay differential gene expression data from
    a table keyed by HGNC gene symbols without explicit namespace/function
    annotations.

    :param graph: A BEL Graph
    :param data: A dictionary of {name: data}
    :param func: The function of the keys in the data dictionary
    :param namespace: The namespace of the keys in the data dictionary
    :param label: The annotation label to put in the node dictionary
    :param overwrite: Should old annotations be overwritten?
    :param impute: The value to use for missing data
    """
    node_matches = function_namespace_inclusion_builder(func, namespace)

    keyed_data = {
        node: data.get(node[NAME], impute)
        for node in filter_nodes(graph, node_matches)
    }

    overlay_data(graph, keyed_data, label=label, overwrite=overwrite)
python
{ "resource": "" }
q262164
load_differential_gene_expression
validation
def load_differential_gene_expression(path: str,
                                      gene_symbol_column: str = 'Gene.symbol',
                                      logfc_column: str = 'logFC',
                                      aggregator: Optional[Callable[[List[float]], float]] = None,
                                      ) -> Mapping[str, float]:
    """Load and pre-process a differential gene expression data.

    :param path: The path to the CSV
    :param gene_symbol_column: The header of the gene symbol column in the data frame
    :param logfc_column: The header of the log-fold-change column in the data frame
    :param aggregator: A function that aggregates a list of differential gene
     expression values. Defaults to :func:`numpy.median`. Could also use:
     :func:`numpy.mean`, :func:`numpy.average`, :func:`numpy.min`, or
     :func:`numpy.max`
    :return: A dictionary of {gene symbol: log fold change}
    """
    aggregator = np.median if aggregator is None else aggregator

    df = pd.read_csv(path)

    assert gene_symbol_column in df.columns
    assert logfc_column in df.columns

    # Rows without a gene symbol represent control sequences; drop them
    # along with every column except the two we need.
    df = df.loc[df[gene_symbol_column].notnull(), [gene_symbol_column, logfc_column]]

    grouped = defaultdict(list)
    for _, symbol, fold_change in df.itertuples():
        grouped[symbol].append(fold_change)

    return {
        symbol: aggregator(fold_changes)
        for symbol, fold_changes in grouped.items()
    }
python
{ "resource": "" }
q262165
get_merged_namespace_names
validation
def get_merged_namespace_names(locations, check_keywords=True):
    """Load many namespaces and combine their names.

    :param iter[str] locations: An iterable of URLs or file paths pointing to BEL namespaces.
    :param bool check_keywords: Should all the keywords be the same? Defaults to ``True``
    :return: A dictionary of {names: labels}
    :rtype: dict[str, str]
    :raises ValueError: If `check_keywords` is set and the namespaces do not
     all share the same keyword.

    Example Usage

    >>> graph = ...
    >>> original_ns_url = ...
    >>> export_namespace(graph, 'MBS')  # Outputs in current directory to MBS.belns
    >>> value_dict = get_merged_namespace_names([original_ns_url, 'MBS.belns'])
    """
    resources = {location: get_bel_resource(location) for location in locations}

    if check_keywords:
        resource_keywords = set(config['Namespace']['Keyword'] for config in resources.values())
        if 1 != len(resource_keywords):
            raise ValueError('Tried merging namespaces with different keywords: {}'.format(resource_keywords))

    result = {}
    # FIX: the original iterated ``resources`` directly, yielding the
    # location *keys* (strings), so ``resource['Values']`` raised a
    # TypeError; the resource dictionaries live in .values().
    for resource in resources.values():
        result.update(resource['Values'])
    return result
python
{ "resource": "" }
q262166
merge_namespaces
validation
def merge_namespaces(input_locations,
                     output_path,
                     namespace_name,
                     namespace_keyword,
                     namespace_domain,
                     author_name,
                     citation_name,
                     namespace_description=None,
                     namespace_species=None,
                     namespace_version=None,
                     namespace_query_url=None,
                     namespace_created=None,
                     author_contact=None,
                     author_copyright=None,
                     citation_description=None,
                     citation_url=None,
                     citation_version=None,
                     citation_date=None,
                     case_sensitive=True,
                     delimiter='|',
                     cacheable=True,
                     functions=None,
                     value_prefix='',
                     sort_key=None,
                     check_keywords=True):
    """Merges namespaces from multiple locations to one.

    Thin orchestration: the names are combined by :func:`get_merged_namespace_names`
    and all metadata keyword arguments are forwarded verbatim to ``write_namespace``.

    :param iter input_locations: An iterable of URLs or file paths pointing to BEL namespaces.
    :param str output_path: The path to the file to write the merged namespace
    :param str namespace_name: The namespace name
    :param str namespace_keyword: Preferred BEL Keyword, maximum length of 8
    :param str namespace_domain: One of: :data:`pybel.constants.NAMESPACE_DOMAIN_BIOPROCESS`,
     :data:`pybel.constants.NAMESPACE_DOMAIN_CHEMICAL`,
     :data:`pybel.constants.NAMESPACE_DOMAIN_GENE`, or
     :data:`pybel.constants.NAMESPACE_DOMAIN_OTHER`
    :param str author_name: The namespace's authors
    :param str citation_name: The name of the citation
    :param str namespace_query_url: HTTP URL to query for details on namespace values (must be valid URL)
    :param str namespace_description: Namespace description
    :param str namespace_species: Comma-separated list of species taxonomy id's
    :param str namespace_version: Namespace version
    :param str namespace_created: Namespace public timestamp, ISO 8601 datetime
    :param str author_contact: Namespace author's contact info/email address
    :param str author_copyright: Namespace's copyright/license information
    :param str citation_description: Citation description
    :param str citation_url: URL to more citation information
    :param str citation_version: Citation version
    :param str citation_date: Citation publish timestamp, ISO 8601 Date
    :param bool case_sensitive: Should this config file be interpreted as case-sensitive?
    :param str delimiter: The delimiter between names and labels in this config file
    :param bool cacheable: Should this config file be cached?
    :param functions: The encoding for the elements in this namespace
    :type functions: iterable of characters
    :param str value_prefix: a prefix for each name
    :param sort_key: A function to sort the values with :func:`sorted`
    :param bool check_keywords: Should all the keywords be the same? Defaults to ``True``
    """
    # Combine the names from all input namespaces (optionally enforcing a
    # single shared keyword, which raises ValueError on mismatch)
    results = get_merged_namespace_names(input_locations, check_keywords=check_keywords)

    with open(output_path, 'w') as file:
        write_namespace(
            namespace_name=namespace_name,
            namespace_keyword=namespace_keyword,
            namespace_domain=namespace_domain,
            author_name=author_name,
            citation_name=citation_name,
            values=results,
            namespace_species=namespace_species,
            namespace_description=namespace_description,
            namespace_query_url=namespace_query_url,
            namespace_version=namespace_version,
            namespace_created=namespace_created,
            author_contact=author_contact,
            author_copyright=author_copyright,
            citation_description=citation_description,
            citation_url=citation_url,
            citation_version=citation_version,
            citation_date=citation_date,
            case_sensitive=case_sensitive,
            delimiter=delimiter,
            cacheable=cacheable,
            functions=functions,
            value_prefix=value_prefix,
            sort_key=sort_key,
            file=file
        )
python
{ "resource": "" }
q262167
run_rcr
validation
def run_rcr(graph, tag='dgxp'):
    """Run the reverse causal reasoning algorithm on a graph.

    Steps:

    1. Get all downstream controlled things into map (that have at least 4 downstream things)
    2. calculate population of all things that are downstream controlled

    .. note:: Assumes all nodes have been pre-tagged with data

    :param pybel.BELGraph graph:
    :param str tag: The key for the nodes' data dictionaries that corresponds to the integer value for its
     differential expression.
    """
    # Step 1: Calculate the hypothesis subnetworks (just simple star graphs)

    #: {controller node: set of all downstream nodes, regardless of relation type}
    hypotheses = defaultdict(set)
    #: {controller node: set of downstream nodes linked by a causal increase}
    increases = defaultdict(set)
    #: {controller node: set of downstream nodes linked by a causal decrease}
    decreases = defaultdict(set)

    for u, v, d in graph.edges(data=True):
        hypotheses[u].add(v)

        if d[RELATION] in CAUSAL_INCREASE_RELATIONS:
            increases[u].add(v)

        elif d[RELATION] in CAUSAL_DECREASE_RELATIONS:
            decreases[u].add(v)

    # Step 2: Calculate the matching of the data points to the causal relationships

    #: A dictionary from {tuple controller node: int count of correctly matching observations}
    correct = defaultdict(int)
    #: A dictionary from {tuple controller node: int count of incorrectly matching observations}
    contra = defaultdict(int)
    #: A dictionary from {tuple controller node: int count of ambiguous observations}
    ambiguous = defaultdict(int)
    #: A dictionary from {tuple controller node: int count of missing obvservations}
    missing = defaultdict(int)

    for controller, downstream_nodes in hypotheses.items():
        if len(downstream_nodes) < 4:
            continue  # need enough data to make reasonable calculations!

        for node in downstream_nodes:
            # A node that the controller both increases and decreases cannot be scored
            if node in increases[controller] and node in decreases[controller]:
                ambiguous[controller] += 1

            elif node in increases[controller]:
                # NOTE(review): a tag value of 0 (unchanged) is silently ignored
                # in this branch and the next — confirm that is intended
                if graph.node[node][tag] == 1:
                    correct[controller] += 1
                elif graph.node[node][tag] == -1:
                    contra[controller] += 1

            elif node in decreases[controller]:
                if graph.node[node][tag] == 1:
                    contra[controller] += 1
                elif graph.node[node][tag] == -1:
                    correct[controller] += 1

            else:
                # downstream via a non-causal relation only
                missing[controller] += 1

    # Step 3: Keep only controller nodes who have 4 or more downstream nodes
    controllers = {
        controller
        for controller, downstream_nodes in hypotheses.items()
        if 4 <= len(downstream_nodes)
    }

    # Step 4: Calculate concordance scores
    # NOTE(review): scipy.stats.beta(...) returns a frozen distribution object,
    # not a numeric score — confirm whether a p-value/statistic was intended here
    concordance_scores = {
        controller: scipy.stats.beta(0.5, correct[controller], contra[controller])
        for controller in controllers
    }

    # Step 5: Calculate richness scores
    # TODO

    # Calculate the population as the union of all downstream nodes for all controllers
    population = {
        node
        for controller in controllers
        for node in hypotheses[controller]
    }
    # NOTE(review): population_size is computed but never used below
    population_size = len(population)

    # Step 6: Export
    return pandas.DataFrame({
        'contra': contra,
        'correct': correct,
        'concordance': concordance_scores
    })
python
{ "resource": "" }
q262168
export_namespace
validation
def export_namespace(graph, namespace, directory=None, cacheable=False):
    """Export all names and missing names from the given namespace to its own BEL namespace file in the given directory.

    Could be useful during quick and dirty curation, where planned namespace building is not a priority.

    :param pybel.BELGraph graph: A BEL graph
    :param str namespace: The namespace to process
    :param str directory: The path to the directory where to output the namespace. Defaults to the current working
     directory returned by :func:`os.getcwd`
    :param bool cacheable: Should the namespace be cacheable? Defaults to ``False`` because, in general, this
     operation will probably be used for evil, and users won't want to reload their entire cache after each
     iteration of curation.
    """
    directory = os.getcwd() if directory is None else directory
    path = os.path.join(directory, '{}.belns'.format(namespace))

    with open(path, 'w') as file:
        log.info('Outputting to %s', path)
        right_names = get_names_by_namespace(graph, namespace)
        log.info('Graph has %d correct names in %s', len(right_names), namespace)
        wrong_names = get_incorrect_names_by_namespace(graph, namespace)
        # Bug fix: previously logged len(right_names) here
        log.info('Graph has %d incorrect names in %s', len(wrong_names), namespace)
        undefined_ns_names = get_undefined_namespace_names(graph, namespace)
        # Bug fix: previously logged len(right_names) here too
        log.info('Graph has %d names in missing namespace %s', len(undefined_ns_names), namespace)

        names = (right_names | wrong_names | undefined_ns_names)

        if 0 == len(names):
            log.warning('%s is empty', namespace)

        write_namespace(
            namespace_name=namespace,
            namespace_keyword=namespace,
            namespace_domain='Other',
            author_name=graph.authors,
            author_contact=graph.contact,
            citation_name=graph.name,
            values=names,
            cacheable=cacheable,
            file=file
        )
python
{ "resource": "" }
q262169
lint_file
validation
def lint_file(in_file, out_file=None):
    """Remove extraneous leading and trailing whitespace from each line of a file.

    :param file in_file: A readable file or file-like
    :param file out_file: A writable file or file-like; defaults to stdout when ``None``
    """
    stripped_lines = (line.strip() for line in in_file)
    for stripped in stripped_lines:
        print(stripped, file=out_file)
python
{ "resource": "" }
q262170
lint_directory
validation
def lint_directory(source, target):
    """Write a linted version of each BEL document in the source directory to the target directory.

    Only files ending in ``.bel`` are processed; everything else is skipped.

    :param str source: Path to directory to lint
    :param str target: Path to directory to output
    """
    for filename in os.listdir(source):
        if not filename.endswith('.bel'):
            continue

        log.info('linting: %s', filename)

        source_path = os.path.join(source, filename)
        target_path = os.path.join(target, filename)
        with open(source_path) as in_file, open(target_path, 'w') as out_file:
            lint_file(in_file, out_file)
python
{ "resource": "" }
q262171
get_entrez_gene_data
validation
def get_entrez_gene_data(entrez_ids: Iterable[Union[str, int]]):
    """Get gene info from Entrez.

    Queries the PubMed gene service for the given identifiers and returns a
    dictionary keyed by the gene's uid with its sanitized summary and description.
    """
    joined_ids = ','.join(str(entrez_id).strip() for entrez_id in entrez_ids)
    response = requests.get(PUBMED_GENE_QUERY_URL.format(joined_ids))
    tree = ElementTree.fromstring(response.content)

    rv = {}
    for document in tree.findall('./DocumentSummarySet/DocumentSummary'):
        rv[document.attrib['uid']] = {
            'summary': _sanitize(document.find('Summary').text),
            'description': document.find('Description').text
        }
    return rv
python
{ "resource": "" }
q262172
make_pubmed_gene_group
validation
def make_pubmed_gene_group(entrez_ids: Iterable[Union[str, int]]) -> Iterable[str]:
    """Build a skeleton for gene summaries.

    :param entrez_ids: A list of Entrez Gene identifiers to query the PubMed service
    :return: An iterator over statement lines for NCBI Entrez Gene summaries
    """
    joined_ids = ','.join(str(entrez_id).strip() for entrez_id in entrez_ids)
    response = requests.get(PUBMED_GENE_QUERY_URL.format(joined_ids))
    tree = ElementTree.fromstring(response.content)

    for document in tree.findall('./DocumentSummarySet/DocumentSummary'):
        yield '\n# {}'.format(document.find('Description').text)
        yield 'SET Citation = {{"Other", "PubMed Gene", "{}"}}'.format(document.attrib['uid'])
        summary_text = document.find('Summary').text.strip().replace('\n', '')
        yield 'SET Evidence = "{}"'.format(summary_text)
        yield '\nUNSET Evidence\nUNSET Citation'
python
{ "resource": "" }
q262173
write_boilerplate
validation
def write_boilerplate(name: str,
                      version: Optional[str] = None,
                      description: Optional[str] = None,
                      authors: Optional[str] = None,
                      contact: Optional[str] = None,
                      copyright: Optional[str] = None,
                      licenses: Optional[str] = None,
                      disclaimer: Optional[str] = None,
                      namespace_url: Optional[Mapping[str, str]] = None,
                      namespace_patterns: Optional[Mapping[str, str]] = None,
                      annotation_url: Optional[Mapping[str, str]] = None,
                      annotation_patterns: Optional[Mapping[str, str]] = None,
                      annotation_list: Optional[Mapping[str, Set[str]]] = None,
                      pmids: Optional[Iterable[Union[str, int]]] = None,
                      entrez_ids: Optional[Iterable[Union[str, int]]] = None,
                      file: Optional[TextIO] = None,
                      ) -> None:
    """Write a boilerplate BEL document, with standard document metadata, definitions.

    :param name: The unique name for this BEL document
    :param contact: The email address of the maintainer
    :param description: A description of the contents of this document
    :param authors: The authors of this document
    :param version: The version. Defaults to ``1.0.0``.
    :param copyright: Copyright information about this document
    :param licenses: The license applied to this document
    :param disclaimer: The disclaimer for this document
    :param namespace_url: an optional dictionary of {str name: str URL} of namespaces
    :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces
    :param annotation_url: An optional dictionary of {str name: str URL} of annotations
    :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations
    :param annotation_list: An optional dictionary of {str name: set of names} of list annotations
    :param pmids: A list of PubMed identifiers to auto-populate with citation and abstract
    :param entrez_ids: A list of Entrez identifiers to autopopulate the gene summary as evidence
    :param file: A writable file or file-like. If None, defaults to :data:`sys.stdout`
    """
    # Emit the standard BEL document header (metadata + definitions)
    lines = make_knowledge_header(
        name=name,
        version=version or '1.0.0',
        description=description,
        authors=authors,
        contact=contact,
        copyright=copyright,
        licenses=licenses,
        disclaimer=disclaimer,
        namespace_url=namespace_url,
        namespace_patterns=namespace_patterns,
        annotation_url=annotation_url,
        annotation_patterns=annotation_patterns,
        annotation_list=annotation_list,
    )

    for line in lines:
        print(line, file=file)

    # Optionally append citation/abstract stubs for the given PubMed identifiers
    if pmids is not None:
        for line in make_pubmed_abstract_group(pmids):
            print(line, file=file)

    # Optionally append Entrez gene summary stubs as evidence
    if entrez_ids is not None:
        for line in make_pubmed_gene_group(entrez_ids):
            print(line, file=file)
python
{ "resource": "" }
q262174
get_subgraph_by_node_search
validation
def get_subgraph_by_node_search(graph: BELGraph, query: Strings) -> BELGraph:
    """Get a sub-graph induced over all nodes matching the query string.

    :param graph: A BEL Graph
    :param query: A query string or iterable of query strings for node names

    Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.
    """
    # Find nodes whose names match the query, then induce the sub-graph over them
    matching_nodes = search_node_names(graph, query)
    return get_subgraph_by_induction(graph, matching_nodes)
python
{ "resource": "" }
q262175
get_largest_component
validation
def get_largest_component(graph: BELGraph) -> BELGraph:
    """Get the giant component of a graph."""
    components = nx.weakly_connected_components(graph)
    giant_component_nodes = max(components, key=len)
    return subgraph(graph, giant_component_nodes)
python
{ "resource": "" }
q262176
random_by_nodes
validation
def random_by_nodes(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
    """Get a random graph by inducing over a percentage of the original nodes.

    :param graph: A BEL graph
    :param percentage: The percentage of nodes to keep
    """
    percentage = percentage or 0.9
    assert 0 < percentage <= 1

    # Materialize the nodes: random.sample requires a sequence, and the
    # NodeView returned by graph.nodes() in networkx 2 is not one
    nodes = list(graph)
    n = int(len(nodes) * percentage)

    subnodes = random.sample(nodes, n)

    result = graph.subgraph(subnodes)

    update_node_helper(graph, result)

    return result
python
{ "resource": "" }
q262177
random_by_edges
validation
def random_by_edges(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
    """Get a random graph by keeping a certain percentage of original edges.

    :param graph: A BEL graph
    :param percentage: What percentage of edges to take
    """
    percentage = percentage or 0.9
    assert 0 < percentage <= 1

    # Materialize the edges: random.sample requires a sequence, and the
    # EdgeView returned in networkx 2 is not one
    edges = list(graph.edges(keys=True))
    n = int(graph.number_of_edges() * percentage)

    subedges = random.sample(edges, n)

    rv = graph.fresh_copy()

    for u, v, k in subedges:
        safe_add_edge(rv, u, v, k, graph[u][v][k])

    update_node_helper(graph, rv)

    return rv
python
{ "resource": "" }
q262178
shuffle_node_data
validation
def shuffle_node_data(graph: BELGraph, key: str, percentage: Optional[float] = None) -> BELGraph:
    """Shuffle the node's data.

    Useful for permutation testing.

    :param graph: A BEL graph
    :param key: The node data dictionary key
    :param percentage: What percentage of possible swaps to make
    """
    percentage = percentage or 0.3
    assert 0 < percentage <= 1

    n = graph.number_of_nodes()
    swaps = int(percentage * n * (n - 1) / 2)

    result: BELGraph = graph.copy()

    # Materialize once: `result.node` was removed in networkx 2 and
    # random.sample requires a sequence anyway
    nodes = list(result)

    for _ in range(swaps):
        s, t = random.sample(nodes, 2)
        result.nodes[s][key], result.nodes[t][key] = result.nodes[t][key], result.nodes[s][key]

    return result
python
{ "resource": "" }
q262179
shuffle_relations
validation
def shuffle_relations(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
    """Shuffle the relations.

    Useful for permutation testing.

    :param graph: A BEL graph
    :param percentage: What percentage of possible swaps to make
    """
    # Annotation fix: percentage is a float fraction, not a string
    percentage = percentage or 0.3
    assert 0 < percentage <= 1

    n = graph.number_of_edges()
    swaps = int(percentage * n * (n - 1) / 2)

    result: BELGraph = graph.copy()

    # Materialize the edges: random.sample requires a sequence, and the
    # EdgeView returned in networkx 2 is not one
    edges = list(result.edges(keys=True))

    for _ in range(swaps):
        (s1, t1, k1), (s2, t2, k2) = random.sample(edges, 2)
        result[s1][t1][k1], result[s2][t2][k2] = result[s2][t2][k2], result[s1][t1][k1]

    return result
python
{ "resource": "" }
q262180
is_edge_consistent
validation
def is_edge_consistent(graph, u, v):
    """Check if all edges between two nodes have the same relation.

    :param pybel.BELGraph graph: A BEL Graph
    :param tuple u: The source BEL node
    :param tuple v: The target BEL node
    :return: If all edges from the source to target node have the same relation
    :rtype: bool
    :raises ValueError: If the graph has no edge between u and v
    """
    if not graph.has_edge(u, v):
        raise ValueError('{} does not contain an edge ({}, {})'.format(graph, u, v))

    # Bug fix: with at least one edge present the relation set can never be
    # empty, so comparing its size to 0 always returned False. Consistency
    # means exactly ONE distinct relation. Also use graph[u][v] instead of
    # the `graph.edge` attribute that was removed in networkx 2.
    return 1 == len(set(d[RELATION] for d in graph[u][v].values()))
python
{ "resource": "" }
q262181
rewire_targets
validation
def rewire_targets(graph, rewiring_probability):
    """Rewire a graph's edges' target nodes.

    - For BEL graphs, assumes edge consistency (all edges between two given nodes have the same relation)
    - Doesn't make self-edges

    :param pybel.BELGraph graph: A BEL graph
    :param float rewiring_probability: The probability of rewiring (between 0 and 1)
    :return: A rewired BEL graph
    :raises ValueError: If the graph is not edge-consistent
    """
    if not all_edges_consistent(graph):
        raise ValueError('{} is not consistent'.format(graph))

    result = graph.copy()
    # Materialize for random.choice (node views are not sequences in networkx 2)
    nodes = list(result)

    # Snapshot the edges so mutating the graph below doesn't break iteration
    for u, v in list(result.edges()):
        # Bug fix: previously skipped when random() < probability, which
        # rewired each edge with probability (1 - rewiring_probability)
        if random.random() >= rewiring_probability:
            continue

        # Pick a replacement source that isn't u and doesn't already have this edge
        w = random.choice(nodes)

        while w == u or result.has_edge(u, w):
            w = random.choice(nodes)

        result.add_edge(w, v)
        result.remove_edge(u, v)

    return result
python
{ "resource": "" }
q262182
self_edge_filter
validation
def self_edge_filter(_: BELGraph, source: BaseEntity, target: BaseEntity, __: str) -> bool:
    """Pass only for edges whose source and target are the same node (self-loops)."""
    return target == source
python
{ "resource": "" }
q262183
has_protein_modification_increases_activity
validation
def has_protein_modification_increases_activity(graph: BELGraph,
                                                source: BaseEntity,
                                                target: BaseEntity,
                                                key: str,
                                                ) -> bool:
    """Check if pmod of source causes activity of target."""
    edge_data = graph[source][target][key]
    if not has_protein_modification(graph, source):
        return False
    return part_has_modifier(edge_data, OBJECT, ACTIVITY)
python
{ "resource": "" }
q262184
has_degradation_increases_activity
validation
def has_degradation_increases_activity(data: Dict) -> bool:
    """Check if the degradation of source causes activity of target."""
    if not part_has_modifier(data, SUBJECT, DEGRADATION):
        return False
    return part_has_modifier(data, OBJECT, ACTIVITY)
python
{ "resource": "" }
q262185
has_translocation_increases_activity
validation
def has_translocation_increases_activity(data: Dict) -> bool:
    """Check if the translocation of source causes activity of target."""
    if not part_has_modifier(data, SUBJECT, TRANSLOCATION):
        return False
    return part_has_modifier(data, OBJECT, ACTIVITY)
python
{ "resource": "" }
q262186
complex_has_member
validation
def complex_has_member(graph: BELGraph, complex_node: ComplexAbundance, member_node: BaseEntity) -> bool:
    """Does the given complex contain the member?"""
    # TODO can't you look in the members of the complex object (if it's enumerated)
    for _, successor, data in graph.out_edges(complex_node, data=True):
        if data[RELATION] != HAS_COMPONENT:
            continue
        if successor == member_node:
            return True
    return False
python
{ "resource": "" }
q262187
complex_increases_activity
validation
def complex_increases_activity(graph: BELGraph, u: BaseEntity, v: BaseEntity, key: str) -> bool:
    """Return if the formation of a complex with u increases the activity of v."""
    if not isinstance(u, (ComplexAbundance, NamedComplexAbundance)):
        return False
    if not complex_has_member(graph, u, v):
        return False
    return part_has_modifier(graph[u][v][key], OBJECT, ACTIVITY)
python
{ "resource": "" }
q262188
find_activations
validation
def find_activations(graph: BELGraph):
    """Find edges that are A - A, meaning that some conditions in the edge best describe the interaction.

    Scans every self-edge in the graph, classifies it by which subject/object
    modifiers best explain the self-interaction, and prints one diagnostic line
    per classified edge. Edges without a line number were inferred and are skipped.

    :param graph: A BEL graph
    """
    for u, v, key, data in graph.edges(keys=True, data=True):
        # only self-edges are of interest here
        if u != v:
            continue

        bel = graph.edge_to_bel(u, v, data)

        line = data.get(LINE)

        if line is None:
            continue  # this was inferred, so need to investigate another way

        elif has_protein_modification_increases_activity(graph, u, v, key):
            print(line, '- pmod changes -', bel)
            find_related(graph, v, data)

        elif has_degradation_increases_activity(data):
            print(line, '- degradation changes -', bel)
            find_related(graph, v, data)

        elif has_translocation_increases_activity(data):
            print(line, '- translocation changes -', bel)
            find_related(graph, v, data)

        elif complex_increases_activity(graph, u, v, key):
            print(line, '- complex changes - ', bel)
            find_related(graph, v, data)

        elif has_same_subject_object(graph, u, v, key):
            print(line, '- same sub/obj -', bel)

        else:
            # no known classification matched
            print(line, '- *** - ', bel)
python
{ "resource": "" }
q262189
summarize_edge_filter
validation
def summarize_edge_filter(graph: BELGraph, edge_predicates: EdgePredicates) -> None:
    """Print a summary of the number of edges passing a given set of filters."""
    passed = count_passed_edge_filter(graph, edge_predicates)

    # Either a collection of predicates or a single predicate may be given
    if isinstance(edge_predicates, Iterable):
        filter_names = ', '.join(edge_filter.__name__ for edge_filter in edge_predicates)
    else:
        filter_names = edge_predicates.__name__

    print('{}/{} edges passed {}'.format(passed, graph.number_of_edges(), filter_names))
python
{ "resource": "" }
q262190
build_edge_data_filter
validation
def build_edge_data_filter(annotations: Mapping, partial_match: bool = True) -> EdgePredicate:  # noqa: D202
    """Build a filter that keeps edges whose data dictionaries are super-dictionaries to the given dictionary.

    :param annotations: The annotation query dict to match
    :param partial_match: Should the query values be used as partial or exact matches? Defaults to :code:`True`.
    """

    @edge_predicate
    def annotation_dict_filter(data: EdgeData) -> bool:
        """Match edges with the given dictionary as a sub-dictionary."""
        matched = subdict_matches(data, annotations, partial_match=partial_match)
        return matched

    return annotation_dict_filter
python
{ "resource": "" }
q262191
build_pmid_exclusion_filter
validation
def build_pmid_exclusion_filter(pmids: Strings) -> EdgePredicate:
    """Fail for edges with citations whose references are one of the given PubMed identifiers.

    :param pmids: A PubMed identifier or list of PubMed identifiers to filter against
    """
    if isinstance(pmids, str):
        @edge_predicate
        def pmid_exclusion_filter(data: EdgeData) -> bool:
            """Fail for edges with PubMed citations matching the contained PubMed identifier.

            :return: If the edge has a PubMed citation with the contained PubMed identifier
            """
            if not has_pubmed(data):
                return False
            return data[CITATION][CITATION_REFERENCE] != pmids

        return pmid_exclusion_filter

    if isinstance(pmids, Iterable):
        pmid_set = set(pmids)

        @edge_predicate
        def pmid_exclusion_filter(data: EdgeData) -> bool:
            """Pass for edges with PubMed citations matching one of the contained PubMed identifiers.

            :return: If the edge has a PubMed citation with one of the contained PubMed identifiers
            """
            if not has_pubmed(data):
                return False
            return data[CITATION][CITATION_REFERENCE] not in pmid_set

        return pmid_exclusion_filter

    raise TypeError
python
{ "resource": "" }
q262192
node_has_namespace
validation
def node_has_namespace(node: BaseEntity, namespace: str) -> bool:
    """Pass for nodes that have the given namespace."""
    found = node.get(NAMESPACE)
    if found is None:
        return False
    return found == namespace
python
{ "resource": "" }
q262193
node_has_namespaces
validation
def node_has_namespaces(node: BaseEntity, namespaces: Set[str]) -> bool:
    """Pass for nodes that have one of the given namespaces."""
    found = node.get(NAMESPACE)
    if found is None:
        return False
    return found in namespaces
python
{ "resource": "" }
q262194
get_cutoff
validation
def get_cutoff(value: float, cutoff: Optional[float] = None) -> int:
    """Assign if a value is greater than or less than a cutoff.

    Returns ``1`` above the cutoff, ``-1`` below its negation, and ``0`` in between.
    """
    if cutoff is None:
        cutoff = 0

    if value > cutoff:
        return 1
    elif value < -cutoff:
        return -1
    else:
        return 0
python
{ "resource": "" }
q262195
calculate_concordance_helper
validation
def calculate_concordance_helper(graph: BELGraph,
                                 key: str,
                                 cutoff: Optional[float] = None,
                                 ) -> Tuple[int, int, int, int]:
    """Help calculate network-wide concordance.

    Assumes data already annotated with given key

    :param graph: A BEL graph
    :param key: The node data dictionary key storing the logFC
    :param cutoff: The optional logFC cutoff for significance
    :return: Counts of (correct, incorrect, ambiguous, unassigned) edges
    """
    counts = defaultdict(int)

    for u, v, k, d in graph.edges(keys=True, data=True):
        concordance = edge_concords(graph, u, v, k, d, key, cutoff=cutoff)
        counts[concordance] += 1

    return (
        counts[Concordance.correct],
        counts[Concordance.incorrect],
        counts[Concordance.ambiguous],
        counts[Concordance.unassigned],
    )
python
{ "resource": "" }
q262196
calculate_concordance
validation
def calculate_concordance(graph: BELGraph, key: str, cutoff: Optional[float] = None,
                          use_ambiguous: bool = False) -> float:
    """Calculate network-wide concordance.

    Assumes data already annotated with given key

    :param graph: A BEL graph
    :param key: The node data dictionary key storing the logFC
    :param cutoff: The optional logFC cutoff for significance
    :param use_ambiguous: Compare to ambiguous edges as well
    :return: The fraction of scorable edges that concord, or ``-1.0`` when none are scorable
    """
    correct, incorrect, ambiguous, _ = calculate_concordance_helper(graph, key, cutoff=cutoff)

    denominator = correct + incorrect
    if use_ambiguous:
        denominator += ambiguous

    if denominator == 0:
        return -1.0
    return correct / denominator
python
{ "resource": "" }
q262197
one_sided
validation
def one_sided(value: float, distribution: List[float]) -> float:
    """Calculate the one-sided probability of getting a value more extreme than the distribution."""
    assert distribution
    more_extreme = sum(1 for element in distribution if value < element)
    return more_extreme / len(distribution)
python
{ "resource": "" }
q262198
calculate_concordance_probability
validation
def calculate_concordance_probability(graph: BELGraph,
                                      key: str,
                                      cutoff: Optional[float] = None,
                                      permutations: Optional[int] = None,
                                      percentage: Optional[float] = None,
                                      use_ambiguous: bool = False,
                                      permute_type: str = 'shuffle_node_data',
                                      ) -> Tuple[float, List[float], float]:
    """Calculate a graph's concordance as well as its statistical probability.

    :param graph: A BEL graph
    :param str key: The node data dictionary key storing the logFC
    :param float cutoff: The optional logFC cutoff for significance
    :param int permutations: The number of random permutations to test. Defaults to 500
    :param float percentage: The percentage of the graph's edges to maintain. Defaults to 0.9
    :param bool use_ambiguous: Compare to ambiguous edges as well
    :param permute_type: The name of the permutation strategy: one of ``'random_by_edges'``,
     ``'shuffle_node_data'`` (the default), or ``'shuffle_relations'``
    :returns: A triple of the concordance score, the null distribution, and the p-value.
    :raises ValueError: If an unknown ``permute_type`` is given
    """
    # Select the permutation strategy used to build the null distribution
    if permute_type == 'random_by_edges':
        permute_func = partial(random_by_edges, percentage=percentage)
    elif permute_type == 'shuffle_node_data':
        permute_func = partial(shuffle_node_data, key=key, percentage=percentage)
    elif permute_type == 'shuffle_relations':
        permute_func = partial(shuffle_relations, percentage=percentage)
    else:
        raise ValueError('Invalid permute_type: {}'.format(permute_type))

    # Work on a copy collapsed to the gene level so the input graph is untouched
    graph: BELGraph = graph.copy()
    collapse_to_genes(graph)
    collapse_all_variants(graph)

    # The observed score on the (collapsed) graph
    score = calculate_concordance(graph, key, cutoff=cutoff)

    #: Null distribution of concordance scores from permuted graphs
    distribution = []

    for _ in range(permutations or 500):
        permuted_graph = permute_func(graph)
        permuted_graph_scores = calculate_concordance(permuted_graph, key, cutoff=cutoff, use_ambiguous=use_ambiguous)
        distribution.append(permuted_graph_scores)

    # One-sided p-value: fraction of permuted scores exceeding the observed score
    return score, distribution, one_sided(score, distribution)
python
{ "resource": "" }
q262199
calculate_concordance_by_annotation
validation
def calculate_concordance_by_annotation(graph, annotation, key, cutoff=None):
    """Return the concordance scores for each stratified graph based on the given annotation.

    :param pybel.BELGraph graph: A BEL graph
    :param str annotation: The annotation to group by.
    :param str key: The node data dictionary key storing the logFC
    :param float cutoff: The optional logFC cutoff for significance
    :rtype: dict[str,tuple]
    """
    stratified = get_subgraphs_by_annotation(graph, annotation)
    return {
        value: calculate_concordance(subgraph, key, cutoff=cutoff)
        for value, subgraph in stratified.items()
    }
python
{ "resource": "" }