id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
19,400
sorgerlab/indra
indra/statements/statements.py
get_type_hierarchy
def get_type_hierarchy(s): """Get the sequence of parents from `s` to Statement. Parameters ---------- s : a class or instance of a child of Statement For example the statement `Phosphorylation(MEK(), ERK())` or just the class `Phosphorylation`. Returns ------- parent_list : list[types] A list of the types leading up to Statement. Examples -------- >> s = Phosphorylation(MAPK1(), Elk1()) >> get_type_hierarchy(s) [Phosphorylation, AddModification, Modification, Statement] >> get_type_hierarchy(AddModification) [AddModification, Modification, Statement] """ tp = type(s) if not isinstance(s, type) else s p_list = [tp] for p in tp.__bases__: if p is not Statement: p_list.extend(get_type_hierarchy(p)) else: p_list.append(p) return p_list
python
def get_type_hierarchy(s): tp = type(s) if not isinstance(s, type) else s p_list = [tp] for p in tp.__bases__: if p is not Statement: p_list.extend(get_type_hierarchy(p)) else: p_list.append(p) return p_list
[ "def", "get_type_hierarchy", "(", "s", ")", ":", "tp", "=", "type", "(", "s", ")", "if", "not", "isinstance", "(", "s", ",", "type", ")", "else", "s", "p_list", "=", "[", "tp", "]", "for", "p", "in", "tp", ".", "__bases__", ":", "if", "p", "is"...
Get the sequence of parents from `s` to Statement. Parameters ---------- s : a class or instance of a child of Statement For example the statement `Phosphorylation(MEK(), ERK())` or just the class `Phosphorylation`. Returns ------- parent_list : list[types] A list of the types leading up to Statement. Examples -------- >> s = Phosphorylation(MAPK1(), Elk1()) >> get_type_hierarchy(s) [Phosphorylation, AddModification, Modification, Statement] >> get_type_hierarchy(AddModification) [AddModification, Modification, Statement]
[ "Get", "the", "sequence", "of", "parents", "from", "s", "to", "Statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2465-L2494
19,401
sorgerlab/indra
indra/statements/statements.py
get_statement_by_name
def get_statement_by_name(stmt_name): """Get a statement class given the name of the statement class.""" stmt_classes = get_all_descendants(Statement) for stmt_class in stmt_classes: if stmt_class.__name__.lower() == stmt_name.lower(): return stmt_class raise NotAStatementName('\"%s\" is not recognized as a statement type!' % stmt_name)
python
def get_statement_by_name(stmt_name): stmt_classes = get_all_descendants(Statement) for stmt_class in stmt_classes: if stmt_class.__name__.lower() == stmt_name.lower(): return stmt_class raise NotAStatementName('\"%s\" is not recognized as a statement type!' % stmt_name)
[ "def", "get_statement_by_name", "(", "stmt_name", ")", ":", "stmt_classes", "=", "get_all_descendants", "(", "Statement", ")", "for", "stmt_class", "in", "stmt_classes", ":", "if", "stmt_class", ".", "__name__", ".", "lower", "(", ")", "==", "stmt_name", ".", ...
Get a statement class given the name of the statement class.
[ "Get", "a", "statement", "class", "given", "the", "name", "of", "the", "statement", "class", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2501-L2508
19,402
sorgerlab/indra
indra/statements/statements.py
get_unresolved_support_uuids
def get_unresolved_support_uuids(stmts): """Get uuids unresolved in support from stmts from stmts_from_json.""" return {s.uuid for stmt in stmts for s in stmt.supports + stmt.supported_by if isinstance(s, Unresolved)}
python
def get_unresolved_support_uuids(stmts): return {s.uuid for stmt in stmts for s in stmt.supports + stmt.supported_by if isinstance(s, Unresolved)}
[ "def", "get_unresolved_support_uuids", "(", "stmts", ")", ":", "return", "{", "s", ".", "uuid", "for", "stmt", "in", "stmts", "for", "s", "in", "stmt", ".", "supports", "+", "stmt", ".", "supported_by", "if", "isinstance", "(", "s", ",", "Unresolved", ")...
Get uuids unresolved in support from stmts from stmts_from_json.
[ "Get", "uuids", "unresolved", "in", "support", "from", "stmts", "from", "stmts_from_json", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2516-L2519
19,403
sorgerlab/indra
indra/statements/statements.py
stmt_type
def stmt_type(obj, mk=True): """Return standardized, backwards compatible object type String. This is a temporary solution to make sure type comparisons and matches keys of Statements and related classes are backwards compatible. """ if isinstance(obj, Statement) and mk: return type(obj) else: return type(obj).__name__
python
def stmt_type(obj, mk=True): if isinstance(obj, Statement) and mk: return type(obj) else: return type(obj).__name__
[ "def", "stmt_type", "(", "obj", ",", "mk", "=", "True", ")", ":", "if", "isinstance", "(", "obj", ",", "Statement", ")", "and", "mk", ":", "return", "type", "(", "obj", ")", "else", ":", "return", "type", "(", "obj", ")", ".", "__name__" ]
Return standardized, backwards compatible object type String. This is a temporary solution to make sure type comparisons and matches keys of Statements and related classes are backwards compatible.
[ "Return", "standardized", "backwards", "compatible", "object", "type", "String", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2522-L2532
19,404
sorgerlab/indra
indra/statements/statements.py
Statement.get_hash
def get_hash(self, shallow=True, refresh=False): """Get a hash for this Statement. There are two types of hash, "shallow" and "full". A shallow hash is as unique as the information carried by the statement, i.e. it is a hash of the `matches_key`. This means that differences in source, evidence, and so on are not included. As such, it is a shorter hash (14 nibbles). The odds of a collision among all the statements we expect to encounter (well under 10^8) is ~10^-9 (1 in a billion). Checks for collisions can be done by using the matches keys. A full hash includes, in addition to the matches key, information from the evidence of the statement. These hashes will be equal if the two Statements came from the same sentences, extracted by the same reader, from the same source. These hashes are correspondingly longer (16 nibbles). The odds of a collision for an expected less than 10^10 extractions is ~10^-9 (1 in a billion). Note that a hash of the Python object will also include the `uuid`, so it will always be unique for every object. Parameters ---------- shallow : bool Choose between the shallow and full hashes described above. Default is true (e.g. a shallow hash). refresh : bool Used to get a new copy of the hash. Default is false, so the hash, if it has been already created, will be read from the attribute. This is primarily used for speed testing. Returns ------- hash : int A long integer hash. """ if shallow: if not hasattr(self, '_shallow_hash') or self._shallow_hash is None\ or refresh: self._shallow_hash = make_hash(self.matches_key(), 14) ret = self._shallow_hash else: if not hasattr(self, '_full_hash') or self._full_hash is None \ or refresh: ev_mk_list = sorted([ev.matches_key() for ev in self.evidence]) self._full_hash = \ make_hash(self.matches_key() + str(ev_mk_list), 16) ret = self._full_hash return ret
python
def get_hash(self, shallow=True, refresh=False): if shallow: if not hasattr(self, '_shallow_hash') or self._shallow_hash is None\ or refresh: self._shallow_hash = make_hash(self.matches_key(), 14) ret = self._shallow_hash else: if not hasattr(self, '_full_hash') or self._full_hash is None \ or refresh: ev_mk_list = sorted([ev.matches_key() for ev in self.evidence]) self._full_hash = \ make_hash(self.matches_key() + str(ev_mk_list), 16) ret = self._full_hash return ret
[ "def", "get_hash", "(", "self", ",", "shallow", "=", "True", ",", "refresh", "=", "False", ")", ":", "if", "shallow", ":", "if", "not", "hasattr", "(", "self", ",", "'_shallow_hash'", ")", "or", "self", ".", "_shallow_hash", "is", "None", "or", "refres...
Get a hash for this Statement. There are two types of hash, "shallow" and "full". A shallow hash is as unique as the information carried by the statement, i.e. it is a hash of the `matches_key`. This means that differences in source, evidence, and so on are not included. As such, it is a shorter hash (14 nibbles). The odds of a collision among all the statements we expect to encounter (well under 10^8) is ~10^-9 (1 in a billion). Checks for collisions can be done by using the matches keys. A full hash includes, in addition to the matches key, information from the evidence of the statement. These hashes will be equal if the two Statements came from the same sentences, extracted by the same reader, from the same source. These hashes are correspondingly longer (16 nibbles). The odds of a collision for an expected less than 10^10 extractions is ~10^-9 (1 in a billion). Note that a hash of the Python object will also include the `uuid`, so it will always be unique for every object. Parameters ---------- shallow : bool Choose between the shallow and full hashes described above. Default is true (e.g. a shallow hash). refresh : bool Used to get a new copy of the hash. Default is false, so the hash, if it has been already created, will be read from the attribute. This is primarily used for speed testing. Returns ------- hash : int A long integer hash.
[ "Get", "a", "hash", "for", "this", "Statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L269-L317
19,405
sorgerlab/indra
indra/statements/statements.py
Statement._tag_evidence
def _tag_evidence(self): """Set all the Evidence stmt_tag to my deep matches-key hash.""" h = self.get_hash(shallow=False) for ev in self.evidence: ev.stmt_tag = h return
python
def _tag_evidence(self): h = self.get_hash(shallow=False) for ev in self.evidence: ev.stmt_tag = h return
[ "def", "_tag_evidence", "(", "self", ")", ":", "h", "=", "self", ".", "get_hash", "(", "shallow", "=", "False", ")", "for", "ev", "in", "self", ".", "evidence", ":", "ev", ".", "stmt_tag", "=", "h", "return" ]
Set all the Evidence stmt_tag to my deep matches-key hash.
[ "Set", "all", "the", "Evidence", "stmt_tag", "to", "my", "deep", "matches", "-", "key", "hash", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L319-L324
19,406
sorgerlab/indra
indra/statements/statements.py
Statement.agent_list
def agent_list(self, deep_sorted=False): """Get the canonicallized agent list.""" ag_list = [] for ag_name in self._agent_order: ag_attr = getattr(self, ag_name) if isinstance(ag_attr, Concept) or ag_attr is None: ag_list.append(ag_attr) elif isinstance(ag_attr, list): if not all([isinstance(ag, Concept) for ag in ag_attr]): raise TypeError("Expected all elements of list to be Agent " "and/or Concept, but got: %s" % {type(ag) for ag in ag_attr}) if deep_sorted: ag_attr = sorted_agents(ag_attr) ag_list.extend(ag_attr) else: raise TypeError("Expected type Agent, Concept, or list, got " "type %s." % type(ag_attr)) return ag_list
python
def agent_list(self, deep_sorted=False): ag_list = [] for ag_name in self._agent_order: ag_attr = getattr(self, ag_name) if isinstance(ag_attr, Concept) or ag_attr is None: ag_list.append(ag_attr) elif isinstance(ag_attr, list): if not all([isinstance(ag, Concept) for ag in ag_attr]): raise TypeError("Expected all elements of list to be Agent " "and/or Concept, but got: %s" % {type(ag) for ag in ag_attr}) if deep_sorted: ag_attr = sorted_agents(ag_attr) ag_list.extend(ag_attr) else: raise TypeError("Expected type Agent, Concept, or list, got " "type %s." % type(ag_attr)) return ag_list
[ "def", "agent_list", "(", "self", ",", "deep_sorted", "=", "False", ")", ":", "ag_list", "=", "[", "]", "for", "ag_name", "in", "self", ".", "_agent_order", ":", "ag_attr", "=", "getattr", "(", "self", ",", "ag_name", ")", "if", "isinstance", "(", "ag_...
Get the canonicallized agent list.
[ "Get", "the", "canonicallized", "agent", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L336-L354
19,407
sorgerlab/indra
indra/statements/statements.py
Statement.to_json
def to_json(self, use_sbo=False): """Return serialized Statement as a JSON dict. Parameters ---------- use_sbo : Optional[bool] If True, SBO annotations are added to each applicable element of the JSON. Default: False Returns ------- json_dict : dict The JSON-serialized INDRA Statement. """ stmt_type = type(self).__name__ # Original comment: For backwards compatibility, could be removed later all_stmts = [self] + self.supports + self.supported_by for st in all_stmts: if not hasattr(st, 'uuid'): st.uuid = '%s' % uuid.uuid4() ################## json_dict = _o(type=stmt_type) json_dict['belief'] = self.belief if self.evidence: evidence = [ev.to_json() for ev in self.evidence] json_dict['evidence'] = evidence json_dict['id'] = '%s' % self.uuid if self.supports: json_dict['supports'] = \ ['%s' % st.uuid for st in self.supports] if self.supported_by: json_dict['supported_by'] = \ ['%s' % st.uuid for st in self.supported_by] def get_sbo_term(cls): sbo_term = stmt_sbo_map.get(cls.__name__.lower()) while not sbo_term: cls = cls.__bases__[0] sbo_term = stmt_sbo_map.get(cls.__name__.lower()) return sbo_term if use_sbo: sbo_term = get_sbo_term(self.__class__) json_dict['sbo'] = \ 'http://identifiers.org/sbo/SBO:%s' % sbo_term return json_dict
python
def to_json(self, use_sbo=False): stmt_type = type(self).__name__ # Original comment: For backwards compatibility, could be removed later all_stmts = [self] + self.supports + self.supported_by for st in all_stmts: if not hasattr(st, 'uuid'): st.uuid = '%s' % uuid.uuid4() ################## json_dict = _o(type=stmt_type) json_dict['belief'] = self.belief if self.evidence: evidence = [ev.to_json() for ev in self.evidence] json_dict['evidence'] = evidence json_dict['id'] = '%s' % self.uuid if self.supports: json_dict['supports'] = \ ['%s' % st.uuid for st in self.supports] if self.supported_by: json_dict['supported_by'] = \ ['%s' % st.uuid for st in self.supported_by] def get_sbo_term(cls): sbo_term = stmt_sbo_map.get(cls.__name__.lower()) while not sbo_term: cls = cls.__bases__[0] sbo_term = stmt_sbo_map.get(cls.__name__.lower()) return sbo_term if use_sbo: sbo_term = get_sbo_term(self.__class__) json_dict['sbo'] = \ 'http://identifiers.org/sbo/SBO:%s' % sbo_term return json_dict
[ "def", "to_json", "(", "self", ",", "use_sbo", "=", "False", ")", ":", "stmt_type", "=", "type", "(", "self", ")", ".", "__name__", "# Original comment: For backwards compatibility, could be removed later", "all_stmts", "=", "[", "self", "]", "+", "self", ".", "...
Return serialized Statement as a JSON dict. Parameters ---------- use_sbo : Optional[bool] If True, SBO annotations are added to each applicable element of the JSON. Default: False Returns ------- json_dict : dict The JSON-serialized INDRA Statement.
[ "Return", "serialized", "Statement", "as", "a", "JSON", "dict", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L421-L466
19,408
sorgerlab/indra
indra/statements/statements.py
Statement.to_graph
def to_graph(self): """Return Statement as a networkx graph.""" def json_node(graph, element, prefix): if not element: return None node_id = '|'.join(prefix) if isinstance(element, list): graph.add_node(node_id, label='') # Enumerate children and add nodes and connect to anchor node for i, sub_element in enumerate(element): sub_id = json_node(graph, sub_element, prefix + ['%s' % i]) if sub_id: graph.add_edge(node_id, sub_id, label='') elif isinstance(element, dict): graph.add_node(node_id, label='') # Add node recursively for each element # Connect to this node with edge label according to key for k, v in element.items(): if k == 'id': continue elif k == 'name': graph.node[node_id]['label'] = v continue elif k == 'type': graph.node[node_id]['label'] = v continue sub_id = json_node(graph, v, prefix + ['%s' % k]) if sub_id: graph.add_edge(node_id, sub_id, label=('%s' % k)) else: if isinstance(element, basestring) and \ element.startswith('http'): element = element.split('/')[-1] graph.add_node(node_id, label=('%s' % str(element))) return node_id jd = self.to_json() graph = networkx.DiGraph() json_node(graph, jd, ['%s' % self.uuid]) return graph
python
def to_graph(self): def json_node(graph, element, prefix): if not element: return None node_id = '|'.join(prefix) if isinstance(element, list): graph.add_node(node_id, label='') # Enumerate children and add nodes and connect to anchor node for i, sub_element in enumerate(element): sub_id = json_node(graph, sub_element, prefix + ['%s' % i]) if sub_id: graph.add_edge(node_id, sub_id, label='') elif isinstance(element, dict): graph.add_node(node_id, label='') # Add node recursively for each element # Connect to this node with edge label according to key for k, v in element.items(): if k == 'id': continue elif k == 'name': graph.node[node_id]['label'] = v continue elif k == 'type': graph.node[node_id]['label'] = v continue sub_id = json_node(graph, v, prefix + ['%s' % k]) if sub_id: graph.add_edge(node_id, sub_id, label=('%s' % k)) else: if isinstance(element, basestring) and \ element.startswith('http'): element = element.split('/')[-1] graph.add_node(node_id, label=('%s' % str(element))) return node_id jd = self.to_json() graph = networkx.DiGraph() json_node(graph, jd, ['%s' % self.uuid]) return graph
[ "def", "to_graph", "(", "self", ")", ":", "def", "json_node", "(", "graph", ",", "element", ",", "prefix", ")", ":", "if", "not", "element", ":", "return", "None", "node_id", "=", "'|'", ".", "join", "(", "prefix", ")", "if", "isinstance", "(", "elem...
Return Statement as a networkx graph.
[ "Return", "Statement", "as", "a", "networkx", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L484-L523
19,409
sorgerlab/indra
indra/statements/statements.py
Statement.make_generic_copy
def make_generic_copy(self, deeply=False): """Make a new matching Statement with no provenance. All agents and other attributes besides evidence, belief, supports, and supported_by will be copied over, and a new uuid will be assigned. Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`. If `deeply` is set to True, all the attributes will be deep-copied, which is comparatively slow. Otherwise, attributes of this statement may be altered by changes to the new matching statement. """ if deeply: kwargs = deepcopy(self.__dict__) else: kwargs = self.__dict__.copy() for attr in ['evidence', 'belief', 'uuid', 'supports', 'supported_by', 'is_activation']: kwargs.pop(attr, None) for attr in ['_full_hash', '_shallow_hash']: my_hash = kwargs.pop(attr, None) my_shallow_hash = kwargs.pop(attr, None) for attr in self._agent_order: attr_value = kwargs.get(attr) if isinstance(attr_value, list): kwargs[attr] = sorted_agents(attr_value) new_instance = self.__class__(**kwargs) new_instance._full_hash = my_hash new_instance._shallow_hash = my_shallow_hash return new_instance
python
def make_generic_copy(self, deeply=False): if deeply: kwargs = deepcopy(self.__dict__) else: kwargs = self.__dict__.copy() for attr in ['evidence', 'belief', 'uuid', 'supports', 'supported_by', 'is_activation']: kwargs.pop(attr, None) for attr in ['_full_hash', '_shallow_hash']: my_hash = kwargs.pop(attr, None) my_shallow_hash = kwargs.pop(attr, None) for attr in self._agent_order: attr_value = kwargs.get(attr) if isinstance(attr_value, list): kwargs[attr] = sorted_agents(attr_value) new_instance = self.__class__(**kwargs) new_instance._full_hash = my_hash new_instance._shallow_hash = my_shallow_hash return new_instance
[ "def", "make_generic_copy", "(", "self", ",", "deeply", "=", "False", ")", ":", "if", "deeply", ":", "kwargs", "=", "deepcopy", "(", "self", ".", "__dict__", ")", "else", ":", "kwargs", "=", "self", ".", "__dict__", ".", "copy", "(", ")", "for", "att...
Make a new matching Statement with no provenance. All agents and other attributes besides evidence, belief, supports, and supported_by will be copied over, and a new uuid will be assigned. Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`. If `deeply` is set to True, all the attributes will be deep-copied, which is comparatively slow. Otherwise, attributes of this statement may be altered by changes to the new matching statement.
[ "Make", "a", "new", "matching", "Statement", "with", "no", "provenance", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L525-L553
19,410
sorgerlab/indra
indra/databases/lincs_client.py
load_lincs_csv
def load_lincs_csv(url): """Helper function to turn csv rows into dicts.""" resp = requests.get(url, params={'output_type': '.csv'}, timeout=120) resp.raise_for_status() if sys.version_info[0] < 3: csv_io = BytesIO(resp.content) else: csv_io = StringIO(resp.text) data_rows = list(read_unicode_csv_fileobj(csv_io, delimiter=',')) headers = data_rows[0] return [{header: val for header, val in zip(headers, line_elements)} for line_elements in data_rows[1:]]
python
def load_lincs_csv(url): resp = requests.get(url, params={'output_type': '.csv'}, timeout=120) resp.raise_for_status() if sys.version_info[0] < 3: csv_io = BytesIO(resp.content) else: csv_io = StringIO(resp.text) data_rows = list(read_unicode_csv_fileobj(csv_io, delimiter=',')) headers = data_rows[0] return [{header: val for header, val in zip(headers, line_elements)} for line_elements in data_rows[1:]]
[ "def", "load_lincs_csv", "(", "url", ")", ":", "resp", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "{", "'output_type'", ":", "'.csv'", "}", ",", "timeout", "=", "120", ")", "resp", ".", "raise_for_status", "(", ")", "if", "sys", "."...
Helper function to turn csv rows into dicts.
[ "Helper", "function", "to", "turn", "csv", "rows", "into", "dicts", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L146-L157
19,411
sorgerlab/indra
indra/databases/lincs_client.py
LincsClient.get_small_molecule_name
def get_small_molecule_name(self, hms_lincs_id): """Get the name of a small molecule from the LINCS sm metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID of the small molecule. Returns ------- str The name of the small molecule. """ entry = self._get_entry_by_id(self._sm_data, hms_lincs_id) if not entry: return None name = entry['Name'] return name
python
def get_small_molecule_name(self, hms_lincs_id): entry = self._get_entry_by_id(self._sm_data, hms_lincs_id) if not entry: return None name = entry['Name'] return name
[ "def", "get_small_molecule_name", "(", "self", ",", "hms_lincs_id", ")", ":", "entry", "=", "self", ".", "_get_entry_by_id", "(", "self", ".", "_sm_data", ",", "hms_lincs_id", ")", "if", "not", "entry", ":", "return", "None", "name", "=", "entry", "[", "'N...
Get the name of a small molecule from the LINCS sm metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID of the small molecule. Returns ------- str The name of the small molecule.
[ "Get", "the", "name", "of", "a", "small", "molecule", "from", "the", "LINCS", "sm", "metadata", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L35-L52
19,412
sorgerlab/indra
indra/databases/lincs_client.py
LincsClient.get_small_molecule_refs
def get_small_molecule_refs(self, hms_lincs_id): """Get the id refs of a small molecule from the LINCS sm metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID of the small molecule. Returns ------- dict A dictionary of references. """ refs = {'HMS-LINCS': hms_lincs_id} entry = self._get_entry_by_id(self._sm_data, hms_lincs_id) # If there is no entry for this ID if not entry: return refs # If there is an entry then fill up the refs with existing values mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID', pubchem='PubChem CID', lincs='LINCS ID') for k, v in mappings.items(): if entry.get(v): refs[k.upper()] = entry.get(v) return refs
python
def get_small_molecule_refs(self, hms_lincs_id): refs = {'HMS-LINCS': hms_lincs_id} entry = self._get_entry_by_id(self._sm_data, hms_lincs_id) # If there is no entry for this ID if not entry: return refs # If there is an entry then fill up the refs with existing values mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID', pubchem='PubChem CID', lincs='LINCS ID') for k, v in mappings.items(): if entry.get(v): refs[k.upper()] = entry.get(v) return refs
[ "def", "get_small_molecule_refs", "(", "self", ",", "hms_lincs_id", ")", ":", "refs", "=", "{", "'HMS-LINCS'", ":", "hms_lincs_id", "}", "entry", "=", "self", ".", "_get_entry_by_id", "(", "self", ".", "_sm_data", ",", "hms_lincs_id", ")", "# If there is no entr...
Get the id refs of a small molecule from the LINCS sm metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID of the small molecule. Returns ------- dict A dictionary of references.
[ "Get", "the", "id", "refs", "of", "a", "small", "molecule", "from", "the", "LINCS", "sm", "metadata", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L54-L80
19,413
sorgerlab/indra
indra/databases/lincs_client.py
LincsClient.get_protein_refs
def get_protein_refs(self, hms_lincs_id): """Get the refs for a protein from the LINCs protein metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID for the protein Returns ------- dict A dictionary of protein references. """ # TODO: We could get phosphorylation states from the protein data. refs = {'HMS-LINCS': hms_lincs_id} entry = self._get_entry_by_id(self._prot_data, hms_lincs_id) # If there is no entry for this ID if not entry: return refs mappings = dict(egid='Gene ID', up='UniProt ID') for k, v in mappings.items(): if entry.get(v): refs[k.upper()] = entry.get(v) return refs
python
def get_protein_refs(self, hms_lincs_id): # TODO: We could get phosphorylation states from the protein data. refs = {'HMS-LINCS': hms_lincs_id} entry = self._get_entry_by_id(self._prot_data, hms_lincs_id) # If there is no entry for this ID if not entry: return refs mappings = dict(egid='Gene ID', up='UniProt ID') for k, v in mappings.items(): if entry.get(v): refs[k.upper()] = entry.get(v) return refs
[ "def", "get_protein_refs", "(", "self", ",", "hms_lincs_id", ")", ":", "# TODO: We could get phosphorylation states from the protein data.", "refs", "=", "{", "'HMS-LINCS'", ":", "hms_lincs_id", "}", "entry", "=", "self", ".", "_get_entry_by_id", "(", "self", ".", "_p...
Get the refs for a protein from the LINCs protein metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID for the protein Returns ------- dict A dictionary of protein references.
[ "Get", "the", "refs", "for", "a", "protein", "from", "the", "LINCs", "protein", "metadata", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L82-L106
19,414
sorgerlab/indra
indra/tools/gene_network.py
GeneNetwork.get_bel_stmts
def get_bel_stmts(self, filter=False): """Get relevant statements from the BEL large corpus. Performs a series of neighborhood queries and then takes the union of all the statements. Because the query process can take a long time for large gene lists, the resulting list of statements are cached in a pickle file with the filename `<basename>_bel_stmts.pkl`. If the pickle file is present, it is used by default; if not present, the queries are performed and the results are cached. Parameters ---------- filter : bool If True, includes only those statements that exclusively mention genes in :py:attr:`gene_list`. Default is False. Note that the full (unfiltered) set of statements are cached. Returns ------- list of :py:class:`indra.statements.Statement` List of INDRA statements extracted from the BEL large corpus. """ if self.basename is not None: bel_stmt_path = '%s_bel_stmts.pkl' % self.basename # Check for cached BEL stmt file if self.basename is not None and os.path.isfile(bel_stmt_path): logger.info("Loading BEL statements from %s" % bel_stmt_path) with open(bel_stmt_path, 'rb') as f: bel_statements = pickle.load(f) # No cache, so perform the queries else: bel_proc = bel.process_pybel_neighborhood(self.gene_list, network_file=self.bel_corpus) bel_statements = bel_proc.statements # Save to pickle file if we're caching if self.basename is not None: with open(bel_stmt_path, 'wb') as f: pickle.dump(bel_statements, f) # Optionally filter out statements not involving only our gene set if filter: if len(self.gene_list) > 1: bel_statements = ac.filter_gene_list(bel_statements, self.gene_list, 'all') return bel_statements
python
def get_bel_stmts(self, filter=False): if self.basename is not None: bel_stmt_path = '%s_bel_stmts.pkl' % self.basename # Check for cached BEL stmt file if self.basename is not None and os.path.isfile(bel_stmt_path): logger.info("Loading BEL statements from %s" % bel_stmt_path) with open(bel_stmt_path, 'rb') as f: bel_statements = pickle.load(f) # No cache, so perform the queries else: bel_proc = bel.process_pybel_neighborhood(self.gene_list, network_file=self.bel_corpus) bel_statements = bel_proc.statements # Save to pickle file if we're caching if self.basename is not None: with open(bel_stmt_path, 'wb') as f: pickle.dump(bel_statements, f) # Optionally filter out statements not involving only our gene set if filter: if len(self.gene_list) > 1: bel_statements = ac.filter_gene_list(bel_statements, self.gene_list, 'all') return bel_statements
[ "def", "get_bel_stmts", "(", "self", ",", "filter", "=", "False", ")", ":", "if", "self", ".", "basename", "is", "not", "None", ":", "bel_stmt_path", "=", "'%s_bel_stmts.pkl'", "%", "self", ".", "basename", "# Check for cached BEL stmt file", "if", "self", "."...
Get relevant statements from the BEL large corpus. Performs a series of neighborhood queries and then takes the union of all the statements. Because the query process can take a long time for large gene lists, the resulting list of statements are cached in a pickle file with the filename `<basename>_bel_stmts.pkl`. If the pickle file is present, it is used by default; if not present, the queries are performed and the results are cached. Parameters ---------- filter : bool If True, includes only those statements that exclusively mention genes in :py:attr:`gene_list`. Default is False. Note that the full (unfiltered) set of statements are cached. Returns ------- list of :py:class:`indra.statements.Statement` List of INDRA statements extracted from the BEL large corpus.
[ "Get", "relevant", "statements", "from", "the", "BEL", "large", "corpus", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L51-L94
19,415
sorgerlab/indra
indra/tools/gene_network.py
GeneNetwork.get_biopax_stmts
def get_biopax_stmts(self, filter=False, query='pathsbetween', database_filter=None): """Get relevant statements from Pathway Commons. Performs a "paths between" query for the genes in :py:attr:`gene_list` and uses the results to build statements. This function caches two files: the list of statements built from the query, which is cached in `<basename>_biopax_stmts.pkl`, and the OWL file returned by the Pathway Commons Web API, which is cached in `<basename>_pc_pathsbetween.owl`. If these cached files are found, then the results are returned based on the cached file and Pathway Commons is not queried again. Parameters ---------- filter : Optional[bool] If True, includes only those statements that exclusively mention genes in :py:attr:`gene_list`. Default is False. query : Optional[str] Defined what type of query is executed. The two options are 'pathsbetween' which finds paths between the given list of genes and only works if more than 1 gene is given, and 'neighborhood' which searches the immediate neighborhood of each given gene. Note that for pathsbetween queries with more thatn 60 genes, the query will be executed in multiple blocks for scalability. database_filter: Optional[list[str]] A list of PathwayCommons databases to include in the query. Returns ------- list of :py:class:`indra.statements.Statement` List of INDRA statements extracted from Pathway Commons. 
""" # If we're using a cache, initialize the appropriate filenames if self.basename is not None: biopax_stmt_path = '%s_biopax_stmts.pkl' % self.basename biopax_ras_owl_path = '%s_pc_pathsbetween.owl' % self.basename # Check for cached Biopax stmt file at the given path # if it's there, return the statements from the cache if self.basename is not None and os.path.isfile(biopax_stmt_path): logger.info("Loading Biopax statements from %s" % biopax_stmt_path) with open(biopax_stmt_path, 'rb') as f: bp_statements = pickle.load(f) return bp_statements # Check for cached file before querying Pathway Commons Web API if self.basename is not None and os.path.isfile(biopax_ras_owl_path): logger.info("Loading Biopax from OWL file %s" % biopax_ras_owl_path) bp = biopax.process_owl(biopax_ras_owl_path) # OWL file not found; do query and save to file else: if (len(self.gene_list) < 2) and (query == 'pathsbetween'): logger.warning('Using neighborhood query for one gene.') query = 'neighborhood' if query == 'pathsbetween': if len(self.gene_list) > 60: block_size = 60 else: block_size = None bp = biopax.process_pc_pathsbetween(self.gene_list, database_filter=database_filter, block_size=block_size) elif query == 'neighborhood': bp = biopax.process_pc_neighborhood(self.gene_list, database_filter=database_filter) else: logger.error('Invalid query type: %s' % query) return [] # Save the file if we're caching if self.basename is not None: bp.save_model(biopax_ras_owl_path) # Save statements to pickle file if we're caching if self.basename is not None: with open(biopax_stmt_path, 'wb') as f: pickle.dump(bp.statements, f) # Optionally filter out statements not involving only our gene set if filter: policy = 'one' if len(self.gene_list) > 1 else 'all' stmts = ac.filter_gene_list(bp.statements, self.gene_list, policy) else: stmts = bp.statements return stmts
python
def get_biopax_stmts(self, filter=False, query='pathsbetween', database_filter=None): # If we're using a cache, initialize the appropriate filenames if self.basename is not None: biopax_stmt_path = '%s_biopax_stmts.pkl' % self.basename biopax_ras_owl_path = '%s_pc_pathsbetween.owl' % self.basename # Check for cached Biopax stmt file at the given path # if it's there, return the statements from the cache if self.basename is not None and os.path.isfile(biopax_stmt_path): logger.info("Loading Biopax statements from %s" % biopax_stmt_path) with open(biopax_stmt_path, 'rb') as f: bp_statements = pickle.load(f) return bp_statements # Check for cached file before querying Pathway Commons Web API if self.basename is not None and os.path.isfile(biopax_ras_owl_path): logger.info("Loading Biopax from OWL file %s" % biopax_ras_owl_path) bp = biopax.process_owl(biopax_ras_owl_path) # OWL file not found; do query and save to file else: if (len(self.gene_list) < 2) and (query == 'pathsbetween'): logger.warning('Using neighborhood query for one gene.') query = 'neighborhood' if query == 'pathsbetween': if len(self.gene_list) > 60: block_size = 60 else: block_size = None bp = biopax.process_pc_pathsbetween(self.gene_list, database_filter=database_filter, block_size=block_size) elif query == 'neighborhood': bp = biopax.process_pc_neighborhood(self.gene_list, database_filter=database_filter) else: logger.error('Invalid query type: %s' % query) return [] # Save the file if we're caching if self.basename is not None: bp.save_model(biopax_ras_owl_path) # Save statements to pickle file if we're caching if self.basename is not None: with open(biopax_stmt_path, 'wb') as f: pickle.dump(bp.statements, f) # Optionally filter out statements not involving only our gene set if filter: policy = 'one' if len(self.gene_list) > 1 else 'all' stmts = ac.filter_gene_list(bp.statements, self.gene_list, policy) else: stmts = bp.statements return stmts
[ "def", "get_biopax_stmts", "(", "self", ",", "filter", "=", "False", ",", "query", "=", "'pathsbetween'", ",", "database_filter", "=", "None", ")", ":", "# If we're using a cache, initialize the appropriate filenames", "if", "self", ".", "basename", "is", "not", "No...
Get relevant statements from Pathway Commons. Performs a "paths between" query for the genes in :py:attr:`gene_list` and uses the results to build statements. This function caches two files: the list of statements built from the query, which is cached in `<basename>_biopax_stmts.pkl`, and the OWL file returned by the Pathway Commons Web API, which is cached in `<basename>_pc_pathsbetween.owl`. If these cached files are found, then the results are returned based on the cached file and Pathway Commons is not queried again. Parameters ---------- filter : Optional[bool] If True, includes only those statements that exclusively mention genes in :py:attr:`gene_list`. Default is False. query : Optional[str] Defined what type of query is executed. The two options are 'pathsbetween' which finds paths between the given list of genes and only works if more than 1 gene is given, and 'neighborhood' which searches the immediate neighborhood of each given gene. Note that for pathsbetween queries with more thatn 60 genes, the query will be executed in multiple blocks for scalability. database_filter: Optional[list[str]] A list of PathwayCommons databases to include in the query. Returns ------- list of :py:class:`indra.statements.Statement` List of INDRA statements extracted from Pathway Commons.
[ "Get", "relevant", "statements", "from", "Pathway", "Commons", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L96-L175
19,416
sorgerlab/indra
indra/tools/gene_network.py
GeneNetwork.get_statements
def get_statements(self, filter=False): """Return the combined list of statements from BEL and Pathway Commons. Internally calls :py:meth:`get_biopax_stmts` and :py:meth:`get_bel_stmts`. Parameters ---------- filter : bool If True, includes only those statements that exclusively mention genes in :py:attr:`gene_list`. Default is False. Returns ------- list of :py:class:`indra.statements.Statement` List of INDRA statements extracted the BEL large corpus and Pathway Commons. """ bp_stmts = self.get_biopax_stmts(filter=filter) bel_stmts = self.get_bel_stmts(filter=filter) return bp_stmts + bel_stmts
python
def get_statements(self, filter=False): bp_stmts = self.get_biopax_stmts(filter=filter) bel_stmts = self.get_bel_stmts(filter=filter) return bp_stmts + bel_stmts
[ "def", "get_statements", "(", "self", ",", "filter", "=", "False", ")", ":", "bp_stmts", "=", "self", ".", "get_biopax_stmts", "(", "filter", "=", "filter", ")", "bel_stmts", "=", "self", ".", "get_bel_stmts", "(", "filter", "=", "filter", ")", "return", ...
Return the combined list of statements from BEL and Pathway Commons. Internally calls :py:meth:`get_biopax_stmts` and :py:meth:`get_bel_stmts`. Parameters ---------- filter : bool If True, includes only those statements that exclusively mention genes in :py:attr:`gene_list`. Default is False. Returns ------- list of :py:class:`indra.statements.Statement` List of INDRA statements extracted the BEL large corpus and Pathway Commons.
[ "Return", "the", "combined", "list", "of", "statements", "from", "BEL", "and", "Pathway", "Commons", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L177-L198
19,417
sorgerlab/indra
indra/tools/gene_network.py
GeneNetwork.run_preassembly
def run_preassembly(self, stmts, print_summary=True): """Run complete preassembly procedure on the given statements. Results are returned as a dict and stored in the attribute :py:attr:`results`. They are also saved in the pickle file `<basename>_results.pkl`. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Statements to preassemble. print_summary : bool If True (default), prints a summary of the preassembly process to the console. Returns ------- dict A dict containing the following entries: - `raw`: the starting set of statements before preassembly. - `duplicates1`: statements after initial de-duplication. - `valid`: statements found to have valid modification sites. - `mapped`: mapped statements (list of :py:class:`indra.preassembler.sitemapper.MappedStatement`). - `mapped_stmts`: combined list of valid statements and statements after mapping. - `duplicates2`: statements resulting from de-duplication of the statements in `mapped_stmts`. - `related2`: top-level statements after combining the statements in `duplicates2`. 
""" # First round of preassembly: remove duplicates before sitemapping pa1 = Preassembler(hierarchies, stmts) logger.info("Combining duplicates") pa1.combine_duplicates() # Map sites logger.info("Mapping sites") (valid, mapped) = sm.map_sites(pa1.unique_stmts) # Combine valid and successfully mapped statements into single list correctly_mapped_stmts = [] for ms in mapped: if all([True if mm[1] is not None else False for mm in ms.mapped_mods]): correctly_mapped_stmts.append(ms.mapped_stmt) mapped_stmts = valid + correctly_mapped_stmts # Second round of preassembly: de-duplicate and combine related pa2 = Preassembler(hierarchies, mapped_stmts) logger.info("Combining duplicates again") pa2.combine_duplicates() pa2.combine_related() # Fill out the results dict self.results = {} self.results['raw'] = stmts self.results['duplicates1'] = pa1.unique_stmts self.results['valid'] = valid self.results['mapped'] = mapped self.results['mapped_stmts'] = mapped_stmts self.results['duplicates2'] = pa2.unique_stmts self.results['related2'] = pa2.related_stmts # Print summary if print_summary: logger.info("\nStarting number of statements: %d" % len(stmts)) logger.info("After duplicate removal: %d" % len(pa1.unique_stmts)) logger.info("Unique statements with valid sites: %d" % len(valid)) logger.info("Unique statements with invalid sites: %d" % len(mapped)) logger.info("After post-mapping duplicate removal: %d" % len(pa2.unique_stmts)) logger.info("After combining related statements: %d" % len(pa2.related_stmts)) # Save the results if we're caching if self.basename is not None: results_filename = '%s_results.pkl' % self.basename with open(results_filename, 'wb') as f: pickle.dump(self.results, f) return self.results
python
def run_preassembly(self, stmts, print_summary=True): # First round of preassembly: remove duplicates before sitemapping pa1 = Preassembler(hierarchies, stmts) logger.info("Combining duplicates") pa1.combine_duplicates() # Map sites logger.info("Mapping sites") (valid, mapped) = sm.map_sites(pa1.unique_stmts) # Combine valid and successfully mapped statements into single list correctly_mapped_stmts = [] for ms in mapped: if all([True if mm[1] is not None else False for mm in ms.mapped_mods]): correctly_mapped_stmts.append(ms.mapped_stmt) mapped_stmts = valid + correctly_mapped_stmts # Second round of preassembly: de-duplicate and combine related pa2 = Preassembler(hierarchies, mapped_stmts) logger.info("Combining duplicates again") pa2.combine_duplicates() pa2.combine_related() # Fill out the results dict self.results = {} self.results['raw'] = stmts self.results['duplicates1'] = pa1.unique_stmts self.results['valid'] = valid self.results['mapped'] = mapped self.results['mapped_stmts'] = mapped_stmts self.results['duplicates2'] = pa2.unique_stmts self.results['related2'] = pa2.related_stmts # Print summary if print_summary: logger.info("\nStarting number of statements: %d" % len(stmts)) logger.info("After duplicate removal: %d" % len(pa1.unique_stmts)) logger.info("Unique statements with valid sites: %d" % len(valid)) logger.info("Unique statements with invalid sites: %d" % len(mapped)) logger.info("After post-mapping duplicate removal: %d" % len(pa2.unique_stmts)) logger.info("After combining related statements: %d" % len(pa2.related_stmts)) # Save the results if we're caching if self.basename is not None: results_filename = '%s_results.pkl' % self.basename with open(results_filename, 'wb') as f: pickle.dump(self.results, f) return self.results
[ "def", "run_preassembly", "(", "self", ",", "stmts", ",", "print_summary", "=", "True", ")", ":", "# First round of preassembly: remove duplicates before sitemapping", "pa1", "=", "Preassembler", "(", "hierarchies", ",", "stmts", ")", "logger", ".", "info", "(", "\"...
Run complete preassembly procedure on the given statements. Results are returned as a dict and stored in the attribute :py:attr:`results`. They are also saved in the pickle file `<basename>_results.pkl`. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Statements to preassemble. print_summary : bool If True (default), prints a summary of the preassembly process to the console. Returns ------- dict A dict containing the following entries: - `raw`: the starting set of statements before preassembly. - `duplicates1`: statements after initial de-duplication. - `valid`: statements found to have valid modification sites. - `mapped`: mapped statements (list of :py:class:`indra.preassembler.sitemapper.MappedStatement`). - `mapped_stmts`: combined list of valid statements and statements after mapping. - `duplicates2`: statements resulting from de-duplication of the statements in `mapped_stmts`. - `related2`: top-level statements after combining the statements in `duplicates2`.
[ "Run", "complete", "preassembly", "procedure", "on", "the", "given", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L200-L276
19,418
sorgerlab/indra
indra/sources/hume/processor.py
_get_grounding
def _get_grounding(entity): """Return Hume grounding.""" db_refs = {'TEXT': entity['text']} groundings = entity.get('grounding') if not groundings: return db_refs def get_ont_concept(concept): """Strip slash, replace spaces and remove example leafs.""" # In the WM context, groundings have no URL prefix and start with / # The following block does some special handling of these groundings. if concept.startswith('/'): concept = concept[1:] concept = concept.replace(' ', '_') # We eliminate any entries that aren't ontology categories # these are typically "examples" corresponding to the category while concept not in hume_onto_entries: parts = concept.split('/') if len(parts) == 1: break concept = '/'.join(parts[:-1]) # Otherwise we just return the concept as is return concept # Basic collection of grounding entries raw_grounding_entries = [(get_ont_concept(g['ontologyConcept']), g['value']) for g in groundings] # Occasionally we get duplicate grounding entries, we want to # eliminate those here grounding_dict = {} for cat, score in raw_grounding_entries: if (cat not in grounding_dict) or (score > grounding_dict[cat]): grounding_dict[cat] = score # Then we sort the list in reverse order according to score # Sometimes the exact same score appears multiple times, in this # case we prioritize by the "depth" of the grounding which is # obtained by looking at the number of /-s in the entry. # However, there are still cases where the grounding depth and the score # are the same. In these cases we just sort alphabetically. grounding_entries = sorted(list(set(grounding_dict.items())), key=lambda x: (x[1], x[0].count('/'), x[0]), reverse=True) # We could get an empty list here in which case we don't add the # grounding if grounding_entries: db_refs['HUME'] = grounding_entries return db_refs
python
def _get_grounding(entity): db_refs = {'TEXT': entity['text']} groundings = entity.get('grounding') if not groundings: return db_refs def get_ont_concept(concept): """Strip slash, replace spaces and remove example leafs.""" # In the WM context, groundings have no URL prefix and start with / # The following block does some special handling of these groundings. if concept.startswith('/'): concept = concept[1:] concept = concept.replace(' ', '_') # We eliminate any entries that aren't ontology categories # these are typically "examples" corresponding to the category while concept not in hume_onto_entries: parts = concept.split('/') if len(parts) == 1: break concept = '/'.join(parts[:-1]) # Otherwise we just return the concept as is return concept # Basic collection of grounding entries raw_grounding_entries = [(get_ont_concept(g['ontologyConcept']), g['value']) for g in groundings] # Occasionally we get duplicate grounding entries, we want to # eliminate those here grounding_dict = {} for cat, score in raw_grounding_entries: if (cat not in grounding_dict) or (score > grounding_dict[cat]): grounding_dict[cat] = score # Then we sort the list in reverse order according to score # Sometimes the exact same score appears multiple times, in this # case we prioritize by the "depth" of the grounding which is # obtained by looking at the number of /-s in the entry. # However, there are still cases where the grounding depth and the score # are the same. In these cases we just sort alphabetically. grounding_entries = sorted(list(set(grounding_dict.items())), key=lambda x: (x[1], x[0].count('/'), x[0]), reverse=True) # We could get an empty list here in which case we don't add the # grounding if grounding_entries: db_refs['HUME'] = grounding_entries return db_refs
[ "def", "_get_grounding", "(", "entity", ")", ":", "db_refs", "=", "{", "'TEXT'", ":", "entity", "[", "'text'", "]", "}", "groundings", "=", "entity", ".", "get", "(", "'grounding'", ")", "if", "not", "groundings", ":", "return", "db_refs", "def", "get_on...
Return Hume grounding.
[ "Return", "Hume", "grounding", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L230-L277
19,419
sorgerlab/indra
indra/sources/hume/processor.py
HumeJsonLdProcessor._find_relations
def _find_relations(self): """Find all relevant relation elements and return them in a list.""" # Get all extractions extractions = \ list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]")) # Get relations from extractions relations = [] for e in extractions: label_set = set(e.get('labels', [])) # If this is a DirectedRelation if 'DirectedRelation' in label_set: self.relation_dict[e['@id']] = e subtype = e.get('subtype') if any(t in subtype for t in polarities.keys()): relations.append((subtype, e)) # If this is an Event or an Entity if {'Event', 'Entity'} & label_set: self.concept_dict[e['@id']] = e if not relations and not self.relation_dict: logger.info("No relations found.") else: logger.info('%d relations of types %s found' % (len(relations), ', '.join(polarities.keys()))) logger.info('%d relations in dict.' % len(self.relation_dict)) logger.info('%d concepts found.' % len(self.concept_dict)) return relations
python
def _find_relations(self): # Get all extractions extractions = \ list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]")) # Get relations from extractions relations = [] for e in extractions: label_set = set(e.get('labels', [])) # If this is a DirectedRelation if 'DirectedRelation' in label_set: self.relation_dict[e['@id']] = e subtype = e.get('subtype') if any(t in subtype for t in polarities.keys()): relations.append((subtype, e)) # If this is an Event or an Entity if {'Event', 'Entity'} & label_set: self.concept_dict[e['@id']] = e if not relations and not self.relation_dict: logger.info("No relations found.") else: logger.info('%d relations of types %s found' % (len(relations), ', '.join(polarities.keys()))) logger.info('%d relations in dict.' % len(self.relation_dict)) logger.info('%d concepts found.' % len(self.concept_dict)) return relations
[ "def", "_find_relations", "(", "self", ")", ":", "# Get all extractions", "extractions", "=", "list", "(", "self", ".", "tree", ".", "execute", "(", "\"$.extractions[(@.@type is 'Extraction')]\"", ")", ")", "# Get relations from extractions", "relations", "=", "[", "]...
Find all relevant relation elements and return them in a list.
[ "Find", "all", "relevant", "relation", "elements", "and", "return", "them", "in", "a", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L68-L95
19,420
sorgerlab/indra
indra/sources/hume/processor.py
HumeJsonLdProcessor._get_documents
def _get_documents(self): """Populate sentences attribute with a dict keyed by document id.""" documents = self.tree.execute("$.documents") for doc in documents: sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])} self.document_dict[doc['@id']] = {'sentences': sentences, 'location': doc['location']}
python
def _get_documents(self): documents = self.tree.execute("$.documents") for doc in documents: sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])} self.document_dict[doc['@id']] = {'sentences': sentences, 'location': doc['location']}
[ "def", "_get_documents", "(", "self", ")", ":", "documents", "=", "self", ".", "tree", ".", "execute", "(", "\"$.documents\"", ")", "for", "doc", "in", "documents", ":", "sentences", "=", "{", "s", "[", "'@id'", "]", ":", "s", "[", "'text'", "]", "fo...
Populate sentences attribute with a dict keyed by document id.
[ "Populate", "sentences", "attribute", "with", "a", "dict", "keyed", "by", "document", "id", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L97-L103
19,421
sorgerlab/indra
indra/sources/hume/processor.py
HumeJsonLdProcessor._make_context
def _make_context(self, entity): """Get place and time info from the json for this entity.""" loc_context = None time_context = None # Look for time and place contexts. for argument in entity["arguments"]: if argument["type"] == "place": entity_id = argument["value"]["@id"] loc_entity = self.concept_dict[entity_id] place = loc_entity.get("canonicalName") if not place: place = loc_entity['text'] geo_id = loc_entity.get('geoname_id') loc_context = RefContext(name=place, db_refs={"GEOID": geo_id}) if argument["type"] == "time": entity_id = argument["value"]["@id"] temporal_entity = self.concept_dict[entity_id] text = temporal_entity['mentions'][0]['text'] if len(temporal_entity.get("timeInterval", [])) < 1: time_context = TimeContext(text=text) continue time = temporal_entity["timeInterval"][0] start = datetime.strptime(time['start'], '%Y-%m-%dT%H:%M') end = datetime.strptime(time['end'], '%Y-%m-%dT%H:%M') duration = int(time['duration']) time_context = TimeContext(text=text, start=start, end=end, duration=duration) # Put context together context = None if loc_context or time_context: context = WorldContext(time=time_context, geo_location=loc_context) return context
python
def _make_context(self, entity): loc_context = None time_context = None # Look for time and place contexts. for argument in entity["arguments"]: if argument["type"] == "place": entity_id = argument["value"]["@id"] loc_entity = self.concept_dict[entity_id] place = loc_entity.get("canonicalName") if not place: place = loc_entity['text'] geo_id = loc_entity.get('geoname_id') loc_context = RefContext(name=place, db_refs={"GEOID": geo_id}) if argument["type"] == "time": entity_id = argument["value"]["@id"] temporal_entity = self.concept_dict[entity_id] text = temporal_entity['mentions'][0]['text'] if len(temporal_entity.get("timeInterval", [])) < 1: time_context = TimeContext(text=text) continue time = temporal_entity["timeInterval"][0] start = datetime.strptime(time['start'], '%Y-%m-%dT%H:%M') end = datetime.strptime(time['end'], '%Y-%m-%dT%H:%M') duration = int(time['duration']) time_context = TimeContext(text=text, start=start, end=end, duration=duration) # Put context together context = None if loc_context or time_context: context = WorldContext(time=time_context, geo_location=loc_context) return context
[ "def", "_make_context", "(", "self", ",", "entity", ")", ":", "loc_context", "=", "None", "time_context", "=", "None", "# Look for time and place contexts.", "for", "argument", "in", "entity", "[", "\"arguments\"", "]", ":", "if", "argument", "[", "\"type\"", "]...
Get place and time info from the json for this entity.
[ "Get", "place", "and", "time", "info", "from", "the", "json", "for", "this", "entity", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L105-L139
19,422
sorgerlab/indra
indra/sources/hume/processor.py
HumeJsonLdProcessor._make_concept
def _make_concept(self, entity): """Return Concept from a Hume entity.""" # Use the canonical name as the name of the Concept by default name = self._sanitize(entity['canonicalName']) # But if there is a trigger head text, we prefer that since # it almost always results in a cleaner name # This is removed for now since the head word seems to be too # minimal for some concepts, e.g. it gives us only "security" # for "food security". """ trigger = entity.get('trigger') if trigger is not None: head_text = trigger.get('head text') if head_text is not None: name = head_text """ # Save raw text and Hume scored groundings as db_refs db_refs = _get_grounding(entity) concept = Concept(name, db_refs=db_refs) metadata = {arg['type']: arg['value']['@id'] for arg in entity['arguments']} return concept, metadata
python
def _make_concept(self, entity): # Use the canonical name as the name of the Concept by default name = self._sanitize(entity['canonicalName']) # But if there is a trigger head text, we prefer that since # it almost always results in a cleaner name # This is removed for now since the head word seems to be too # minimal for some concepts, e.g. it gives us only "security" # for "food security". """ trigger = entity.get('trigger') if trigger is not None: head_text = trigger.get('head text') if head_text is not None: name = head_text """ # Save raw text and Hume scored groundings as db_refs db_refs = _get_grounding(entity) concept = Concept(name, db_refs=db_refs) metadata = {arg['type']: arg['value']['@id'] for arg in entity['arguments']} return concept, metadata
[ "def", "_make_concept", "(", "self", ",", "entity", ")", ":", "# Use the canonical name as the name of the Concept by default", "name", "=", "self", ".", "_sanitize", "(", "entity", "[", "'canonicalName'", "]", ")", "# But if there is a trigger head text, we prefer that since...
Return Concept from a Hume entity.
[ "Return", "Concept", "from", "a", "Hume", "entity", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L141-L163
19,423
sorgerlab/indra
indra/sources/hume/processor.py
HumeJsonLdProcessor._get_event_and_context
def _get_event_and_context(self, event, arg_type): """Return an INDRA Event based on an event entry.""" eid = _choose_id(event, arg_type) ev = self.concept_dict[eid] concept, metadata = self._make_concept(ev) ev_delta = {'adjectives': [], 'states': get_states(ev), 'polarity': get_polarity(ev)} context = self._make_context(ev) event_obj = Event(concept, delta=ev_delta, context=context) return event_obj
python
def _get_event_and_context(self, event, arg_type): eid = _choose_id(event, arg_type) ev = self.concept_dict[eid] concept, metadata = self._make_concept(ev) ev_delta = {'adjectives': [], 'states': get_states(ev), 'polarity': get_polarity(ev)} context = self._make_context(ev) event_obj = Event(concept, delta=ev_delta, context=context) return event_obj
[ "def", "_get_event_and_context", "(", "self", ",", "event", ",", "arg_type", ")", ":", "eid", "=", "_choose_id", "(", "event", ",", "arg_type", ")", "ev", "=", "self", ".", "concept_dict", "[", "eid", "]", "concept", ",", "metadata", "=", "self", ".", ...
Return an INDRA Event based on an event entry.
[ "Return", "an", "INDRA", "Event", "based", "on", "an", "event", "entry", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L165-L175
19,424
sorgerlab/indra
indra/sources/hume/processor.py
HumeJsonLdProcessor._get_evidence
def _get_evidence(self, event, adjectives): """Return the Evidence object for the INDRA Statement.""" provenance = event.get('provenance') # First try looking up the full sentence through provenance doc_id = provenance[0]['document']['@id'] sent_id = provenance[0]['sentence'] text = self.document_dict[doc_id]['sentences'][sent_id] text = self._sanitize(text) bounds = [provenance[0]['documentCharPositions'][k] for k in ['start', 'end']] annotations = { 'found_by': event.get('rule'), 'provenance': provenance, 'event_type': os.path.basename(event.get('type')), 'adjectives': adjectives, 'bounds': bounds } location = self.document_dict[doc_id]['location'] ev = Evidence(source_api='hume', text=text, annotations=annotations, pmid=location) return [ev]
python
def _get_evidence(self, event, adjectives): provenance = event.get('provenance') # First try looking up the full sentence through provenance doc_id = provenance[0]['document']['@id'] sent_id = provenance[0]['sentence'] text = self.document_dict[doc_id]['sentences'][sent_id] text = self._sanitize(text) bounds = [provenance[0]['documentCharPositions'][k] for k in ['start', 'end']] annotations = { 'found_by': event.get('rule'), 'provenance': provenance, 'event_type': os.path.basename(event.get('type')), 'adjectives': adjectives, 'bounds': bounds } location = self.document_dict[doc_id]['location'] ev = Evidence(source_api='hume', text=text, annotations=annotations, pmid=location) return [ev]
[ "def", "_get_evidence", "(", "self", ",", "event", ",", "adjectives", ")", ":", "provenance", "=", "event", ".", "get", "(", "'provenance'", ")", "# First try looking up the full sentence through provenance", "doc_id", "=", "provenance", "[", "0", "]", "[", "'docu...
Return the Evidence object for the INDRA Statement.
[ "Return", "the", "Evidence", "object", "for", "the", "INDRA", "Statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L177-L199
19,425
sorgerlab/indra
indra/sources/medscan/processor.py
_is_statement_in_list
def _is_statement_in_list(new_stmt, old_stmt_list): """Return True of given statement is equivalent to on in a list Determines whether the statement is equivalent to any statement in the given list of statements, with equivalency determined by Statement's equals method. Parameters ---------- new_stmt : indra.statements.Statement The statement to compare with old_stmt_list : list[indra.statements.Statement] The statement list whose entries we compare with statement Returns ------- in_list : bool True if statement is equivalent to any statements in the list """ for old_stmt in old_stmt_list: if old_stmt.equals(new_stmt): return True elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt): # If we're comparing a complex, make sure the agents are sorted. if isinstance(new_stmt, Complex): agent_pairs = zip(old_stmt.sorted_members(), new_stmt.sorted_members()) else: agent_pairs = zip(old_stmt.agent_list(), new_stmt.agent_list()) # Compare agent-by-agent. for ag_old, ag_new in agent_pairs: s_old = set(ag_old.db_refs.items()) s_new = set(ag_new.db_refs.items()) # If they're equal this isn't the one we're interested in. if s_old == s_new: continue # If the new statement has nothing new to offer, just ignore it if s_old > s_new: return True # If the new statement does have something new, add it to the # existing statement. And then ignore it. if s_new > s_old: ag_old.db_refs.update(ag_new.db_refs) return True # If this is a case where different CHEBI ids were mapped to # the same entity, set the agent name to the CHEBI id. if _fix_different_refs(ag_old, ag_new, 'CHEBI'): # Check to make sure the newly described statement does # not match anything. return _is_statement_in_list(new_stmt, old_stmt_list) # If this is a case, like above, but with UMLS IDs, do the same # thing as above. This will likely never be improved. if _fix_different_refs(ag_old, ag_new, 'UMLS'): # Check to make sure the newly described statement does # not match anything. 
return _is_statement_in_list(new_stmt, old_stmt_list) logger.warning("Found an unexpected kind of duplicate. " "Ignoring it.") return True # This means all the agents matched, which can happen if the # original issue was the ordering of agents in a Complex. return True elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True): # Check to see if we can improve the annotation of the existing # statement. e_old = old_stmt.evidence[0] e_new = new_stmt.evidence[0] if e_old.annotations['last_verb'] is None: e_old.annotations['last_verb'] = e_new.annotations['last_verb'] # If the evidence is "the same", modulo annotations, just ignore it if e_old.get_source_hash(True) == e_new.get_source_hash(True): return True return False
python
def _is_statement_in_list(new_stmt, old_stmt_list): for old_stmt in old_stmt_list: if old_stmt.equals(new_stmt): return True elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt): # If we're comparing a complex, make sure the agents are sorted. if isinstance(new_stmt, Complex): agent_pairs = zip(old_stmt.sorted_members(), new_stmt.sorted_members()) else: agent_pairs = zip(old_stmt.agent_list(), new_stmt.agent_list()) # Compare agent-by-agent. for ag_old, ag_new in agent_pairs: s_old = set(ag_old.db_refs.items()) s_new = set(ag_new.db_refs.items()) # If they're equal this isn't the one we're interested in. if s_old == s_new: continue # If the new statement has nothing new to offer, just ignore it if s_old > s_new: return True # If the new statement does have something new, add it to the # existing statement. And then ignore it. if s_new > s_old: ag_old.db_refs.update(ag_new.db_refs) return True # If this is a case where different CHEBI ids were mapped to # the same entity, set the agent name to the CHEBI id. if _fix_different_refs(ag_old, ag_new, 'CHEBI'): # Check to make sure the newly described statement does # not match anything. return _is_statement_in_list(new_stmt, old_stmt_list) # If this is a case, like above, but with UMLS IDs, do the same # thing as above. This will likely never be improved. if _fix_different_refs(ag_old, ag_new, 'UMLS'): # Check to make sure the newly described statement does # not match anything. return _is_statement_in_list(new_stmt, old_stmt_list) logger.warning("Found an unexpected kind of duplicate. " "Ignoring it.") return True # This means all the agents matched, which can happen if the # original issue was the ordering of agents in a Complex. return True elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True): # Check to see if we can improve the annotation of the existing # statement. 
e_old = old_stmt.evidence[0] e_new = new_stmt.evidence[0] if e_old.annotations['last_verb'] is None: e_old.annotations['last_verb'] = e_new.annotations['last_verb'] # If the evidence is "the same", modulo annotations, just ignore it if e_old.get_source_hash(True) == e_new.get_source_hash(True): return True return False
[ "def", "_is_statement_in_list", "(", "new_stmt", ",", "old_stmt_list", ")", ":", "for", "old_stmt", "in", "old_stmt_list", ":", "if", "old_stmt", ".", "equals", "(", "new_stmt", ")", ":", "return", "True", "elif", "old_stmt", ".", "evidence_equals", "(", "new_...
Return True of given statement is equivalent to on in a list Determines whether the statement is equivalent to any statement in the given list of statements, with equivalency determined by Statement's equals method. Parameters ---------- new_stmt : indra.statements.Statement The statement to compare with old_stmt_list : list[indra.statements.Statement] The statement list whose entries we compare with statement Returns ------- in_list : bool True if statement is equivalent to any statements in the list
[ "Return", "True", "of", "given", "statement", "is", "equivalent", "to", "on", "in", "a", "list" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L62-L145
19,426
sorgerlab/indra
indra/sources/medscan/processor.py
normalize_medscan_name
def normalize_medscan_name(name): """Removes the "complex" and "complex complex" suffixes from a medscan agent name so that it better corresponds with the grounding map. Parameters ---------- name: str The Medscan agent name Returns ------- norm_name: str The Medscan agent name with the "complex" and "complex complex" suffixes removed. """ suffix = ' complex' for i in range(2): if name.endswith(suffix): name = name[:-len(suffix)] return name
python
def normalize_medscan_name(name): suffix = ' complex' for i in range(2): if name.endswith(suffix): name = name[:-len(suffix)] return name
[ "def", "normalize_medscan_name", "(", "name", ")", ":", "suffix", "=", "' complex'", "for", "i", "in", "range", "(", "2", ")", ":", "if", "name", ".", "endswith", "(", "suffix", ")", ":", "name", "=", "name", "[", ":", "-", "len", "(", "suffix", ")...
Removes the "complex" and "complex complex" suffixes from a medscan agent name so that it better corresponds with the grounding map. Parameters ---------- name: str The Medscan agent name Returns ------- norm_name: str The Medscan agent name with the "complex" and "complex complex" suffixes removed.
[ "Removes", "the", "complex", "and", "complex", "complex", "suffixes", "from", "a", "medscan", "agent", "name", "so", "that", "it", "better", "corresponds", "with", "the", "grounding", "map", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L893-L913
19,427
sorgerlab/indra
indra/sources/medscan/processor.py
_urn_to_db_refs
def _urn_to_db_refs(urn): """Converts a Medscan URN to an INDRA db_refs dictionary with grounding information. Parameters ---------- urn : str A Medscan URN Returns ------- db_refs : dict A dictionary with grounding information, mapping databases to database identifiers. If the Medscan URN is not recognized, returns an empty dictionary. db_name : str The Famplex name, if available; otherwise the HGNC name if available; otherwise None """ # Convert a urn to a db_refs dictionary if urn is None: return {}, None m = URN_PATT.match(urn) if m is None: return None, None urn_type, urn_id = m.groups() db_refs = {} db_name = None # TODO: support more types of URNs if urn_type == 'agi-cas': # Identifier is CAS, convert to CHEBI chebi_id = get_chebi_id_from_cas(urn_id) if chebi_id: db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id db_name = get_chebi_name_from_id(chebi_id) elif urn_type == 'agi-llid': # This is an Entrez ID, convert to HGNC hgnc_id = get_hgnc_from_entrez(urn_id) if hgnc_id is not None: db_refs['HGNC'] = hgnc_id # Convert the HGNC ID to a Uniprot ID uniprot_id = get_uniprot_id(hgnc_id) if uniprot_id is not None: db_refs['UP'] = uniprot_id # Try to lookup HGNC name; if it's available, set it to the # agent name db_name = get_hgnc_name(hgnc_id) elif urn_type in ['agi-meshdis', 'agi-ncimorgan', 'agi-ncimtissue', 'agi-ncimcelltype']: if urn_id.startswith('C') and urn_id[1:].isdigit(): # Identifier is probably UMLS db_refs['UMLS'] = urn_id else: # Identifier is MESH urn_mesh_name = unquote(urn_id) mesh_id, mesh_name = mesh_client.get_mesh_id_name(urn_mesh_name) if mesh_id: db_refs['MESH'] = mesh_id db_name = mesh_name else: db_name = urn_mesh_name elif urn_type == 'agi-gocomplex': # Identifier is GO db_refs['GO'] = 'GO:%s' % urn_id elif urn_type == 'agi-go': # Identifier is GO db_refs['GO'] = 'GO:%s' % urn_id # If we have a GO or MESH grounding, see if there is a corresponding # Famplex grounding db_sometimes_maps_to_famplex = ['GO', 'MESH'] for db in 
db_sometimes_maps_to_famplex: if db in db_refs: key = (db, db_refs[db]) if key in famplex_map: db_refs['FPLX'] = famplex_map[key] # If the urn corresponds to an eccode, groudn to famplex if that eccode # is in the Famplex equivalences table if urn.startswith('urn:agi-enz'): tokens = urn.split(':') eccode = tokens[2] key = ('ECCODE', eccode) if key in famplex_map: db_refs['FPLX'] = famplex_map[key] # If the Medscan URN itself maps to a Famplex id, add a Famplex grounding key = ('MEDSCAN', urn) if key in famplex_map: db_refs['FPLX'] = famplex_map[key] # If there is a Famplex grounding, use Famplex for entity name if 'FPLX' in db_refs: db_name = db_refs['FPLX'] elif 'GO' in db_refs: db_name = go_client.get_go_label(db_refs['GO']) return db_refs, db_name
python
def _urn_to_db_refs(urn): # Convert a urn to a db_refs dictionary if urn is None: return {}, None m = URN_PATT.match(urn) if m is None: return None, None urn_type, urn_id = m.groups() db_refs = {} db_name = None # TODO: support more types of URNs if urn_type == 'agi-cas': # Identifier is CAS, convert to CHEBI chebi_id = get_chebi_id_from_cas(urn_id) if chebi_id: db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id db_name = get_chebi_name_from_id(chebi_id) elif urn_type == 'agi-llid': # This is an Entrez ID, convert to HGNC hgnc_id = get_hgnc_from_entrez(urn_id) if hgnc_id is not None: db_refs['HGNC'] = hgnc_id # Convert the HGNC ID to a Uniprot ID uniprot_id = get_uniprot_id(hgnc_id) if uniprot_id is not None: db_refs['UP'] = uniprot_id # Try to lookup HGNC name; if it's available, set it to the # agent name db_name = get_hgnc_name(hgnc_id) elif urn_type in ['agi-meshdis', 'agi-ncimorgan', 'agi-ncimtissue', 'agi-ncimcelltype']: if urn_id.startswith('C') and urn_id[1:].isdigit(): # Identifier is probably UMLS db_refs['UMLS'] = urn_id else: # Identifier is MESH urn_mesh_name = unquote(urn_id) mesh_id, mesh_name = mesh_client.get_mesh_id_name(urn_mesh_name) if mesh_id: db_refs['MESH'] = mesh_id db_name = mesh_name else: db_name = urn_mesh_name elif urn_type == 'agi-gocomplex': # Identifier is GO db_refs['GO'] = 'GO:%s' % urn_id elif urn_type == 'agi-go': # Identifier is GO db_refs['GO'] = 'GO:%s' % urn_id # If we have a GO or MESH grounding, see if there is a corresponding # Famplex grounding db_sometimes_maps_to_famplex = ['GO', 'MESH'] for db in db_sometimes_maps_to_famplex: if db in db_refs: key = (db, db_refs[db]) if key in famplex_map: db_refs['FPLX'] = famplex_map[key] # If the urn corresponds to an eccode, groudn to famplex if that eccode # is in the Famplex equivalences table if urn.startswith('urn:agi-enz'): tokens = urn.split(':') eccode = tokens[2] key = ('ECCODE', eccode) if key in famplex_map: db_refs['FPLX'] = famplex_map[key] # If the Medscan URN itself maps to a 
Famplex id, add a Famplex grounding key = ('MEDSCAN', urn) if key in famplex_map: db_refs['FPLX'] = famplex_map[key] # If there is a Famplex grounding, use Famplex for entity name if 'FPLX' in db_refs: db_name = db_refs['FPLX'] elif 'GO' in db_refs: db_name = go_client.get_go_label(db_refs['GO']) return db_refs, db_name
[ "def", "_urn_to_db_refs", "(", "urn", ")", ":", "# Convert a urn to a db_refs dictionary", "if", "urn", "is", "None", ":", "return", "{", "}", ",", "None", "m", "=", "URN_PATT", ".", "match", "(", "urn", ")", "if", "m", "is", "None", ":", "return", "None...
Converts a Medscan URN to an INDRA db_refs dictionary with grounding information. Parameters ---------- urn : str A Medscan URN Returns ------- db_refs : dict A dictionary with grounding information, mapping databases to database identifiers. If the Medscan URN is not recognized, returns an empty dictionary. db_name : str The Famplex name, if available; otherwise the HGNC name if available; otherwise None
[ "Converts", "a", "Medscan", "URN", "to", "an", "INDRA", "db_refs", "dictionary", "with", "grounding", "information", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L976-L1079
19,428
sorgerlab/indra
indra/sources/medscan/processor.py
_untag_sentence
def _untag_sentence(tagged_sentence): """Removes all tags in the sentence, returning the original sentence without Medscan annotations. Parameters ---------- tagged_sentence : str The tagged sentence Returns ------- untagged_sentence : str Sentence with tags and annotations stripped out """ untagged_sentence = TAG_PATT.sub('\\2', tagged_sentence) clean_sentence = JUNK_PATT.sub('', untagged_sentence) return clean_sentence.strip()
python
def _untag_sentence(tagged_sentence): untagged_sentence = TAG_PATT.sub('\\2', tagged_sentence) clean_sentence = JUNK_PATT.sub('', untagged_sentence) return clean_sentence.strip()
[ "def", "_untag_sentence", "(", "tagged_sentence", ")", ":", "untagged_sentence", "=", "TAG_PATT", ".", "sub", "(", "'\\\\2'", ",", "tagged_sentence", ")", "clean_sentence", "=", "JUNK_PATT", ".", "sub", "(", "''", ",", "untagged_sentence", ")", "return", "clean_...
Removes all tags in the sentence, returning the original sentence without Medscan annotations. Parameters ---------- tagged_sentence : str The tagged sentence Returns ------- untagged_sentence : str Sentence with tags and annotations stripped out
[ "Removes", "all", "tags", "in", "the", "sentence", "returning", "the", "original", "sentence", "without", "Medscan", "annotations", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L1109-L1125
19,429
sorgerlab/indra
indra/sources/medscan/processor.py
_extract_sentence_tags
def _extract_sentence_tags(tagged_sentence): """Given a tagged sentence, extracts a dictionary mapping tags to the words or phrases that they tag. Parameters ---------- tagged_sentence : str The sentence with Medscan annotations and tags Returns ------- tags : dict A dictionary mapping tags to the words or phrases that they tag. """ untagged_sentence = _untag_sentence(tagged_sentence) decluttered_sentence = JUNK_PATT.sub('', tagged_sentence) tags = {} # Iteratively look for all matches of this pattern endpos = 0 while True: match = TAG_PATT.search(decluttered_sentence, pos=endpos) if not match: break endpos = match.end() text = match.group(2) text = text.replace('CONTEXT', '') text = text.replace('GLOSSARY', '') text = text.strip() start = untagged_sentence.index(text) stop = start + len(text) tag_key = match.group(1) if ',' in tag_key: for sub_key in tag_key.split(','): if sub_key == '0': continue tags[sub_key] = {'text': text, 'bounds': (start, stop)} else: tags[tag_key] = {'text': text, 'bounds': (start, stop)} return tags
python
def _extract_sentence_tags(tagged_sentence): untagged_sentence = _untag_sentence(tagged_sentence) decluttered_sentence = JUNK_PATT.sub('', tagged_sentence) tags = {} # Iteratively look for all matches of this pattern endpos = 0 while True: match = TAG_PATT.search(decluttered_sentence, pos=endpos) if not match: break endpos = match.end() text = match.group(2) text = text.replace('CONTEXT', '') text = text.replace('GLOSSARY', '') text = text.strip() start = untagged_sentence.index(text) stop = start + len(text) tag_key = match.group(1) if ',' in tag_key: for sub_key in tag_key.split(','): if sub_key == '0': continue tags[sub_key] = {'text': text, 'bounds': (start, stop)} else: tags[tag_key] = {'text': text, 'bounds': (start, stop)} return tags
[ "def", "_extract_sentence_tags", "(", "tagged_sentence", ")", ":", "untagged_sentence", "=", "_untag_sentence", "(", "tagged_sentence", ")", "decluttered_sentence", "=", "JUNK_PATT", ".", "sub", "(", "''", ",", "tagged_sentence", ")", "tags", "=", "{", "}", "# Ite...
Given a tagged sentence, extracts a dictionary mapping tags to the words or phrases that they tag. Parameters ---------- tagged_sentence : str The sentence with Medscan annotations and tags Returns ------- tags : dict A dictionary mapping tags to the words or phrases that they tag.
[ "Given", "a", "tagged", "sentence", "extracts", "a", "dictionary", "mapping", "tags", "to", "the", "words", "or", "phrases", "that", "they", "tag", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L1128-L1168
19,430
sorgerlab/indra
indra/sources/medscan/processor.py
ProteinSiteInfo.get_sites
def get_sites(self): """Parse the site-text string and return a list of sites. Returns ------- sites : list[Site] A list of position-residue pairs corresponding to the site-text """ st = self.site_text suffixes = [' residue', ' residues', ',', '/'] for suffix in suffixes: if st.endswith(suffix): st = st[:-len(suffix)] assert(not st.endswith(',')) # Strip parentheses st = st.replace('(', '') st = st.replace(')', '') st = st.replace(' or ', ' and ') # Treat end and or the same sites = [] parts = st.split(' and ') for part in parts: if part.endswith(','): part = part[:-1] if len(part.strip()) > 0: sites.extend(ReachProcessor._parse_site_text(part.strip())) return sites
python
def get_sites(self): st = self.site_text suffixes = [' residue', ' residues', ',', '/'] for suffix in suffixes: if st.endswith(suffix): st = st[:-len(suffix)] assert(not st.endswith(',')) # Strip parentheses st = st.replace('(', '') st = st.replace(')', '') st = st.replace(' or ', ' and ') # Treat end and or the same sites = [] parts = st.split(' and ') for part in parts: if part.endswith(','): part = part[:-1] if len(part.strip()) > 0: sites.extend(ReachProcessor._parse_site_text(part.strip())) return sites
[ "def", "get_sites", "(", "self", ")", ":", "st", "=", "self", ".", "site_text", "suffixes", "=", "[", "' residue'", ",", "' residues'", ",", "','", ",", "'/'", "]", "for", "suffix", "in", "suffixes", ":", "if", "st", ".", "endswith", "(", "suffix", "...
Parse the site-text string and return a list of sites. Returns ------- sites : list[Site] A list of position-residue pairs corresponding to the site-text
[ "Parse", "the", "site", "-", "text", "string", "and", "return", "a", "list", "of", "sites", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L163-L190
19,431
sorgerlab/indra
indra/sources/medscan/processor.py
MedscanProcessor.process_csxml_file
def process_csxml_file(self, filename, interval=None, lazy=False): """Processes a filehandle to MedScan csxml input into INDRA statements. The CSXML format consists of a top-level `<batch>` root element containing a series of `<doc>` (document) elements, in turn containing `<sec>` (section) elements, and in turn containing `<sent>` (sentence) elements. Within the `<sent>` element, a series of additional elements appear in the following order: * `<toks>`, which contains a tokenized form of the sentence in its text attribute * `<textmods>`, which describes any preprocessing/normalization done to the underlying text * `<match>` elements, each of which contains one of more `<entity>` elements, describing entities in the text with their identifiers. The local IDs of each entities are given in the `msid` attribute of this element; these IDs are then referenced in any subsequent SVO elements. * `<svo>` elements, representing subject-verb-object triples. SVO elements with a `type` attribute of `CONTROL` represent normalized regulation relationships; they often represent the normalized extraction of the immediately preceding (but unnormalized SVO element). However, in some cases there can be a "CONTROL" SVO element without its parent immediately preceding it. Parameters ---------- filename : string The path to a Medscan csxml file. interval : (start, end) or None Select the interval of documents to read, starting with the `start`th document and ending before the `end`th document. If either is None, the value is considered undefined. If the value exceeds the bounds of available documents, it will simply be ignored. lazy : bool If True, only create a generator which can be used by the `get_statements` method. If True, populate the statements list now. 
""" if interval is None: interval = (None, None) tmp_fname = tempfile.mktemp(os.path.basename(filename)) fix_character_encoding(filename, tmp_fname) self.__f = open(tmp_fname, 'rb') self._gen = self._iter_through_csxml_file_from_handle(*interval) if not lazy: for stmt in self._gen: self.statements.append(stmt) return
python
def process_csxml_file(self, filename, interval=None, lazy=False): if interval is None: interval = (None, None) tmp_fname = tempfile.mktemp(os.path.basename(filename)) fix_character_encoding(filename, tmp_fname) self.__f = open(tmp_fname, 'rb') self._gen = self._iter_through_csxml_file_from_handle(*interval) if not lazy: for stmt in self._gen: self.statements.append(stmt) return
[ "def", "process_csxml_file", "(", "self", ",", "filename", ",", "interval", "=", "None", ",", "lazy", "=", "False", ")", ":", "if", "interval", "is", "None", ":", "interval", "=", "(", "None", ",", "None", ")", "tmp_fname", "=", "tempfile", ".", "mktem...
Processes a filehandle to MedScan csxml input into INDRA statements. The CSXML format consists of a top-level `<batch>` root element containing a series of `<doc>` (document) elements, in turn containing `<sec>` (section) elements, and in turn containing `<sent>` (sentence) elements. Within the `<sent>` element, a series of additional elements appear in the following order: * `<toks>`, which contains a tokenized form of the sentence in its text attribute * `<textmods>`, which describes any preprocessing/normalization done to the underlying text * `<match>` elements, each of which contains one of more `<entity>` elements, describing entities in the text with their identifiers. The local IDs of each entities are given in the `msid` attribute of this element; these IDs are then referenced in any subsequent SVO elements. * `<svo>` elements, representing subject-verb-object triples. SVO elements with a `type` attribute of `CONTROL` represent normalized regulation relationships; they often represent the normalized extraction of the immediately preceding (but unnormalized SVO element). However, in some cases there can be a "CONTROL" SVO element without its parent immediately preceding it. Parameters ---------- filename : string The path to a Medscan csxml file. interval : (start, end) or None Select the interval of documents to read, starting with the `start`th document and ending before the `end`th document. If either is None, the value is considered undefined. If the value exceeds the bounds of available documents, it will simply be ignored. lazy : bool If True, only create a generator which can be used by the `get_statements` method. If True, populate the statements list now.
[ "Processes", "a", "filehandle", "to", "MedScan", "csxml", "input", "into", "INDRA", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L328-L381
19,432
sorgerlab/indra
indra/tools/reading/util/script_tools.py
get_parser
def get_parser(description, input_desc): """Get a parser that is generic to reading scripts. Parameters ---------- description : str A description of the tool, usually about one line long. input_desc: str A string describing the nature of the input file used by the reading tool. Returns ------- parser : argparse.ArgumentParser instance An argument parser object, to which further arguments can be added. """ parser = ArgumentParser(description=description) parser.add_argument( dest='input_file', help=input_desc ) parser.add_argument( '-r', '--readers', choices=['reach', 'sparser', 'trips'], help='List of readers to be used.', nargs='+' ) parser.add_argument( '-n', '--num_procs', dest='n_proc', help='Select the number of processes to use.', type=int, default=1 ) parser.add_argument( '-s', '--sample', dest='n_samp', help='Read a random sample of size N_SAMP of the inputs.', type=int ) parser.add_argument( '-I', '--in_range', dest='range_str', help='Only read input lines in the range given as <start>:<end>.' ) parser.add_argument( '-v', '--verbose', help='Include output from the readers.', action='store_true' ) parser.add_argument( '-q', '--quiet', help='Suppress most output. Overrides -v and -d options.', action='store_true' ) parser.add_argument( '-d', '--debug', help='Set the logging to debug level.', action='store_true' ) # parser.add_argument( # '-m', '--messy', # help='Do not clean up directories created while reading.', # action='store_true' # ) return parser
python
def get_parser(description, input_desc): parser = ArgumentParser(description=description) parser.add_argument( dest='input_file', help=input_desc ) parser.add_argument( '-r', '--readers', choices=['reach', 'sparser', 'trips'], help='List of readers to be used.', nargs='+' ) parser.add_argument( '-n', '--num_procs', dest='n_proc', help='Select the number of processes to use.', type=int, default=1 ) parser.add_argument( '-s', '--sample', dest='n_samp', help='Read a random sample of size N_SAMP of the inputs.', type=int ) parser.add_argument( '-I', '--in_range', dest='range_str', help='Only read input lines in the range given as <start>:<end>.' ) parser.add_argument( '-v', '--verbose', help='Include output from the readers.', action='store_true' ) parser.add_argument( '-q', '--quiet', help='Suppress most output. Overrides -v and -d options.', action='store_true' ) parser.add_argument( '-d', '--debug', help='Set the logging to debug level.', action='store_true' ) # parser.add_argument( # '-m', '--messy', # help='Do not clean up directories created while reading.', # action='store_true' # ) return parser
[ "def", "get_parser", "(", "description", ",", "input_desc", ")", ":", "parser", "=", "ArgumentParser", "(", "description", "=", "description", ")", "parser", ".", "add_argument", "(", "dest", "=", "'input_file'", ",", "help", "=", "input_desc", ")", "parser", ...
Get a parser that is generic to reading scripts. Parameters ---------- description : str A description of the tool, usually about one line long. input_desc: str A string describing the nature of the input file used by the reading tool. Returns ------- parser : argparse.ArgumentParser instance An argument parser object, to which further arguments can be added.
[ "Get", "a", "parser", "that", "is", "generic", "to", "reading", "scripts", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/script_tools.py#L11-L76
19,433
sorgerlab/indra
indra/literature/newsapi_client.py
send_request
def send_request(endpoint, **kwargs): """Return the response to a query as JSON from the NewsAPI web service. The basic API is limited to 100 results which is chosen unless explicitly given as an argument. Beyond that, paging is supported through the "page" argument, if needed. Parameters ---------- endpoint : str Endpoint to query, e.g. "everything" or "top-headlines" kwargs : dict A list of keyword arguments passed as parameters with the query. The basic ones are "q" which is the search query, "from" is a start date formatted as for instance 2018-06-10 and "to" is an end date with the same format. Returns ------- res_json : dict The response from the web service as a JSON dict. """ if api_key is None: logger.error('NewsAPI cannot be used without an API key') return None url = '%s/%s' % (newsapi_url, endpoint) if 'apiKey' not in kwargs: kwargs['apiKey'] = api_key if 'pageSize' not in kwargs: kwargs['pageSize'] = 100 res = requests.get(url, params=kwargs) res.raise_for_status() res_json = res.json() return res_json
python
def send_request(endpoint, **kwargs): if api_key is None: logger.error('NewsAPI cannot be used without an API key') return None url = '%s/%s' % (newsapi_url, endpoint) if 'apiKey' not in kwargs: kwargs['apiKey'] = api_key if 'pageSize' not in kwargs: kwargs['pageSize'] = 100 res = requests.get(url, params=kwargs) res.raise_for_status() res_json = res.json() return res_json
[ "def", "send_request", "(", "endpoint", ",", "*", "*", "kwargs", ")", ":", "if", "api_key", "is", "None", ":", "logger", ".", "error", "(", "'NewsAPI cannot be used without an API key'", ")", "return", "None", "url", "=", "'%s/%s'", "%", "(", "newsapi_url", ...
Return the response to a query as JSON from the NewsAPI web service. The basic API is limited to 100 results which is chosen unless explicitly given as an argument. Beyond that, paging is supported through the "page" argument, if needed. Parameters ---------- endpoint : str Endpoint to query, e.g. "everything" or "top-headlines" kwargs : dict A list of keyword arguments passed as parameters with the query. The basic ones are "q" which is the search query, "from" is a start date formatted as for instance 2018-06-10 and "to" is an end date with the same format. Returns ------- res_json : dict The response from the web service as a JSON dict.
[ "Return", "the", "response", "to", "a", "query", "as", "JSON", "from", "the", "NewsAPI", "web", "service", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/newsapi_client.py#L29-L63
19,434
sorgerlab/indra
indra/sources/ndex_cx/api.py
process_cx_file
def process_cx_file(file_name, require_grounding=True): """Process a CX JSON file into Statements. Parameters ---------- file_name : str Path to file containing CX JSON. require_grounding: bool Whether network nodes lacking grounding information should be included among the extracted Statements (default is True). Returns ------- NdexCxProcessor Processor containing Statements. """ with open(file_name, 'rt') as fh: json_list = json.load(fh) return process_cx(json_list, require_grounding=require_grounding)
python
def process_cx_file(file_name, require_grounding=True): with open(file_name, 'rt') as fh: json_list = json.load(fh) return process_cx(json_list, require_grounding=require_grounding)
[ "def", "process_cx_file", "(", "file_name", ",", "require_grounding", "=", "True", ")", ":", "with", "open", "(", "file_name", ",", "'rt'", ")", "as", "fh", ":", "json_list", "=", "json", ".", "load", "(", "fh", ")", "return", "process_cx", "(", "json_li...
Process a CX JSON file into Statements. Parameters ---------- file_name : str Path to file containing CX JSON. require_grounding: bool Whether network nodes lacking grounding information should be included among the extracted Statements (default is True). Returns ------- NdexCxProcessor Processor containing Statements.
[ "Process", "a", "CX", "JSON", "file", "into", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/api.py#L12-L30
19,435
sorgerlab/indra
indra/sources/ndex_cx/api.py
process_ndex_network
def process_ndex_network(network_id, username=None, password=None, require_grounding=True): """Process an NDEx network into Statements. Parameters ---------- network_id : str NDEx network ID. username : str NDEx username. password : str NDEx password. require_grounding: bool Whether network nodes lacking grounding information should be included among the extracted Statements (default is True). Returns ------- NdexCxProcessor Processor containing Statements. Returns None if there if the HTTP status code indicates an unsuccessful request. """ nd = ndex2.client.Ndex2(username=username, password=password) res = nd.get_network_as_cx_stream(network_id) if res.status_code != 200: logger.error('Problem downloading network: status code %s' % res.status_code) logger.error('Response: %s' % res.text) return None json_list = res.json() summary = nd.get_network_summary(network_id) return process_cx(json_list, summary=summary, require_grounding=require_grounding)
python
def process_ndex_network(network_id, username=None, password=None, require_grounding=True): nd = ndex2.client.Ndex2(username=username, password=password) res = nd.get_network_as_cx_stream(network_id) if res.status_code != 200: logger.error('Problem downloading network: status code %s' % res.status_code) logger.error('Response: %s' % res.text) return None json_list = res.json() summary = nd.get_network_summary(network_id) return process_cx(json_list, summary=summary, require_grounding=require_grounding)
[ "def", "process_ndex_network", "(", "network_id", ",", "username", "=", "None", ",", "password", "=", "None", ",", "require_grounding", "=", "True", ")", ":", "nd", "=", "ndex2", ".", "client", ".", "Ndex2", "(", "username", "=", "username", ",", "password...
Process an NDEx network into Statements. Parameters ---------- network_id : str NDEx network ID. username : str NDEx username. password : str NDEx password. require_grounding: bool Whether network nodes lacking grounding information should be included among the extracted Statements (default is True). Returns ------- NdexCxProcessor Processor containing Statements. Returns None if there if the HTTP status code indicates an unsuccessful request.
[ "Process", "an", "NDEx", "network", "into", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/api.py#L33-L65
19,436
sorgerlab/indra
indra/sources/ndex_cx/api.py
process_cx
def process_cx(cx_json, summary=None, require_grounding=True): """Process a CX JSON object into Statements. Parameters ---------- cx_json : list CX JSON object. summary : Optional[dict] The network summary object which can be obtained via get_network_summary through the web service. THis contains metadata such as the owner and the creation time of the network. require_grounding: bool Whether network nodes lacking grounding information should be included among the extracted Statements (default is True). Returns ------- NdexCxProcessor Processor containing Statements. """ ncp = NdexCxProcessor(cx_json, summary=summary, require_grounding=require_grounding) ncp.get_statements() return ncp
python
def process_cx(cx_json, summary=None, require_grounding=True): ncp = NdexCxProcessor(cx_json, summary=summary, require_grounding=require_grounding) ncp.get_statements() return ncp
[ "def", "process_cx", "(", "cx_json", ",", "summary", "=", "None", ",", "require_grounding", "=", "True", ")", ":", "ncp", "=", "NdexCxProcessor", "(", "cx_json", ",", "summary", "=", "summary", ",", "require_grounding", "=", "require_grounding", ")", "ncp", ...
Process a CX JSON object into Statements. Parameters ---------- cx_json : list CX JSON object. summary : Optional[dict] The network summary object which can be obtained via get_network_summary through the web service. THis contains metadata such as the owner and the creation time of the network. require_grounding: bool Whether network nodes lacking grounding information should be included among the extracted Statements (default is True). Returns ------- NdexCxProcessor Processor containing Statements.
[ "Process", "a", "CX", "JSON", "object", "into", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/api.py#L68-L91
19,437
sorgerlab/indra
indra/tools/reading/read_files.py
read_files
def read_files(files, readers, **kwargs): """Read the files in `files` with the reader objects in `readers`. Parameters ---------- files : list [str] A list of file paths to be read by the readers. Supported files are limited to text and nxml files. readers : list [Reader instances] A list of Reader objects to be used reading the files. **kwargs : Other keyword arguments are passed to the `read` method of the readers. Returns ------- output_list : list [ReadingData] A list of ReadingData objects with the contents of the readings. """ reading_content = [Content.from_file(filepath) for filepath in files] output_list = [] for reader in readers: res_list = reader.read(reading_content, **kwargs) if res_list is None: logger.info("Nothing read by %s." % reader.name) else: logger.info("Successfully read %d content entries with %s." % (len(res_list), reader.name)) output_list += res_list logger.info("Read %s text content entries in all." % len(output_list)) return output_list
python
def read_files(files, readers, **kwargs): reading_content = [Content.from_file(filepath) for filepath in files] output_list = [] for reader in readers: res_list = reader.read(reading_content, **kwargs) if res_list is None: logger.info("Nothing read by %s." % reader.name) else: logger.info("Successfully read %d content entries with %s." % (len(res_list), reader.name)) output_list += res_list logger.info("Read %s text content entries in all." % len(output_list)) return output_list
[ "def", "read_files", "(", "files", ",", "readers", ",", "*", "*", "kwargs", ")", ":", "reading_content", "=", "[", "Content", ".", "from_file", "(", "filepath", ")", "for", "filepath", "in", "files", "]", "output_list", "=", "[", "]", "for", "reader", ...
Read the files in `files` with the reader objects in `readers`. Parameters ---------- files : list [str] A list of file paths to be read by the readers. Supported files are limited to text and nxml files. readers : list [Reader instances] A list of Reader objects to be used reading the files. **kwargs : Other keyword arguments are passed to the `read` method of the readers. Returns ------- output_list : list [ReadingData] A list of ReadingData objects with the contents of the readings.
[ "Read", "the", "files", "in", "files", "with", "the", "reader", "objects", "in", "readers", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/read_files.py#L30-L59
19,438
sorgerlab/indra
indra/tools/expand_families.py
Expander.expand_families
def expand_families(self, stmts): """Generate statements by expanding members of families and complexes. """ new_stmts = [] for stmt in stmts: # Put together the lists of families, with their members. E.g., # for a statement involving RAF and MEK, should return a list of # tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)] families_list = [] for ag in stmt.agent_list(): ag_children = self.get_children(ag) # If the agent has no children, then we use the agent itself if len(ag_children) == 0: families_list.append([ag]) # Otherwise, we add the tuple of namespaces/IDs for the children else: families_list.append(ag_children) # Now, put together new statements frmo the cross product of the # expanded family members for ag_combo in itertools.product(*families_list): # Create new agents based on the namespaces/IDs, with # appropriate name and db_refs entries child_agents = [] for ag_entry in ag_combo: # If we got an agent, or None, that means there were no # children; so we use the original agent rather than # construct a new agent if ag_entry is None or isinstance(ag_entry, Agent): new_agent = ag_entry # Otherwise, create a new agent from the ns/ID elif isinstance(ag_entry, tuple): # FIXME FIXME FIXME # This doesn't reproduce agent state from the original # family-level statements! ag_ns, ag_id = ag_entry new_agent = _agent_from_ns_id(ag_ns, ag_id) else: raise Exception('Unrecognized agent entry type.') # Add agent to our list of child agents child_agents.append(new_agent) # Create a copy of the statement new_stmt = deepcopy(stmt) # Replace the agents in the statement with the newly-created # child agents new_stmt.set_agent_list(child_agents) # Add to list new_stmts.append(new_stmt) return new_stmts
python
def expand_families(self, stmts): new_stmts = [] for stmt in stmts: # Put together the lists of families, with their members. E.g., # for a statement involving RAF and MEK, should return a list of # tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)] families_list = [] for ag in stmt.agent_list(): ag_children = self.get_children(ag) # If the agent has no children, then we use the agent itself if len(ag_children) == 0: families_list.append([ag]) # Otherwise, we add the tuple of namespaces/IDs for the children else: families_list.append(ag_children) # Now, put together new statements frmo the cross product of the # expanded family members for ag_combo in itertools.product(*families_list): # Create new agents based on the namespaces/IDs, with # appropriate name and db_refs entries child_agents = [] for ag_entry in ag_combo: # If we got an agent, or None, that means there were no # children; so we use the original agent rather than # construct a new agent if ag_entry is None or isinstance(ag_entry, Agent): new_agent = ag_entry # Otherwise, create a new agent from the ns/ID elif isinstance(ag_entry, tuple): # FIXME FIXME FIXME # This doesn't reproduce agent state from the original # family-level statements! ag_ns, ag_id = ag_entry new_agent = _agent_from_ns_id(ag_ns, ag_id) else: raise Exception('Unrecognized agent entry type.') # Add agent to our list of child agents child_agents.append(new_agent) # Create a copy of the statement new_stmt = deepcopy(stmt) # Replace the agents in the statement with the newly-created # child agents new_stmt.set_agent_list(child_agents) # Add to list new_stmts.append(new_stmt) return new_stmts
[ "def", "expand_families", "(", "self", ",", "stmts", ")", ":", "new_stmts", "=", "[", "]", "for", "stmt", "in", "stmts", ":", "# Put together the lists of families, with their members. E.g.,", "# for a statement involving RAF and MEK, should return a list of", "# tuples like [(...
Generate statements by expanding members of families and complexes.
[ "Generate", "statements", "by", "expanding", "members", "of", "families", "and", "complexes", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/expand_families.py#L22-L69
19,439
sorgerlab/indra
indra/preassembler/make_eidos_hume_ontologies.py
update_ontology
def update_ontology(ont_url, rdf_path): """Load an ontology formatted like Eidos' from github.""" yaml_root = load_yaml_from_url(ont_url) G = rdf_graph_from_yaml(yaml_root) save_hierarchy(G, rdf_path)
python
def update_ontology(ont_url, rdf_path): yaml_root = load_yaml_from_url(ont_url) G = rdf_graph_from_yaml(yaml_root) save_hierarchy(G, rdf_path)
[ "def", "update_ontology", "(", "ont_url", ",", "rdf_path", ")", ":", "yaml_root", "=", "load_yaml_from_url", "(", "ont_url", ")", "G", "=", "rdf_graph_from_yaml", "(", "yaml_root", ")", "save_hierarchy", "(", "G", ",", "rdf_path", ")" ]
Load an ontology formatted like Eidos' from github.
[ "Load", "an", "ontology", "formatted", "like", "Eidos", "from", "github", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/make_eidos_hume_ontologies.py#L69-L73
19,440
sorgerlab/indra
indra/preassembler/make_eidos_hume_ontologies.py
rdf_graph_from_yaml
def rdf_graph_from_yaml(yaml_root): """Convert the YAML object into an RDF Graph object.""" G = Graph() for top_entry in yaml_root: assert len(top_entry) == 1 node = list(top_entry.keys())[0] build_relations(G, node, top_entry[node], None) return G
python
def rdf_graph_from_yaml(yaml_root): G = Graph() for top_entry in yaml_root: assert len(top_entry) == 1 node = list(top_entry.keys())[0] build_relations(G, node, top_entry[node], None) return G
[ "def", "rdf_graph_from_yaml", "(", "yaml_root", ")", ":", "G", "=", "Graph", "(", ")", "for", "top_entry", "in", "yaml_root", ":", "assert", "len", "(", "top_entry", ")", "==", "1", "node", "=", "list", "(", "top_entry", ".", "keys", "(", ")", ")", "...
Convert the YAML object into an RDF Graph object.
[ "Convert", "the", "YAML", "object", "into", "an", "RDF", "Graph", "object", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/make_eidos_hume_ontologies.py#L76-L83
19,441
sorgerlab/indra
indra/preassembler/make_eidos_hume_ontologies.py
load_yaml_from_url
def load_yaml_from_url(ont_url): """Return a YAML object loaded from a YAML file URL.""" res = requests.get(ont_url) if res.status_code != 200: raise Exception('Could not load ontology from %s' % ont_url) root = yaml.load(res.content) return root
python
def load_yaml_from_url(ont_url): res = requests.get(ont_url) if res.status_code != 200: raise Exception('Could not load ontology from %s' % ont_url) root = yaml.load(res.content) return root
[ "def", "load_yaml_from_url", "(", "ont_url", ")", ":", "res", "=", "requests", ".", "get", "(", "ont_url", ")", "if", "res", ".", "status_code", "!=", "200", ":", "raise", "Exception", "(", "'Could not load ontology from %s'", "%", "ont_url", ")", "root", "=...
Return a YAML object loaded from a YAML file URL.
[ "Return", "a", "YAML", "object", "loaded", "from", "a", "YAML", "file", "URL", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/make_eidos_hume_ontologies.py#L86-L92
19,442
sorgerlab/indra
indra/sources/isi/preprocessor.py
IsiPreprocessor.register_preprocessed_file
def register_preprocessed_file(self, infile, pmid, extra_annotations): """Set up already preprocessed text file for reading with ISI reader. This is essentially a mock function to "register" already preprocessed files and get an IsiPreprocessor object that can be passed to the IsiProcessor. Parameters ---------- infile : str Path to an already preprocessed text file (i.e. one ready to be sent for reading to ISI reader). pmid : str The PMID corresponding to the file extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden) """ infile_base = os.path.basename(infile) outfile = os.path.join(self.preprocessed_dir, infile_base) shutil.copyfile(infile, outfile) infile_key = os.path.splitext(infile_base)[0] self.pmids[infile_key] = pmid self.extra_annotations[infile_key] = extra_annotations
python
def register_preprocessed_file(self, infile, pmid, extra_annotations): infile_base = os.path.basename(infile) outfile = os.path.join(self.preprocessed_dir, infile_base) shutil.copyfile(infile, outfile) infile_key = os.path.splitext(infile_base)[0] self.pmids[infile_key] = pmid self.extra_annotations[infile_key] = extra_annotations
[ "def", "register_preprocessed_file", "(", "self", ",", "infile", ",", "pmid", ",", "extra_annotations", ")", ":", "infile_base", "=", "os", ".", "path", ".", "basename", "(", "infile", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "self", "."...
Set up already preprocessed text file for reading with ISI reader. This is essentially a mock function to "register" already preprocessed files and get an IsiPreprocessor object that can be passed to the IsiProcessor. Parameters ---------- infile : str Path to an already preprocessed text file (i.e. one ready to be sent for reading to ISI reader). pmid : str The PMID corresponding to the file extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden)
[ "Set", "up", "already", "preprocessed", "text", "file", "for", "reading", "with", "ISI", "reader", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L54-L80
19,443
sorgerlab/indra
indra/sources/isi/preprocessor.py
IsiPreprocessor.preprocess_plain_text_string
def preprocess_plain_text_string(self, text, pmid, extra_annotations): """Preprocess plain text string for use by ISI reader. Preprocessing is done by tokenizing into sentences and writing each sentence on its own line in a plain text file. All other preprocessing functions ultimately call this one. Parameters ---------- text : str The plain text of the article of abstract pmid : str The PMID from which it comes, or None if not specified extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden) """ output_file = '%s.txt' % self.next_file_id output_file = os.path.join(self.preprocessed_dir, output_file) # Tokenize sentence sentences = nltk.sent_tokenize(text) # Write sentences to text file first_sentence = True with codecs.open(output_file, 'w', encoding='utf-8') as f: for sentence in sentences: if not first_sentence: f.write('\n') f.write(sentence.rstrip()) first_sentence = False # Store annotations self.pmids[str(self.next_file_id)] = pmid self.extra_annotations[str(self.next_file_id)] = extra_annotations # Increment file id self.next_file_id += 1
python
def preprocess_plain_text_string(self, text, pmid, extra_annotations): output_file = '%s.txt' % self.next_file_id output_file = os.path.join(self.preprocessed_dir, output_file) # Tokenize sentence sentences = nltk.sent_tokenize(text) # Write sentences to text file first_sentence = True with codecs.open(output_file, 'w', encoding='utf-8') as f: for sentence in sentences: if not first_sentence: f.write('\n') f.write(sentence.rstrip()) first_sentence = False # Store annotations self.pmids[str(self.next_file_id)] = pmid self.extra_annotations[str(self.next_file_id)] = extra_annotations # Increment file id self.next_file_id += 1
[ "def", "preprocess_plain_text_string", "(", "self", ",", "text", ",", "pmid", ",", "extra_annotations", ")", ":", "output_file", "=", "'%s.txt'", "%", "self", ".", "next_file_id", "output_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "preproc...
Preprocess plain text string for use by ISI reader. Preprocessing is done by tokenizing into sentences and writing each sentence on its own line in a plain text file. All other preprocessing functions ultimately call this one. Parameters ---------- text : str The plain text of the article of abstract pmid : str The PMID from which it comes, or None if not specified extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden)
[ "Preprocess", "plain", "text", "string", "for", "use", "by", "ISI", "reader", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L82-L120
19,444
sorgerlab/indra
indra/sources/isi/preprocessor.py
IsiPreprocessor.preprocess_plain_text_file
def preprocess_plain_text_file(self, filename, pmid, extra_annotations): """Preprocess a plain text file for use with ISI reder. Preprocessing results in a new text file with one sentence per line. Parameters ---------- filename : str The name of the plain text file pmid : str The PMID from which it comes, or None if not specified extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden) """ with codecs.open(filename, 'r', encoding='utf-8') as f: content = f.read() self.preprocess_plain_text_string(content, pmid, extra_annotations)
python
def preprocess_plain_text_file(self, filename, pmid, extra_annotations): with codecs.open(filename, 'r', encoding='utf-8') as f: content = f.read() self.preprocess_plain_text_string(content, pmid, extra_annotations)
[ "def", "preprocess_plain_text_file", "(", "self", ",", "filename", ",", "pmid", ",", "extra_annotations", ")", ":", "with", "codecs", ".", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "content", "=", "f", "."...
Preprocess a plain text file for use with ISI reder. Preprocessing results in a new text file with one sentence per line. Parameters ---------- filename : str The name of the plain text file pmid : str The PMID from which it comes, or None if not specified extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden)
[ "Preprocess", "a", "plain", "text", "file", "for", "use", "with", "ISI", "reder", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L122-L142
19,445
sorgerlab/indra
indra/sources/isi/preprocessor.py
IsiPreprocessor.preprocess_nxml_file
def preprocess_nxml_file(self, filename, pmid, extra_annotations): """Preprocess an NXML file for use with the ISI reader. Preprocessing is done by extracting plain text from NXML and then creating a text file with one sentence per line. Parameters ---------- filename : str Filename of an nxml file to process pmid : str The PMID from which it comes, or None if not specified extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden) """ # Create a temporary directory tmp_dir = tempfile.mkdtemp('indra_isi_nxml2txt_output') # Run nxml2txt if nxml2txt_path is None: logger.error('NXML2TXT_PATH not specified in config file or ' + 'environment variable') return if python2_path is None: logger.error('PYTHON2_PATH not specified in config file or ' + 'environment variable') return else: txt_out = os.path.join(tmp_dir, 'out.txt') so_out = os.path.join(tmp_dir, 'out.so') command = [python2_path, os.path.join(nxml2txt_path, 'nxml2txt'), filename, txt_out, so_out] ret = subprocess.call(command) if ret != 0: logger.warning('nxml2txt returned non-zero error code') with open(txt_out, 'r') as f: txt_content = f.read() # Remote temporary directory shutil.rmtree(tmp_dir) # We need to remove some common LaTEX commands from the converted text # or the reader will get confused cmd1 = '[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}' cmd2 = '[^ \{\}]+\{[^\{\}]+\}' txt_content = re.sub(cmd1, '', txt_content) txt_content = re.sub(cmd2, '', txt_content) with open('tmp.txt', 'w') as f: f.write(txt_content) # Prepocess text extracted from nxml self.preprocess_plain_text_string(txt_content, pmid, extra_annotations)
python
def preprocess_nxml_file(self, filename, pmid, extra_annotations): # Create a temporary directory tmp_dir = tempfile.mkdtemp('indra_isi_nxml2txt_output') # Run nxml2txt if nxml2txt_path is None: logger.error('NXML2TXT_PATH not specified in config file or ' + 'environment variable') return if python2_path is None: logger.error('PYTHON2_PATH not specified in config file or ' + 'environment variable') return else: txt_out = os.path.join(tmp_dir, 'out.txt') so_out = os.path.join(tmp_dir, 'out.so') command = [python2_path, os.path.join(nxml2txt_path, 'nxml2txt'), filename, txt_out, so_out] ret = subprocess.call(command) if ret != 0: logger.warning('nxml2txt returned non-zero error code') with open(txt_out, 'r') as f: txt_content = f.read() # Remote temporary directory shutil.rmtree(tmp_dir) # We need to remove some common LaTEX commands from the converted text # or the reader will get confused cmd1 = '[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}' cmd2 = '[^ \{\}]+\{[^\{\}]+\}' txt_content = re.sub(cmd1, '', txt_content) txt_content = re.sub(cmd2, '', txt_content) with open('tmp.txt', 'w') as f: f.write(txt_content) # Prepocess text extracted from nxml self.preprocess_plain_text_string(txt_content, pmid, extra_annotations)
[ "def", "preprocess_nxml_file", "(", "self", ",", "filename", ",", "pmid", ",", "extra_annotations", ")", ":", "# Create a temporary directory", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", "'indra_isi_nxml2txt_output'", ")", "# Run nxml2txt", "if", "nxml2txt_path", ...
Preprocess an NXML file for use with the ISI reader. Preprocessing is done by extracting plain text from NXML and then creating a text file with one sentence per line. Parameters ---------- filename : str Filename of an nxml file to process pmid : str The PMID from which it comes, or None if not specified extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden)
[ "Preprocess", "an", "NXML", "file", "for", "use", "with", "the", "ISI", "reader", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L144-L202
19,446
sorgerlab/indra
indra/sources/isi/preprocessor.py
IsiPreprocessor.preprocess_abstract_list
def preprocess_abstract_list(self, abstract_list): """Preprocess abstracts in database pickle dump format for ISI reader. For each abstract, creates a plain text file with one sentence per line, and stores metadata to be included with each statement from that abstract. Parameters ---------- abstract_list : list[dict] Compressed abstracts with corresopnding metadata in INDRA database pickle dump format. """ for abstract_struct in abstract_list: abs_format = abstract_struct['format'] content_type = abstract_struct['text_type'] content_zipped = abstract_struct['content'] tcid = abstract_struct['tcid'] trid = abstract_struct['trid'] assert(abs_format == 'text') assert(content_type == 'abstract') pmid = None # Don't worry about pmid for now extra_annotations = {'tcid': tcid, 'trid': trid} # Uncompress content content = zlib.decompress(content_zipped, zlib.MAX_WBITS+16).decode('utf-8') self.preprocess_plain_text_string(content, pmid, extra_annotations)
python
def preprocess_abstract_list(self, abstract_list): for abstract_struct in abstract_list: abs_format = abstract_struct['format'] content_type = abstract_struct['text_type'] content_zipped = abstract_struct['content'] tcid = abstract_struct['tcid'] trid = abstract_struct['trid'] assert(abs_format == 'text') assert(content_type == 'abstract') pmid = None # Don't worry about pmid for now extra_annotations = {'tcid': tcid, 'trid': trid} # Uncompress content content = zlib.decompress(content_zipped, zlib.MAX_WBITS+16).decode('utf-8') self.preprocess_plain_text_string(content, pmid, extra_annotations)
[ "def", "preprocess_abstract_list", "(", "self", ",", "abstract_list", ")", ":", "for", "abstract_struct", "in", "abstract_list", ":", "abs_format", "=", "abstract_struct", "[", "'format'", "]", "content_type", "=", "abstract_struct", "[", "'text_type'", "]", "conten...
Preprocess abstracts in database pickle dump format for ISI reader. For each abstract, creates a plain text file with one sentence per line, and stores metadata to be included with each statement from that abstract. Parameters ---------- abstract_list : list[dict] Compressed abstracts with corresopnding metadata in INDRA database pickle dump format.
[ "Preprocess", "abstracts", "in", "database", "pickle", "dump", "format", "for", "ISI", "reader", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L204-L234
19,447
sorgerlab/indra
indra/sources/geneways/api.py
process_geneways_files
def process_geneways_files(input_folder=data_folder, get_evidence=True): """Reads in Geneways data and returns a list of statements. Parameters ---------- input_folder : Optional[str] A folder in which to search for Geneways data. Looks for these Geneways extraction data files: human_action.txt, human_actionmention.txt, human_symbols.txt. Omit this parameter to use the default input folder which is indra/data. get_evidence : Optional[bool] Attempt to find the evidence text for an extraction by downloading the corresponding text content and searching for the given offset in the text to get the evidence sentence. Default: True Returns ------- gp : GenewaysProcessor A GenewaysProcessor object which contains a list of INDRA statements generated from the Geneways action mentions. """ gp = GenewaysProcessor(input_folder, get_evidence) return gp
python
def process_geneways_files(input_folder=data_folder, get_evidence=True): gp = GenewaysProcessor(input_folder, get_evidence) return gp
[ "def", "process_geneways_files", "(", "input_folder", "=", "data_folder", ",", "get_evidence", "=", "True", ")", ":", "gp", "=", "GenewaysProcessor", "(", "input_folder", ",", "get_evidence", ")", "return", "gp" ]
Reads in Geneways data and returns a list of statements. Parameters ---------- input_folder : Optional[str] A folder in which to search for Geneways data. Looks for these Geneways extraction data files: human_action.txt, human_actionmention.txt, human_symbols.txt. Omit this parameter to use the default input folder which is indra/data. get_evidence : Optional[bool] Attempt to find the evidence text for an extraction by downloading the corresponding text content and searching for the given offset in the text to get the evidence sentence. Default: True Returns ------- gp : GenewaysProcessor A GenewaysProcessor object which contains a list of INDRA statements generated from the Geneways action mentions.
[ "Reads", "in", "Geneways", "data", "and", "returns", "a", "list", "of", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/api.py#L24-L47
19,448
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.post_flag_create
def post_flag_create(self, post_id, reason): """Function to flag a post. Parameters: post_id (int): The id of the flagged post. reason (str): The reason of the flagging. """ params = {'post_flag[post_id]': post_id, 'post_flag[reason]': reason} return self._get('post_flags.json', params, 'POST', auth=True)
python
def post_flag_create(self, post_id, reason): params = {'post_flag[post_id]': post_id, 'post_flag[reason]': reason} return self._get('post_flags.json', params, 'POST', auth=True)
[ "def", "post_flag_create", "(", "self", ",", "post_id", ",", "reason", ")", ":", "params", "=", "{", "'post_flag[post_id]'", ":", "post_id", ",", "'post_flag[reason]'", ":", "reason", "}", "return", "self", ".", "_get", "(", "'post_flags.json'", ",", "params",...
Function to flag a post. Parameters: post_id (int): The id of the flagged post. reason (str): The reason of the flagging.
[ "Function", "to", "flag", "a", "post", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L165-L173
19,449
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.post_versions_list
def post_versions_list(self, updater_name=None, updater_id=None, post_id=None, start_id=None): """Get list of post versions. Parameters: updater_name (str): updater_id (int): post_id (int): start_id (int): """ params = { 'search[updater_name]': updater_name, 'search[updater_id]': updater_id, 'search[post_id]': post_id, 'search[start_id]': start_id } return self._get('post_versions.json', params)
python
def post_versions_list(self, updater_name=None, updater_id=None, post_id=None, start_id=None): params = { 'search[updater_name]': updater_name, 'search[updater_id]': updater_id, 'search[post_id]': post_id, 'search[start_id]': start_id } return self._get('post_versions.json', params)
[ "def", "post_versions_list", "(", "self", ",", "updater_name", "=", "None", ",", "updater_id", "=", "None", ",", "post_id", "=", "None", ",", "start_id", "=", "None", ")", ":", "params", "=", "{", "'search[updater_name]'", ":", "updater_name", ",", "'search[...
Get list of post versions. Parameters: updater_name (str): updater_id (int): post_id (int): start_id (int):
[ "Get", "list", "of", "post", "versions", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L210-L226
19,450
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.artist_list
def artist_list(self, query=None, artist_id=None, creator_name=None, creator_id=None, is_active=None, is_banned=None, empty_only=None, order=None): """Get an artist of a list of artists. Parameters: query (str): This field has multiple uses depending on what the query starts with: 'http:desired_url': Search for artist with this URL. 'name:desired_url': Search for artists with the given name as their base name. 'other:other_name': Search for artists with the given name in their other names. 'group:group_name': Search for artists belonging to the group with the given name. 'status:banned': Search for artists that are banned. else Search for the given name in the base name and the other names. artist_id (id): The artist id. creator_name (str): Exact creator name. creator_id (id): Artist creator id. is_active (bool): Can be: true, false is_banned (bool): Can be: true, false empty_only (True): Search for artists that have 0 posts. Can be: true order (str): Can be: name, updated_at. """ params = { 'search[name]': query, 'search[id]': artist_id, 'search[creator_name]': creator_name, 'search[creator_id]': creator_id, 'search[is_active]': is_active, 'search[is_banned]': is_banned, 'search[empty_only]': empty_only, 'search[order]': order } return self._get('artists.json', params)
python
def artist_list(self, query=None, artist_id=None, creator_name=None, creator_id=None, is_active=None, is_banned=None, empty_only=None, order=None): params = { 'search[name]': query, 'search[id]': artist_id, 'search[creator_name]': creator_name, 'search[creator_id]': creator_id, 'search[is_active]': is_active, 'search[is_banned]': is_banned, 'search[empty_only]': empty_only, 'search[order]': order } return self._get('artists.json', params)
[ "def", "artist_list", "(", "self", ",", "query", "=", "None", ",", "artist_id", "=", "None", ",", "creator_name", "=", "None", ",", "creator_id", "=", "None", ",", "is_active", "=", "None", ",", "is_banned", "=", "None", ",", "empty_only", "=", "None", ...
Get an artist of a list of artists. Parameters: query (str): This field has multiple uses depending on what the query starts with: 'http:desired_url': Search for artist with this URL. 'name:desired_url': Search for artists with the given name as their base name. 'other:other_name': Search for artists with the given name in their other names. 'group:group_name': Search for artists belonging to the group with the given name. 'status:banned': Search for artists that are banned. else Search for the given name in the base name and the other names. artist_id (id): The artist id. creator_name (str): Exact creator name. creator_id (id): Artist creator id. is_active (bool): Can be: true, false is_banned (bool): Can be: true, false empty_only (True): Search for artists that have 0 posts. Can be: true order (str): Can be: name, updated_at.
[ "Get", "an", "artist", "of", "a", "list", "of", "artists", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L496-L537
19,451
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.artist_commentary_list
def artist_commentary_list(self, text_matches=None, post_id=None, post_tags_match=None, original_present=None, translated_present=None): """list artist commentary. Parameters: text_matches (str): post_id (int): post_tags_match (str): The commentary's post's tags match the giventerms. Meta-tags not supported. original_present (str): Can be: yes, no. translated_present (str): Can be: yes, no. """ params = { 'search[text_matches]': text_matches, 'search[post_id]': post_id, 'search[post_tags_match]': post_tags_match, 'search[original_present]': original_present, 'search[translated_present]': translated_present } return self._get('artist_commentaries.json', params)
python
def artist_commentary_list(self, text_matches=None, post_id=None, post_tags_match=None, original_present=None, translated_present=None): params = { 'search[text_matches]': text_matches, 'search[post_id]': post_id, 'search[post_tags_match]': post_tags_match, 'search[original_present]': original_present, 'search[translated_present]': translated_present } return self._get('artist_commentaries.json', params)
[ "def", "artist_commentary_list", "(", "self", ",", "text_matches", "=", "None", ",", "post_id", "=", "None", ",", "post_tags_match", "=", "None", ",", "original_present", "=", "None", ",", "translated_present", "=", "None", ")", ":", "params", "=", "{", "'se...
list artist commentary. Parameters: text_matches (str): post_id (int): post_tags_match (str): The commentary's post's tags match the given terms. Meta-tags not supported. original_present (str): Can be: yes, no. translated_present (str): Can be: yes, no.
[ "list", "artist", "commentary", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L655-L675
19,452
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.artist_commentary_versions
def artist_commentary_versions(self, post_id, updater_id): """Return list of artist commentary versions. Parameters: updater_id (int): post_id (int): """ params = {'search[updater_id]': updater_id, 'search[post_id]': post_id} return self._get('artist_commentary_versions.json', params)
python
def artist_commentary_versions(self, post_id, updater_id): params = {'search[updater_id]': updater_id, 'search[post_id]': post_id} return self._get('artist_commentary_versions.json', params)
[ "def", "artist_commentary_versions", "(", "self", ",", "post_id", ",", "updater_id", ")", ":", "params", "=", "{", "'search[updater_id]'", ":", "updater_id", ",", "'search[post_id]'", ":", "post_id", "}", "return", "self", ".", "_get", "(", "'artist_commentary_ver...
Return list of artist commentary versions. Parameters: updater_id (int): post_id (int):
[ "Return", "list", "of", "artist", "commentary", "versions", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L711-L719
19,453
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.note_list
def note_list(self, body_matches=None, post_id=None, post_tags_match=None, creator_name=None, creator_id=None, is_active=None): """Return list of notes. Parameters: body_matches (str): The note's body matches the given terms. post_id (int): A specific post. post_tags_match (str): The note's post's tags match the given terms. creator_name (str): The creator's name. Exact match. creator_id (int): The creator's user id. is_active (bool): Can be: True, False. """ params = { 'search[body_matches]': body_matches, 'search[post_id]': post_id, 'search[post_tags_match]': post_tags_match, 'search[creator_name]': creator_name, 'search[creator_id]': creator_id, 'search[is_active]': is_active } return self._get('notes.json', params)
python
def note_list(self, body_matches=None, post_id=None, post_tags_match=None, creator_name=None, creator_id=None, is_active=None): params = { 'search[body_matches]': body_matches, 'search[post_id]': post_id, 'search[post_tags_match]': post_tags_match, 'search[creator_name]': creator_name, 'search[creator_id]': creator_id, 'search[is_active]': is_active } return self._get('notes.json', params)
[ "def", "note_list", "(", "self", ",", "body_matches", "=", "None", ",", "post_id", "=", "None", ",", "post_tags_match", "=", "None", ",", "creator_name", "=", "None", ",", "creator_id", "=", "None", ",", "is_active", "=", "None", ")", ":", "params", "=",...
Return list of notes. Parameters: body_matches (str): The note's body matches the given terms. post_id (int): A specific post. post_tags_match (str): The note's post's tags match the given terms. creator_name (str): The creator's name. Exact match. creator_id (int): The creator's user id. is_active (bool): Can be: True, False.
[ "Return", "list", "of", "notes", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L721-L741
19,454
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.note_versions
def note_versions(self, updater_id=None, post_id=None, note_id=None): """Get list of note versions. Parameters: updater_id (int): post_id (int): note_id (int): """ params = { 'search[updater_id]': updater_id, 'search[post_id]': post_id, 'search[note_id]': note_id } return self._get('note_versions.json', params)
python
def note_versions(self, updater_id=None, post_id=None, note_id=None): params = { 'search[updater_id]': updater_id, 'search[post_id]': post_id, 'search[note_id]': note_id } return self._get('note_versions.json', params)
[ "def", "note_versions", "(", "self", ",", "updater_id", "=", "None", ",", "post_id", "=", "None", ",", "note_id", "=", "None", ")", ":", "params", "=", "{", "'search[updater_id]'", ":", "updater_id", ",", "'search[post_id]'", ":", "post_id", ",", "'search[no...
Get list of note versions. Parameters: updater_id (int): post_id (int): note_id (int):
[ "Get", "list", "of", "note", "versions", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L817-L830
19,455
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.user_list
def user_list(self, name=None, name_matches=None, min_level=None, max_level=None, level=None, user_id=None, order=None): """Function to get a list of users or a specific user. Levels: Users have a number attribute called level representing their role. The current levels are: Member 20, Gold 30, Platinum 31, Builder 32, Contributor 33, Janitor 35, Moderator 40 and Admin 50. Parameters: name (str): Supports patterns. name_matches (str): Same functionality as name. min_level (int): Minimum level (see section on levels). max_level (int): Maximum level (see section on levels). level (int): Current level (see section on levels). user_id (int): The user id. order (str): Can be: 'name', 'post_upload_count', 'note_count', 'post_update_count', 'date'. """ params = { 'search[name]': name, 'search[name_matches]': name_matches, 'search[min_level]': min_level, 'search[max_level]': max_level, 'search[level]': level, 'search[id]': user_id, 'search[order]': order } return self._get('users.json', params)
python
def user_list(self, name=None, name_matches=None, min_level=None, max_level=None, level=None, user_id=None, order=None): params = { 'search[name]': name, 'search[name_matches]': name_matches, 'search[min_level]': min_level, 'search[max_level]': max_level, 'search[level]': level, 'search[id]': user_id, 'search[order]': order } return self._get('users.json', params)
[ "def", "user_list", "(", "self", ",", "name", "=", "None", ",", "name_matches", "=", "None", ",", "min_level", "=", "None", ",", "max_level", "=", "None", ",", "level", "=", "None", ",", "user_id", "=", "None", ",", "order", "=", "None", ")", ":", ...
Function to get a list of users or a specific user. Levels: Users have a number attribute called level representing their role. The current levels are: Member 20, Gold 30, Platinum 31, Builder 32, Contributor 33, Janitor 35, Moderator 40 and Admin 50. Parameters: name (str): Supports patterns. name_matches (str): Same functionality as name. min_level (int): Minimum level (see section on levels). max_level (int): Maximum level (see section on levels). level (int): Current level (see section on levels). user_id (int): The user id. order (str): Can be: 'name', 'post_upload_count', 'note_count', 'post_update_count', 'date'.
[ "Function", "to", "get", "a", "list", "of", "users", "or", "a", "specific", "user", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L832-L862
19,456
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.pool_list
def pool_list(self, name_matches=None, pool_ids=None, category=None, description_matches=None, creator_name=None, creator_id=None, is_deleted=None, is_active=None, order=None): """Get a list of pools. Parameters: name_matches (str): pool_ids (str): Can search for multiple ID's at once, separated by commas. description_matches (str): creator_name (str): creator_id (int): is_active (bool): Can be: true, false. is_deleted (bool): Can be: True, False. order (str): Can be: name, created_at, post_count, date. category (str): Can be: series, collection. """ params = { 'search[name_matches]': name_matches, 'search[id]': pool_ids, 'search[description_matches]': description_matches, 'search[creator_name]': creator_name, 'search[creator_id]': creator_id, 'search[is_active]': is_active, 'search[is_deleted]': is_deleted, 'search[order]': order, 'search[category]': category } return self._get('pools.json', params)
python
def pool_list(self, name_matches=None, pool_ids=None, category=None, description_matches=None, creator_name=None, creator_id=None, is_deleted=None, is_active=None, order=None): params = { 'search[name_matches]': name_matches, 'search[id]': pool_ids, 'search[description_matches]': description_matches, 'search[creator_name]': creator_name, 'search[creator_id]': creator_id, 'search[is_active]': is_active, 'search[is_deleted]': is_deleted, 'search[order]': order, 'search[category]': category } return self._get('pools.json', params)
[ "def", "pool_list", "(", "self", ",", "name_matches", "=", "None", ",", "pool_ids", "=", "None", ",", "category", "=", "None", ",", "description_matches", "=", "None", ",", "creator_name", "=", "None", ",", "creator_id", "=", "None", ",", "is_deleted", "="...
Get a list of pools. Parameters: name_matches (str): pool_ids (str): Can search for multiple ID's at once, separated by commas. description_matches (str): creator_name (str): creator_id (int): is_active (bool): Can be: true, false. is_deleted (bool): Can be: True, False. order (str): Can be: name, created_at, post_count, date. category (str): Can be: series, collection.
[ "Get", "a", "list", "of", "pools", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L872-L900
19,457
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.pool_versions
def pool_versions(self, updater_id=None, updater_name=None, pool_id=None): """Get list of pool versions. Parameters: updater_id (int): updater_name (str): pool_id (int): """ params = { 'search[updater_id]': updater_id, 'search[updater_name]': updater_name, 'search[pool_id]': pool_id } return self._get('pool_versions.json', params)
python
def pool_versions(self, updater_id=None, updater_name=None, pool_id=None): params = { 'search[updater_id]': updater_id, 'search[updater_name]': updater_name, 'search[pool_id]': pool_id } return self._get('pool_versions.json', params)
[ "def", "pool_versions", "(", "self", ",", "updater_id", "=", "None", ",", "updater_name", "=", "None", ",", "pool_id", "=", "None", ")", ":", "params", "=", "{", "'search[updater_id]'", ":", "updater_id", ",", "'search[updater_name]'", ":", "updater_name", ","...
Get list of pool versions. Parameters: updater_id (int): updater_name (str): pool_id (int):
[ "Get", "list", "of", "pool", "versions", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L975-L988
19,458
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.tag_aliases
def tag_aliases(self, name_matches=None, antecedent_name=None, tag_id=None): """Get tags aliases. Parameters: name_matches (str): Match antecedent or consequent name. antecedent_name (str): Match antecedent name (exact match). tag_id (int): The tag alias id. """ params = { 'search[name_matches]': name_matches, 'search[antecedent_name]': antecedent_name, 'search[id]': tag_id } return self._get('tag_aliases.json', params)
python
def tag_aliases(self, name_matches=None, antecedent_name=None, tag_id=None): params = { 'search[name_matches]': name_matches, 'search[antecedent_name]': antecedent_name, 'search[id]': tag_id } return self._get('tag_aliases.json', params)
[ "def", "tag_aliases", "(", "self", ",", "name_matches", "=", "None", ",", "antecedent_name", "=", "None", ",", "tag_id", "=", "None", ")", ":", "params", "=", "{", "'search[name_matches]'", ":", "name_matches", ",", "'search[antecedent_name]'", ":", "antecedent_...
Get tags aliases. Parameters: name_matches (str): Match antecedent or consequent name. antecedent_name (str): Match antecedent name (exact match). tag_id (int): The tag alias id.
[ "Get", "tags", "aliases", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1039-L1053
19,459
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.tag_implications
def tag_implications(self, name_matches=None, antecedent_name=None, tag_id=None): """Get tags implications. Parameters: name_matches (str): Match antecedent or consequent name. antecedent_name (str): Match antecedent name (exact match). tag_id (int): Tag implication id. """ params = { 'search[name_matches]': name_matches, 'search[antecedent_name]': antecedent_name, 'search[id]': tag_id } return self._get('tag_implications.json', params)
python
def tag_implications(self, name_matches=None, antecedent_name=None, tag_id=None): params = { 'search[name_matches]': name_matches, 'search[antecedent_name]': antecedent_name, 'search[id]': tag_id } return self._get('tag_implications.json', params)
[ "def", "tag_implications", "(", "self", ",", "name_matches", "=", "None", ",", "antecedent_name", "=", "None", ",", "tag_id", "=", "None", ")", ":", "params", "=", "{", "'search[name_matches]'", ":", "name_matches", ",", "'search[antecedent_name]'", ":", "antece...
Get tags implications. Parameters: name_matches (str): Match antecedent or consequent name. antecedent_name (str): Match antecedent name (exact match). tag_id (int): Tag implication id.
[ "Get", "tags", "implications", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1055-L1069
19,460
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.tag_related
def tag_related(self, query, category=None): """Get related tags. Parameters: query (str): The tag to find the related tags for. category (str): If specified, show only tags of a specific category. Can be: General 0, Artist 1, Copyright 3 and Character 4. """ params = {'query': query, 'category': category} return self._get('related_tag.json', params)
python
def tag_related(self, query, category=None): params = {'query': query, 'category': category} return self._get('related_tag.json', params)
[ "def", "tag_related", "(", "self", ",", "query", ",", "category", "=", "None", ")", ":", "params", "=", "{", "'query'", ":", "query", ",", "'category'", ":", "category", "}", "return", "self", ".", "_get", "(", "'related_tag.json'", ",", "params", ")" ]
Get related tags. Parameters: query (str): The tag to find the related tags for. category (str): If specified, show only tags of a specific category. Can be: General 0, Artist 1, Copyright 3 and Character 4.
[ "Get", "related", "tags", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1071-L1081
19,461
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.wiki_list
def wiki_list(self, title=None, creator_id=None, body_matches=None, other_names_match=None, creator_name=None, hide_deleted=None, other_names_present=None, order=None): """Function to retrieves a list of every wiki page. Parameters: title (str): Page title. creator_id (int): Creator id. body_matches (str): Page content. other_names_match (str): Other names. creator_name (str): Creator name. hide_deleted (str): Can be: yes, no. other_names_present (str): Can be: yes, no. order (str): Can be: date, title. """ params = { 'search[title]': title, 'search[creator_id]': creator_id, 'search[body_matches]': body_matches, 'search[other_names_match]': other_names_match, 'search[creator_name]': creator_name, 'search[hide_deleted]': hide_deleted, 'search[other_names_present]': other_names_present, 'search[order]': order } return self._get('wiki_pages.json', params)
python
def wiki_list(self, title=None, creator_id=None, body_matches=None, other_names_match=None, creator_name=None, hide_deleted=None, other_names_present=None, order=None): params = { 'search[title]': title, 'search[creator_id]': creator_id, 'search[body_matches]': body_matches, 'search[other_names_match]': other_names_match, 'search[creator_name]': creator_name, 'search[hide_deleted]': hide_deleted, 'search[other_names_present]': other_names_present, 'search[order]': order } return self._get('wiki_pages.json', params)
[ "def", "wiki_list", "(", "self", ",", "title", "=", "None", ",", "creator_id", "=", "None", ",", "body_matches", "=", "None", ",", "other_names_match", "=", "None", ",", "creator_name", "=", "None", ",", "hide_deleted", "=", "None", ",", "other_names_present...
Function to retrieve a list of every wiki page. Parameters: title (str): Page title. creator_id (int): Creator id. body_matches (str): Page content. other_names_match (str): Other names. creator_name (str): Creator name. hide_deleted (str): Can be: yes, no. other_names_present (str): Can be: yes, no. order (str): Can be: date, title.
[ "Function", "to", "retrieve", "a", "list", "of", "every", "wiki", "page", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1083-L1108
19,462
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.wiki_versions_list
def wiki_versions_list(self, page_id, updater_id): """Return a list of wiki page version. Parameters: page_id (int): updater_id (int): """ params = { 'earch[updater_id]': updater_id, 'search[wiki_page_id]': page_id } return self._get('wiki_page_versions.json', params)
python
def wiki_versions_list(self, page_id, updater_id): params = { 'earch[updater_id]': updater_id, 'search[wiki_page_id]': page_id } return self._get('wiki_page_versions.json', params)
[ "def", "wiki_versions_list", "(", "self", ",", "page_id", ",", "updater_id", ")", ":", "params", "=", "{", "'earch[updater_id]'", ":", "updater_id", ",", "'search[wiki_page_id]'", ":", "page_id", "}", "return", "self", ".", "_get", "(", "'wiki_page_versions.json'"...
Return a list of wiki page versions. Parameters: page_id (int): updater_id (int):
[ "Return", "a", "list", "of", "wiki", "page", "versions", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1172-L1183
19,463
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.forum_topic_list
def forum_topic_list(self, title_matches=None, title=None, category_id=None): """Function to get forum topics. Parameters: title_matches (str): Search body for the given terms. title (str): Exact title match. category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features respectively). """ params = { 'search[title_matches]': title_matches, 'search[title]': title, 'search[category_id]': category_id } return self._get('forum_topics.json', params)
python
def forum_topic_list(self, title_matches=None, title=None, category_id=None): params = { 'search[title_matches]': title_matches, 'search[title]': title, 'search[category_id]': category_id } return self._get('forum_topics.json', params)
[ "def", "forum_topic_list", "(", "self", ",", "title_matches", "=", "None", ",", "title", "=", "None", ",", "category_id", "=", "None", ")", ":", "params", "=", "{", "'search[title_matches]'", ":", "title_matches", ",", "'search[title]'", ":", "title", ",", "...
Function to get forum topics. Parameters: title_matches (str): Search body for the given terms. title (str): Exact title match. category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features respectively).
[ "Function", "to", "get", "forum", "topics", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1193-L1208
19,464
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.forum_post_list
def forum_post_list(self, creator_id=None, creator_name=None, topic_id=None, topic_title_matches=None, topic_category_id=None, body_matches=None): """Return a list of forum posts. Parameters: creator_id (int): creator_name (str): topic_id (int): topic_title_matches (str): topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features respectively). body_matches (str): Can be part of the post content. """ params = { 'search[creator_id]': creator_id, 'search[creator_name]': creator_name, 'search[topic_id]': topic_id, 'search[topic_title_matches]': topic_title_matches, 'search[topic_category_id]': topic_category_id, 'search[body_matches]': body_matches } return self._get('forum_posts.json', params)
python
def forum_post_list(self, creator_id=None, creator_name=None, topic_id=None, topic_title_matches=None, topic_category_id=None, body_matches=None): params = { 'search[creator_id]': creator_id, 'search[creator_name]': creator_name, 'search[topic_id]': topic_id, 'search[topic_title_matches]': topic_title_matches, 'search[topic_category_id]': topic_category_id, 'search[body_matches]': body_matches } return self._get('forum_posts.json', params)
[ "def", "forum_post_list", "(", "self", ",", "creator_id", "=", "None", ",", "creator_name", "=", "None", ",", "topic_id", "=", "None", ",", "topic_title_matches", "=", "None", ",", "topic_category_id", "=", "None", ",", "body_matches", "=", "None", ")", ":",...
Return a list of forum posts. Parameters: creator_id (int): creator_name (str): topic_id (int): topic_title_matches (str): topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features respectively). body_matches (str): Can be part of the post content.
[ "Return", "a", "list", "of", "forum", "posts", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1268-L1290
19,465
LuqueDaniel/pybooru
pybooru/pybooru.py
_Pybooru.site_name
def site_name(self, site_name): """Function that sets and checks the site name and set url. Parameters: site_name (str): The site name in 'SITE_LIST', default sites. Raises: PybooruError: When 'site_name' isn't valid. """ if site_name in SITE_LIST: self.__site_name = site_name self.__site_url = SITE_LIST[site_name]['url'] else: raise PybooruError( "The 'site_name' is not valid, specify a valid 'site_name'.")
python
def site_name(self, site_name): if site_name in SITE_LIST: self.__site_name = site_name self.__site_url = SITE_LIST[site_name]['url'] else: raise PybooruError( "The 'site_name' is not valid, specify a valid 'site_name'.")
[ "def", "site_name", "(", "self", ",", "site_name", ")", ":", "if", "site_name", "in", "SITE_LIST", ":", "self", ".", "__site_name", "=", "site_name", "self", ".", "__site_url", "=", "SITE_LIST", "[", "site_name", "]", "[", "'url'", "]", "else", ":", "rai...
Function that sets and checks the site name and set url. Parameters: site_name (str): The site name in 'SITE_LIST', default sites. Raises: PybooruError: When 'site_name' isn't valid.
[ "Function", "that", "sets", "and", "checks", "the", "site", "name", "and", "set", "url", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/pybooru.py#L79-L93
19,466
LuqueDaniel/pybooru
pybooru/pybooru.py
_Pybooru.site_url
def site_url(self, url): """URL setter and validator for site_url property. Parameters: url (str): URL of on Moebooru/Danbooru based sites. Raises: PybooruError: When URL scheme or URL are invalid. """ # Regular expression to URL validate regex = re.compile( r'^(?:http|https)://' # Scheme only HTTP/HTTPS r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?| \ [A-Z0-9-]{2,}(?<!-)\.?)|' # Domain r'localhost|' # localhost... r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # or ipv4 r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # or ipv6 r'(?::\d+)?' # Port r'(?:/?|[/?]\S+)$', re.IGNORECASE) # Validate URL if re.match('^(?:http|https)://', url): if re.search(regex, url): self.__site_url = url else: raise PybooruError("Invalid URL: {0}".format(url)) else: raise PybooruError( "Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
python
def site_url(self, url): # Regular expression to URL validate regex = re.compile( r'^(?:http|https)://' # Scheme only HTTP/HTTPS r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?| \ [A-Z0-9-]{2,}(?<!-)\.?)|' # Domain r'localhost|' # localhost... r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # or ipv4 r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # or ipv6 r'(?::\d+)?' # Port r'(?:/?|[/?]\S+)$', re.IGNORECASE) # Validate URL if re.match('^(?:http|https)://', url): if re.search(regex, url): self.__site_url = url else: raise PybooruError("Invalid URL: {0}".format(url)) else: raise PybooruError( "Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
[ "def", "site_url", "(", "self", ",", "url", ")", ":", "# Regular expression to URL validate", "regex", "=", "re", ".", "compile", "(", "r'^(?:http|https)://'", "# Scheme only HTTP/HTTPS", "r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?| \\\n [A-Z0-9-]...
URL setter and validator for site_url property. Parameters: url (str): URL of on Moebooru/Danbooru based sites. Raises: PybooruError: When URL scheme or URL are invalid.
[ "URL", "setter", "and", "validator", "for", "site_url", "property", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/pybooru.py#L106-L134
19,467
LuqueDaniel/pybooru
pybooru/pybooru.py
_Pybooru._request
def _request(self, url, api_call, request_args, method='GET'): """Function to request and returning JSON data. Parameters: url (str): Base url call. api_call (str): API function to be called. request_args (dict): All requests parameters. method (str): (Defauld: GET) HTTP method 'GET' or 'POST' Raises: PybooruHTTPError: HTTP Error. requests.exceptions.Timeout: When HTTP Timeout. ValueError: When can't decode JSON response. """ try: if method != 'GET': # Reset content-type for data encoded as a multipart form self.client.headers.update({'content-type': None}) response = self.client.request(method, url, **request_args) self.last_call.update({ 'API': api_call, 'url': response.url, 'status_code': response.status_code, 'status': self._get_status(response.status_code), 'headers': response.headers }) if response.status_code in (200, 201, 202, 204): return response.json() raise PybooruHTTPError("In _request", response.status_code, response.url) except requests.exceptions.Timeout: raise PybooruError("Timeout! url: {0}".format(response.url)) except ValueError as e: raise PybooruError("JSON Error: {0} in line {1} column {2}".format( e.msg, e.lineno, e.colno))
python
def _request(self, url, api_call, request_args, method='GET'): try: if method != 'GET': # Reset content-type for data encoded as a multipart form self.client.headers.update({'content-type': None}) response = self.client.request(method, url, **request_args) self.last_call.update({ 'API': api_call, 'url': response.url, 'status_code': response.status_code, 'status': self._get_status(response.status_code), 'headers': response.headers }) if response.status_code in (200, 201, 202, 204): return response.json() raise PybooruHTTPError("In _request", response.status_code, response.url) except requests.exceptions.Timeout: raise PybooruError("Timeout! url: {0}".format(response.url)) except ValueError as e: raise PybooruError("JSON Error: {0} in line {1} column {2}".format( e.msg, e.lineno, e.colno))
[ "def", "_request", "(", "self", ",", "url", ",", "api_call", ",", "request_args", ",", "method", "=", "'GET'", ")", ":", "try", ":", "if", "method", "!=", "'GET'", ":", "# Reset content-type for data encoded as a multipart form", "self", ".", "client", ".", "h...
Function to request and returning JSON data. Parameters: url (str): Base url call. api_call (str): API function to be called. request_args (dict): All requests parameters. method (str): (Defauld: GET) HTTP method 'GET' or 'POST' Raises: PybooruHTTPError: HTTP Error. requests.exceptions.Timeout: When HTTP Timeout. ValueError: When can't decode JSON response.
[ "Function", "to", "request", "and", "returning", "JSON", "data", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/pybooru.py#L149-L186
19,468
LuqueDaniel/pybooru
pybooru/api_moebooru.py
MoebooruApi_Mixin.post_update
def post_update(self, post_id, tags=None, file_=None, rating=None, source=None, is_rating_locked=None, is_note_locked=None, parent_id=None): """Update a specific post. Only the 'post_id' parameter is required. Leave the other parameters blank if you don't want to change them (Requires login). Parameters: post_id (int): The id number of the post to update. tags (str): A space delimited list of tags. Specify previous tags. file_ (str): The file data ENCODED as a multipart form. rating (str): The rating for the post. Can be: safe, questionable, or explicit. source (str): If this is a URL, Moebooru will download the file. rating_locked (bool): Set to True to prevent others from changing the rating. note_locked (bool): Set to True to prevent others from adding notes. parent_id (int): The ID of the parent post. """ params = { 'id': post_id, 'post[tags]': tags, 'post[rating]': rating, 'post[source]': source, 'post[is_rating_locked]': is_rating_locked, 'post[is_note_locked]': is_note_locked, 'post[parent_id]': parent_id } if file_ is not None: file_ = {'post[file]': open(file_, 'rb')} return self._get('post/update', params, 'PUT', file_) else: return self._get('post/update', params, 'PUT')
python
def post_update(self, post_id, tags=None, file_=None, rating=None, source=None, is_rating_locked=None, is_note_locked=None, parent_id=None): params = { 'id': post_id, 'post[tags]': tags, 'post[rating]': rating, 'post[source]': source, 'post[is_rating_locked]': is_rating_locked, 'post[is_note_locked]': is_note_locked, 'post[parent_id]': parent_id } if file_ is not None: file_ = {'post[file]': open(file_, 'rb')} return self._get('post/update', params, 'PUT', file_) else: return self._get('post/update', params, 'PUT')
[ "def", "post_update", "(", "self", ",", "post_id", ",", "tags", "=", "None", ",", "file_", "=", "None", ",", "rating", "=", "None", ",", "source", "=", "None", ",", "is_rating_locked", "=", "None", ",", "is_note_locked", "=", "None", ",", "parent_id", ...
Update a specific post. Only the 'post_id' parameter is required. Leave the other parameters blank if you don't want to change them (Requires login). Parameters: post_id (int): The id number of the post to update. tags (str): A space delimited list of tags. Specify previous tags. file_ (str): The file data ENCODED as a multipart form. rating (str): The rating for the post. Can be: safe, questionable, or explicit. source (str): If this is a URL, Moebooru will download the file. rating_locked (bool): Set to True to prevent others from changing the rating. note_locked (bool): Set to True to prevent others from adding notes. parent_id (int): The ID of the parent post.
[ "Update", "a", "specific", "post", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_moebooru.py#L79-L113
19,469
LuqueDaniel/pybooru
pybooru/moebooru.py
Moebooru.site_name
def site_name(self, site_name): """Sets api_version and hash_string. Parameters: site_name (str): The site name in 'SITE_LIST', default sites. Raises: PybooruError: When 'site_name' isn't valid. """ # Set base class property site_name _Pybooru.site_name.fset(self, site_name) if ('api_version' and 'hashed_string') in SITE_LIST[site_name]: self.api_version = SITE_LIST[site_name]['api_version'] self.hash_string = SITE_LIST[site_name]['hashed_string']
python
def site_name(self, site_name): # Set base class property site_name _Pybooru.site_name.fset(self, site_name) if ('api_version' and 'hashed_string') in SITE_LIST[site_name]: self.api_version = SITE_LIST[site_name]['api_version'] self.hash_string = SITE_LIST[site_name]['hashed_string']
[ "def", "site_name", "(", "self", ",", "site_name", ")", ":", "# Set base class property site_name", "_Pybooru", ".", "site_name", ".", "fset", "(", "self", ",", "site_name", ")", "if", "(", "'api_version'", "and", "'hashed_string'", ")", "in", "SITE_LIST", "[", ...
Sets api_version and hash_string. Parameters: site_name (str): The site name in 'SITE_LIST', default sites. Raises: PybooruError: When 'site_name' isn't valid.
[ "Sets", "api_version", "and", "hash_string", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L74-L88
19,470
LuqueDaniel/pybooru
pybooru/moebooru.py
Moebooru._build_url
def _build_url(self, api_call): """Build request url. Parameters: api_call (str): Base API Call. Returns: Complete url (str). """ if self.api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'): if '/' not in api_call: return "{0}/{1}/index.json".format(self.site_url, api_call) return "{0}/{1}.json".format(self.site_url, api_call)
python
def _build_url(self, api_call): if self.api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'): if '/' not in api_call: return "{0}/{1}/index.json".format(self.site_url, api_call) return "{0}/{1}.json".format(self.site_url, api_call)
[ "def", "_build_url", "(", "self", ",", "api_call", ")", ":", "if", "self", ".", "api_version", "in", "(", "'1.13.0'", ",", "'1.13.0+update.1'", ",", "'1.13.0+update.2'", ")", ":", "if", "'/'", "not", "in", "api_call", ":", "return", "\"{0}/{1}/index.json\"", ...
Build request url. Parameters: api_call (str): Base API Call. Returns: Complete url (str).
[ "Build", "request", "url", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L90-L102
19,471
LuqueDaniel/pybooru
pybooru/moebooru.py
Moebooru._build_hash_string
def _build_hash_string(self): """Function for build password hash string. Raises: PybooruError: When isn't provide hash string. PybooruError: When aren't provide username or password. PybooruError: When Pybooru can't add password to hash strring. """ # Build AUTENTICATION hash_string # Check if hash_string exists if self.site_name in SITE_LIST or self.hash_string: if self.username and self.password: try: hash_string = self.hash_string.format(self.password) except TypeError: raise PybooruError("Pybooru can't add 'password' " "to 'hash_string'") # encrypt hashed_string to SHA1 and return hexdigest string self.password_hash = hashlib.sha1( hash_string.encode('utf-8')).hexdigest() else: raise PybooruError("Specify the 'username' and 'password' " "parameters of the Pybooru object, for " "setting 'password_hash' attribute.") else: raise PybooruError( "Specify the 'hash_string' parameter of the Pybooru" " object, for the functions that requires login.")
python
def _build_hash_string(self): # Build AUTENTICATION hash_string # Check if hash_string exists if self.site_name in SITE_LIST or self.hash_string: if self.username and self.password: try: hash_string = self.hash_string.format(self.password) except TypeError: raise PybooruError("Pybooru can't add 'password' " "to 'hash_string'") # encrypt hashed_string to SHA1 and return hexdigest string self.password_hash = hashlib.sha1( hash_string.encode('utf-8')).hexdigest() else: raise PybooruError("Specify the 'username' and 'password' " "parameters of the Pybooru object, for " "setting 'password_hash' attribute.") else: raise PybooruError( "Specify the 'hash_string' parameter of the Pybooru" " object, for the functions that requires login.")
[ "def", "_build_hash_string", "(", "self", ")", ":", "# Build AUTENTICATION hash_string", "# Check if hash_string exists", "if", "self", ".", "site_name", "in", "SITE_LIST", "or", "self", ".", "hash_string", ":", "if", "self", ".", "username", "and", "self", ".", "...
Function for build password hash string. Raises: PybooruError: When isn't provide hash string. PybooruError: When aren't provide username or password. PybooruError: When Pybooru can't add password to hash strring.
[ "Function", "for", "build", "password", "hash", "string", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L104-L131
19,472
bjodah/pyodesys
pyodesys/symbolic.py
_is_autonomous
def _is_autonomous(indep, exprs): """ Whether the expressions for the dependent variables are autonomous. Note that the system may still behave as an autonomous system on the interface of :meth:`integrate` due to use of pre-/post-processors. """ if indep is None: return True for expr in exprs: try: in_there = indep in expr.free_symbols except: in_there = expr.has(indep) if in_there: return False return True
python
def _is_autonomous(indep, exprs): if indep is None: return True for expr in exprs: try: in_there = indep in expr.free_symbols except: in_there = expr.has(indep) if in_there: return False return True
[ "def", "_is_autonomous", "(", "indep", ",", "exprs", ")", ":", "if", "indep", "is", "None", ":", "return", "True", "for", "expr", "in", "exprs", ":", "try", ":", "in_there", "=", "indep", "in", "expr", ".", "free_symbols", "except", ":", "in_there", "=...
Whether the expressions for the dependent variables are autonomous. Note that the system may still behave as an autonomous system on the interface of :meth:`integrate` due to use of pre-/post-processors.
[ "Whether", "the", "expressions", "for", "the", "dependent", "variables", "are", "autonomous", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L81-L96
19,473
bjodah/pyodesys
pyodesys/symbolic.py
symmetricsys
def symmetricsys(dep_tr=None, indep_tr=None, SuperClass=TransformedSys, **kwargs): """ A factory function for creating symmetrically transformed systems. Creates a new subclass which applies the same transformation for each dependent variable. Parameters ---------- dep_tr : pair of callables (default: None) Forward and backward transformation callbacks to be applied to the dependent variables. indep_tr : pair of callables (default: None) Forward and backward transformation to be applied to the independent variable. SuperClass : class \*\*kwargs : Default keyword arguments for the TransformedSys subclass. Returns ------- Subclass of SuperClass (by default :class:`TransformedSys`). Examples -------- >>> import sympy >>> logexp = (sympy.log, sympy.exp) >>> def psimp(exprs): ... return [sympy.powsimp(expr.expand(), force=True) for expr in exprs] ... >>> LogLogSys = symmetricsys(logexp, logexp, exprs_process_cb=psimp) >>> mysys = LogLogSys.from_callback(lambda x, y, p: [-y[0], y[0] - y[1]], 2, 0) >>> mysys.exprs (-exp(x_0), -exp(x_0) + exp(x_0 + y_0 - y_1)) """ if dep_tr is not None: if not callable(dep_tr[0]) or not callable(dep_tr[1]): raise ValueError("Exceptected dep_tr to be a pair of callables") if indep_tr is not None: if not callable(indep_tr[0]) or not callable(indep_tr[1]): raise ValueError("Exceptected indep_tr to be a pair of callables") class _SymmetricSys(SuperClass): def __init__(self, dep_exprs, indep=None, **inner_kwargs): new_kwargs = kwargs.copy() new_kwargs.update(inner_kwargs) dep, exprs = zip(*dep_exprs) super(_SymmetricSys, self).__init__( zip(dep, exprs), indep, dep_transf=list(zip( list(map(dep_tr[0], dep)), list(map(dep_tr[1], dep)) )) if dep_tr is not None else None, indep_transf=((indep_tr[0](indep), indep_tr[1](indep)) if indep_tr is not None else None), **new_kwargs) @classmethod def from_callback(cls, cb, ny=None, nparams=None, **inner_kwargs): new_kwargs = kwargs.copy() new_kwargs.update(inner_kwargs) return SuperClass.from_callback( cb, 
ny, nparams, dep_transf_cbs=repeat(dep_tr) if dep_tr is not None else None, indep_transf_cbs=indep_tr, **new_kwargs) return _SymmetricSys
python
def symmetricsys(dep_tr=None, indep_tr=None, SuperClass=TransformedSys, **kwargs): if dep_tr is not None: if not callable(dep_tr[0]) or not callable(dep_tr[1]): raise ValueError("Exceptected dep_tr to be a pair of callables") if indep_tr is not None: if not callable(indep_tr[0]) or not callable(indep_tr[1]): raise ValueError("Exceptected indep_tr to be a pair of callables") class _SymmetricSys(SuperClass): def __init__(self, dep_exprs, indep=None, **inner_kwargs): new_kwargs = kwargs.copy() new_kwargs.update(inner_kwargs) dep, exprs = zip(*dep_exprs) super(_SymmetricSys, self).__init__( zip(dep, exprs), indep, dep_transf=list(zip( list(map(dep_tr[0], dep)), list(map(dep_tr[1], dep)) )) if dep_tr is not None else None, indep_transf=((indep_tr[0](indep), indep_tr[1](indep)) if indep_tr is not None else None), **new_kwargs) @classmethod def from_callback(cls, cb, ny=None, nparams=None, **inner_kwargs): new_kwargs = kwargs.copy() new_kwargs.update(inner_kwargs) return SuperClass.from_callback( cb, ny, nparams, dep_transf_cbs=repeat(dep_tr) if dep_tr is not None else None, indep_transf_cbs=indep_tr, **new_kwargs) return _SymmetricSys
[ "def", "symmetricsys", "(", "dep_tr", "=", "None", ",", "indep_tr", "=", "None", ",", "SuperClass", "=", "TransformedSys", ",", "*", "*", "kwargs", ")", ":", "if", "dep_tr", "is", "not", "None", ":", "if", "not", "callable", "(", "dep_tr", "[", "0", ...
A factory function for creating symmetrically transformed systems. Creates a new subclass which applies the same transformation for each dependent variable. Parameters ---------- dep_tr : pair of callables (default: None) Forward and backward transformation callbacks to be applied to the dependent variables. indep_tr : pair of callables (default: None) Forward and backward transformation to be applied to the independent variable. SuperClass : class \*\*kwargs : Default keyword arguments for the TransformedSys subclass. Returns ------- Subclass of SuperClass (by default :class:`TransformedSys`). Examples -------- >>> import sympy >>> logexp = (sympy.log, sympy.exp) >>> def psimp(exprs): ... return [sympy.powsimp(expr.expand(), force=True) for expr in exprs] ... >>> LogLogSys = symmetricsys(logexp, logexp, exprs_process_cb=psimp) >>> mysys = LogLogSys.from_callback(lambda x, y, p: [-y[0], y[0] - y[1]], 2, 0) >>> mysys.exprs (-exp(x_0), -exp(x_0) + exp(x_0 + y_0 - y_1))
[ "A", "factory", "function", "for", "creating", "symmetrically", "transformed", "systems", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L961-L1027
19,474
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.from_other
def from_other(cls, ori, **kwargs): """ Creates a new instance with an existing one as a template. Parameters ---------- ori : SymbolicSys instance \\*\\*kwargs: Keyword arguments used to create the new instance. Returns ------- A new instance of the class. """ for k in cls._attrs_to_copy + ('params', 'roots', 'init_indep', 'init_dep'): if k not in kwargs: val = getattr(ori, k) if val is not None: kwargs[k] = val if 'lower_bounds' not in kwargs and getattr(ori, 'lower_bounds') is not None: kwargs['lower_bounds'] = ori.lower_bounds if 'upper_bounds' not in kwargs and getattr(ori, 'upper_bounds') is not None: kwargs['upper_bounds'] = ori.upper_bounds if len(ori.pre_processors) > 0: if 'pre_processors' not in kwargs: kwargs['pre_processors'] = [] kwargs['pre_processors'] = kwargs['pre_processors'] + ori.pre_processors if len(ori.post_processors) > 0: if 'post_processors' not in kwargs: kwargs['post_processors'] = [] kwargs['post_processors'] = ori.post_processors + kwargs['post_processors'] if 'dep_exprs' not in kwargs: kwargs['dep_exprs'] = zip(ori.dep, ori.exprs) if 'indep' not in kwargs: kwargs['indep'] = ori.indep instance = cls(**kwargs) for attr in ori._attrs_to_copy: if attr not in cls._attrs_to_copy: setattr(instance, attr, getattr(ori, attr)) return instance
python
def from_other(cls, ori, **kwargs): for k in cls._attrs_to_copy + ('params', 'roots', 'init_indep', 'init_dep'): if k not in kwargs: val = getattr(ori, k) if val is not None: kwargs[k] = val if 'lower_bounds' not in kwargs and getattr(ori, 'lower_bounds') is not None: kwargs['lower_bounds'] = ori.lower_bounds if 'upper_bounds' not in kwargs and getattr(ori, 'upper_bounds') is not None: kwargs['upper_bounds'] = ori.upper_bounds if len(ori.pre_processors) > 0: if 'pre_processors' not in kwargs: kwargs['pre_processors'] = [] kwargs['pre_processors'] = kwargs['pre_processors'] + ori.pre_processors if len(ori.post_processors) > 0: if 'post_processors' not in kwargs: kwargs['post_processors'] = [] kwargs['post_processors'] = ori.post_processors + kwargs['post_processors'] if 'dep_exprs' not in kwargs: kwargs['dep_exprs'] = zip(ori.dep, ori.exprs) if 'indep' not in kwargs: kwargs['indep'] = ori.indep instance = cls(**kwargs) for attr in ori._attrs_to_copy: if attr not in cls._attrs_to_copy: setattr(instance, attr, getattr(ori, attr)) return instance
[ "def", "from_other", "(", "cls", ",", "ori", ",", "*", "*", "kwargs", ")", ":", "for", "k", "in", "cls", ".", "_attrs_to_copy", "+", "(", "'params'", ",", "'roots'", ",", "'init_indep'", ",", "'init_dep'", ")", ":", "if", "k", "not", "in", "kwargs", ...
Creates a new instance with an existing one as a template. Parameters ---------- ori : SymbolicSys instance \\*\\*kwargs: Keyword arguments used to create the new instance. Returns ------- A new instance of the class.
[ "Creates", "a", "new", "instance", "with", "an", "existing", "one", "as", "a", "template", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L466-L508
19,475
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_jac
def get_jac(self): """ Derives the jacobian from ``self.exprs`` and ``self.dep``. """ if self._jac is True: if self.sparse is True: self._jac, self._colptrs, self._rowvals = self.be.sparse_jacobian_csc(self.exprs, self.dep) elif self.band is not None: # Banded self._jac = self.be.banded_jacobian(self.exprs, self.dep, *self.band) else: f = self.be.Matrix(1, self.ny, self.exprs) self._jac = f.jacobian(self.be.Matrix(1, self.ny, self.dep)) elif self._jac is False: return False return self._jac
python
def get_jac(self): if self._jac is True: if self.sparse is True: self._jac, self._colptrs, self._rowvals = self.be.sparse_jacobian_csc(self.exprs, self.dep) elif self.band is not None: # Banded self._jac = self.be.banded_jacobian(self.exprs, self.dep, *self.band) else: f = self.be.Matrix(1, self.ny, self.exprs) self._jac = f.jacobian(self.be.Matrix(1, self.ny, self.dep)) elif self._jac is False: return False return self._jac
[ "def", "get_jac", "(", "self", ")", ":", "if", "self", ".", "_jac", "is", "True", ":", "if", "self", ".", "sparse", "is", "True", ":", "self", ".", "_jac", ",", "self", ".", "_colptrs", ",", "self", ".", "_rowvals", "=", "self", ".", "be", ".", ...
Derives the jacobian from ``self.exprs`` and ``self.dep``.
[ "Derives", "the", "jacobian", "from", "self", ".", "exprs", "and", "self", ".", "dep", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L637-L650
19,476
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_jtimes
def get_jtimes(self): """ Derive the jacobian-vector product from ``self.exprs`` and ``self.dep``""" if self._jtimes is False: return False if self._jtimes is True: r = self.be.Dummy('r') v = tuple(self.be.Dummy('v_{0}'.format(i)) for i in range(self.ny)) f = self.be.Matrix(1, self.ny, self.exprs) f = f.subs([(x_i, x_i + r * v_i) for x_i, v_i in zip(self.dep, v)]) return v, self.be.flatten(f.diff(r).subs(r, 0)) else: return tuple(zip(*self._jtimes))
python
def get_jtimes(self): if self._jtimes is False: return False if self._jtimes is True: r = self.be.Dummy('r') v = tuple(self.be.Dummy('v_{0}'.format(i)) for i in range(self.ny)) f = self.be.Matrix(1, self.ny, self.exprs) f = f.subs([(x_i, x_i + r * v_i) for x_i, v_i in zip(self.dep, v)]) return v, self.be.flatten(f.diff(r).subs(r, 0)) else: return tuple(zip(*self._jtimes))
[ "def", "get_jtimes", "(", "self", ")", ":", "if", "self", ".", "_jtimes", "is", "False", ":", "return", "False", "if", "self", ".", "_jtimes", "is", "True", ":", "r", "=", "self", ".", "be", ".", "Dummy", "(", "'r'", ")", "v", "=", "tuple", "(", ...
Derive the jacobian-vector product from ``self.exprs`` and ``self.dep``
[ "Derive", "the", "jacobian", "-", "vector", "product", "from", "self", ".", "exprs", "and", "self", ".", "dep" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L652-L664
19,477
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.jacobian_singular
def jacobian_singular(self): """ Returns True if Jacobian is singular, else False. """ cses, (jac_in_cses,) = self.be.cse(self.get_jac()) if jac_in_cses.nullspace(): return True else: return False
python
def jacobian_singular(self): cses, (jac_in_cses,) = self.be.cse(self.get_jac()) if jac_in_cses.nullspace(): return True else: return False
[ "def", "jacobian_singular", "(", "self", ")", ":", "cses", ",", "(", "jac_in_cses", ",", ")", "=", "self", ".", "be", ".", "cse", "(", "self", ".", "get_jac", "(", ")", ")", "if", "jac_in_cses", ".", "nullspace", "(", ")", ":", "return", "True", "e...
Returns True if Jacobian is singular, else False.
[ "Returns", "True", "if", "Jacobian", "is", "singular", "else", "False", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L666-L672
19,478
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_dfdx
def get_dfdx(self): """ Calculates 2nd derivatives of ``self.exprs`` """ if self._dfdx is True: if self.indep is None: zero = 0*self.be.Dummy()**0 self._dfdx = self.be.Matrix(1, self.ny, [zero]*self.ny) else: self._dfdx = self.be.Matrix(1, self.ny, [expr.diff(self.indep) for expr in self.exprs]) elif self._dfdx is False: return False return self._dfdx
python
def get_dfdx(self): if self._dfdx is True: if self.indep is None: zero = 0*self.be.Dummy()**0 self._dfdx = self.be.Matrix(1, self.ny, [zero]*self.ny) else: self._dfdx = self.be.Matrix(1, self.ny, [expr.diff(self.indep) for expr in self.exprs]) elif self._dfdx is False: return False return self._dfdx
[ "def", "get_dfdx", "(", "self", ")", ":", "if", "self", ".", "_dfdx", "is", "True", ":", "if", "self", ".", "indep", "is", "None", ":", "zero", "=", "0", "*", "self", ".", "be", ".", "Dummy", "(", ")", "**", "0", "self", ".", "_dfdx", "=", "s...
Calculates 2nd derivatives of ``self.exprs``
[ "Calculates", "2nd", "derivatives", "of", "self", ".", "exprs" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L674-L684
19,479
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_f_ty_callback
def get_f_ty_callback(self): """ Generates a callback for evaluating ``self.exprs``. """ cb = self._callback_factory(self.exprs) lb = self.lower_bounds ub = self.upper_bounds if lb is not None or ub is not None: def _bounds_wrapper(t, y, p=(), be=None): if lb is not None: if np.any(y < lb - 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y < lb] = lb[y < lb] if ub is not None: if np.any(y > ub + 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y > ub] = ub[y > ub] return cb(t, y, p, be) return _bounds_wrapper else: return cb
python
def get_f_ty_callback(self): cb = self._callback_factory(self.exprs) lb = self.lower_bounds ub = self.upper_bounds if lb is not None or ub is not None: def _bounds_wrapper(t, y, p=(), be=None): if lb is not None: if np.any(y < lb - 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y < lb] = lb[y < lb] if ub is not None: if np.any(y > ub + 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y > ub] = ub[y > ub] return cb(t, y, p, be) return _bounds_wrapper else: return cb
[ "def", "get_f_ty_callback", "(", "self", ")", ":", "cb", "=", "self", ".", "_callback_factory", "(", "self", ".", "exprs", ")", "lb", "=", "self", ".", "lower_bounds", "ub", "=", "self", ".", "upper_bounds", "if", "lb", "is", "not", "None", "or", "ub",...
Generates a callback for evaluating ``self.exprs``.
[ "Generates", "a", "callback", "for", "evaluating", "self", ".", "exprs", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L689-L709
19,480
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_j_ty_callback
def get_j_ty_callback(self): """ Generates a callback for evaluating the jacobian. """ j_exprs = self.get_jac() if j_exprs is False: return None cb = self._callback_factory(j_exprs) if self.sparse: from scipy.sparse import csc_matrix def sparse_cb(x, y, p=()): data = cb(x, y, p).flatten() return csc_matrix((data, self._rowvals, self._colptrs)) return sparse_cb else: return cb
python
def get_j_ty_callback(self): j_exprs = self.get_jac() if j_exprs is False: return None cb = self._callback_factory(j_exprs) if self.sparse: from scipy.sparse import csc_matrix def sparse_cb(x, y, p=()): data = cb(x, y, p).flatten() return csc_matrix((data, self._rowvals, self._colptrs)) return sparse_cb else: return cb
[ "def", "get_j_ty_callback", "(", "self", ")", ":", "j_exprs", "=", "self", ".", "get_jac", "(", ")", "if", "j_exprs", "is", "False", ":", "return", "None", "cb", "=", "self", ".", "_callback_factory", "(", "j_exprs", ")", "if", "self", ".", "sparse", "...
Generates a callback for evaluating the jacobian.
[ "Generates", "a", "callback", "for", "evaluating", "the", "jacobian", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L711-L726
19,481
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_dfdx_callback
def get_dfdx_callback(self): """ Generate a callback for evaluating derivative of ``self.exprs`` """ dfdx_exprs = self.get_dfdx() if dfdx_exprs is False: return None return self._callback_factory(dfdx_exprs)
python
def get_dfdx_callback(self): dfdx_exprs = self.get_dfdx() if dfdx_exprs is False: return None return self._callback_factory(dfdx_exprs)
[ "def", "get_dfdx_callback", "(", "self", ")", ":", "dfdx_exprs", "=", "self", ".", "get_dfdx", "(", ")", "if", "dfdx_exprs", "is", "False", ":", "return", "None", "return", "self", ".", "_callback_factory", "(", "dfdx_exprs", ")" ]
Generate a callback for evaluating derivative of ``self.exprs``
[ "Generate", "a", "callback", "for", "evaluating", "derivative", "of", "self", ".", "exprs" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L728-L733
19,482
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_jtimes_callback
def get_jtimes_callback(self): """ Generate a callback fro evaluating the jacobian-vector product.""" jtimes = self.get_jtimes() if jtimes is False: return None v, jtimes_exprs = jtimes return _Callback(self.indep, tuple(self.dep) + tuple(v), self.params, jtimes_exprs, Lambdify=self.be.Lambdify)
python
def get_jtimes_callback(self): jtimes = self.get_jtimes() if jtimes is False: return None v, jtimes_exprs = jtimes return _Callback(self.indep, tuple(self.dep) + tuple(v), self.params, jtimes_exprs, Lambdify=self.be.Lambdify)
[ "def", "get_jtimes_callback", "(", "self", ")", ":", "jtimes", "=", "self", ".", "get_jtimes", "(", ")", "if", "jtimes", "is", "False", ":", "return", "None", "v", ",", "jtimes_exprs", "=", "jtimes", "return", "_Callback", "(", "self", ".", "indep", ",",...
Generate a callback fro evaluating the jacobian-vector product.
[ "Generate", "a", "callback", "fro", "evaluating", "the", "jacobian", "-", "vector", "product", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L735-L742
19,483
bjodah/pyodesys
pyodesys/symbolic.py
PartiallySolvedSystem.from_linear_invariants
def from_linear_invariants(cls, ori_sys, preferred=None, **kwargs): """ Reformulates the ODE system in fewer variables. Given linear invariant equations one can always reduce the number of dependent variables in the system by the rank of the matrix describing this linear system. Parameters ---------- ori_sys : :class:`SymbolicSys` instance preferred : iterable of preferred dependent variables Due to numerical rounding it is preferable to choose the variables which are expected to be of the largest magnitude during integration. \*\*kwargs : Keyword arguments passed on to constructor. """ _be = ori_sys.be A = _be.Matrix(ori_sys.linear_invariants) rA, pivots = A.rref() if len(pivots) < A.shape[0]: # If the linear system contains rows which a linearly dependent these could be removed. # The criterion for removal could be dictated by a user provided callback. # # An alternative would be to write the matrix in reduced row echelon form, however, # this would cause the invariants to become linear combinations of each other and # their intuitive meaning (original principles they were formulated from) will be lost. # Hence that is not the default behaviour. However, the user may choose to rewrite the # equations in reduced row echelon form if they choose to before calling this method. 
raise NotImplementedError("Linear invariants contain linear dependencies.") per_row_cols = [(ri, [ci for ci in range(A.cols) if A[ri, ci] != 0]) for ri in range(A.rows)] if preferred is None: preferred = ori_sys.names[:A.rows] if ori_sys.dep_by_name else list(range(A.rows)) targets = [ ori_sys.names.index(dep) if ori_sys.dep_by_name else ( dep if isinstance(dep, int) else ori_sys.dep.index(dep)) for dep in preferred] row_tgt = [] for ri, colids in sorted(per_row_cols, key=lambda k: len(k[1])): for tgt in targets: if tgt in colids: row_tgt.append((ri, tgt)) targets.remove(tgt) break if len(targets) == 0: break else: raise ValueError("Could not find a solutions for: %s" % targets) def analytic_factory(x0, y0, p0, be): return { ori_sys.dep[tgt]: y0[ori_sys.dep[tgt] if ori_sys.dep_by_name else tgt] - sum( [A[ri, ci]*(ori_sys.dep[ci] - y0[ori_sys.dep[ci] if ori_sys.dep_by_name else ci]) for ci in range(A.cols) if ci != tgt])/A[ri, tgt] for ri, tgt in row_tgt } ori_li_nms = ori_sys.linear_invariant_names or () new_lin_invar = [[cell for ci, cell in enumerate(row) if ci not in list(zip(*row_tgt))[1]] for ri, row in enumerate(A.tolist()) if ri not in list(zip(*row_tgt))[0]] new_lin_i_nms = [nam for ri, nam in enumerate(ori_li_nms) if ri not in list(zip(*row_tgt))[0]] return cls(ori_sys, analytic_factory, linear_invariants=new_lin_invar, linear_invariant_names=new_lin_i_nms, **kwargs)
python
def from_linear_invariants(cls, ori_sys, preferred=None, **kwargs): _be = ori_sys.be A = _be.Matrix(ori_sys.linear_invariants) rA, pivots = A.rref() if len(pivots) < A.shape[0]: # If the linear system contains rows which a linearly dependent these could be removed. # The criterion for removal could be dictated by a user provided callback. # # An alternative would be to write the matrix in reduced row echelon form, however, # this would cause the invariants to become linear combinations of each other and # their intuitive meaning (original principles they were formulated from) will be lost. # Hence that is not the default behaviour. However, the user may choose to rewrite the # equations in reduced row echelon form if they choose to before calling this method. raise NotImplementedError("Linear invariants contain linear dependencies.") per_row_cols = [(ri, [ci for ci in range(A.cols) if A[ri, ci] != 0]) for ri in range(A.rows)] if preferred is None: preferred = ori_sys.names[:A.rows] if ori_sys.dep_by_name else list(range(A.rows)) targets = [ ori_sys.names.index(dep) if ori_sys.dep_by_name else ( dep if isinstance(dep, int) else ori_sys.dep.index(dep)) for dep in preferred] row_tgt = [] for ri, colids in sorted(per_row_cols, key=lambda k: len(k[1])): for tgt in targets: if tgt in colids: row_tgt.append((ri, tgt)) targets.remove(tgt) break if len(targets) == 0: break else: raise ValueError("Could not find a solutions for: %s" % targets) def analytic_factory(x0, y0, p0, be): return { ori_sys.dep[tgt]: y0[ori_sys.dep[tgt] if ori_sys.dep_by_name else tgt] - sum( [A[ri, ci]*(ori_sys.dep[ci] - y0[ori_sys.dep[ci] if ori_sys.dep_by_name else ci]) for ci in range(A.cols) if ci != tgt])/A[ri, tgt] for ri, tgt in row_tgt } ori_li_nms = ori_sys.linear_invariant_names or () new_lin_invar = [[cell for ci, cell in enumerate(row) if ci not in list(zip(*row_tgt))[1]] for ri, row in enumerate(A.tolist()) if ri not in list(zip(*row_tgt))[0]] new_lin_i_nms = [nam for ri, nam in 
enumerate(ori_li_nms) if ri not in list(zip(*row_tgt))[0]] return cls(ori_sys, analytic_factory, linear_invariants=new_lin_invar, linear_invariant_names=new_lin_i_nms, **kwargs)
[ "def", "from_linear_invariants", "(", "cls", ",", "ori_sys", ",", "preferred", "=", "None", ",", "*", "*", "kwargs", ")", ":", "_be", "=", "ori_sys", ".", "be", "A", "=", "_be", ".", "Matrix", "(", "ori_sys", ".", "linear_invariants", ")", "rA", ",", ...
Reformulates the ODE system in fewer variables. Given linear invariant equations one can always reduce the number of dependent variables in the system by the rank of the matrix describing this linear system. Parameters ---------- ori_sys : :class:`SymbolicSys` instance preferred : iterable of preferred dependent variables Due to numerical rounding it is preferable to choose the variables which are expected to be of the largest magnitude during integration. \*\*kwargs : Keyword arguments passed on to constructor.
[ "Reformulates", "the", "ODE", "system", "in", "fewer", "variables", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L1275-L1335
19,484
bjodah/pyodesys
pyodesys/core.py
chained_parameter_variation
def chained_parameter_variation(subject, durations, y0, varied_params, default_params=None, integrate_kwargs=None, x0=None, npoints=1, numpy=None): """ Integrate an ODE-system for a serie of durations with some parameters changed in-between Parameters ---------- subject : function or ODESys instance If a function: should have the signature of :meth:`pyodesys.ODESys.integrate` (and resturn a :class:`pyodesys.results.Result` object). If a ODESys instance: the ``integrate`` method will be used. durations : iterable of floats Spans of the independent variable. y0 : dict or array_like varied_params : dict mapping parameter name (or index) to array_like Each array_like need to be of same length as durations. default_params : dict or array_like Default values for the parameters of the ODE system. integrate_kwargs : dict Keyword arguments passed on to ``integrate``. x0 : float-like First value of independent variable. default: 0. npoints : int Number of points per sub-interval. Examples -------- >>> odesys = ODESys(lambda t, y, p: [-p[0]*y[0]]) >>> int_kw = dict(integrator='cvode', method='adams', atol=1e-12, rtol=1e-12) >>> kwargs = dict(default_params=[0], integrate_kwargs=int_kw) >>> res = chained_parameter_variation(odesys, [2, 3], [42], {0: [.7, .1]}, **kwargs) >>> mask1 = res.xout <= 2 >>> import numpy as np >>> np.allclose(res.yout[mask1, 0], 42*np.exp(-.7*res.xout[mask1])) True >>> mask2 = 2 <= res.xout >>> np.allclose(res.yout[mask2, 0], res.yout[mask2, 0][0]*np.exp(-.1*(res.xout[mask2] - res.xout[mask2][0]))) True """ assert len(durations) > 0, 'need at least 1 duration (preferably many)' assert npoints > 0, 'need at least 1 point per duration' for k, v in varied_params.items(): if len(v) != len(durations): raise ValueError("Mismathced lengths of durations and varied_params") if isinstance(subject, ODESys): integrate = subject.integrate numpy = numpy or subject.numpy else: integrate = subject numpy = numpy or np default_params = default_params or {} 
integrate_kwargs = integrate_kwargs or {} def _get_idx(cont, idx): if isinstance(cont, dict): return {k: (v[idx] if hasattr(v, '__len__') and getattr(v, 'ndim', 1) > 0 else v) for k, v in cont.items()} else: return cont[idx] durations = numpy.cumsum(durations) for idx_dur in range(len(durations)): params = copy.copy(default_params) for k, v in varied_params.items(): params[k] = v[idx_dur] if idx_dur == 0: if x0 is None: x0 = durations[0]*0 out = integrate(numpy.linspace(x0, durations[0], npoints + 1), y0, params, **integrate_kwargs) else: if isinstance(out, Result): out.extend_by_integration(durations[idx_dur], params, npoints=npoints, **integrate_kwargs) else: for idx_res, r in enumerate(out): r.extend_by_integration(durations[idx_dur], _get_idx(params, idx_res), npoints=npoints, **integrate_kwargs) return out
python
def chained_parameter_variation(subject, durations, y0, varied_params, default_params=None, integrate_kwargs=None, x0=None, npoints=1, numpy=None): assert len(durations) > 0, 'need at least 1 duration (preferably many)' assert npoints > 0, 'need at least 1 point per duration' for k, v in varied_params.items(): if len(v) != len(durations): raise ValueError("Mismathced lengths of durations and varied_params") if isinstance(subject, ODESys): integrate = subject.integrate numpy = numpy or subject.numpy else: integrate = subject numpy = numpy or np default_params = default_params or {} integrate_kwargs = integrate_kwargs or {} def _get_idx(cont, idx): if isinstance(cont, dict): return {k: (v[idx] if hasattr(v, '__len__') and getattr(v, 'ndim', 1) > 0 else v) for k, v in cont.items()} else: return cont[idx] durations = numpy.cumsum(durations) for idx_dur in range(len(durations)): params = copy.copy(default_params) for k, v in varied_params.items(): params[k] = v[idx_dur] if idx_dur == 0: if x0 is None: x0 = durations[0]*0 out = integrate(numpy.linspace(x0, durations[0], npoints + 1), y0, params, **integrate_kwargs) else: if isinstance(out, Result): out.extend_by_integration(durations[idx_dur], params, npoints=npoints, **integrate_kwargs) else: for idx_res, r in enumerate(out): r.extend_by_integration(durations[idx_dur], _get_idx(params, idx_res), npoints=npoints, **integrate_kwargs) return out
[ "def", "chained_parameter_variation", "(", "subject", ",", "durations", ",", "y0", ",", "varied_params", ",", "default_params", "=", "None", ",", "integrate_kwargs", "=", "None", ",", "x0", "=", "None", ",", "npoints", "=", "1", ",", "numpy", "=", "None", ...
Integrate an ODE-system for a serie of durations with some parameters changed in-between Parameters ---------- subject : function or ODESys instance If a function: should have the signature of :meth:`pyodesys.ODESys.integrate` (and resturn a :class:`pyodesys.results.Result` object). If a ODESys instance: the ``integrate`` method will be used. durations : iterable of floats Spans of the independent variable. y0 : dict or array_like varied_params : dict mapping parameter name (or index) to array_like Each array_like need to be of same length as durations. default_params : dict or array_like Default values for the parameters of the ODE system. integrate_kwargs : dict Keyword arguments passed on to ``integrate``. x0 : float-like First value of independent variable. default: 0. npoints : int Number of points per sub-interval. Examples -------- >>> odesys = ODESys(lambda t, y, p: [-p[0]*y[0]]) >>> int_kw = dict(integrator='cvode', method='adams', atol=1e-12, rtol=1e-12) >>> kwargs = dict(default_params=[0], integrate_kwargs=int_kw) >>> res = chained_parameter_variation(odesys, [2, 3], [42], {0: [.7, .1]}, **kwargs) >>> mask1 = res.xout <= 2 >>> import numpy as np >>> np.allclose(res.yout[mask1, 0], 42*np.exp(-.7*res.xout[mask1])) True >>> mask2 = 2 <= res.xout >>> np.allclose(res.yout[mask2, 0], res.yout[mask2, 0][0]*np.exp(-.1*(res.xout[mask2] - res.xout[mask2][0]))) True
[ "Integrate", "an", "ODE", "-", "system", "for", "a", "serie", "of", "durations", "with", "some", "parameters", "changed", "in", "-", "between" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L917-L996
19,485
bjodah/pyodesys
pyodesys/core.py
ODESys.pre_process
def pre_process(self, xout, y0, params=()): """ Transforms input to internal values, used internally. """ for pre_processor in self.pre_processors: xout, y0, params = pre_processor(xout, y0, params) return [self.numpy.atleast_1d(arr) for arr in (xout, y0, params)]
python
def pre_process(self, xout, y0, params=()): for pre_processor in self.pre_processors: xout, y0, params = pre_processor(xout, y0, params) return [self.numpy.atleast_1d(arr) for arr in (xout, y0, params)]
[ "def", "pre_process", "(", "self", ",", "xout", ",", "y0", ",", "params", "=", "(", ")", ")", ":", "for", "pre_processor", "in", "self", ".", "pre_processors", ":", "xout", ",", "y0", ",", "params", "=", "pre_processor", "(", "xout", ",", "y0", ",", ...
Transforms input to internal values, used internally.
[ "Transforms", "input", "to", "internal", "values", "used", "internally", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L286-L290
19,486
bjodah/pyodesys
pyodesys/core.py
ODESys.post_process
def post_process(self, xout, yout, params): """ Transforms internal values to output, used internally. """ for post_processor in self.post_processors: xout, yout, params = post_processor(xout, yout, params) return xout, yout, params
python
def post_process(self, xout, yout, params): for post_processor in self.post_processors: xout, yout, params = post_processor(xout, yout, params) return xout, yout, params
[ "def", "post_process", "(", "self", ",", "xout", ",", "yout", ",", "params", ")", ":", "for", "post_processor", "in", "self", ".", "post_processors", ":", "xout", ",", "yout", ",", "params", "=", "post_processor", "(", "xout", ",", "yout", ",", "params",...
Transforms internal values to output, used internally.
[ "Transforms", "internal", "values", "to", "output", "used", "internally", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L292-L296
19,487
bjodah/pyodesys
pyodesys/core.py
ODESys.adaptive
def adaptive(self, y0, x0, xend, params=(), **kwargs): """ Integrate with integrator chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. x0 : float Initial value of the independent variable. xend : float Final value of the independent variable. params : array_like See :meth:`integrate`. \*\*kwargs : See :meth:`integrate`. Returns ------- Same as :meth:`integrate` """ return self.integrate((x0, xend), y0, params=params, **kwargs)
python
def adaptive(self, y0, x0, xend, params=(), **kwargs): return self.integrate((x0, xend), y0, params=params, **kwargs)
[ "def", "adaptive", "(", "self", ",", "y0", ",", "x0", ",", "xend", ",", "params", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "integrate", "(", "(", "x0", ",", "xend", ")", ",", "y0", ",", "params", "=", "params", ...
Integrate with integrator chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. x0 : float Initial value of the independent variable. xend : float Final value of the independent variable. params : array_like See :meth:`integrate`. \*\*kwargs : See :meth:`integrate`. Returns ------- Same as :meth:`integrate`
[ "Integrate", "with", "integrator", "chosen", "output", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L298-L321
19,488
bjodah/pyodesys
pyodesys/core.py
ODESys.predefined
def predefined(self, y0, xout, params=(), **kwargs): """ Integrate with user chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. xout : array_like params : array_like See :meth:`integrate`. \*\*kwargs: See :meth:`integrate` Returns ------- Length 2 tuple : (yout, info) See :meth:`integrate`. """ xout, yout, info = self.integrate(xout, y0, params=params, force_predefined=True, **kwargs) return yout, info
python
def predefined(self, y0, xout, params=(), **kwargs): xout, yout, info = self.integrate(xout, y0, params=params, force_predefined=True, **kwargs) return yout, info
[ "def", "predefined", "(", "self", ",", "y0", ",", "xout", ",", "params", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "xout", ",", "yout", ",", "info", "=", "self", ".", "integrate", "(", "xout", ",", "y0", ",", "params", "=", "params", ",...
Integrate with user chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. xout : array_like params : array_like See :meth:`integrate`. \*\*kwargs: See :meth:`integrate` Returns ------- Length 2 tuple : (yout, info) See :meth:`integrate`.
[ "Integrate", "with", "user", "chosen", "output", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L323-L345
19,489
bjodah/pyodesys
pyodesys/core.py
ODESys.integrate
def integrate(self, x, y0, params=(), atol=1e-8, rtol=1e-8, **kwargs): """ Integrate the system of ordinary differential equations. Solves the initial value problem (IVP). Parameters ---------- x : array_like or pair (start and final time) or float if float: make it a pair: (0, x) if pair or length-2 array: initial and final value of the independent variable if array_like: values of independent variable report at y0 : array_like Initial values at x[0] for the dependent variables. params : array_like (default: tuple()) Value of parameters passed to user-supplied callbacks. integrator : str or None Name of integrator, one of: - 'scipy': :meth:`_integrate_scipy` - 'gsl': :meth:`_integrate_gsl` - 'odeint': :meth:`_integrate_odeint` - 'cvode': :meth:`_integrate_cvode` See respective method for more information. If ``None``: ``os.environ.get('PYODESYS_INTEGRATOR', 'scipy')`` atol : float Absolute tolerance rtol : float Relative tolerance with_jacobian : bool or None (default) Whether to use the jacobian. When ``None`` the choice is done automatically (only used when required). This matters when jacobian is derived at runtime (high computational cost). with_jtimes : bool (default: False) Whether to use the jacobian-vector product. This is only supported by ``cvode`` and only when ``linear_solver`` is one of: gmres', 'gmres_classic', 'bicgstab', 'tfqmr'. See the documentation for ``pycvodes`` for more information. force_predefined : bool (default: False) override behaviour of ``len(x) == 2`` => :meth:`adaptive` \\*\\*kwargs : Additional keyword arguments for ``_integrate_$(integrator)``. Returns ------- Length 3 tuple: (x, yout, info) x : array of values of the independent variable yout : array of the dependent variable(s) for the different values of x. 
info : dict ('nfev' is guaranteed to be a key) """ arrs = self.to_arrays(x, y0, params) _x, _y, _p = _arrs = self.pre_process(*arrs) ndims = [a.ndim for a in _arrs] if ndims == [1, 1, 1]: twodim = False elif ndims == [2, 2, 2]: twodim = True else: raise ValueError("Pre-processor made ndims inconsistent?") if self.append_iv: _p = self.numpy.concatenate((_p, _y), axis=-1) if hasattr(self, 'ny'): if _y.shape[-1] != self.ny: raise ValueError("Incorrect shape of intern_y0") if isinstance(atol, dict): kwargs['atol'] = [atol[k] for k in self.names] else: kwargs['atol'] = atol kwargs['rtol'] = rtol integrator = kwargs.pop('integrator', None) if integrator is None: integrator = os.environ.get('PYODESYS_INTEGRATOR', 'scipy') args = tuple(map(self.numpy.atleast_2d, (_x, _y, _p))) self._current_integration_kwargs = kwargs if isinstance(integrator, str): nfo = getattr(self, '_integrate_' + integrator)(*args, **kwargs) else: kwargs['with_jacobian'] = getattr(integrator, 'with_jacobian', None) nfo = self._integrate(integrator.integrate_adaptive, integrator.integrate_predefined, *args, **kwargs) if twodim: _xout = [d['internal_xout'] for d in nfo] _yout = [d['internal_yout'] for d in nfo] _params = [d['internal_params'] for d in nfo] res = [Result(*(self.post_process(_xout[i], _yout[i], _params[i]) + (nfo[i], self))) for i in range(len(nfo))] else: _xout = nfo[0]['internal_xout'] _yout = nfo[0]['internal_yout'] self._internal = _xout.copy(), _yout.copy(), _p.copy() nfo = nfo[0] res = Result(*(self.post_process(_xout, _yout, _p) + (nfo, self))) return res
python
def integrate(self, x, y0, params=(), atol=1e-8, rtol=1e-8, **kwargs): arrs = self.to_arrays(x, y0, params) _x, _y, _p = _arrs = self.pre_process(*arrs) ndims = [a.ndim for a in _arrs] if ndims == [1, 1, 1]: twodim = False elif ndims == [2, 2, 2]: twodim = True else: raise ValueError("Pre-processor made ndims inconsistent?") if self.append_iv: _p = self.numpy.concatenate((_p, _y), axis=-1) if hasattr(self, 'ny'): if _y.shape[-1] != self.ny: raise ValueError("Incorrect shape of intern_y0") if isinstance(atol, dict): kwargs['atol'] = [atol[k] for k in self.names] else: kwargs['atol'] = atol kwargs['rtol'] = rtol integrator = kwargs.pop('integrator', None) if integrator is None: integrator = os.environ.get('PYODESYS_INTEGRATOR', 'scipy') args = tuple(map(self.numpy.atleast_2d, (_x, _y, _p))) self._current_integration_kwargs = kwargs if isinstance(integrator, str): nfo = getattr(self, '_integrate_' + integrator)(*args, **kwargs) else: kwargs['with_jacobian'] = getattr(integrator, 'with_jacobian', None) nfo = self._integrate(integrator.integrate_adaptive, integrator.integrate_predefined, *args, **kwargs) if twodim: _xout = [d['internal_xout'] for d in nfo] _yout = [d['internal_yout'] for d in nfo] _params = [d['internal_params'] for d in nfo] res = [Result(*(self.post_process(_xout[i], _yout[i], _params[i]) + (nfo[i], self))) for i in range(len(nfo))] else: _xout = nfo[0]['internal_xout'] _yout = nfo[0]['internal_yout'] self._internal = _xout.copy(), _yout.copy(), _p.copy() nfo = nfo[0] res = Result(*(self.post_process(_xout, _yout, _p) + (nfo, self))) return res
[ "def", "integrate", "(", "self", ",", "x", ",", "y0", ",", "params", "=", "(", ")", ",", "atol", "=", "1e-8", ",", "rtol", "=", "1e-8", ",", "*", "*", "kwargs", ")", ":", "arrs", "=", "self", ".", "to_arrays", "(", "x", ",", "y0", ",", "param...
Integrate the system of ordinary differential equations. Solves the initial value problem (IVP). Parameters ---------- x : array_like or pair (start and final time) or float if float: make it a pair: (0, x) if pair or length-2 array: initial and final value of the independent variable if array_like: values of independent variable report at y0 : array_like Initial values at x[0] for the dependent variables. params : array_like (default: tuple()) Value of parameters passed to user-supplied callbacks. integrator : str or None Name of integrator, one of: - 'scipy': :meth:`_integrate_scipy` - 'gsl': :meth:`_integrate_gsl` - 'odeint': :meth:`_integrate_odeint` - 'cvode': :meth:`_integrate_cvode` See respective method for more information. If ``None``: ``os.environ.get('PYODESYS_INTEGRATOR', 'scipy')`` atol : float Absolute tolerance rtol : float Relative tolerance with_jacobian : bool or None (default) Whether to use the jacobian. When ``None`` the choice is done automatically (only used when required). This matters when jacobian is derived at runtime (high computational cost). with_jtimes : bool (default: False) Whether to use the jacobian-vector product. This is only supported by ``cvode`` and only when ``linear_solver`` is one of: gmres', 'gmres_classic', 'bicgstab', 'tfqmr'. See the documentation for ``pycvodes`` for more information. force_predefined : bool (default: False) override behaviour of ``len(x) == 2`` => :meth:`adaptive` \\*\\*kwargs : Additional keyword arguments for ``_integrate_$(integrator)``. Returns ------- Length 3 tuple: (x, yout, info) x : array of values of the independent variable yout : array of the dependent variable(s) for the different values of x. info : dict ('nfev' is guaranteed to be a key)
[ "Integrate", "the", "system", "of", "ordinary", "differential", "equations", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L347-L449
19,490
bjodah/pyodesys
pyodesys/core.py
ODESys.plot_phase_plane
def plot_phase_plane(self, indices=None, **kwargs): """ Plots a phase portrait from last integration. This method will be deprecated. Please use :meth:`Result.plot_phase_plane`. See :func:`pyodesys.plotting.plot_phase_plane` """ return self._plot(plot_phase_plane, indices=indices, **kwargs)
python
def plot_phase_plane(self, indices=None, **kwargs): return self._plot(plot_phase_plane, indices=indices, **kwargs)
[ "def", "plot_phase_plane", "(", "self", ",", "indices", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_plot", "(", "plot_phase_plane", ",", "indices", "=", "indices", ",", "*", "*", "kwargs", ")" ]
Plots a phase portrait from last integration. This method will be deprecated. Please use :meth:`Result.plot_phase_plane`. See :func:`pyodesys.plotting.plot_phase_plane`
[ "Plots", "a", "phase", "portrait", "from", "last", "integration", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L756-L762
19,491
neon-jungle/wagtailnews
wagtailnews/permissions.py
user_can_edit_news
def user_can_edit_news(user): """ Check if the user has permission to edit any of the registered NewsItem types. """ newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
python
def user_can_edit_news(user): newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
[ "def", "user_can_edit_news", "(", "user", ")", ":", "newsitem_models", "=", "[", "model", ".", "get_newsitem_model", "(", ")", "for", "model", "in", "NEWSINDEX_MODEL_CLASSES", "]", "if", "user", ".", "is_active", "and", "user", ".", "is_superuser", ":", "# adm...
Check if the user has permission to edit any of the registered NewsItem types.
[ "Check", "if", "the", "user", "has", "permission", "to", "edit", "any", "of", "the", "registered", "NewsItem", "types", "." ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/permissions.py#L21-L38
19,492
neon-jungle/wagtailnews
wagtailnews/permissions.py
user_can_edit_newsitem
def user_can_edit_newsitem(user, NewsItem): """ Check if the user has permission to edit a particular NewsItem type. """ for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
python
def user_can_edit_newsitem(user, NewsItem): for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
[ "def", "user_can_edit_newsitem", "(", "user", ",", "NewsItem", ")", ":", "for", "perm", "in", "format_perms", "(", "NewsItem", ",", "[", "'add'", ",", "'change'", ",", "'delete'", "]", ")", ":", "if", "user", ".", "has_perm", "(", "perm", ")", ":", "re...
Check if the user has permission to edit a particular NewsItem type.
[ "Check", "if", "the", "user", "has", "permission", "to", "edit", "a", "particular", "NewsItem", "type", "." ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/permissions.py#L41-L49
19,493
neon-jungle/wagtailnews
wagtailnews/models.py
get_date_or_404
def get_date_or_404(year, month, day): """Try to make a date from the given inputs, raising Http404 on error""" try: return datetime.date(int(year), int(month), int(day)) except ValueError: raise Http404
python
def get_date_or_404(year, month, day): try: return datetime.date(int(year), int(month), int(day)) except ValueError: raise Http404
[ "def", "get_date_or_404", "(", "year", ",", "month", ",", "day", ")", ":", "try", ":", "return", "datetime", ".", "date", "(", "int", "(", "year", ")", ",", "int", "(", "month", ")", ",", "int", "(", "day", ")", ")", "except", "ValueError", ":", ...
Try to make a date from the given inputs, raising Http404 on error
[ "Try", "to", "make", "a", "date", "from", "the", "given", "inputs", "raising", "Http404", "on", "error" ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/models.py#L29-L34
19,494
neon-jungle/wagtailnews
wagtailnews/models.py
NewsIndexMixin.respond
def respond(self, request, view, newsitems, extra_context={}): """A helper that takes some news items and returns an HttpResponse""" context = self.get_context(request, view=view) context.update(self.paginate_newsitems(request, newsitems)) context.update(extra_context) template = self.get_template(request, view=view) return TemplateResponse(request, template, context)
python
def respond(self, request, view, newsitems, extra_context={}): context = self.get_context(request, view=view) context.update(self.paginate_newsitems(request, newsitems)) context.update(extra_context) template = self.get_template(request, view=view) return TemplateResponse(request, template, context)
[ "def", "respond", "(", "self", ",", "request", ",", "view", ",", "newsitems", ",", "extra_context", "=", "{", "}", ")", ":", "context", "=", "self", ".", "get_context", "(", "request", ",", "view", "=", "view", ")", "context", ".", "update", "(", "se...
A helper that takes some news items and returns an HttpResponse
[ "A", "helper", "that", "takes", "some", "news", "items", "and", "returns", "an", "HttpResponse" ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/models.py#L80-L86
19,495
neon-jungle/wagtailnews
wagtailnews/views/chooser.py
get_newsitem_model
def get_newsitem_model(model_string): """ Get the NewsItem model from a model string. Raises ValueError if the model string is invalid, or references a model that is not a NewsItem. """ try: NewsItem = apps.get_model(model_string) assert issubclass(NewsItem, AbstractNewsItem) except (ValueError, LookupError, AssertionError): raise ValueError('Invalid news item model string'.format(model_string)) return NewsItem
python
def get_newsitem_model(model_string): try: NewsItem = apps.get_model(model_string) assert issubclass(NewsItem, AbstractNewsItem) except (ValueError, LookupError, AssertionError): raise ValueError('Invalid news item model string'.format(model_string)) return NewsItem
[ "def", "get_newsitem_model", "(", "model_string", ")", ":", "try", ":", "NewsItem", "=", "apps", ".", "get_model", "(", "model_string", ")", "assert", "issubclass", "(", "NewsItem", ",", "AbstractNewsItem", ")", "except", "(", "ValueError", ",", "LookupError", ...
Get the NewsItem model from a model string. Raises ValueError if the model string is invalid, or references a model that is not a NewsItem.
[ "Get", "the", "NewsItem", "model", "from", "a", "model", "string", ".", "Raises", "ValueError", "if", "the", "model", "string", "is", "invalid", "or", "references", "a", "model", "that", "is", "not", "a", "NewsItem", "." ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/views/chooser.py#L119-L129
19,496
geometalab/pyGeoTile
pygeotile/tile.py
Tile.from_quad_tree
def from_quad_tree(cls, quad_tree): """Creates a tile from a Microsoft QuadTree""" assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consists of the digits 0, 1, 2 and 3.' zoom = len(str(quad_tree)) offset = int(math.pow(2, zoom)) - 1 google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0) for bits in zip(*(reversed(divmod(digit, 2)) for digit in (int(c) for c in str(quad_tree))))] return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom)
python
def from_quad_tree(cls, quad_tree): assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consists of the digits 0, 1, 2 and 3.' zoom = len(str(quad_tree)) offset = int(math.pow(2, zoom)) - 1 google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0) for bits in zip(*(reversed(divmod(digit, 2)) for digit in (int(c) for c in str(quad_tree))))] return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom)
[ "def", "from_quad_tree", "(", "cls", ",", "quad_tree", ")", ":", "assert", "bool", "(", "re", ".", "match", "(", "'^[0-3]*$'", ",", "quad_tree", ")", ")", ",", "'QuadTree value can only consists of the digits 0, 1, 2 and 3.'", "zoom", "=", "len", "(", "str", "("...
Creates a tile from a Microsoft QuadTree
[ "Creates", "a", "tile", "from", "a", "Microsoft", "QuadTree" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L16-L24
19,497
geometalab/pyGeoTile
pygeotile/tile.py
Tile.from_google
def from_google(cls, google_x, google_y, zoom): """Creates a tile from Google format X Y and zoom""" max_tile = (2 ** zoom) - 1 assert 0 <= google_x <= max_tile, 'Google X needs to be a value between 0 and (2^zoom) -1.' assert 0 <= google_y <= max_tile, 'Google Y needs to be a value between 0 and (2^zoom) -1.' return cls(tms_x=google_x, tms_y=(2 ** zoom - 1) - google_y, zoom=zoom)
python
def from_google(cls, google_x, google_y, zoom): max_tile = (2 ** zoom) - 1 assert 0 <= google_x <= max_tile, 'Google X needs to be a value between 0 and (2^zoom) -1.' assert 0 <= google_y <= max_tile, 'Google Y needs to be a value between 0 and (2^zoom) -1.' return cls(tms_x=google_x, tms_y=(2 ** zoom - 1) - google_y, zoom=zoom)
[ "def", "from_google", "(", "cls", ",", "google_x", ",", "google_y", ",", "zoom", ")", ":", "max_tile", "=", "(", "2", "**", "zoom", ")", "-", "1", "assert", "0", "<=", "google_x", "<=", "max_tile", ",", "'Google X needs to be a value between 0 and (2^zoom) -1....
Creates a tile from Google format X Y and zoom
[ "Creates", "a", "tile", "from", "Google", "format", "X", "Y", "and", "zoom" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L35-L40
19,498
geometalab/pyGeoTile
pygeotile/tile.py
Tile.for_point
def for_point(cls, point, zoom): """Creates a tile for given point""" latitude, longitude = point.latitude_longitude return cls.for_latitude_longitude(latitude=latitude, longitude=longitude, zoom=zoom)
python
def for_point(cls, point, zoom): latitude, longitude = point.latitude_longitude return cls.for_latitude_longitude(latitude=latitude, longitude=longitude, zoom=zoom)
[ "def", "for_point", "(", "cls", ",", "point", ",", "zoom", ")", ":", "latitude", ",", "longitude", "=", "point", ".", "latitude_longitude", "return", "cls", ".", "for_latitude_longitude", "(", "latitude", "=", "latitude", ",", "longitude", "=", "longitude", ...
Creates a tile for given point
[ "Creates", "a", "tile", "for", "given", "point" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L43-L46
19,499
geometalab/pyGeoTile
pygeotile/tile.py
Tile.quad_tree
def quad_tree(self): """Gets the tile in the Microsoft QuadTree format, converted from TMS""" value = '' tms_x, tms_y = self.tms tms_y = (2 ** self.zoom - 1) - tms_y for i in range(self.zoom, 0, -1): digit = 0 mask = 1 << (i - 1) if (tms_x & mask) != 0: digit += 1 if (tms_y & mask) != 0: digit += 2 value += str(digit) return value
python
def quad_tree(self): value = '' tms_x, tms_y = self.tms tms_y = (2 ** self.zoom - 1) - tms_y for i in range(self.zoom, 0, -1): digit = 0 mask = 1 << (i - 1) if (tms_x & mask) != 0: digit += 1 if (tms_y & mask) != 0: digit += 2 value += str(digit) return value
[ "def", "quad_tree", "(", "self", ")", ":", "value", "=", "''", "tms_x", ",", "tms_y", "=", "self", ".", "tms", "tms_y", "=", "(", "2", "**", "self", ".", "zoom", "-", "1", ")", "-", "tms_y", "for", "i", "in", "range", "(", "self", ".", "zoom", ...
Gets the tile in the Microsoft QuadTree format, converted from TMS
[ "Gets", "the", "tile", "in", "the", "Microsoft", "QuadTree", "format", "converted", "from", "TMS" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L75-L88