repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
belbio/bel | bel/resources/ortholog.py | load_orthologs | def load_orthologs(fo: IO, metadata: dict):
"""Load orthologs into ArangoDB
Args:
fo: file obj - orthologs file
metadata: dict containing the metadata for orthologs
"""
version = metadata["metadata"]["version"]
# LOAD ORTHOLOGS INTO ArangoDB
with timy.Timer("Load Orthologs") as timer:
arango_client = arangodb.get_client()
belns_db = arangodb.get_belns_handle(arango_client)
arangodb.batch_load_docs(
belns_db, orthologs_iterator(fo, version), on_duplicate="update"
)
log.info(
"Load orthologs",
elapsed=timer.elapsed,
source=metadata["metadata"]["source"],
)
# Clean up old entries
remove_old_ortholog_edges = f"""
FOR edge in ortholog_edges
FILTER edge.source == "{metadata["metadata"]["source"]}"
FILTER edge.version != "{version}"
REMOVE edge IN ortholog_edges
"""
remove_old_ortholog_nodes = f"""
FOR node in ortholog_nodes
FILTER node.source == "{metadata["metadata"]["source"]}"
FILTER node.version != "{version}"
REMOVE node IN ortholog_nodes
"""
arangodb.aql_query(belns_db, remove_old_ortholog_edges)
arangodb.aql_query(belns_db, remove_old_ortholog_nodes)
# Add metadata to resource metadata collection
metadata["_key"] = f"Orthologs_{metadata['metadata']['source']}"
try:
belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
except ArangoError as ae:
belns_db.collection(arangodb.belns_metadata_name).replace(metadata) | python | def load_orthologs(fo: IO, metadata: dict):
"""Load orthologs into ArangoDB
Args:
fo: file obj - orthologs file
metadata: dict containing the metadata for orthologs
"""
version = metadata["metadata"]["version"]
# LOAD ORTHOLOGS INTO ArangoDB
with timy.Timer("Load Orthologs") as timer:
arango_client = arangodb.get_client()
belns_db = arangodb.get_belns_handle(arango_client)
arangodb.batch_load_docs(
belns_db, orthologs_iterator(fo, version), on_duplicate="update"
)
log.info(
"Load orthologs",
elapsed=timer.elapsed,
source=metadata["metadata"]["source"],
)
# Clean up old entries
remove_old_ortholog_edges = f"""
FOR edge in ortholog_edges
FILTER edge.source == "{metadata["metadata"]["source"]}"
FILTER edge.version != "{version}"
REMOVE edge IN ortholog_edges
"""
remove_old_ortholog_nodes = f"""
FOR node in ortholog_nodes
FILTER node.source == "{metadata["metadata"]["source"]}"
FILTER node.version != "{version}"
REMOVE node IN ortholog_nodes
"""
arangodb.aql_query(belns_db, remove_old_ortholog_edges)
arangodb.aql_query(belns_db, remove_old_ortholog_nodes)
# Add metadata to resource metadata collection
metadata["_key"] = f"Orthologs_{metadata['metadata']['source']}"
try:
belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
except ArangoError as ae:
belns_db.collection(arangodb.belns_metadata_name).replace(metadata) | [
"def",
"load_orthologs",
"(",
"fo",
":",
"IO",
",",
"metadata",
":",
"dict",
")",
":",
"version",
"=",
"metadata",
"[",
"\"metadata\"",
"]",
"[",
"\"version\"",
"]",
"# LOAD ORTHOLOGS INTO ArangoDB",
"with",
"timy",
".",
"Timer",
"(",
"\"Load Orthologs\"",
")"... | Load orthologs into ArangoDB
Args:
fo: file obj - orthologs file
metadata: dict containing the metadata for orthologs | [
"Load",
"orthologs",
"into",
"ArangoDB"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/ortholog.py#L19-L64 | train | 50,900 |
belbio/bel | bel/resources/ortholog.py | orthologs_iterator | def orthologs_iterator(fo, version):
"""Ortholog node and edge iterator"""
species_list = config["bel_resources"].get("species_list", [])
fo.seek(0)
with gzip.open(fo, "rt") as f:
for line in f:
edge = json.loads(line)
if "metadata" in edge:
source = edge["metadata"]["source"]
continue
if "ortholog" in edge:
edge = edge["ortholog"]
subj_tax_id = edge["subject"]["tax_id"]
obj_tax_id = edge["object"]["tax_id"]
# Skip if species not listed in species_list
if species_list and subj_tax_id and subj_tax_id not in species_list:
continue
if species_list and obj_tax_id and obj_tax_id not in species_list:
continue
# Converted to ArangoDB legal chars for _key
subj_key = arangodb.arango_id_to_key(edge["subject"]["id"])
subj_id = edge["subject"]["id"]
# Converted to ArangoDB legal chars for _key
obj_key = arangodb.arango_id_to_key(edge["object"]["id"])
obj_id = edge["object"]["id"]
# Subject node
yield (
arangodb.ortholog_nodes_name,
{
"_key": subj_key,
"name": subj_id,
"tax_id": edge["subject"]["tax_id"],
"source": source,
"version": version,
},
)
# Object node
yield (
arangodb.ortholog_nodes_name,
{
"_key": obj_key,
"name": obj_id,
"tax_id": edge["object"]["tax_id"],
"source": source,
"version": version,
},
)
arango_edge = {
"_from": f"{arangodb.ortholog_nodes_name}/{subj_key}",
"_to": f"{arangodb.ortholog_nodes_name}/{obj_key}",
"_key": bel.utils._create_hash(f"{subj_id}>>{obj_id}"),
"type": "ortholog_to",
"source": source,
"version": version,
}
yield (arangodb.ortholog_edges_name, arango_edge) | python | def orthologs_iterator(fo, version):
"""Ortholog node and edge iterator"""
species_list = config["bel_resources"].get("species_list", [])
fo.seek(0)
with gzip.open(fo, "rt") as f:
for line in f:
edge = json.loads(line)
if "metadata" in edge:
source = edge["metadata"]["source"]
continue
if "ortholog" in edge:
edge = edge["ortholog"]
subj_tax_id = edge["subject"]["tax_id"]
obj_tax_id = edge["object"]["tax_id"]
# Skip if species not listed in species_list
if species_list and subj_tax_id and subj_tax_id not in species_list:
continue
if species_list and obj_tax_id and obj_tax_id not in species_list:
continue
# Converted to ArangoDB legal chars for _key
subj_key = arangodb.arango_id_to_key(edge["subject"]["id"])
subj_id = edge["subject"]["id"]
# Converted to ArangoDB legal chars for _key
obj_key = arangodb.arango_id_to_key(edge["object"]["id"])
obj_id = edge["object"]["id"]
# Subject node
yield (
arangodb.ortholog_nodes_name,
{
"_key": subj_key,
"name": subj_id,
"tax_id": edge["subject"]["tax_id"],
"source": source,
"version": version,
},
)
# Object node
yield (
arangodb.ortholog_nodes_name,
{
"_key": obj_key,
"name": obj_id,
"tax_id": edge["object"]["tax_id"],
"source": source,
"version": version,
},
)
arango_edge = {
"_from": f"{arangodb.ortholog_nodes_name}/{subj_key}",
"_to": f"{arangodb.ortholog_nodes_name}/{obj_key}",
"_key": bel.utils._create_hash(f"{subj_id}>>{obj_id}"),
"type": "ortholog_to",
"source": source,
"version": version,
}
yield (arangodb.ortholog_edges_name, arango_edge) | [
"def",
"orthologs_iterator",
"(",
"fo",
",",
"version",
")",
":",
"species_list",
"=",
"config",
"[",
"\"bel_resources\"",
"]",
".",
"get",
"(",
"\"species_list\"",
",",
"[",
"]",
")",
"fo",
".",
"seek",
"(",
"0",
")",
"with",
"gzip",
".",
"open",
"(",... | Ortholog node and edge iterator | [
"Ortholog",
"node",
"and",
"edge",
"iterator"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/ortholog.py#L67-L131 | train | 50,901 |
belbio/bel | bel/lang/migrate_1_2.py | migrate | def migrate(belstr: str) -> str:
"""Migrate BEL 1 to 2.0.0
Args:
bel: BEL 1
Returns:
bel: BEL 2
"""
bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")
return migrate_ast(bo.ast).to_string() | python | def migrate(belstr: str) -> str:
"""Migrate BEL 1 to 2.0.0
Args:
bel: BEL 1
Returns:
bel: BEL 2
"""
bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")
return migrate_ast(bo.ast).to_string() | [
"def",
"migrate",
"(",
"belstr",
":",
"str",
")",
"->",
"str",
":",
"bo",
".",
"ast",
"=",
"bel",
".",
"lang",
".",
"partialparse",
".",
"get_ast_obj",
"(",
"belstr",
",",
"\"2.0.0\"",
")",
"return",
"migrate_ast",
"(",
"bo",
".",
"ast",
")",
".",
... | Migrate BEL 1 to 2.0.0
Args:
bel: BEL 1
Returns:
bel: BEL 2 | [
"Migrate",
"BEL",
"1",
"to",
"2",
".",
"0",
".",
"0"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L26-L38 | train | 50,902 |
belbio/bel | bel/lang/migrate_1_2.py | migrate_into_triple | def migrate_into_triple(belstr: str) -> str:
"""Migrate BEL1 assertion into BEL 2.0.0 SRO triple"""
bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")
return migrate_ast(bo.ast).to_triple() | python | def migrate_into_triple(belstr: str) -> str:
"""Migrate BEL1 assertion into BEL 2.0.0 SRO triple"""
bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")
return migrate_ast(bo.ast).to_triple() | [
"def",
"migrate_into_triple",
"(",
"belstr",
":",
"str",
")",
"->",
"str",
":",
"bo",
".",
"ast",
"=",
"bel",
".",
"lang",
".",
"partialparse",
".",
"get_ast_obj",
"(",
"belstr",
",",
"\"2.0.0\"",
")",
"return",
"migrate_ast",
"(",
"bo",
".",
"ast",
")... | Migrate BEL1 assertion into BEL 2.0.0 SRO triple | [
"Migrate",
"BEL1",
"assertion",
"into",
"BEL",
"2",
".",
"0",
".",
"0",
"SRO",
"triple"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L41-L46 | train | 50,903 |
belbio/bel | bel/lang/migrate_1_2.py | convert | def convert(ast):
"""Convert BEL1 AST Function to BEL2 AST Function"""
if ast and ast.type == "Function":
# Activity function conversion
if (
ast.name != "molecularActivity"
and ast.name in spec["namespaces"]["Activity"]["list"]
):
print("name", ast.name, "type", ast.type)
ast = convert_activity(ast)
return ast # Otherwise - this will trigger on the BEL2 molecularActivity
# translocation conversion
elif ast.name in ["tloc", "translocation"]:
ast = convert_tloc(ast)
fus_flag = False
for idx, arg in enumerate(ast.args):
if arg.__class__.__name__ == "Function":
# Fix substitution -> variation()
if arg.name in ["sub", "substitution"]:
ast.args[idx] = convert_sub(arg)
elif arg.name in ["trunc", "truncation"]:
ast.args[idx] = convert_trunc(arg)
elif arg.name in ["pmod", "proteinModification"]:
ast.args[idx] = convert_pmod(arg)
elif arg.name in ["fus", "fusion"]:
fus_flag = True
# Recursively process Functions
ast.args[idx] = convert(ast.args[idx])
if fus_flag:
ast = convert_fus(ast)
return ast | python | def convert(ast):
"""Convert BEL1 AST Function to BEL2 AST Function"""
if ast and ast.type == "Function":
# Activity function conversion
if (
ast.name != "molecularActivity"
and ast.name in spec["namespaces"]["Activity"]["list"]
):
print("name", ast.name, "type", ast.type)
ast = convert_activity(ast)
return ast # Otherwise - this will trigger on the BEL2 molecularActivity
# translocation conversion
elif ast.name in ["tloc", "translocation"]:
ast = convert_tloc(ast)
fus_flag = False
for idx, arg in enumerate(ast.args):
if arg.__class__.__name__ == "Function":
# Fix substitution -> variation()
if arg.name in ["sub", "substitution"]:
ast.args[idx] = convert_sub(arg)
elif arg.name in ["trunc", "truncation"]:
ast.args[idx] = convert_trunc(arg)
elif arg.name in ["pmod", "proteinModification"]:
ast.args[idx] = convert_pmod(arg)
elif arg.name in ["fus", "fusion"]:
fus_flag = True
# Recursively process Functions
ast.args[idx] = convert(ast.args[idx])
if fus_flag:
ast = convert_fus(ast)
return ast | [
"def",
"convert",
"(",
"ast",
")",
":",
"if",
"ast",
"and",
"ast",
".",
"type",
"==",
"\"Function\"",
":",
"# Activity function conversion",
"if",
"(",
"ast",
".",
"name",
"!=",
"\"molecularActivity\"",
"and",
"ast",
".",
"name",
"in",
"spec",
"[",
"\"name... | Convert BEL1 AST Function to BEL2 AST Function | [
"Convert",
"BEL1",
"AST",
"Function",
"to",
"BEL2",
"AST",
"Function"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L65-L105 | train | 50,904 |
belbio/bel | bel/db/arangodb.py | get_client | def get_client(host=None, port=None, username=None, password=None, enable_logging=True):
"""Get arango client and edgestore db handle"""
host = utils.first_true(
[host, config["bel_api"]["servers"]["arangodb_host"], "localhost"]
)
port = utils.first_true([port, config["bel_api"]["servers"]["arangodb_port"], 8529])
username = utils.first_true(
[username, config["bel_api"]["servers"]["arangodb_username"], ""]
)
password = utils.first_true(
[
password,
config.get(
"secrets",
config["secrets"]["bel_api"]["servers"].get("arangodb_password"),
),
"",
]
)
client = arango.client.ArangoClient(
protocol=config["bel_api"]["servers"]["arangodb_protocol"], host=host, port=port
)
return client | python | def get_client(host=None, port=None, username=None, password=None, enable_logging=True):
"""Get arango client and edgestore db handle"""
host = utils.first_true(
[host, config["bel_api"]["servers"]["arangodb_host"], "localhost"]
)
port = utils.first_true([port, config["bel_api"]["servers"]["arangodb_port"], 8529])
username = utils.first_true(
[username, config["bel_api"]["servers"]["arangodb_username"], ""]
)
password = utils.first_true(
[
password,
config.get(
"secrets",
config["secrets"]["bel_api"]["servers"].get("arangodb_password"),
),
"",
]
)
client = arango.client.ArangoClient(
protocol=config["bel_api"]["servers"]["arangodb_protocol"], host=host, port=port
)
return client | [
"def",
"get_client",
"(",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"enable_logging",
"=",
"True",
")",
":",
"host",
"=",
"utils",
".",
"first_true",
"(",
"[",
"host",
",",
"config... | Get arango client and edgestore db handle | [
"Get",
"arango",
"client",
"and",
"edgestore",
"db",
"handle"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L62-L87 | train | 50,905 |
belbio/bel | bel/db/arangodb.py | get_edgestore_handle | def get_edgestore_handle(
client: arango.client.ArangoClient,
username=None,
password=None,
edgestore_db_name: str = edgestore_db_name,
edgestore_edges_name: str = edgestore_edges_name,
edgestore_nodes_name: str = edgestore_nodes_name,
edgestore_pipeline_name: str = edgestore_pipeline_name,
edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,
edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,
) -> arango.database.StandardDatabase:
"""Get Edgestore arangodb database handle
Args:
client (arango.client.ArangoClient): Description
username (None, optional): Description
password (None, optional): Description
edgestore_db_name (str, optional): Description
edgestore_edges_name (str, optional): Description
edgestore_nodes_name (str, optional): Description
Returns:
arango.database.StandardDatabase: Description
"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "edgestore"
try:
if username and password:
edgestore_db = sys_db.create_database(
name=edgestore_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
edgestore_db = sys_db.create_database(name=edgestore_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
edgestore_db = client.db(
edgestore_db_name, username=username, password=password
)
else:
edgestore_db = client.db(edgestore_db_name)
# TODO - add a skiplist index for _from? or _key? to be able to do paging?
# has_collection function doesn't seem to be working
# if not edgestore_db.has_collection(edgestore_nodes_name):
try:
nodes = edgestore_db.create_collection(
edgestore_nodes_name, index_bucket_count=64
)
nodes.add_hash_index(fields=["name"], unique=False)
nodes.add_hash_index(
fields=["components"], unique=False
) # add subject/object components as node properties
except Exception:
pass
# if not edgestore_db.has_collection(edgestore_edges_name):
try:
edges = edgestore_db.create_collection(
edgestore_edges_name, edge=True, index_bucket_count=64
)
edges.add_hash_index(fields=["relation"], unique=False)
edges.add_hash_index(fields=["edge_types"], unique=False)
edges.add_hash_index(fields=["nanopub_id"], unique=False)
edges.add_hash_index(fields=["metadata.project"], unique=False)
edges.add_hash_index(fields=["annotations[*].id"], unique=False)
except Exception:
pass
# if not edgestore_db.has_collection(edgestore_pipeline_name):
try:
edgestore_db.create_collection(edgestore_pipeline_name)
except Exception:
pass
try:
edgestore_db.create_collection(edgestore_pipeline_errors_name)
except Exception:
pass
try:
edgestore_db.create_collection(edgestore_pipeline_stats_name)
except arango.exceptions.CollectionCreateError as e:
pass
return edgestore_db | python | def get_edgestore_handle(
client: arango.client.ArangoClient,
username=None,
password=None,
edgestore_db_name: str = edgestore_db_name,
edgestore_edges_name: str = edgestore_edges_name,
edgestore_nodes_name: str = edgestore_nodes_name,
edgestore_pipeline_name: str = edgestore_pipeline_name,
edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,
edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,
) -> arango.database.StandardDatabase:
"""Get Edgestore arangodb database handle
Args:
client (arango.client.ArangoClient): Description
username (None, optional): Description
password (None, optional): Description
edgestore_db_name (str, optional): Description
edgestore_edges_name (str, optional): Description
edgestore_nodes_name (str, optional): Description
Returns:
arango.database.StandardDatabase: Description
"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "edgestore"
try:
if username and password:
edgestore_db = sys_db.create_database(
name=edgestore_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
edgestore_db = sys_db.create_database(name=edgestore_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
edgestore_db = client.db(
edgestore_db_name, username=username, password=password
)
else:
edgestore_db = client.db(edgestore_db_name)
# TODO - add a skiplist index for _from? or _key? to be able to do paging?
# has_collection function doesn't seem to be working
# if not edgestore_db.has_collection(edgestore_nodes_name):
try:
nodes = edgestore_db.create_collection(
edgestore_nodes_name, index_bucket_count=64
)
nodes.add_hash_index(fields=["name"], unique=False)
nodes.add_hash_index(
fields=["components"], unique=False
) # add subject/object components as node properties
except Exception:
pass
# if not edgestore_db.has_collection(edgestore_edges_name):
try:
edges = edgestore_db.create_collection(
edgestore_edges_name, edge=True, index_bucket_count=64
)
edges.add_hash_index(fields=["relation"], unique=False)
edges.add_hash_index(fields=["edge_types"], unique=False)
edges.add_hash_index(fields=["nanopub_id"], unique=False)
edges.add_hash_index(fields=["metadata.project"], unique=False)
edges.add_hash_index(fields=["annotations[*].id"], unique=False)
except Exception:
pass
# if not edgestore_db.has_collection(edgestore_pipeline_name):
try:
edgestore_db.create_collection(edgestore_pipeline_name)
except Exception:
pass
try:
edgestore_db.create_collection(edgestore_pipeline_errors_name)
except Exception:
pass
try:
edgestore_db.create_collection(edgestore_pipeline_stats_name)
except arango.exceptions.CollectionCreateError as e:
pass
return edgestore_db | [
"def",
"get_edgestore_handle",
"(",
"client",
":",
"arango",
".",
"client",
".",
"ArangoClient",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"edgestore_db_name",
":",
"str",
"=",
"edgestore_db_name",
",",
"edgestore_edges_name",
":",
"str",
... | Get Edgestore arangodb database handle
Args:
client (arango.client.ArangoClient): Description
username (None, optional): Description
password (None, optional): Description
edgestore_db_name (str, optional): Description
edgestore_edges_name (str, optional): Description
edgestore_nodes_name (str, optional): Description
Returns:
arango.database.StandardDatabase: Description | [
"Get",
"Edgestore",
"arangodb",
"database",
"handle"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L97-L186 | train | 50,906 |
belbio/bel | bel/db/arangodb.py | get_belns_handle | def get_belns_handle(client, username=None, password=None):
"""Get BEL namespace arango db handle"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "belns"
try:
if username and password:
belns_db = sys_db.create_database(
name=belns_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
belns_db = sys_db.create_database(name=belns_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
belns_db = client.db(belns_db_name, username=username, password=password)
else:
belns_db = client.db(belns_db_name)
try:
belns_db.create_collection(belns_metadata_name)
except Exception:
pass
try:
equiv_nodes = belns_db.create_collection(
equiv_nodes_name, index_bucket_count=64
)
equiv_nodes.add_hash_index(fields=["name"], unique=True)
except Exception:
pass
try:
belns_db.create_collection(equiv_edges_name, edge=True, index_bucket_count=64)
except Exception:
pass
try:
ortholog_nodes = belns_db.create_collection(
ortholog_nodes_name, index_bucket_count=64
)
ortholog_nodes.add_hash_index(fields=["name"], unique=True)
except Exception:
pass
try:
belns_db.create_collection(
ortholog_edges_name, edge=True, index_bucket_count=64
)
except Exception:
pass
return belns_db | python | def get_belns_handle(client, username=None, password=None):
"""Get BEL namespace arango db handle"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "belns"
try:
if username and password:
belns_db = sys_db.create_database(
name=belns_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
belns_db = sys_db.create_database(name=belns_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
belns_db = client.db(belns_db_name, username=username, password=password)
else:
belns_db = client.db(belns_db_name)
try:
belns_db.create_collection(belns_metadata_name)
except Exception:
pass
try:
equiv_nodes = belns_db.create_collection(
equiv_nodes_name, index_bucket_count=64
)
equiv_nodes.add_hash_index(fields=["name"], unique=True)
except Exception:
pass
try:
belns_db.create_collection(equiv_edges_name, edge=True, index_bucket_count=64)
except Exception:
pass
try:
ortholog_nodes = belns_db.create_collection(
ortholog_nodes_name, index_bucket_count=64
)
ortholog_nodes.add_hash_index(fields=["name"], unique=True)
except Exception:
pass
try:
belns_db.create_collection(
ortholog_edges_name, edge=True, index_bucket_count=64
)
except Exception:
pass
return belns_db | [
"def",
"get_belns_handle",
"(",
"client",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"(",
"username",
",",
"password",
")",
"=",
"get_user_creds",
"(",
"username",
",",
"password",
")",
"sys_db",
"=",
"client",
".",
"db",
"(",
... | Get BEL namespace arango db handle | [
"Get",
"BEL",
"namespace",
"arango",
"db",
"handle"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L189-L244 | train | 50,907 |
belbio/bel | bel/db/arangodb.py | get_belapi_handle | def get_belapi_handle(client, username=None, password=None):
"""Get BEL API arango db handle"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "belapi"
try:
if username and password:
belapi_db = sys_db.create_database(
name=belapi_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
belapi_db = sys_db.create_database(name=belapi_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
belapi_db = client.db(belapi_db_name, username=username, password=password)
else:
belapi_db = client.db(belapi_db_name)
try:
belapi_db.create_collection(belapi_settings_name)
except Exception:
pass
try:
belapi_db.create_collection(belapi_statemgmt_name)
except Exception:
pass
return belapi_db | python | def get_belapi_handle(client, username=None, password=None):
"""Get BEL API arango db handle"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "belapi"
try:
if username and password:
belapi_db = sys_db.create_database(
name=belapi_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
belapi_db = sys_db.create_database(name=belapi_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
belapi_db = client.db(belapi_db_name, username=username, password=password)
else:
belapi_db = client.db(belapi_db_name)
try:
belapi_db.create_collection(belapi_settings_name)
except Exception:
pass
try:
belapi_db.create_collection(belapi_statemgmt_name)
except Exception:
pass
return belapi_db | [
"def",
"get_belapi_handle",
"(",
"client",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"(",
"username",
",",
"password",
")",
"=",
"get_user_creds",
"(",
"username",
",",
"password",
")",
"sys_db",
"=",
"client",
".",
"db",
"(",... | Get BEL API arango db handle | [
"Get",
"BEL",
"API",
"arango",
"db",
"handle"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L247-L279 | train | 50,908 |
belbio/bel | bel/db/arangodb.py | delete_database | def delete_database(client, db_name, username=None, password=None):
"""Delete Arangodb database
"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
try:
return sys_db.delete_database(db_name)
except Exception:
log.warn("No arango database {db_name} to delete, does not exist") | python | def delete_database(client, db_name, username=None, password=None):
"""Delete Arangodb database
"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
try:
return sys_db.delete_database(db_name)
except Exception:
log.warn("No arango database {db_name} to delete, does not exist") | [
"def",
"delete_database",
"(",
"client",
",",
"db_name",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"(",
"username",
",",
"password",
")",
"=",
"get_user_creds",
"(",
"username",
",",
"password",
")",
"sys_db",
"=",
"client",
"... | Delete Arangodb database | [
"Delete",
"Arangodb",
"database"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L282-L294 | train | 50,909 |
belbio/bel | bel/db/arangodb.py | batch_load_docs | def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
"""Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc_key, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
"""
batch_size = 100
counter = 0
collections = {}
docs = {}
if on_duplicate not in ["error", "update", "replace", "ignore"]:
log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
return
for (collection_name, doc) in doc_iterator:
if collection_name not in collections:
collections[collection_name] = db.collection(collection_name)
docs[collection_name] = []
counter += 1
docs[collection_name].append(doc)
if counter % batch_size == 0:
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = []
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = [] | python | def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
"""Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc_key, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
"""
batch_size = 100
counter = 0
collections = {}
docs = {}
if on_duplicate not in ["error", "update", "replace", "ignore"]:
log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
return
for (collection_name, doc) in doc_iterator:
if collection_name not in collections:
collections[collection_name] = db.collection(collection_name)
docs[collection_name] = []
counter += 1
docs[collection_name].append(doc)
if counter % batch_size == 0:
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = []
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = [] | [
"def",
"batch_load_docs",
"(",
"db",
",",
"doc_iterator",
",",
"on_duplicate",
"=",
"\"replace\"",
")",
":",
"batch_size",
"=",
"100",
"counter",
"=",
"0",
"collections",
"=",
"{",
"}",
"docs",
"=",
"{",
"}",
"if",
"on_duplicate",
"not",
"in",
"[",
"\"er... | Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc_key, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk | [
"Batch",
"load",
"documents"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L297-L340 | train | 50,910 |
belbio/bel | bel/resources/resource.py | load_resource | def load_resource(resource_url: str, forceupdate: bool = False):
"""Load BEL Resource file
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
"""
log.info(f"Loading resource {resource_url}")
try:
# Download resource
fo = bel.utils.download_file(resource_url)
if not fo:
log.error(f"Could not download and open file {resource_url}")
return "Failed to download resource_url"
# Get metadata
fo.seek(0)
with gzip.open(fo, "rt") as f:
metadata = json.loads(f.__next__())
if "metadata" not in metadata:
log.error(f"Missing metadata entry for {resource_url}")
return "Cannot load resource file - missing metadata object in first line of file"
# Load resource files
if metadata["metadata"]["type"] == "namespace":
bel.resources.namespace.load_terms(fo, metadata, forceupdate)
elif metadata["metadata"]["type"] == "ortholog":
bel.resources.ortholog.load_orthologs(fo, metadata)
finally:
fo.close() | python | def load_resource(resource_url: str, forceupdate: bool = False):
"""Load BEL Resource file
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
"""
log.info(f"Loading resource {resource_url}")
try:
# Download resource
fo = bel.utils.download_file(resource_url)
if not fo:
log.error(f"Could not download and open file {resource_url}")
return "Failed to download resource_url"
# Get metadata
fo.seek(0)
with gzip.open(fo, "rt") as f:
metadata = json.loads(f.__next__())
if "metadata" not in metadata:
log.error(f"Missing metadata entry for {resource_url}")
return "Cannot load resource file - missing metadata object in first line of file"
# Load resource files
if metadata["metadata"]["type"] == "namespace":
bel.resources.namespace.load_terms(fo, metadata, forceupdate)
elif metadata["metadata"]["type"] == "ortholog":
bel.resources.ortholog.load_orthologs(fo, metadata)
finally:
fo.close() | [
"def",
"load_resource",
"(",
"resource_url",
":",
"str",
",",
"forceupdate",
":",
"bool",
"=",
"False",
")",
":",
"log",
".",
"info",
"(",
"f\"Loading resource {resource_url}\"",
")",
"try",
":",
"# Download resource",
"fo",
"=",
"bel",
".",
"utils",
".",
"d... | Load BEL Resource file
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches | [
"Load",
"BEL",
"Resource",
"file"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/resource.py#L23-L61 | train | 50,911 |
belbio/bel | bel/terms/terms.py | get_normalized_term | def get_normalized_term(term_id: str, equivalents: list, namespace_targets: dict) -> str:
"""Get normalized term"""
if equivalents and len(equivalents) > 0:
for start_ns in namespace_targets:
if re.match(start_ns, term_id):
for target_ns in namespace_targets[start_ns]:
for e in equivalents:
if e and target_ns in e["namespace"] and e["primary"]:
normalized_term = e["term_id"]
return normalized_term
return term_id | python | def get_normalized_term(term_id: str, equivalents: list, namespace_targets: dict) -> str:
"""Get normalized term"""
if equivalents and len(equivalents) > 0:
for start_ns in namespace_targets:
if re.match(start_ns, term_id):
for target_ns in namespace_targets[start_ns]:
for e in equivalents:
if e and target_ns in e["namespace"] and e["primary"]:
normalized_term = e["term_id"]
return normalized_term
return term_id | [
"def",
"get_normalized_term",
"(",
"term_id",
":",
"str",
",",
"equivalents",
":",
"list",
",",
"namespace_targets",
":",
"dict",
")",
"->",
"str",
":",
"if",
"equivalents",
"and",
"len",
"(",
"equivalents",
")",
">",
"0",
":",
"for",
"start_ns",
"in",
"... | Get normalized term | [
"Get",
"normalized",
"term"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/terms.py#L101-L113 | train | 50,912 |
belbio/bel | bel/terms/terms.py | get_labels | def get_labels(term_ids: list) -> dict:
"""Get term labels given term ids
This only takes the first term returned for a term_id so use the
unique term_id for a term not an alternate id that might not be unique.
"""
term_labels = {}
for term_id in term_ids:
term = get_terms(term_id)
term_labels[term_id] = term[0].get("label", "")
return term_labels | python | def get_labels(term_ids: list) -> dict:
"""Get term labels given term ids
This only takes the first term returned for a term_id so use the
unique term_id for a term not an alternate id that might not be unique.
"""
term_labels = {}
for term_id in term_ids:
term = get_terms(term_id)
term_labels[term_id] = term[0].get("label", "")
return term_labels | [
"def",
"get_labels",
"(",
"term_ids",
":",
"list",
")",
"->",
"dict",
":",
"term_labels",
"=",
"{",
"}",
"for",
"term_id",
"in",
"term_ids",
":",
"term",
"=",
"get_terms",
"(",
"term_id",
")",
"term_labels",
"[",
"term_id",
"]",
"=",
"term",
"[",
"0",
... | Get term labels given term ids
This only takes the first term returned for a term_id so use the
unique term_id for a term not an alternate id that might not be unique. | [
"Get",
"term",
"labels",
"given",
"term",
"ids"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/terms.py#L116-L127 | train | 50,913 |
PayEx/pypayex | payex/handlers.py | BaseHandler._get_params | def _get_params(self):
"""
Generate SOAP parameters.
"""
params = {'accountNumber': self._service.accountNumber}
# Include object variables that are in field_order
for key, val in self.__dict__.iteritems():
if key in self.field_order:
# Turn into Unicode
if isinstance(val, str,):
val = val.decode('utf8')
params[key] = val
# Set missing parameters as empty strings
for key in self.field_order:
if key not in params:
params[key] = u''
# Parameter sorting method
def order_keys(k):
if k[0] in self.field_order:
return self.field_order.index(k[0])
return len(self.field_order) + 1
# Sort the ordered dictionary
params = OrderedDict(sorted(params.items(), key=order_keys))
# Add hash to dictionary if present
if hasattr(self, 'hash') and self.hash is not None:
params['hash'] = self.hash
return params | python | def _get_params(self):
"""
Generate SOAP parameters.
"""
params = {'accountNumber': self._service.accountNumber}
# Include object variables that are in field_order
for key, val in self.__dict__.iteritems():
if key in self.field_order:
# Turn into Unicode
if isinstance(val, str,):
val = val.decode('utf8')
params[key] = val
# Set missing parameters as empty strings
for key in self.field_order:
if key not in params:
params[key] = u''
# Parameter sorting method
def order_keys(k):
if k[0] in self.field_order:
return self.field_order.index(k[0])
return len(self.field_order) + 1
# Sort the ordered dictionary
params = OrderedDict(sorted(params.items(), key=order_keys))
# Add hash to dictionary if present
if hasattr(self, 'hash') and self.hash is not None:
params['hash'] = self.hash
return params | [
"def",
"_get_params",
"(",
"self",
")",
":",
"params",
"=",
"{",
"'accountNumber'",
":",
"self",
".",
"_service",
".",
"accountNumber",
"}",
"# Include object variables that are in field_order",
"for",
"key",
",",
"val",
"in",
"self",
".",
"__dict__",
".",
"iter... | Generate SOAP parameters. | [
"Generate",
"SOAP",
"parameters",
"."
] | 549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab | https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L26-L61 | train | 50,914 |
PayEx/pypayex | payex/handlers.py | BaseHandler._generate_hash | def _generate_hash(self):
"""
Generates a hash based on the specific fields for the method.
"""
self.hash = None
str_hash = ''
for key, val in self._get_params().iteritems():
str_hash += smart_str(val)
# Append the encryption string
str_hash += self._service.encryption_key
# Set md5 hash on the object
self.hash = hashlib.md5(str_hash).hexdigest() | python | def _generate_hash(self):
"""
Generates a hash based on the specific fields for the method.
"""
self.hash = None
str_hash = ''
for key, val in self._get_params().iteritems():
str_hash += smart_str(val)
# Append the encryption string
str_hash += self._service.encryption_key
# Set md5 hash on the object
self.hash = hashlib.md5(str_hash).hexdigest() | [
"def",
"_generate_hash",
"(",
"self",
")",
":",
"self",
".",
"hash",
"=",
"None",
"str_hash",
"=",
"''",
"for",
"key",
",",
"val",
"in",
"self",
".",
"_get_params",
"(",
")",
".",
"iteritems",
"(",
")",
":",
"str_hash",
"+=",
"smart_str",
"(",
"val",... | Generates a hash based on the specific fields for the method. | [
"Generates",
"a",
"hash",
"based",
"on",
"the",
"specific",
"fields",
"for",
"the",
"method",
"."
] | 549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab | https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L63-L78 | train | 50,915 |
PayEx/pypayex | payex/handlers.py | BaseHandler._send_request | def _send_request(self):
"""
Make the SOAP request and convert the result to a dictionary.
"""
# Generate the hash variable and parameters
self._generate_hash()
params = self._get_params()
# Make the SOAP request
try:
resp = self._endpoint(**params)
logger.debug(resp)
except WebFault, e:
logger.exception('An error occurred while making the SOAP request.')
return None
# Convert XML response into a dictionary
self.response = XmlDictConfig(ElementTree.XML(smart_str(resp)))
# Normalize dictionary values
self.response = normalize_dictionary_values(self.response)
# Log all non OK status codes
if self.response['status']['errorCode'] != 'OK':
logger.error(resp)
return self.response | python | def _send_request(self):
"""
Make the SOAP request and convert the result to a dictionary.
"""
# Generate the hash variable and parameters
self._generate_hash()
params = self._get_params()
# Make the SOAP request
try:
resp = self._endpoint(**params)
logger.debug(resp)
except WebFault, e:
logger.exception('An error occurred while making the SOAP request.')
return None
# Convert XML response into a dictionary
self.response = XmlDictConfig(ElementTree.XML(smart_str(resp)))
# Normalize dictionary values
self.response = normalize_dictionary_values(self.response)
# Log all non OK status codes
if self.response['status']['errorCode'] != 'OK':
logger.error(resp)
return self.response | [
"def",
"_send_request",
"(",
"self",
")",
":",
"# Generate the hash variable and parameters",
"self",
".",
"_generate_hash",
"(",
")",
"params",
"=",
"self",
".",
"_get_params",
"(",
")",
"# Make the SOAP request",
"try",
":",
"resp",
"=",
"self",
".",
"_endpoint"... | Make the SOAP request and convert the result to a dictionary. | [
"Make",
"the",
"SOAP",
"request",
"and",
"convert",
"the",
"result",
"to",
"a",
"dictionary",
"."
] | 549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab | https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L80-L107 | train | 50,916 |
PayEx/pypayex | payex/handlers.py | BaseHandler.client_factory | def client_factory(self):
"""
Custom client factory to set proxy options.
"""
if self._service.production:
url = self.production_url
else:
url = self.testing_url
proxy_options = dict()
https_proxy_setting = os.environ.get('PAYEX_HTTPS_PROXY') or os.environ.get('https_proxy')
http_proxy_setting = os.environ.get('PAYEX_HTTP_PROXY') or os.environ.get('http_proxy')
if https_proxy_setting:
proxy_options['https'] = https_proxy_setting
if http_proxy_setting:
proxy_options['http'] = http_proxy_setting
return client.Client(url, proxy=proxy_options) | python | def client_factory(self):
"""
Custom client factory to set proxy options.
"""
if self._service.production:
url = self.production_url
else:
url = self.testing_url
proxy_options = dict()
https_proxy_setting = os.environ.get('PAYEX_HTTPS_PROXY') or os.environ.get('https_proxy')
http_proxy_setting = os.environ.get('PAYEX_HTTP_PROXY') or os.environ.get('http_proxy')
if https_proxy_setting:
proxy_options['https'] = https_proxy_setting
if http_proxy_setting:
proxy_options['http'] = http_proxy_setting
return client.Client(url, proxy=proxy_options) | [
"def",
"client_factory",
"(",
"self",
")",
":",
"if",
"self",
".",
"_service",
".",
"production",
":",
"url",
"=",
"self",
".",
"production_url",
"else",
":",
"url",
"=",
"self",
".",
"testing_url",
"proxy_options",
"=",
"dict",
"(",
")",
"https_proxy_sett... | Custom client factory to set proxy options. | [
"Custom",
"client",
"factory",
"to",
"set",
"proxy",
"options",
"."
] | 549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab | https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L109-L128 | train | 50,917 |
DNX/django-keyboard-shorcuts | keyboard_shortcuts/utils.py | get_combination_action | def get_combination_action(combination):
"""
Prepares the action for a keyboard combination, also filters another
"strange" actions declared by the user.
"""
accepted_actions = ('link', 'js')
for action in accepted_actions:
if action in combination:
return {action: combination[action]}
return {} | python | def get_combination_action(combination):
"""
Prepares the action for a keyboard combination, also filters another
"strange" actions declared by the user.
"""
accepted_actions = ('link', 'js')
for action in accepted_actions:
if action in combination:
return {action: combination[action]}
return {} | [
"def",
"get_combination_action",
"(",
"combination",
")",
":",
"accepted_actions",
"=",
"(",
"'link'",
",",
"'js'",
")",
"for",
"action",
"in",
"accepted_actions",
":",
"if",
"action",
"in",
"combination",
":",
"return",
"{",
"action",
":",
"combination",
"[",... | Prepares the action for a keyboard combination, also filters another
"strange" actions declared by the user. | [
"Prepares",
"the",
"action",
"for",
"a",
"keyboard",
"combination",
"also",
"filters",
"another",
"strange",
"actions",
"declared",
"by",
"the",
"user",
"."
] | dd853a410614c0dfb7cce803eafda9b5fa47be17 | https://github.com/DNX/django-keyboard-shorcuts/blob/dd853a410614c0dfb7cce803eafda9b5fa47be17/keyboard_shortcuts/utils.py#L14-L23 | train | 50,918 |
DNX/django-keyboard-shorcuts | keyboard_shortcuts/utils.py | get_processed_hotkeys | def get_processed_hotkeys(hotkeys=None):
"""
Process passed dict with key combinations or the HOTKEYS dict from
settings.
"""
hotkeys = hotkeys or ks_settings.HOTKEYS
processed_hotkeys = AutoVivification()
if not hotkeys:
return processed_hotkeys
for combination in hotkeys:
key_codes = get_key_codes(combination['keys'])
if len(key_codes) == 1:
processed_hotkeys[key_codes[0]] = get_combination_action(combination)
elif len(key_codes) == 2:
processed_hotkeys[key_codes[0]][key_codes[1]] = get_combination_action(combination)
elif len(key_codes) == 3:
processed_hotkeys[key_codes[0]][key_codes[1]][key_codes[2]] = get_combination_action(combination)
# TODO: make dynamic vivification
return processed_hotkeys | python | def get_processed_hotkeys(hotkeys=None):
"""
Process passed dict with key combinations or the HOTKEYS dict from
settings.
"""
hotkeys = hotkeys or ks_settings.HOTKEYS
processed_hotkeys = AutoVivification()
if not hotkeys:
return processed_hotkeys
for combination in hotkeys:
key_codes = get_key_codes(combination['keys'])
if len(key_codes) == 1:
processed_hotkeys[key_codes[0]] = get_combination_action(combination)
elif len(key_codes) == 2:
processed_hotkeys[key_codes[0]][key_codes[1]] = get_combination_action(combination)
elif len(key_codes) == 3:
processed_hotkeys[key_codes[0]][key_codes[1]][key_codes[2]] = get_combination_action(combination)
# TODO: make dynamic vivification
return processed_hotkeys | [
"def",
"get_processed_hotkeys",
"(",
"hotkeys",
"=",
"None",
")",
":",
"hotkeys",
"=",
"hotkeys",
"or",
"ks_settings",
".",
"HOTKEYS",
"processed_hotkeys",
"=",
"AutoVivification",
"(",
")",
"if",
"not",
"hotkeys",
":",
"return",
"processed_hotkeys",
"for",
"com... | Process passed dict with key combinations or the HOTKEYS dict from
settings. | [
"Process",
"passed",
"dict",
"with",
"key",
"combinations",
"or",
"the",
"HOTKEYS",
"dict",
"from",
"settings",
"."
] | dd853a410614c0dfb7cce803eafda9b5fa47be17 | https://github.com/DNX/django-keyboard-shorcuts/blob/dd853a410614c0dfb7cce803eafda9b5fa47be17/keyboard_shortcuts/utils.py#L26-L46 | train | 50,919 |
belbio/bel | bel/lang/belobj.py | BEL.parse | def parse(
self,
assertion: Union[str, Mapping[str, str]],
strict: bool = False,
parseinfo: bool = False,
rule_name: str = "start",
error_level: str = "WARNING",
) -> "BEL":
"""Parse and semantically validate BEL statement
Parses a BEL statement given as a string and returns an AST, Abstract Syntax Tree (defined in ast.py)
if the statement is valid, self.parse_valid. Else, the AST attribute is None and there will be validation error messages
in self.validation_messages. self.validation_messages will contain WARNINGS if
warranted even if the statement parses correctly.
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
Args:
assertion: BEL statement (if str -> 'S R O', if dict {'subject': S, 'relation': R, 'object': O})
strict: specify to use strict or loose parsing; defaults to loose
parseinfo: specify whether or not to include Tatsu parse information in AST
rule_name: starting point in parser - defaults to 'start'
error_level: return ERRORs only or also WARNINGs
Returns:
ParseObject: The ParseObject which contain either an AST or error messages.
"""
self.ast = None
self.parse_valid = False
self.parse_visualize_error = ""
self.validation_messages = [] # Reset messages when parsing a new BEL Statement
if isinstance(assertion, dict):
if assertion.get("relation", False) and assertion.get("object", False):
statement = f"{assertion['subject']} {assertion['relation']} {assertion['object']}"
elif assertion.get("subject"):
statement = f"{assertion['subject']}"
else:
statement = ""
else:
statement = assertion
self.original_bel_stmt = statement
# pre-process to remove extra white space, add space after commas, etc.
self.bel_stmt = bel_utils.preprocess_bel_stmt(statement)
# TODO - double check these tests before enabling
# is_valid, messages = bel_utils.simple_checks(self.bel_stmt)
# if not is_valid:
# self.validation_messages.extend(messages)
# return self
# Check to see if empty string for bel statement
if len(self.bel_stmt) == 0:
self.validation_messages.append(
("ERROR", "Please include a valid BEL statement - found empty string.")
)
return self
try:
# see if an AST is returned without any parsing errors
ast_dict = self.parser.parse(
self.bel_stmt, rule_name=rule_name, trace=False, parseinfo=parseinfo
)
self.ast = lang_ast.ast_dict_to_objects(ast_dict, self)
self.parse_valid = True
except FailedParse as e:
# if an error is returned, send to handle_syntax, error
error, visualize_error = bel_utils.handle_parser_syntax_error(e)
self.parse_visualize_error = visualize_error
if visualize_error:
self.validation_messages.append(
("ERROR", f"{error}\n{visualize_error}")
)
else:
self.validation_messages.append(
("ERROR", f"{error}\nBEL: {self.bel_stmt}")
)
self.ast = None
except Exception as e:
log.error("Error {}, error type: {}".format(e, type(e)))
self.validation_messages.append(
("ERROR", "Error {}, error type: {}".format(e, type(e)))
)
return self | python | def parse(
self,
assertion: Union[str, Mapping[str, str]],
strict: bool = False,
parseinfo: bool = False,
rule_name: str = "start",
error_level: str = "WARNING",
) -> "BEL":
"""Parse and semantically validate BEL statement
Parses a BEL statement given as a string and returns an AST, Abstract Syntax Tree (defined in ast.py)
if the statement is valid, self.parse_valid. Else, the AST attribute is None and there will be validation error messages
in self.validation_messages. self.validation_messages will contain WARNINGS if
warranted even if the statement parses correctly.
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
Args:
assertion: BEL statement (if str -> 'S R O', if dict {'subject': S, 'relation': R, 'object': O})
strict: specify to use strict or loose parsing; defaults to loose
parseinfo: specify whether or not to include Tatsu parse information in AST
rule_name: starting point in parser - defaults to 'start'
error_level: return ERRORs only or also WARNINGs
Returns:
ParseObject: The ParseObject which contain either an AST or error messages.
"""
self.ast = None
self.parse_valid = False
self.parse_visualize_error = ""
self.validation_messages = [] # Reset messages when parsing a new BEL Statement
if isinstance(assertion, dict):
if assertion.get("relation", False) and assertion.get("object", False):
statement = f"{assertion['subject']} {assertion['relation']} {assertion['object']}"
elif assertion.get("subject"):
statement = f"{assertion['subject']}"
else:
statement = ""
else:
statement = assertion
self.original_bel_stmt = statement
# pre-process to remove extra white space, add space after commas, etc.
self.bel_stmt = bel_utils.preprocess_bel_stmt(statement)
# TODO - double check these tests before enabling
# is_valid, messages = bel_utils.simple_checks(self.bel_stmt)
# if not is_valid:
# self.validation_messages.extend(messages)
# return self
# Check to see if empty string for bel statement
if len(self.bel_stmt) == 0:
self.validation_messages.append(
("ERROR", "Please include a valid BEL statement - found empty string.")
)
return self
try:
# see if an AST is returned without any parsing errors
ast_dict = self.parser.parse(
self.bel_stmt, rule_name=rule_name, trace=False, parseinfo=parseinfo
)
self.ast = lang_ast.ast_dict_to_objects(ast_dict, self)
self.parse_valid = True
except FailedParse as e:
# if an error is returned, send to handle_syntax, error
error, visualize_error = bel_utils.handle_parser_syntax_error(e)
self.parse_visualize_error = visualize_error
if visualize_error:
self.validation_messages.append(
("ERROR", f"{error}\n{visualize_error}")
)
else:
self.validation_messages.append(
("ERROR", f"{error}\nBEL: {self.bel_stmt}")
)
self.ast = None
except Exception as e:
log.error("Error {}, error type: {}".format(e, type(e)))
self.validation_messages.append(
("ERROR", "Error {}, error type: {}".format(e, type(e)))
)
return self | [
"def",
"parse",
"(",
"self",
",",
"assertion",
":",
"Union",
"[",
"str",
",",
"Mapping",
"[",
"str",
",",
"str",
"]",
"]",
",",
"strict",
":",
"bool",
"=",
"False",
",",
"parseinfo",
":",
"bool",
"=",
"False",
",",
"rule_name",
":",
"str",
"=",
"... | Parse and semantically validate BEL statement
Parses a BEL statement given as a string and returns an AST, Abstract Syntax Tree (defined in ast.py)
if the statement is valid, self.parse_valid. Else, the AST attribute is None and there will be validation error messages
in self.validation_messages. self.validation_messages will contain WARNINGS if
warranted even if the statement parses correctly.
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
Args:
assertion: BEL statement (if str -> 'S R O', if dict {'subject': S, 'relation': R, 'object': O})
strict: specify to use strict or loose parsing; defaults to loose
parseinfo: specify whether or not to include Tatsu parse information in AST
rule_name: starting point in parser - defaults to 'start'
error_level: return ERRORs only or also WARNINGs
Returns:
ParseObject: The ParseObject which contain either an AST or error messages. | [
"Parse",
"and",
"semantically",
"validate",
"BEL",
"statement"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L92-L184 | train | 50,920 |
belbio/bel | bel/lang/belobj.py | BEL.canonicalize | def canonicalize(self, namespace_targets: Mapping[str, List[str]] = None) -> "BEL":
"""
Takes an AST and returns a canonicalized BEL statement string.
Args:
namespace_targets (Mapping[str, List[str]]): override default canonicalization
settings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings
Returns:
BEL: returns self
"""
# TODO Need to order position independent args
if not self.ast:
return self
# Collect canonical/decanonical NSArg values
if not self.ast.collected_nsarg_norms:
self = self.collect_nsarg_norms()
# TODO Need to pass namespace target overrides for canonicalization
self.ast.canonicalize()
# self.ast = bel_utils.convert_namespaces_ast(self.ast, canonicalize=True, api_url=self.api_url, namespace_targets=namespace_targets)
return self | python | def canonicalize(self, namespace_targets: Mapping[str, List[str]] = None) -> "BEL":
"""
Takes an AST and returns a canonicalized BEL statement string.
Args:
namespace_targets (Mapping[str, List[str]]): override default canonicalization
settings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings
Returns:
BEL: returns self
"""
# TODO Need to order position independent args
if not self.ast:
return self
# Collect canonical/decanonical NSArg values
if not self.ast.collected_nsarg_norms:
self = self.collect_nsarg_norms()
# TODO Need to pass namespace target overrides for canonicalization
self.ast.canonicalize()
# self.ast = bel_utils.convert_namespaces_ast(self.ast, canonicalize=True, api_url=self.api_url, namespace_targets=namespace_targets)
return self | [
"def",
"canonicalize",
"(",
"self",
",",
"namespace_targets",
":",
"Mapping",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
"->",
"\"BEL\"",
":",
"# TODO Need to order position independent args",
"if",
"not",
"self",
".",
"ast",
":",
"retu... | Takes an AST and returns a canonicalized BEL statement string.
Args:
namespace_targets (Mapping[str, List[str]]): override default canonicalization
settings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings
Returns:
BEL: returns self | [
"Takes",
"an",
"AST",
"and",
"returns",
"a",
"canonicalized",
"BEL",
"statement",
"string",
"."
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L202-L228 | train | 50,921 |
belbio/bel | bel/lang/belobj.py | BEL.collect_nsarg_norms | def collect_nsarg_norms(self):
"""Adds canonical and decanonical values to NSArgs in AST
This prepares the AST object for (de)canonicalization
"""
start_time = datetime.datetime.now()
self.ast = bel_utils.populate_ast_nsarg_defaults(self.ast, self.ast)
self.ast.collected_nsarg_norms = True
if (
hasattr(self.ast, "bel_object")
and self.ast.bel_object
and self.ast.bel_object.type == "BELAst"
):
self.ast.bel_object.collected_nsarg_norms = True
end_time = datetime.datetime.now()
delta_ms = f"{(end_time - start_time).total_seconds() * 1000:.1f}"
log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms)
return self | python | def collect_nsarg_norms(self):
"""Adds canonical and decanonical values to NSArgs in AST
This prepares the AST object for (de)canonicalization
"""
start_time = datetime.datetime.now()
self.ast = bel_utils.populate_ast_nsarg_defaults(self.ast, self.ast)
self.ast.collected_nsarg_norms = True
if (
hasattr(self.ast, "bel_object")
and self.ast.bel_object
and self.ast.bel_object.type == "BELAst"
):
self.ast.bel_object.collected_nsarg_norms = True
end_time = datetime.datetime.now()
delta_ms = f"{(end_time - start_time).total_seconds() * 1000:.1f}"
log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms)
return self | [
"def",
"collect_nsarg_norms",
"(",
"self",
")",
":",
"start_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"ast",
"=",
"bel_utils",
".",
"populate_ast_nsarg_defaults",
"(",
"self",
".",
"ast",
",",
"self",
".",
"ast",
")",
"s... | Adds canonical and decanonical values to NSArgs in AST
This prepares the AST object for (de)canonicalization | [
"Adds",
"canonical",
"and",
"decanonical",
"values",
"to",
"NSArgs",
"in",
"AST"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L256-L277 | train | 50,922 |
belbio/bel | bel/lang/belobj.py | BEL.orthologize | def orthologize(self, species_id: str) -> "BEL":
"""Orthologize BEL AST to given species_id
Will return original entity (ns:value) if no ortholog found.
Args:
species_id (str): species id to convert genes/rna/proteins into
Returns:
BEL: returns self
"""
if not self.ast:
return self
# Collect canonical/decanonical NSArg values
if not self.ast.collected_orthologs:
self = self.collect_orthologs([species_id])
self.ast.species = set()
self.ast = bel_utils.orthologize(self.ast, self, species_id)
return self | python | def orthologize(self, species_id: str) -> "BEL":
"""Orthologize BEL AST to given species_id
Will return original entity (ns:value) if no ortholog found.
Args:
species_id (str): species id to convert genes/rna/proteins into
Returns:
BEL: returns self
"""
if not self.ast:
return self
# Collect canonical/decanonical NSArg values
if not self.ast.collected_orthologs:
self = self.collect_orthologs([species_id])
self.ast.species = set()
self.ast = bel_utils.orthologize(self.ast, self, species_id)
return self | [
"def",
"orthologize",
"(",
"self",
",",
"species_id",
":",
"str",
")",
"->",
"\"BEL\"",
":",
"if",
"not",
"self",
".",
"ast",
":",
"return",
"self",
"# Collect canonical/decanonical NSArg values",
"if",
"not",
"self",
".",
"ast",
".",
"collected_orthologs",
":... | Orthologize BEL AST to given species_id
Will return original entity (ns:value) if no ortholog found.
Args:
species_id (str): species id to convert genes/rna/proteins into
Returns:
BEL: returns self | [
"Orthologize",
"BEL",
"AST",
"to",
"given",
"species_id"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L279-L301 | train | 50,923 |
belbio/bel | bel/lang/belobj.py | BEL.compute_edges | def compute_edges(
self, rules: List[str] = None, ast_result=False, fmt="medium"
) -> List[Mapping[str, Any]]:
"""Computed edges from primary BEL statement
Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
Will run only the list of computed edge rules if given.
Args:
rules (list): a list of rules to filter; only the rules in this list will be applied to computed
fmt (str): short, medium or long version of BEL Edge (function and relation names)
Returns:
List[Mapping[str, Any]]: BEL Edges in medium format
"""
if not self.ast:
return self
edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)
if ast_result:
return edges_asts
edges = []
for ast in edges_asts:
edges.append(
{
"subject": ast.bel_subject.to_string(),
"relation": ast.bel_relation,
"object": ast.bel_object.to_string(),
}
)
return edges | python | def compute_edges(
self, rules: List[str] = None, ast_result=False, fmt="medium"
) -> List[Mapping[str, Any]]:
"""Computed edges from primary BEL statement
Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
Will run only the list of computed edge rules if given.
Args:
rules (list): a list of rules to filter; only the rules in this list will be applied to computed
fmt (str): short, medium or long version of BEL Edge (function and relation names)
Returns:
List[Mapping[str, Any]]: BEL Edges in medium format
"""
if not self.ast:
return self
edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)
if ast_result:
return edges_asts
edges = []
for ast in edges_asts:
edges.append(
{
"subject": ast.bel_subject.to_string(),
"relation": ast.bel_relation,
"object": ast.bel_object.to_string(),
}
)
return edges | [
"def",
"compute_edges",
"(",
"self",
",",
"rules",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"ast_result",
"=",
"False",
",",
"fmt",
"=",
"\"medium\"",
")",
"->",
"List",
"[",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"if",
"not",
"... | Computed edges from primary BEL statement
Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
Will run only the list of computed edge rules if given.
Args:
rules (list): a list of rules to filter; only the rules in this list will be applied to computed
fmt (str): short, medium or long version of BEL Edge (function and relation names)
Returns:
List[Mapping[str, Any]]: BEL Edges in medium format | [
"Computed",
"edges",
"from",
"primary",
"BEL",
"statement"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L339-L372 | train | 50,924 |
belbio/bel | bel/lang/ast.py | ast_dict_to_objects | def ast_dict_to_objects(ast_dict: Mapping[str, Any], bel_obj) -> BELAst:
"""Convert Tatsu AST dictionary to BEL AST object
Args:
ast_dict (Mapping[str, Any])
Returns:
BELAst: object representing the BEL Statement AST
"""
ast_subject = ast_dict.get("subject", None)
ast_object = ast_dict.get("object", None)
bel_subject = None
bel_object = None
bel_relation = ast_dict.get("relation")
if ast_subject:
bel_subject = function_ast_to_objects(ast_subject, bel_obj)
if ast_object:
bel_object = function_ast_to_objects(ast_object, bel_obj)
ast_obj = BELAst(bel_subject, bel_relation, bel_object, bel_obj.spec)
return ast_obj | python | def ast_dict_to_objects(ast_dict: Mapping[str, Any], bel_obj) -> BELAst:
"""Convert Tatsu AST dictionary to BEL AST object
Args:
ast_dict (Mapping[str, Any])
Returns:
BELAst: object representing the BEL Statement AST
"""
ast_subject = ast_dict.get("subject", None)
ast_object = ast_dict.get("object", None)
bel_subject = None
bel_object = None
bel_relation = ast_dict.get("relation")
if ast_subject:
bel_subject = function_ast_to_objects(ast_subject, bel_obj)
if ast_object:
bel_object = function_ast_to_objects(ast_object, bel_obj)
ast_obj = BELAst(bel_subject, bel_relation, bel_object, bel_obj.spec)
return ast_obj | [
"def",
"ast_dict_to_objects",
"(",
"ast_dict",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"bel_obj",
")",
"->",
"BELAst",
":",
"ast_subject",
"=",
"ast_dict",
".",
"get",
"(",
"\"subject\"",
",",
"None",
")",
"ast_object",
"=",
"ast_dict",
".",
"g... | Convert Tatsu AST dictionary to BEL AST object
Args:
ast_dict (Mapping[str, Any])
Returns:
BELAst: object representing the BEL Statement AST | [
"Convert",
"Tatsu",
"AST",
"dictionary",
"to",
"BEL",
"AST",
"object"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L559-L583 | train | 50,925 |
belbio/bel | bel/lang/ast.py | Function.subcomponents | def subcomponents(self, subcomponents):
"""Generate subcomponents of the BEL subject or object
These subcomponents are used for matching parts of a BEL
subject or Object in the Edgestore.
Args:
AST
subcomponents: Pass an empty list to start a new subcomponents request
Returns:
List[str]: subcomponents of BEL subject or object
"""
for arg in self.args:
if arg.__class__.__name__ == "Function":
subcomponents.append(arg.to_string())
if arg.function_type == "primary":
arg.subcomponents(subcomponents)
else:
subcomponents.append(arg.to_string())
return subcomponents | python | def subcomponents(self, subcomponents):
"""Generate subcomponents of the BEL subject or object
These subcomponents are used for matching parts of a BEL
subject or Object in the Edgestore.
Args:
AST
subcomponents: Pass an empty list to start a new subcomponents request
Returns:
List[str]: subcomponents of BEL subject or object
"""
for arg in self.args:
if arg.__class__.__name__ == "Function":
subcomponents.append(arg.to_string())
if arg.function_type == "primary":
arg.subcomponents(subcomponents)
else:
subcomponents.append(arg.to_string())
return subcomponents | [
"def",
"subcomponents",
"(",
"self",
",",
"subcomponents",
")",
":",
"for",
"arg",
"in",
"self",
".",
"args",
":",
"if",
"arg",
".",
"__class__",
".",
"__name__",
"==",
"\"Function\"",
":",
"subcomponents",
".",
"append",
"(",
"arg",
".",
"to_string",
"(... | Generate subcomponents of the BEL subject or object
These subcomponents are used for matching parts of a BEL
subject or Object in the Edgestore.
Args:
AST
subcomponents: Pass an empty list to start a new subcomponents request
Returns:
List[str]: subcomponents of BEL subject or object | [
"Generate",
"subcomponents",
"of",
"the",
"BEL",
"subject",
"or",
"object"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L358-L380 | train | 50,926 |
belbio/bel | bel/lang/ast.py | NSArg.update_nsval | def update_nsval(
self, *, nsval: str = None, ns: str = None, val: str = None
) -> None:
"""Update Namespace and valueast.
Args:
nsval: e.g. HGNC:AKT1
ns: namespace
val: value of entity
"""
if not (ns and val) and nsval:
(ns, val) = nsval.split(":", 1)
elif not (ns and val) and not nsval:
log.error("Did not update NSArg - no ns:val or nsval provided")
self.namespace = ns
self.value = val | python | def update_nsval(
self, *, nsval: str = None, ns: str = None, val: str = None
) -> None:
"""Update Namespace and valueast.
Args:
nsval: e.g. HGNC:AKT1
ns: namespace
val: value of entity
"""
if not (ns and val) and nsval:
(ns, val) = nsval.split(":", 1)
elif not (ns and val) and not nsval:
log.error("Did not update NSArg - no ns:val or nsval provided")
self.namespace = ns
self.value = val | [
"def",
"update_nsval",
"(",
"self",
",",
"*",
",",
"nsval",
":",
"str",
"=",
"None",
",",
"ns",
":",
"str",
"=",
"None",
",",
"val",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"if",
"not",
"(",
"ns",
"and",
"val",
")",
"and",
"nsval",
"... | Update Namespace and valueast.
Args:
nsval: e.g. HGNC:AKT1
ns: namespace
val: value of entity | [
"Update",
"Namespace",
"and",
"valueast",
"."
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L436-L453 | train | 50,927 |
belbio/bel | bel/lang/ast.py | NSArg.orthologize | def orthologize(self, ortho_species_id, belast):
"""Decanonical ortholog name used"""
if (
self.orthologs
and ortho_species_id in self.orthologs
and ortho_species_id != self.species_id
):
self.orthology_species = ortho_species_id
self.canonical = self.orthologs[ortho_species_id]["canonical"]
self.decanonical = self.orthologs[ortho_species_id]["decanonical"]
self.update_nsval(nsval=self.decanonical)
self.orthologized = True
elif self.species_id and ortho_species_id not in self.orthologs:
self.orthologized = False
belast.partially_orthologized = True
return self | python | def orthologize(self, ortho_species_id, belast):
"""Decanonical ortholog name used"""
if (
self.orthologs
and ortho_species_id in self.orthologs
and ortho_species_id != self.species_id
):
self.orthology_species = ortho_species_id
self.canonical = self.orthologs[ortho_species_id]["canonical"]
self.decanonical = self.orthologs[ortho_species_id]["decanonical"]
self.update_nsval(nsval=self.decanonical)
self.orthologized = True
elif self.species_id and ortho_species_id not in self.orthologs:
self.orthologized = False
belast.partially_orthologized = True
return self | [
"def",
"orthologize",
"(",
"self",
",",
"ortho_species_id",
",",
"belast",
")",
":",
"if",
"(",
"self",
".",
"orthologs",
"and",
"ortho_species_id",
"in",
"self",
".",
"orthologs",
"and",
"ortho_species_id",
"!=",
"self",
".",
"species_id",
")",
":",
"self",... | Decanonical ortholog name used | [
"Decanonical",
"ortholog",
"name",
"used"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L483-L501 | train | 50,928 |
belbio/bel | bel/nanopub/belscripts.py | convert_csv_str_to_list | def convert_csv_str_to_list(csv_str: str) -> list:
"""Convert CSV str to list"""
csv_str = re.sub("^\s*{", "", csv_str)
csv_str = re.sub("}\s*$", "", csv_str)
r = csv.reader([csv_str])
row = list(r)[0]
new = []
for col in row:
col = re.sub('^\s*"?\s*', "", col)
col = re.sub('\s*"?\s*$', "", col)
new.append(col)
return new | python | def convert_csv_str_to_list(csv_str: str) -> list:
"""Convert CSV str to list"""
csv_str = re.sub("^\s*{", "", csv_str)
csv_str = re.sub("}\s*$", "", csv_str)
r = csv.reader([csv_str])
row = list(r)[0]
new = []
for col in row:
col = re.sub('^\s*"?\s*', "", col)
col = re.sub('\s*"?\s*$', "", col)
new.append(col)
return new | [
"def",
"convert_csv_str_to_list",
"(",
"csv_str",
":",
"str",
")",
"->",
"list",
":",
"csv_str",
"=",
"re",
".",
"sub",
"(",
"\"^\\s*{\"",
",",
"\"\"",
",",
"csv_str",
")",
"csv_str",
"=",
"re",
".",
"sub",
"(",
"\"}\\s*$\"",
",",
"\"\"",
",",
"csv_str... | Convert CSV str to list | [
"Convert",
"CSV",
"str",
"to",
"list"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L34-L47 | train | 50,929 |
belbio/bel | bel/nanopub/belscripts.py | split_bel_stmt | def split_bel_stmt(stmt: str, line_num) -> tuple:
"""Split bel statement into subject, relation, object tuple"""
m = re.match(f"^(.*?\))\s+([a-zA-Z=\->\|:]+)\s+([\w(]+.*?)$", stmt, flags=0)
if m:
return (m.group(1), m.group(2), m.group(3))
else:
log.info(
f"Could not parse bel statement into components at line number: {line_num} assertion: {stmt}"
)
return (stmt, None, None) | python | def split_bel_stmt(stmt: str, line_num) -> tuple:
"""Split bel statement into subject, relation, object tuple"""
m = re.match(f"^(.*?\))\s+([a-zA-Z=\->\|:]+)\s+([\w(]+.*?)$", stmt, flags=0)
if m:
return (m.group(1), m.group(2), m.group(3))
else:
log.info(
f"Could not parse bel statement into components at line number: {line_num} assertion: {stmt}"
)
return (stmt, None, None) | [
"def",
"split_bel_stmt",
"(",
"stmt",
":",
"str",
",",
"line_num",
")",
"->",
"tuple",
":",
"m",
"=",
"re",
".",
"match",
"(",
"f\"^(.*?\\))\\s+([a-zA-Z=\\->\\|:]+)\\s+([\\w(]+.*?)$\"",
",",
"stmt",
",",
"flags",
"=",
"0",
")",
"if",
"m",
":",
"return",
"(... | Split bel statement into subject, relation, object tuple | [
"Split",
"bel",
"statement",
"into",
"subject",
"relation",
"object",
"tuple"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L94-L104 | train | 50,930 |
belbio/bel | bel/nanopub/belscripts.py | yield_nanopub | def yield_nanopub(assertions, annotations, line_num):
"""Yield nanopub object"""
if not assertions:
return {}
anno = copy.deepcopy(annotations)
evidence = anno.pop("evidence", None)
stmt_group = anno.pop("statement_group", None)
citation = anno.pop("citation", None)
anno_list = []
for anno_type in anno:
if isinstance(anno[anno_type], (list, tuple)):
for val in anno[anno_type]:
anno_list.append({"type": anno_type, "label": val})
else:
anno_list.append({"type": anno_type, "label": anno[anno_type]})
assertions_list = []
for assertion in assertions:
(subj, rel, obj) = split_bel_stmt(assertion, line_num)
assertions_list.append({"subject": subj, "relation": rel, "object": obj})
nanopub = {
"schema_uri": "https://raw.githubusercontent.com/belbio/schemas/master/schemas/nanopub_bel-1.0.0.yaml",
"type": copy.deepcopy(nanopub_type),
"annotations": copy.deepcopy(anno_list),
"citation": copy.deepcopy(citation),
"assertions": copy.deepcopy(assertions_list),
"evidence": evidence,
"metadata": {"statement_group": stmt_group},
}
return {"nanopub": copy.deepcopy(nanopub)} | python | def yield_nanopub(assertions, annotations, line_num):
"""Yield nanopub object"""
if not assertions:
return {}
anno = copy.deepcopy(annotations)
evidence = anno.pop("evidence", None)
stmt_group = anno.pop("statement_group", None)
citation = anno.pop("citation", None)
anno_list = []
for anno_type in anno:
if isinstance(anno[anno_type], (list, tuple)):
for val in anno[anno_type]:
anno_list.append({"type": anno_type, "label": val})
else:
anno_list.append({"type": anno_type, "label": anno[anno_type]})
assertions_list = []
for assertion in assertions:
(subj, rel, obj) = split_bel_stmt(assertion, line_num)
assertions_list.append({"subject": subj, "relation": rel, "object": obj})
nanopub = {
"schema_uri": "https://raw.githubusercontent.com/belbio/schemas/master/schemas/nanopub_bel-1.0.0.yaml",
"type": copy.deepcopy(nanopub_type),
"annotations": copy.deepcopy(anno_list),
"citation": copy.deepcopy(citation),
"assertions": copy.deepcopy(assertions_list),
"evidence": evidence,
"metadata": {"statement_group": stmt_group},
}
return {"nanopub": copy.deepcopy(nanopub)} | [
"def",
"yield_nanopub",
"(",
"assertions",
",",
"annotations",
",",
"line_num",
")",
":",
"if",
"not",
"assertions",
":",
"return",
"{",
"}",
"anno",
"=",
"copy",
".",
"deepcopy",
"(",
"annotations",
")",
"evidence",
"=",
"anno",
".",
"pop",
"(",
"\"evid... | Yield nanopub object | [
"Yield",
"nanopub",
"object"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L107-L142 | train | 50,931 |
belbio/bel | bel/nanopub/belscripts.py | process_documentline | def process_documentline(line, nanopubs_metadata):
"""Process SET DOCUMENT line in BEL script"""
matches = re.match('SET DOCUMENT\s+(\w+)\s+=\s+"?(.*?)"?$', line)
key = matches.group(1)
val = matches.group(2)
nanopubs_metadata[key] = val
return nanopubs_metadata | python | def process_documentline(line, nanopubs_metadata):
"""Process SET DOCUMENT line in BEL script"""
matches = re.match('SET DOCUMENT\s+(\w+)\s+=\s+"?(.*?)"?$', line)
key = matches.group(1)
val = matches.group(2)
nanopubs_metadata[key] = val
return nanopubs_metadata | [
"def",
"process_documentline",
"(",
"line",
",",
"nanopubs_metadata",
")",
":",
"matches",
"=",
"re",
".",
"match",
"(",
"'SET DOCUMENT\\s+(\\w+)\\s+=\\s+\"?(.*?)\"?$'",
",",
"line",
")",
"key",
"=",
"matches",
".",
"group",
"(",
"1",
")",
"val",
"=",
"matches... | Process SET DOCUMENT line in BEL script | [
"Process",
"SET",
"DOCUMENT",
"line",
"in",
"BEL",
"script"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L145-L153 | train | 50,932 |
belbio/bel | bel/nanopub/belscripts.py | process_definition | def process_definition(line, nanopubs_metadata):
"""Process DEFINE line in BEL script"""
matches = re.match('DEFINE\s+(\w+)\s+(\w+)\s+AS\s+URL\s+"(.*?)"\s*$', line)
if matches:
def_type = matches.group(1).lower()
if def_type == "namespace":
def_type = "namespaces"
elif def_type == "annotation":
def_type == "annotations"
key = matches.group(2)
val = matches.group(3)
if def_type in nanopubs_metadata:
nanopubs_metadata[def_type][key] = val
else:
nanopubs_metadata[def_type] = {key: val}
matches = re.match("DEFINE\s+(\w+)\s+(\w+)\s+AS\s+LIST\s+{(.*?)}\s*$", line)
if matches:
def_type = matches.group(1).lower()
if def_type == "namespace":
def_type = "namespaces"
elif def_type == "annotation":
def_type == "annotations"
key = matches.group(2)
val = matches.group(3)
vals = convert_csv_str_to_list(val)
if def_type in nanopubs_metadata:
nanopubs_metadata[def_type][key] = vals
else:
nanopubs_metadata[def_type] = {key: vals}
return nanopubs_metadata | python | def process_definition(line, nanopubs_metadata):
"""Process DEFINE line in BEL script"""
matches = re.match('DEFINE\s+(\w+)\s+(\w+)\s+AS\s+URL\s+"(.*?)"\s*$', line)
if matches:
def_type = matches.group(1).lower()
if def_type == "namespace":
def_type = "namespaces"
elif def_type == "annotation":
def_type == "annotations"
key = matches.group(2)
val = matches.group(3)
if def_type in nanopubs_metadata:
nanopubs_metadata[def_type][key] = val
else:
nanopubs_metadata[def_type] = {key: val}
matches = re.match("DEFINE\s+(\w+)\s+(\w+)\s+AS\s+LIST\s+{(.*?)}\s*$", line)
if matches:
def_type = matches.group(1).lower()
if def_type == "namespace":
def_type = "namespaces"
elif def_type == "annotation":
def_type == "annotations"
key = matches.group(2)
val = matches.group(3)
vals = convert_csv_str_to_list(val)
if def_type in nanopubs_metadata:
nanopubs_metadata[def_type][key] = vals
else:
nanopubs_metadata[def_type] = {key: vals}
return nanopubs_metadata | [
"def",
"process_definition",
"(",
"line",
",",
"nanopubs_metadata",
")",
":",
"matches",
"=",
"re",
".",
"match",
"(",
"'DEFINE\\s+(\\w+)\\s+(\\w+)\\s+AS\\s+URL\\s+\"(.*?)\"\\s*$'",
",",
"line",
")",
"if",
"matches",
":",
"def_type",
"=",
"matches",
".",
"group",
... | Process DEFINE line in BEL script | [
"Process",
"DEFINE",
"line",
"in",
"BEL",
"script"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L156-L192 | train | 50,933 |
belbio/bel | bel/nanopub/belscripts.py | process_unset | def process_unset(line, annotations):
"""Process UNSET lines in BEL Script"""
matches = re.match('UNSET\s+"?(.*?)"?\s*$', line)
if matches:
val = matches.group(1)
if val == "ALL" or val == "STATEMENT_GROUP":
annotations = {}
elif re.match("{", val):
vals = convert_csv_str_to_list(val)
for val in vals:
annotations.pop(val, None)
else:
annotations.pop(val, None)
else:
log.warn(f"Problem with UNSET line: {line}")
return annotations | python | def process_unset(line, annotations):
"""Process UNSET lines in BEL Script"""
matches = re.match('UNSET\s+"?(.*?)"?\s*$', line)
if matches:
val = matches.group(1)
if val == "ALL" or val == "STATEMENT_GROUP":
annotations = {}
elif re.match("{", val):
vals = convert_csv_str_to_list(val)
for val in vals:
annotations.pop(val, None)
else:
annotations.pop(val, None)
else:
log.warn(f"Problem with UNSET line: {line}")
return annotations | [
"def",
"process_unset",
"(",
"line",
",",
"annotations",
")",
":",
"matches",
"=",
"re",
".",
"match",
"(",
"'UNSET\\s+\"?(.*?)\"?\\s*$'",
",",
"line",
")",
"if",
"matches",
":",
"val",
"=",
"matches",
".",
"group",
"(",
"1",
")",
"if",
"val",
"==",
"\... | Process UNSET lines in BEL Script | [
"Process",
"UNSET",
"lines",
"in",
"BEL",
"Script"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L195-L213 | train | 50,934 |
belbio/bel | bel/nanopub/belscripts.py | process_set | def process_set(line, annotations):
"""Convert annotations into nanopub_bel annotations format"""
matches = re.match('SET\s+(\w+)\s*=\s*"?(.*?)"?\s*$', line)
key = None
if matches:
key = matches.group(1)
val = matches.group(2)
if key == "STATEMENT_GROUP":
annotations["statement_group"] = val
elif key == "Citation":
annotations["citation"] = process_citation(val)
elif key.lower() == "support" or key.lower() == "evidence":
annotations["evidence"] = val
elif re.match("\s*{.*?}", val):
vals = convert_csv_str_to_list(val)
annotations[key] = vals
else:
annotations[key] = val
return annotations | python | def process_set(line, annotations):
"""Convert annotations into nanopub_bel annotations format"""
matches = re.match('SET\s+(\w+)\s*=\s*"?(.*?)"?\s*$', line)
key = None
if matches:
key = matches.group(1)
val = matches.group(2)
if key == "STATEMENT_GROUP":
annotations["statement_group"] = val
elif key == "Citation":
annotations["citation"] = process_citation(val)
elif key.lower() == "support" or key.lower() == "evidence":
annotations["evidence"] = val
elif re.match("\s*{.*?}", val):
vals = convert_csv_str_to_list(val)
annotations[key] = vals
else:
annotations[key] = val
return annotations | [
"def",
"process_set",
"(",
"line",
",",
"annotations",
")",
":",
"matches",
"=",
"re",
".",
"match",
"(",
"'SET\\s+(\\w+)\\s*=\\s*\"?(.*?)\"?\\s*$'",
",",
"line",
")",
"key",
"=",
"None",
"if",
"matches",
":",
"key",
"=",
"matches",
".",
"group",
"(",
"1",... | Convert annotations into nanopub_bel annotations format | [
"Convert",
"annotations",
"into",
"nanopub_bel",
"annotations",
"format"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L216-L238 | train | 50,935 |
belbio/bel | bel/nanopub/belscripts.py | preprocess_belscript | def preprocess_belscript(lines):
""" Convert any multi-line SET statements into single line SET statements"""
set_flag = False
for line in lines:
if set_flag is False and re.match("SET", line):
set_flag = True
set_line = [line.rstrip()]
# SET following SET
elif set_flag and re.match("SET", line):
yield f"{' '.join(set_line)}\n"
set_line = [line.rstrip()]
# Blank line following SET yields single line SET
elif set_flag and re.match("\s+$", line):
yield f"{' '.join(set_line)}\n"
yield line
set_flag = False
# Append second, third, ... lines to SET
elif set_flag:
set_line.append(line.rstrip())
else:
yield line | python | def preprocess_belscript(lines):
""" Convert any multi-line SET statements into single line SET statements"""
set_flag = False
for line in lines:
if set_flag is False and re.match("SET", line):
set_flag = True
set_line = [line.rstrip()]
# SET following SET
elif set_flag and re.match("SET", line):
yield f"{' '.join(set_line)}\n"
set_line = [line.rstrip()]
# Blank line following SET yields single line SET
elif set_flag and re.match("\s+$", line):
yield f"{' '.join(set_line)}\n"
yield line
set_flag = False
# Append second, third, ... lines to SET
elif set_flag:
set_line.append(line.rstrip())
else:
yield line | [
"def",
"preprocess_belscript",
"(",
"lines",
")",
":",
"set_flag",
"=",
"False",
"for",
"line",
"in",
"lines",
":",
"if",
"set_flag",
"is",
"False",
"and",
"re",
".",
"match",
"(",
"\"SET\"",
",",
"line",
")",
":",
"set_flag",
"=",
"True",
"set_line",
... | Convert any multi-line SET statements into single line SET statements | [
"Convert",
"any",
"multi",
"-",
"line",
"SET",
"statements",
"into",
"single",
"line",
"SET",
"statements"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L266-L288 | train | 50,936 |
belbio/bel | bel/nanopub/belscripts.py | parse_belscript | def parse_belscript(lines):
"""Lines from the BELScript - can be an iterator or list
yields Nanopubs in nanopubs_bel-1.0.0 format
"""
nanopubs_metadata = {}
annotations = {}
assertions = []
# # Turn a list into an iterator
# if not isinstance(lines, collections.Iterator):
# lines = iter(lines)
line_num = 0
# for line in preprocess_belscript(lines):
for line in set_single_line(lines):
line_num += 1
# Get rid of trailing comments
line = re.sub("\/\/.*?$", "", line)
line = line.rstrip()
# Collapse continuation lines
while re.search("\\\s*$", line):
line = line.replace("\\", "") + next(lines)
# Process lines #################################
if re.match("\s*#", line) or re.match("\s*$", line):
# Skip comments and empty lines
continue
elif re.match("SET DOCUMENT", line):
nanopubs_metadata = process_documentline(line, nanopubs_metadata)
elif re.match("DEFINE", line):
nanopubs_metadata = process_definition(line, nanopubs_metadata)
elif re.match("UNSET", line):
# Process any assertions prior to changing annotations
if assertions:
yield yield_nanopub(assertions, annotations, line_num)
assertions = []
annotations = process_unset(line, annotations)
elif re.match("SET", line):
# Create nanopubs metadata prior to starting BEL Script statements section
if nanopubs_metadata:
yield yield_metadata(nanopubs_metadata)
nanopubs_metadata = {}
# Process any assertions prior to changing annotations
if assertions:
yield yield_nanopub(assertions, annotations, line_num)
assertions = []
annotations = process_set(line, annotations)
else:
assertions.append(line)
# Catch any leftover bel statements
yield_nanopub(assertions, annotations, line_num) | python | def parse_belscript(lines):
"""Lines from the BELScript - can be an iterator or list
yields Nanopubs in nanopubs_bel-1.0.0 format
"""
nanopubs_metadata = {}
annotations = {}
assertions = []
# # Turn a list into an iterator
# if not isinstance(lines, collections.Iterator):
# lines = iter(lines)
line_num = 0
# for line in preprocess_belscript(lines):
for line in set_single_line(lines):
line_num += 1
# Get rid of trailing comments
line = re.sub("\/\/.*?$", "", line)
line = line.rstrip()
# Collapse continuation lines
while re.search("\\\s*$", line):
line = line.replace("\\", "") + next(lines)
# Process lines #################################
if re.match("\s*#", line) or re.match("\s*$", line):
# Skip comments and empty lines
continue
elif re.match("SET DOCUMENT", line):
nanopubs_metadata = process_documentline(line, nanopubs_metadata)
elif re.match("DEFINE", line):
nanopubs_metadata = process_definition(line, nanopubs_metadata)
elif re.match("UNSET", line):
# Process any assertions prior to changing annotations
if assertions:
yield yield_nanopub(assertions, annotations, line_num)
assertions = []
annotations = process_unset(line, annotations)
elif re.match("SET", line):
# Create nanopubs metadata prior to starting BEL Script statements section
if nanopubs_metadata:
yield yield_metadata(nanopubs_metadata)
nanopubs_metadata = {}
# Process any assertions prior to changing annotations
if assertions:
yield yield_nanopub(assertions, annotations, line_num)
assertions = []
annotations = process_set(line, annotations)
else:
assertions.append(line)
# Catch any leftover bel statements
yield_nanopub(assertions, annotations, line_num) | [
"def",
"parse_belscript",
"(",
"lines",
")",
":",
"nanopubs_metadata",
"=",
"{",
"}",
"annotations",
"=",
"{",
"}",
"assertions",
"=",
"[",
"]",
"# # Turn a list into an iterator",
"# if not isinstance(lines, collections.Iterator):",
"# lines = iter(lines)",
"line_num",... | Lines from the BELScript - can be an iterator or list
yields Nanopubs in nanopubs_bel-1.0.0 format | [
"Lines",
"from",
"the",
"BELScript",
"-",
"can",
"be",
"an",
"iterator",
"or",
"list"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L291-L353 | train | 50,937 |
RockFeng0/rtsf-http | httpdriver/actions.py | RequestTrackInfo.__stringify_body | def __stringify_body(self, request_or_response):
''' this method reference from httprunner '''
headers = self.__track_info['{}_headers'.format(request_or_response)]
body = self.__track_info.get('{}_body'.format(request_or_response))
if isinstance(body, CaseInsensitiveDict):
body = json.dumps(dict(body), ensure_ascii=False)
elif isinstance(body, (dict, list)):
body = json.dumps(body, indent=2, ensure_ascii=False)
elif isinstance(body, bytes):
resp_content_type = headers.get("Content-Type", "")
try:
if "image" in resp_content_type:
self.__track_info["response_data_type"] = "image"
body = "data:{};base64,{}".format(
resp_content_type,
b64encode(body).decode('utf-8')
)
else:
body = escape(body.decode("utf-8"))
except UnicodeDecodeError:
pass
elif not isinstance(body, (basestring, numeric_types, Iterable)):
# class instance, e.g. MultipartEncoder()
body = repr(body)
self.__track_info['{}_body'.format(request_or_response)] = body | python | def __stringify_body(self, request_or_response):
''' this method reference from httprunner '''
headers = self.__track_info['{}_headers'.format(request_or_response)]
body = self.__track_info.get('{}_body'.format(request_or_response))
if isinstance(body, CaseInsensitiveDict):
body = json.dumps(dict(body), ensure_ascii=False)
elif isinstance(body, (dict, list)):
body = json.dumps(body, indent=2, ensure_ascii=False)
elif isinstance(body, bytes):
resp_content_type = headers.get("Content-Type", "")
try:
if "image" in resp_content_type:
self.__track_info["response_data_type"] = "image"
body = "data:{};base64,{}".format(
resp_content_type,
b64encode(body).decode('utf-8')
)
else:
body = escape(body.decode("utf-8"))
except UnicodeDecodeError:
pass
elif not isinstance(body, (basestring, numeric_types, Iterable)):
# class instance, e.g. MultipartEncoder()
body = repr(body)
self.__track_info['{}_body'.format(request_or_response)] = body | [
"def",
"__stringify_body",
"(",
"self",
",",
"request_or_response",
")",
":",
"headers",
"=",
"self",
".",
"__track_info",
"[",
"'{}_headers'",
".",
"format",
"(",
"request_or_response",
")",
"]",
"body",
"=",
"self",
".",
"__track_info",
".",
"get",
"(",
"'... | this method reference from httprunner | [
"this",
"method",
"reference",
"from",
"httprunner"
] | 3280cc9a01b0c92c52d699b0ebc29e55e62611a0 | https://github.com/RockFeng0/rtsf-http/blob/3280cc9a01b0c92c52d699b0ebc29e55e62611a0/httpdriver/actions.py#L73-L102 | train | 50,938 |
belbio/bel | bel/nanopub/files.py | read_nanopubs | def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]:
"""Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will parsed as a JSONLines file
IF filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Returns:
Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format
"""
jsonl_flag, json_flag, yaml_flag = False, False, False
if fn == "-" or "jsonl" in fn:
jsonl_flag = True
elif "json" in fn:
json_flag = True
elif re.search("ya?ml", fn):
yaml_flag = True
else:
log.error("Do not recognize nanopub file format - neither json nor jsonl format.")
return {}
try:
if re.search("gz$", fn):
f = gzip.open(fn, "rt")
else:
try:
f = click.open_file(fn, mode="rt")
except Exception as e:
log.info(f"Can not open file {fn} Error: {e}")
quit()
if jsonl_flag:
for line in f:
yield json.loads(line)
elif json_flag:
nanopubs = json.load(f)
for nanopub in nanopubs:
yield nanopub
elif yaml_flag:
nanopubs = yaml.load(f, Loader=yaml.SafeLoader)
for nanopub in nanopubs:
yield nanopub
except Exception as e:
log.error(f"Could not open file: {fn}") | python | def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]:
"""Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will parsed as a JSONLines file
IF filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Returns:
Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format
"""
jsonl_flag, json_flag, yaml_flag = False, False, False
if fn == "-" or "jsonl" in fn:
jsonl_flag = True
elif "json" in fn:
json_flag = True
elif re.search("ya?ml", fn):
yaml_flag = True
else:
log.error("Do not recognize nanopub file format - neither json nor jsonl format.")
return {}
try:
if re.search("gz$", fn):
f = gzip.open(fn, "rt")
else:
try:
f = click.open_file(fn, mode="rt")
except Exception as e:
log.info(f"Can not open file {fn} Error: {e}")
quit()
if jsonl_flag:
for line in f:
yield json.loads(line)
elif json_flag:
nanopubs = json.load(f)
for nanopub in nanopubs:
yield nanopub
elif yaml_flag:
nanopubs = yaml.load(f, Loader=yaml.SafeLoader)
for nanopub in nanopubs:
yield nanopub
except Exception as e:
log.error(f"Could not open file: {fn}") | [
"def",
"read_nanopubs",
"(",
"fn",
":",
"str",
")",
"->",
"Iterable",
"[",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"jsonl_flag",
",",
"json_flag",
",",
"yaml_flag",
"=",
"False",
",",
"False",
",",
"False",
"if",
"fn",
"==",
"\"-\"",
"or",
... | Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will parsed as a JSONLines file
IF filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Returns:
Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format | [
"Read",
"file",
"and",
"generate",
"nanopubs"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/files.py#L23-L72 | train | 50,939 |
def create_nanopubs_fh(output_fn: str):
    """Open a filehandle for writing nanopubs and report the output format.

    \b
    If output fn is '-' will write JSONlines to STDOUT
    If output fn has *.gz, will written as a gzip file
    If output fn has *.jsonl*, will written as a JSONLines file
    IF output fn has *.json*, will be written as a JSON file
    If output fn has *.yaml* or *.yml*, will be written as a YAML file

    Args:
        output_fn: Name of output file

    Returns:
        (filehandle, yaml_flag, jsonl_flag, json_flag)
    """
    yaml_flag = jsonl_flag = json_flag = False

    if not output_fn:
        # No filename given -- stream to stdout with every format flag off
        return (sys.stdout, yaml_flag, jsonl_flag, json_flag)

    # Gzipped output needs its own opener; everything else goes through click
    if re.search("gz$", output_fn):
        out_fh = gzip.open(output_fn, "wt")
    else:
        out_fh = click.open_file(output_fn, mode="wt")

    # Decide the serialization format from the filename
    if re.search("ya?ml", output_fn):
        yaml_flag = True
    elif "jsonl" in output_fn or "-" == output_fn:
        jsonl_flag = True
    elif "json" in output_fn:
        json_flag = True

    return (out_fh, yaml_flag, jsonl_flag, json_flag)
"""Create Nanopubs output filehandle
\b
If output fn is '-' will write JSONlines to STDOUT
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
Args:
output_fn: Name of output file
Returns:
(filehandle, yaml_flag, jsonl_flag, json_flag)
"""
# output file
# set output flags
json_flag, jsonl_flag, yaml_flag = False, False, False
if output_fn:
if re.search("gz$", output_fn):
out_fh = gzip.open(output_fn, "wt")
else:
out_fh = click.open_file(output_fn, mode="wt")
if re.search("ya?ml", output_fn):
yaml_flag = True
elif "jsonl" in output_fn or "-" == output_fn:
jsonl_flag = True
elif "json" in output_fn:
json_flag = True
else:
out_fh = sys.stdout
return (out_fh, yaml_flag, jsonl_flag, json_flag) | [
"def",
"create_nanopubs_fh",
"(",
"output_fn",
":",
"str",
")",
":",
"# output file",
"# set output flags",
"json_flag",
",",
"jsonl_flag",
",",
"yaml_flag",
"=",
"False",
",",
"False",
",",
"False",
"if",
"output_fn",
":",
"if",
"re",
".",
"search",
"(",
"\... | Create Nanopubs output filehandle
\b
If output fn is '-' will write JSONlines to STDOUT
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
Args:
output_fn: Name of output file
Returns:
(filehandle, yaml_flag, jsonl_flag, json_flag) | [
"Create",
"Nanopubs",
"output",
"filehandle"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/files.py#L75-L111 | train | 50,940 |
def write_edges(
    edges: Mapping[str, Any],
    filename: str,
    jsonlines: bool = False,
    gzipflag: bool = False,
    yaml: bool = False,
):
    """Write edges to file.

    Not yet implemented -- currently a no-op placeholder.

    Args:
        edges (Mapping[str, Any]): in edges JSON Schema format
        filename (str): filename to write
        jsonlines (bool): output in JSONLines format?
        gzipflag (bool): create gzipped file?
        yaml (bool): create yaml file?
    """
    # TODO: implement serialization; intentionally does nothing for now
    return None
edges: Mapping[str, Any],
filename: str,
jsonlines: bool = False,
gzipflag: bool = False,
yaml: bool = False,
):
"""Write edges to file
Args:
edges (Mapping[str, Any]): in edges JSON Schema format
filename (str): filename to write
jsonlines (bool): output in JSONLines format?
gzipflag (bool): create gzipped file?
yaml (bool): create yaml file?
"""
pass | [
"def",
"write_edges",
"(",
"edges",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"filename",
":",
"str",
",",
"jsonlines",
":",
"bool",
"=",
"False",
",",
"gzipflag",
":",
"bool",
"=",
"False",
",",
"yaml",
":",
"bool",
"=",
"False",
",",
")",... | Write edges to file
Args:
edges (Mapping[str, Any]): in edges JSON Schema format
filename (str): filename to write
jsonlines (bool): output in JSONLines format?
gzipflag (bool): create gzipped file?
yaml (bool): create yaml file? | [
"Write",
"edges",
"to",
"file"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/files.py#L153-L169 | train | 50,941 |
def add_index_alias(es, index_name, alias_name):
    """Point the alias ``alias_name`` at the index ``index_name``.

    Args:
        es: Elasticsearch client handle
        index_name: name of the index to attach the alias to
        alias_name: name of the alias to create/update
    """
    # Bug fix: this previously ignored the ``alias_name`` parameter and
    # always used the module-level ``terms_alias`` constant instead.
    es.indices.put_alias(index=index_name, name=alias_name)
"""Add index alias to index_name"""
es.indices.put_alias(index=index_name, name=terms_alias) | [
"def",
"add_index_alias",
"(",
"es",
",",
"index_name",
",",
"alias_name",
")",
":",
"es",
".",
"indices",
".",
"put_alias",
"(",
"index",
"=",
"index_name",
",",
"name",
"=",
"terms_alias",
")"
] | Add index alias to index_name | [
"Add",
"index",
"alias",
"to",
"index_name"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L25-L28 | train | 50,942 |
def delete_index(es, index_name: str):
    """Delete the named Elasticsearch index.

    Args:
        es: Elasticsearch client handle
        index_name: index to delete

    Returns:
        the Elasticsearch response, or None when no index name was given
    """
    if not index_name:
        # Nothing to delete -- warn and bail out
        log.warn("No index name given to delete")
        return None

    return es.indices.delete(index=index_name)
"""Delete the terms index"""
if not index_name:
log.warn("No index name given to delete")
return None
result = es.indices.delete(index=index_name)
return result | [
"def",
"delete_index",
"(",
"es",
",",
"index_name",
":",
"str",
")",
":",
"if",
"not",
"index_name",
":",
"log",
".",
"warn",
"(",
"\"No index name given to delete\"",
")",
"return",
"None",
"result",
"=",
"es",
".",
"indices",
".",
"delete",
"(",
"index"... | Delete the terms index | [
"Delete",
"the",
"terms",
"index"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L40-L48 | train | 50,943 |
def create_terms_index(es, index_name: str):
    """Create a terms index named ``index_name`` using the standard terms mapping.

    Args:
        es: Elasticsearch client handle
        index_name: name of the index to create
    """
    # Load the shared terms mapping definition from the module-level path
    with open(mappings_terms_fn, "r") as mapping_fh:
        terms_mapping = yaml.load(mapping_fh, Loader=yaml.SafeLoader)

    try:
        es.indices.create(index=index_name, body=terms_mapping)
    except Exception as e:
        log.error(f"Could not create elasticsearch terms index: {e}")
"""Create terms index"""
with open(mappings_terms_fn, "r") as f:
mappings_terms = yaml.load(f, Loader=yaml.SafeLoader)
try:
es.indices.create(index=index_name, body=mappings_terms)
except Exception as e:
log.error(f"Could not create elasticsearch terms index: {e}") | [
"def",
"create_terms_index",
"(",
"es",
",",
"index_name",
":",
"str",
")",
":",
"with",
"open",
"(",
"mappings_terms_fn",
",",
"\"r\"",
")",
"as",
"f",
":",
"mappings_terms",
"=",
"yaml",
".",
"load",
"(",
"f",
",",
"Loader",
"=",
"yaml",
".",
"SafeLo... | Create terms index | [
"Create",
"terms",
"index"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L51-L61 | train | 50,944 |
def delete_terms_indexes(es, index_name: str = "terms_*"):
    """Delete every terms index matching ``index_name``.

    Args:
        es: Elasticsearch client handle
        index_name: index name or wildcard pattern (defaults to all terms indexes)
    """
    try:
        es.indices.delete(index=index_name)
    except Exception as e:
        # Best-effort cleanup -- log and continue rather than raise
        log.error(f"Could not delete all terms indices: {e}")
"""Delete all terms indexes"""
try:
es.indices.delete(index=index_name)
except Exception as e:
log.error(f"Could not delete all terms indices: {e}") | [
"def",
"delete_terms_indexes",
"(",
"es",
",",
"index_name",
":",
"str",
"=",
"\"terms_*\"",
")",
":",
"try",
":",
"es",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"index_name",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(... | Delete all terms indexes | [
"Delete",
"all",
"terms",
"indexes"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L64-L70 | train | 50,945 |
def bulk_load_docs(es, docs):
    """Bulk load documents into Elasticsearch.

    Args:
        es: elasticsearch handle
        docs: Iterator of doc objects - includes index_name
    """
    chunk_size = 200
    try:
        # helpers.bulk returns (success_count, error_list)
        results = elasticsearch.helpers.bulk(es, docs, chunk_size=chunk_size)
        log.debug(f"Elasticsearch documents loaded: {results[0]}")

        # elasticsearch.helpers.parallel_bulk(es, terms, chunk_size=chunk_size, thread_count=4)
        if results[1]:
            log.error("Bulk load errors {}".format(results))
    except elasticsearch.ElasticsearchException as e:
        log.error("Indexing error: {}\n".format(e))
"""Bulk load docs
Args:
es: elasticsearch handle
docs: Iterator of doc objects - includes index_name
"""
chunk_size = 200
try:
results = elasticsearch.helpers.bulk(es, docs, chunk_size=chunk_size)
log.debug(f"Elasticsearch documents loaded: {results[0]}")
# elasticsearch.helpers.parallel_bulk(es, terms, chunk_size=chunk_size, thread_count=4)
if len(results[1]) > 0:
log.error("Bulk load errors {}".format(results))
except elasticsearch.ElasticsearchException as e:
log.error("Indexing error: {}\n".format(e)) | [
"def",
"bulk_load_docs",
"(",
"es",
",",
"docs",
")",
":",
"chunk_size",
"=",
"200",
"try",
":",
"results",
"=",
"elasticsearch",
".",
"helpers",
".",
"bulk",
"(",
"es",
",",
"docs",
",",
"chunk_size",
"=",
"chunk_size",
")",
"log",
".",
"debug",
"(",
... | Bulk load docs
Args:
es: elasticsearch handle
docs: Iterator of doc objects - includes index_name | [
"Bulk",
"load",
"docs"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L85-L103 | train | 50,946 |
def validate(bo, error_level: str = "WARNING") -> Tuple[bool, List[Tuple[str, str]]]:
    """Semantically validate BEL AST

    Add errors and warnings to bel_obj.validation_messages

    Error Levels are similar to log levels - selecting WARNING includes both
    WARNING and ERROR, selecting ERROR just includes ERROR

    Args:
        bo: main BEL language object
        error_level: return ERRORs only or also WARNINGs

    Returns:
        bo: the same BEL object, with validation_messages extended and
            parse_valid updated.  NOTE(review): the return annotation says
            Tuple[bool, List[...]] but the code returns ``bo`` -- confirm
            and fix the annotation upstream.
    """
    if bo.ast:
        bo = validate_functions(bo.ast, bo)  # No WARNINGs generated in this function
        if error_level == "WARNING":
            bo = validate_arg_values(bo.ast, bo)  # validates NSArg and StrArg values
    else:
        # Parser produced no AST at all -- always an ERROR regardless of level
        bo.validation_messages.append(("ERROR", "Invalid BEL Statement - cannot parse"))

    # Any ERROR-level message marks the whole statement as invalid
    for msg in bo.validation_messages:
        if msg[0] == "ERROR":
            bo.parse_valid = False
            break

    return bo
"""Semantically validate BEL AST
Add errors and warnings to bel_obj.validation_messages
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
Args:
bo: main BEL language object
error_level: return ERRORs only or also WARNINGs
Returns:
Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages)
"""
if bo.ast:
bo = validate_functions(bo.ast, bo) # No WARNINGs generated in this function
if error_level == "WARNING":
bo = validate_arg_values(bo.ast, bo) # validates NSArg and StrArg values
else:
bo.validation_messages.append(("ERROR", "Invalid BEL Statement - cannot parse"))
for msg in bo.validation_messages:
if msg[0] == "ERROR":
bo.parse_valid = False
break
return bo | [
"def",
"validate",
"(",
"bo",
",",
"error_level",
":",
"str",
"=",
"\"WARNING\"",
")",
"->",
"Tuple",
"[",
"bool",
",",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"]",
":",
"if",
"bo",
".",
"ast",
":",
"bo",
"=",
"validate_functions",
... | Semantically validate BEL AST
Add errors and warnings to bel_obj.validation_messages
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
Args:
bo: main BEL language object
error_level: return ERRORs only or also WARNINGs
Returns:
Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages) | [
"Semantically",
"validate",
"BEL",
"AST"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L14-L43 | train | 50,947 |
def validate_functions(ast: BELAst, bo):
    """Recursively validate function signatures

    Determine if function matches one of the available signatures. Also,

    1. Add entity types to AST NSArg, e.g. Abundance, ...
    2. Add optional to AST Arg (optional means it is not a
        fixed, required argument and needs to be sorted for
        canonicalization, e.g. reactants(A, B, C) )

    Args:
        bo: bel object

    Returns:
        bel object
    """
    if isinstance(ast, Function):
        log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}")

        # Look up all signatures declared for this function name
        candidate_signatures = bo.spec["functions"]["signatures"][ast.name]["signatures"]
        (valid_function, messages) = check_function_args(
            ast.args, candidate_signatures, ast.name
        )
        if not valid_function:
            bo.parse_valid = False
            bo.validation_messages.append(
                (
                    "ERROR",
                    "Invalid BEL Statement function {} - problem with function signatures: {}".format(
                        ast.to_string(), ", ".join(messages)
                    ),
                )
            )

    # Recursively process every NSArg by processing BELAst and Functions
    for child in getattr(ast, "args", []):
        validate_functions(child, bo)

    return bo
"""Recursively validate function signatures
Determine if function matches one of the available signatures. Also,
1. Add entity types to AST NSArg, e.g. Abundance, ...
2. Add optional to AST Arg (optional means it is not a
fixed, required argument and needs to be sorted for
canonicalization, e.g. reactants(A, B, C) )
Args:
bo: bel object
Returns:
bel object
"""
if isinstance(ast, Function):
log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}")
function_signatures = bo.spec["functions"]["signatures"][ast.name]["signatures"]
function_name = ast.name
(valid_function, messages) = check_function_args(
ast.args, function_signatures, function_name
)
if not valid_function:
message = ", ".join(messages)
bo.validation_messages.append(
(
"ERROR",
"Invalid BEL Statement function {} - problem with function signatures: {}".format(
ast.to_string(), message
),
)
)
bo.parse_valid = False
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
validate_functions(arg, bo)
return bo | [
"def",
"validate_functions",
"(",
"ast",
":",
"BELAst",
",",
"bo",
")",
":",
"if",
"isinstance",
"(",
"ast",
",",
"Function",
")",
":",
"log",
".",
"debug",
"(",
"f\"Validating: {ast.name}, {ast.function_type}, {ast.args}\"",
")",
"function_signatures",
"=",
"bo",... | Recursively validate function signatures
Determine if function matches one of the available signatures. Also,
1. Add entity types to AST NSArg, e.g. Abundance, ...
2. Add optional to AST Arg (optional means it is not a
fixed, required argument and needs to be sorted for
canonicalization, e.g. reactants(A, B, C) )
Args:
bo: bel object
Returns:
bel object | [
"Recursively",
"validate",
"function",
"signatures"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L46-L88 | train | 50,948 |
def get_belbio_conf_files():
    """Locate the belbio configuration and secrets files.

    Searches the current working directory, the directory named by the
    BELBIO_CONF environment variable, and ~/.belbio -- in that order --
    and returns the first match for each file (empty string when absent).

    Returns:
        (belbio_conf_fp, belbio_secrets_fp)
    """
    home = os.path.expanduser("~")
    cwd = os.getcwd()
    env_conf_dir = os.getenv("BELBIO_CONF", "").rstrip("/")

    conf_candidates = [
        f"{cwd}/belbio_conf.yaml",
        f"{cwd}/belbio_conf.yml",
        f"{env_conf_dir}/belbio_conf.yaml",
        f"{env_conf_dir}/belbio_conf.yml",
        f"{home}/.belbio/conf",
    ]
    secrets_candidates = [
        f"{cwd}/belbio_secrets.yaml",
        f"{cwd}/belbio_secrets.yml",
        f"{env_conf_dir}/belbio_secrets.yaml",
        f"{env_conf_dir}/belbio_secrets.yml",
        f"{home}/.belbio/secrets",
    ]

    # First existing candidate wins; empty string when none exists
    belbio_conf_fp = next((fn for fn in conf_candidates if os.path.exists(fn)), "")
    if not belbio_conf_fp:
        log.error(
            "No BELBio configuration file found - please add one (see http://bel.readthedocs.io/en/latest/configuration.html)"
        )

    belbio_secrets_fp = next(
        (fn for fn in secrets_candidates if os.path.exists(fn)), ""
    )

    return (belbio_conf_fp, belbio_secrets_fp)
"""Get belbio configuration from files
"""
home = os.path.expanduser("~")
cwd = os.getcwd()
belbio_conf_fp, belbio_secrets_fp = "", ""
env_conf_dir = os.getenv("BELBIO_CONF", "").rstrip("/")
conf_paths = [
f"{cwd}/belbio_conf.yaml",
f"{cwd}/belbio_conf.yml",
f"{env_conf_dir}/belbio_conf.yaml",
f"{env_conf_dir}/belbio_conf.yml",
f"{home}/.belbio/conf",
]
secret_paths = [
f"{cwd}/belbio_secrets.yaml",
f"{cwd}/belbio_secrets.yml",
f"{env_conf_dir}/belbio_secrets.yaml",
f"{env_conf_dir}/belbio_secrets.yml",
f"{home}/.belbio/secrets",
]
for fn in conf_paths:
if os.path.exists(fn):
belbio_conf_fp = fn
break
else:
log.error(
"No BELBio configuration file found - please add one (see http://bel.readthedocs.io/en/latest/configuration.html)"
)
for fn in secret_paths:
if os.path.exists(fn):
belbio_secrets_fp = fn
break
return (belbio_conf_fp, belbio_secrets_fp) | [
"def",
"get_belbio_conf_files",
"(",
")",
":",
"home",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"belbio_conf_fp",
",",
"belbio_secrets_fp",
"=",
"\"\"",
",",
"\"\"",
"env_conf_dir",
"=",
"os... | Get belbio configuration from files | [
"Get",
"belbio",
"configuration",
"from",
"files"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L26-L66 | train | 50,949 |
def load_configuration():
    """Load the belbio configuration (and secrets) from disk.

    Returns:
        dict: the merged configuration, annotated with the source file
        paths and module version info
    """
    (belbio_conf_fp, belbio_secrets_fp) = get_belbio_conf_files()
    log.info(f"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} ")

    config = {}
    if belbio_conf_fp:
        with open(belbio_conf_fp, "r") as conf_fh:
            config = yaml.load(conf_fh, Loader=yaml.SafeLoader)
        # Record where the configuration came from
        config["source_files"] = {"conf": belbio_conf_fp}

    if belbio_secrets_fp:
        with open(belbio_secrets_fp, "r") as secrets_fh:
            secrets = yaml.load(secrets_fh, Loader=yaml.SafeLoader)
        config["secrets"] = copy.deepcopy(secrets)
        if "source_files" in config:
            config["source_files"]["secrets"] = belbio_secrets_fp

    get_versions(config)

    # TODO - needs to be completed
    # add_environment_vars(config)

    return config
"""Load the configuration"""
(belbio_conf_fp, belbio_secrets_fp) = get_belbio_conf_files()
log.info(f"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} ")
config = {}
if belbio_conf_fp:
with open(belbio_conf_fp, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
config["source_files"] = {}
config["source_files"]["conf"] = belbio_conf_fp
if belbio_secrets_fp:
with open(belbio_secrets_fp, "r") as f:
secrets = yaml.load(f, Loader=yaml.SafeLoader)
config["secrets"] = copy.deepcopy(secrets)
if "source_files" in config:
config["source_files"]["secrets"] = belbio_secrets_fp
get_versions(config)
# TODO - needs to be completed
# add_environment_vars(config)
return config | [
"def",
"load_configuration",
"(",
")",
":",
"(",
"belbio_conf_fp",
",",
"belbio_secrets_fp",
")",
"=",
"get_belbio_conf_files",
"(",
")",
"log",
".",
"info",
"(",
"f\"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} \"",
")",
"config",
"=",
"{",
"}",
... | Load the configuration | [
"Load",
"the",
"configuration"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L69-L94 | train | 50,950 |
def get_versions(config) -> dict:
    """Get versions of bel modules and tools

    Mutates ``config`` in place, adding a ``version`` entry under ``bel``,
    ``bel_resources`` and/or ``bel_api`` when the corresponding module is
    importable.  NOTE(review): despite the ``-> dict`` annotation, this
    function returns None -- confirm and fix the annotation upstream.
    """
    # Collect bel package version
    try:
        import bel.__version__

        config["bel"]["version"] = bel.__version__.__version__
    except KeyError:
        # config had no "bel" section yet; the import above already
        # succeeded, so bel.__version__ is safe to reference here
        config["bel"] = {"version": bel.__version__.__version__}
    except ModuleNotFoundError:
        pass

    # Collect bel_resources version
    try:
        import tools.__version__

        config["bel_resources"]["version"] = tools.__version__.__version__
    except KeyError:
        # config had no "bel_resources" section yet
        config["bel_resources"] = {"version": tools.__version__.__version__}
    except ModuleNotFoundError:
        pass

    # Collect bel_api version
    try:
        import __version__

        # Only record the version when the top-level __version__ module
        # actually belongs to the BELBIO API application
        if __version__.__name__ == "BELBIO API":
            config["bel_api"]["version"] = __version__.__version__
    except KeyError:
        # config had no "bel_api" section yet
        if __version__.__name__ == "BELBIO API":
            config["bel_api"] = {"version": __version__.__version__}
    except ModuleNotFoundError:
        pass
"""Get versions of bel modules and tools"""
# Collect bel package version
try:
import bel.__version__
config["bel"]["version"] = bel.__version__.__version__
except KeyError:
config["bel"] = {"version": bel.__version__.__version__}
except ModuleNotFoundError:
pass
# Collect bel_resources version
try:
import tools.__version__
config["bel_resources"]["version"] = tools.__version__.__version__
except KeyError:
config["bel_resources"] = {"version": tools.__version__.__version__}
except ModuleNotFoundError:
pass
# Collect bel_api version
try:
import __version__
if __version__.__name__ == "BELBIO API":
config["bel_api"]["version"] = __version__.__version__
except KeyError:
if __version__.__name__ == "BELBIO API":
config["bel_api"] = {"version": __version__.__version__}
except ModuleNotFoundError:
pass | [
"def",
"get_versions",
"(",
"config",
")",
"->",
"dict",
":",
"# Collect bel package version",
"try",
":",
"import",
"bel",
".",
"__version__",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"version\"",
"]",
"=",
"bel",
".",
"__version__",
".",
"__version__",
"except"... | Get versions of bel modules and tools | [
"Get",
"versions",
"of",
"bel",
"modules",
"and",
"tools"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L97-L130 | train | 50,951 |
def add_environment_vars(config: MutableMapping[str, Any]):
    """Override config with environment variables

    Environment variables have to be prefixed with BELBIO_
    which will be stripped before splitting on '__' and lower-casing
    the environment variable name that is left into keys for the
    config dictionary.

    Example:
        BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio

        1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL
        2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url
        3. bel_api__servers__api_url ==> [bel_api, servers, api_url]
        4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio

    Args:
        config: configuration dict, mutated in place
    """
    prefix = "BELBIO_"
    for name in os.environ:
        if not name.startswith(prefix):
            continue
        val = os.environ.get(name)
        if not val:
            continue

        # Bug fix: the old code called e.replace("BELBIO_", "") and discarded
        # the result, so the prefix was never actually stripped.
        env_keys = name[len(prefix):].lower().split("__")

        # Bug fix: the old code built an assignment statement string and
        # passed it to eval(), which cannot execute statements -- every
        # nested override raised SyntaxError and was silently logged away.
        # Walk/create the nested dicts explicitly instead.
        target = config
        for key in env_keys[:-1]:
            target = target.setdefault(key, {})
        target[env_keys[-1]] = val
"""Override config with environment variables
Environment variables have to be prefixed with BELBIO_
which will be stripped before splitting on '__' and lower-casing
the environment variable name that is left into keys for the
config dictionary.
Example:
BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio
1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL
2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url
3. bel_api__servers__api_url ==> [bel_api, servers, api_url]
4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio
"""
# TODO need to redo config - can't add value to dictionary without recursively building up the dict
# check into config libraries again
for e in os.environ:
if re.match("BELBIO_", e):
val = os.environ.get(e)
if val:
e.replace("BELBIO_", "")
env_keys = e.lower().split("__")
if len(env_keys) > 1:
joined = '"]["'.join(env_keys)
eval_config = f'config["{joined}"] = val'
try:
eval(eval_config)
except Exception as exc:
log.warn("Cannot process {e} into config")
else:
config[env_keys[0]] = val | [
"def",
"add_environment_vars",
"(",
"config",
":",
"MutableMapping",
"[",
"str",
",",
"Any",
"]",
")",
":",
"# TODO need to redo config - can't add value to dictionary without recursively building up the dict",
"# check into config libraries again",
"for",
"e",
"in",
"os"... | Override config with environment variables
Environment variables have to be prefixed with BELBIO_
which will be stripped before splitting on '__' and lower-casing
the environment variable name that is left into keys for the
config dictionary.
Example:
BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio
1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL
2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url
3. bel_api__servers__api_url ==> [bel_api, servers, api_url]
4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio | [
"Override",
"config",
"with",
"environment",
"variables"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L134-L168 | train | 50,952 |
def merge_config(
    config: Mapping[str, Any],
    override_config: Mapping[str, Any] = None,
    override_config_fn: str = None,
) -> Mapping[str, Any]:
    """Override config with additional configuration in override_config or override_config_fn

    Used in script to merge CLI options with Config

    Args:
        config: original configuration
        override_config: new configuration to override/extend current config
        override_config_fn: new configuration filename as YAML file

    Returns:
        the merged configuration, or the original ``config`` unchanged when
        no override is supplied
    """
    if override_config_fn:
        with open(override_config_fn, "r") as f:
            override_config = yaml.load(f, Loader=yaml.SafeLoader)

    if not override_config:
        # Bug fix: previously this only logged and then fell through to
        # rec_merge(config, None), which raised TypeError.  Return the
        # original config instead.
        log.info("Missing override_config")
        return config

    return functools.reduce(rec_merge, (config, override_config))
config: Mapping[str, Any],
override_config: Mapping[str, Any] = None,
override_config_fn: str = None,
) -> Mapping[str, Any]:
"""Override config with additional configuration in override_config or override_config_fn
Used in script to merge CLI options with Config
Args:
config: original configuration
override_config: new configuration to override/extend current config
override_config_fn: new configuration filename as YAML file
"""
if override_config_fn:
with open(override_config_fn, "r") as f:
override_config = yaml.load(f, Loader=yaml.SafeLoader)
if not override_config:
log.info("Missing override_config")
return functools.reduce(rec_merge, (config, override_config)) | [
"def",
"merge_config",
"(",
"config",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"override_config",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
",",
"override_config_fn",
":",
"str",
"=",
"None",
",",
")",
"->",
"Mapping",
"[",
"... | Override config with additional configuration in override_config or override_config_fn
Used in script to merge CLI options with Config
Args:
config: original configuration
override_config: new configuration to override/extend current config
override_config_fn: new configuration filename as YAML file | [
"Override",
"config",
"with",
"additional",
"configuration",
"in",
"override_config",
"or",
"override_config_fn"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L171-L193 | train | 50,953 |
def rec_merge(d1, d2):
    """ Recursively merge two dictionaries

    Update two dicts of dicts recursively,
    if either mapping has leaves that are non-dicts,
    the second's leaf overwrites the first's.

    Note: nested mappings inside ``d2`` are updated in place with the merged
    result; the top-level return value is a new dict.

    import collections
    import functools
    e.g. functools.reduce(rec_merge, (d1, d2, d3, d4))
    """
    for k, v in d1.items():
        if k in d2:
            # this next check is the only difference!
            # Bug fix: collections.MutableMapping was a deprecated alias
            # removed in Python 3.10 -- use collections.abc.MutableMapping.
            if all(isinstance(e, collections.abc.MutableMapping) for e in (v, d2[k])):
                d2[k] = rec_merge(v, d2[k])
            # we could further check types and merge as appropriate here.
    d3 = d1.copy()
    d3.update(d2)
    return d3
""" Recursively merge two dictionaries
Update two dicts of dicts recursively,
if either mapping has leaves that are non-dicts,
the second's leaf overwrites the first's.
import collections
import functools
e.g. functools.reduce(rec_merge, (d1, d2, d3, d4))
"""
for k, v in d1.items():
if k in d2:
# this next check is the only difference!
if all(isinstance(e, collections.MutableMapping) for e in (v, d2[k])):
d2[k] = rec_merge(v, d2[k])
# we could further check types and merge as appropriate here.
d3 = d1.copy()
d3.update(d2)
return d3 | [
"def",
"rec_merge",
"(",
"d1",
",",
"d2",
")",
":",
"for",
"k",
",",
"v",
"in",
"d1",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"d2",
":",
"# this next check is the only difference!",
"if",
"all",
"(",
"isinstance",
"(",
"e",
",",
"collections",
... | Recursively merge two dictionaries
Update two dicts of dicts recursively,
if either mapping has leaves that are non-dicts,
the second's leaf overwrites the first's.
import collections
import functools
e.g. functools.reduce(rec_merge, (d1, d2, d3, d4)) | [
"Recursively",
"merge",
"two",
"dictionaries"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L197-L218 | train | 50,954 |
belbio/bel | bel/resources/namespace.py | load_terms | def load_terms(fo: IO, metadata: dict, forceupdate: bool):
"""Load terms into Elasticsearch and ArangoDB
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
fo: file obj - terminology file
metadata: dict containing the metadata for terminology
forceupdate: force full update - e.g. don't leave Elasticsearch indexes
alone if their version ID matches
"""
version = metadata["metadata"]["version"]
# LOAD TERMS INTO Elasticsearch
with timy.Timer("Load Terms") as timer:
es = bel.db.elasticsearch.get_client()
es_version = version.replace("T", "").replace("-", "").replace(":", "")
index_prefix = f"terms_{metadata['metadata']['namespace'].lower()}"
index_name = f"{index_prefix}_{es_version}"
# Create index with mapping
if not elasticsearch.index_exists(es, index_name):
elasticsearch.create_terms_index(es, index_name)
elif forceupdate: # force an update to the index
index_name += "_alt"
elasticsearch.create_terms_index(es, index_name)
else:
return # Skip loading if not forced and not a new namespace
terms_iterator = terms_iterator_for_elasticsearch(fo, index_name)
elasticsearch.bulk_load_docs(es, terms_iterator)
# Remove old namespace index
index_names = elasticsearch.get_all_index_names(es)
for name in index_names:
if name != index_name and index_prefix in name:
elasticsearch.delete_index(es, name)
# Add terms_alias to this index
elasticsearch.add_index_alias(es, index_name, terms_alias)
log.info(
"Load namespace terms",
elapsed=timer.elapsed,
namespace=metadata["metadata"]["namespace"],
)
# LOAD EQUIVALENCES INTO ArangoDB
with timy.Timer("Load Term Equivalences") as timer:
arango_client = arangodb.get_client()
belns_db = arangodb.get_belns_handle(arango_client)
arangodb.batch_load_docs(
belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate="update"
)
log.info(
"Loaded namespace equivalences",
elapsed=timer.elapsed,
namespace=metadata["metadata"]["namespace"],
)
# Clean up old entries
remove_old_equivalence_edges = f"""
FOR edge in equivalence_edges
FILTER edge.source == "{metadata["metadata"]["namespace"]}"
FILTER edge.version != "{version}"
REMOVE edge IN equivalence_edges
"""
remove_old_equivalence_nodes = f"""
FOR node in equivalence_nodes
FILTER node.source == "{metadata["metadata"]["namespace"]}"
FILTER node.version != "{version}"
REMOVE node IN equivalence_nodes
"""
arangodb.aql_query(belns_db, remove_old_equivalence_edges)
arangodb.aql_query(belns_db, remove_old_equivalence_nodes)
# Add metadata to resource metadata collection
metadata["_key"] = f"Namespace_{metadata['metadata']['namespace']}"
try:
belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
except ArangoError as ae:
belns_db.collection(arangodb.belns_metadata_name).replace(metadata) | python | def load_terms(fo: IO, metadata: dict, forceupdate: bool):
"""Load terms into Elasticsearch and ArangoDB
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
fo: file obj - terminology file
metadata: dict containing the metadata for terminology
forceupdate: force full update - e.g. don't leave Elasticsearch indexes
alone if their version ID matches
"""
version = metadata["metadata"]["version"]
# LOAD TERMS INTO Elasticsearch
with timy.Timer("Load Terms") as timer:
es = bel.db.elasticsearch.get_client()
es_version = version.replace("T", "").replace("-", "").replace(":", "")
index_prefix = f"terms_{metadata['metadata']['namespace'].lower()}"
index_name = f"{index_prefix}_{es_version}"
# Create index with mapping
if not elasticsearch.index_exists(es, index_name):
elasticsearch.create_terms_index(es, index_name)
elif forceupdate: # force an update to the index
index_name += "_alt"
elasticsearch.create_terms_index(es, index_name)
else:
return # Skip loading if not forced and not a new namespace
terms_iterator = terms_iterator_for_elasticsearch(fo, index_name)
elasticsearch.bulk_load_docs(es, terms_iterator)
# Remove old namespace index
index_names = elasticsearch.get_all_index_names(es)
for name in index_names:
if name != index_name and index_prefix in name:
elasticsearch.delete_index(es, name)
# Add terms_alias to this index
elasticsearch.add_index_alias(es, index_name, terms_alias)
log.info(
"Load namespace terms",
elapsed=timer.elapsed,
namespace=metadata["metadata"]["namespace"],
)
# LOAD EQUIVALENCES INTO ArangoDB
with timy.Timer("Load Term Equivalences") as timer:
arango_client = arangodb.get_client()
belns_db = arangodb.get_belns_handle(arango_client)
arangodb.batch_load_docs(
belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate="update"
)
log.info(
"Loaded namespace equivalences",
elapsed=timer.elapsed,
namespace=metadata["metadata"]["namespace"],
)
# Clean up old entries
remove_old_equivalence_edges = f"""
FOR edge in equivalence_edges
FILTER edge.source == "{metadata["metadata"]["namespace"]}"
FILTER edge.version != "{version}"
REMOVE edge IN equivalence_edges
"""
remove_old_equivalence_nodes = f"""
FOR node in equivalence_nodes
FILTER node.source == "{metadata["metadata"]["namespace"]}"
FILTER node.version != "{version}"
REMOVE node IN equivalence_nodes
"""
arangodb.aql_query(belns_db, remove_old_equivalence_edges)
arangodb.aql_query(belns_db, remove_old_equivalence_nodes)
# Add metadata to resource metadata collection
metadata["_key"] = f"Namespace_{metadata['metadata']['namespace']}"
try:
belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
except ArangoError as ae:
belns_db.collection(arangodb.belns_metadata_name).replace(metadata) | [
"def",
"load_terms",
"(",
"fo",
":",
"IO",
",",
"metadata",
":",
"dict",
",",
"forceupdate",
":",
"bool",
")",
":",
"version",
"=",
"metadata",
"[",
"\"metadata\"",
"]",
"[",
"\"version\"",
"]",
"# LOAD TERMS INTO Elasticsearch",
"with",
"timy",
".",
"Timer"... | Load terms into Elasticsearch and ArangoDB
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
fo: file obj - terminology file
metadata: dict containing the metadata for terminology
forceupdate: force full update - e.g. don't leave Elasticsearch indexes
alone if their version ID matches | [
"Load",
"terms",
"into",
"Elasticsearch",
"and",
"ArangoDB"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/namespace.py#L27-L112 | train | 50,955 |
belbio/bel | bel/resources/namespace.py | terms_iterator_for_elasticsearch | def terms_iterator_for_elasticsearch(fo: IO, index_name: str):
"""Add index_name to term documents for bulk load"""
species_list = config["bel_resources"].get("species_list", [])
fo.seek(0) # Seek back to beginning of file
with gzip.open(fo, "rt") as f:
for line in f:
term = json.loads(line)
# skip if not term record (e.g. is a metadata record)
if "term" not in term:
continue
term = term["term"]
# Filter species if enabled in config
species_id = term.get("species_id", None)
if species_list and species_id and species_id not in species_list:
continue
all_term_ids = set()
for term_id in [term["id"]] + term.get("alt_ids", []):
all_term_ids.add(term_id)
all_term_ids.add(lowercase_term_id(term_id))
term["alt_ids"] = copy.copy(list(all_term_ids))
yield {
"_op_type": "index",
"_index": index_name,
"_type": "term",
"_id": term["id"],
"_source": copy.deepcopy(term),
} | python | def terms_iterator_for_elasticsearch(fo: IO, index_name: str):
"""Add index_name to term documents for bulk load"""
species_list = config["bel_resources"].get("species_list", [])
fo.seek(0) # Seek back to beginning of file
with gzip.open(fo, "rt") as f:
for line in f:
term = json.loads(line)
# skip if not term record (e.g. is a metadata record)
if "term" not in term:
continue
term = term["term"]
# Filter species if enabled in config
species_id = term.get("species_id", None)
if species_list and species_id and species_id not in species_list:
continue
all_term_ids = set()
for term_id in [term["id"]] + term.get("alt_ids", []):
all_term_ids.add(term_id)
all_term_ids.add(lowercase_term_id(term_id))
term["alt_ids"] = copy.copy(list(all_term_ids))
yield {
"_op_type": "index",
"_index": index_name,
"_type": "term",
"_id": term["id"],
"_source": copy.deepcopy(term),
} | [
"def",
"terms_iterator_for_elasticsearch",
"(",
"fo",
":",
"IO",
",",
"index_name",
":",
"str",
")",
":",
"species_list",
"=",
"config",
"[",
"\"bel_resources\"",
"]",
".",
"get",
"(",
"\"species_list\"",
",",
"[",
"]",
")",
"fo",
".",
"seek",
"(",
"0",
... | Add index_name to term documents for bulk load | [
"Add",
"index_name",
"to",
"term",
"documents",
"for",
"bulk",
"load"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/namespace.py#L206-L238 | train | 50,956 |
belbio/bel | bel/nanopub/pubmed.py | get_pubtator | def get_pubtator(pmid):
"""Get Pubtator Bioconcepts from Pubmed Abstract
Re-configure the denotations into an annotation dictionary format
and collapse duplicate terms so that their spans are in a list.
"""
r = get_url(PUBTATOR_TMPL.replace("PMID", pmid), timeout=10)
if r and r.status_code == 200:
pubtator = r.json()[0]
else:
log.error(
f"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}"
)
return None
known_types = ["CHEBI", "Chemical", "Disease", "Gene", "Species"]
for idx, anno in enumerate(pubtator["denotations"]):
s_match = re.match(r"(\w+):(\w+)", anno["obj"])
c_match = re.match(r"(\w+):(\w+):(\w+)", anno["obj"])
if c_match:
(ctype, namespace, cid) = (
c_match.group(1),
c_match.group(2),
c_match.group(3),
)
if ctype not in known_types:
log.info(f"{ctype} not in known_types for Pubtator")
if namespace not in known_types:
log.info(f"{namespace} not in known_types for Pubtator")
pubtator["denotations"][idx][
"obj"
] = f'{pubtator_ns_convert.get(namespace, "UNKNOWN")}:{cid}'
pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(
ctype, None
)
pubtator["denotations"][idx][
"annotation_type"
] = pubtator_annotation_convert.get(ctype, None)
elif s_match:
(ctype, cid) = (s_match.group(1), s_match.group(2))
if ctype not in known_types:
log.info(f"{ctype} not in known_types for Pubtator")
pubtator["denotations"][idx][
"obj"
] = f'{pubtator_ns_convert.get(ctype, "UNKNOWN")}:{cid}'
pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(
ctype, None
)
pubtator["denotations"][idx][
"annotation_type"
] = pubtator_annotation_convert.get(ctype, None)
annotations = {}
for anno in pubtator["denotations"]:
log.info(anno)
if anno["obj"] not in annotations:
annotations[anno["obj"]] = {"spans": [anno["span"]]}
annotations[anno["obj"]]["entity_types"] = [anno.get("entity_type", [])]
annotations[anno["obj"]]["annotation_types"] = [
anno.get("annotation_type", [])
]
else:
annotations[anno["obj"]]["spans"].append(anno["span"])
del pubtator["denotations"]
pubtator["annotations"] = copy.deepcopy(annotations)
return pubtator | python | def get_pubtator(pmid):
"""Get Pubtator Bioconcepts from Pubmed Abstract
Re-configure the denotations into an annotation dictionary format
and collapse duplicate terms so that their spans are in a list.
"""
r = get_url(PUBTATOR_TMPL.replace("PMID", pmid), timeout=10)
if r and r.status_code == 200:
pubtator = r.json()[0]
else:
log.error(
f"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}"
)
return None
known_types = ["CHEBI", "Chemical", "Disease", "Gene", "Species"]
for idx, anno in enumerate(pubtator["denotations"]):
s_match = re.match(r"(\w+):(\w+)", anno["obj"])
c_match = re.match(r"(\w+):(\w+):(\w+)", anno["obj"])
if c_match:
(ctype, namespace, cid) = (
c_match.group(1),
c_match.group(2),
c_match.group(3),
)
if ctype not in known_types:
log.info(f"{ctype} not in known_types for Pubtator")
if namespace not in known_types:
log.info(f"{namespace} not in known_types for Pubtator")
pubtator["denotations"][idx][
"obj"
] = f'{pubtator_ns_convert.get(namespace, "UNKNOWN")}:{cid}'
pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(
ctype, None
)
pubtator["denotations"][idx][
"annotation_type"
] = pubtator_annotation_convert.get(ctype, None)
elif s_match:
(ctype, cid) = (s_match.group(1), s_match.group(2))
if ctype not in known_types:
log.info(f"{ctype} not in known_types for Pubtator")
pubtator["denotations"][idx][
"obj"
] = f'{pubtator_ns_convert.get(ctype, "UNKNOWN")}:{cid}'
pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(
ctype, None
)
pubtator["denotations"][idx][
"annotation_type"
] = pubtator_annotation_convert.get(ctype, None)
annotations = {}
for anno in pubtator["denotations"]:
log.info(anno)
if anno["obj"] not in annotations:
annotations[anno["obj"]] = {"spans": [anno["span"]]}
annotations[anno["obj"]]["entity_types"] = [anno.get("entity_type", [])]
annotations[anno["obj"]]["annotation_types"] = [
anno.get("annotation_type", [])
]
else:
annotations[anno["obj"]]["spans"].append(anno["span"])
del pubtator["denotations"]
pubtator["annotations"] = copy.deepcopy(annotations)
return pubtator | [
"def",
"get_pubtator",
"(",
"pmid",
")",
":",
"r",
"=",
"get_url",
"(",
"PUBTATOR_TMPL",
".",
"replace",
"(",
"\"PMID\"",
",",
"pmid",
")",
",",
"timeout",
"=",
"10",
")",
"if",
"r",
"and",
"r",
".",
"status_code",
"==",
"200",
":",
"pubtator",
"=",
... | Get Pubtator Bioconcepts from Pubmed Abstract
Re-configure the denotations into an annotation dictionary format
and collapse duplicate terms so that their spans are in a list. | [
"Get",
"Pubtator",
"Bioconcepts",
"from",
"Pubmed",
"Abstract"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L62-L135 | train | 50,957 |
belbio/bel | bel/nanopub/pubmed.py | process_pub_date | def process_pub_date(year, mon, day):
"""Create pub_date from what Pubmed provides in Journal PubDate entry
"""
pub_date = None
if year and re.match("[a-zA-Z]+", mon):
pub_date = datetime.datetime.strptime(
f"{year}-{mon}-{day}", "%Y-%b-%d"
).strftime("%Y-%m-%d")
elif year:
pub_date = f"{year}-{mon}-{day}"
return pub_date | python | def process_pub_date(year, mon, day):
"""Create pub_date from what Pubmed provides in Journal PubDate entry
"""
pub_date = None
if year and re.match("[a-zA-Z]+", mon):
pub_date = datetime.datetime.strptime(
f"{year}-{mon}-{day}", "%Y-%b-%d"
).strftime("%Y-%m-%d")
elif year:
pub_date = f"{year}-{mon}-{day}"
return pub_date | [
"def",
"process_pub_date",
"(",
"year",
",",
"mon",
",",
"day",
")",
":",
"pub_date",
"=",
"None",
"if",
"year",
"and",
"re",
".",
"match",
"(",
"\"[a-zA-Z]+\"",
",",
"mon",
")",
":",
"pub_date",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
... | Create pub_date from what Pubmed provides in Journal PubDate entry | [
"Create",
"pub_date",
"from",
"what",
"Pubmed",
"provides",
"in",
"Journal",
"PubDate",
"entry"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L138-L150 | train | 50,958 |
belbio/bel | bel/nanopub/pubmed.py | enhance_pubmed_annotations | def enhance_pubmed_annotations(pubmed: Mapping[str, Any]) -> Mapping[str, Any]:
"""Enhance pubmed namespace IDs
Add additional entity and annotation types to annotations
Use preferred id for namespaces as needed
Add strings from Title, Abstract matching Pubtator BioConcept spans
NOTE - basically duplicated code with bel_api:api.services.pubmed
Args:
pubmed
Returns:
pubmed object
"""
text = pubmed["title"] + pubmed["abstract"]
annotations = {}
for nsarg in pubmed["annotations"]:
url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{url_path_param_quoting(nsarg)}'
log.info(f"URL: {url}")
r = get_url(url)
log.info(f"Result: {r}")
new_nsarg = ""
if r and r.status_code == 200:
term = r.json()
new_nsarg = bel_utils.convert_nsarg(term["id"], decanonicalize=True)
pubmed["annotations"][nsarg]["name"] = term["name"]
pubmed["annotations"][nsarg]["label"] = term["label"]
pubmed["annotations"][nsarg]["entity_types"] = list(
set(
pubmed["annotations"][nsarg]["entity_types"]
+ term.get("entity_types", [])
)
)
pubmed["annotations"][nsarg]["annotation_types"] = list(
set(
pubmed["annotations"][nsarg]["annotation_types"]
+ term.get("annotation_types", [])
)
)
if new_nsarg != nsarg:
annotations[new_nsarg] = copy.deepcopy(pubmed["annotations"][nsarg])
else:
annotations[nsarg] = copy.deepcopy(pubmed["annotations"][nsarg])
for nsarg in annotations:
for idx, span in enumerate(annotations[nsarg]["spans"]):
string = text[span["begin"] - 1 : span["end"] - 1]
annotations[nsarg]["spans"][idx]["text"] = string
pubmed["annotations"] = copy.deepcopy(annotations)
return pubmed | python | def enhance_pubmed_annotations(pubmed: Mapping[str, Any]) -> Mapping[str, Any]:
"""Enhance pubmed namespace IDs
Add additional entity and annotation types to annotations
Use preferred id for namespaces as needed
Add strings from Title, Abstract matching Pubtator BioConcept spans
NOTE - basically duplicated code with bel_api:api.services.pubmed
Args:
pubmed
Returns:
pubmed object
"""
text = pubmed["title"] + pubmed["abstract"]
annotations = {}
for nsarg in pubmed["annotations"]:
url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{url_path_param_quoting(nsarg)}'
log.info(f"URL: {url}")
r = get_url(url)
log.info(f"Result: {r}")
new_nsarg = ""
if r and r.status_code == 200:
term = r.json()
new_nsarg = bel_utils.convert_nsarg(term["id"], decanonicalize=True)
pubmed["annotations"][nsarg]["name"] = term["name"]
pubmed["annotations"][nsarg]["label"] = term["label"]
pubmed["annotations"][nsarg]["entity_types"] = list(
set(
pubmed["annotations"][nsarg]["entity_types"]
+ term.get("entity_types", [])
)
)
pubmed["annotations"][nsarg]["annotation_types"] = list(
set(
pubmed["annotations"][nsarg]["annotation_types"]
+ term.get("annotation_types", [])
)
)
if new_nsarg != nsarg:
annotations[new_nsarg] = copy.deepcopy(pubmed["annotations"][nsarg])
else:
annotations[nsarg] = copy.deepcopy(pubmed["annotations"][nsarg])
for nsarg in annotations:
for idx, span in enumerate(annotations[nsarg]["spans"]):
string = text[span["begin"] - 1 : span["end"] - 1]
annotations[nsarg]["spans"][idx]["text"] = string
pubmed["annotations"] = copy.deepcopy(annotations)
return pubmed | [
"def",
"enhance_pubmed_annotations",
"(",
"pubmed",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"text",
"=",
"pubmed",
"[",
"\"title\"",
"]",
"+",
"pubmed",
"[",
"\"abstract\"",
"]",
"annotations",... | Enhance pubmed namespace IDs
Add additional entity and annotation types to annotations
Use preferred id for namespaces as needed
Add strings from Title, Abstract matching Pubtator BioConcept spans
NOTE - basically duplicated code with bel_api:api.services.pubmed
Args:
pubmed
Returns:
pubmed object | [
"Enhance",
"pubmed",
"namespace",
"IDs"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L242-L299 | train | 50,959 |
belbio/bel | bel/terms/orthologs.py | get_orthologs | def get_orthologs(canonical_gene_id: str, species: list = []) -> List[dict]:
"""Get orthologs for given gene_id and species
Canonicalize prior to ortholog query and decanonicalize
the resulting ortholog
Args:
canonical_gene_id: canonical gene_id for which to retrieve ortholog
species: target species for ortholog - tax id format TAX:<number>
Returns:
List[dict]: {'tax_id': <tax_id>, 'canonical': canonical_id, 'decanonical': decanonical_id}
"""
gene_id_key = bel.db.arangodb.arango_id_to_key(canonical_gene_id)
orthologs = {}
if species:
query_filter = f"FILTER vertex.tax_id IN {species}"
query = f"""
LET start = (
FOR vertex in ortholog_nodes
FILTER vertex._key == "{gene_id_key}"
RETURN {{ "name": vertex.name, "tax_id": vertex.tax_id }}
)
LET orthologs = (
FOR vertex IN 1..3
ANY "ortholog_nodes/{gene_id_key}" ortholog_edges
OPTIONS {{ bfs: true, uniqueVertices : 'global' }}
{query_filter}
RETURN DISTINCT {{ "name": vertex.name, "tax_id": vertex.tax_id }}
)
RETURN {{ 'orthologs': FLATTEN(UNION(start, orthologs)) }}
"""
cursor = belns_db.aql.execute(query, batch_size=20)
results = cursor.pop()
for ortholog in results["orthologs"]:
norms = bel.terms.terms.get_normalized_terms(ortholog["name"])
orthologs[ortholog["tax_id"]] = {
"canonical": norms["canonical"],
"decanonical": norms["decanonical"],
}
return orthologs | python | def get_orthologs(canonical_gene_id: str, species: list = []) -> List[dict]:
"""Get orthologs for given gene_id and species
Canonicalize prior to ortholog query and decanonicalize
the resulting ortholog
Args:
canonical_gene_id: canonical gene_id for which to retrieve ortholog
species: target species for ortholog - tax id format TAX:<number>
Returns:
List[dict]: {'tax_id': <tax_id>, 'canonical': canonical_id, 'decanonical': decanonical_id}
"""
gene_id_key = bel.db.arangodb.arango_id_to_key(canonical_gene_id)
orthologs = {}
if species:
query_filter = f"FILTER vertex.tax_id IN {species}"
query = f"""
LET start = (
FOR vertex in ortholog_nodes
FILTER vertex._key == "{gene_id_key}"
RETURN {{ "name": vertex.name, "tax_id": vertex.tax_id }}
)
LET orthologs = (
FOR vertex IN 1..3
ANY "ortholog_nodes/{gene_id_key}" ortholog_edges
OPTIONS {{ bfs: true, uniqueVertices : 'global' }}
{query_filter}
RETURN DISTINCT {{ "name": vertex.name, "tax_id": vertex.tax_id }}
)
RETURN {{ 'orthologs': FLATTEN(UNION(start, orthologs)) }}
"""
cursor = belns_db.aql.execute(query, batch_size=20)
results = cursor.pop()
for ortholog in results["orthologs"]:
norms = bel.terms.terms.get_normalized_terms(ortholog["name"])
orthologs[ortholog["tax_id"]] = {
"canonical": norms["canonical"],
"decanonical": norms["decanonical"],
}
return orthologs | [
"def",
"get_orthologs",
"(",
"canonical_gene_id",
":",
"str",
",",
"species",
":",
"list",
"=",
"[",
"]",
")",
"->",
"List",
"[",
"dict",
"]",
":",
"gene_id_key",
"=",
"bel",
".",
"db",
".",
"arangodb",
".",
"arango_id_to_key",
"(",
"canonical_gene_id",
... | Get orthologs for given gene_id and species
Canonicalize prior to ortholog query and decanonicalize
the resulting ortholog
Args:
canonical_gene_id: canonical gene_id for which to retrieve ortholog
species: target species for ortholog - tax id format TAX:<number>
Returns:
List[dict]: {'tax_id': <tax_id>, 'canonical': canonical_id, 'decanonical': decanonical_id} | [
"Get",
"orthologs",
"for",
"given",
"gene_id",
"and",
"species"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/orthologs.py#L16-L63 | train | 50,960 |
PayEx/pypayex | payex/utils.py | normalize_value | def normalize_value(val):
"""
Normalize strings with booleans into Python types.
"""
if val is not None:
if val.lower() == 'false':
val = False
elif val.lower() == 'true':
val = True
return val | python | def normalize_value(val):
"""
Normalize strings with booleans into Python types.
"""
if val is not None:
if val.lower() == 'false':
val = False
elif val.lower() == 'true':
val = True
return val | [
"def",
"normalize_value",
"(",
"val",
")",
":",
"if",
"val",
"is",
"not",
"None",
":",
"if",
"val",
".",
"lower",
"(",
")",
"==",
"'false'",
":",
"val",
"=",
"False",
"elif",
"val",
".",
"lower",
"(",
")",
"==",
"'true'",
":",
"val",
"=",
"True",... | Normalize strings with booleans into Python types. | [
"Normalize",
"strings",
"with",
"booleans",
"into",
"Python",
"types",
"."
] | 549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab | https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/utils.py#L12-L23 | train | 50,961 |
PayEx/pypayex | payex/utils.py | normalize_dictionary_values | def normalize_dictionary_values(dictionary):
"""
Normalizes the values in a dictionary recursivly.
"""
for key, val in dictionary.iteritems():
if isinstance(val, dict):
dictionary[key] = normalize_dictionary_values(val)
elif isinstance(val, list):
dictionary[key] = list(val)
else:
dictionary[key] = normalize_value(val)
return dictionary | python | def normalize_dictionary_values(dictionary):
"""
Normalizes the values in a dictionary recursivly.
"""
for key, val in dictionary.iteritems():
if isinstance(val, dict):
dictionary[key] = normalize_dictionary_values(val)
elif isinstance(val, list):
dictionary[key] = list(val)
else:
dictionary[key] = normalize_value(val)
return dictionary | [
"def",
"normalize_dictionary_values",
"(",
"dictionary",
")",
":",
"for",
"key",
",",
"val",
"in",
"dictionary",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"dictionary",
"[",
"key",
"]",
"=",
"normalize_dictiona... | Normalizes the values in a dictionary recursivly. | [
"Normalizes",
"the",
"values",
"in",
"a",
"dictionary",
"recursivly",
"."
] | 549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab | https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/utils.py#L25-L38 | train | 50,962 |
belbio/bel | bel/utils.py | timespan | def timespan(start_time):
"""Return time in milliseconds from start_time"""
timespan = datetime.datetime.now() - start_time
timespan_ms = timespan.total_seconds() * 1000
return timespan_ms | python | def timespan(start_time):
"""Return time in milliseconds from start_time"""
timespan = datetime.datetime.now() - start_time
timespan_ms = timespan.total_seconds() * 1000
return timespan_ms | [
"def",
"timespan",
"(",
"start_time",
")",
":",
"timespan",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"start_time",
"timespan_ms",
"=",
"timespan",
".",
"total_seconds",
"(",
")",
"*",
"1000",
"return",
"timespan_ms"
] | Return time in milliseconds from start_time | [
"Return",
"time",
"in",
"milliseconds",
"from",
"start_time"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L59-L64 | train | 50,963 |
belbio/bel | bel/utils.py | first_true | def first_true(iterable, default=False, pred=None):
"""Returns the first true value in the iterable.
If no true value is found, returns *default*
If *pred* is not None, returns the first item
for which pred(item) is true.
"""
# first_true([a,b,c], x) --> a or b or c or x
# first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
return next(filter(pred, iterable), default) | python | def first_true(iterable, default=False, pred=None):
"""Returns the first true value in the iterable.
If no true value is found, returns *default*
If *pred* is not None, returns the first item
for which pred(item) is true.
"""
# first_true([a,b,c], x) --> a or b or c or x
# first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
return next(filter(pred, iterable), default) | [
"def",
"first_true",
"(",
"iterable",
",",
"default",
"=",
"False",
",",
"pred",
"=",
"None",
")",
":",
"# first_true([a,b,c], x) --> a or b or c or x",
"# first_true([a,b], x, f) --> a if f(a) else b if f(b) else x",
"return",
"next",
"(",
"filter",
"(",
"pred",
",",
"... | Returns the first true value in the iterable.
If no true value is found, returns *default*
If *pred* is not None, returns the first item
for which pred(item) is true. | [
"Returns",
"the",
"first",
"true",
"value",
"in",
"the",
"iterable",
"."
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L90-L101 | train | 50,964 |
belbio/bel | bel/utils.py | _create_hash_from_doc | def _create_hash_from_doc(doc: Mapping[str, Any]) -> str:
"""Create hash Id from edge record
Args:
edge (Mapping[str, Any]): edge record to create hash from
Returns:
str: Murmur3 128 bit hash
"""
doc_string = json.dumps(doc, sort_keys=True)
return _create_hash(doc_string) | python | def _create_hash_from_doc(doc: Mapping[str, Any]) -> str:
"""Create hash Id from edge record
Args:
edge (Mapping[str, Any]): edge record to create hash from
Returns:
str: Murmur3 128 bit hash
"""
doc_string = json.dumps(doc, sort_keys=True)
return _create_hash(doc_string) | [
"def",
"_create_hash_from_doc",
"(",
"doc",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"str",
":",
"doc_string",
"=",
"json",
".",
"dumps",
"(",
"doc",
",",
"sort_keys",
"=",
"True",
")",
"return",
"_create_hash",
"(",
"doc_string",
")"
] | Create hash Id from edge record
Args:
edge (Mapping[str, Any]): edge record to create hash from
Returns:
str: Murmur3 128 bit hash | [
"Create",
"hash",
"Id",
"from",
"edge",
"record"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L104-L115 | train | 50,965 |
belbio/bel | bel/utils.py | Timer.elapsed | def elapsed(self):
""" Return the current elapsed time since start
If the `elapsed` property is called in the context manager scope,
the elapsed time bewteen start and property access is returned.
However, if it is accessed outside of the context manager scope,
it returns the elapsed time bewteen entering and exiting the scope.
The `elapsed` property can thus be accessed at different points within
the context manager scope, to time different parts of the block.
"""
if self.end is None:
# if elapsed is called in the context manager scope
return (self() - self.start) * self.factor
else:
# if elapsed is called out of the context manager scope
return (self.end - self.start) * self.factor | python | def elapsed(self):
""" Return the current elapsed time since start
If the `elapsed` property is called in the context manager scope,
the elapsed time bewteen start and property access is returned.
However, if it is accessed outside of the context manager scope,
it returns the elapsed time bewteen entering and exiting the scope.
The `elapsed` property can thus be accessed at different points within
the context manager scope, to time different parts of the block.
"""
if self.end is None:
# if elapsed is called in the context manager scope
return (self() - self.start) * self.factor
else:
# if elapsed is called out of the context manager scope
return (self.end - self.start) * self.factor | [
"def",
"elapsed",
"(",
"self",
")",
":",
"if",
"self",
".",
"end",
"is",
"None",
":",
"# if elapsed is called in the context manager scope",
"return",
"(",
"self",
"(",
")",
"-",
"self",
".",
"start",
")",
"*",
"self",
".",
"factor",
"else",
":",
"# if ela... | Return the current elapsed time since start
If the `elapsed` property is called in the context manager scope,
the elapsed time bewteen start and property access is returned.
However, if it is accessed outside of the context manager scope,
it returns the elapsed time bewteen entering and exiting the scope.
The `elapsed` property can thus be accessed at different points within
the context manager scope, to time different parts of the block. | [
"Return",
"the",
"current",
"elapsed",
"time",
"since",
"start",
"If",
"the",
"elapsed",
"property",
"is",
"called",
"in",
"the",
"context",
"manager",
"scope",
"the",
"elapsed",
"time",
"bewteen",
"start",
"and",
"property",
"access",
"is",
"returned",
".",
... | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L245-L259 | train | 50,966 |
belbio/bel | bel/edge/pipeline.py | load_edges_into_db | def load_edges_into_db(
nanopub_id: str,
nanopub_url: str,
edges: list = [],
edges_coll_name: str = edges_coll_name,
nodes_coll_name: str = nodes_coll_name,
):
"""Load edges into Edgestore"""
start_time = datetime.datetime.now()
# Clean out edges for nanopub in edgestore
query = f"""
FOR edge IN {edges_coll_name}
FILTER edge.nanopub_id == "{nanopub_id}"
REMOVE edge IN edges
"""
try:
edgestore_db.aql.execute(query)
except Exception as e:
log.debug(f"Could not remove nanopub-related edges: {query} msg: {e}")
end_time1 = datetime.datetime.now()
delta_ms = f"{(end_time1 - start_time).total_seconds() * 1000:.1f}"
log.info("Timing - Delete edges for nanopub", delta_ms=delta_ms)
# Clean out errors for nanopub in pipeline_errors
query = f"""
FOR e IN pipeline_errors
FILTER e.nanopub_id == "{nanopub_id}"
REMOVE e IN pipeline_errors
"""
try:
edgestore_db.aql.execute(query)
except Exception as e:
log.debug(f"Could not remove nanopub-related errors: {query} msg: {e}")
end_time2 = datetime.datetime.now()
delta_ms = f"{(end_time2 - end_time1).total_seconds() * 1000:.1f}"
log.info("Timing - Delete pipeline errors for nanopub", delta_ms=delta_ms)
# Collect edges and nodes to load into arangodb
node_list, edge_list = [], []
for doc in edge_iterator(edges=edges):
if doc[0] == "nodes":
node_list.append(doc[1])
else:
edge_list.append(doc[1])
end_time3 = datetime.datetime.now()
delta_ms = f"{(end_time3 - end_time2).total_seconds() * 1000:.1f}"
log.info("Timing - Collect edges and nodes", delta_ms=delta_ms)
try:
results = edgestore_db.collection(edges_coll_name).import_bulk(
edge_list, on_duplicate="replace", halt_on_error=False
)
except Exception as e:
log.error(f"Could not load edges msg: {e}")
end_time4 = datetime.datetime.now()
delta_ms = f"{(end_time4 - end_time3).total_seconds() * 1000:.1f}"
log.info("Timing - Load edges into edgestore", delta_ms=delta_ms)
try:
results = edgestore_db.collection(nodes_coll_name).import_bulk(
node_list, on_duplicate="replace", halt_on_error=False
)
except Exception as e:
log.error(f"Could not load nodes msg: {e}")
end_time5 = datetime.datetime.now()
delta_ms = f"{(end_time5 - end_time4).total_seconds() * 1000:.1f}"
log.info("Timing - Load nodes into edgestore", delta_ms=delta_ms) | python | def load_edges_into_db(
nanopub_id: str,
nanopub_url: str,
edges: list = [],
edges_coll_name: str = edges_coll_name,
nodes_coll_name: str = nodes_coll_name,
):
"""Load edges into Edgestore"""
start_time = datetime.datetime.now()
# Clean out edges for nanopub in edgestore
query = f"""
FOR edge IN {edges_coll_name}
FILTER edge.nanopub_id == "{nanopub_id}"
REMOVE edge IN edges
"""
try:
edgestore_db.aql.execute(query)
except Exception as e:
log.debug(f"Could not remove nanopub-related edges: {query} msg: {e}")
end_time1 = datetime.datetime.now()
delta_ms = f"{(end_time1 - start_time).total_seconds() * 1000:.1f}"
log.info("Timing - Delete edges for nanopub", delta_ms=delta_ms)
# Clean out errors for nanopub in pipeline_errors
query = f"""
FOR e IN pipeline_errors
FILTER e.nanopub_id == "{nanopub_id}"
REMOVE e IN pipeline_errors
"""
try:
edgestore_db.aql.execute(query)
except Exception as e:
log.debug(f"Could not remove nanopub-related errors: {query} msg: {e}")
end_time2 = datetime.datetime.now()
delta_ms = f"{(end_time2 - end_time1).total_seconds() * 1000:.1f}"
log.info("Timing - Delete pipeline errors for nanopub", delta_ms=delta_ms)
# Collect edges and nodes to load into arangodb
node_list, edge_list = [], []
for doc in edge_iterator(edges=edges):
if doc[0] == "nodes":
node_list.append(doc[1])
else:
edge_list.append(doc[1])
end_time3 = datetime.datetime.now()
delta_ms = f"{(end_time3 - end_time2).total_seconds() * 1000:.1f}"
log.info("Timing - Collect edges and nodes", delta_ms=delta_ms)
try:
results = edgestore_db.collection(edges_coll_name).import_bulk(
edge_list, on_duplicate="replace", halt_on_error=False
)
except Exception as e:
log.error(f"Could not load edges msg: {e}")
end_time4 = datetime.datetime.now()
delta_ms = f"{(end_time4 - end_time3).total_seconds() * 1000:.1f}"
log.info("Timing - Load edges into edgestore", delta_ms=delta_ms)
try:
results = edgestore_db.collection(nodes_coll_name).import_bulk(
node_list, on_duplicate="replace", halt_on_error=False
)
except Exception as e:
log.error(f"Could not load nodes msg: {e}")
end_time5 = datetime.datetime.now()
delta_ms = f"{(end_time5 - end_time4).total_seconds() * 1000:.1f}"
log.info("Timing - Load nodes into edgestore", delta_ms=delta_ms) | [
"def",
"load_edges_into_db",
"(",
"nanopub_id",
":",
"str",
",",
"nanopub_url",
":",
"str",
",",
"edges",
":",
"list",
"=",
"[",
"]",
",",
"edges_coll_name",
":",
"str",
"=",
"edges_coll_name",
",",
"nodes_coll_name",
":",
"str",
"=",
"nodes_coll_name",
",",... | Load edges into Edgestore | [
"Load",
"edges",
"into",
"Edgestore"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/pipeline.py#L141-L215 | train | 50,967 |
belbio/bel | bel/edge/pipeline.py | edge_iterator | def edge_iterator(edges=[], edges_fn=None):
"""Yield documents from edge for loading into ArangoDB"""
for edge in itertools.chain(edges, files.read_edges(edges_fn)):
subj = copy.deepcopy(edge["edge"]["subject"])
subj_id = str(utils._create_hash_from_doc(subj))
subj["_key"] = subj_id
obj = copy.deepcopy(edge["edge"]["object"])
obj_id = str(utils._create_hash_from_doc(obj))
obj["_key"] = obj_id
relation = copy.deepcopy(edge["edge"]["relation"])
relation["_from"] = f"nodes/{subj_id}"
relation["_to"] = f"nodes/{obj_id}"
# Create edge _key
relation_hash = copy.deepcopy(relation)
relation_hash.pop("edge_dt", None)
relation_hash.pop("edge_hash", None)
relation_hash.pop("nanopub_dt", None)
relation_hash.pop("nanopub_url", None)
relation_hash.pop("subject_canon", None)
relation_hash.pop("object_canon", None)
relation_hash.pop("public_flag", None)
relation_hash.pop("metadata", None)
relation_id = str(utils._create_hash_from_doc(relation_hash))
relation["_key"] = relation_id
if edge.get("nanopub_id", None):
if "metadata" not in relation:
relation["metadata"] = {}
relation["metadata"]["nanopub_id"] = edge["nanopub_id"]
yield ("nodes", subj)
yield ("nodes", obj)
yield ("edges", relation) | python | def edge_iterator(edges=[], edges_fn=None):
"""Yield documents from edge for loading into ArangoDB"""
for edge in itertools.chain(edges, files.read_edges(edges_fn)):
subj = copy.deepcopy(edge["edge"]["subject"])
subj_id = str(utils._create_hash_from_doc(subj))
subj["_key"] = subj_id
obj = copy.deepcopy(edge["edge"]["object"])
obj_id = str(utils._create_hash_from_doc(obj))
obj["_key"] = obj_id
relation = copy.deepcopy(edge["edge"]["relation"])
relation["_from"] = f"nodes/{subj_id}"
relation["_to"] = f"nodes/{obj_id}"
# Create edge _key
relation_hash = copy.deepcopy(relation)
relation_hash.pop("edge_dt", None)
relation_hash.pop("edge_hash", None)
relation_hash.pop("nanopub_dt", None)
relation_hash.pop("nanopub_url", None)
relation_hash.pop("subject_canon", None)
relation_hash.pop("object_canon", None)
relation_hash.pop("public_flag", None)
relation_hash.pop("metadata", None)
relation_id = str(utils._create_hash_from_doc(relation_hash))
relation["_key"] = relation_id
if edge.get("nanopub_id", None):
if "metadata" not in relation:
relation["metadata"] = {}
relation["metadata"]["nanopub_id"] = edge["nanopub_id"]
yield ("nodes", subj)
yield ("nodes", obj)
yield ("edges", relation) | [
"def",
"edge_iterator",
"(",
"edges",
"=",
"[",
"]",
",",
"edges_fn",
"=",
"None",
")",
":",
"for",
"edge",
"in",
"itertools",
".",
"chain",
"(",
"edges",
",",
"files",
".",
"read_edges",
"(",
"edges_fn",
")",
")",
":",
"subj",
"=",
"copy",
".",
"d... | Yield documents from edge for loading into ArangoDB | [
"Yield",
"documents",
"from",
"edge",
"for",
"loading",
"into",
"ArangoDB"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/pipeline.py#L218-L256 | train | 50,968 |
belbio/bel | bel/nanopub/nanopubstore.py | update_nanopubstore_start_dt | def update_nanopubstore_start_dt(url: str, start_dt: str):
"""Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of last query against nanopubstore for new ID's
"""
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if not start_dates_doc:
start_dates_doc = {
"_key": start_dates_doc_key,
"start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}],
}
state_mgmt.insert(start_dates_doc)
else:
for idx, start_date in enumerate(start_dates_doc["start_dates"]):
if start_date["nanopubstore"] == hostname:
start_dates_doc["start_dates"][idx]["start_dt"] = start_dt
break
else:
start_dates_doc["start_dates"].append(
{"nanopubstore": hostname, "start_dt": start_dt}
)
state_mgmt.replace(start_dates_doc) | python | def update_nanopubstore_start_dt(url: str, start_dt: str):
"""Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of last query against nanopubstore for new ID's
"""
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if not start_dates_doc:
start_dates_doc = {
"_key": start_dates_doc_key,
"start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}],
}
state_mgmt.insert(start_dates_doc)
else:
for idx, start_date in enumerate(start_dates_doc["start_dates"]):
if start_date["nanopubstore"] == hostname:
start_dates_doc["start_dates"][idx]["start_dt"] = start_dt
break
else:
start_dates_doc["start_dates"].append(
{"nanopubstore": hostname, "start_dt": start_dt}
)
state_mgmt.replace(start_dates_doc) | [
"def",
"update_nanopubstore_start_dt",
"(",
"url",
":",
"str",
",",
"start_dt",
":",
"str",
")",
":",
"hostname",
"=",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"url",
")",
"[",
"1",
"]",
"start_dates_doc",
"=",
"state_mgmt",
".",
"get",
"(",
"start_... | Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of last query against nanopubstore for new ID's | [
"Add",
"nanopubstore",
"start_dt",
"to",
"belapi",
".",
"state_mgmt",
"collection"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L23-L50 | train | 50,969 |
belbio/bel | bel/nanopub/nanopubstore.py | get_nanopubstore_start_dt | def get_nanopubstore_start_dt(url: str):
"""Get last start_dt recorded for getting new nanopub ID's"""
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if start_dates_doc and start_dates_doc.get("start_dates"):
date = [
dt["start_dt"]
for dt in start_dates_doc["start_dates"]
if dt["nanopubstore"] == hostname
]
log.info(f"Selected start_dt: {date} len: {len(date)}")
if len(date) == 1:
return date[0]
return "1900-01-01T00:00:00.000Z" | python | def get_nanopubstore_start_dt(url: str):
"""Get last start_dt recorded for getting new nanopub ID's"""
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if start_dates_doc and start_dates_doc.get("start_dates"):
date = [
dt["start_dt"]
for dt in start_dates_doc["start_dates"]
if dt["nanopubstore"] == hostname
]
log.info(f"Selected start_dt: {date} len: {len(date)}")
if len(date) == 1:
return date[0]
return "1900-01-01T00:00:00.000Z" | [
"def",
"get_nanopubstore_start_dt",
"(",
"url",
":",
"str",
")",
":",
"hostname",
"=",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"url",
")",
"[",
"1",
"]",
"start_dates_doc",
"=",
"state_mgmt",
".",
"get",
"(",
"start_dates_doc_key",
")",
"if",
"start_... | Get last start_dt recorded for getting new nanopub ID's | [
"Get",
"last",
"start_dt",
"recorded",
"for",
"getting",
"new",
"nanopub",
"ID",
"s"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L53-L69 | train | 50,970 |
belbio/bel | bel/nanopub/nanopubstore.py | get_nanopub_urls | def get_nanopub_urls(ns_root_url: str = None, start_dt: str = None) -> dict:
"""Get modified and deleted nanopub urls
Limited by last datetime retrieved (start_dt). Modified includes new and updated nanopubs
Returns:
dict: {'modified': [], 'deleted': []}
"""
if not ns_root_url:
ns_root_url = config["bel_api"]["servers"]["nanopubstore"]
url = f"{ns_root_url}/nanopubs/timed"
if not start_dt:
start_dt = get_nanopubstore_start_dt(ns_root_url)
params = {"startTime": start_dt, "published": True}
# TODO - this is coming back without a status code in some cases - why?
r = bel.utils.get_url(url, params=params, cache=False)
if r and r.status_code == 200:
data = r.json()
new_start_dt = data["queryTime"]
update_nanopubstore_start_dt(ns_root_url, new_start_dt)
nanopub_urls = {"modified": [], "deleted": []}
# Deleted nanopubs
for nid in data["deleteddata"]:
nanopub_urls["deleted"].append(f"{ns_root_url}/nanopubs/{nid}")
# Modified nanopubs
for nid in data["data"]:
nanopub_urls["modified"].append(f"{ns_root_url}/nanopubs/{nid}")
return nanopub_urls
else:
log.error(
f"Bad request to Nanopubstore",
url=url,
status=r.status_code,
type="api_request",
)
return {} | python | def get_nanopub_urls(ns_root_url: str = None, start_dt: str = None) -> dict:
"""Get modified and deleted nanopub urls
Limited by last datetime retrieved (start_dt). Modified includes new and updated nanopubs
Returns:
dict: {'modified': [], 'deleted': []}
"""
if not ns_root_url:
ns_root_url = config["bel_api"]["servers"]["nanopubstore"]
url = f"{ns_root_url}/nanopubs/timed"
if not start_dt:
start_dt = get_nanopubstore_start_dt(ns_root_url)
params = {"startTime": start_dt, "published": True}
# TODO - this is coming back without a status code in some cases - why?
r = bel.utils.get_url(url, params=params, cache=False)
if r and r.status_code == 200:
data = r.json()
new_start_dt = data["queryTime"]
update_nanopubstore_start_dt(ns_root_url, new_start_dt)
nanopub_urls = {"modified": [], "deleted": []}
# Deleted nanopubs
for nid in data["deleteddata"]:
nanopub_urls["deleted"].append(f"{ns_root_url}/nanopubs/{nid}")
# Modified nanopubs
for nid in data["data"]:
nanopub_urls["modified"].append(f"{ns_root_url}/nanopubs/{nid}")
return nanopub_urls
else:
log.error(
f"Bad request to Nanopubstore",
url=url,
status=r.status_code,
type="api_request",
)
return {} | [
"def",
"get_nanopub_urls",
"(",
"ns_root_url",
":",
"str",
"=",
"None",
",",
"start_dt",
":",
"str",
"=",
"None",
")",
"->",
"dict",
":",
"if",
"not",
"ns_root_url",
":",
"ns_root_url",
"=",
"config",
"[",
"\"bel_api\"",
"]",
"[",
"\"servers\"",
"]",
"["... | Get modified and deleted nanopub urls
Limited by last datetime retrieved (start_dt). Modified includes new and updated nanopubs
Returns:
dict: {'modified': [], 'deleted': []} | [
"Get",
"modified",
"and",
"deleted",
"nanopub",
"urls"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L72-L115 | train | 50,971 |
belbio/bel | bel/nanopub/nanopubstore.py | get_nanopub | def get_nanopub(url):
"""Get Nanopub from nanopubstore given url"""
r = bel.utils.get_url(url, cache=False)
if r and r.json():
return r.json()
else:
return {} | python | def get_nanopub(url):
"""Get Nanopub from nanopubstore given url"""
r = bel.utils.get_url(url, cache=False)
if r and r.json():
return r.json()
else:
return {} | [
"def",
"get_nanopub",
"(",
"url",
")",
":",
"r",
"=",
"bel",
".",
"utils",
".",
"get_url",
"(",
"url",
",",
"cache",
"=",
"False",
")",
"if",
"r",
"and",
"r",
".",
"json",
"(",
")",
":",
"return",
"r",
".",
"json",
"(",
")",
"else",
":",
"ret... | Get Nanopub from nanopubstore given url | [
"Get",
"Nanopub",
"from",
"nanopubstore",
"given",
"url"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L118-L125 | train | 50,972 |
belbio/bel | bel/scripts.py | convert_belscript | def convert_belscript(ctx, input_fn, output_fn):
"""Convert belscript to nanopubs_bel format
This will convert the OpenBEL BELScript file format to
nanopub_bel-1.0.0 format.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
"""
try:
(
out_fh,
yaml_flag,
jsonl_flag,
json_flag,
) = bel.nanopub.files.create_nanopubs_fh(output_fn)
if yaml_flag or json_flag:
docs = []
# input file
if re.search("gz$", input_fn):
f = gzip.open(input_fn, "rt")
else:
f = open(input_fn, "rt")
# process belscript
for doc in bel.nanopub.belscripts.parse_belscript(f):
if yaml_flag or json_flag:
docs.append(doc)
elif jsonl_flag:
out_fh.write("{}\n".format(json.dumps(doc)))
if yaml_flag:
yaml.dump(docs, out_fh)
elif json_flag:
json.dump(docs, out_fh, indent=4)
finally:
f.close()
out_fh.close() | python | def convert_belscript(ctx, input_fn, output_fn):
"""Convert belscript to nanopubs_bel format
This will convert the OpenBEL BELScript file format to
nanopub_bel-1.0.0 format.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
"""
try:
(
out_fh,
yaml_flag,
jsonl_flag,
json_flag,
) = bel.nanopub.files.create_nanopubs_fh(output_fn)
if yaml_flag or json_flag:
docs = []
# input file
if re.search("gz$", input_fn):
f = gzip.open(input_fn, "rt")
else:
f = open(input_fn, "rt")
# process belscript
for doc in bel.nanopub.belscripts.parse_belscript(f):
if yaml_flag or json_flag:
docs.append(doc)
elif jsonl_flag:
out_fh.write("{}\n".format(json.dumps(doc)))
if yaml_flag:
yaml.dump(docs, out_fh)
elif json_flag:
json.dump(docs, out_fh, indent=4)
finally:
f.close()
out_fh.close() | [
"def",
"convert_belscript",
"(",
"ctx",
",",
"input_fn",
",",
"output_fn",
")",
":",
"try",
":",
"(",
"out_fh",
",",
"yaml_flag",
",",
"jsonl_flag",
",",
"json_flag",
",",
")",
"=",
"bel",
".",
"nanopub",
".",
"files",
".",
"create_nanopubs_fh",
"(",
"ou... | Convert belscript to nanopubs_bel format
This will convert the OpenBEL BELScript file format to
nanopub_bel-1.0.0 format.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file | [
"Convert",
"belscript",
"to",
"nanopubs_bel",
"format"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L252-L302 | train | 50,973 |
belbio/bel | bel/scripts.py | reformat | def reformat(ctx, input_fn, output_fn):
"""Reformat between JSON, YAML, JSONLines formats
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
"""
try:
(
out_fh,
yaml_flag,
jsonl_flag,
json_flag,
) = bel.nanopub.files.create_nanopubs_fh(output_fn)
if yaml_flag or json_flag:
docs = []
# input file
if re.search("gz$", input_fn):
f = gzip.open(input_fn, "rt")
else:
f = open(input_fn, "rt")
for np in bnf.read_nanopubs(input_fn):
if yaml_flag or json_flag:
docs.append(np)
elif jsonl_flag:
out_fh.write("{}\n".format(json.dumps(np)))
if yaml_flag:
yaml.dump(docs, out_fh)
elif json_flag:
json.dump(docs, out_fh, indent=4)
finally:
f.close()
out_fh.close() | python | def reformat(ctx, input_fn, output_fn):
"""Reformat between JSON, YAML, JSONLines formats
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
"""
try:
(
out_fh,
yaml_flag,
jsonl_flag,
json_flag,
) = bel.nanopub.files.create_nanopubs_fh(output_fn)
if yaml_flag or json_flag:
docs = []
# input file
if re.search("gz$", input_fn):
f = gzip.open(input_fn, "rt")
else:
f = open(input_fn, "rt")
for np in bnf.read_nanopubs(input_fn):
if yaml_flag or json_flag:
docs.append(np)
elif jsonl_flag:
out_fh.write("{}\n".format(json.dumps(np)))
if yaml_flag:
yaml.dump(docs, out_fh)
elif json_flag:
json.dump(docs, out_fh, indent=4)
finally:
f.close()
out_fh.close() | [
"def",
"reformat",
"(",
"ctx",
",",
"input_fn",
",",
"output_fn",
")",
":",
"try",
":",
"(",
"out_fh",
",",
"yaml_flag",
",",
"jsonl_flag",
",",
"json_flag",
",",
")",
"=",
"bel",
".",
"nanopub",
".",
"files",
".",
"create_nanopubs_fh",
"(",
"output_fn",... | Reformat between JSON, YAML, JSONLines formats
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file | [
"Reformat",
"between",
"JSON",
"YAML",
"JSONLines",
"formats"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L309-L355 | train | 50,974 |
belbio/bel | bel/scripts.py | nanopub_stats | def nanopub_stats(ctx, input_fn):
"""Collect statistics on nanopub file
input_fn can be json, jsonl or yaml and additionally gzipped
"""
counts = {
"nanopubs": 0,
"assertions": {"total": 0, "subject_only": 0, "nested": 0, "relations": {}},
}
for np in bnf.read_nanopubs(input_fn):
if "nanopub" in np:
counts["nanopubs"] += 1
counts["assertions"]["total"] += len(np["nanopub"]["assertions"])
for assertion in np["nanopub"]["assertions"]:
if assertion["relation"] is None:
counts["assertions"]["subject_only"] += 1
else:
if re.match("\s*\(", assertion["object"]):
counts["assertions"]["nested"] += 1
if (
not assertion.get("relation")
in counts["assertions"]["relations"]
):
counts["assertions"]["relations"][assertion.get("relation")] = 1
else:
counts["assertions"]["relations"][
assertion.get("relation")
] += 1
counts["assertions"]["relations"] = sorted(counts["assertions"]["relations"])
print("DumpVar:\n", json.dumps(counts, indent=4)) | python | def nanopub_stats(ctx, input_fn):
"""Collect statistics on nanopub file
input_fn can be json, jsonl or yaml and additionally gzipped
"""
counts = {
"nanopubs": 0,
"assertions": {"total": 0, "subject_only": 0, "nested": 0, "relations": {}},
}
for np in bnf.read_nanopubs(input_fn):
if "nanopub" in np:
counts["nanopubs"] += 1
counts["assertions"]["total"] += len(np["nanopub"]["assertions"])
for assertion in np["nanopub"]["assertions"]:
if assertion["relation"] is None:
counts["assertions"]["subject_only"] += 1
else:
if re.match("\s*\(", assertion["object"]):
counts["assertions"]["nested"] += 1
if (
not assertion.get("relation")
in counts["assertions"]["relations"]
):
counts["assertions"]["relations"][assertion.get("relation")] = 1
else:
counts["assertions"]["relations"][
assertion.get("relation")
] += 1
counts["assertions"]["relations"] = sorted(counts["assertions"]["relations"])
print("DumpVar:\n", json.dumps(counts, indent=4)) | [
"def",
"nanopub_stats",
"(",
"ctx",
",",
"input_fn",
")",
":",
"counts",
"=",
"{",
"\"nanopubs\"",
":",
"0",
",",
"\"assertions\"",
":",
"{",
"\"total\"",
":",
"0",
",",
"\"subject_only\"",
":",
"0",
",",
"\"nested\"",
":",
"0",
",",
"\"relations\"",
":"... | Collect statistics on nanopub file
input_fn can be json, jsonl or yaml and additionally gzipped | [
"Collect",
"statistics",
"on",
"nanopub",
"file"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L361-L395 | train | 50,975 |
belbio/bel | bel/scripts.py | edges | def edges(ctx, statement, rules, species, namespace_targets, version, api, config_fn):
"""Create BEL Edges from BEL Statement"""
if config_fn:
config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn)
else:
config = ctx.config
# Configuration - will return the first truthy result in list else the default option
if namespace_targets:
namespace_targets = json.loads(namespace_targets)
if rules:
rules = rules.replace(" ", "").split(",")
namespace_targets = utils.first_true(
[namespace_targets, config["bel"]["lang"].get("canonical")], None
)
api_url = utils.first_true(
[api, config["bel_api"]["servers"].get("api_url", None)], None
)
version = utils.first_true(
[version, config["bel"]["lang"].get("default_bel_version", None)], None
)
print("------------------------------")
print("BEL version: {}".format(version))
print("API Endpoint: {}".format(api))
print("------------------------------")
bo = BEL(version=version, endpoint=api_url)
if species:
edges = (
bo.parse(statement)
.orthologize(species)
.canonicalize(namespace_targets=namespace_targets)
.compute_edges(rules=rules)
)
else:
edges = (
bo.parse(statement)
.canonicalize(namespace_targets=namespace_targets)
.compute_edges(rules=rules)
)
if edges is None:
print(bo.original_bel_stmt)
print(bo.parse_visualize_error)
print(bo.validation_messages)
else:
print(json.dumps(edges, indent=4))
if bo.validation_messages:
print(bo.validation_messages)
else:
print("No problems found")
return | python | def edges(ctx, statement, rules, species, namespace_targets, version, api, config_fn):
"""Create BEL Edges from BEL Statement"""
if config_fn:
config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn)
else:
config = ctx.config
# Configuration - will return the first truthy result in list else the default option
if namespace_targets:
namespace_targets = json.loads(namespace_targets)
if rules:
rules = rules.replace(" ", "").split(",")
namespace_targets = utils.first_true(
[namespace_targets, config["bel"]["lang"].get("canonical")], None
)
api_url = utils.first_true(
[api, config["bel_api"]["servers"].get("api_url", None)], None
)
version = utils.first_true(
[version, config["bel"]["lang"].get("default_bel_version", None)], None
)
print("------------------------------")
print("BEL version: {}".format(version))
print("API Endpoint: {}".format(api))
print("------------------------------")
bo = BEL(version=version, endpoint=api_url)
if species:
edges = (
bo.parse(statement)
.orthologize(species)
.canonicalize(namespace_targets=namespace_targets)
.compute_edges(rules=rules)
)
else:
edges = (
bo.parse(statement)
.canonicalize(namespace_targets=namespace_targets)
.compute_edges(rules=rules)
)
if edges is None:
print(bo.original_bel_stmt)
print(bo.parse_visualize_error)
print(bo.validation_messages)
else:
print(json.dumps(edges, indent=4))
if bo.validation_messages:
print(bo.validation_messages)
else:
print("No problems found")
return | [
"def",
"edges",
"(",
"ctx",
",",
"statement",
",",
"rules",
",",
"species",
",",
"namespace_targets",
",",
"version",
",",
"api",
",",
"config_fn",
")",
":",
"if",
"config_fn",
":",
"config",
"=",
"bel",
".",
"db",
".",
"Config",
".",
"merge_config",
"... | Create BEL Edges from BEL Statement | [
"Create",
"BEL",
"Edges",
"from",
"BEL",
"Statement"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L582-L637 | train | 50,976 |
belbio/bel | bel/scripts.py | elasticsearch | def elasticsearch(delete, index_name):
"""Setup Elasticsearch namespace indexes
This will by default only create the indexes and run the namespace index mapping
if the indexes don't exist. The --delete option will force removal of the
index if it exists.
The index_name should be aliased to the index 'terms' when it's ready"""
if delete:
bel.db.elasticsearch.get_client(delete=True)
else:
bel.db.elasticsearch.get_client() | python | def elasticsearch(delete, index_name):
"""Setup Elasticsearch namespace indexes
This will by default only create the indexes and run the namespace index mapping
if the indexes don't exist. The --delete option will force removal of the
index if it exists.
The index_name should be aliased to the index 'terms' when it's ready"""
if delete:
bel.db.elasticsearch.get_client(delete=True)
else:
bel.db.elasticsearch.get_client() | [
"def",
"elasticsearch",
"(",
"delete",
",",
"index_name",
")",
":",
"if",
"delete",
":",
"bel",
".",
"db",
".",
"elasticsearch",
".",
"get_client",
"(",
"delete",
"=",
"True",
")",
"else",
":",
"bel",
".",
"db",
".",
"elasticsearch",
".",
"get_client",
... | Setup Elasticsearch namespace indexes
This will by default only create the indexes and run the namespace index mapping
if the indexes don't exist. The --delete option will force removal of the
index if it exists.
The index_name should be aliased to the index 'terms' when it's ready | [
"Setup",
"Elasticsearch",
"namespace",
"indexes"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L655-L667 | train | 50,977 |
belbio/bel | bel/scripts.py | arangodb | def arangodb(delete, db_name):
"""Setup ArangoDB database
db_name: Either 'belns' or 'edgestore' - must be one or the other
This will create the database, collections and indexes on the collection if it doesn't exist.
The --delete option will force removal of the database if it exists."""
if delete:
client = bel.db.arangodb.get_client()
bel.db.arangodb.delete_database(client, db_name)
if db_name == "belns":
bel.db.arangodb.get_belns_handle(client)
elif db_name == "edgestore":
bel.db.arangodb.get_edgestore_handle(client) | python | def arangodb(delete, db_name):
"""Setup ArangoDB database
db_name: Either 'belns' or 'edgestore' - must be one or the other
This will create the database, collections and indexes on the collection if it doesn't exist.
The --delete option will force removal of the database if it exists."""
if delete:
client = bel.db.arangodb.get_client()
bel.db.arangodb.delete_database(client, db_name)
if db_name == "belns":
bel.db.arangodb.get_belns_handle(client)
elif db_name == "edgestore":
bel.db.arangodb.get_edgestore_handle(client) | [
"def",
"arangodb",
"(",
"delete",
",",
"db_name",
")",
":",
"if",
"delete",
":",
"client",
"=",
"bel",
".",
"db",
".",
"arangodb",
".",
"get_client",
"(",
")",
"bel",
".",
"db",
".",
"arangodb",
".",
"delete_database",
"(",
"client",
",",
"db_name",
... | Setup ArangoDB database
db_name: Either 'belns' or 'edgestore' - must be one or the other
This will create the database, collections and indexes on the collection if it doesn't exist.
The --delete option will force removal of the database if it exists. | [
"Setup",
"ArangoDB",
"database"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L675-L691 | train | 50,978 |
belbio/bel | bel/nanopub/nanopubs.py | validate_to_schema | def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]:
"""Validate nanopub against jsonschema for nanopub
Args:
nanopub (Mapping[str, Any]): nanopub dict
schema (Mapping[str, Any]): nanopub schema
Returns:
Tuple[bool, List[str]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)
e.g. [('ERROR', "'subject' is a required property")]
"""
v = jsonschema.Draft4Validator(schema)
messages = []
errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path)
for error in errors:
for suberror in sorted(error.context, key=lambda e: e.schema_path):
print(list(suberror.schema_path), suberror.message, sep=", ")
messages.append(("ERROR", suberror.message))
is_valid = True
if errors:
is_valid = False
return (is_valid, messages) | python | def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]:
"""Validate nanopub against jsonschema for nanopub
Args:
nanopub (Mapping[str, Any]): nanopub dict
schema (Mapping[str, Any]): nanopub schema
Returns:
Tuple[bool, List[str]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)
e.g. [('ERROR', "'subject' is a required property")]
"""
v = jsonschema.Draft4Validator(schema)
messages = []
errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path)
for error in errors:
for suberror in sorted(error.context, key=lambda e: e.schema_path):
print(list(suberror.schema_path), suberror.message, sep=", ")
messages.append(("ERROR", suberror.message))
is_valid = True
if errors:
is_valid = False
return (is_valid, messages) | [
"def",
"validate_to_schema",
"(",
"nanopub",
",",
"schema",
")",
"->",
"Tuple",
"[",
"bool",
",",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"]",
":",
"v",
"=",
"jsonschema",
".",
"Draft4Validator",
"(",
"schema",
")",
"messages",
"=",
"... | Validate nanopub against jsonschema for nanopub
Args:
nanopub (Mapping[str, Any]): nanopub dict
schema (Mapping[str, Any]): nanopub schema
Returns:
Tuple[bool, List[str]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)
e.g. [('ERROR', "'subject' is a required property")] | [
"Validate",
"nanopub",
"against",
"jsonschema",
"for",
"nanopub"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L141-L167 | train | 50,979 |
belbio/bel | bel/nanopub/nanopubs.py | hash_nanopub | def hash_nanopub(nanopub: Mapping[str, Any]) -> str:
"""Create CityHash64 from nanopub for duplicate check
TODO - check that this hash value is consistent between C# and Python running on
laptop and server
Build string to hash
Collect flat array of (all values.strip()):
nanopub.type.name
nanopub.type.version
One of:
nanopub.citation.database.name
nanopub.citation.database.id
OR
nanopub.citation.database.uri
OR
nanopub.citation.database.reference
Extend with sorted list of assertions (SRO as single string with space between S, R and O)
Extend with sorted list of annotations (nanopub.annotations.type + ' ' + nanopub.annotations.id)
Convert array to string by joining array elements separated by a space
Create CityHash64(str) and return
"""
hash_list = []
# Type
hash_list.append(nanopub["nanopub"]["type"].get("name", "").strip())
hash_list.append(nanopub["nanopub"]["type"].get("version", "").strip())
# Citation
if nanopub["nanopub"]["citation"].get("database", False):
hash_list.append(
nanopub["nanopub"]["citation"]["database"].get("name", "").strip()
)
hash_list.append(
nanopub["nanopub"]["citation"]["database"].get("id", "").strip()
)
elif nanopub["nanopub"]["citation"].get("uri", False):
hash_list.append(nanopub["nanopub"]["citation"].get("uri", "").strip())
elif nanopub["nanopub"]["citation"].get("reference", False):
hash_list.append(nanopub["nanopub"]["citation"].get("reference", "").strip())
# Assertions
assertions = []
for assertion in nanopub["nanopub"]["assertions"]:
if assertion.get("relation") is None:
assertion["relation"] = ""
if assertion.get("object") is None:
assertion["object"] = ""
assertions.append(
" ".join(
(
assertion["subject"].strip(),
assertion.get("relation", "").strip(),
assertion.get("object", "").strip(),
)
).strip()
)
assertions = sorted(assertions)
hash_list.extend(assertions)
# Annotations
annotations = []
for anno in nanopub["nanopub"]["annotations"]:
annotations.append(
" ".join((anno.get("type", "").strip(), anno.get("id", "").strip())).strip()
)
annotations = sorted(annotations)
hash_list.extend(annotations)
np_string = " ".join([l.lower() for l in hash_list])
return "{:x}".format(CityHash64(np_string)) | python | def hash_nanopub(nanopub: Mapping[str, Any]) -> str:
"""Create CityHash64 from nanopub for duplicate check
TODO - check that this hash value is consistent between C# and Python running on
laptop and server
Build string to hash
Collect flat array of (all values.strip()):
nanopub.type.name
nanopub.type.version
One of:
nanopub.citation.database.name
nanopub.citation.database.id
OR
nanopub.citation.database.uri
OR
nanopub.citation.database.reference
Extend with sorted list of assertions (SRO as single string with space between S, R and O)
Extend with sorted list of annotations (nanopub.annotations.type + ' ' + nanopub.annotations.id)
Convert array to string by joining array elements separated by a space
Create CityHash64(str) and return
"""
hash_list = []
# Type
hash_list.append(nanopub["nanopub"]["type"].get("name", "").strip())
hash_list.append(nanopub["nanopub"]["type"].get("version", "").strip())
# Citation
if nanopub["nanopub"]["citation"].get("database", False):
hash_list.append(
nanopub["nanopub"]["citation"]["database"].get("name", "").strip()
)
hash_list.append(
nanopub["nanopub"]["citation"]["database"].get("id", "").strip()
)
elif nanopub["nanopub"]["citation"].get("uri", False):
hash_list.append(nanopub["nanopub"]["citation"].get("uri", "").strip())
elif nanopub["nanopub"]["citation"].get("reference", False):
hash_list.append(nanopub["nanopub"]["citation"].get("reference", "").strip())
# Assertions
assertions = []
for assertion in nanopub["nanopub"]["assertions"]:
if assertion.get("relation") is None:
assertion["relation"] = ""
if assertion.get("object") is None:
assertion["object"] = ""
assertions.append(
" ".join(
(
assertion["subject"].strip(),
assertion.get("relation", "").strip(),
assertion.get("object", "").strip(),
)
).strip()
)
assertions = sorted(assertions)
hash_list.extend(assertions)
# Annotations
annotations = []
for anno in nanopub["nanopub"]["annotations"]:
annotations.append(
" ".join((anno.get("type", "").strip(), anno.get("id", "").strip())).strip()
)
annotations = sorted(annotations)
hash_list.extend(annotations)
np_string = " ".join([l.lower() for l in hash_list])
return "{:x}".format(CityHash64(np_string)) | [
"def",
"hash_nanopub",
"(",
"nanopub",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"str",
":",
"hash_list",
"=",
"[",
"]",
"# Type",
"hash_list",
".",
"append",
"(",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"type\"",
"]",
".",
"get",
"(... | Create CityHash64 from nanopub for duplicate check
TODO - check that this hash value is consistent between C# and Python running on
laptop and server
Build string to hash
Collect flat array of (all values.strip()):
nanopub.type.name
nanopub.type.version
One of:
nanopub.citation.database.name
nanopub.citation.database.id
OR
nanopub.citation.database.uri
OR
nanopub.citation.database.reference
Extend with sorted list of assertions (SRO as single string with space between S, R and O)
Extend with sorted list of annotations (nanopub.annotations.type + ' ' + nanopub.annotations.id)
Convert array to string by joining array elements separated by a space
Create CityHash64(str) and return | [
"Create",
"CityHash64",
"from",
"nanopub",
"for",
"duplicate",
"check"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L171-L256 | train | 50,980 |
belbio/bel | bel/nanopub/nanopubs.py | Nanopub.validate | def validate(
self, nanopub: Mapping[str, Any]
) -> Tuple[bool, List[Tuple[str, str]]]:
"""Validates using the nanopub schema
Args:
nanopub (Mapping[str, Any]): nanopub dict
Returns:
Tuple[bool, List[Tuple[str, str]]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
e.g. [('WARNING', "Context ID not found")] """
# Validate nanopub
(is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)
if not is_valid:
return messages
# Extract BEL Version
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
else:
is_valid = False
return (
is_valid,
f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}",
)
all_messages = []
# Validate BEL Statements
bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint)
for edge in nanopub["nanopub"]["edges"]:
bel_statement = f"{edge['subject']} {edge['relation']} {edge['object']}"
parse_obj = bel_obj.parse(bel_statement)
if not parse_obj.valid:
all_messages.extend(
(
"ERROR",
f"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}",
)
)
# Validate nanopub.context
for context in nanopub["nanopub"]["context"]:
(is_valid, messages) = self.validate_context(context)
all_messages.extend(messages)
is_valid = True
for _type, msg in all_messages:
if _type == "ERROR":
is_valid = False
return (is_valid, all_messages) | python | def validate(
self, nanopub: Mapping[str, Any]
) -> Tuple[bool, List[Tuple[str, str]]]:
"""Validates using the nanopub schema
Args:
nanopub (Mapping[str, Any]): nanopub dict
Returns:
Tuple[bool, List[Tuple[str, str]]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
e.g. [('WARNING', "Context ID not found")] """
# Validate nanopub
(is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)
if not is_valid:
return messages
# Extract BEL Version
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
else:
is_valid = False
return (
is_valid,
f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}",
)
all_messages = []
# Validate BEL Statements
bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint)
for edge in nanopub["nanopub"]["edges"]:
bel_statement = f"{edge['subject']} {edge['relation']} {edge['object']}"
parse_obj = bel_obj.parse(bel_statement)
if not parse_obj.valid:
all_messages.extend(
(
"ERROR",
f"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}",
)
)
# Validate nanopub.context
for context in nanopub["nanopub"]["context"]:
(is_valid, messages) = self.validate_context(context)
all_messages.extend(messages)
is_valid = True
for _type, msg in all_messages:
if _type == "ERROR":
is_valid = False
return (is_valid, all_messages) | [
"def",
"validate",
"(",
"self",
",",
"nanopub",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Tuple",
"[",
"bool",
",",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"]",
":",
"# Validate nanopub",
"(",
"is_valid",
",",
"messag... | Validates using the nanopub schema
Args:
nanopub (Mapping[str, Any]): nanopub dict
Returns:
Tuple[bool, List[Tuple[str, str]]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
e.g. [('WARNING', "Context ID not found")] | [
"Validates",
"using",
"the",
"nanopub",
"schema"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L30-L83 | train | 50,981 |
belbio/bel | bel/nanopub/nanopubs.py | Nanopub.bel_edges | def bel_edges(
self,
nanopub: Mapping[str, Any],
namespace_targets: Mapping[str, List[str]] = {},
rules: List[str] = [],
orthologize_target: str = None,
) -> List[Mapping[str, Any]]:
"""Create BEL Edges from BEL nanopub
Args:
nanopub (Mapping[str, Any]): bel nanopub
namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize
rules (List[str]): which computed edge rules to process, default is all,
look at BEL Specification yaml file for computed edge signature keys,
e.g. degradation, if any rule in list is 'skip', then skip computing edges
just return primary_edge
orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize
Returns:
List[Mapping[str, Any]]: edge list with edge attributes (e.g. context)
"""
edges = bel.edge.edges.create_edges(
nanopub,
self.endpoint,
namespace_targets=namespace_targets,
rules=rules,
orthologize_target=orthologize_target,
)
return edges | python | def bel_edges(
self,
nanopub: Mapping[str, Any],
namespace_targets: Mapping[str, List[str]] = {},
rules: List[str] = [],
orthologize_target: str = None,
) -> List[Mapping[str, Any]]:
"""Create BEL Edges from BEL nanopub
Args:
nanopub (Mapping[str, Any]): bel nanopub
namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize
rules (List[str]): which computed edge rules to process, default is all,
look at BEL Specification yaml file for computed edge signature keys,
e.g. degradation, if any rule in list is 'skip', then skip computing edges
just return primary_edge
orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize
Returns:
List[Mapping[str, Any]]: edge list with edge attributes (e.g. context)
"""
edges = bel.edge.edges.create_edges(
nanopub,
self.endpoint,
namespace_targets=namespace_targets,
rules=rules,
orthologize_target=orthologize_target,
)
return edges | [
"def",
"bel_edges",
"(",
"self",
",",
"nanopub",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"namespace_targets",
":",
"Mapping",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"{",
"}",
",",
"rules",
":",
"List",
"[",
"str",
"]",
"=",... | Create BEL Edges from BEL nanopub
Args:
nanopub (Mapping[str, Any]): bel nanopub
namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize
rules (List[str]): which computed edge rules to process, default is all,
look at BEL Specification yaml file for computed edge signature keys,
e.g. degradation, if any rule in list is 'skip', then skip computing edges
just return primary_edge
orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize
Returns:
List[Mapping[str, Any]]: edge list with edge attributes (e.g. context) | [
"Create",
"BEL",
"Edges",
"from",
"BEL",
"nanopub"
] | 60333e8815625b942b4836903f3b618cf44b3771 | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L108-L138 | train | 50,982 |
RockFeng0/rtsf-http | httpdriver/cli.py | main_hrun | def main_hrun():
""" parse command line options and run commands."""
parser = argparse.ArgumentParser(description="Tools for http(s) test. Base on rtsf.")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--log-file',
help="Write logs to specified file path.")
parser.add_argument(
'case_file',
help="yaml testcase file")
color_print("httpdriver {}".format(__version__), "GREEN")
args = parser.parse_args()
logger.setup_logger(args.log_level, args.log_file)
runner = TestRunner(runner = HttpDriver).run(args.case_file)
html_report = runner.gen_html_report()
color_print("report: {}".format(html_report)) | python | def main_hrun():
""" parse command line options and run commands."""
parser = argparse.ArgumentParser(description="Tools for http(s) test. Base on rtsf.")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--log-file',
help="Write logs to specified file path.")
parser.add_argument(
'case_file',
help="yaml testcase file")
color_print("httpdriver {}".format(__version__), "GREEN")
args = parser.parse_args()
logger.setup_logger(args.log_level, args.log_file)
runner = TestRunner(runner = HttpDriver).run(args.case_file)
html_report = runner.gen_html_report()
color_print("report: {}".format(html_report)) | [
"def",
"main_hrun",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Tools for http(s) test. Base on rtsf.\"",
")",
"parser",
".",
"add_argument",
"(",
"'--log-level'",
",",
"default",
"=",
"'INFO'",
",",
"help",
"=",
... | parse command line options and run commands. | [
"parse",
"command",
"line",
"options",
"and",
"run",
"commands",
"."
] | 3280cc9a01b0c92c52d699b0ebc29e55e62611a0 | https://github.com/RockFeng0/rtsf-http/blob/3280cc9a01b0c92c52d699b0ebc29e55e62611a0/httpdriver/cli.py#L28-L51 | train | 50,983 |
urschrei/simplification | simplification/util.py | _void_array_to_nested_list | def _void_array_to_nested_list(res, _func, _args):
""" Dereference the FFI result to a list of coordinates """
try:
shape = res.coords.len, 2
ptr = cast(res.coords.data, POINTER(c_double))
array = np.ctypeslib.as_array(ptr, shape)
return array.tolist()
finally:
drop_array(res.coords) | python | def _void_array_to_nested_list(res, _func, _args):
""" Dereference the FFI result to a list of coordinates """
try:
shape = res.coords.len, 2
ptr = cast(res.coords.data, POINTER(c_double))
array = np.ctypeslib.as_array(ptr, shape)
return array.tolist()
finally:
drop_array(res.coords) | [
"def",
"_void_array_to_nested_list",
"(",
"res",
",",
"_func",
",",
"_args",
")",
":",
"try",
":",
"shape",
"=",
"res",
".",
"coords",
".",
"len",
",",
"2",
"ptr",
"=",
"cast",
"(",
"res",
".",
"coords",
".",
"data",
",",
"POINTER",
"(",
"c_double",
... | Dereference the FFI result to a list of coordinates | [
"Dereference",
"the",
"FFI",
"result",
"to",
"a",
"list",
"of",
"coordinates"
] | 58491fc08cffa2fab5fe19d17c2ceb9d442530c3 | https://github.com/urschrei/simplification/blob/58491fc08cffa2fab5fe19d17c2ceb9d442530c3/simplification/util.py#L92-L100 | train | 50,984 |
MacHu-GWU/dataIO-project | dataIO/js.py | lower_ext | def lower_ext(abspath):
"""Convert file extension to lowercase.
"""
fname, ext = os.path.splitext(abspath)
return fname + ext.lower() | python | def lower_ext(abspath):
"""Convert file extension to lowercase.
"""
fname, ext = os.path.splitext(abspath)
return fname + ext.lower() | [
"def",
"lower_ext",
"(",
"abspath",
")",
":",
"fname",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"abspath",
")",
"return",
"fname",
"+",
"ext",
".",
"lower",
"(",
")"
] | Convert file extension to lowercase. | [
"Convert",
"file",
"extension",
"to",
"lowercase",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L71-L75 | train | 50,985 |
MacHu-GWU/dataIO-project | dataIO/js.py | pretty_dumps | def pretty_dumps(data):
"""Return json string in pretty format.
**中文文档**
将字典转化成格式化后的字符串。
"""
try:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
except:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True) | python | def pretty_dumps(data):
"""Return json string in pretty format.
**中文文档**
将字典转化成格式化后的字符串。
"""
try:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
except:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True) | [
"def",
"pretty_dumps",
"(",
"data",
")",
":",
"try",
":",
"return",
"json",
".",
"dumps",
"(",
"data",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"ensure_ascii",
"=",
"False",
")",
"except",
":",
"return",
"json",
".",
"dumps",
"(",... | Return json string in pretty format.
**中文文档**
将字典转化成格式化后的字符串。 | [
"Return",
"json",
"string",
"in",
"pretty",
"format",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L269-L279 | train | 50,986 |
RI-imaging/nrefocus | nrefocus/pad.py | _get_pad_left_right | def _get_pad_left_right(small, large):
""" Compute left and right padding values.
Here we use the convention that if the padding
size is odd, we pad the odd part to the right
and the even part to the left.
Parameters
----------
small : int
Old size of original 1D array
large : int
New size off padded 1D array
Returns
-------
(padleft, padright) : tuple
The proposed padding sizes.
"""
assert small < large, "Can only pad when new size larger than old size"
padsize = large - small
if padsize % 2 != 0:
leftpad = (padsize - 1)/2
else:
leftpad = padsize/2
rightpad = padsize-leftpad
return int(leftpad), int(rightpad) | python | def _get_pad_left_right(small, large):
""" Compute left and right padding values.
Here we use the convention that if the padding
size is odd, we pad the odd part to the right
and the even part to the left.
Parameters
----------
small : int
Old size of original 1D array
large : int
New size off padded 1D array
Returns
-------
(padleft, padright) : tuple
The proposed padding sizes.
"""
assert small < large, "Can only pad when new size larger than old size"
padsize = large - small
if padsize % 2 != 0:
leftpad = (padsize - 1)/2
else:
leftpad = padsize/2
rightpad = padsize-leftpad
return int(leftpad), int(rightpad) | [
"def",
"_get_pad_left_right",
"(",
"small",
",",
"large",
")",
":",
"assert",
"small",
"<",
"large",
",",
"\"Can only pad when new size larger than old size\"",
"padsize",
"=",
"large",
"-",
"small",
"if",
"padsize",
"%",
"2",
"!=",
"0",
":",
"leftpad",
"=",
"... | Compute left and right padding values.
Here we use the convention that if the padding
size is odd, we pad the odd part to the right
and the even part to the left.
Parameters
----------
small : int
Old size of original 1D array
large : int
New size off padded 1D array
Returns
-------
(padleft, padright) : tuple
The proposed padding sizes. | [
"Compute",
"left",
"and",
"right",
"padding",
"values",
"."
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L12-L40 | train | 50,987 |
RI-imaging/nrefocus | nrefocus/pad.py | pad_add | def pad_add(av, size=None, stlen=10):
""" Perform linear padding for complex array
The input array `av` is padded with a linear ramp starting at the
edges and going outwards to an average value computed from a band
of thickness `stlen` at the outer boundary of the array.
Pads will only be appended, not prepended to the array.
If the input array is complex, pads will be complex numbers
The average is computed for phase and amplitude separately.
Parameters
----------
av : complex 1D or 2D ndarray
The array that will be padded.
size : int or tuple of length 1 (1D) or tuple of length 2 (2D), optional
The final size of the padded array. Defaults to double the size
of the input array.
stlen : int, optional
The thickness of the frame within `av` that will be used to
compute an average value for padding.
Returns
-------
pv : complex 1D or 2D ndarray
Padded array `av` with pads appended to right and bottom.
"""
if size is None:
size = list()
for s in av.shape:
size.append(int(2*s))
elif not hasattr(size, "__len__"):
size = [size]
assert len(av.shape) in [1, 2], "Only 1D and 2D arrays!"
assert len(av.shape) == len(
size), "`size` must have same length as `av.shape`!"
if len(av.shape) == 2:
return _pad_add_2d(av, size, stlen)
else:
return _pad_add_1d(av, size, stlen) | python | def pad_add(av, size=None, stlen=10):
""" Perform linear padding for complex array
The input array `av` is padded with a linear ramp starting at the
edges and going outwards to an average value computed from a band
of thickness `stlen` at the outer boundary of the array.
Pads will only be appended, not prepended to the array.
If the input array is complex, pads will be complex numbers
The average is computed for phase and amplitude separately.
Parameters
----------
av : complex 1D or 2D ndarray
The array that will be padded.
size : int or tuple of length 1 (1D) or tuple of length 2 (2D), optional
The final size of the padded array. Defaults to double the size
of the input array.
stlen : int, optional
The thickness of the frame within `av` that will be used to
compute an average value for padding.
Returns
-------
pv : complex 1D or 2D ndarray
Padded array `av` with pads appended to right and bottom.
"""
if size is None:
size = list()
for s in av.shape:
size.append(int(2*s))
elif not hasattr(size, "__len__"):
size = [size]
assert len(av.shape) in [1, 2], "Only 1D and 2D arrays!"
assert len(av.shape) == len(
size), "`size` must have same length as `av.shape`!"
if len(av.shape) == 2:
return _pad_add_2d(av, size, stlen)
else:
return _pad_add_1d(av, size, stlen) | [
"def",
"pad_add",
"(",
"av",
",",
"size",
"=",
"None",
",",
"stlen",
"=",
"10",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"list",
"(",
")",
"for",
"s",
"in",
"av",
".",
"shape",
":",
"size",
".",
"append",
"(",
"int",
"(",
"2",
... | Perform linear padding for complex array
The input array `av` is padded with a linear ramp starting at the
edges and going outwards to an average value computed from a band
of thickness `stlen` at the outer boundary of the array.
Pads will only be appended, not prepended to the array.
If the input array is complex, pads will be complex numbers
The average is computed for phase and amplitude separately.
Parameters
----------
av : complex 1D or 2D ndarray
The array that will be padded.
size : int or tuple of length 1 (1D) or tuple of length 2 (2D), optional
The final size of the padded array. Defaults to double the size
of the input array.
stlen : int, optional
The thickness of the frame within `av` that will be used to
compute an average value for padding.
Returns
-------
pv : complex 1D or 2D ndarray
Padded array `av` with pads appended to right and bottom. | [
"Perform",
"linear",
"padding",
"for",
"complex",
"array"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L43-L86 | train | 50,988 |
RI-imaging/nrefocus | nrefocus/pad.py | _pad_add_1d | def _pad_add_1d(av, size, stlen):
""" 2D component of `pad_add`
"""
assert len(size) == 1
padx = _get_pad_left_right(av.shape[0], size[0])
mask = np.zeros(av.shape, dtype=bool)
mask[stlen:-stlen] = True
border = av[~mask]
if av.dtype.name.count("complex"):
padval = np.average(np.abs(border)) * \
np.exp(1j*np.average(np.angle(border)))
else:
padval = np.average(border)
if np.__version__[:3] in ["1.7", "1.8", "1.9"]:
end_values = ((padval, padval),)
else:
end_values = (padval,)
bv = np.pad(av,
padx,
mode="linear_ramp",
end_values=end_values)
# roll the array so that the padding values are on the right
bv = np.roll(bv, -padx[0], 0)
return bv | python | def _pad_add_1d(av, size, stlen):
""" 2D component of `pad_add`
"""
assert len(size) == 1
padx = _get_pad_left_right(av.shape[0], size[0])
mask = np.zeros(av.shape, dtype=bool)
mask[stlen:-stlen] = True
border = av[~mask]
if av.dtype.name.count("complex"):
padval = np.average(np.abs(border)) * \
np.exp(1j*np.average(np.angle(border)))
else:
padval = np.average(border)
if np.__version__[:3] in ["1.7", "1.8", "1.9"]:
end_values = ((padval, padval),)
else:
end_values = (padval,)
bv = np.pad(av,
padx,
mode="linear_ramp",
end_values=end_values)
# roll the array so that the padding values are on the right
bv = np.roll(bv, -padx[0], 0)
return bv | [
"def",
"_pad_add_1d",
"(",
"av",
",",
"size",
",",
"stlen",
")",
":",
"assert",
"len",
"(",
"size",
")",
"==",
"1",
"padx",
"=",
"_get_pad_left_right",
"(",
"av",
".",
"shape",
"[",
"0",
"]",
",",
"size",
"[",
"0",
"]",
")",
"mask",
"=",
"np",
... | 2D component of `pad_add` | [
"2D",
"component",
"of",
"pad_add"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L89-L114 | train | 50,989 |
RI-imaging/nrefocus | nrefocus/pad.py | pad_rem | def pad_rem(pv, size=None):
""" Removes linear padding from array
This is a convenience function that does the opposite
of `pad_add`.
Parameters
----------
pv : 1D or 2D ndarray
The array from which the padding will be removed.
size : tuple of length 1 (1D) or 2 (2D), optional
The final size of the un-padded array. Defaults to half the size
of the input array.
Returns
-------
pv : 1D or 2D ndarray
Padded array `av` with pads appended to right and bottom.
"""
if size is None:
size = list()
for s in pv.shape:
assert s % 2 == 0, "Uneven size; specify correct size of output!"
size.append(int(s/2))
elif not hasattr(size, "__len__"):
size = [size]
assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!"
assert len(pv.shape) == len(
size), "`size` must have same length as `av.shape`!"
if len(pv.shape) == 2:
return pv[:size[0], :size[1]]
else:
return pv[:size[0]] | python | def pad_rem(pv, size=None):
""" Removes linear padding from array
This is a convenience function that does the opposite
of `pad_add`.
Parameters
----------
pv : 1D or 2D ndarray
The array from which the padding will be removed.
size : tuple of length 1 (1D) or 2 (2D), optional
The final size of the un-padded array. Defaults to half the size
of the input array.
Returns
-------
pv : 1D or 2D ndarray
Padded array `av` with pads appended to right and bottom.
"""
if size is None:
size = list()
for s in pv.shape:
assert s % 2 == 0, "Uneven size; specify correct size of output!"
size.append(int(s/2))
elif not hasattr(size, "__len__"):
size = [size]
assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!"
assert len(pv.shape) == len(
size), "`size` must have same length as `av.shape`!"
if len(pv.shape) == 2:
return pv[:size[0], :size[1]]
else:
return pv[:size[0]] | [
"def",
"pad_rem",
"(",
"pv",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"list",
"(",
")",
"for",
"s",
"in",
"pv",
".",
"shape",
":",
"assert",
"s",
"%",
"2",
"==",
"0",
",",
"\"Uneven size; specify correct siz... | Removes linear padding from array
This is a convenience function that does the opposite
of `pad_add`.
Parameters
----------
pv : 1D or 2D ndarray
The array from which the padding will be removed.
size : tuple of length 1 (1D) or 2 (2D), optional
The final size of the un-padded array. Defaults to half the size
of the input array.
Returns
-------
pv : 1D or 2D ndarray
Padded array `av` with pads appended to right and bottom. | [
"Removes",
"linear",
"padding",
"from",
"array"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L147-L182 | train | 50,990 |
anteater/anteater | anteater/src/virus_total.py | VirusTotal.rate_limit | def rate_limit(self):
"""
Simple rate limit function using redis
"""
rate_limited_msg = False
while True:
is_rate_limited = self.limit.is_rate_limited(uuid)
if is_rate_limited:
time.sleep(0.3) # save hammering redis
if not rate_limited_msg:
self.logger.info('Rate limit active..please wait...')
rate_limited_msg = True
if not is_rate_limited:
self.logger.info('Rate limit clear.')
self.limit.attempt(uuid)
return True | python | def rate_limit(self):
"""
Simple rate limit function using redis
"""
rate_limited_msg = False
while True:
is_rate_limited = self.limit.is_rate_limited(uuid)
if is_rate_limited:
time.sleep(0.3) # save hammering redis
if not rate_limited_msg:
self.logger.info('Rate limit active..please wait...')
rate_limited_msg = True
if not is_rate_limited:
self.logger.info('Rate limit clear.')
self.limit.attempt(uuid)
return True | [
"def",
"rate_limit",
"(",
"self",
")",
":",
"rate_limited_msg",
"=",
"False",
"while",
"True",
":",
"is_rate_limited",
"=",
"self",
".",
"limit",
".",
"is_rate_limited",
"(",
"uuid",
")",
"if",
"is_rate_limited",
":",
"time",
".",
"sleep",
"(",
"0.3",
")",... | Simple rate limit function using redis | [
"Simple",
"rate",
"limit",
"function",
"using",
"redis"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L60-L77 | train | 50,991 |
anteater/anteater | anteater/src/virus_total.py | VirusTotal.scan_file | def scan_file(self, filename, apikey):
"""
Sends a file to virus total for assessment
"""
url = self.base_url + "file/scan"
params = {'apikey': apikey}
scanfile = {"file": open(filename, 'rb')}
response = requests.post(url, files=scanfile, params=params)
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", filename, response.status_code) | python | def scan_file(self, filename, apikey):
"""
Sends a file to virus total for assessment
"""
url = self.base_url + "file/scan"
params = {'apikey': apikey}
scanfile = {"file": open(filename, 'rb')}
response = requests.post(url, files=scanfile, params=params)
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", filename, response.status_code) | [
"def",
"scan_file",
"(",
"self",
",",
"filename",
",",
"apikey",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"\"file/scan\"",
"params",
"=",
"{",
"'apikey'",
":",
"apikey",
"}",
"scanfile",
"=",
"{",
"\"file\"",
":",
"open",
"(",
"filename",
",... | Sends a file to virus total for assessment | [
"Sends",
"a",
"file",
"to",
"virus",
"total",
"for",
"assessment"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L79-L95 | train | 50,992 |
anteater/anteater | anteater/src/virus_total.py | VirusTotal.rescan_file | def rescan_file(self, filename, sha256hash, apikey):
"""
just send the hash, check the date
"""
url = self.base_url + "file/rescan"
params = {
'apikey': apikey,
'resource': sha256hash
}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params)
if response.status_code == self.HTTP_OK:
self.logger.info("sent: %s, HTTP: %d, content: %s", os.path.basename(filename), response.status_code, response.text)
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", os.path.basename(filename), response.status_code)
return response | python | def rescan_file(self, filename, sha256hash, apikey):
"""
just send the hash, check the date
"""
url = self.base_url + "file/rescan"
params = {
'apikey': apikey,
'resource': sha256hash
}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params)
if response.status_code == self.HTTP_OK:
self.logger.info("sent: %s, HTTP: %d, content: %s", os.path.basename(filename), response.status_code, response.text)
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", os.path.basename(filename), response.status_code)
return response | [
"def",
"rescan_file",
"(",
"self",
",",
"filename",
",",
"sha256hash",
",",
"apikey",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"\"file/rescan\"",
"params",
"=",
"{",
"'apikey'",
":",
"apikey",
",",
"'resource'",
":",
"sha256hash",
"}",
"rate_lim... | just send the hash, check the date | [
"just",
"send",
"the",
"hash",
"check",
"the",
"date"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L97-L116 | train | 50,993 |
anteater/anteater | anteater/src/virus_total.py | VirusTotal.binary_report | def binary_report(self, sha256sum, apikey):
"""
retrieve report from file scan
"""
url = self.base_url + "file/report"
params = {"apikey": apikey, "resource": sha256sum}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, data=params)
if response.status_code == self.HTTP_OK:
json_response = response.json()
response_code = json_response['response_code']
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.warning("retrieve report: %s, HTTP code: %d", os.path.basename(filename), response.status_code) | python | def binary_report(self, sha256sum, apikey):
"""
retrieve report from file scan
"""
url = self.base_url + "file/report"
params = {"apikey": apikey, "resource": sha256sum}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, data=params)
if response.status_code == self.HTTP_OK:
json_response = response.json()
response_code = json_response['response_code']
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.warning("retrieve report: %s, HTTP code: %d", os.path.basename(filename), response.status_code) | [
"def",
"binary_report",
"(",
"self",
",",
"sha256sum",
",",
"apikey",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"\"file/report\"",
"params",
"=",
"{",
"\"apikey\"",
":",
"apikey",
",",
"\"resource\"",
":",
"sha256sum",
"}",
"rate_limit_clear",
"=",... | retrieve report from file scan | [
"retrieve",
"report",
"from",
"file",
"scan"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L118-L136 | train | 50,994 |
anteater/anteater | anteater/src/virus_total.py | VirusTotal.send_ip | def send_ip(self, ipaddr, apikey):
"""
Send IP address for list of past malicous domain associations
"""
url = self.base_url + "ip-address/report"
parameters = {"ip": ipaddr, "apikey": apikey}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.get(url, params=parameters)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", ipaddr, response.status_code)
time.sleep(self.public_api_sleep_time) | python | def send_ip(self, ipaddr, apikey):
"""
Send IP address for list of past malicous domain associations
"""
url = self.base_url + "ip-address/report"
parameters = {"ip": ipaddr, "apikey": apikey}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.get(url, params=parameters)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", ipaddr, response.status_code)
time.sleep(self.public_api_sleep_time) | [
"def",
"send_ip",
"(",
"self",
",",
"ipaddr",
",",
"apikey",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"\"ip-address/report\"",
"parameters",
"=",
"{",
"\"ip\"",
":",
"ipaddr",
",",
"\"apikey\"",
":",
"apikey",
"}",
"rate_limit_clear",
"=",
"self... | Send IP address for list of past malicous domain associations | [
"Send",
"IP",
"address",
"for",
"list",
"of",
"past",
"malicous",
"domain",
"associations"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L138-L154 | train | 50,995 |
anteater/anteater | anteater/src/virus_total.py | VirusTotal.url_report | def url_report(self, scan_url, apikey):
"""
Send URLS for list of past malicous associations
"""
url = self.base_url + "url/report"
params = {"apikey": apikey, 'resource': scan_url}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params, headers=self.headers)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code)
time.sleep(self.public_api_sleep_time) | python | def url_report(self, scan_url, apikey):
"""
Send URLS for list of past malicous associations
"""
url = self.base_url + "url/report"
params = {"apikey": apikey, 'resource': scan_url}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params, headers=self.headers)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code)
time.sleep(self.public_api_sleep_time) | [
"def",
"url_report",
"(",
"self",
",",
"scan_url",
",",
"apikey",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"\"url/report\"",
"params",
"=",
"{",
"\"apikey\"",
":",
"apikey",
",",
"'resource'",
":",
"scan_url",
"}",
"rate_limit_clear",
"=",
"self... | Send URLS for list of past malicous associations | [
"Send",
"URLS",
"for",
"list",
"of",
"past",
"malicous",
"associations"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L156-L172 | train | 50,996 |
rapidpro/expressions | python/setup.py | _read_requirements | def _read_requirements(filename, extra_packages):
"""Returns a list of package requirements read from the file."""
requirements_file = open(filename).read()
hard_requirements = []
for line in requirements_file.splitlines():
if _is_requirement(line):
if line.find(';') > -1:
dep, condition = tuple(line.split(';'))
extra_packages[condition.strip()].append(dep.strip())
else:
hard_requirements.append(line.strip())
return hard_requirements, extra_packages | python | def _read_requirements(filename, extra_packages):
"""Returns a list of package requirements read from the file."""
requirements_file = open(filename).read()
hard_requirements = []
for line in requirements_file.splitlines():
if _is_requirement(line):
if line.find(';') > -1:
dep, condition = tuple(line.split(';'))
extra_packages[condition.strip()].append(dep.strip())
else:
hard_requirements.append(line.strip())
return hard_requirements, extra_packages | [
"def",
"_read_requirements",
"(",
"filename",
",",
"extra_packages",
")",
":",
"requirements_file",
"=",
"open",
"(",
"filename",
")",
".",
"read",
"(",
")",
"hard_requirements",
"=",
"[",
"]",
"for",
"line",
"in",
"requirements_file",
".",
"splitlines",
"(",
... | Returns a list of package requirements read from the file. | [
"Returns",
"a",
"list",
"of",
"package",
"requirements",
"read",
"from",
"the",
"file",
"."
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/setup.py#L14-L26 | train | 50,997 |
rapidpro/expressions | python/temba_expressions/functions/custom.py | field | def field(ctx, text, index, delimiter=' '):
"""
Reference a field in string separated by a delimiter
"""
splits = text.split(delimiter)
# remove our delimiters and whitespace
splits = [f for f in splits if f != delimiter and len(f.strip()) > 0]
index = conversions.to_integer(index, ctx)
if index < 1:
raise ValueError('Field index cannot be less than 1')
if index <= len(splits):
return splits[index-1]
else:
return '' | python | def field(ctx, text, index, delimiter=' '):
"""
Reference a field in string separated by a delimiter
"""
splits = text.split(delimiter)
# remove our delimiters and whitespace
splits = [f for f in splits if f != delimiter and len(f.strip()) > 0]
index = conversions.to_integer(index, ctx)
if index < 1:
raise ValueError('Field index cannot be less than 1')
if index <= len(splits):
return splits[index-1]
else:
return '' | [
"def",
"field",
"(",
"ctx",
",",
"text",
",",
"index",
",",
"delimiter",
"=",
"' '",
")",
":",
"splits",
"=",
"text",
".",
"split",
"(",
"delimiter",
")",
"# remove our delimiters and whitespace",
"splits",
"=",
"[",
"f",
"for",
"f",
"in",
"splits",
"if"... | Reference a field in string separated by a delimiter | [
"Reference",
"a",
"field",
"in",
"string",
"separated",
"by",
"a",
"delimiter"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L10-L26 | train | 50,998 |
rapidpro/expressions | python/temba_expressions/functions/custom.py | epoch | def epoch(ctx, datetime):
"""
Converts the given date to the number of seconds since January 1st, 1970 UTC
"""
return conversions.to_decimal(str(conversions.to_datetime(datetime, ctx).timestamp()), ctx) | python | def epoch(ctx, datetime):
"""
Converts the given date to the number of seconds since January 1st, 1970 UTC
"""
return conversions.to_decimal(str(conversions.to_datetime(datetime, ctx).timestamp()), ctx) | [
"def",
"epoch",
"(",
"ctx",
",",
"datetime",
")",
":",
"return",
"conversions",
".",
"to_decimal",
"(",
"str",
"(",
"conversions",
".",
"to_datetime",
"(",
"datetime",
",",
"ctx",
")",
".",
"timestamp",
"(",
")",
")",
",",
"ctx",
")"
] | Converts the given date to the number of seconds since January 1st, 1970 UTC | [
"Converts",
"the",
"given",
"date",
"to",
"the",
"number",
"of",
"seconds",
"since",
"January",
"1st",
"1970",
"UTC"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L44-L48 | train | 50,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.