repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
Metatab/metapack
metapack/package/excel.py
ExcelPackageBuilder._load_resources
def _load_resources(self): """Remove the geography from the files, since it isn't particularly useful in Excel""" for t in self.doc.find('Root.Table'): for c in t.find('Table.Column'): if c.get_value('datatype') == 'geometry': c['transform'] = '^empty_str' c['datatype'] = 'text' return super()._load_resources()
python
def _load_resources(self): """Remove the geography from the files, since it isn't particularly useful in Excel""" for t in self.doc.find('Root.Table'): for c in t.find('Table.Column'): if c.get_value('datatype') == 'geometry': c['transform'] = '^empty_str' c['datatype'] = 'text' return super()._load_resources()
[ "def", "_load_resources", "(", "self", ")", ":", "for", "t", "in", "self", ".", "doc", ".", "find", "(", "'Root.Table'", ")", ":", "for", "c", "in", "t", ".", "find", "(", "'Table.Column'", ")", ":", "if", "c", ".", "get_value", "(", "'datatype'", ...
Remove the geography from the files, since it isn't particularly useful in Excel
[ "Remove", "the", "geography", "from", "the", "files", "since", "it", "isn", "t", "particularly", "useful", "in", "Excel" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/excel.py#L91-L100
train
50,800
Metatab/metapack
metapack/cli/metaaws.py
make_bucket_policy_statements
def make_bucket_policy_statements(bucket): """Return the statemtns in a bucket policy as a dict of dicts""" import yaml from os.path import dirname, join, abspath import copy import metatab with open(join(dirname(abspath(metatab.__file__)), 'support', 'policy_parts.yaml')) as f: parts = yaml.load(f) statements = {} cl = copy.deepcopy(parts['list']) cl['Resource'] = arn_prefix + bucket statements['list'] = cl cl = copy.deepcopy(parts['bucket']) cl['Resource'] = arn_prefix + bucket statements['bucket'] = cl for sd in TOP_LEVEL_DIRS: cl = copy.deepcopy(parts['read']) cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*' cl['Sid'] = cl['Sid'].title() + sd.title() statements[cl['Sid']] = cl cl = copy.deepcopy(parts['write']) cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*' cl['Sid'] = cl['Sid'].title() + sd.title() statements[cl['Sid']] = cl cl = copy.deepcopy(parts['listb']) cl['Resource'] = arn_prefix + bucket cl['Sid'] = cl['Sid'].title() + sd.title() cl['Condition']['StringLike']['s3:prefix'] = [sd + '/*'] statements[cl['Sid']] = cl return statements
python
def make_bucket_policy_statements(bucket): """Return the statemtns in a bucket policy as a dict of dicts""" import yaml from os.path import dirname, join, abspath import copy import metatab with open(join(dirname(abspath(metatab.__file__)), 'support', 'policy_parts.yaml')) as f: parts = yaml.load(f) statements = {} cl = copy.deepcopy(parts['list']) cl['Resource'] = arn_prefix + bucket statements['list'] = cl cl = copy.deepcopy(parts['bucket']) cl['Resource'] = arn_prefix + bucket statements['bucket'] = cl for sd in TOP_LEVEL_DIRS: cl = copy.deepcopy(parts['read']) cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*' cl['Sid'] = cl['Sid'].title() + sd.title() statements[cl['Sid']] = cl cl = copy.deepcopy(parts['write']) cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*' cl['Sid'] = cl['Sid'].title() + sd.title() statements[cl['Sid']] = cl cl = copy.deepcopy(parts['listb']) cl['Resource'] = arn_prefix + bucket cl['Sid'] = cl['Sid'].title() + sd.title() cl['Condition']['StringLike']['s3:prefix'] = [sd + '/*'] statements[cl['Sid']] = cl return statements
[ "def", "make_bucket_policy_statements", "(", "bucket", ")", ":", "import", "yaml", "from", "os", ".", "path", "import", "dirname", ",", "join", ",", "abspath", "import", "copy", "import", "metatab", "with", "open", "(", "join", "(", "dirname", "(", "abspath"...
Return the statemtns in a bucket policy as a dict of dicts
[ "Return", "the", "statemtns", "in", "a", "bucket", "policy", "as", "a", "dict", "of", "dicts" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L254-L294
train
50,801
Metatab/metapack
metapack/cli/metaaws.py
bucket_dict_to_policy
def bucket_dict_to_policy(args, bucket_name, d): """ Create a bucket policy document from a permissions dict. The dictionary d maps (user, prefix) to 'R' or 'W'. :param bucket_name: :param d: :return: """ import json iam = get_resource(args, 'iam') statements = make_bucket_policy_statements(bucket_name) user_stats = set() # statement tripples for (user, prefix), mode in d.items(): user_stats.add((user, 'list')) user_stats.add((user, 'bucket')) if mode == 'R': user_stats.add((user, 'Read' + prefix.title())) user_stats.add((user, 'List' + prefix.title())) elif mode == 'W': user_stats.add((user, 'List' + prefix.title())) user_stats.add((user, 'Read' + prefix.title())) user_stats.add((user, 'Write' + prefix.title())) users_arns = {} for user_name, section in user_stats: section = statements[section] if user_name not in users_arns: user = iam.User(user_name) users_arns[user.name] = user else: user = users_arns[user_name] section['Principal']['AWS'].append(user.arn) for sid in list(statements.keys()): if not statements[sid]['Principal']['AWS']: del statements[sid] return json.dumps(dict(Version="2012-10-17", Statement=list(statements.values())), indent=4)
python
def bucket_dict_to_policy(args, bucket_name, d): """ Create a bucket policy document from a permissions dict. The dictionary d maps (user, prefix) to 'R' or 'W'. :param bucket_name: :param d: :return: """ import json iam = get_resource(args, 'iam') statements = make_bucket_policy_statements(bucket_name) user_stats = set() # statement tripples for (user, prefix), mode in d.items(): user_stats.add((user, 'list')) user_stats.add((user, 'bucket')) if mode == 'R': user_stats.add((user, 'Read' + prefix.title())) user_stats.add((user, 'List' + prefix.title())) elif mode == 'W': user_stats.add((user, 'List' + prefix.title())) user_stats.add((user, 'Read' + prefix.title())) user_stats.add((user, 'Write' + prefix.title())) users_arns = {} for user_name, section in user_stats: section = statements[section] if user_name not in users_arns: user = iam.User(user_name) users_arns[user.name] = user else: user = users_arns[user_name] section['Principal']['AWS'].append(user.arn) for sid in list(statements.keys()): if not statements[sid]['Principal']['AWS']: del statements[sid] return json.dumps(dict(Version="2012-10-17", Statement=list(statements.values())), indent=4)
[ "def", "bucket_dict_to_policy", "(", "args", ",", "bucket_name", ",", "d", ")", ":", "import", "json", "iam", "=", "get_resource", "(", "args", ",", "'iam'", ")", "statements", "=", "make_bucket_policy_statements", "(", "bucket_name", ")", "user_stats", "=", "...
Create a bucket policy document from a permissions dict. The dictionary d maps (user, prefix) to 'R' or 'W'. :param bucket_name: :param d: :return:
[ "Create", "a", "bucket", "policy", "document", "from", "a", "permissions", "dict", "." ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L297-L346
train
50,802
Metatab/metapack
metapack/cli/metaaws.py
bucket_policy_to_dict
def bucket_policy_to_dict(policy): """Produce a dictionary of read, write permissions for an existing bucket policy document""" import json if not isinstance(policy, dict): policy = json.loads(policy) statements = {s['Sid']: s for s in policy['Statement']} d = {} for rw in ('Read', 'Write'): for prefix in TOP_LEVEL_DIRS: sid = rw.title() + prefix.title() if sid in statements: if isinstance(statements[sid]['Principal']['AWS'], list): for principal in statements[sid]['Principal']['AWS']: user_name = principal.split('/').pop() d[(user_name, prefix)] = rw[0] else: user_name = statements[sid]['Principal']['AWS'].split('/').pop() d[(user_name, prefix)] = rw[0] return d
python
def bucket_policy_to_dict(policy): """Produce a dictionary of read, write permissions for an existing bucket policy document""" import json if not isinstance(policy, dict): policy = json.loads(policy) statements = {s['Sid']: s for s in policy['Statement']} d = {} for rw in ('Read', 'Write'): for prefix in TOP_LEVEL_DIRS: sid = rw.title() + prefix.title() if sid in statements: if isinstance(statements[sid]['Principal']['AWS'], list): for principal in statements[sid]['Principal']['AWS']: user_name = principal.split('/').pop() d[(user_name, prefix)] = rw[0] else: user_name = statements[sid]['Principal']['AWS'].split('/').pop() d[(user_name, prefix)] = rw[0] return d
[ "def", "bucket_policy_to_dict", "(", "policy", ")", ":", "import", "json", "if", "not", "isinstance", "(", "policy", ",", "dict", ")", ":", "policy", "=", "json", ".", "loads", "(", "policy", ")", "statements", "=", "{", "s", "[", "'Sid'", "]", ":", ...
Produce a dictionary of read, write permissions for an existing bucket policy document
[ "Produce", "a", "dictionary", "of", "read", "write", "permissions", "for", "an", "existing", "bucket", "policy", "document" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L349-L375
train
50,803
Metatab/metapack
metapack/cli/metaaws.py
get_iam_account
def get_iam_account(l, args, user_name): """Return the local Account for a user name, by fetching User and looking up the arn. """ iam = get_resource(args, 'iam') user = iam.User(user_name) user.load() return l.find_or_new_account(user.arn)
python
def get_iam_account(l, args, user_name): """Return the local Account for a user name, by fetching User and looking up the arn. """ iam = get_resource(args, 'iam') user = iam.User(user_name) user.load() return l.find_or_new_account(user.arn)
[ "def", "get_iam_account", "(", "l", ",", "args", ",", "user_name", ")", ":", "iam", "=", "get_resource", "(", "args", ",", "'iam'", ")", "user", "=", "iam", ".", "User", "(", "user_name", ")", "user", ".", "load", "(", ")", "return", "l", ".", "fin...
Return the local Account for a user name, by fetching User and looking up the arn.
[ "Return", "the", "local", "Account", "for", "a", "user", "name", "by", "fetching", "User", "and", "looking", "up", "the", "arn", "." ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L540-L548
train
50,804
boazmohar/pySparkUtils
pySparkUtils/SVD.py
getSVD
def getSVD(data, k, getComponents=False, getS=False, normalization='mean'): """ Wrapper for computeSVD that will normalize and handle a Thunder Images object :param data: Thunder Images object :param k: number of components to keep :param getComponents: will return the components if true, otherwise will return None :returns: projections, components, s """ if normalization == 'nanmean': data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - np.nanmean(x))) elif normalization == 'mean': data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - x.mean())) elif normalization is 'zscore': data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(zscore(x.flatten()))) elif normalization is None: data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten())) else: raise ValueError('Normalization should be one of: mean, nanmean, zscore, None. Got: %s' % normalization) mat = RowMatrix(data2) mat.rows.cache() mat.rows.count() svd = compute_svd(row_matrix=mat, k=k, compute_u=False) if getComponents: components = svd.call("V").toArray() components = components.transpose(1, 0).reshape((k,) + data.shape[1:]) else: components = None projection = np.array(RowMatrix_new(data2).multiply(svd.call("V")).rows.collect()) if getS: s = svd.call("s").toArray() else: s = None return projection, components, s
python
def getSVD(data, k, getComponents=False, getS=False, normalization='mean'): """ Wrapper for computeSVD that will normalize and handle a Thunder Images object :param data: Thunder Images object :param k: number of components to keep :param getComponents: will return the components if true, otherwise will return None :returns: projections, components, s """ if normalization == 'nanmean': data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - np.nanmean(x))) elif normalization == 'mean': data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - x.mean())) elif normalization is 'zscore': data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(zscore(x.flatten()))) elif normalization is None: data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten())) else: raise ValueError('Normalization should be one of: mean, nanmean, zscore, None. Got: %s' % normalization) mat = RowMatrix(data2) mat.rows.cache() mat.rows.count() svd = compute_svd(row_matrix=mat, k=k, compute_u=False) if getComponents: components = svd.call("V").toArray() components = components.transpose(1, 0).reshape((k,) + data.shape[1:]) else: components = None projection = np.array(RowMatrix_new(data2).multiply(svd.call("V")).rows.collect()) if getS: s = svd.call("s").toArray() else: s = None return projection, components, s
[ "def", "getSVD", "(", "data", ",", "k", ",", "getComponents", "=", "False", ",", "getS", "=", "False", ",", "normalization", "=", "'mean'", ")", ":", "if", "normalization", "==", "'nanmean'", ":", "data2", "=", "data", ".", "tordd", "(", ")", ".", "s...
Wrapper for computeSVD that will normalize and handle a Thunder Images object :param data: Thunder Images object :param k: number of components to keep :param getComponents: will return the components if true, otherwise will return None :returns: projections, components, s
[ "Wrapper", "for", "computeSVD", "that", "will", "normalize", "and", "handle", "a", "Thunder", "Images", "object" ]
5891b75327eb8b91af8558642edf7af82c5991b1
https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/SVD.py#L77-L111
train
50,805
NicolasLM/spinach
spinach/utils.py
human_duration
def human_duration(duration_seconds: float) -> str: """Convert a duration in seconds into a human friendly string.""" if duration_seconds < 0.001: return '0 ms' if duration_seconds < 1: return '{} ms'.format(int(duration_seconds * 1000)) return '{} s'.format(int(duration_seconds))
python
def human_duration(duration_seconds: float) -> str: """Convert a duration in seconds into a human friendly string.""" if duration_seconds < 0.001: return '0 ms' if duration_seconds < 1: return '{} ms'.format(int(duration_seconds * 1000)) return '{} s'.format(int(duration_seconds))
[ "def", "human_duration", "(", "duration_seconds", ":", "float", ")", "->", "str", ":", "if", "duration_seconds", "<", "0.001", ":", "return", "'0 ms'", "if", "duration_seconds", "<", "1", ":", "return", "'{} ms'", ".", "format", "(", "int", "(", "duration_se...
Convert a duration in seconds into a human friendly string.
[ "Convert", "a", "duration", "in", "seconds", "into", "a", "human", "friendly", "string", "." ]
0122f916643101eab5cdc1f3da662b9446e372aa
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/utils.py#L13-L19
train
50,806
NicolasLM/spinach
spinach/utils.py
call_with_retry
def call_with_retry(func: Callable, exceptions, max_retries: int, logger: Logger, *args, **kwargs): """Call a function and retry it on failure.""" attempt = 0 while True: try: return func(*args, **kwargs) except exceptions as e: attempt += 1 if attempt >= max_retries: raise delay = exponential_backoff(attempt, cap=60) logger.warning('%s: retrying in %s', e, delay) time.sleep(delay.total_seconds())
python
def call_with_retry(func: Callable, exceptions, max_retries: int, logger: Logger, *args, **kwargs): """Call a function and retry it on failure.""" attempt = 0 while True: try: return func(*args, **kwargs) except exceptions as e: attempt += 1 if attempt >= max_retries: raise delay = exponential_backoff(attempt, cap=60) logger.warning('%s: retrying in %s', e, delay) time.sleep(delay.total_seconds())
[ "def", "call_with_retry", "(", "func", ":", "Callable", ",", "exceptions", ",", "max_retries", ":", "int", ",", "logger", ":", "Logger", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "attempt", "=", "0", "while", "True", ":", "try", ":", "retu...
Call a function and retry it on failure.
[ "Call", "a", "function", "and", "retry", "it", "on", "failure", "." ]
0122f916643101eab5cdc1f3da662b9446e372aa
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/utils.py#L49-L63
train
50,807
NicolasLM/spinach
spinach/utils.py
exponential_backoff
def exponential_backoff(attempt: int, cap: int=1200) -> timedelta: """Calculate a delay to retry using an exponential backoff algorithm. It is an exponential backoff with random jitter to prevent failures from being retried at the same time. It is a good fit for most applications. :arg attempt: the number of attempts made :arg cap: maximum delay, defaults to 20 minutes """ base = 3 temp = min(base * 2 ** attempt, cap) return timedelta(seconds=temp / 2 + random.randint(0, temp / 2))
python
def exponential_backoff(attempt: int, cap: int=1200) -> timedelta: """Calculate a delay to retry using an exponential backoff algorithm. It is an exponential backoff with random jitter to prevent failures from being retried at the same time. It is a good fit for most applications. :arg attempt: the number of attempts made :arg cap: maximum delay, defaults to 20 minutes """ base = 3 temp = min(base * 2 ** attempt, cap) return timedelta(seconds=temp / 2 + random.randint(0, temp / 2))
[ "def", "exponential_backoff", "(", "attempt", ":", "int", ",", "cap", ":", "int", "=", "1200", ")", "->", "timedelta", ":", "base", "=", "3", "temp", "=", "min", "(", "base", "*", "2", "**", "attempt", ",", "cap", ")", "return", "timedelta", "(", "...
Calculate a delay to retry using an exponential backoff algorithm. It is an exponential backoff with random jitter to prevent failures from being retried at the same time. It is a good fit for most applications. :arg attempt: the number of attempts made :arg cap: maximum delay, defaults to 20 minutes
[ "Calculate", "a", "delay", "to", "retry", "using", "an", "exponential", "backoff", "algorithm", "." ]
0122f916643101eab5cdc1f3da662b9446e372aa
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/utils.py#L66-L78
train
50,808
project-rig/rig
rig/type_casts.py
float_to_fp
def float_to_fp(signed, n_bits, n_frac): """Return a function to convert a floating point value to a fixed point value. For example, a function to convert a float to a signed fractional representation with 8 bits overall and 4 fractional bits (S3.4) can be constructed and used with:: >>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4) >>> hex(int(s34(0.5))) '0x8' The fixed point conversion is saturating:: >>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4 >>> hex(int(q34(-0.5))) '0x0' >>> hex(int(q34(15.0))) '0xf0' >>> hex(int(q34(16.0))) '0xff' Parameters ---------- signed : bool Whether the values that are to be converted should be signed, or clipped at zero. >>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed '-0x8' >>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned '0x0' n_bits : int Total number of bits in the fixed-point representation (including sign bit and fractional bits). n_frac : int Number of fractional bits in the fixed-point representation. """ # Calculate the maximum and minimum values if signed: max_v = (1 << (n_bits - 1)) - 1 min_v = -max_v - 1 else: min_v = 0 max_v = (1 << n_bits) - 1 # Compute the scale scale = 2.0**n_frac def bitsk(value): """Convert a floating point value to a fixed point value. Parameters ---------- value : float The value to convert. """ int_val = int(scale * value) return max((min(max_v, int_val), min_v)) return bitsk
python
def float_to_fp(signed, n_bits, n_frac): """Return a function to convert a floating point value to a fixed point value. For example, a function to convert a float to a signed fractional representation with 8 bits overall and 4 fractional bits (S3.4) can be constructed and used with:: >>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4) >>> hex(int(s34(0.5))) '0x8' The fixed point conversion is saturating:: >>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4 >>> hex(int(q34(-0.5))) '0x0' >>> hex(int(q34(15.0))) '0xf0' >>> hex(int(q34(16.0))) '0xff' Parameters ---------- signed : bool Whether the values that are to be converted should be signed, or clipped at zero. >>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed '-0x8' >>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned '0x0' n_bits : int Total number of bits in the fixed-point representation (including sign bit and fractional bits). n_frac : int Number of fractional bits in the fixed-point representation. """ # Calculate the maximum and minimum values if signed: max_v = (1 << (n_bits - 1)) - 1 min_v = -max_v - 1 else: min_v = 0 max_v = (1 << n_bits) - 1 # Compute the scale scale = 2.0**n_frac def bitsk(value): """Convert a floating point value to a fixed point value. Parameters ---------- value : float The value to convert. """ int_val = int(scale * value) return max((min(max_v, int_val), min_v)) return bitsk
[ "def", "float_to_fp", "(", "signed", ",", "n_bits", ",", "n_frac", ")", ":", "# Calculate the maximum and minimum values", "if", "signed", ":", "max_v", "=", "(", "1", "<<", "(", "n_bits", "-", "1", ")", ")", "-", "1", "min_v", "=", "-", "max_v", "-", ...
Return a function to convert a floating point value to a fixed point value. For example, a function to convert a float to a signed fractional representation with 8 bits overall and 4 fractional bits (S3.4) can be constructed and used with:: >>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4) >>> hex(int(s34(0.5))) '0x8' The fixed point conversion is saturating:: >>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4 >>> hex(int(q34(-0.5))) '0x0' >>> hex(int(q34(15.0))) '0xf0' >>> hex(int(q34(16.0))) '0xff' Parameters ---------- signed : bool Whether the values that are to be converted should be signed, or clipped at zero. >>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed '-0x8' >>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned '0x0' n_bits : int Total number of bits in the fixed-point representation (including sign bit and fractional bits). n_frac : int Number of fractional bits in the fixed-point representation.
[ "Return", "a", "function", "to", "convert", "a", "floating", "point", "value", "to", "a", "fixed", "point", "value", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/type_casts.py#L7-L70
train
50,809
happyleavesaoc/python-voobly
voobly/__init__.py
get_metadata_path
def get_metadata_path(name): """Get reference metadata file path.""" return pkg_resources.resource_filename('voobly', os.path.join(METADATA_PATH, '{}.json'.format(name)))
python
def get_metadata_path(name): """Get reference metadata file path.""" return pkg_resources.resource_filename('voobly', os.path.join(METADATA_PATH, '{}.json'.format(name)))
[ "def", "get_metadata_path", "(", "name", ")", ":", "return", "pkg_resources", ".", "resource_filename", "(", "'voobly'", ",", "os", ".", "path", ".", "join", "(", "METADATA_PATH", ",", "'{}.json'", ".", "format", "(", "name", ")", ")", ")" ]
Get reference metadata file path.
[ "Get", "reference", "metadata", "file", "path", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L85-L87
train
50,810
happyleavesaoc/python-voobly
voobly/__init__.py
_make_request
def _make_request(session, url, argument=None, params=None, raw=False): """Make a request to API endpoint.""" if not params: params = {} params['key'] = session.auth.key try: if argument: request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument) else: request_url = '{}{}'.format(VOOBLY_API_URL, url) resp = session.get(request_url, params=params) except RequestException: raise VooblyError('failed to connect') if resp.text == 'bad-key': raise VooblyError('bad api key') elif resp.text == 'too-busy': raise VooblyError('service too busy') elif not resp.text: raise VooblyError('no data returned') if raw: return resp.text try: return tablib.Dataset().load(resp.text).dict except UnsupportedFormat: raise VooblyError('unexpected error {}'.format(resp.text))
python
def _make_request(session, url, argument=None, params=None, raw=False): """Make a request to API endpoint.""" if not params: params = {} params['key'] = session.auth.key try: if argument: request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument) else: request_url = '{}{}'.format(VOOBLY_API_URL, url) resp = session.get(request_url, params=params) except RequestException: raise VooblyError('failed to connect') if resp.text == 'bad-key': raise VooblyError('bad api key') elif resp.text == 'too-busy': raise VooblyError('service too busy') elif not resp.text: raise VooblyError('no data returned') if raw: return resp.text try: return tablib.Dataset().load(resp.text).dict except UnsupportedFormat: raise VooblyError('unexpected error {}'.format(resp.text))
[ "def", "_make_request", "(", "session", ",", "url", ",", "argument", "=", "None", ",", "params", "=", "None", ",", "raw", "=", "False", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "params", "[", "'key'", "]", "=", "session", ".",...
Make a request to API endpoint.
[ "Make", "a", "request", "to", "API", "endpoint", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L112-L136
train
50,811
happyleavesaoc/python-voobly
voobly/__init__.py
make_scrape_request
def make_scrape_request(session, url, mode='get', data=None): """Make a request to URL.""" try: html = session.request(mode, url, data=data) except RequestException: raise VooblyError('failed to connect') if SCRAPE_FETCH_ERROR in html.text: raise VooblyError('not logged in') if html.status_code != 200 or SCRAPE_PAGE_NOT_FOUND in html.text: raise VooblyError('page not found') return bs4.BeautifulSoup(html.text, features='lxml')
python
def make_scrape_request(session, url, mode='get', data=None): """Make a request to URL.""" try: html = session.request(mode, url, data=data) except RequestException: raise VooblyError('failed to connect') if SCRAPE_FETCH_ERROR in html.text: raise VooblyError('not logged in') if html.status_code != 200 or SCRAPE_PAGE_NOT_FOUND in html.text: raise VooblyError('page not found') return bs4.BeautifulSoup(html.text, features='lxml')
[ "def", "make_scrape_request", "(", "session", ",", "url", ",", "mode", "=", "'get'", ",", "data", "=", "None", ")", ":", "try", ":", "html", "=", "session", ".", "request", "(", "mode", ",", "url", ",", "data", "=", "data", ")", "except", "RequestExc...
Make a request to URL.
[ "Make", "a", "request", "to", "URL", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L139-L149
train
50,812
happyleavesaoc/python-voobly
voobly/__init__.py
get_ladder
def get_ladder(session, ladder_id, user_id=None, user_ids=None, start=0, limit=LADDER_RESULT_LIMIT): """Get ladder.""" params = { 'start': start, 'limit': limit } if isinstance(ladder_id, str): ladder_id = lookup_ladder_id(ladder_id) if limit > LADDER_RESULT_LIMIT: raise VooblyError('limited to 40 rows') if user_ids: params['uidlist'] = ','.join([str(uid) for uid in user_ids]) elif user_id: params['uid'] = user_id resp = _make_request(session, LADDER_URL, ladder_id, params) if user_id: if not resp: raise VooblyError('user not ranked') return resp[0] return resp
python
def get_ladder(session, ladder_id, user_id=None, user_ids=None, start=0, limit=LADDER_RESULT_LIMIT): """Get ladder.""" params = { 'start': start, 'limit': limit } if isinstance(ladder_id, str): ladder_id = lookup_ladder_id(ladder_id) if limit > LADDER_RESULT_LIMIT: raise VooblyError('limited to 40 rows') if user_ids: params['uidlist'] = ','.join([str(uid) for uid in user_ids]) elif user_id: params['uid'] = user_id resp = _make_request(session, LADDER_URL, ladder_id, params) if user_id: if not resp: raise VooblyError('user not ranked') return resp[0] return resp
[ "def", "get_ladder", "(", "session", ",", "ladder_id", ",", "user_id", "=", "None", ",", "user_ids", "=", "None", ",", "start", "=", "0", ",", "limit", "=", "LADDER_RESULT_LIMIT", ")", ":", "params", "=", "{", "'start'", ":", "start", ",", "'limit'", "...
Get ladder.
[ "Get", "ladder", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L167-L186
train
50,813
happyleavesaoc/python-voobly
voobly/__init__.py
get_lobbies
def get_lobbies(session, game_id): """Get lobbies for a game.""" if isinstance(game_id, str): game_id = lookup_game_id(game_id) lobbies = _make_request(session, LOBBY_URL, game_id) for lobby in lobbies: # pylint: disable=len-as-condition if len(lobby['ladders']) > 0: lobby['ladders'] = lobby['ladders'][:-1].split('|') return lobbies
python
def get_lobbies(session, game_id): """Get lobbies for a game.""" if isinstance(game_id, str): game_id = lookup_game_id(game_id) lobbies = _make_request(session, LOBBY_URL, game_id) for lobby in lobbies: # pylint: disable=len-as-condition if len(lobby['ladders']) > 0: lobby['ladders'] = lobby['ladders'][:-1].split('|') return lobbies
[ "def", "get_lobbies", "(", "session", ",", "game_id", ")", ":", "if", "isinstance", "(", "game_id", ",", "str", ")", ":", "game_id", "=", "lookup_game_id", "(", "game_id", ")", "lobbies", "=", "_make_request", "(", "session", ",", "LOBBY_URL", ",", "game_i...
Get lobbies for a game.
[ "Get", "lobbies", "for", "a", "game", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L189-L198
train
50,814
happyleavesaoc/python-voobly
voobly/__init__.py
get_user
def get_user(session, user_id): """Get user.""" try: user_id = int(user_id) except ValueError: user_id = find_user(session, user_id) resp = _make_request(session, USER_URL, user_id) if not resp: raise VooblyError('user id not found') return resp[0]
python
def get_user(session, user_id): """Get user.""" try: user_id = int(user_id) except ValueError: user_id = find_user(session, user_id) resp = _make_request(session, USER_URL, user_id) if not resp: raise VooblyError('user id not found') return resp[0]
[ "def", "get_user", "(", "session", ",", "user_id", ")", ":", "try", ":", "user_id", "=", "int", "(", "user_id", ")", "except", "ValueError", ":", "user_id", "=", "find_user", "(", "session", ",", "user_id", ")", "resp", "=", "_make_request", "(", "sessio...
Get user.
[ "Get", "user", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L201-L210
train
50,815
happyleavesaoc/python-voobly
voobly/__init__.py
find_user
def find_user(session, username): """Find user by name - returns user ID.""" resp = _make_request(session, FIND_USER_URL, username) if not resp: raise VooblyError('user not found') try: return int(resp[0]['uid']) except ValueError: raise VooblyError('user not found')
python
def find_user(session, username): """Find user by name - returns user ID.""" resp = _make_request(session, FIND_USER_URL, username) if not resp: raise VooblyError('user not found') try: return int(resp[0]['uid']) except ValueError: raise VooblyError('user not found')
[ "def", "find_user", "(", "session", ",", "username", ")", ":", "resp", "=", "_make_request", "(", "session", ",", "FIND_USER_URL", ",", "username", ")", "if", "not", "resp", ":", "raise", "VooblyError", "(", "'user not found'", ")", "try", ":", "return", "...
Find user by name - returns user ID.
[ "Find", "user", "by", "name", "-", "returns", "user", "ID", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L213-L221
train
50,816
happyleavesaoc/python-voobly
voobly/__init__.py
find_users
def find_users(session, *usernames): """Find multiple users by name.""" user_string = ','.join(usernames) return _make_request(session, FIND_USERS_URL, user_string)
python
def find_users(session, *usernames): """Find multiple users by name.""" user_string = ','.join(usernames) return _make_request(session, FIND_USERS_URL, user_string)
[ "def", "find_users", "(", "session", ",", "*", "usernames", ")", ":", "user_string", "=", "','", ".", "join", "(", "usernames", ")", "return", "_make_request", "(", "session", ",", "FIND_USERS_URL", ",", "user_string", ")" ]
Find multiple users by name.
[ "Find", "multiple", "users", "by", "name", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L224-L227
train
50,817
happyleavesaoc/python-voobly
voobly/__init__.py
user
def user(session, uid, ladder_ids=None): """Get all possible user info by name.""" data = get_user(session, uid) resp = dict(data) if not ladder_ids: return resp resp['ladders'] = {} for ladder_id in ladder_ids: if isinstance(ladder_id, str): ladder_id = lookup_ladder_id(ladder_id) try: ladder_data = dict(get_ladder(session, ladder_id, user_id=uid)) resp['ladders'][ladder_id] = ladder_data except VooblyError: # No ranking on ladder pass return resp
python
def user(session, uid, ladder_ids=None): """Get all possible user info by name.""" data = get_user(session, uid) resp = dict(data) if not ladder_ids: return resp resp['ladders'] = {} for ladder_id in ladder_ids: if isinstance(ladder_id, str): ladder_id = lookup_ladder_id(ladder_id) try: ladder_data = dict(get_ladder(session, ladder_id, user_id=uid)) resp['ladders'][ladder_id] = ladder_data except VooblyError: # No ranking on ladder pass return resp
[ "def", "user", "(", "session", ",", "uid", ",", "ladder_ids", "=", "None", ")", ":", "data", "=", "get_user", "(", "session", ",", "uid", ")", "resp", "=", "dict", "(", "data", ")", "if", "not", "ladder_ids", ":", "return", "resp", "resp", "[", "'l...
Get all possible user info by name.
[ "Get", "all", "possible", "user", "info", "by", "name", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L236-L252
train
50,818
happyleavesaoc/python-voobly
voobly/__init__.py
ladders
def ladders(session, game_id): """Get a list of ladder IDs.""" if isinstance(game_id, str): game_id = lookup_game_id(game_id) lobbies = get_lobbies(session, game_id) ladder_ids = set() for lobby in lobbies: ladder_ids |= set(lobby['ladders']) return list(ladder_ids)
python
def ladders(session, game_id): """Get a list of ladder IDs.""" if isinstance(game_id, str): game_id = lookup_game_id(game_id) lobbies = get_lobbies(session, game_id) ladder_ids = set() for lobby in lobbies: ladder_ids |= set(lobby['ladders']) return list(ladder_ids)
[ "def", "ladders", "(", "session", ",", "game_id", ")", ":", "if", "isinstance", "(", "game_id", ",", "str", ")", ":", "game_id", "=", "lookup_game_id", "(", "game_id", ")", "lobbies", "=", "get_lobbies", "(", "session", ",", "game_id", ")", "ladder_ids", ...
Get a list of ladder IDs.
[ "Get", "a", "list", "of", "ladder", "IDs", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L255-L263
train
50,819
happyleavesaoc/python-voobly
voobly/__init__.py
get_clan_matches
def get_clan_matches(session, subdomain, clan_id, from_timestamp=None, limit=None): """Get recent matches by clan.""" return get_recent_matches(session, 'https://{}.voobly.com/{}/{}/0'.format( subdomain, TEAM_MATCHES_URL, clan_id), from_timestamp, limit)
python
def get_clan_matches(session, subdomain, clan_id, from_timestamp=None, limit=None): """Get recent matches by clan.""" return get_recent_matches(session, 'https://{}.voobly.com/{}/{}/0'.format( subdomain, TEAM_MATCHES_URL, clan_id), from_timestamp, limit)
[ "def", "get_clan_matches", "(", "session", ",", "subdomain", ",", "clan_id", ",", "from_timestamp", "=", "None", ",", "limit", "=", "None", ")", ":", "return", "get_recent_matches", "(", "session", ",", "'https://{}.voobly.com/{}/{}/0'", ".", "format", "(", "sub...
Get recent matches by clan.
[ "Get", "recent", "matches", "by", "clan", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L368-L371
train
50,820
happyleavesaoc/python-voobly
voobly/__init__.py
get_user_matches
def get_user_matches(session, user_id, from_timestamp=None, limit=None): """Get recent matches by user.""" return get_recent_matches(session, '{}{}/{}/Matches/games/matches/user/{}/0'.format( session.auth.base_url, PROFILE_URL, user_id, user_id), from_timestamp, limit)
python
def get_user_matches(session, user_id, from_timestamp=None, limit=None): """Get recent matches by user.""" return get_recent_matches(session, '{}{}/{}/Matches/games/matches/user/{}/0'.format( session.auth.base_url, PROFILE_URL, user_id, user_id), from_timestamp, limit)
[ "def", "get_user_matches", "(", "session", ",", "user_id", ",", "from_timestamp", "=", "None", ",", "limit", "=", "None", ")", ":", "return", "get_recent_matches", "(", "session", ",", "'{}{}/{}/Matches/games/matches/user/{}/0'", ".", "format", "(", "session", "."...
Get recent matches by user.
[ "Get", "recent", "matches", "by", "user", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L375-L378
train
50,821
happyleavesaoc/python-voobly
voobly/__init__.py
get_recent_matches
def get_recent_matches(session, init_url, from_timestamp, limit): """Get recently played user matches.""" if not from_timestamp: from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1) matches = [] page_id = 0 done = False while not done and page_id < MAX_MATCH_PAGE_ID: url = '{}/{}'.format(init_url, page_id) parsed = make_scrape_request(session, url) for row in parsed.find('table').find_all('tr')[1:]: cols = row.find_all('td') played_at = dateparser.parse(cols[2].text) match_id = int(cols[5].find('a').text[1:]) has_rec = cols[6].find('a').find('img') if played_at < from_timestamp or (limit and len(matches) == limit): done = True break if not has_rec: continue matches.append({ 'timestamp': played_at, 'match_id': match_id }) if not matches: break page_id += 1 return matches
python
def get_recent_matches(session, init_url, from_timestamp, limit): """Get recently played user matches.""" if not from_timestamp: from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1) matches = [] page_id = 0 done = False while not done and page_id < MAX_MATCH_PAGE_ID: url = '{}/{}'.format(init_url, page_id) parsed = make_scrape_request(session, url) for row in parsed.find('table').find_all('tr')[1:]: cols = row.find_all('td') played_at = dateparser.parse(cols[2].text) match_id = int(cols[5].find('a').text[1:]) has_rec = cols[6].find('a').find('img') if played_at < from_timestamp or (limit and len(matches) == limit): done = True break if not has_rec: continue matches.append({ 'timestamp': played_at, 'match_id': match_id }) if not matches: break page_id += 1 return matches
[ "def", "get_recent_matches", "(", "session", ",", "init_url", ",", "from_timestamp", ",", "limit", ")", ":", "if", "not", "from_timestamp", ":", "from_timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "datetime", ".", "timedelta", "(",...
Get recently played user matches.
[ "Get", "recently", "played", "user", "matches", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L381-L408
train
50,822
happyleavesaoc/python-voobly
voobly/__init__.py
get_ladder_matches
def get_ladder_matches(session, ladder_id, from_timestamp=None, limit=LADDER_MATCH_LIMIT): """Get recently played ladder matches.""" if not from_timestamp: from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1) matches = [] page_id = 0 done = False i = 0 while not done and page_id < MAX_LADDER_PAGE_ID: url = '{}{}/{}/{}'.format(session.auth.base_url, LADDER_MATCHES_URL, lookup_ladder_id(ladder_id), page_id) parsed = make_scrape_request(session, url) for row in parsed.find(text='Recent Matches').find_next('table').find_all('tr')[1:]: cols = row.find_all('td') played_at = dateparser.parse(cols[0].text) match_id = int(cols[1].find('a').text[1:]) has_rec = cols[4].find('a').find('img') if not has_rec: continue if played_at < from_timestamp or i >= limit: done = True break matches.append({ 'timestamp': played_at, 'match_id': match_id }) i += 1 page_id += 1 return matches
python
def get_ladder_matches(session, ladder_id, from_timestamp=None, limit=LADDER_MATCH_LIMIT): """Get recently played ladder matches.""" if not from_timestamp: from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1) matches = [] page_id = 0 done = False i = 0 while not done and page_id < MAX_LADDER_PAGE_ID: url = '{}{}/{}/{}'.format(session.auth.base_url, LADDER_MATCHES_URL, lookup_ladder_id(ladder_id), page_id) parsed = make_scrape_request(session, url) for row in parsed.find(text='Recent Matches').find_next('table').find_all('tr')[1:]: cols = row.find_all('td') played_at = dateparser.parse(cols[0].text) match_id = int(cols[1].find('a').text[1:]) has_rec = cols[4].find('a').find('img') if not has_rec: continue if played_at < from_timestamp or i >= limit: done = True break matches.append({ 'timestamp': played_at, 'match_id': match_id }) i += 1 page_id += 1 return matches
[ "def", "get_ladder_matches", "(", "session", ",", "ladder_id", ",", "from_timestamp", "=", "None", ",", "limit", "=", "LADDER_MATCH_LIMIT", ")", ":", "if", "not", "from_timestamp", ":", "from_timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ...
Get recently played ladder matches.
[ "Get", "recently", "played", "ladder", "matches", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L412-L439
train
50,823
happyleavesaoc/python-voobly
voobly/__init__.py
get_match
def get_match(session, match_id): """Get match metadata.""" url = '{}{}/{}'.format(session.auth.base_url, MATCH_URL, match_id) parsed = make_scrape_request(session, url) game = parsed.find('h3').text if game != GAME_AOC: raise ValueError('not an aoc match') date_played = parsed.find(text=MATCH_DATE_PLAYED).find_next('td').text players = [] colors = {} player_count = int(parsed.find('td', text='Players:').find_next('td').text) for div in parsed.find_all('div', style=True): if not div['style'].startswith('background-color:'): continue if len(players) == player_count: break username_elem = div.find_next('a', href=re.compile(PROFILE_PATH)) username = username_elem.text color = div['style'].split(':')[1].split(';')[0].strip() colors[username] = color rec = None for dl_elem in parsed.find_all('a', href=re.compile('^/files/view')): rec_name = dl_elem.find('b', text=re.compile(username+'$')) if rec_name: rec = rec_name.parent user = parsed.find('a', text=username) if not user: # bugged match page continue user_id = int(user['href'].split('/')[-1]) children = list(user.find_next('span').children) rate_after = None rate_before = None if str(children[0]).strip() == MATCH_NEW_RATE: rate_after = int(children[1].text) rate_before = rate_after - int(children[3].text) elif str(children[4]).strip() == MATCH_NEW_RATE: rate_after = int(children[5].text) rate_before = rate_after - int(children[3].text) players.append({ 'url': rec['href'] if rec else None, 'id': user_id, 'username': username, 'color_id': COLOR_MAPPING.get(colors[username]), 'rate_before': rate_before, 'rate_after': rate_after }) return { 'timestamp': dateparser.parse(date_played), 'players': players }
python
def get_match(session, match_id): """Get match metadata.""" url = '{}{}/{}'.format(session.auth.base_url, MATCH_URL, match_id) parsed = make_scrape_request(session, url) game = parsed.find('h3').text if game != GAME_AOC: raise ValueError('not an aoc match') date_played = parsed.find(text=MATCH_DATE_PLAYED).find_next('td').text players = [] colors = {} player_count = int(parsed.find('td', text='Players:').find_next('td').text) for div in parsed.find_all('div', style=True): if not div['style'].startswith('background-color:'): continue if len(players) == player_count: break username_elem = div.find_next('a', href=re.compile(PROFILE_PATH)) username = username_elem.text color = div['style'].split(':')[1].split(';')[0].strip() colors[username] = color rec = None for dl_elem in parsed.find_all('a', href=re.compile('^/files/view')): rec_name = dl_elem.find('b', text=re.compile(username+'$')) if rec_name: rec = rec_name.parent user = parsed.find('a', text=username) if not user: # bugged match page continue user_id = int(user['href'].split('/')[-1]) children = list(user.find_next('span').children) rate_after = None rate_before = None if str(children[0]).strip() == MATCH_NEW_RATE: rate_after = int(children[1].text) rate_before = rate_after - int(children[3].text) elif str(children[4]).strip() == MATCH_NEW_RATE: rate_after = int(children[5].text) rate_before = rate_after - int(children[3].text) players.append({ 'url': rec['href'] if rec else None, 'id': user_id, 'username': username, 'color_id': COLOR_MAPPING.get(colors[username]), 'rate_before': rate_before, 'rate_after': rate_after }) return { 'timestamp': dateparser.parse(date_played), 'players': players }
[ "def", "get_match", "(", "session", ",", "match_id", ")", ":", "url", "=", "'{}{}/{}'", ".", "format", "(", "session", ".", "auth", ".", "base_url", ",", "MATCH_URL", ",", "match_id", ")", "parsed", "=", "make_scrape_request", "(", "session", ",", "url", ...
Get match metadata.
[ "Get", "match", "metadata", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L443-L495
train
50,824
happyleavesaoc/python-voobly
voobly/__init__.py
download_rec
def download_rec(session, rec_url, target_path): """Download and extract a recorded game.""" try: resp = session.get(session.auth.base_url + rec_url) except RequestException: raise VooblyError('failed to connect for download') try: downloaded = zipfile.ZipFile(io.BytesIO(resp.content)) downloaded.extractall(target_path) except zipfile.BadZipFile: raise VooblyError('invalid zip file') return downloaded.namelist()[0]
python
def download_rec(session, rec_url, target_path): """Download and extract a recorded game.""" try: resp = session.get(session.auth.base_url + rec_url) except RequestException: raise VooblyError('failed to connect for download') try: downloaded = zipfile.ZipFile(io.BytesIO(resp.content)) downloaded.extractall(target_path) except zipfile.BadZipFile: raise VooblyError('invalid zip file') return downloaded.namelist()[0]
[ "def", "download_rec", "(", "session", ",", "rec_url", ",", "target_path", ")", ":", "try", ":", "resp", "=", "session", ".", "get", "(", "session", ".", "auth", ".", "base_url", "+", "rec_url", ")", "except", "RequestException", ":", "raise", "VooblyError...
Download and extract a recorded game.
[ "Download", "and", "extract", "a", "recorded", "game", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L499-L510
train
50,825
happyleavesaoc/python-voobly
voobly/__init__.py
login
def login(session): """Login to Voobly.""" if not session.auth.username or not session.auth.password: raise VooblyError('must supply username and password') _LOGGER.info("logging in (no valid cookie found)") session.cookies.clear() try: session.get(session.auth.base_url + LOGIN_PAGE) resp = session.post(session.auth.base_url + LOGIN_URL, data={ 'username': session.auth.username, 'password': session.auth.password }) except RequestException: raise VooblyError('failed to connect for login') if resp.status_code != 200: raise VooblyError('failed to login') _save_cookies(session.cookies, session.auth.cookie_path)
python
def login(session): """Login to Voobly.""" if not session.auth.username or not session.auth.password: raise VooblyError('must supply username and password') _LOGGER.info("logging in (no valid cookie found)") session.cookies.clear() try: session.get(session.auth.base_url + LOGIN_PAGE) resp = session.post(session.auth.base_url + LOGIN_URL, data={ 'username': session.auth.username, 'password': session.auth.password }) except RequestException: raise VooblyError('failed to connect for login') if resp.status_code != 200: raise VooblyError('failed to login') _save_cookies(session.cookies, session.auth.cookie_path)
[ "def", "login", "(", "session", ")", ":", "if", "not", "session", ".", "auth", ".", "username", "or", "not", "session", ".", "auth", ".", "password", ":", "raise", "VooblyError", "(", "'must supply username and password'", ")", "_LOGGER", ".", "info", "(", ...
Login to Voobly.
[ "Login", "to", "Voobly", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L513-L529
train
50,826
happyleavesaoc/python-voobly
voobly/__init__.py
get_session
def get_session(key=None, username=None, password=None, cache=True, cache_expiry=datetime.timedelta(days=7), cookie_path=COOKIE_PATH, backend='memory', version=VERSION_GLOBAL): """Get Voobly API session.""" class VooblyAuth(AuthBase): # pylint: disable=too-few-public-methods """Voobly authorization storage.""" def __init__(self, key, username, password, cookie_path, version): """Init.""" self.key = key self.username = username self.password = password self.cookie_path = cookie_path self.base_url = BASE_URLS[version] def __call__(self, r): """Call is no-op.""" return r if version not in BASE_URLS: raise ValueError('unsupported voobly version') session = requests.session() if cache: session = requests_cache.core.CachedSession(expire_after=cache_expiry, backend=backend) session.auth = VooblyAuth(key, username, password, cookie_path, version) if os.path.exists(cookie_path): _LOGGER.info("cookie found at: %s", cookie_path) session.cookies = _load_cookies(cookie_path) return session
python
def get_session(key=None, username=None, password=None, cache=True, cache_expiry=datetime.timedelta(days=7), cookie_path=COOKIE_PATH, backend='memory', version=VERSION_GLOBAL): """Get Voobly API session.""" class VooblyAuth(AuthBase): # pylint: disable=too-few-public-methods """Voobly authorization storage.""" def __init__(self, key, username, password, cookie_path, version): """Init.""" self.key = key self.username = username self.password = password self.cookie_path = cookie_path self.base_url = BASE_URLS[version] def __call__(self, r): """Call is no-op.""" return r if version not in BASE_URLS: raise ValueError('unsupported voobly version') session = requests.session() if cache: session = requests_cache.core.CachedSession(expire_after=cache_expiry, backend=backend) session.auth = VooblyAuth(key, username, password, cookie_path, version) if os.path.exists(cookie_path): _LOGGER.info("cookie found at: %s", cookie_path) session.cookies = _load_cookies(cookie_path) return session
[ "def", "get_session", "(", "key", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "cache", "=", "True", ",", "cache_expiry", "=", "datetime", ".", "timedelta", "(", "days", "=", "7", ")", ",", "cookie_path", "=", "COOKIE_P...
Get Voobly API session.
[ "Get", "Voobly", "API", "session", "." ]
83b4ab7d630a00459c2a64e55e3ac85c7be38194
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L532-L561
train
50,827
openstack/networking-hyperv
networking_hyperv/neutron/agent/hyperv_neutron_agent.py
main
def main(): """The entry point for the Hyper-V Neutron Agent.""" neutron_config.register_agent_state_opts_helper(CONF) common_config.init(sys.argv[1:]) neutron_config.setup_logging() hyperv_agent = HyperVNeutronAgent() # Start everything. LOG.info("Agent initialized successfully, now running... ") hyperv_agent.daemon_loop()
python
def main(): """The entry point for the Hyper-V Neutron Agent.""" neutron_config.register_agent_state_opts_helper(CONF) common_config.init(sys.argv[1:]) neutron_config.setup_logging() hyperv_agent = HyperVNeutronAgent() # Start everything. LOG.info("Agent initialized successfully, now running... ") hyperv_agent.daemon_loop()
[ "def", "main", "(", ")", ":", "neutron_config", ".", "register_agent_state_opts_helper", "(", "CONF", ")", "common_config", ".", "init", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "neutron_config", ".", "setup_logging", "(", ")", "hyperv_agent", "=", ...
The entry point for the Hyper-V Neutron Agent.
[ "The", "entry", "point", "for", "the", "Hyper", "-", "V", "Neutron", "Agent", "." ]
7a89306ab0586c95b99debb44d898f70834508b9
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L291-L301
train
50,828
openstack/networking-hyperv
networking_hyperv/neutron/agent/hyperv_neutron_agent.py
HyperVNeutronAgent._setup_qos_extension
def _setup_qos_extension(self): """Setup the QOS extension if it is required.""" if not CONF.AGENT.enable_qos_extension: return self._qos_ext = qos_extension.QosAgentExtension() self._qos_ext.consume_api(self) self._qos_ext.initialize(self._connection, 'hyperv')
python
def _setup_qos_extension(self): """Setup the QOS extension if it is required.""" if not CONF.AGENT.enable_qos_extension: return self._qos_ext = qos_extension.QosAgentExtension() self._qos_ext.consume_api(self) self._qos_ext.initialize(self._connection, 'hyperv')
[ "def", "_setup_qos_extension", "(", "self", ")", ":", "if", "not", "CONF", ".", "AGENT", ".", "enable_qos_extension", ":", "return", "self", ".", "_qos_ext", "=", "qos_extension", ".", "QosAgentExtension", "(", ")", "self", ".", "_qos_ext", ".", "consume_api",...
Setup the QOS extension if it is required.
[ "Setup", "the", "QOS", "extension", "if", "it", "is", "required", "." ]
7a89306ab0586c95b99debb44d898f70834508b9
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L126-L132
train
50,829
Metatab/metapack
metapack/jupyter/script.py
ScriptIPython.run_cell_magic
def run_cell_magic(self, magic_name, line, cell): """Run a limited number of magics from scripts, without IPython""" if magic_name == 'bash': self.shebang("bash", cell) elif magic_name == 'metatab': self.mm.metatab(line, cell)
python
def run_cell_magic(self, magic_name, line, cell): """Run a limited number of magics from scripts, without IPython""" if magic_name == 'bash': self.shebang("bash", cell) elif magic_name == 'metatab': self.mm.metatab(line, cell)
[ "def", "run_cell_magic", "(", "self", ",", "magic_name", ",", "line", ",", "cell", ")", ":", "if", "magic_name", "==", "'bash'", ":", "self", ".", "shebang", "(", "\"bash\"", ",", "cell", ")", "elif", "magic_name", "==", "'metatab'", ":", "self", ".", ...
Run a limited number of magics from scripts, without IPython
[ "Run", "a", "limited", "number", "of", "magics", "from", "scripts", "without", "IPython" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/script.py#L51-L57
train
50,830
Metatab/metapack
metapack/jupyter/script.py
ScriptIPython.var_expand
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()): """Expand python variables in a string. The depth argument indicates how many frames above the caller should be walked to look for the local namespace where to expand variables. The global namespace for expansion is always the user's interactive namespace. """ ns = self.user_ns.copy() try: frame = sys._getframe(depth + 1) except ValueError: # This is thrown if there aren't that many frames on the stack, # e.g. if a script called run_line_magic() directly. pass else: ns.update(frame.f_locals) try: # We have to use .vformat() here, because 'self' is a valid and common # name, and expanding **ns for .format() would make it collide with # the 'self' argument of the method. cmd = formatter.vformat(cmd, args=[], kwargs=ns) except Exception: # if formatter couldn't format, just let it go untransformed pass return cmd
python
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()): """Expand python variables in a string. The depth argument indicates how many frames above the caller should be walked to look for the local namespace where to expand variables. The global namespace for expansion is always the user's interactive namespace. """ ns = self.user_ns.copy() try: frame = sys._getframe(depth + 1) except ValueError: # This is thrown if there aren't that many frames on the stack, # e.g. if a script called run_line_magic() directly. pass else: ns.update(frame.f_locals) try: # We have to use .vformat() here, because 'self' is a valid and common # name, and expanding **ns for .format() would make it collide with # the 'self' argument of the method. cmd = formatter.vformat(cmd, args=[], kwargs=ns) except Exception: # if formatter couldn't format, just let it go untransformed pass return cmd
[ "def", "var_expand", "(", "self", ",", "cmd", ",", "depth", "=", "0", ",", "formatter", "=", "DollarFormatter", "(", ")", ")", ":", "ns", "=", "self", ".", "user_ns", ".", "copy", "(", ")", "try", ":", "frame", "=", "sys", ".", "_getframe", "(", ...
Expand python variables in a string. The depth argument indicates how many frames above the caller should be walked to look for the local namespace where to expand variables. The global namespace for expansion is always the user's interactive namespace.
[ "Expand", "python", "variables", "in", "a", "string", "." ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/script.py#L85-L112
train
50,831
Metatab/metapack
metapack/jupyter/script.py
ScriptIPython.shebang
def shebang(self, line, cell): """Run a cell via a shell command The `%%script` line is like the #! line of script, specifying a program (bash, perl, ruby, etc.) with which to run. The rest of the cell is run by that program. Examples -------- :: In [1]: %%script bash ...: for i in 1 2 3; do ...: echo $i ...: done 1 2 3 """ argv = arg_split(line, posix=not sys.platform.startswith('win')) args, cmd = self.shebang.parser.parse_known_args(argv) try: p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE) except OSError as e: if e.errno == errno.ENOENT: print("Couldn't find program: %r" % cmd[0]) return else: raise if not cell.endswith('\n'): cell += '\n' cell = cell.encode('utf8', 'replace') if args.bg: self.bg_processes.append(p) self._gc_bg_processes() if args.out: self.shell.user_ns[args.out] = p.stdout if args.err: self.shell.user_ns[args.err] = p.stderr self.job_manager.new(self._run_script, p, cell, daemon=True) if args.proc: self.shell.user_ns[args.proc] = p return try: out, err = p.communicate(cell) except KeyboardInterrupt: try: p.send_signal(signal.SIGINT) time.sleep(0.1) if p.poll() is not None: print("Process is interrupted.") return p.terminate() time.sleep(0.1) if p.poll() is not None: print("Process is terminated.") return p.kill() print("Process is killed.") except OSError: pass except Exception as e: print("Error while terminating subprocess (pid=%i): %s" \ % (p.pid, e)) return out = py3compat.bytes_to_str(out) err = py3compat.bytes_to_str(err) if args.out: self.shell.user_ns[args.out] = out else: sys.stdout.write(out) sys.stdout.flush() if args.err: self.shell.user_ns[args.err] = err else: sys.stderr.write(err) sys.stderr.flush()
python
def shebang(self, line, cell): """Run a cell via a shell command The `%%script` line is like the #! line of script, specifying a program (bash, perl, ruby, etc.) with which to run. The rest of the cell is run by that program. Examples -------- :: In [1]: %%script bash ...: for i in 1 2 3; do ...: echo $i ...: done 1 2 3 """ argv = arg_split(line, posix=not sys.platform.startswith('win')) args, cmd = self.shebang.parser.parse_known_args(argv) try: p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE) except OSError as e: if e.errno == errno.ENOENT: print("Couldn't find program: %r" % cmd[0]) return else: raise if not cell.endswith('\n'): cell += '\n' cell = cell.encode('utf8', 'replace') if args.bg: self.bg_processes.append(p) self._gc_bg_processes() if args.out: self.shell.user_ns[args.out] = p.stdout if args.err: self.shell.user_ns[args.err] = p.stderr self.job_manager.new(self._run_script, p, cell, daemon=True) if args.proc: self.shell.user_ns[args.proc] = p return try: out, err = p.communicate(cell) except KeyboardInterrupt: try: p.send_signal(signal.SIGINT) time.sleep(0.1) if p.poll() is not None: print("Process is interrupted.") return p.terminate() time.sleep(0.1) if p.poll() is not None: print("Process is terminated.") return p.kill() print("Process is killed.") except OSError: pass except Exception as e: print("Error while terminating subprocess (pid=%i): %s" \ % (p.pid, e)) return out = py3compat.bytes_to_str(out) err = py3compat.bytes_to_str(err) if args.out: self.shell.user_ns[args.out] = out else: sys.stdout.write(out) sys.stdout.flush() if args.err: self.shell.user_ns[args.err] = err else: sys.stderr.write(err) sys.stderr.flush()
[ "def", "shebang", "(", "self", ",", "line", ",", "cell", ")", ":", "argv", "=", "arg_split", "(", "line", ",", "posix", "=", "not", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ")", "args", ",", "cmd", "=", "self", ".", "shebang", ...
Run a cell via a shell command The `%%script` line is like the #! line of script, specifying a program (bash, perl, ruby, etc.) with which to run. The rest of the cell is run by that program. Examples -------- :: In [1]: %%script bash ...: for i in 1 2 3; do ...: echo $i ...: done 1 2 3
[ "Run", "a", "cell", "via", "a", "shell", "command" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/script.py#L119-L199
train
50,832
project-rig/rig
rig/place_and_route/place/rcm.py
_get_vertices_neighbours
def _get_vertices_neighbours(nets): """Generate a listing of each vertex's immedate neighbours in an undirected interpretation of a graph. Returns ------- {vertex: {vertex: weight, ...}), ...} """ zero_fn = (lambda: 0) vertices_neighbours = defaultdict(lambda: defaultdict(zero_fn)) for net in nets: if net.weight != 0: for sink in net.sinks: vertices_neighbours[net.source][sink] += net.weight vertices_neighbours[sink][net.source] += net.weight return vertices_neighbours
python
def _get_vertices_neighbours(nets): """Generate a listing of each vertex's immedate neighbours in an undirected interpretation of a graph. Returns ------- {vertex: {vertex: weight, ...}), ...} """ zero_fn = (lambda: 0) vertices_neighbours = defaultdict(lambda: defaultdict(zero_fn)) for net in nets: if net.weight != 0: for sink in net.sinks: vertices_neighbours[net.source][sink] += net.weight vertices_neighbours[sink][net.source] += net.weight return vertices_neighbours
[ "def", "_get_vertices_neighbours", "(", "nets", ")", ":", "zero_fn", "=", "(", "lambda", ":", "0", ")", "vertices_neighbours", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "zero_fn", ")", ")", "for", "net", "in", "nets", ":", "if", "net", ...
Generate a listing of each vertex's immedate neighbours in an undirected interpretation of a graph. Returns ------- {vertex: {vertex: weight, ...}), ...}
[ "Generate", "a", "listing", "of", "each", "vertex", "s", "immedate", "neighbours", "in", "an", "undirected", "interpretation", "of", "a", "graph", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L13-L28
train
50,833
project-rig/rig
rig/place_and_route/place/rcm.py
_dfs
def _dfs(vertex, vertices_neighbours): """Generate all the vertices connected to the supplied vertex in depth-first-search order. """ visited = set() to_visit = deque([vertex]) while to_visit: vertex = to_visit.pop() if vertex not in visited: yield vertex visited.add(vertex) to_visit.extend(vertices_neighbours[vertex])
python
def _dfs(vertex, vertices_neighbours): """Generate all the vertices connected to the supplied vertex in depth-first-search order. """ visited = set() to_visit = deque([vertex]) while to_visit: vertex = to_visit.pop() if vertex not in visited: yield vertex visited.add(vertex) to_visit.extend(vertices_neighbours[vertex])
[ "def", "_dfs", "(", "vertex", ",", "vertices_neighbours", ")", ":", "visited", "=", "set", "(", ")", "to_visit", "=", "deque", "(", "[", "vertex", "]", ")", "while", "to_visit", ":", "vertex", "=", "to_visit", ".", "pop", "(", ")", "if", "vertex", "n...
Generate all the vertices connected to the supplied vertex in depth-first-search order.
[ "Generate", "all", "the", "vertices", "connected", "to", "the", "supplied", "vertex", "in", "depth", "-", "first", "-", "search", "order", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L31-L42
train
50,834
project-rig/rig
rig/place_and_route/place/rcm.py
_get_connected_subgraphs
def _get_connected_subgraphs(vertices, vertices_neighbours): """Break a graph containing unconnected subgraphs into a list of connected subgraphs. Returns ------- [set([vertex, ...]), ...] """ remaining_vertices = set(vertices) subgraphs = [] while remaining_vertices: subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours)) remaining_vertices.difference_update(subgraph) subgraphs.append(subgraph) return subgraphs
python
def _get_connected_subgraphs(vertices, vertices_neighbours): """Break a graph containing unconnected subgraphs into a list of connected subgraphs. Returns ------- [set([vertex, ...]), ...] """ remaining_vertices = set(vertices) subgraphs = [] while remaining_vertices: subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours)) remaining_vertices.difference_update(subgraph) subgraphs.append(subgraph) return subgraphs
[ "def", "_get_connected_subgraphs", "(", "vertices", ",", "vertices_neighbours", ")", ":", "remaining_vertices", "=", "set", "(", "vertices", ")", "subgraphs", "=", "[", "]", "while", "remaining_vertices", ":", "subgraph", "=", "set", "(", "_dfs", "(", "remaining...
Break a graph containing unconnected subgraphs into a list of connected subgraphs. Returns ------- [set([vertex, ...]), ...]
[ "Break", "a", "graph", "containing", "unconnected", "subgraphs", "into", "a", "list", "of", "connected", "subgraphs", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L45-L60
train
50,835
project-rig/rig
rig/place_and_route/place/rcm.py
_cuthill_mckee
def _cuthill_mckee(vertices, vertices_neighbours): """Yield the Cuthill-McKee order for a connected, undirected graph. `Wikipedia <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides a good introduction to the Cuthill-McKee algorithm. The RCM algorithm attempts to order vertices in a graph such that their adjacency matrix's bandwidth is reduced. In brief the RCM algorithm is a breadth-first search with the following tweaks: * The search starts from the vertex with the lowest degree. * Vertices discovered in each layer of the search are sorted by ascending order of their degree in the output. .. warning:: This function must not be called on a disconnected or empty graph. Returns ------- [vertex, ...] """ vertices_degrees = {v: sum(itervalues(vertices_neighbours[v])) for v in vertices} peripheral_vertex = min(vertices, key=(lambda v: vertices_degrees[v])) visited = set([peripheral_vertex]) cm_order = [peripheral_vertex] previous_layer = set([peripheral_vertex]) while len(cm_order) < len(vertices): adjacent = set() for vertex in previous_layer: adjacent.update(vertices_neighbours[vertex]) adjacent.difference_update(visited) visited.update(adjacent) cm_order.extend(sorted(adjacent, key=(lambda v: vertices_degrees[v]))) previous_layer = adjacent return cm_order
python
def _cuthill_mckee(vertices, vertices_neighbours): """Yield the Cuthill-McKee order for a connected, undirected graph. `Wikipedia <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides a good introduction to the Cuthill-McKee algorithm. The RCM algorithm attempts to order vertices in a graph such that their adjacency matrix's bandwidth is reduced. In brief the RCM algorithm is a breadth-first search with the following tweaks: * The search starts from the vertex with the lowest degree. * Vertices discovered in each layer of the search are sorted by ascending order of their degree in the output. .. warning:: This function must not be called on a disconnected or empty graph. Returns ------- [vertex, ...] """ vertices_degrees = {v: sum(itervalues(vertices_neighbours[v])) for v in vertices} peripheral_vertex = min(vertices, key=(lambda v: vertices_degrees[v])) visited = set([peripheral_vertex]) cm_order = [peripheral_vertex] previous_layer = set([peripheral_vertex]) while len(cm_order) < len(vertices): adjacent = set() for vertex in previous_layer: adjacent.update(vertices_neighbours[vertex]) adjacent.difference_update(visited) visited.update(adjacent) cm_order.extend(sorted(adjacent, key=(lambda v: vertices_degrees[v]))) previous_layer = adjacent return cm_order
[ "def", "_cuthill_mckee", "(", "vertices", ",", "vertices_neighbours", ")", ":", "vertices_degrees", "=", "{", "v", ":", "sum", "(", "itervalues", "(", "vertices_neighbours", "[", "v", "]", ")", ")", "for", "v", "in", "vertices", "}", "peripheral_vertex", "="...
Yield the Cuthill-McKee order for a connected, undirected graph. `Wikipedia <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides a good introduction to the Cuthill-McKee algorithm. The RCM algorithm attempts to order vertices in a graph such that their adjacency matrix's bandwidth is reduced. In brief the RCM algorithm is a breadth-first search with the following tweaks: * The search starts from the vertex with the lowest degree. * Vertices discovered in each layer of the search are sorted by ascending order of their degree in the output. .. warning:: This function must not be called on a disconnected or empty graph. Returns ------- [vertex, ...]
[ "Yield", "the", "Cuthill", "-", "McKee", "order", "for", "a", "connected", "undirected", "graph", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L63-L103
train
50,836
project-rig/rig
rig/place_and_route/place/rcm.py
rcm_vertex_order
def rcm_vertex_order(vertices_resources, nets): """A generator which iterates over the vertices in Reverse-Cuthill-McKee order. For use as a vertex ordering for the sequential placer. """ vertices_neighbours = _get_vertices_neighbours(nets) for subgraph_vertices in _get_connected_subgraphs(vertices_resources, vertices_neighbours): cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours) for vertex in reversed(cm_order): yield vertex
python
def rcm_vertex_order(vertices_resources, nets): """A generator which iterates over the vertices in Reverse-Cuthill-McKee order. For use as a vertex ordering for the sequential placer. """ vertices_neighbours = _get_vertices_neighbours(nets) for subgraph_vertices in _get_connected_subgraphs(vertices_resources, vertices_neighbours): cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours) for vertex in reversed(cm_order): yield vertex
[ "def", "rcm_vertex_order", "(", "vertices_resources", ",", "nets", ")", ":", "vertices_neighbours", "=", "_get_vertices_neighbours", "(", "nets", ")", "for", "subgraph_vertices", "in", "_get_connected_subgraphs", "(", "vertices_resources", ",", "vertices_neighbours", ")",...
A generator which iterates over the vertices in Reverse-Cuthill-McKee order. For use as a vertex ordering for the sequential placer.
[ "A", "generator", "which", "iterates", "over", "the", "vertices", "in", "Reverse", "-", "Cuthill", "-", "McKee", "order", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L106-L117
train
50,837
project-rig/rig
rig/place_and_route/place/rcm.py
rcm_chip_order
def rcm_chip_order(machine): """A generator which iterates over a set of chips in a machine in Reverse-Cuthill-McKee order. For use as a chip ordering for the sequential placer. """ # Convert the Machine description into a placement-problem-style-graph # where the vertices are chip coordinate tuples (x, y) and each net # represents the links leaving each chip. This allows us to re-use the # rcm_vertex_order function above to generate an RCM ordering of chips in # the machine. vertices = list(machine) nets = [] for (x, y) in vertices: neighbours = [] for link in Links: if (x, y, link) in machine: dx, dy = link.to_vector() neighbour = ((x + dx) % machine.width, (y + dy) % machine.height) # In principle if the link to chip is marked as working, that # chip should be working. In practice this might not be the # case (especially for carelessly hand-defined Machine # objects). if neighbour in machine: neighbours.append(neighbour) nets.append(Net((x, y), neighbours)) return rcm_vertex_order(vertices, nets)
python
def rcm_chip_order(machine): """A generator which iterates over a set of chips in a machine in Reverse-Cuthill-McKee order. For use as a chip ordering for the sequential placer. """ # Convert the Machine description into a placement-problem-style-graph # where the vertices are chip coordinate tuples (x, y) and each net # represents the links leaving each chip. This allows us to re-use the # rcm_vertex_order function above to generate an RCM ordering of chips in # the machine. vertices = list(machine) nets = [] for (x, y) in vertices: neighbours = [] for link in Links: if (x, y, link) in machine: dx, dy = link.to_vector() neighbour = ((x + dx) % machine.width, (y + dy) % machine.height) # In principle if the link to chip is marked as working, that # chip should be working. In practice this might not be the # case (especially for carelessly hand-defined Machine # objects). if neighbour in machine: neighbours.append(neighbour) nets.append(Net((x, y), neighbours)) return rcm_vertex_order(vertices, nets)
[ "def", "rcm_chip_order", "(", "machine", ")", ":", "# Convert the Machine description into a placement-problem-style-graph", "# where the vertices are chip coordinate tuples (x, y) and each net", "# represents the links leaving each chip. This allows us to re-use the", "# rcm_vertex_order function...
A generator which iterates over a set of chips in a machine in Reverse-Cuthill-McKee order. For use as a chip ordering for the sequential placer.
[ "A", "generator", "which", "iterates", "over", "a", "set", "of", "chips", "in", "a", "machine", "in", "Reverse", "-", "Cuthill", "-", "McKee", "order", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L120-L149
train
50,838
NicolasLM/spinach
spinach/contrib/datadog.py
register_datadog
def register_datadog(tracer=None, namespace: Optional[str]=None, service: str='spinach'): """Register the Datadog integration. Exceptions making jobs fail are sent to Sentry. :param tracer: optionally use a custom ddtrace Tracer instead of the global one. :param namespace: optionally only register the Datadog integration for a particular Spinach :class:`Engine` :param service: Datadog service associated with the trace, defaults to `spinach` """ if tracer is None: from ddtrace import tracer @signals.job_started.connect_via(namespace) def job_started(namespace, job, **kwargs): tracer.trace( 'spinach.task', service=service, span_type='worker', resource=job.task_name ) @signals.job_finished.connect_via(namespace) def job_finished(namespace, job, **kwargs): root_span = tracer.current_root_span() for attr in job.__slots__: root_span.set_tag(attr, getattr(job, attr)) root_span.finish() @signals.job_failed.connect_via(namespace) def job_failed(namespace, job, **kwargs): root_span = tracer.current_root_span() root_span.set_traceback() @signals.job_schedule_retry.connect_via(namespace) def job_schedule_retry(namespace, job, **kwargs): root_span = tracer.current_root_span() root_span.set_traceback()
python
def register_datadog(tracer=None, namespace: Optional[str]=None, service: str='spinach'): """Register the Datadog integration. Exceptions making jobs fail are sent to Sentry. :param tracer: optionally use a custom ddtrace Tracer instead of the global one. :param namespace: optionally only register the Datadog integration for a particular Spinach :class:`Engine` :param service: Datadog service associated with the trace, defaults to `spinach` """ if tracer is None: from ddtrace import tracer @signals.job_started.connect_via(namespace) def job_started(namespace, job, **kwargs): tracer.trace( 'spinach.task', service=service, span_type='worker', resource=job.task_name ) @signals.job_finished.connect_via(namespace) def job_finished(namespace, job, **kwargs): root_span = tracer.current_root_span() for attr in job.__slots__: root_span.set_tag(attr, getattr(job, attr)) root_span.finish() @signals.job_failed.connect_via(namespace) def job_failed(namespace, job, **kwargs): root_span = tracer.current_root_span() root_span.set_traceback() @signals.job_schedule_retry.connect_via(namespace) def job_schedule_retry(namespace, job, **kwargs): root_span = tracer.current_root_span() root_span.set_traceback()
[ "def", "register_datadog", "(", "tracer", "=", "None", ",", "namespace", ":", "Optional", "[", "str", "]", "=", "None", ",", "service", ":", "str", "=", "'spinach'", ")", ":", "if", "tracer", "is", "None", ":", "from", "ddtrace", "import", "tracer", "@...
Register the Datadog integration. Exceptions making jobs fail are sent to Sentry. :param tracer: optionally use a custom ddtrace Tracer instead of the global one. :param namespace: optionally only register the Datadog integration for a particular Spinach :class:`Engine` :param service: Datadog service associated with the trace, defaults to `spinach`
[ "Register", "the", "Datadog", "integration", "." ]
0122f916643101eab5cdc1f3da662b9446e372aa
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/contrib/datadog.py#L6-L44
train
50,839
Metatab/metapack
metapack/rowgenerator.py
copy_reference
def copy_reference(resource, doc, env, *args, **kwargs): """A row-generating function that yields from a reference. This permits an upstream package to be copied and modified by this package, while being formally referenced as a dependency The function will generate rows from a reference that has the same name as the resource term """ yield from doc.reference(resource.name)
python
def copy_reference(resource, doc, env, *args, **kwargs): """A row-generating function that yields from a reference. This permits an upstream package to be copied and modified by this package, while being formally referenced as a dependency The function will generate rows from a reference that has the same name as the resource term """ yield from doc.reference(resource.name)
[ "def", "copy_reference", "(", "resource", ",", "doc", ",", "env", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "from", "doc", ".", "reference", "(", "resource", ".", "name", ")" ]
A row-generating function that yields from a reference. This permits an upstream package to be copied and modified by this package, while being formally referenced as a dependency The function will generate rows from a reference that has the same name as the resource term
[ "A", "row", "-", "generating", "function", "that", "yields", "from", "a", "reference", ".", "This", "permits", "an", "upstream", "package", "to", "be", "copied", "and", "modified", "by", "this", "package", "while", "being", "formally", "referenced", "as", "a...
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/rowgenerator.py#L164-L171
train
50,840
Metatab/metapack
metapack/rowgenerator.py
copy_reference_group
def copy_reference_group(resource, doc, env, *args, **kwargs): """ A Row generating function that copies all of the references that have the same 'Group' argument as this reference The 'RefArgs' argument is a comma seperated list of arguments from the references that will be prepended to each row. :param resource: :param doc: :param env: :param args: :param kwargs: :return: """ all_headers = [] # Combine all of the headers into a list of tuples by position for ref in doc.references(): if ref.get_value('Group') == resource.get_value('Group'): for row in ref.iterrowproxy(): all_headers.append(list(row.keys())) break # For each position, add the headers that are not already in the header set. # this merges the headers from all datasets, maintaining the order. mostly. headers = [] for e in zip(*all_headers): for c in set(e): if c not in headers: headers.append(c) if resource.get_value('RefArgs'): ref_args = [e.strip() for e in resource.get_value('RefArgs').strip().split(',')] else: ref_args = [] yield ref_args+headers for ref in doc.references(): if ref.get_value('Group') == resource.get_value('Group'): ref_args_values = [ ref.get_value(e) for e in ref_args] for row in ref.iterdict: yield ref_args_values + [ row.get(c) for c in headers]
python
def copy_reference_group(resource, doc, env, *args, **kwargs): """ A Row generating function that copies all of the references that have the same 'Group' argument as this reference The 'RefArgs' argument is a comma seperated list of arguments from the references that will be prepended to each row. :param resource: :param doc: :param env: :param args: :param kwargs: :return: """ all_headers = [] # Combine all of the headers into a list of tuples by position for ref in doc.references(): if ref.get_value('Group') == resource.get_value('Group'): for row in ref.iterrowproxy(): all_headers.append(list(row.keys())) break # For each position, add the headers that are not already in the header set. # this merges the headers from all datasets, maintaining the order. mostly. headers = [] for e in zip(*all_headers): for c in set(e): if c not in headers: headers.append(c) if resource.get_value('RefArgs'): ref_args = [e.strip() for e in resource.get_value('RefArgs').strip().split(',')] else: ref_args = [] yield ref_args+headers for ref in doc.references(): if ref.get_value('Group') == resource.get_value('Group'): ref_args_values = [ ref.get_value(e) for e in ref_args] for row in ref.iterdict: yield ref_args_values + [ row.get(c) for c in headers]
[ "def", "copy_reference_group", "(", "resource", ",", "doc", ",", "env", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "all_headers", "=", "[", "]", "# Combine all of the headers into a list of tuples by position", "for", "ref", "in", "doc", ".", "referenc...
A Row generating function that copies all of the references that have the same 'Group' argument as this reference The 'RefArgs' argument is a comma seperated list of arguments from the references that will be prepended to each row. :param resource: :param doc: :param env: :param args: :param kwargs: :return:
[ "A", "Row", "generating", "function", "that", "copies", "all", "of", "the", "references", "that", "have", "the", "same", "Group", "argument", "as", "this", "reference" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/rowgenerator.py#L173-L218
train
50,841
Metatab/metapack
metapack/package/filesystem.py
FileSystemPackageBuilder.is_older_than_metadata
def is_older_than_metadata(self): """ Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns False if the time of either can't be determined :param path: Optional extra save path, used in save_path() """ try: path = self.doc_file.path except AttributeError: path = self.doc_file source_ref = self._doc.ref.path try: age_diff = getmtime(source_ref) - getmtime(path) return age_diff > 0 except (FileNotFoundError, OSError): return False
python
def is_older_than_metadata(self): """ Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns False if the time of either can't be determined :param path: Optional extra save path, used in save_path() """ try: path = self.doc_file.path except AttributeError: path = self.doc_file source_ref = self._doc.ref.path try: age_diff = getmtime(source_ref) - getmtime(path) return age_diff > 0 except (FileNotFoundError, OSError): return False
[ "def", "is_older_than_metadata", "(", "self", ")", ":", "try", ":", "path", "=", "self", ".", "doc_file", ".", "path", "except", "AttributeError", ":", "path", "=", "self", ".", "doc_file", "source_ref", "=", "self", ".", "_doc", ".", "ref", ".", "path",...
Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns False if the time of either can't be determined :param path: Optional extra save path, used in save_path()
[ "Return", "True", "if", "the", "package", "save", "file", "is", "older", "than", "the", "metadata", ".", "If", "it", "is", "it", "should", "be", "rebuilt", ".", "Returns", "False", "if", "the", "time", "of", "either", "can", "t", "be", "determined" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/filesystem.py#L82-L104
train
50,842
Metatab/metapack
metapack/package/filesystem.py
FileSystemPackageBuilder._load_resource
def _load_resource(self, source_r, abs_path=False): """The CSV package has no resources, so we just need to resolve the URLs to them. Usually, the CSV package is built from a file system ackage on a publically acessible server. """ from itertools import islice from metapack.exc import MetapackError from os.path import splitext # Refetch the resource ... IIRC b/c the source_r resource may actually be from # a different package. So r is the resource we want to possibly modify in this package, # while source_r is from a different souce package, whose data is being loaded into this # one. r = self.datafile(source_r.name) if self.reuse_resources: self.prt("Re-using data for '{}' ".format(r.name)) else: self.prt("Loading data for '{}' ".format(r.name)) if not r.name: raise MetapackError(f"Resource/reference term has no name: {str(r)}") # Special handing for SQL should not be done here; it should be done in Rowgenerators, probably. if r.term_is('root.sql'): new_r = self.doc['Resources'].new_term('Root.Datafile', '') new_r.name = r.name self.doc.remove_term(r) r = new_r r.url = 'data/' + r.name + '.csv' # Re-writing the URL for the resource. path = join(self.package_path.path, r.url) makedirs(dirname(path), exist_ok=True) if not self.reuse_resources or not exists(path): if self.reuse_resources: self.prt("Resource {} doesn't exist, rebuilding".format(path)) if exists(path): remove(path) gen = islice(source_r, 1, None) headers = source_r.headers self.write_csv(path, headers, gen) for k, v in source_r.post_iter_meta.items(): r[k] = v try: if source_r.errors: for col_name, errors in source_r.errors.items(): self.warn("ERRORS for column '{}' ".format(col_name)) for e in islice(errors,5): self.warn(' {}'.format(e)) if len(errors) > 5: self.warn("... 
and {} more ".format(len(errors)-5)) except AttributeError: pass # Maybe generator does not track errors if source_r.errors: self.err("Resource processing generated conversion errors") # Writing between resources so row-generating programs and notebooks can # access previously created resources. We have to clean the doc before writing it ref = self._write_doc() # What a wreck ... we also have to get rid of the 'Transform' values, since the CSV files # that are written don't need them, and a lot of intermediate processsing ( specifically, # jupyter Notebooks, ) does not load them. p = FileSystemPackageBuilder(ref, self.package_root) p._clean_doc() ref = p._write_doc()
python
def _load_resource(self, source_r, abs_path=False): """The CSV package has no resources, so we just need to resolve the URLs to them. Usually, the CSV package is built from a file system ackage on a publically acessible server. """ from itertools import islice from metapack.exc import MetapackError from os.path import splitext # Refetch the resource ... IIRC b/c the source_r resource may actually be from # a different package. So r is the resource we want to possibly modify in this package, # while source_r is from a different souce package, whose data is being loaded into this # one. r = self.datafile(source_r.name) if self.reuse_resources: self.prt("Re-using data for '{}' ".format(r.name)) else: self.prt("Loading data for '{}' ".format(r.name)) if not r.name: raise MetapackError(f"Resource/reference term has no name: {str(r)}") # Special handing for SQL should not be done here; it should be done in Rowgenerators, probably. if r.term_is('root.sql'): new_r = self.doc['Resources'].new_term('Root.Datafile', '') new_r.name = r.name self.doc.remove_term(r) r = new_r r.url = 'data/' + r.name + '.csv' # Re-writing the URL for the resource. path = join(self.package_path.path, r.url) makedirs(dirname(path), exist_ok=True) if not self.reuse_resources or not exists(path): if self.reuse_resources: self.prt("Resource {} doesn't exist, rebuilding".format(path)) if exists(path): remove(path) gen = islice(source_r, 1, None) headers = source_r.headers self.write_csv(path, headers, gen) for k, v in source_r.post_iter_meta.items(): r[k] = v try: if source_r.errors: for col_name, errors in source_r.errors.items(): self.warn("ERRORS for column '{}' ".format(col_name)) for e in islice(errors,5): self.warn(' {}'.format(e)) if len(errors) > 5: self.warn("... 
and {} more ".format(len(errors)-5)) except AttributeError: pass # Maybe generator does not track errors if source_r.errors: self.err("Resource processing generated conversion errors") # Writing between resources so row-generating programs and notebooks can # access previously created resources. We have to clean the doc before writing it ref = self._write_doc() # What a wreck ... we also have to get rid of the 'Transform' values, since the CSV files # that are written don't need them, and a lot of intermediate processsing ( specifically, # jupyter Notebooks, ) does not load them. p = FileSystemPackageBuilder(ref, self.package_root) p._clean_doc() ref = p._write_doc()
[ "def", "_load_resource", "(", "self", ",", "source_r", ",", "abs_path", "=", "False", ")", ":", "from", "itertools", "import", "islice", "from", "metapack", ".", "exc", "import", "MetapackError", "from", "os", ".", "path", "import", "splitext", "# Refetch the ...
The CSV package has no resources, so we just need to resolve the URLs to them. Usually, the CSV package is built from a file system ackage on a publically acessible server.
[ "The", "CSV", "package", "has", "no", "resources", "so", "we", "just", "need", "to", "resolve", "the", "URLs", "to", "them", ".", "Usually", "the", "CSV", "package", "is", "built", "from", "a", "file", "system", "ackage", "on", "a", "publically", "acessi...
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/filesystem.py#L150-L228
train
50,843
Metatab/metapack
metapack/package/filesystem.py
FileSystemPackageBuilder._load_documentation
def _load_documentation(self, term, contents, file_name): """Load a single documentation entry""" try: title = term['title'].value except KeyError: self.warn("Documentation has no title, skipping: '{}' ".format(term.value)) return if term.term_is('Root.Readme'): # This term type has inline content, not a url package_sub_dir = 'docs' else: try: eu = term.expanded_url parsed_url = term.parsed_url except AttributeError: parsed_url = eu = parse_app_url(term.value) # Can't used expanded_url here because expansion makes file system URLS absolute. if eu.proto == 'file' and not parsed_url.path_is_absolute: package_sub_dir = parsed_url.fspath.parent else: package_sub_dir = 'docs' path = join(self.package_path.path, package_sub_dir, file_name) self.prt("Loading documentation for '{}', '{}' to '{}' ".format(title, file_name, path)) makedirs(dirname(path), exist_ok=True) if exists(path): remove(path) with open(path, 'wb') as f: f.write(contents)
python
def _load_documentation(self, term, contents, file_name): """Load a single documentation entry""" try: title = term['title'].value except KeyError: self.warn("Documentation has no title, skipping: '{}' ".format(term.value)) return if term.term_is('Root.Readme'): # This term type has inline content, not a url package_sub_dir = 'docs' else: try: eu = term.expanded_url parsed_url = term.parsed_url except AttributeError: parsed_url = eu = parse_app_url(term.value) # Can't used expanded_url here because expansion makes file system URLS absolute. if eu.proto == 'file' and not parsed_url.path_is_absolute: package_sub_dir = parsed_url.fspath.parent else: package_sub_dir = 'docs' path = join(self.package_path.path, package_sub_dir, file_name) self.prt("Loading documentation for '{}', '{}' to '{}' ".format(title, file_name, path)) makedirs(dirname(path), exist_ok=True) if exists(path): remove(path) with open(path, 'wb') as f: f.write(contents)
[ "def", "_load_documentation", "(", "self", ",", "term", ",", "contents", ",", "file_name", ")", ":", "try", ":", "title", "=", "term", "[", "'title'", "]", ".", "value", "except", "KeyError", ":", "self", ".", "warn", "(", "\"Documentation has no title, skip...
Load a single documentation entry
[ "Load", "a", "single", "documentation", "entry" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/filesystem.py#L272-L307
train
50,844
narwhaljames/restapi-logging-handler
restapi_logging_handler/restapi_logging_handler.py
RestApiHandler._getPayload
def _getPayload(self, record): """ The data that will be sent to the RESTful API """ try: # top level payload items d = record.__dict__ pid = d.pop('process', 'nopid') tid = d.pop('thread', 'notid') payload = { k: v for (k, v) in d.items() if k in TOP_KEYS } # logging meta attributes payload['meta'] = { k: v for (k, v) in d.items() if k in META_KEYS } # everything else goes in details payload['details'] = { k: simple_json(v) for (k, v) in d.items() if k not in self.detail_ignore_set } payload['log'] = payload.pop('name', 'n/a') payload['level'] = payload.pop('levelname', 'n/a') payload['meta']['line'] = payload['meta'].pop('lineno', 'n/a') payload['message'] = record.getMessage() tb = self._getTraceback(record) if tb: payload['traceback'] = tb except Exception as e: payload = { 'level': 'ERROR', 'message': 'could not format', 'exception': repr(e), } payload['pid'] = 'p-{}'.format(pid) payload['tid'] = 't-{}'.format(tid) return payload
python
def _getPayload(self, record): """ The data that will be sent to the RESTful API """ try: # top level payload items d = record.__dict__ pid = d.pop('process', 'nopid') tid = d.pop('thread', 'notid') payload = { k: v for (k, v) in d.items() if k in TOP_KEYS } # logging meta attributes payload['meta'] = { k: v for (k, v) in d.items() if k in META_KEYS } # everything else goes in details payload['details'] = { k: simple_json(v) for (k, v) in d.items() if k not in self.detail_ignore_set } payload['log'] = payload.pop('name', 'n/a') payload['level'] = payload.pop('levelname', 'n/a') payload['meta']['line'] = payload['meta'].pop('lineno', 'n/a') payload['message'] = record.getMessage() tb = self._getTraceback(record) if tb: payload['traceback'] = tb except Exception as e: payload = { 'level': 'ERROR', 'message': 'could not format', 'exception': repr(e), } payload['pid'] = 'p-{}'.format(pid) payload['tid'] = 't-{}'.format(tid) return payload
[ "def", "_getPayload", "(", "self", ",", "record", ")", ":", "try", ":", "# top level payload items", "d", "=", "record", ".", "__dict__", "pid", "=", "d", ".", "pop", "(", "'process'", ",", "'nopid'", ")", "tid", "=", "d", ".", "pop", "(", "'thread'", ...
The data that will be sent to the RESTful API
[ "The", "data", "that", "will", "be", "sent", "to", "the", "RESTful", "API" ]
edaedd3e702e68cfd102bc9fbfa4a33e0c002913
https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/restapi_logging_handler.py#L166-L211
train
50,845
narwhaljames/restapi-logging-handler
restapi_logging_handler/loggly_handler.py
LogglyHandler._getEndpoint
def _getEndpoint(self, add_tags=None): """ Override Build Loggly's RESTful API endpoint """ return 'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'.format( self.custom_token, self._implodeTags(add_tags=add_tags) )
python
def _getEndpoint(self, add_tags=None): """ Override Build Loggly's RESTful API endpoint """ return 'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'.format( self.custom_token, self._implodeTags(add_tags=add_tags) )
[ "def", "_getEndpoint", "(", "self", ",", "add_tags", "=", "None", ")", ":", "return", "'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'", ".", "format", "(", "self", ".", "custom_token", ",", "self", ".", "_implodeTags", "(", "add_tags", "=", "add_tags", ")", ")" ...
Override Build Loggly's RESTful API endpoint
[ "Override", "Build", "Loggly", "s", "RESTful", "API", "endpoint" ]
edaedd3e702e68cfd102bc9fbfa4a33e0c002913
https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/loggly_handler.py#L109-L117
train
50,846
narwhaljames/restapi-logging-handler
restapi_logging_handler/loggly_handler.py
LogglyHandler._getPayload
def _getPayload(self, record): """ The data that will be sent to loggly. """ payload = super(LogglyHandler, self)._getPayload(record) payload['tags'] = self._implodeTags() return payload
python
def _getPayload(self, record): """ The data that will be sent to loggly. """ payload = super(LogglyHandler, self)._getPayload(record) payload['tags'] = self._implodeTags() return payload
[ "def", "_getPayload", "(", "self", ",", "record", ")", ":", "payload", "=", "super", "(", "LogglyHandler", ",", "self", ")", ".", "_getPayload", "(", "record", ")", "payload", "[", "'tags'", "]", "=", "self", ".", "_implodeTags", "(", ")", "return", "p...
The data that will be sent to loggly.
[ "The", "data", "that", "will", "be", "sent", "to", "loggly", "." ]
edaedd3e702e68cfd102bc9fbfa4a33e0c002913
https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/loggly_handler.py#L128-L135
train
50,847
Metatab/metapack
metapack/doc.py
MetapackDoc.nonver_name
def nonver_name(self): """Return the non versioned name""" nv = self.as_version(None) if not nv: import re nv = re.sub(r'-[^-]+$', '', self.name) return nv
python
def nonver_name(self): """Return the non versioned name""" nv = self.as_version(None) if not nv: import re nv = re.sub(r'-[^-]+$', '', self.name) return nv
[ "def", "nonver_name", "(", "self", ")", ":", "nv", "=", "self", ".", "as_version", "(", "None", ")", "if", "not", "nv", ":", "import", "re", "nv", "=", "re", ".", "sub", "(", "r'-[^-]+$'", ",", "''", ",", "self", ".", "name", ")", "return", "nv" ...
Return the non versioned name
[ "Return", "the", "non", "versioned", "name" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L101-L108
train
50,848
Metatab/metapack
metapack/doc.py
MetapackDoc.set_wrappable_term
def set_wrappable_term(self, v, term): """Set the Root.Description, possibly splitting long descriptions across multiple terms. """ import textwrap for t in self['Root'].find(term): self.remove_term(t) for l in textwrap.wrap(v, 80): self['Root'].new_term(term, l)
python
def set_wrappable_term(self, v, term): """Set the Root.Description, possibly splitting long descriptions across multiple terms. """ import textwrap for t in self['Root'].find(term): self.remove_term(t) for l in textwrap.wrap(v, 80): self['Root'].new_term(term, l)
[ "def", "set_wrappable_term", "(", "self", ",", "v", ",", "term", ")", ":", "import", "textwrap", "for", "t", "in", "self", "[", "'Root'", "]", ".", "find", "(", "term", ")", ":", "self", ".", "remove_term", "(", "t", ")", "for", "l", "in", "textwra...
Set the Root.Description, possibly splitting long descriptions across multiple terms.
[ "Set", "the", "Root", ".", "Description", "possibly", "splitting", "long", "descriptions", "across", "multiple", "terms", "." ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L130-L139
train
50,849
Metatab/metapack
metapack/doc.py
MetapackDoc.get_lib_module_dict
def get_lib_module_dict(self): """Load the 'lib' directory as a python module, so it can be used to provide functions for rowpipe transforms. This only works filesystem packages""" from importlib import import_module if not self.ref: return {} u = parse_app_url(self.ref) if u.scheme == 'file': if not self.set_sys_path(): return {} for module_name in self.lib_dir_names: try: m = import_module(module_name) return {k: v for k, v in m.__dict__.items() if not k.startswith('__')} except ModuleNotFoundError as e: # We need to know if it is the datapackage's module that is missing # or if it is a module that it imported if not module_name in str(e): raise # If not our module, it's a real error. continue else: return {}
python
def get_lib_module_dict(self): """Load the 'lib' directory as a python module, so it can be used to provide functions for rowpipe transforms. This only works filesystem packages""" from importlib import import_module if not self.ref: return {} u = parse_app_url(self.ref) if u.scheme == 'file': if not self.set_sys_path(): return {} for module_name in self.lib_dir_names: try: m = import_module(module_name) return {k: v for k, v in m.__dict__.items() if not k.startswith('__')} except ModuleNotFoundError as e: # We need to know if it is the datapackage's module that is missing # or if it is a module that it imported if not module_name in str(e): raise # If not our module, it's a real error. continue else: return {}
[ "def", "get_lib_module_dict", "(", "self", ")", ":", "from", "importlib", "import", "import_module", "if", "not", "self", ".", "ref", ":", "return", "{", "}", "u", "=", "parse_app_url", "(", "self", ".", "ref", ")", "if", "u", ".", "scheme", "==", "'fi...
Load the 'lib' directory as a python module, so it can be used to provide functions for rowpipe transforms. This only works filesystem packages
[ "Load", "the", "lib", "directory", "as", "a", "python", "module", "so", "it", "can", "be", "used", "to", "provide", "functions", "for", "rowpipe", "transforms", ".", "This", "only", "works", "filesystem", "packages" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L187-L219
train
50,850
Metatab/metapack
metapack/doc.py
MetapackDoc._repr_html_
def _repr_html_(self, **kwargs): """Produce HTML for Jupyter Notebook""" from jinja2 import Template from markdown import markdown as convert_markdown extensions = [ 'markdown.extensions.extra', 'markdown.extensions.admonition' ] return convert_markdown(self.markdown, extensions)
python
def _repr_html_(self, **kwargs): """Produce HTML for Jupyter Notebook""" from jinja2 import Template from markdown import markdown as convert_markdown extensions = [ 'markdown.extensions.extra', 'markdown.extensions.admonition' ] return convert_markdown(self.markdown, extensions)
[ "def", "_repr_html_", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from", "jinja2", "import", "Template", "from", "markdown", "import", "markdown", "as", "convert_markdown", "extensions", "=", "[", "'markdown.extensions.extra'", ",", "'markdown.extensions.admoni...
Produce HTML for Jupyter Notebook
[ "Produce", "HTML", "for", "Jupyter", "Notebook" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L233-L243
train
50,851
Metatab/metapack
metapack/doc.py
MetapackDoc.write_csv
def write_csv(self, path=None): """Write CSV file. Sorts the sections before calling the superclass write_csv""" # Sort the Sections self.sort_sections(['Root', 'Contacts', 'Documentation', 'References', 'Resources', 'Citations', 'Schema']) # Sort Terms in the root section # Re-wrap the description and abstract if self.description: self.description = self.description if self.abstract: self.description = self.abstract t = self['Root'].get_or_new_term('Root.Modified') t.value = datetime_now() self.sort_by_term() return super().write_csv(str(path))
python
def write_csv(self, path=None): """Write CSV file. Sorts the sections before calling the superclass write_csv""" # Sort the Sections self.sort_sections(['Root', 'Contacts', 'Documentation', 'References', 'Resources', 'Citations', 'Schema']) # Sort Terms in the root section # Re-wrap the description and abstract if self.description: self.description = self.description if self.abstract: self.description = self.abstract t = self['Root'].get_or_new_term('Root.Modified') t.value = datetime_now() self.sort_by_term() return super().write_csv(str(path))
[ "def", "write_csv", "(", "self", ",", "path", "=", "None", ")", ":", "# Sort the Sections", "self", ".", "sort_sections", "(", "[", "'Root'", ",", "'Contacts'", ",", "'Documentation'", ",", "'References'", ",", "'Resources'", ",", "'Citations'", ",", "'Schema'...
Write CSV file. Sorts the sections before calling the superclass write_csv
[ "Write", "CSV", "file", ".", "Sorts", "the", "sections", "before", "calling", "the", "superclass", "write_csv" ]
8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L283-L304
train
50,852
project-rig/rig
rig/wizard.py
dimensions_wizard
def dimensions_wizard(): """A wizard which attempts to determine the dimensions of a SpiNNaker system. .. warning:: Since SC&MP v2.0.0 it is not necessary to know the dimensions of a SpiNNaker machine in order to boot it. As a result, most applications will no longer require this wizard step. Returns ``{"dimensions": (x, y)}`` via the :py:exc:`~rig.wizard.Success` exception. """ option = yield MultipleChoice( "What type of SpiNNaker system to you have?", ["A single four-chip 'SpiNN-3' board", "A single forty-eight-chip 'SpiNN-5' board", "Multiple forty-eight-chip 'SpiNN-5' boards", "Other"], None) assert 0 <= option < 4 if option == 0: raise Success({"dimensions": (2, 2)}) elif option == 1: raise Success({"dimensions": (8, 8)}) elif option == 2: # Infer the system's dimensions from the number of boards supplied num_boards = yield Text("How many 'SpiNN-5' boards are in the system?") try: w, h = standard_system_dimensions(int(num_boards)) except ValueError: # May fail due to integer conversion or the function rejecting the # number of boards. raise Failure( "'{}' is not a valid number of boards.".format(num_boards)) raise Success({"dimensions": (w, h)}) else: dimensions = yield Text( "What are the dimensions of the network in chips (e.g. 24x12)?") match = re.match(r"\s*(\d+)\s*[xX]\s*(\d+)\s*", dimensions) if not match: raise Failure("'{}' is not a valid system size.".format( dimensions)) else: w = int(match.group(1)) h = int(match.group(2)) raise Success({"dimensions": (w, h)})
python
def dimensions_wizard(): """A wizard which attempts to determine the dimensions of a SpiNNaker system. .. warning:: Since SC&MP v2.0.0 it is not necessary to know the dimensions of a SpiNNaker machine in order to boot it. As a result, most applications will no longer require this wizard step. Returns ``{"dimensions": (x, y)}`` via the :py:exc:`~rig.wizard.Success` exception. """ option = yield MultipleChoice( "What type of SpiNNaker system to you have?", ["A single four-chip 'SpiNN-3' board", "A single forty-eight-chip 'SpiNN-5' board", "Multiple forty-eight-chip 'SpiNN-5' boards", "Other"], None) assert 0 <= option < 4 if option == 0: raise Success({"dimensions": (2, 2)}) elif option == 1: raise Success({"dimensions": (8, 8)}) elif option == 2: # Infer the system's dimensions from the number of boards supplied num_boards = yield Text("How many 'SpiNN-5' boards are in the system?") try: w, h = standard_system_dimensions(int(num_boards)) except ValueError: # May fail due to integer conversion or the function rejecting the # number of boards. raise Failure( "'{}' is not a valid number of boards.".format(num_boards)) raise Success({"dimensions": (w, h)}) else: dimensions = yield Text( "What are the dimensions of the network in chips (e.g. 24x12)?") match = re.match(r"\s*(\d+)\s*[xX]\s*(\d+)\s*", dimensions) if not match: raise Failure("'{}' is not a valid system size.".format( dimensions)) else: w = int(match.group(1)) h = int(match.group(2)) raise Success({"dimensions": (w, h)})
[ "def", "dimensions_wizard", "(", ")", ":", "option", "=", "yield", "MultipleChoice", "(", "\"What type of SpiNNaker system to you have?\"", ",", "[", "\"A single four-chip 'SpiNN-3' board\"", ",", "\"A single forty-eight-chip 'SpiNN-5' board\"", ",", "\"Multiple forty-eight-chip 'S...
A wizard which attempts to determine the dimensions of a SpiNNaker system. .. warning:: Since SC&MP v2.0.0 it is not necessary to know the dimensions of a SpiNNaker machine in order to boot it. As a result, most applications will no longer require this wizard step. Returns ``{"dimensions": (x, y)}`` via the :py:exc:`~rig.wizard.Success` exception.
[ "A", "wizard", "which", "attempts", "to", "determine", "the", "dimensions", "of", "a", "SpiNNaker", "system", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L90-L137
train
50,853
project-rig/rig
rig/wizard.py
ip_address_wizard
def ip_address_wizard(): """A wizard which attempts to determine the IP of a SpiNNaker system. Returns ``{"ip_address": "..."}`` via the :py:exc:`~rig.wizard.Success` exception. """ option = yield MultipleChoice( "Would you like to auto-detect the SpiNNaker system's IP address?", ["Auto-detect", "Manually Enter IP address or hostname"], 0) assert 0 <= option < 2 if option == 0: yield Prompt( "Make sure the SpiNNaker system is switched on and is not booted.") yield Info("Discovering attached SpiNNaker systems...") ip_address = listen() if ip_address is None: raise Failure( "Did not discover a locally connected SpiNNaker system.") elif option == 1: # pragma: no branch ip_address = yield Text( "What is the IP address or hostname of the SpiNNaker system?") if ip_address == "": raise Failure("No IP address or hostname entered") raise Success({"ip_address": ip_address})
python
def ip_address_wizard(): """A wizard which attempts to determine the IP of a SpiNNaker system. Returns ``{"ip_address": "..."}`` via the :py:exc:`~rig.wizard.Success` exception. """ option = yield MultipleChoice( "Would you like to auto-detect the SpiNNaker system's IP address?", ["Auto-detect", "Manually Enter IP address or hostname"], 0) assert 0 <= option < 2 if option == 0: yield Prompt( "Make sure the SpiNNaker system is switched on and is not booted.") yield Info("Discovering attached SpiNNaker systems...") ip_address = listen() if ip_address is None: raise Failure( "Did not discover a locally connected SpiNNaker system.") elif option == 1: # pragma: no branch ip_address = yield Text( "What is the IP address or hostname of the SpiNNaker system?") if ip_address == "": raise Failure("No IP address or hostname entered") raise Success({"ip_address": ip_address})
[ "def", "ip_address_wizard", "(", ")", ":", "option", "=", "yield", "MultipleChoice", "(", "\"Would you like to auto-detect the SpiNNaker system's IP address?\"", ",", "[", "\"Auto-detect\"", ",", "\"Manually Enter IP address or hostname\"", "]", ",", "0", ")", "assert", "0"...
A wizard which attempts to determine the IP of a SpiNNaker system. Returns ``{"ip_address": "..."}`` via the :py:exc:`~rig.wizard.Success` exception.
[ "A", "wizard", "which", "attempts", "to", "determine", "the", "IP", "of", "a", "SpiNNaker", "system", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L140-L167
train
50,854
project-rig/rig
rig/wizard.py
cat
def cat(*wizards): """A higher-order wizard which is the concatenation of a number of other wizards. The resulting data is the union of all wizard outputs. """ data = {} for wizard in wizards: try: response = None while True: response = yield wizard.send(response) except Success as s: data.update(s.data) raise Success(data)
python
def cat(*wizards): """A higher-order wizard which is the concatenation of a number of other wizards. The resulting data is the union of all wizard outputs. """ data = {} for wizard in wizards: try: response = None while True: response = yield wizard.send(response) except Success as s: data.update(s.data) raise Success(data)
[ "def", "cat", "(", "*", "wizards", ")", ":", "data", "=", "{", "}", "for", "wizard", "in", "wizards", ":", "try", ":", "response", "=", "None", "while", "True", ":", "response", "=", "yield", "wizard", ".", "send", "(", "response", ")", "except", "...
A higher-order wizard which is the concatenation of a number of other wizards. The resulting data is the union of all wizard outputs.
[ "A", "higher", "-", "order", "wizard", "which", "is", "the", "concatenation", "of", "a", "number", "of", "other", "wizards", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L170-L186
train
50,855
project-rig/rig
rig/wizard.py
cli_wrapper
def cli_wrapper(generator): """Given a wizard, implements an interactive command-line human-friendly interface for it. Parameters ---------- generator A generator such as one created by calling :py:func:`rig.wizard.wizard_generator`. Returns ------- dict or None Returns a dictionary containing the results of the wizard or None if the wizard failed. """ first = True response = None while True: # Insert blank lines between prompts if not first: print() first = False try: message = generator.send(response) if isinstance(message, MultipleChoice): print(message.question) for num, choice in enumerate(message.options): print(" {}: {}".format(num, choice)) option = input("Select an option 0-{}{}: ".format( len(message.options) - 1, " (default: {})".format(message.default) if message.default is not None else "")) if option == "" and message.default is not None: option = message.default try: response = int(option) except ValueError: response = -1 if not (0 <= response < len(message.options)): print("ERROR: {} is not a valid option.".format(option)) return None elif isinstance(message, Text): print(message.question) response = input("> ") elif isinstance(message, Prompt): print(message.message) input("<Press enter to continue>") response = None elif isinstance(message, Info): # pragma: no branch print(message.message) response = None except Failure as f: print("ERROR: {}".format(str(f))) return None except Success as s: return s.data
python
def cli_wrapper(generator): """Given a wizard, implements an interactive command-line human-friendly interface for it. Parameters ---------- generator A generator such as one created by calling :py:func:`rig.wizard.wizard_generator`. Returns ------- dict or None Returns a dictionary containing the results of the wizard or None if the wizard failed. """ first = True response = None while True: # Insert blank lines between prompts if not first: print() first = False try: message = generator.send(response) if isinstance(message, MultipleChoice): print(message.question) for num, choice in enumerate(message.options): print(" {}: {}".format(num, choice)) option = input("Select an option 0-{}{}: ".format( len(message.options) - 1, " (default: {})".format(message.default) if message.default is not None else "")) if option == "" and message.default is not None: option = message.default try: response = int(option) except ValueError: response = -1 if not (0 <= response < len(message.options)): print("ERROR: {} is not a valid option.".format(option)) return None elif isinstance(message, Text): print(message.question) response = input("> ") elif isinstance(message, Prompt): print(message.message) input("<Press enter to continue>") response = None elif isinstance(message, Info): # pragma: no branch print(message.message) response = None except Failure as f: print("ERROR: {}".format(str(f))) return None except Success as s: return s.data
[ "def", "cli_wrapper", "(", "generator", ")", ":", "first", "=", "True", "response", "=", "None", "while", "True", ":", "# Insert blank lines between prompts", "if", "not", "first", ":", "print", "(", ")", "first", "=", "False", "try", ":", "message", "=", ...
Given a wizard, implements an interactive command-line human-friendly interface for it. Parameters ---------- generator A generator such as one created by calling :py:func:`rig.wizard.wizard_generator`. Returns ------- dict or None Returns a dictionary containing the results of the wizard or None if the wizard failed.
[ "Given", "a", "wizard", "implements", "an", "interactive", "command", "-", "line", "human", "-", "friendly", "interface", "for", "it", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L189-L250
train
50,856
project-rig/rig
rig/place_and_route/place/sa/python_kernel.py
_net_cost
def _net_cost(net, placements, has_wrap_around_links, machine): """Get the cost of a given net. This function, in principle at least, should estimate the total network resources consumed by the given net. In practice this estimate is based on the size of the bounding-box of the net (i.e. HPWL). This should be improved at some later time to better account for the effects of large fan-outs. Parameters ---------- net : :py:class:`rig.netlist.Net` placements : {vertex: (x, y), ...} has_wrap_around_links : bool machine : :py:class:`rig.place_and_route.Machine` Returns ------- float """ # This function is by far the hottest code in the entire algorithm, as a # result, small performance improvements in here can have significant # impact on the runtime of the overall algorithm. As an unfortunate side # effect, this code is rather ugly since many higher-level constructs (e.g. # min/max) are outrageously slow. # XXX: This does not account for the hexagonal properties of the SpiNNaker # topology. if has_wrap_around_links: # When wrap-around links exist, we find the minimal bounding box and # return the HPWL weighted by the net weight. To do this the largest # gap between any pair of vertices is found:: # # | x x x | # ^-------------^ # max gap # # The minimal bounding box then goes the other way around:: # # | x x x | # ----------^ ^--- # First we collect the x and y coordinates of all vertices in the net # into a pair of (sorted) lists, xs and ys. x, y = placements[net.source] num_vertices = len(net.sinks) + 1 xs = [x] * num_vertices ys = [y] * num_vertices i = 1 for v in net.sinks: x, y = placements[v] xs[i] = x ys[i] = y i += 1 xs.sort() ys.sort() # The minimal bounding box is then found as above. 
x_max_delta = 0 last_x = xs[-1] - machine.width for x in xs: delta = x - last_x last_x = x if delta > x_max_delta: x_max_delta = delta y_max_delta = 0 last_y = ys[-1] - machine.height for y in ys: delta = y - last_y last_y = y if delta > y_max_delta: y_max_delta = delta return (((machine.width - x_max_delta) + (machine.height - y_max_delta)) * net.weight * math.sqrt(len(net.sinks) + 1)) else: # When no wrap-around links, find the bounding box around the vertices # in the net and return the HPWL weighted by the net weight. x1, y1 = x2, y2 = placements[net.source] for vertex in net.sinks: x, y = placements[vertex] x1 = x if x < x1 else x1 y1 = y if y < y1 else y1 x2 = x if x > x2 else x2 y2 = y if y > y2 else y2 return (((x2 - x1) + (y2 - y1)) * float(net.weight) * math.sqrt(len(net.sinks) + 1))
python
def _net_cost(net, placements, has_wrap_around_links, machine): """Get the cost of a given net. This function, in principle at least, should estimate the total network resources consumed by the given net. In practice this estimate is based on the size of the bounding-box of the net (i.e. HPWL). This should be improved at some later time to better account for the effects of large fan-outs. Parameters ---------- net : :py:class:`rig.netlist.Net` placements : {vertex: (x, y), ...} has_wrap_around_links : bool machine : :py:class:`rig.place_and_route.Machine` Returns ------- float """ # This function is by far the hottest code in the entire algorithm, as a # result, small performance improvements in here can have significant # impact on the runtime of the overall algorithm. As an unfortunate side # effect, this code is rather ugly since many higher-level constructs (e.g. # min/max) are outrageously slow. # XXX: This does not account for the hexagonal properties of the SpiNNaker # topology. if has_wrap_around_links: # When wrap-around links exist, we find the minimal bounding box and # return the HPWL weighted by the net weight. To do this the largest # gap between any pair of vertices is found:: # # | x x x | # ^-------------^ # max gap # # The minimal bounding box then goes the other way around:: # # | x x x | # ----------^ ^--- # First we collect the x and y coordinates of all vertices in the net # into a pair of (sorted) lists, xs and ys. x, y = placements[net.source] num_vertices = len(net.sinks) + 1 xs = [x] * num_vertices ys = [y] * num_vertices i = 1 for v in net.sinks: x, y = placements[v] xs[i] = x ys[i] = y i += 1 xs.sort() ys.sort() # The minimal bounding box is then found as above. 
x_max_delta = 0 last_x = xs[-1] - machine.width for x in xs: delta = x - last_x last_x = x if delta > x_max_delta: x_max_delta = delta y_max_delta = 0 last_y = ys[-1] - machine.height for y in ys: delta = y - last_y last_y = y if delta > y_max_delta: y_max_delta = delta return (((machine.width - x_max_delta) + (machine.height - y_max_delta)) * net.weight * math.sqrt(len(net.sinks) + 1)) else: # When no wrap-around links, find the bounding box around the vertices # in the net and return the HPWL weighted by the net weight. x1, y1 = x2, y2 = placements[net.source] for vertex in net.sinks: x, y = placements[vertex] x1 = x if x < x1 else x1 y1 = y if y < y1 else y1 x2 = x if x > x2 else x2 y2 = y if y > y2 else y2 return (((x2 - x1) + (y2 - y1)) * float(net.weight) * math.sqrt(len(net.sinks) + 1))
[ "def", "_net_cost", "(", "net", ",", "placements", ",", "has_wrap_around_links", ",", "machine", ")", ":", "# This function is by far the hottest code in the entire algorithm, as a", "# result, small performance improvements in here can have significant", "# impact on the runtime of the ...
Get the cost of a given net. This function, in principle at least, should estimate the total network resources consumed by the given net. In practice this estimate is based on the size of the bounding-box of the net (i.e. HPWL). This should be improved at some later time to better account for the effects of large fan-outs. Parameters ---------- net : :py:class:`rig.netlist.Net` placements : {vertex: (x, y), ...} has_wrap_around_links : bool machine : :py:class:`rig.place_and_route.Machine` Returns ------- float
[ "Get", "the", "cost", "of", "a", "given", "net", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L118-L210
train
50,857
project-rig/rig
rig/place_and_route/place/sa/python_kernel.py
_vertex_net_cost
def _vertex_net_cost(vertex, v2n, placements, has_wrap_around_links, machine): """Get the total cost of the nets connected to the given vertex. Parameters ---------- vertex The vertex whose nets we're interested in. v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...} placements : {vertex: (x, y), ...} has_wrap_around_links : bool machine : :py:class:`rig.place_and_route.Machine` Returns ------- float """ total_cost = 0.0 for net in v2n[vertex]: total_cost += _net_cost(net, placements, has_wrap_around_links, machine) return total_cost
python
def _vertex_net_cost(vertex, v2n, placements, has_wrap_around_links, machine): """Get the total cost of the nets connected to the given vertex. Parameters ---------- vertex The vertex whose nets we're interested in. v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...} placements : {vertex: (x, y), ...} has_wrap_around_links : bool machine : :py:class:`rig.place_and_route.Machine` Returns ------- float """ total_cost = 0.0 for net in v2n[vertex]: total_cost += _net_cost(net, placements, has_wrap_around_links, machine) return total_cost
[ "def", "_vertex_net_cost", "(", "vertex", ",", "v2n", ",", "placements", ",", "has_wrap_around_links", ",", "machine", ")", ":", "total_cost", "=", "0.0", "for", "net", "in", "v2n", "[", "vertex", "]", ":", "total_cost", "+=", "_net_cost", "(", "net", ",",...
Get the total cost of the nets connected to the given vertex. Parameters ---------- vertex The vertex whose nets we're interested in. v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...} placements : {vertex: (x, y), ...} has_wrap_around_links : bool machine : :py:class:`rig.place_and_route.Machine` Returns ------- float
[ "Get", "the", "total", "cost", "of", "the", "nets", "connected", "to", "the", "given", "vertex", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L213-L234
train
50,858
project-rig/rig
rig/place_and_route/place/sa/python_kernel.py
_get_candidate_swap
def _get_candidate_swap(resources, location, l2v, vertices_resources, fixed_vertices, machine): """Given a chip location, select a set of vertices which would have to be moved elsewhere to accommodate the arrival of the specified set of resources. Parameters ---------- resources : {resource: value, ...} The amount of resources which are required at the specified location. location : (x, y) The coordinates of the chip where the resources are sought. l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} fixed_vertices : {vertex, ...} machine : :py:class:`rig.place_and_route.Machine` Returns ------- [Vertex, ...] or None If a (possibly empty) list, gives the set of vertices which should be removed from the specified location to make room. If None, the situation is impossible. """ # The resources already available at the given location chip_resources = machine[location] # The set of vertices at that location vertices = l2v[location] # The set of vertices to be moved from the location to free up the # specified amount of resources to_move = [] # While there's not enough free resource, remove an arbitrary (movable) # vertex from the chip. i = 0 while overallocated(subtract_resources(chip_resources, resources)): if i >= len(vertices): # Run out of vertices to remove from this chip, thus the situation # must be impossible. return None elif vertices[i] in fixed_vertices: # Can't move fixed vertices, just skip them. i += 1 continue else: # Work out the cost change when we remove the specified vertex vertex = vertices[i] chip_resources = add_resources(chip_resources, vertices_resources[vertex]) to_move.append(vertex) i += 1 return to_move
python
def _get_candidate_swap(resources, location, l2v, vertices_resources, fixed_vertices, machine): """Given a chip location, select a set of vertices which would have to be moved elsewhere to accommodate the arrival of the specified set of resources. Parameters ---------- resources : {resource: value, ...} The amount of resources which are required at the specified location. location : (x, y) The coordinates of the chip where the resources are sought. l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} fixed_vertices : {vertex, ...} machine : :py:class:`rig.place_and_route.Machine` Returns ------- [Vertex, ...] or None If a (possibly empty) list, gives the set of vertices which should be removed from the specified location to make room. If None, the situation is impossible. """ # The resources already available at the given location chip_resources = machine[location] # The set of vertices at that location vertices = l2v[location] # The set of vertices to be moved from the location to free up the # specified amount of resources to_move = [] # While there's not enough free resource, remove an arbitrary (movable) # vertex from the chip. i = 0 while overallocated(subtract_resources(chip_resources, resources)): if i >= len(vertices): # Run out of vertices to remove from this chip, thus the situation # must be impossible. return None elif vertices[i] in fixed_vertices: # Can't move fixed vertices, just skip them. i += 1 continue else: # Work out the cost change when we remove the specified vertex vertex = vertices[i] chip_resources = add_resources(chip_resources, vertices_resources[vertex]) to_move.append(vertex) i += 1 return to_move
[ "def", "_get_candidate_swap", "(", "resources", ",", "location", ",", "l2v", ",", "vertices_resources", ",", "fixed_vertices", ",", "machine", ")", ":", "# The resources already available at the given location", "chip_resources", "=", "machine", "[", "location", "]", "#...
Given a chip location, select a set of vertices which would have to be moved elsewhere to accommodate the arrival of the specified set of resources. Parameters ---------- resources : {resource: value, ...} The amount of resources which are required at the specified location. location : (x, y) The coordinates of the chip where the resources are sought. l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} fixed_vertices : {vertex, ...} machine : :py:class:`rig.place_and_route.Machine` Returns ------- [Vertex, ...] or None If a (possibly empty) list, gives the set of vertices which should be removed from the specified location to make room. If None, the situation is impossible.
[ "Given", "a", "chip", "location", "select", "a", "set", "of", "vertices", "which", "would", "have", "to", "be", "moved", "elsewhere", "to", "accommodate", "the", "arrival", "of", "the", "specified", "set", "of", "resources", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L237-L292
train
50,859
project-rig/rig
rig/place_and_route/place/sa/python_kernel.py
_swap
def _swap(vas, vas_location, vbs, vbs_location, l2v, vertices_resources, placements, machine): """Swap the positions of two sets of vertices. Parameters ---------- vas : [vertex, ...] A set of vertices currently at vas_location. vas_location : (x, y) vbs : [vertex, ...] A set of vertices currently at vbs_location. vbs_location : (x, y) l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} placements : {vertex: (x, y), ...} machine : :py:class:`rig.place_and_route.Machine` """ # Get the lists of vertices at either location vas_location2v = l2v[vas_location] vbs_location2v = l2v[vbs_location] # Get the resource availability at either location vas_resources = machine[vas_location] vbs_resources = machine[vbs_location] # Move all the vertices in vas into vbs. for va in vas: # Update the placements placements[va] = vbs_location # Update the location-to-vertex lookup vas_location2v.remove(va) vbs_location2v.append(va) # Update the resource consumption after the move resources = vertices_resources[va] vas_resources = add_resources(vas_resources, resources) vbs_resources = subtract_resources(vbs_resources, resources) for vb in vbs: # Update the placements placements[vb] = vas_location # Update the location-to-vertex lookup vbs_location2v.remove(vb) vas_location2v.append(vb) # Update the resource consumption after the move resources = vertices_resources[vb] vas_resources = subtract_resources(vas_resources, resources) vbs_resources = add_resources(vbs_resources, resources) # Update the resources in the machine machine[vas_location] = vas_resources machine[vbs_location] = vbs_resources
python
def _swap(vas, vas_location, vbs, vbs_location, l2v, vertices_resources, placements, machine): """Swap the positions of two sets of vertices. Parameters ---------- vas : [vertex, ...] A set of vertices currently at vas_location. vas_location : (x, y) vbs : [vertex, ...] A set of vertices currently at vbs_location. vbs_location : (x, y) l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} placements : {vertex: (x, y), ...} machine : :py:class:`rig.place_and_route.Machine` """ # Get the lists of vertices at either location vas_location2v = l2v[vas_location] vbs_location2v = l2v[vbs_location] # Get the resource availability at either location vas_resources = machine[vas_location] vbs_resources = machine[vbs_location] # Move all the vertices in vas into vbs. for va in vas: # Update the placements placements[va] = vbs_location # Update the location-to-vertex lookup vas_location2v.remove(va) vbs_location2v.append(va) # Update the resource consumption after the move resources = vertices_resources[va] vas_resources = add_resources(vas_resources, resources) vbs_resources = subtract_resources(vbs_resources, resources) for vb in vbs: # Update the placements placements[vb] = vas_location # Update the location-to-vertex lookup vbs_location2v.remove(vb) vas_location2v.append(vb) # Update the resource consumption after the move resources = vertices_resources[vb] vas_resources = subtract_resources(vas_resources, resources) vbs_resources = add_resources(vbs_resources, resources) # Update the resources in the machine machine[vas_location] = vas_resources machine[vbs_location] = vbs_resources
[ "def", "_swap", "(", "vas", ",", "vas_location", ",", "vbs", ",", "vbs_location", ",", "l2v", ",", "vertices_resources", ",", "placements", ",", "machine", ")", ":", "# Get the lists of vertices at either location", "vas_location2v", "=", "l2v", "[", "vas_location",...
Swap the positions of two sets of vertices. Parameters ---------- vas : [vertex, ...] A set of vertices currently at vas_location. vas_location : (x, y) vbs : [vertex, ...] A set of vertices currently at vbs_location. vbs_location : (x, y) l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} placements : {vertex: (x, y), ...} machine : :py:class:`rig.place_and_route.Machine`
[ "Swap", "the", "positions", "of", "two", "sets", "of", "vertices", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L295-L349
train
50,860
NicolasLM/spinach
spinach/brokers/redis.py
RedisBroker._load_script
def _load_script(self, filename: str) -> Script: """Load a Lua script. Read the Lua script file to generate its Script object. If the script starts with a magic string, add it to the list of scripts requiring an idempotency token to execute. """ with open(path.join(here, 'redis_scripts', filename), mode='rb') as f: script_data = f.read() rv = self._r.register_script(script_data) if script_data.startswith(b'-- idempotency protected script'): self._idempotency_protected_scripts.append(rv) return rv
python
def _load_script(self, filename: str) -> Script: """Load a Lua script. Read the Lua script file to generate its Script object. If the script starts with a magic string, add it to the list of scripts requiring an idempotency token to execute. """ with open(path.join(here, 'redis_scripts', filename), mode='rb') as f: script_data = f.read() rv = self._r.register_script(script_data) if script_data.startswith(b'-- idempotency protected script'): self._idempotency_protected_scripts.append(rv) return rv
[ "def", "_load_script", "(", "self", ",", "filename", ":", "str", ")", "->", "Script", ":", "with", "open", "(", "path", ".", "join", "(", "here", ",", "'redis_scripts'", ",", "filename", ")", ",", "mode", "=", "'rb'", ")", "as", "f", ":", "script_dat...
Load a Lua script. Read the Lua script file to generate its Script object. If the script starts with a magic string, add it to the list of scripts requiring an idempotency token to execute.
[ "Load", "a", "Lua", "script", "." ]
0122f916643101eab5cdc1f3da662b9446e372aa
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L62-L74
train
50,861
openstack/networking-hyperv
networking_hyperv/neutron/agent/hnv_neutron_agent.py
main
def main(): """The entry point for the HNV Agent.""" neutron_config.register_agent_state_opts_helper(CONF) common_config.init(sys.argv[1:]) neutron_config.setup_logging() hnv_agent = HNVAgent() # Start everything. LOG.info("Agent initialized successfully, now running... ") hnv_agent.daemon_loop()
python
def main(): """The entry point for the HNV Agent.""" neutron_config.register_agent_state_opts_helper(CONF) common_config.init(sys.argv[1:]) neutron_config.setup_logging() hnv_agent = HNVAgent() # Start everything. LOG.info("Agent initialized successfully, now running... ") hnv_agent.daemon_loop()
[ "def", "main", "(", ")", ":", "neutron_config", ".", "register_agent_state_opts_helper", "(", "CONF", ")", "common_config", ".", "init", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "neutron_config", ".", "setup_logging", "(", ")", "hnv_agent", "=", "...
The entry point for the HNV Agent.
[ "The", "entry", "point", "for", "the", "HNV", "Agent", "." ]
7a89306ab0586c95b99debb44d898f70834508b9
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_neutron_agent.py#L94-L104
train
50,862
project-rig/rig
rig/machine_control/scp_connection.py
SCPConnection.send_scp
def send_scp(self, buffer_size, x, y, p, cmd, arg1=0, arg2=0, arg3=0, data=b'', expected_args=3, timeout=0.0): """Transmit a packet to the SpiNNaker machine and block until an acknowledgement is received. Parameters ---------- buffer_size : int Number of bytes held in an SCP buffer by SARK, determines how many bytes will be expected in a socket. x : int y : int p : int cmd : int arg1 : int arg2 : int arg3 : int data : bytestring expected_args : int The number of arguments (0-3) that are expected in the returned packet. timeout : float Additional timeout in seconds to wait for a reply on top of the default specified upon instantiation. Returns ------- :py:class:`~rig.machine_control.packets.SCPPacket` The packet that was received in acknowledgement of the transmitted packet. """ # This is implemented as a single burst packet sent using the bursty # interface. This significantly reduces code duplication. # Construct a callable to retain the returned packet for us class Callback(object): def __init__(self): self.packet = None def __call__(self, packet): self.packet = SCPPacket.from_bytestring( packet, n_args=expected_args ) # Create the packet to send callback = Callback() packets = [ scpcall(x, y, p, cmd, arg1, arg2, arg3, data, callback, timeout) ] # Send the burst self.send_scp_burst(buffer_size, 1, packets) # Return the received packet assert callback.packet is not None return callback.packet
python
def send_scp(self, buffer_size, x, y, p, cmd, arg1=0, arg2=0, arg3=0, data=b'', expected_args=3, timeout=0.0): """Transmit a packet to the SpiNNaker machine and block until an acknowledgement is received. Parameters ---------- buffer_size : int Number of bytes held in an SCP buffer by SARK, determines how many bytes will be expected in a socket. x : int y : int p : int cmd : int arg1 : int arg2 : int arg3 : int data : bytestring expected_args : int The number of arguments (0-3) that are expected in the returned packet. timeout : float Additional timeout in seconds to wait for a reply on top of the default specified upon instantiation. Returns ------- :py:class:`~rig.machine_control.packets.SCPPacket` The packet that was received in acknowledgement of the transmitted packet. """ # This is implemented as a single burst packet sent using the bursty # interface. This significantly reduces code duplication. # Construct a callable to retain the returned packet for us class Callback(object): def __init__(self): self.packet = None def __call__(self, packet): self.packet = SCPPacket.from_bytestring( packet, n_args=expected_args ) # Create the packet to send callback = Callback() packets = [ scpcall(x, y, p, cmd, arg1, arg2, arg3, data, callback, timeout) ] # Send the burst self.send_scp_burst(buffer_size, 1, packets) # Return the received packet assert callback.packet is not None return callback.packet
[ "def", "send_scp", "(", "self", ",", "buffer_size", ",", "x", ",", "y", ",", "p", ",", "cmd", ",", "arg1", "=", "0", ",", "arg2", "=", "0", ",", "arg3", "=", "0", ",", "data", "=", "b''", ",", "expected_args", "=", "3", ",", "timeout", "=", "...
Transmit a packet to the SpiNNaker machine and block until an acknowledgement is received. Parameters ---------- buffer_size : int Number of bytes held in an SCP buffer by SARK, determines how many bytes will be expected in a socket. x : int y : int p : int cmd : int arg1 : int arg2 : int arg3 : int data : bytestring expected_args : int The number of arguments (0-3) that are expected in the returned packet. timeout : float Additional timeout in seconds to wait for a reply on top of the default specified upon instantiation. Returns ------- :py:class:`~rig.machine_control.packets.SCPPacket` The packet that was received in acknowledgement of the transmitted packet.
[ "Transmit", "a", "packet", "to", "the", "SpiNNaker", "machine", "and", "block", "until", "an", "acknowledgement", "is", "received", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/scp_connection.py#L83-L137
train
50,863
belbio/bel
bel/lang/bel_specification.py
get_specification
def get_specification(version: str) -> Mapping[str, Any]: """Get BEL Specification The json file this depends on is generated by belspec_yaml2json as part of the update_specifications function Args: version: e.g. 2.0.0 where the filename """ spec_dir = config["bel"]["lang"]["specifications"] spec_dict = {} bel_versions = get_bel_versions() if version not in bel_versions: log.error("Cannot get unknown version BEL specification") return {"error": "unknown version of BEL"} # use this variable to find our parser file since periods aren't recommended in python module names version_underscored = version.replace(".", "_") json_fn = f"{spec_dir}/bel_v{version_underscored}.json" with open(json_fn, "r") as f: spec_dict = json.load(f) return spec_dict
python
def get_specification(version: str) -> Mapping[str, Any]: """Get BEL Specification The json file this depends on is generated by belspec_yaml2json as part of the update_specifications function Args: version: e.g. 2.0.0 where the filename """ spec_dir = config["bel"]["lang"]["specifications"] spec_dict = {} bel_versions = get_bel_versions() if version not in bel_versions: log.error("Cannot get unknown version BEL specification") return {"error": "unknown version of BEL"} # use this variable to find our parser file since periods aren't recommended in python module names version_underscored = version.replace(".", "_") json_fn = f"{spec_dir}/bel_v{version_underscored}.json" with open(json_fn, "r") as f: spec_dict = json.load(f) return spec_dict
[ "def", "get_specification", "(", "version", ":", "str", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "spec_dir", "=", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"specifications\"", "]", "spec_dict", "=", "{", "}", "bel_version...
Get BEL Specification The json file this depends on is generated by belspec_yaml2json as part of the update_specifications function Args: version: e.g. 2.0.0 where the filename
[ "Get", "BEL", "Specification" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L96-L122
train
50,864
belbio/bel
bel/lang/bel_specification.py
get_bel_versions
def get_bel_versions() -> List[str]: """Get BEL Language versions supported Get the list of all BEL Language versions supported. The file this depends on is generated by belspec_yaml2json and is kept up to date using `make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json` directly as it's added as a command by pip install. Returns: List[str]: list of versions """ spec_dir = config["bel"]["lang"]["specifications"] fn = f"{spec_dir}/versions.json" with open(fn, "r") as f: versions = json.load(f) return versions
python
def get_bel_versions() -> List[str]: """Get BEL Language versions supported Get the list of all BEL Language versions supported. The file this depends on is generated by belspec_yaml2json and is kept up to date using `make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json` directly as it's added as a command by pip install. Returns: List[str]: list of versions """ spec_dir = config["bel"]["lang"]["specifications"] fn = f"{spec_dir}/versions.json" with open(fn, "r") as f: versions = json.load(f) return versions
[ "def", "get_bel_versions", "(", ")", "->", "List", "[", "str", "]", ":", "spec_dir", "=", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"specifications\"", "]", "fn", "=", "f\"{spec_dir}/versions.json\"", "with", "open", "(", "fn", ",", "\"r\...
Get BEL Language versions supported Get the list of all BEL Language versions supported. The file this depends on is generated by belspec_yaml2json and is kept up to date using `make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json` directly as it's added as a command by pip install. Returns: List[str]: list of versions
[ "Get", "BEL", "Language", "versions", "supported" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L125-L143
train
50,865
belbio/bel
bel/lang/bel_specification.py
update_specifications
def update_specifications(force: bool = False): """Update BEL specifications Collect BEL specifications from Github BELBio BEL Specification folder and store in local directory specified in belbio_conf.yaml Process all BEL Specifications in YAML into an enhanced JSON version and capture all BEL versions in a separate file for quick access. """ spec_dir = config["bel"]["lang"]["specifications"] if not os.path.isdir(spec_dir): os.mkdir(spec_dir) log.info(f"Updating BEL Specifications - stored in {spec_dir}") # Collect new specifications from Git repository if config["bel"]["lang"]["specification_github_repo"]: github_belspec_files(spec_dir, force=force) # Ensure that files use 'yaml' extension files = glob.glob(f"{spec_dir}/*.yml") for fn in files: new_fn = fn.replace("yml", "yaml") os.rename(fn, new_fn) # Convert YAML to enhanced JSON files = glob.glob(f"{spec_dir}/*.yaml") versions = {} for fn in files: filename = os.path.basename(fn) check_version = filename.replace("bel_v", "").replace(".yaml", "").replace("_", ".") json_fn = fn.replace(".yaml", ".json") version = belspec_yaml2json(fn, json_fn) if version != check_version: log.error( f"Version mis-match for {fn} - fn version: {check_version} version: {version}" ) versions[version] = filename with open(f"{spec_dir}/versions.json", "w") as f: json.dump(list(set(versions)), f, indent=4) # Convert YAML file to EBNF and then parser module create_ebnf_parser(files)
python
def update_specifications(force: bool = False): """Update BEL specifications Collect BEL specifications from Github BELBio BEL Specification folder and store in local directory specified in belbio_conf.yaml Process all BEL Specifications in YAML into an enhanced JSON version and capture all BEL versions in a separate file for quick access. """ spec_dir = config["bel"]["lang"]["specifications"] if not os.path.isdir(spec_dir): os.mkdir(spec_dir) log.info(f"Updating BEL Specifications - stored in {spec_dir}") # Collect new specifications from Git repository if config["bel"]["lang"]["specification_github_repo"]: github_belspec_files(spec_dir, force=force) # Ensure that files use 'yaml' extension files = glob.glob(f"{spec_dir}/*.yml") for fn in files: new_fn = fn.replace("yml", "yaml") os.rename(fn, new_fn) # Convert YAML to enhanced JSON files = glob.glob(f"{spec_dir}/*.yaml") versions = {} for fn in files: filename = os.path.basename(fn) check_version = filename.replace("bel_v", "").replace(".yaml", "").replace("_", ".") json_fn = fn.replace(".yaml", ".json") version = belspec_yaml2json(fn, json_fn) if version != check_version: log.error( f"Version mis-match for {fn} - fn version: {check_version} version: {version}" ) versions[version] = filename with open(f"{spec_dir}/versions.json", "w") as f: json.dump(list(set(versions)), f, indent=4) # Convert YAML file to EBNF and then parser module create_ebnf_parser(files)
[ "def", "update_specifications", "(", "force", ":", "bool", "=", "False", ")", ":", "spec_dir", "=", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"specifications\"", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "spec_dir", ")", ...
Update BEL specifications Collect BEL specifications from Github BELBio BEL Specification folder and store in local directory specified in belbio_conf.yaml Process all BEL Specifications in YAML into an enhanced JSON version and capture all BEL versions in a separate file for quick access.
[ "Update", "BEL", "specifications" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L146-L194
train
50,866
belbio/bel
bel/lang/bel_specification.py
github_belspec_files
def github_belspec_files(spec_dir, force: bool = False): """Get belspec files from Github repo Args: spec_dir: directory to store the BEL Specification and derived files force: force update of BEL Specifications from Github - skipped if local files less than 1 day old """ if not force: dtnow = datetime.datetime.utcnow() delta = datetime.timedelta(1) yesterday = dtnow - delta for fn in glob.glob(f"{spec_dir}/bel*yaml"): if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday: log.info("Skipping BEL Specification update - specs less than 1 day old") return repo_url = "https://api.github.com/repos/belbio/bel_specifications/contents/specifications" params = {} github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "") if github_access_token: params = {"access_token": github_access_token} r = requests.get(repo_url, params=params) if r.status_code == 200: results = r.json() for f in results: url = f["download_url"] fn = os.path.basename(url) if "yaml" not in fn and "yml" in fn: fn = fn.replace("yml", "yaml") r = requests.get(url, params=params, allow_redirects=True) if r.status_code == 200: open(f"{spec_dir}/{fn}", "wb").write(r.content) else: sys.exit( f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}" ) else: sys.exit( f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}" )
python
def github_belspec_files(spec_dir, force: bool = False): """Get belspec files from Github repo Args: spec_dir: directory to store the BEL Specification and derived files force: force update of BEL Specifications from Github - skipped if local files less than 1 day old """ if not force: dtnow = datetime.datetime.utcnow() delta = datetime.timedelta(1) yesterday = dtnow - delta for fn in glob.glob(f"{spec_dir}/bel*yaml"): if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday: log.info("Skipping BEL Specification update - specs less than 1 day old") return repo_url = "https://api.github.com/repos/belbio/bel_specifications/contents/specifications" params = {} github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "") if github_access_token: params = {"access_token": github_access_token} r = requests.get(repo_url, params=params) if r.status_code == 200: results = r.json() for f in results: url = f["download_url"] fn = os.path.basename(url) if "yaml" not in fn and "yml" in fn: fn = fn.replace("yml", "yaml") r = requests.get(url, params=params, allow_redirects=True) if r.status_code == 200: open(f"{spec_dir}/{fn}", "wb").write(r.content) else: sys.exit( f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}" ) else: sys.exit( f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}" )
[ "def", "github_belspec_files", "(", "spec_dir", ",", "force", ":", "bool", "=", "False", ")", ":", "if", "not", "force", ":", "dtnow", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "delta", "=", "datetime", ".", "timedelta", "(", "1", ")",...
Get belspec files from Github repo Args: spec_dir: directory to store the BEL Specification and derived files force: force update of BEL Specifications from Github - skipped if local files less than 1 day old
[ "Get", "belspec", "files", "from", "Github", "repo" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L197-L242
train
50,867
belbio/bel
bel/lang/bel_specification.py
belspec_yaml2json
def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str: """Enhance BEL specification and save as JSON file Load all BEL Specification YAML files and convert to JSON files after enhancing them. Also create a bel_versions.json file with all available BEL versions for fast loading. Args: yaml_fn: original YAML version of BEL Spec json_fn: enhanced JSON version of BEL Spec Returns: str: version of BEL Spec """ try: spec_dict = yaml.load(open(yaml_fn, "r").read(), Loader=yaml.SafeLoader) # admin-related keys spec_dict["admin"] = {} spec_dict["admin"]["version_underscored"] = spec_dict["version"].replace(".", "_") spec_dict["admin"]["parser_fn"] = yaml_fn.replace(".yaml", "_parser.py") # add relation keys list, to_short, to_long add_relations(spec_dict) # add function keys list, to_short, to_long add_functions(spec_dict) # add namespace keys list, list_short, list_long, to_short, to_long add_namespaces(spec_dict) enhance_function_signatures(spec_dict) add_function_signature_help(spec_dict) with open(json_fn, "w") as f: json.dump(spec_dict, f) except Exception as e: log.error( "Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed.".format( yaml_fn ) ) sys.exit() return spec_dict["version"]
python
def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str: """Enhance BEL specification and save as JSON file Load all BEL Specification YAML files and convert to JSON files after enhancing them. Also create a bel_versions.json file with all available BEL versions for fast loading. Args: yaml_fn: original YAML version of BEL Spec json_fn: enhanced JSON version of BEL Spec Returns: str: version of BEL Spec """ try: spec_dict = yaml.load(open(yaml_fn, "r").read(), Loader=yaml.SafeLoader) # admin-related keys spec_dict["admin"] = {} spec_dict["admin"]["version_underscored"] = spec_dict["version"].replace(".", "_") spec_dict["admin"]["parser_fn"] = yaml_fn.replace(".yaml", "_parser.py") # add relation keys list, to_short, to_long add_relations(spec_dict) # add function keys list, to_short, to_long add_functions(spec_dict) # add namespace keys list, list_short, list_long, to_short, to_long add_namespaces(spec_dict) enhance_function_signatures(spec_dict) add_function_signature_help(spec_dict) with open(json_fn, "w") as f: json.dump(spec_dict, f) except Exception as e: log.error( "Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed.".format( yaml_fn ) ) sys.exit() return spec_dict["version"]
[ "def", "belspec_yaml2json", "(", "yaml_fn", ":", "str", ",", "json_fn", ":", "str", ")", "->", "str", ":", "try", ":", "spec_dict", "=", "yaml", ".", "load", "(", "open", "(", "yaml_fn", ",", "\"r\"", ")", ".", "read", "(", ")", ",", "Loader", "=",...
Enhance BEL specification and save as JSON file Load all BEL Specification YAML files and convert to JSON files after enhancing them. Also create a bel_versions.json file with all available BEL versions for fast loading. Args: yaml_fn: original YAML version of BEL Spec json_fn: enhanced JSON version of BEL Spec Returns: str: version of BEL Spec
[ "Enhance", "BEL", "specification", "and", "save", "as", "JSON", "file" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L245-L289
train
50,868
belbio/bel
bel/lang/bel_specification.py
add_relations
def add_relations(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]: """Add relation keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added relation keys """ # Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances spec_dict["relations"]["list"] = [] spec_dict["relations"]["list_short"] = [] spec_dict["relations"]["list_long"] = [] spec_dict["relations"]["to_short"] = {} spec_dict["relations"]["to_long"] = {} for relation_name in spec_dict["relations"]["info"]: abbreviated_name = spec_dict["relations"]["info"][relation_name]["abbreviation"] spec_dict["relations"]["list"].extend((relation_name, abbreviated_name)) spec_dict["relations"]["list_long"].append(relation_name) spec_dict["relations"]["list_short"].append(abbreviated_name) spec_dict["relations"]["to_short"][relation_name] = abbreviated_name spec_dict["relations"]["to_short"][abbreviated_name] = abbreviated_name spec_dict["relations"]["to_long"][abbreviated_name] = relation_name spec_dict["relations"]["to_long"][relation_name] = relation_name return spec_dict
python
def add_relations(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]: """Add relation keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added relation keys """ # Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances spec_dict["relations"]["list"] = [] spec_dict["relations"]["list_short"] = [] spec_dict["relations"]["list_long"] = [] spec_dict["relations"]["to_short"] = {} spec_dict["relations"]["to_long"] = {} for relation_name in spec_dict["relations"]["info"]: abbreviated_name = spec_dict["relations"]["info"][relation_name]["abbreviation"] spec_dict["relations"]["list"].extend((relation_name, abbreviated_name)) spec_dict["relations"]["list_long"].append(relation_name) spec_dict["relations"]["list_short"].append(abbreviated_name) spec_dict["relations"]["to_short"][relation_name] = abbreviated_name spec_dict["relations"]["to_short"][abbreviated_name] = abbreviated_name spec_dict["relations"]["to_long"][abbreviated_name] = relation_name spec_dict["relations"]["to_long"][relation_name] = relation_name return spec_dict
[ "def", "add_relations", "(", "spec_dict", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances", "spec_dict", "[", "\"r...
Add relation keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added relation keys
[ "Add", "relation", "keys", "to", "spec_dict" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L367-L397
train
50,869
belbio/bel
bel/lang/bel_specification.py
add_functions
def add_functions(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]: """Add function keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added function keys """ # Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances spec_dict["functions"]["list"] = [] spec_dict["functions"]["list_long"] = [] spec_dict["functions"]["list_short"] = [] spec_dict["functions"]["primary"] = {} spec_dict["functions"]["primary"]["list_long"] = [] spec_dict["functions"]["primary"]["list_short"] = [] spec_dict["functions"]["modifier"] = {} spec_dict["functions"]["modifier"]["list_long"] = [] spec_dict["functions"]["modifier"]["list_short"] = [] spec_dict["functions"]["to_short"] = {} spec_dict["functions"]["to_long"] = {} for func_name in spec_dict["functions"]["info"]: abbreviated_name = spec_dict["functions"]["info"][func_name]["abbreviation"] spec_dict["functions"]["list"].extend((func_name, abbreviated_name)) spec_dict["functions"]["list_long"].append(func_name) spec_dict["functions"]["list_short"].append(abbreviated_name) if spec_dict["functions"]["info"][func_name]["type"] == "primary": spec_dict["functions"]["primary"]["list_long"].append(func_name) spec_dict["functions"]["primary"]["list_short"].append(abbreviated_name) else: spec_dict["functions"]["modifier"]["list_long"].append(func_name) spec_dict["functions"]["modifier"]["list_short"].append(abbreviated_name) spec_dict["functions"]["to_short"][abbreviated_name] = abbreviated_name spec_dict["functions"]["to_short"][func_name] = abbreviated_name spec_dict["functions"]["to_long"][abbreviated_name] = func_name spec_dict["functions"]["to_long"][func_name] = func_name return spec_dict
python
def add_functions(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]: """Add function keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added function keys """ # Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances spec_dict["functions"]["list"] = [] spec_dict["functions"]["list_long"] = [] spec_dict["functions"]["list_short"] = [] spec_dict["functions"]["primary"] = {} spec_dict["functions"]["primary"]["list_long"] = [] spec_dict["functions"]["primary"]["list_short"] = [] spec_dict["functions"]["modifier"] = {} spec_dict["functions"]["modifier"]["list_long"] = [] spec_dict["functions"]["modifier"]["list_short"] = [] spec_dict["functions"]["to_short"] = {} spec_dict["functions"]["to_long"] = {} for func_name in spec_dict["functions"]["info"]: abbreviated_name = spec_dict["functions"]["info"][func_name]["abbreviation"] spec_dict["functions"]["list"].extend((func_name, abbreviated_name)) spec_dict["functions"]["list_long"].append(func_name) spec_dict["functions"]["list_short"].append(abbreviated_name) if spec_dict["functions"]["info"][func_name]["type"] == "primary": spec_dict["functions"]["primary"]["list_long"].append(func_name) spec_dict["functions"]["primary"]["list_short"].append(abbreviated_name) else: spec_dict["functions"]["modifier"]["list_long"].append(func_name) spec_dict["functions"]["modifier"]["list_short"].append(abbreviated_name) spec_dict["functions"]["to_short"][abbreviated_name] = abbreviated_name spec_dict["functions"]["to_short"][func_name] = abbreviated_name spec_dict["functions"]["to_long"][abbreviated_name] = func_name spec_dict["functions"]["to_long"][func_name] = func_name return spec_dict
[ "def", "add_functions", "(", "spec_dict", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances", "spec_dict", "[", "\"f...
Add function keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added function keys
[ "Add", "function", "keys", "to", "spec_dict" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L400-L448
train
50,870
belbio/bel
bel/lang/bel_specification.py
enhance_function_signatures
def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]: """Enhance function signatures Add required and optional objects to signatures objects for semantic validation support. Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: return enhanced bel specification dict """ for func in spec_dict["functions"]["signatures"]: for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]): args = sig["arguments"] req_args = [] pos_args = [] opt_args = [] mult_args = [] for arg in args: # Multiple argument types if arg.get("multiple", False): if arg["type"] in ["Function", "Modifier"]: mult_args.extend(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: # Complex signature has this mult_args.append(arg["type"]) # Optional, position dependent - will be added after req_args based on order in bel_specification elif arg.get("optional", False) and arg.get("position", False): if arg["type"] in ["Function", "Modifier"]: pos_args.append(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: pos_args.append(arg["type"]) # Optional, position independent elif arg.get("optional", False): if arg["type"] in ["Function", "Modifier"]: opt_args.extend(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: opt_args.append(arg["type"]) # Required arguments, position dependent else: if arg["type"] in ["Function", "Modifier"]: req_args.append(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: req_args.append(arg["type"]) spec_dict["functions"]["signatures"][func]["signatures"][i]["req_args"] = copy.deepcopy( req_args ) spec_dict["functions"]["signatures"][func]["signatures"][i]["pos_args"] = copy.deepcopy( pos_args ) spec_dict["functions"]["signatures"][func]["signatures"][i]["opt_args"] = copy.deepcopy( opt_args ) spec_dict["functions"]["signatures"][func]["signatures"][i][ "mult_args" ] = 
copy.deepcopy(mult_args) return spec_dict
python
def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]: """Enhance function signatures Add required and optional objects to signatures objects for semantic validation support. Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: return enhanced bel specification dict """ for func in spec_dict["functions"]["signatures"]: for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]): args = sig["arguments"] req_args = [] pos_args = [] opt_args = [] mult_args = [] for arg in args: # Multiple argument types if arg.get("multiple", False): if arg["type"] in ["Function", "Modifier"]: mult_args.extend(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: # Complex signature has this mult_args.append(arg["type"]) # Optional, position dependent - will be added after req_args based on order in bel_specification elif arg.get("optional", False) and arg.get("position", False): if arg["type"] in ["Function", "Modifier"]: pos_args.append(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: pos_args.append(arg["type"]) # Optional, position independent elif arg.get("optional", False): if arg["type"] in ["Function", "Modifier"]: opt_args.extend(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: opt_args.append(arg["type"]) # Required arguments, position dependent else: if arg["type"] in ["Function", "Modifier"]: req_args.append(arg.get("values", [])) elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]: req_args.append(arg["type"]) spec_dict["functions"]["signatures"][func]["signatures"][i]["req_args"] = copy.deepcopy( req_args ) spec_dict["functions"]["signatures"][func]["signatures"][i]["pos_args"] = copy.deepcopy( pos_args ) spec_dict["functions"]["signatures"][func]["signatures"][i]["opt_args"] = copy.deepcopy( opt_args ) spec_dict["functions"]["signatures"][func]["signatures"][i][ "mult_args" ] = 
copy.deepcopy(mult_args) return spec_dict
[ "def", "enhance_function_signatures", "(", "spec_dict", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "for", "func", "in", "spec_dict", "[", "\"functions\"", "]", "[", "\"signatures\"", "]", ":", "fo...
Enhance function signatures Add required and optional objects to signatures objects for semantic validation support. Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: return enhanced bel specification dict
[ "Enhance", "function", "signatures" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L479-L542
train
50,871
belbio/bel
bel/lang/bel_specification.py
create_ebnf_parser
def create_ebnf_parser(files): """Create EBNF files and EBNF-based parsers""" flag = False for belspec_fn in files: # Get EBNF Jinja template from Github if enabled if config["bel"]["lang"]["specification_github_repo"]: tmpl_fn = get_ebnf_template() # Check if EBNF file is more recent than belspec_fn ebnf_fn = belspec_fn.replace(".yaml", ".ebnf") if not os.path.exists(ebnf_fn) or os.path.getmtime(belspec_fn) > os.path.getmtime(ebnf_fn): with open(belspec_fn, "r") as f: belspec = yaml.load(f, Loader=yaml.SafeLoader) tmpl_dir = os.path.dirname(tmpl_fn) tmpl_basename = os.path.basename(tmpl_fn) bel_major_version = belspec["version"].split(".")[0] env = jinja2.Environment( loader=jinja2.FileSystemLoader(tmpl_dir) ) # create environment for template template = env.get_template(tmpl_basename) # get the template # replace template placeholders with appropriate variables relations_list = [ (relation, belspec["relations"]["info"][relation]["abbreviation"]) for relation in belspec["relations"]["info"] ] relations_list = sorted(list(itertools.chain(*relations_list)), key=len, reverse=True) functions_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "primary" ] functions_list = sorted(list(itertools.chain(*functions_list)), key=len, reverse=True) modifiers_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "modifier" ] modifiers_list = sorted(list(itertools.chain(*modifiers_list)), key=len, reverse=True) created_time = datetime.datetime.now().strftime("%B %d, %Y - %I:%M:%S%p") ebnf = template.render( functions=functions_list, m_functions=modifiers_list, relations=relations_list, bel_version=belspec["version"], bel_major_version=bel_major_version, created_time=created_time, ) with open(ebnf_fn, "w") as f: f.write(ebnf) parser_fn = 
ebnf_fn.replace(".ebnf", "_parser.py") parser = tatsu.to_python_sourcecode(ebnf, filename=parser_fn) flag = True with open(parser_fn, "wt") as f: f.write(parser) if flag: # In case we created new parser modules importlib.invalidate_caches()
python
def create_ebnf_parser(files): """Create EBNF files and EBNF-based parsers""" flag = False for belspec_fn in files: # Get EBNF Jinja template from Github if enabled if config["bel"]["lang"]["specification_github_repo"]: tmpl_fn = get_ebnf_template() # Check if EBNF file is more recent than belspec_fn ebnf_fn = belspec_fn.replace(".yaml", ".ebnf") if not os.path.exists(ebnf_fn) or os.path.getmtime(belspec_fn) > os.path.getmtime(ebnf_fn): with open(belspec_fn, "r") as f: belspec = yaml.load(f, Loader=yaml.SafeLoader) tmpl_dir = os.path.dirname(tmpl_fn) tmpl_basename = os.path.basename(tmpl_fn) bel_major_version = belspec["version"].split(".")[0] env = jinja2.Environment( loader=jinja2.FileSystemLoader(tmpl_dir) ) # create environment for template template = env.get_template(tmpl_basename) # get the template # replace template placeholders with appropriate variables relations_list = [ (relation, belspec["relations"]["info"][relation]["abbreviation"]) for relation in belspec["relations"]["info"] ] relations_list = sorted(list(itertools.chain(*relations_list)), key=len, reverse=True) functions_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "primary" ] functions_list = sorted(list(itertools.chain(*functions_list)), key=len, reverse=True) modifiers_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "modifier" ] modifiers_list = sorted(list(itertools.chain(*modifiers_list)), key=len, reverse=True) created_time = datetime.datetime.now().strftime("%B %d, %Y - %I:%M:%S%p") ebnf = template.render( functions=functions_list, m_functions=modifiers_list, relations=relations_list, bel_version=belspec["version"], bel_major_version=bel_major_version, created_time=created_time, ) with open(ebnf_fn, "w") as f: f.write(ebnf) parser_fn = 
ebnf_fn.replace(".ebnf", "_parser.py") parser = tatsu.to_python_sourcecode(ebnf, filename=parser_fn) flag = True with open(parser_fn, "wt") as f: f.write(parser) if flag: # In case we created new parser modules importlib.invalidate_caches()
[ "def", "create_ebnf_parser", "(", "files", ")", ":", "flag", "=", "False", "for", "belspec_fn", "in", "files", ":", "# Get EBNF Jinja template from Github if enabled", "if", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"specification_github_repo\"", "...
Create EBNF files and EBNF-based parsers
[ "Create", "EBNF", "files", "and", "EBNF", "-", "based", "parsers" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L592-L661
train
50,872
belbio/bel
bel/lang/bel_specification.py
get_function_help
def get_function_help(function: str, bel_spec: BELSpec): """Get function_help given function name This will get the function summary template (argument summary in signature) and the argument help listing. """ function_long = bel_spec["functions"]["to_long"].get(function) function_help = [] if function_long: for signature in bel_spec["functions"]["signatures"][function_long]["signatures"]: function_help.append( { "function_summary": signature["argument_summary"], "argument_help": signature["argument_help_listing"], "description": bel_spec["functions"]["info"][function_long]["description"], } ) return function_help
python
def get_function_help(function: str, bel_spec: BELSpec): """Get function_help given function name This will get the function summary template (argument summary in signature) and the argument help listing. """ function_long = bel_spec["functions"]["to_long"].get(function) function_help = [] if function_long: for signature in bel_spec["functions"]["signatures"][function_long]["signatures"]: function_help.append( { "function_summary": signature["argument_summary"], "argument_help": signature["argument_help_listing"], "description": bel_spec["functions"]["info"][function_long]["description"], } ) return function_help
[ "def", "get_function_help", "(", "function", ":", "str", ",", "bel_spec", ":", "BELSpec", ")", ":", "function_long", "=", "bel_spec", "[", "\"functions\"", "]", "[", "\"to_long\"", "]", ".", "get", "(", "function", ")", "function_help", "=", "[", "]", "if"...
Get function_help given function name This will get the function summary template (argument summary in signature) and the argument help listing.
[ "Get", "function_help", "given", "function", "name" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L664-L684
train
50,873
belbio/bel
bel/lang/completion.py
in_span
def in_span(loc: int, span: Span) -> bool: """Checks if loc is inside span""" if loc >= span[0] and loc <= span[1]: return True else: return False
python
def in_span(loc: int, span: Span) -> bool: """Checks if loc is inside span""" if loc >= span[0] and loc <= span[1]: return True else: return False
[ "def", "in_span", "(", "loc", ":", "int", ",", "span", ":", "Span", ")", "->", "bool", ":", "if", "loc", ">=", "span", "[", "0", "]", "and", "loc", "<=", "span", "[", "1", "]", ":", "return", "True", "else", ":", "return", "False" ]
Checks if loc is inside span
[ "Checks", "if", "loc", "is", "inside", "span" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L35-L41
train
50,874
belbio/bel
bel/lang/completion.py
relation_completions
def relation_completions( completion_text: str, bel_spec: BELSpec, bel_fmt: str, size: int ) -> list: """Filter BEL relations by prefix Args: prefix: completion string bel_fmt: short, medium, long BEL formats spec: BEL specification Returns: list: list of BEL relations that match prefix """ if bel_fmt == "short": relation_list = bel_spec["relations"]["list_short"] else: relation_list = bel_spec["relations"]["list_long"] matches = [] for r in relation_list: if re.match(completion_text, r): matches.append(r) replace_list = [] for match in matches: highlight = match.replace(completion_text, f"<em>{completion_text}</em>") replace_list.append( { "replacement": match, "label": match, "highlight": highlight, "type": "Relation", } ) return replace_list[:size]
python
def relation_completions( completion_text: str, bel_spec: BELSpec, bel_fmt: str, size: int ) -> list: """Filter BEL relations by prefix Args: prefix: completion string bel_fmt: short, medium, long BEL formats spec: BEL specification Returns: list: list of BEL relations that match prefix """ if bel_fmt == "short": relation_list = bel_spec["relations"]["list_short"] else: relation_list = bel_spec["relations"]["list_long"] matches = [] for r in relation_list: if re.match(completion_text, r): matches.append(r) replace_list = [] for match in matches: highlight = match.replace(completion_text, f"<em>{completion_text}</em>") replace_list.append( { "replacement": match, "label": match, "highlight": highlight, "type": "Relation", } ) return replace_list[:size]
[ "def", "relation_completions", "(", "completion_text", ":", "str", ",", "bel_spec", ":", "BELSpec", ",", "bel_fmt", ":", "str", ",", "size", ":", "int", ")", "->", "list", ":", "if", "bel_fmt", "==", "\"short\"", ":", "relation_list", "=", "bel_spec", "[",...
Filter BEL relations by prefix Args: prefix: completion string bel_fmt: short, medium, long BEL formats spec: BEL specification Returns: list: list of BEL relations that match prefix
[ "Filter", "BEL", "relations", "by", "prefix" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L269-L305
train
50,875
belbio/bel
bel/lang/completion.py
function_completions
def function_completions( completion_text: str, bel_spec: BELSpec, function_list: list, bel_fmt: str, size: int, ) -> list: """Filter BEL functions by prefix Args: prefix: completion string bel_fmt: short, medium, long BEL formats spec: BEL specification Returns: list: list of BEL functions that match prefix """ # Convert provided function list to correct bel_fmt if isinstance(function_list, list): if bel_fmt in ["short", "medium"]: function_list = [ bel_spec["functions"]["to_short"][fn] for fn in function_list ] else: function_list = [ bel_spec["functions"]["to_long"][fn] for fn in function_list ] elif bel_fmt in ["short", "medium"]: function_list = bel_spec["functions"]["primary"]["list_short"] else: function_list = bel_spec["functions"]["primary"]["list_long"] matches = [] for f in function_list: escaped_completion_text = completion_text.replace(r"(", r"\(").replace( r")", r"\)" ) log.debug(f"Completion match: {escaped_completion_text} F: {f}") if re.match(escaped_completion_text, f): matches.append(f) replace_list = [] for match in matches: if completion_text: highlight = match.replace(completion_text, f"<em>{completion_text}</em>") else: highlight = completion_text replace_list.append( { "replacement": match, "label": f"{match}()", "highlight": highlight, "type": "Function", } ) return replace_list[:size]
python
def function_completions( completion_text: str, bel_spec: BELSpec, function_list: list, bel_fmt: str, size: int, ) -> list: """Filter BEL functions by prefix Args: prefix: completion string bel_fmt: short, medium, long BEL formats spec: BEL specification Returns: list: list of BEL functions that match prefix """ # Convert provided function list to correct bel_fmt if isinstance(function_list, list): if bel_fmt in ["short", "medium"]: function_list = [ bel_spec["functions"]["to_short"][fn] for fn in function_list ] else: function_list = [ bel_spec["functions"]["to_long"][fn] for fn in function_list ] elif bel_fmt in ["short", "medium"]: function_list = bel_spec["functions"]["primary"]["list_short"] else: function_list = bel_spec["functions"]["primary"]["list_long"] matches = [] for f in function_list: escaped_completion_text = completion_text.replace(r"(", r"\(").replace( r")", r"\)" ) log.debug(f"Completion match: {escaped_completion_text} F: {f}") if re.match(escaped_completion_text, f): matches.append(f) replace_list = [] for match in matches: if completion_text: highlight = match.replace(completion_text, f"<em>{completion_text}</em>") else: highlight = completion_text replace_list.append( { "replacement": match, "label": f"{match}()", "highlight": highlight, "type": "Function", } ) return replace_list[:size]
[ "def", "function_completions", "(", "completion_text", ":", "str", ",", "bel_spec", ":", "BELSpec", ",", "function_list", ":", "list", ",", "bel_fmt", ":", "str", ",", "size", ":", "int", ",", ")", "->", "list", ":", "# Convert provided function list to correct ...
Filter BEL functions by prefix Args: prefix: completion string bel_fmt: short, medium, long BEL formats spec: BEL specification Returns: list: list of BEL functions that match prefix
[ "Filter", "BEL", "functions", "by", "prefix" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L308-L366
train
50,876
belbio/bel
bel/lang/completion.py
add_completions
def add_completions( replace_list: list, belstr: str, replace_span: Span, completion_text: str ) -> List[Mapping[str, Any]]: """Create completions to return given replacement list Args: replace_list: list of completion replacement values belstr: BEL String replace_span: start, stop of belstr to replace completion_text: text to use for completion - used for creating highlight Returns: [{ "replacement": replacement, "cursor_loc": cursor_loc, "highlight": highlight, "label": label, }] """ completions = [] for r in replace_list: # if '(' not in belstr: # replacement = f'{r["replacement"]}()' # cursor_loc = len(replacement) - 1 # inside parenthesis # elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1: if len(belstr) > 0: belstr_end = len(belstr) - 1 else: belstr_end = 0 log.debug( f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}' ) # Put a space between comma and following function arg if ( r["type"] == "Function" and replace_span[0] > 0 and belstr[replace_span[0] - 1] == "," ): log.debug("prior char is a comma") replacement = ( belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()" + belstr[replace_span[1] + 1 :] ) cursor_loc = len( belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()" ) # Put a space between comman and following NSArg or StrArg elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",": log.debug("prior char is a comma") replacement = ( belstr[0 : replace_span[0]] + " " + r["replacement"] + belstr[replace_span[1] + 1 :] ) cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"]) # Add function to end of belstr elif r["type"] == "Function" and replace_span[1] >= belstr_end: replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()" cursor_loc = len(replacement) - 1 # inside parenthesis log.debug(f"Replacement: {replacement}") # Insert replacement in beginning or middle of belstr else: 
replacement = ( belstr[0 : replace_span[0]] + r["replacement"] + belstr[replace_span[1] + 1 :] ) cursor_loc = len( belstr[0 : replace_span[0]] + r["replacement"] ) # move cursor just past replacement completions.append( { "replacement": replacement, "cursor_loc": cursor_loc, "highlight": r["highlight"], "label": r["label"], } ) return completions
python
def add_completions( replace_list: list, belstr: str, replace_span: Span, completion_text: str ) -> List[Mapping[str, Any]]: """Create completions to return given replacement list Args: replace_list: list of completion replacement values belstr: BEL String replace_span: start, stop of belstr to replace completion_text: text to use for completion - used for creating highlight Returns: [{ "replacement": replacement, "cursor_loc": cursor_loc, "highlight": highlight, "label": label, }] """ completions = [] for r in replace_list: # if '(' not in belstr: # replacement = f'{r["replacement"]}()' # cursor_loc = len(replacement) - 1 # inside parenthesis # elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1: if len(belstr) > 0: belstr_end = len(belstr) - 1 else: belstr_end = 0 log.debug( f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}' ) # Put a space between comma and following function arg if ( r["type"] == "Function" and replace_span[0] > 0 and belstr[replace_span[0] - 1] == "," ): log.debug("prior char is a comma") replacement = ( belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()" + belstr[replace_span[1] + 1 :] ) cursor_loc = len( belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()" ) # Put a space between comman and following NSArg or StrArg elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",": log.debug("prior char is a comma") replacement = ( belstr[0 : replace_span[0]] + " " + r["replacement"] + belstr[replace_span[1] + 1 :] ) cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"]) # Add function to end of belstr elif r["type"] == "Function" and replace_span[1] >= belstr_end: replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()" cursor_loc = len(replacement) - 1 # inside parenthesis log.debug(f"Replacement: {replacement}") # Insert replacement in beginning or middle of belstr else: 
replacement = ( belstr[0 : replace_span[0]] + r["replacement"] + belstr[replace_span[1] + 1 :] ) cursor_loc = len( belstr[0 : replace_span[0]] + r["replacement"] ) # move cursor just past replacement completions.append( { "replacement": replacement, "cursor_loc": cursor_loc, "highlight": r["highlight"], "label": r["label"], } ) return completions
[ "def", "add_completions", "(", "replace_list", ":", "list", ",", "belstr", ":", "str", ",", "replace_span", ":", "Span", ",", "completion_text", ":", "str", ")", "->", "List", "[", "Mapping", "[", "str", ",", "Any", "]", "]", ":", "completions", "=", "...
Create completions to return given replacement list Args: replace_list: list of completion replacement values belstr: BEL String replace_span: start, stop of belstr to replace completion_text: text to use for completion - used for creating highlight Returns: [{ "replacement": replacement, "cursor_loc": cursor_loc, "highlight": highlight, "label": label, }]
[ "Create", "completions", "to", "return", "given", "replacement", "list" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L494-L582
train
50,877
belbio/bel
bel/lang/completion.py
get_completions
def get_completions( belstr: str, cursor_loc: int, bel_spec: BELSpec, bel_comp: str, bel_fmt: str, species_id: str, size: int, ): """Get BEL Assertion completions Args: Results: """ ast, errors = pparse.get_ast_dict(belstr) spans = pparse.collect_spans(ast) completion_text = "" completions = [] function_help = [] log.debug(f"Cursor location BELstr: {belstr} Cursor idx: {cursor_loc}") cursor_results = cursor(belstr, ast, cursor_loc) log.debug(f"Cursor results: {cursor_results}") if not cursor_results: log.debug("Cursor results is empty") return (completion_text, completions, function_help, spans) completion_text = cursor_results.get("completion_text", "") replace_span = cursor_results["replace_span"] namespace = cursor_results.get("namespace", None) if "parent_function" in cursor_results: parent_function = cursor_results["parent_function"] function_help = bel_specification.get_function_help( cursor_results["parent_function"], bel_spec ) args = cursor_results.get("args", []) arg_idx = cursor_results.get("arg_idx") replace_list = arg_completions( completion_text, parent_function, args, arg_idx, bel_spec, bel_fmt, species_id, namespace, size, ) elif cursor_results["type"] == "Function": function_list = None replace_list = function_completions( completion_text, bel_spec, function_list, bel_fmt, size ) elif cursor_results["type"] == "Relation": replace_list = relation_completions(completion_text, bel_spec, bel_fmt, size) completions.extend( add_completions(replace_list, belstr, replace_span, completion_text) ) return completion_text, completions, function_help, spans
python
def get_completions( belstr: str, cursor_loc: int, bel_spec: BELSpec, bel_comp: str, bel_fmt: str, species_id: str, size: int, ): """Get BEL Assertion completions Args: Results: """ ast, errors = pparse.get_ast_dict(belstr) spans = pparse.collect_spans(ast) completion_text = "" completions = [] function_help = [] log.debug(f"Cursor location BELstr: {belstr} Cursor idx: {cursor_loc}") cursor_results = cursor(belstr, ast, cursor_loc) log.debug(f"Cursor results: {cursor_results}") if not cursor_results: log.debug("Cursor results is empty") return (completion_text, completions, function_help, spans) completion_text = cursor_results.get("completion_text", "") replace_span = cursor_results["replace_span"] namespace = cursor_results.get("namespace", None) if "parent_function" in cursor_results: parent_function = cursor_results["parent_function"] function_help = bel_specification.get_function_help( cursor_results["parent_function"], bel_spec ) args = cursor_results.get("args", []) arg_idx = cursor_results.get("arg_idx") replace_list = arg_completions( completion_text, parent_function, args, arg_idx, bel_spec, bel_fmt, species_id, namespace, size, ) elif cursor_results["type"] == "Function": function_list = None replace_list = function_completions( completion_text, bel_spec, function_list, bel_fmt, size ) elif cursor_results["type"] == "Relation": replace_list = relation_completions(completion_text, bel_spec, bel_fmt, size) completions.extend( add_completions(replace_list, belstr, replace_span, completion_text) ) return completion_text, completions, function_help, spans
[ "def", "get_completions", "(", "belstr", ":", "str", ",", "cursor_loc", ":", "int", ",", "bel_spec", ":", "BELSpec", ",", "bel_comp", ":", "str", ",", "bel_fmt", ":", "str", ",", "species_id", ":", "str", ",", "size", ":", "int", ",", ")", ":", "ast"...
Get BEL Assertion completions Args: Results:
[ "Get", "BEL", "Assertion", "completions" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L585-L655
train
50,878
belbio/bel
bel/lang/partialparse.py
parse_functions
def parse_functions( bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """Parse functions from BEL using paren, comma, quote character locations Args: bels: BEL string as list of chars char_locs: paren, comma, quote character locations errors: Any error messages generated during the parse Returns: (functions, errors): function names and locations and error messages """ parens = char_locs["parens"] # Handle partial top-level function name if not parens: bels_len = len(bels) - 1 span = (0, bels_len) parsed[span] = { "name": "".join(bels), "type": "Function", "span": span, "name_span": (span), "function_level": "top", } return parsed, errors for sp in sorted(parens): # sp = starting paren, ep = ending_paren ep, function_level = parens[sp] # Functions can't have a space between function name and left paren if bels[sp - 1] == " ": continue # look in front of start paren for function name for i in range(sp - 1, 0, -1): if bels[i] in [" ", ",", "("]: # function name upstream boundary chars if i < sp - 1: if ep == -1: span = (i + 1, len(bels) - 1) else: span = (i + 1, ep) parsed[span] = { "name": "".join(bels[i + 1 : sp]), "type": "Function", "span": span, "name_span": (i + 1, sp - 1), "parens_span": (sp, ep), "function_level": function_level, } break else: if ep == -1: span = (0, len(bels) - 1) else: span = (0, ep) parsed[span] = { "name": "".join(bels[0:sp]), "type": "Function", "span": span, "name_span": (0, sp - 1), "parens_span": (sp, ep), "function_level": function_level, } return parsed, errors
python
def parse_functions( bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """Parse functions from BEL using paren, comma, quote character locations Args: bels: BEL string as list of chars char_locs: paren, comma, quote character locations errors: Any error messages generated during the parse Returns: (functions, errors): function names and locations and error messages """ parens = char_locs["parens"] # Handle partial top-level function name if not parens: bels_len = len(bels) - 1 span = (0, bels_len) parsed[span] = { "name": "".join(bels), "type": "Function", "span": span, "name_span": (span), "function_level": "top", } return parsed, errors for sp in sorted(parens): # sp = starting paren, ep = ending_paren ep, function_level = parens[sp] # Functions can't have a space between function name and left paren if bels[sp - 1] == " ": continue # look in front of start paren for function name for i in range(sp - 1, 0, -1): if bels[i] in [" ", ",", "("]: # function name upstream boundary chars if i < sp - 1: if ep == -1: span = (i + 1, len(bels) - 1) else: span = (i + 1, ep) parsed[span] = { "name": "".join(bels[i + 1 : sp]), "type": "Function", "span": span, "name_span": (i + 1, sp - 1), "parens_span": (sp, ep), "function_level": function_level, } break else: if ep == -1: span = (0, len(bels) - 1) else: span = (0, ep) parsed[span] = { "name": "".join(bels[0:sp]), "type": "Function", "span": span, "name_span": (0, sp - 1), "parens_span": (sp, ep), "function_level": function_level, } return parsed, errors
[ "def", "parse_functions", "(", "bels", ":", "list", ",", "char_locs", ":", "CharLocs", ",", "parsed", ":", "Parsed", ",", "errors", ":", "Errors", ")", "->", "Tuple", "[", "Parsed", ",", "Errors", "]", ":", "parens", "=", "char_locs", "[", "\"parens\"", ...
Parse functions from BEL using paren, comma, quote character locations Args: bels: BEL string as list of chars char_locs: paren, comma, quote character locations errors: Any error messages generated during the parse Returns: (functions, errors): function names and locations and error messages
[ "Parse", "functions", "from", "BEL", "using", "paren", "comma", "quote", "character", "locations" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L235-L303
train
50,879
belbio/bel
bel/lang/partialparse.py
parse_args
def parse_args( bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """Parse arguments from functions Args: bels: BEL string as list of chars char_locs: char locations for parens, commas and quotes parsed: function locations errors: error messages Returns: (functions, errors): function and arg locations plus error messages """ commas = char_locs["commas"] # Process each span key in parsed from beginning for span in parsed: if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]: continue # Skip if not argument-less sp, ep = parsed[span]["parens_span"] # calculate args_end position if ep == -1: # supports bel completion args_end = len(bels) - 1 # 1 else: args_end = ep - 1 # 1 # Parse arguments args = [] arg_start = sp + 1 each_arg_end_list = sorted([end - 1 for end in commas.get(sp, [])] + [args_end]) for arg_end in each_arg_end_list: # log.debug(f'Arg_start: {arg_start} Arg_end: {arg_end}') # Skip blanks at beginning of argument while arg_start < args_end and bels[arg_start] == " ": arg_start += 1 # Trim arg_end (e.g. HGNC:AKT1 , HGNC:EGF) - if there are spaces before comma trimmed_arg_end = arg_end while trimmed_arg_end > arg_start and bels[trimmed_arg_end] == " ": trimmed_arg_end -= 1 if trimmed_arg_end < arg_start: trimmed_arg_end = arg_start arg = "".join(bels[arg_start : trimmed_arg_end + 1]) # log.debug(f'Adding arg to args: {arg_start} {trimmed_arg_end}') args.append({"arg": arg, "span": (arg_start, trimmed_arg_end)}) arg_start = arg_end + 2 parsed[span]["args"] = args return parsed, errors
python
def parse_args( bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """Parse arguments from functions Args: bels: BEL string as list of chars char_locs: char locations for parens, commas and quotes parsed: function locations errors: error messages Returns: (functions, errors): function and arg locations plus error messages """ commas = char_locs["commas"] # Process each span key in parsed from beginning for span in parsed: if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]: continue # Skip if not argument-less sp, ep = parsed[span]["parens_span"] # calculate args_end position if ep == -1: # supports bel completion args_end = len(bels) - 1 # 1 else: args_end = ep - 1 # 1 # Parse arguments args = [] arg_start = sp + 1 each_arg_end_list = sorted([end - 1 for end in commas.get(sp, [])] + [args_end]) for arg_end in each_arg_end_list: # log.debug(f'Arg_start: {arg_start} Arg_end: {arg_end}') # Skip blanks at beginning of argument while arg_start < args_end and bels[arg_start] == " ": arg_start += 1 # Trim arg_end (e.g. HGNC:AKT1 , HGNC:EGF) - if there are spaces before comma trimmed_arg_end = arg_end while trimmed_arg_end > arg_start and bels[trimmed_arg_end] == " ": trimmed_arg_end -= 1 if trimmed_arg_end < arg_start: trimmed_arg_end = arg_start arg = "".join(bels[arg_start : trimmed_arg_end + 1]) # log.debug(f'Adding arg to args: {arg_start} {trimmed_arg_end}') args.append({"arg": arg, "span": (arg_start, trimmed_arg_end)}) arg_start = arg_end + 2 parsed[span]["args"] = args return parsed, errors
[ "def", "parse_args", "(", "bels", ":", "list", ",", "char_locs", ":", "CharLocs", ",", "parsed", ":", "Parsed", ",", "errors", ":", "Errors", ")", "->", "Tuple", "[", "Parsed", ",", "Errors", "]", ":", "commas", "=", "char_locs", "[", "\"commas\"", "]"...
Parse arguments from functions Args: bels: BEL string as list of chars char_locs: char locations for parens, commas and quotes parsed: function locations errors: error messages Returns: (functions, errors): function and arg locations plus error messages
[ "Parse", "arguments", "from", "functions" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L306-L362
train
50,880
belbio/bel
bel/lang/partialparse.py
arg_types
def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]: """Add argument types to parsed function data structure Args: parsed: function and arg locations in BEL string errors: error messages Returns: (parsed, errors): parsed, arguments with arg types plus error messages """ func_pattern = re.compile(r"\s*[a-zA-Z]+\(") nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$") for span in parsed: if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]: continue for i, arg in enumerate(parsed[span]["args"]): nsarg_matches = nsarg_pattern.match(arg["arg"]) if func_pattern.match(arg["arg"]): parsed[span]["args"][i].update({"type": "Function"}) elif nsarg_matches: (start, end) = arg["span"] ns = nsarg_matches.group(1) ns_val = nsarg_matches.group(2) ns_span = nsarg_matches.span(1) ns_span = (ns_span[0] + start, ns_span[1] + start - 1) ns_val_span = nsarg_matches.span(2) ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1) parsed[span]["args"][i].update( { "type": "NSArg", "ns": ns, "ns_span": ns_span, "ns_val": ns_val, "ns_val_span": ns_val_span, } ) else: parsed[span]["args"][i].update({"type": "StrArg"}) return parsed, errors
python
def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]: """Add argument types to parsed function data structure Args: parsed: function and arg locations in BEL string errors: error messages Returns: (parsed, errors): parsed, arguments with arg types plus error messages """ func_pattern = re.compile(r"\s*[a-zA-Z]+\(") nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$") for span in parsed: if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]: continue for i, arg in enumerate(parsed[span]["args"]): nsarg_matches = nsarg_pattern.match(arg["arg"]) if func_pattern.match(arg["arg"]): parsed[span]["args"][i].update({"type": "Function"}) elif nsarg_matches: (start, end) = arg["span"] ns = nsarg_matches.group(1) ns_val = nsarg_matches.group(2) ns_span = nsarg_matches.span(1) ns_span = (ns_span[0] + start, ns_span[1] + start - 1) ns_val_span = nsarg_matches.span(2) ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1) parsed[span]["args"][i].update( { "type": "NSArg", "ns": ns, "ns_span": ns_span, "ns_val": ns_val, "ns_val_span": ns_val_span, } ) else: parsed[span]["args"][i].update({"type": "StrArg"}) return parsed, errors
[ "def", "arg_types", "(", "parsed", ":", "Parsed", ",", "errors", ":", "Errors", ")", "->", "Tuple", "[", "Parsed", ",", "Errors", "]", ":", "func_pattern", "=", "re", ".", "compile", "(", "r\"\\s*[a-zA-Z]+\\(\"", ")", "nsarg_pattern", "=", "re", ".", "co...
Add argument types to parsed function data structure Args: parsed: function and arg locations in BEL string errors: error messages Returns: (parsed, errors): parsed, arguments with arg types plus error messages
[ "Add", "argument", "types", "to", "parsed", "function", "data", "structure" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L365-L408
train
50,881
belbio/bel
bel/lang/partialparse.py
parse_relations
def parse_relations( belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """Parse relations from BEL string Args: belstr: BEL string as one single string (not list of chars) char_locs: paren, comma and quote char locations parsed: data structure for parsed functions, relations, nested errors: error messages Returns: (parsed, errors): """ quotes = char_locs["quotes"] quoted_range = set([i for start, end in quotes.items() for i in range(start, end)]) for match in relations_pattern_middle.finditer(belstr): (start, end) = match.span(1) # log.debug(f'Relation-middle {match}') end = end - 1 # adjust end to match actual end character index if start != end: test_range = set(range(start, end)) else: test_range = set(start) # Skip if relation overlaps with quoted string if test_range.intersection(quoted_range): continue span_key = (start, end) parsed[span_key] = { "type": "Relation", "name": match.group(1), "span": (start, end), } for match in relations_pattern_end.finditer(belstr): (start, end) = match.span(1) log.debug(f"Relation-end {match}") end = end - 1 # adjust end to match actual end character index if start != end: test_range = set(range(start, end)) else: test_range = set(start) # Skip if relation overlaps with quoted string if test_range.intersection(quoted_range): continue span_key = (start, end) parsed[span_key] = { "type": "Relation", "name": match.group(1), "span": (start, end), } return parsed, errors
python
def parse_relations( belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """Parse relations from BEL string Args: belstr: BEL string as one single string (not list of chars) char_locs: paren, comma and quote char locations parsed: data structure for parsed functions, relations, nested errors: error messages Returns: (parsed, errors): """ quotes = char_locs["quotes"] quoted_range = set([i for start, end in quotes.items() for i in range(start, end)]) for match in relations_pattern_middle.finditer(belstr): (start, end) = match.span(1) # log.debug(f'Relation-middle {match}') end = end - 1 # adjust end to match actual end character index if start != end: test_range = set(range(start, end)) else: test_range = set(start) # Skip if relation overlaps with quoted string if test_range.intersection(quoted_range): continue span_key = (start, end) parsed[span_key] = { "type": "Relation", "name": match.group(1), "span": (start, end), } for match in relations_pattern_end.finditer(belstr): (start, end) = match.span(1) log.debug(f"Relation-end {match}") end = end - 1 # adjust end to match actual end character index if start != end: test_range = set(range(start, end)) else: test_range = set(start) # Skip if relation overlaps with quoted string if test_range.intersection(quoted_range): continue span_key = (start, end) parsed[span_key] = { "type": "Relation", "name": match.group(1), "span": (start, end), } return parsed, errors
[ "def", "parse_relations", "(", "belstr", ":", "str", ",", "char_locs", ":", "CharLocs", ",", "parsed", ":", "Parsed", ",", "errors", ":", "Errors", ")", "->", "Tuple", "[", "Parsed", ",", "Errors", "]", ":", "quotes", "=", "char_locs", "[", "\"quotes\"",...
Parse relations from BEL string Args: belstr: BEL string as one single string (not list of chars) char_locs: paren, comma and quote char locations parsed: data structure for parsed functions, relations, nested errors: error messages Returns: (parsed, errors):
[ "Parse", "relations", "from", "BEL", "string" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L411-L468
train
50,882
belbio/bel
bel/lang/partialparse.py
parse_nested
def parse_nested( bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """ Parse nested BEL object """ for sp in char_locs[ "nested_parens" ]: # sp = start parenthesis, ep = end parenthesis ep, level = char_locs["nested_parens"][sp] if ep == -1: ep = len(bels) + 1 parsed[(sp, ep)] = {"type": "Nested", "span": (sp, ep)} return parsed, errors
python
def parse_nested( bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: """ Parse nested BEL object """ for sp in char_locs[ "nested_parens" ]: # sp = start parenthesis, ep = end parenthesis ep, level = char_locs["nested_parens"][sp] if ep == -1: ep = len(bels) + 1 parsed[(sp, ep)] = {"type": "Nested", "span": (sp, ep)} return parsed, errors
[ "def", "parse_nested", "(", "bels", ":", "list", ",", "char_locs", ":", "CharLocs", ",", "parsed", ":", "Parsed", ",", "errors", ":", "Errors", ")", "->", "Tuple", "[", "Parsed", ",", "Errors", "]", ":", "for", "sp", "in", "char_locs", "[", "\"nested_p...
Parse nested BEL object
[ "Parse", "nested", "BEL", "object" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L471-L484
train
50,883
belbio/bel
bel/lang/partialparse.py
dump_json
def dump_json(d: dict) -> None: """Dump json when using tuples for dictionary keys Have to convert tuples to strings to dump out as json """ import json k = d.keys() v = d.values() k1 = [str(i) for i in k] return json.dumps(dict(zip(*[k1, v])), indent=4)
python
def dump_json(d: dict) -> None: """Dump json when using tuples for dictionary keys Have to convert tuples to strings to dump out as json """ import json k = d.keys() v = d.values() k1 = [str(i) for i in k] return json.dumps(dict(zip(*[k1, v])), indent=4)
[ "def", "dump_json", "(", "d", ":", "dict", ")", "->", "None", ":", "import", "json", "k", "=", "d", ".", "keys", "(", ")", "v", "=", "d", ".", "values", "(", ")", "k1", "=", "[", "str", "(", "i", ")", "for", "i", "in", "k", "]", "return", ...
Dump json when using tuples for dictionary keys Have to convert tuples to strings to dump out as json
[ "Dump", "json", "when", "using", "tuples", "for", "dictionary", "keys" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L487-L499
train
50,884
belbio/bel
bel/lang/partialparse.py
collect_spans
def collect_spans(ast: AST) -> List[Tuple[str, Tuple[int, int]]]: """Collect flattened list of spans of BEL syntax types Provide simple list of BEL syntax type spans for highlighting. Function names, NSargs, NS prefix, NS value and StrArgs will be tagged. Args: ast: AST of BEL assertion Returns: List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>)) """ spans = [] if ast.get("subject", False): spans.extend(collect_spans(ast["subject"])) if ast.get("object", False): spans.extend(collect_spans(ast["object"])) if ast.get("nested", False): spans.extend(collect_spans(ast["nested"])) if ast.get("function", False): log.debug(f"Processing function") spans.append(("Function", ast["function"]["name_span"])) log.debug(f"Spans: {spans}") if ast.get("args", False): for idx, arg in enumerate(ast["args"]): log.debug(f"Arg {arg}") if arg.get("function", False): log.debug(f"Recursing on arg function") results = collect_spans(arg) log.debug(f"Results {results}") spans.extend(results) # Recurse arg function elif arg.get("nsarg", False): log.debug(f"Processing NSArg Arg {arg}") spans.append(("NSArg", arg["span"])) spans.append(("NSPrefix", arg["nsarg"]["ns_span"])) spans.append(("NSVal", arg["nsarg"]["ns_val_span"])) elif arg["type"] == "StrArg": spans.append(("StrArg", arg["span"])) log.debug(f"Spans: {spans}") return spans
python
def collect_spans(ast: AST) -> List[Tuple[str, Tuple[int, int]]]: """Collect flattened list of spans of BEL syntax types Provide simple list of BEL syntax type spans for highlighting. Function names, NSargs, NS prefix, NS value and StrArgs will be tagged. Args: ast: AST of BEL assertion Returns: List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>)) """ spans = [] if ast.get("subject", False): spans.extend(collect_spans(ast["subject"])) if ast.get("object", False): spans.extend(collect_spans(ast["object"])) if ast.get("nested", False): spans.extend(collect_spans(ast["nested"])) if ast.get("function", False): log.debug(f"Processing function") spans.append(("Function", ast["function"]["name_span"])) log.debug(f"Spans: {spans}") if ast.get("args", False): for idx, arg in enumerate(ast["args"]): log.debug(f"Arg {arg}") if arg.get("function", False): log.debug(f"Recursing on arg function") results = collect_spans(arg) log.debug(f"Results {results}") spans.extend(results) # Recurse arg function elif arg.get("nsarg", False): log.debug(f"Processing NSArg Arg {arg}") spans.append(("NSArg", arg["span"])) spans.append(("NSPrefix", arg["nsarg"]["ns_span"])) spans.append(("NSVal", arg["nsarg"]["ns_val_span"])) elif arg["type"] == "StrArg": spans.append(("StrArg", arg["span"])) log.debug(f"Spans: {spans}") return spans
[ "def", "collect_spans", "(", "ast", ":", "AST", ")", "->", "List", "[", "Tuple", "[", "str", ",", "Tuple", "[", "int", ",", "int", "]", "]", "]", ":", "spans", "=", "[", "]", "if", "ast", ".", "get", "(", "\"subject\"", ",", "False", ")", ":", ...
Collect flattened list of spans of BEL syntax types Provide simple list of BEL syntax type spans for highlighting. Function names, NSargs, NS prefix, NS value and StrArgs will be tagged. Args: ast: AST of BEL assertion Returns: List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>))
[ "Collect", "flattened", "list", "of", "spans", "of", "BEL", "syntax", "types" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L502-L550
train
50,885
belbio/bel
bel/lang/partialparse.py
print_spans
def print_spans(spans, max_idx: int) -> None: """Quick test to show how character spans match original BEL String Mostly for debugging purposes """ bel_spans = [" "] * (max_idx + 3) for val, span in spans: if val in ["Nested", "NSArg"]: continue for i in range(span[0], span[1] + 1): bel_spans[i] = val[0] # print(''.join(bel_spans)) # Add second layer for Nested Objects if available bel_spans = [" "] * (max_idx + 3) for val, span in spans: if val not in ["Nested"]: continue for i in range(span[0], span[1] + 1): bel_spans[i] = val[0]
python
def print_spans(spans, max_idx: int) -> None: """Quick test to show how character spans match original BEL String Mostly for debugging purposes """ bel_spans = [" "] * (max_idx + 3) for val, span in spans: if val in ["Nested", "NSArg"]: continue for i in range(span[0], span[1] + 1): bel_spans[i] = val[0] # print(''.join(bel_spans)) # Add second layer for Nested Objects if available bel_spans = [" "] * (max_idx + 3) for val, span in spans: if val not in ["Nested"]: continue for i in range(span[0], span[1] + 1): bel_spans[i] = val[0]
[ "def", "print_spans", "(", "spans", ",", "max_idx", ":", "int", ")", "->", "None", ":", "bel_spans", "=", "[", "\" \"", "]", "*", "(", "max_idx", "+", "3", ")", "for", "val", ",", "span", "in", "spans", ":", "if", "val", "in", "[", "\"Nested\"", ...
Quick test to show how character spans match original BEL String Mostly for debugging purposes
[ "Quick", "test", "to", "show", "how", "character", "spans", "match", "original", "BEL", "String" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L577-L598
train
50,886
belbio/bel
bel/lang/partialparse.py
parsed_function_to_ast
def parsed_function_to_ast(parsed: Parsed, parsed_key): """Create AST for top-level functions""" sub = parsed[parsed_key] subtree = { "type": "Function", "span": sub["span"], "function": { "name": sub["name"], "name_span": sub["name_span"], "parens_span": sub.get("parens_span", []), }, } args = [] for arg in parsed[parsed_key].get("args", []): # pdb.set_trace() if arg["type"] == "Function": args.append(parsed_function_to_ast(parsed, arg["span"])) elif arg["type"] == "NSArg": args.append( { "arg": arg["arg"], "type": arg["type"], "span": arg["span"], "nsarg": { "ns": arg["ns"], "ns_val": arg["ns_val"], "ns_span": arg["ns_span"], "ns_val_span": arg["ns_val_span"], }, } ) elif arg["type"] == "StrArg": args.append({"arg": arg["arg"], "type": arg["type"], "span": arg["span"]}) subtree["args"] = copy.deepcopy(args) return subtree
python
def parsed_function_to_ast(parsed: Parsed, parsed_key): """Create AST for top-level functions""" sub = parsed[parsed_key] subtree = { "type": "Function", "span": sub["span"], "function": { "name": sub["name"], "name_span": sub["name_span"], "parens_span": sub.get("parens_span", []), }, } args = [] for arg in parsed[parsed_key].get("args", []): # pdb.set_trace() if arg["type"] == "Function": args.append(parsed_function_to_ast(parsed, arg["span"])) elif arg["type"] == "NSArg": args.append( { "arg": arg["arg"], "type": arg["type"], "span": arg["span"], "nsarg": { "ns": arg["ns"], "ns_val": arg["ns_val"], "ns_span": arg["ns_span"], "ns_val_span": arg["ns_val_span"], }, } ) elif arg["type"] == "StrArg": args.append({"arg": arg["arg"], "type": arg["type"], "span": arg["span"]}) subtree["args"] = copy.deepcopy(args) return subtree
[ "def", "parsed_function_to_ast", "(", "parsed", ":", "Parsed", ",", "parsed_key", ")", ":", "sub", "=", "parsed", "[", "parsed_key", "]", "subtree", "=", "{", "\"type\"", ":", "\"Function\"", ",", "\"span\"", ":", "sub", "[", "\"span\"", "]", ",", "\"funct...
Create AST for top-level functions
[ "Create", "AST", "for", "top", "-", "level", "functions" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L603-L644
train
50,887
belbio/bel
bel/lang/partialparse.py
parsed_top_level_errors
def parsed_top_level_errors(parsed, errors, component_type: str = "") -> Errors: """Check full parse for errors Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input """ # Error check fn_cnt = 0 rel_cnt = 0 nested_cnt = 0 for key in parsed: if parsed[key]["type"] == "Function": fn_cnt += 1 if parsed[key]["type"] == "Relation": rel_cnt += 1 if parsed[key]["type"] == "Nested": nested_cnt += 1 if not component_type: if nested_cnt > 1: errors.append( ( "Error", "Too many nested objects - can only have one per BEL Assertion", ) ) if nested_cnt: if rel_cnt > 2: errors.append( ( "Error", "Too many relations - can only have two in a nested BEL Assertion", ) ) elif fn_cnt > 4: errors.append(("Error", "Too many BEL subject and object candidates")) else: if rel_cnt > 1: errors.append( ( "Error", "Too many relations - can only have one in a BEL Assertion", ) ) elif fn_cnt > 2: errors.append(("Error", "Too many BEL subject and object candidates")) elif component_type == "subject": if rel_cnt > 0: errors.append( ("Error", "Too many relations - cannot have any in a BEL Subject") ) elif fn_cnt > 1: errors.append( ("Error", "Too many BEL subject candidates - can only have one") ) elif component_type == "object": if nested_cnt: if rel_cnt > 1: errors.append( ( "Error", "Too many relations - can only have one in a nested BEL object", ) ) elif fn_cnt > 2: errors.append( ( "Error", "Too many BEL subject and object candidates in a nested BEL object", ) ) else: if rel_cnt > 0: errors.append( ("Error", "Too many relations - cannot have any in a BEL Subject") ) elif fn_cnt > 1: errors.append( ("Error", "Too many BEL subject candidates - can only have one") ) return errors
python
def parsed_top_level_errors(parsed, errors, component_type: str = "") -> Errors: """Check full parse for errors Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input """ # Error check fn_cnt = 0 rel_cnt = 0 nested_cnt = 0 for key in parsed: if parsed[key]["type"] == "Function": fn_cnt += 1 if parsed[key]["type"] == "Relation": rel_cnt += 1 if parsed[key]["type"] == "Nested": nested_cnt += 1 if not component_type: if nested_cnt > 1: errors.append( ( "Error", "Too many nested objects - can only have one per BEL Assertion", ) ) if nested_cnt: if rel_cnt > 2: errors.append( ( "Error", "Too many relations - can only have two in a nested BEL Assertion", ) ) elif fn_cnt > 4: errors.append(("Error", "Too many BEL subject and object candidates")) else: if rel_cnt > 1: errors.append( ( "Error", "Too many relations - can only have one in a BEL Assertion", ) ) elif fn_cnt > 2: errors.append(("Error", "Too many BEL subject and object candidates")) elif component_type == "subject": if rel_cnt > 0: errors.append( ("Error", "Too many relations - cannot have any in a BEL Subject") ) elif fn_cnt > 1: errors.append( ("Error", "Too many BEL subject candidates - can only have one") ) elif component_type == "object": if nested_cnt: if rel_cnt > 1: errors.append( ( "Error", "Too many relations - can only have one in a nested BEL object", ) ) elif fn_cnt > 2: errors.append( ( "Error", "Too many BEL subject and object candidates in a nested BEL object", ) ) else: if rel_cnt > 0: errors.append( ("Error", "Too many relations - cannot have any in a BEL Subject") ) elif fn_cnt > 1: errors.append( ("Error", "Too many BEL subject candidates - can only have one") ) return errors
[ "def", "parsed_top_level_errors", "(", "parsed", ",", "errors", ",", "component_type", ":", "str", "=", "\"\"", ")", "->", "Errors", ":", "# Error check", "fn_cnt", "=", "0", "rel_cnt", "=", "0", "nested_cnt", "=", "0", "for", "key", "in", "parsed", ":", ...
Check full parse for errors Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input
[ "Check", "full", "parse", "for", "errors" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L647-L736
train
50,888
belbio/bel
bel/lang/partialparse.py
parsed_to_ast
def parsed_to_ast(parsed: Parsed, errors: Errors, component_type: str = ""): """Convert parsed data struct to AST dictionary Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input """ ast = {} sorted_keys = sorted(parsed.keys()) # Setup top-level tree for key in sorted_keys: if parsed[key]["type"] == "Nested": nested_component_stack = ["subject", "object"] if component_type: component_stack = [component_type] else: component_stack = ["subject", "object"] for key in sorted_keys: if parsed[key]["type"] == "Function" and parsed[key]["function_level"] == "top": ast[component_stack.pop(0)] = parsed_function_to_ast(parsed, key) elif parsed[key]["type"] == "Relation" and "relation" not in ast: ast["relation"] = { "name": parsed[key]["name"], "type": "Relation", "span": key, } elif parsed[key]["type"] == "Nested": ast["nested"] = {} for nested_key in sorted_keys: if nested_key <= key: continue if ( parsed[nested_key]["type"] == "Function" and parsed[nested_key]["function_level"] == "top" ): ast["nested"][ nested_component_stack.pop(0) ] = parsed_function_to_ast(parsed, nested_key) elif ( parsed[nested_key]["type"] == "Relation" and "relation" not in ast["nested"] ): ast["nested"]["relation"] = { "name": parsed[nested_key]["name"], "type": "Relation", "span": parsed[nested_key]["span"], } return ast, errors return ast, errors
python
def parsed_to_ast(parsed: Parsed, errors: Errors, component_type: str = ""): """Convert parsed data struct to AST dictionary Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input """ ast = {} sorted_keys = sorted(parsed.keys()) # Setup top-level tree for key in sorted_keys: if parsed[key]["type"] == "Nested": nested_component_stack = ["subject", "object"] if component_type: component_stack = [component_type] else: component_stack = ["subject", "object"] for key in sorted_keys: if parsed[key]["type"] == "Function" and parsed[key]["function_level"] == "top": ast[component_stack.pop(0)] = parsed_function_to_ast(parsed, key) elif parsed[key]["type"] == "Relation" and "relation" not in ast: ast["relation"] = { "name": parsed[key]["name"], "type": "Relation", "span": key, } elif parsed[key]["type"] == "Nested": ast["nested"] = {} for nested_key in sorted_keys: if nested_key <= key: continue if ( parsed[nested_key]["type"] == "Function" and parsed[nested_key]["function_level"] == "top" ): ast["nested"][ nested_component_stack.pop(0) ] = parsed_function_to_ast(parsed, nested_key) elif ( parsed[nested_key]["type"] == "Relation" and "relation" not in ast["nested"] ): ast["nested"]["relation"] = { "name": parsed[nested_key]["name"], "type": "Relation", "span": parsed[nested_key]["span"], } return ast, errors return ast, errors
[ "def", "parsed_to_ast", "(", "parsed", ":", "Parsed", ",", "errors", ":", "Errors", ",", "component_type", ":", "str", "=", "\"\"", ")", ":", "ast", "=", "{", "}", "sorted_keys", "=", "sorted", "(", "parsed", ".", "keys", "(", ")", ")", "# Setup top-le...
Convert parsed data struct to AST dictionary Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input
[ "Convert", "parsed", "data", "struct", "to", "AST", "dictionary" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L739-L796
train
50,889
belbio/bel
bel/lang/partialparse.py
get_ast_dict
def get_ast_dict(belstr, component_type: str = ""): """Convert BEL string to AST dictionary Args: belstr: BEL string component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input """ errors = [] parsed = {} bels = list(belstr) char_locs, errors = parse_chars(bels, errors) parsed, errors = parse_functions(belstr, char_locs, parsed, errors) parsed, errors = parse_args(bels, char_locs, parsed, errors) parsed, errors = arg_types(parsed, errors) parsed, errors = parse_relations(belstr, char_locs, parsed, errors) parsed, errors = parse_nested(bels, char_locs, parsed, errors) errors = parsed_top_level_errors(parsed, errors) ast, errors = parsed_to_ast(parsed, errors, component_type=component_type) return ast, errors
python
def get_ast_dict(belstr, component_type: str = ""): """Convert BEL string to AST dictionary Args: belstr: BEL string component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input """ errors = [] parsed = {} bels = list(belstr) char_locs, errors = parse_chars(bels, errors) parsed, errors = parse_functions(belstr, char_locs, parsed, errors) parsed, errors = parse_args(bels, char_locs, parsed, errors) parsed, errors = arg_types(parsed, errors) parsed, errors = parse_relations(belstr, char_locs, parsed, errors) parsed, errors = parse_nested(bels, char_locs, parsed, errors) errors = parsed_top_level_errors(parsed, errors) ast, errors = parsed_to_ast(parsed, errors, component_type=component_type) return ast, errors
[ "def", "get_ast_dict", "(", "belstr", ",", "component_type", ":", "str", "=", "\"\"", ")", ":", "errors", "=", "[", "]", "parsed", "=", "{", "}", "bels", "=", "list", "(", "belstr", ")", "char_locs", ",", "errors", "=", "parse_chars", "(", "bels", ",...
Convert BEL string to AST dictionary Args: belstr: BEL string component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input
[ "Convert", "BEL", "string", "to", "AST", "dictionary" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L799-L821
train
50,890
belbio/bel
bel/lang/partialparse.py
get_ast_obj
def get_ast_obj(belstr, bel_version, component_type: str = ""): """Convert AST partialparse dict to BELAst""" ast_dict, errors = get_ast_dict(belstr, component_type) spec = bel_specification.get_specification(bel_version) subj = ast_dict["subject"] subj_ast = add_ast_fn(subj, spec) relation = None obj = None if "relation" in ast_dict: relation = ast_dict["relation"]["name"] if "object" in ast_dict: obj = ast_dict["object"] obj_ast = add_ast_fn(obj, spec) return BELAst(subj_ast, relation, obj_ast, spec) elif "nested" in ast_dict: nested_subj = ast_dict["nested"]["subject"] nested_subj_ast = add_ast_fn(nested_subj, spec) nested_relation = ast_dict["nested"]["relation"]["name"] nested_obj = ast_dict["nested"]["object"] nested_obj_ast = add_ast_fn(nested_obj, spec) return BELAst( subj_ast, relation, BELAst(nested_subj_ast, nested_relation, nested_obj_ast, spec), spec, ) return BELAst(subj_ast, None, None, spec)
python
def get_ast_obj(belstr, bel_version, component_type: str = ""): """Convert AST partialparse dict to BELAst""" ast_dict, errors = get_ast_dict(belstr, component_type) spec = bel_specification.get_specification(bel_version) subj = ast_dict["subject"] subj_ast = add_ast_fn(subj, spec) relation = None obj = None if "relation" in ast_dict: relation = ast_dict["relation"]["name"] if "object" in ast_dict: obj = ast_dict["object"] obj_ast = add_ast_fn(obj, spec) return BELAst(subj_ast, relation, obj_ast, spec) elif "nested" in ast_dict: nested_subj = ast_dict["nested"]["subject"] nested_subj_ast = add_ast_fn(nested_subj, spec) nested_relation = ast_dict["nested"]["relation"]["name"] nested_obj = ast_dict["nested"]["object"] nested_obj_ast = add_ast_fn(nested_obj, spec) return BELAst( subj_ast, relation, BELAst(nested_subj_ast, nested_relation, nested_obj_ast, spec), spec, ) return BELAst(subj_ast, None, None, spec)
[ "def", "get_ast_obj", "(", "belstr", ",", "bel_version", ",", "component_type", ":", "str", "=", "\"\"", ")", ":", "ast_dict", ",", "errors", "=", "get_ast_dict", "(", "belstr", ",", "component_type", ")", "spec", "=", "bel_specification", ".", "get_specificat...
Convert AST partialparse dict to BELAst
[ "Convert", "AST", "partialparse", "dict", "to", "BELAst" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L824-L859
train
50,891
belbio/bel
bel/lang/partialparse.py
add_ast_fn
def add_ast_fn(d, spec, parent_function=None): """Convert dict AST to object AST Function Args: ast_fn: AST object Function d: AST as dictionary spec: BEL Specification Return: ast_fn """ if d["type"] == "Function": ast_fn = Function(d["function"]["name"], spec, parent_function=parent_function) for arg in d["args"]: if arg["type"] == "Function": ast_fn.add_argument(add_ast_fn(arg, spec, parent_function=ast_fn)) elif arg["type"] == "NSArg": ast_fn.add_argument( NSArg(arg["nsarg"]["ns"], arg["nsarg"]["ns_val"], ast_fn) ) elif arg["type"] == "StrArg": ast_fn.add_argument(StrArg(arg["arg"], ast_fn)) return ast_fn
python
def add_ast_fn(d, spec, parent_function=None): """Convert dict AST to object AST Function Args: ast_fn: AST object Function d: AST as dictionary spec: BEL Specification Return: ast_fn """ if d["type"] == "Function": ast_fn = Function(d["function"]["name"], spec, parent_function=parent_function) for arg in d["args"]: if arg["type"] == "Function": ast_fn.add_argument(add_ast_fn(arg, spec, parent_function=ast_fn)) elif arg["type"] == "NSArg": ast_fn.add_argument( NSArg(arg["nsarg"]["ns"], arg["nsarg"]["ns_val"], ast_fn) ) elif arg["type"] == "StrArg": ast_fn.add_argument(StrArg(arg["arg"], ast_fn)) return ast_fn
[ "def", "add_ast_fn", "(", "d", ",", "spec", ",", "parent_function", "=", "None", ")", ":", "if", "d", "[", "\"type\"", "]", "==", "\"Function\"", ":", "ast_fn", "=", "Function", "(", "d", "[", "\"function\"", "]", "[", "\"name\"", "]", ",", "spec", "...
Convert dict AST to object AST Function Args: ast_fn: AST object Function d: AST as dictionary spec: BEL Specification Return: ast_fn
[ "Convert", "dict", "AST", "to", "object", "AST", "Function" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L862-L885
train
50,892
belbio/bel
bel/lang/bel_utils.py
convert_namespaces_str
def convert_namespaces_str( bel_str: str, api_url: str = None, namespace_targets: Mapping[str, List[str]] = None, canonicalize: bool = False, decanonicalize: bool = False, ) -> str: """Convert namespace in string Uses a regex expression to extract all NSArgs and replace them with the updated NSArg from the BEL.bio API terms endpoint. Args: bel_str (str): bel statement string or partial string (e.g. subject or object) api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1 namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example canonicalize (bool): use canonicalize endpoint/namespace targets decanonicalize (bool): use decanonicalize endpoint/namespace targets Results: str: bel statement with namespaces converted """ # pattern - look for capitalized namespace followed by colon # and either a quoted string or a string that # can include any char other than space, comma or ')' matches = re.findall(r'([A-Z]+:"(?:\\.|[^"\\])*"|[A-Z]+:(?:[^\),\s]+))', bel_str) for nsarg in matches: if "DEFAULT:" in nsarg: # skip default namespaces continue updated_nsarg = convert_nsarg( nsarg, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) if updated_nsarg != nsarg: bel_str = bel_str.replace(nsarg, updated_nsarg) return bel_str
python
def convert_namespaces_str( bel_str: str, api_url: str = None, namespace_targets: Mapping[str, List[str]] = None, canonicalize: bool = False, decanonicalize: bool = False, ) -> str: """Convert namespace in string Uses a regex expression to extract all NSArgs and replace them with the updated NSArg from the BEL.bio API terms endpoint. Args: bel_str (str): bel statement string or partial string (e.g. subject or object) api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1 namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example canonicalize (bool): use canonicalize endpoint/namespace targets decanonicalize (bool): use decanonicalize endpoint/namespace targets Results: str: bel statement with namespaces converted """ # pattern - look for capitalized namespace followed by colon # and either a quoted string or a string that # can include any char other than space, comma or ')' matches = re.findall(r'([A-Z]+:"(?:\\.|[^"\\])*"|[A-Z]+:(?:[^\),\s]+))', bel_str) for nsarg in matches: if "DEFAULT:" in nsarg: # skip default namespaces continue updated_nsarg = convert_nsarg( nsarg, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) if updated_nsarg != nsarg: bel_str = bel_str.replace(nsarg, updated_nsarg) return bel_str
[ "def", "convert_namespaces_str", "(", "bel_str", ":", "str", ",", "api_url", ":", "str", "=", "None", ",", "namespace_targets", ":", "Mapping", "[", "str", ",", "List", "[", "str", "]", "]", "=", "None", ",", "canonicalize", ":", "bool", "=", "False", ...
Convert namespace in string Uses a regex expression to extract all NSArgs and replace them with the updated NSArg from the BEL.bio API terms endpoint. Args: bel_str (str): bel statement string or partial string (e.g. subject or object) api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1 namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example canonicalize (bool): use canonicalize endpoint/namespace targets decanonicalize (bool): use decanonicalize endpoint/namespace targets Results: str: bel statement with namespaces converted
[ "Convert", "namespace", "in", "string" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L85-L126
train
50,893
belbio/bel
bel/lang/bel_utils.py
convert_namespaces_ast
def convert_namespaces_ast( ast, api_url: str = None, namespace_targets: Mapping[str, List[str]] = None, canonicalize: bool = False, decanonicalize: bool = False, ): """Recursively convert namespaces of BEL Entities in BEL AST using API endpoint Canonicalization and decanonicalization is determined by endpoint used and namespace_targets. Args: ast (BEL): BEL AST api_url (str): endpoint url with a placeholder for the term_id (either /terms/<term_id>/canonicalized or /terms/<term_id>/decanonicalized) namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities Returns: BEL: BEL AST """ if isinstance(ast, NSArg): given_term_id = "{}:{}".format(ast.namespace, ast.value) # Get normalized term if necessary if (canonicalize and not ast.canonical) or ( decanonicalize and not ast.decanonical ): normalized_term = convert_nsarg( given_term_id, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) if canonicalize: ast.canonical = normalized_term elif decanonicalize: ast.decanonical = normalized_term # Update normalized term if canonicalize: ns, value = ast.canonical.split(":") ast.change_nsvalue(ns, value) elif decanonicalize: ns, value = ast.canonical.split(":") ast.change_nsvalue(ns, value) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: convert_namespaces_ast( arg, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) return ast
python
def convert_namespaces_ast( ast, api_url: str = None, namespace_targets: Mapping[str, List[str]] = None, canonicalize: bool = False, decanonicalize: bool = False, ): """Recursively convert namespaces of BEL Entities in BEL AST using API endpoint Canonicalization and decanonicalization is determined by endpoint used and namespace_targets. Args: ast (BEL): BEL AST api_url (str): endpoint url with a placeholder for the term_id (either /terms/<term_id>/canonicalized or /terms/<term_id>/decanonicalized) namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities Returns: BEL: BEL AST """ if isinstance(ast, NSArg): given_term_id = "{}:{}".format(ast.namespace, ast.value) # Get normalized term if necessary if (canonicalize and not ast.canonical) or ( decanonicalize and not ast.decanonical ): normalized_term = convert_nsarg( given_term_id, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) if canonicalize: ast.canonical = normalized_term elif decanonicalize: ast.decanonical = normalized_term # Update normalized term if canonicalize: ns, value = ast.canonical.split(":") ast.change_nsvalue(ns, value) elif decanonicalize: ns, value = ast.canonical.split(":") ast.change_nsvalue(ns, value) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: convert_namespaces_ast( arg, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) return ast
[ "def", "convert_namespaces_ast", "(", "ast", ",", "api_url", ":", "str", "=", "None", ",", "namespace_targets", ":", "Mapping", "[", "str", ",", "List", "[", "str", "]", "]", "=", "None", ",", "canonicalize", ":", "bool", "=", "False", ",", "decanonicali...
Recursively convert namespaces of BEL Entities in BEL AST using API endpoint Canonicalization and decanonicalization is determined by endpoint used and namespace_targets. Args: ast (BEL): BEL AST api_url (str): endpoint url with a placeholder for the term_id (either /terms/<term_id>/canonicalized or /terms/<term_id>/decanonicalized) namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities Returns: BEL: BEL AST
[ "Recursively", "convert", "namespaces", "of", "BEL", "Entities", "in", "BEL", "AST", "using", "API", "endpoint" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L129-L187
train
50,894
belbio/bel
bel/lang/bel_utils.py
orthologize
def orthologize(ast, bo, species_id: str): """Recursively orthologize BEL Entities in BEL AST using API endpoint NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog) Args: ast (BEL): BEL AST endpoint (str): endpoint url with a placeholder for the term_id Returns: BEL: BEL AST """ # if species_id == 'TAX:9606' and str(ast) == 'MGI:Sult2a1': # import pdb; pdb.set_trace() if not species_id: bo.validation_messages.append( ("WARNING", "No species id was provided for orthologization") ) return ast if isinstance(ast, NSArg): if ast.orthologs: # log.debug(f'AST: {ast.to_string()} species_id: {species_id} orthologs: {ast.orthologs}') if ast.orthologs.get(species_id, None): orthologized_nsarg_val = ast.orthologs[species_id]["decanonical"] ns, value = orthologized_nsarg_val.split(":") ast.change_nsvalue(ns, value) ast.canonical = ast.orthologs[species_id]["canonical"] ast.decanonical = ast.orthologs[species_id]["decanonical"] ast.orthologized = True bo.ast.species.add( (species_id, ast.orthologs[species_id]["species_label"]) ) else: bo.ast.species.add((ast.species_id, ast.species_label)) bo.validation_messages.append( ("WARNING", f"No ortholog found for {ast.namespace}:{ast.value}") ) elif ast.species_id: bo.ast.species.add((ast.species_id, ast.species_label)) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: orthologize(arg, bo, species_id) return ast
python
def orthologize(ast, bo, species_id: str): """Recursively orthologize BEL Entities in BEL AST using API endpoint NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog) Args: ast (BEL): BEL AST endpoint (str): endpoint url with a placeholder for the term_id Returns: BEL: BEL AST """ # if species_id == 'TAX:9606' and str(ast) == 'MGI:Sult2a1': # import pdb; pdb.set_trace() if not species_id: bo.validation_messages.append( ("WARNING", "No species id was provided for orthologization") ) return ast if isinstance(ast, NSArg): if ast.orthologs: # log.debug(f'AST: {ast.to_string()} species_id: {species_id} orthologs: {ast.orthologs}') if ast.orthologs.get(species_id, None): orthologized_nsarg_val = ast.orthologs[species_id]["decanonical"] ns, value = orthologized_nsarg_val.split(":") ast.change_nsvalue(ns, value) ast.canonical = ast.orthologs[species_id]["canonical"] ast.decanonical = ast.orthologs[species_id]["decanonical"] ast.orthologized = True bo.ast.species.add( (species_id, ast.orthologs[species_id]["species_label"]) ) else: bo.ast.species.add((ast.species_id, ast.species_label)) bo.validation_messages.append( ("WARNING", f"No ortholog found for {ast.namespace}:{ast.value}") ) elif ast.species_id: bo.ast.species.add((ast.species_id, ast.species_label)) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: orthologize(arg, bo, species_id) return ast
[ "def", "orthologize", "(", "ast", ",", "bo", ",", "species_id", ":", "str", ")", ":", "# if species_id == 'TAX:9606' and str(ast) == 'MGI:Sult2a1':", "# import pdb; pdb.set_trace()", "if", "not", "species_id", ":", "bo", ".", "validation_messages", ".", "append", "(...
Recursively orthologize BEL Entities in BEL AST using API endpoint NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog) Args: ast (BEL): BEL AST endpoint (str): endpoint url with a placeholder for the term_id Returns: BEL: BEL AST
[ "Recursively", "orthologize", "BEL", "Entities", "in", "BEL", "AST", "using", "API", "endpoint" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L235-L283
train
50,895
belbio/bel
bel/lang/bel_utils.py
populate_ast_nsarg_orthologs
def populate_ast_nsarg_orthologs(ast, species): """Recursively collect NSArg orthologs for BEL AST This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available Args: ast: AST at recursive point in belobj species: dictionary of species ids vs labels for or """ ortholog_namespace = "EG" if isinstance(ast, NSArg): if re.match(ortholog_namespace, ast.canonical): orthologs = bel.terms.orthologs.get_orthologs( ast.canonical, list(species.keys()) ) for species_id in species: if species_id in orthologs: orthologs[species_id]["species_label"] = species[species_id] ast.orthologs = copy.deepcopy(orthologs) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: populate_ast_nsarg_orthologs(arg, species) return ast
python
def populate_ast_nsarg_orthologs(ast, species): """Recursively collect NSArg orthologs for BEL AST This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available Args: ast: AST at recursive point in belobj species: dictionary of species ids vs labels for or """ ortholog_namespace = "EG" if isinstance(ast, NSArg): if re.match(ortholog_namespace, ast.canonical): orthologs = bel.terms.orthologs.get_orthologs( ast.canonical, list(species.keys()) ) for species_id in species: if species_id in orthologs: orthologs[species_id]["species_label"] = species[species_id] ast.orthologs = copy.deepcopy(orthologs) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: populate_ast_nsarg_orthologs(arg, species) return ast
[ "def", "populate_ast_nsarg_orthologs", "(", "ast", ",", "species", ")", ":", "ortholog_namespace", "=", "\"EG\"", "if", "isinstance", "(", "ast", ",", "NSArg", ")", ":", "if", "re", ".", "match", "(", "ortholog_namespace", ",", "ast", ".", "canonical", ")", ...
Recursively collect NSArg orthologs for BEL AST This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available Args: ast: AST at recursive point in belobj species: dictionary of species ids vs labels for or
[ "Recursively", "collect", "NSArg", "orthologs", "for", "BEL", "AST" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L286-L314
train
50,896
belbio/bel
bel/lang/bel_utils.py
preprocess_bel_stmt
def preprocess_bel_stmt(stmt: str) -> str: """Clean up basic formatting of BEL statement Args: stmt: BEL statement as single string Returns: cleaned BEL statement """ stmt = stmt.strip() # remove newline at end of stmt stmt = re.sub(r",+", ",", stmt) # remove multiple commas stmt = re.sub(r",", ", ", stmt) # add space after each comma stmt = re.sub(r" +", " ", stmt) # remove multiple spaces return stmt
python
def preprocess_bel_stmt(stmt: str) -> str: """Clean up basic formatting of BEL statement Args: stmt: BEL statement as single string Returns: cleaned BEL statement """ stmt = stmt.strip() # remove newline at end of stmt stmt = re.sub(r",+", ",", stmt) # remove multiple commas stmt = re.sub(r",", ", ", stmt) # add space after each comma stmt = re.sub(r" +", " ", stmt) # remove multiple spaces return stmt
[ "def", "preprocess_bel_stmt", "(", "stmt", ":", "str", ")", "->", "str", ":", "stmt", "=", "stmt", ".", "strip", "(", ")", "# remove newline at end of stmt", "stmt", "=", "re", ".", "sub", "(", "r\",+\"", ",", "\",\"", ",", "stmt", ")", "# remove multiple ...
Clean up basic formatting of BEL statement Args: stmt: BEL statement as single string Returns: cleaned BEL statement
[ "Clean", "up", "basic", "formatting", "of", "BEL", "statement" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L317-L332
train
50,897
belbio/bel
bel/lang/bel_utils.py
_dump_spec
def _dump_spec(spec): """Dump bel specification dictionary using YAML Formats this with an extra indentation for lists to make it easier to use cold folding on the YAML version of the spec dictionary. """ with open("spec.yaml", "w") as f: yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False)
python
def _dump_spec(spec): """Dump bel specification dictionary using YAML Formats this with an extra indentation for lists to make it easier to use cold folding on the YAML version of the spec dictionary. """ with open("spec.yaml", "w") as f: yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False)
[ "def", "_dump_spec", "(", "spec", ")", ":", "with", "open", "(", "\"spec.yaml\"", ",", "\"w\"", ")", "as", "f", ":", "yaml", ".", "dump", "(", "spec", ",", "f", ",", "Dumper", "=", "MyDumper", ",", "default_flow_style", "=", "False", ")" ]
Dump bel specification dictionary using YAML Formats this with an extra indentation for lists to make it easier to use cold folding on the YAML version of the spec dictionary.
[ "Dump", "bel", "specification", "dictionary", "using", "YAML" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L450-L457
train
50,898
belbio/bel
bel/edge/computed.py
process_rule
def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec): """Process computed edge rule Recursively processes BELAst versus a single computed edge rule Args: edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs ast (Function): BEL Function AST rule (Mapping[str, Any]: computed edge rule """ ast_type = ast.__class__.__name__ trigger_functions = rule.get("trigger_function", []) trigger_types = rule.get("trigger_type", []) rule_subject = rule.get("subject") rule_relation = rule.get("relation") rule_object = rule.get("object") log.debug(f"Running {rule_relation} Type: {ast_type}") if isinstance(ast, Function): function_name = ast.name args = ast.args parent_function = ast.parent_function if function_name in trigger_functions: if rule_subject == "trigger_value": subject = ast if rule_object == "args": for arg in args: log.debug(f"1: {subject} {arg}") edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif rule_object == "parent_function" and parent_function: log.debug(f"2: {subject} {parent_function}") edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) elif ast_type in trigger_types: if rule_subject == "trigger_value": subject = ast if rule_object == "args": for arg in args: log.debug(f"3: {subject} {arg}") edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif rule_object == "parent_function" and parent_function: log.debug(f"4: {subject} {parent_function}") edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) if isinstance(ast, NSArg): term = "{}:{}".format(ast.namespace, ast.value) parent_function = ast.parent_function if ast_type in trigger_types: if rule_subject == "trigger_value": subject = term if rule_object == "args": for arg in args: log.debug(f"5: {subject} {arg}") edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif rule_object == "parent_function" and 
parent_function: log.debug(f"6: {subject} {parent_function}") edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) # Recursively process every element by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: process_rule(edges, arg, rule, spec)
python
# NOTE(review): duplicate copy of process_rule (the dataset's `code` field,
# identical to the `original_string` field above) with all newlines and
# indentation collapsed — not runnable as-is. It walks a BEL AST recursively,
# appending BELAst edges for nodes matching the rule's trigger_function /
# trigger_type entries.
# NOTE(review): in the NSArg branch, `for arg in args` reads `args`, which is
# bound only inside the `isinstance(ast, Function)` branch — presumably a
# latent NameError when ast is an NSArg; confirm against the upstream repo.
def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec): """Process computed edge rule Recursively processes BELAst versus a single computed edge rule Args: edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs ast (Function): BEL Function AST rule (Mapping[str, Any]: computed edge rule """ ast_type = ast.__class__.__name__ trigger_functions = rule.get("trigger_function", []) trigger_types = rule.get("trigger_type", []) rule_subject = rule.get("subject") rule_relation = rule.get("relation") rule_object = rule.get("object") log.debug(f"Running {rule_relation} Type: {ast_type}") if isinstance(ast, Function): function_name = ast.name args = ast.args parent_function = ast.parent_function if function_name in trigger_functions: if rule_subject == "trigger_value": subject = ast if rule_object == "args": for arg in args: log.debug(f"1: {subject} {arg}") edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif rule_object == "parent_function" and parent_function: log.debug(f"2: {subject} {parent_function}") edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) elif ast_type in trigger_types: if rule_subject == "trigger_value": subject = ast if rule_object == "args": for arg in args: log.debug(f"3: {subject} {arg}") edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif rule_object == "parent_function" and parent_function: log.debug(f"4: {subject} {parent_function}") edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) if isinstance(ast, NSArg): term = "{}:{}".format(ast.namespace, ast.value) parent_function = ast.parent_function if ast_type in trigger_types: if rule_subject == "trigger_value": subject = term if rule_object == "args": for arg in args: log.debug(f"5: {subject} {arg}") edge_ast = BELAst(subject, rule_relation, arg, spec) edges.append(edge_ast) elif rule_object == "parent_function" and 
 parent_function: log.debug(f"6: {subject} {parent_function}") edge_ast = BELAst(subject, rule_relation, parent_function, spec) edges.append(edge_ast) # Recursively process every element by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: process_rule(edges, arg, rule, spec)
[ "def", "process_rule", "(", "edges", ":", "Edges", ",", "ast", ":", "Function", ",", "rule", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "spec", ":", "BELSpec", ")", ":", "ast_type", "=", "ast", ".", "__class__", ".", "__name__", "trigger_functio...
Process computed edge rule Recursively processes BELAst versus a single computed edge rule Args: edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs ast (Function): BEL Function AST rule (Mapping[str, Any]): computed edge rule
[ "Process", "computed", "edge", "rule" ]
60333e8815625b942b4836903f3b618cf44b3771
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/computed.py#L151-L224
train
50,899