repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
rapidpro/expressions
python/temba_expressions/functions/custom.py
read_digits
def read_digits(ctx, text):
    """
    Formats digits in text for reading in TTS
    """
    def split_into(value, size):
        # break value into consecutive pieces of the given size
        return [value[start:start + size] for start in range(0, len(value), size)]

    text = conversions.to_string(text, ctx).strip()
    if not text:
        return ''

    # trim off the plus for phone numbers
    if text.startswith('+'):
        text = text[1:]

    length = len(text)

    # ssn: read as 3 + 2 + 4 digit groups
    if length == 9:
        groups = [' '.join(text[:3]), ' '.join(text[3:5]), ' '.join(text[5:])]
        return ' , '.join(groups)

    # triplets, most international phone numbers
    # (note: the outer join deliberately spaces out every character)
    if length % 3 == 0 and length > 3:
        return ' '.join(','.join(split_into(text, 3)))

    # quads, credit cards
    if length % 4 == 0:
        return ' '.join(','.join(split_into(text, 4)))

    # otherwise, just put a comma between each number
    return ','.join(text)
python
def read_digits(ctx, text): """ Formats digits in text for reading in TTS """ def chunk(value, chunk_size): return [value[i: i + chunk_size] for i in range(0, len(value), chunk_size)] text = conversions.to_string(text, ctx).strip() if not text: return '' # trim off the plus for phone numbers if text[0] == '+': text = text[1:] length = len(text) # ssn if length == 9: result = ' '.join(text[:3]) result += ' , ' + ' '.join(text[3:5]) result += ' , ' + ' '.join(text[5:]) return result # triplets, most international phone numbers if length % 3 == 0 and length > 3: chunks = chunk(text, 3) return ' '.join(','.join(chunks)) # quads, credit cards if length % 4 == 0: chunks = chunk(text, 4) return ' '.join(','.join(chunks)) # otherwise, just put a comma between each number return ','.join(text)
[ "def", "read_digits", "(", "ctx", ",", "text", ")", ":", "def", "chunk", "(", "value", ",", "chunk_size", ")", ":", "return", "[", "value", "[", "i", ":", "i", "+", "chunk_size", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "value",...
Formats digits in text for reading in TTS
[ "Formats", "digits", "in", "text", "for", "reading", "in", "TTS" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L51-L86
train
51,000
rapidpro/expressions
python/temba_expressions/functions/custom.py
remove_first_word
def remove_first_word(ctx, text):
    """
    Removes the first word from the given text string
    """
    stripped = conversions.to_string(text, ctx).lstrip()
    leading = first_word(ctx, stripped)
    if not leading:
        return ''
    # drop the first word and any whitespace that followed it
    return stripped[len(leading):].lstrip()
python
def remove_first_word(ctx, text): """ Removes the first word from the given text string """ text = conversions.to_string(text, ctx).lstrip() first = first_word(ctx, text) return text[len(first):].lstrip() if first else ''
[ "def", "remove_first_word", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", ".", "lstrip", "(", ")", "first", "=", "first_word", "(", "ctx", ",", "text", ")", "return", "text", "[", "len"...
Removes the first word from the given text string
[ "Removes", "the", "first", "word", "from", "the", "given", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L89-L95
train
51,001
rapidpro/expressions
python/temba_expressions/functions/custom.py
word
def word(ctx, text, number, by_spaces=False):
    """
    Extracts the nth word from the given text string
    """
    # a slice of exactly one word: [number, number + 1)
    stop = conversions.to_integer(number, ctx) + 1
    return word_slice(ctx, text, number, stop, by_spaces)
python
def word(ctx, text, number, by_spaces=False): """ Extracts the nth word from the given text string """ return word_slice(ctx, text, number, conversions.to_integer(number, ctx) + 1, by_spaces)
[ "def", "word", "(", "ctx", ",", "text", ",", "number", ",", "by_spaces", "=", "False", ")", ":", "return", "word_slice", "(", "ctx", ",", "text", ",", "number", ",", "conversions", ".", "to_integer", "(", "number", ",", "ctx", ")", "+", "1", ",", "...
Extracts the nth word from the given text string
[ "Extracts", "the", "nth", "word", "from", "the", "given", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L98-L102
train
51,002
rapidpro/expressions
python/temba_expressions/functions/custom.py
word_count
def word_count(ctx, text, by_spaces=False):
    """
    Returns the number of words in the given text string
    """
    words = __get_words(conversions.to_string(text, ctx),
                        conversions.to_boolean(by_spaces, ctx))
    return len(words)
python
def word_count(ctx, text, by_spaces=False): """ Returns the number of words in the given text string """ text = conversions.to_string(text, ctx) by_spaces = conversions.to_boolean(by_spaces, ctx) return len(__get_words(text, by_spaces))
[ "def", "word_count", "(", "ctx", ",", "text", ",", "by_spaces", "=", "False", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "by_spaces", "=", "conversions", ".", "to_boolean", "(", "by_spaces", ",", "ctx", ")", "...
Returns the number of words in the given text string
[ "Returns", "the", "number", "of", "words", "in", "the", "given", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L105-L111
train
51,003
rapidpro/expressions
python/temba_expressions/functions/custom.py
word_slice
def word_slice(ctx, text, start, stop=0, by_spaces=False):
    """
    Extracts a substring spanning from start up to but not-including stop
    """
    text = conversions.to_string(text, ctx)
    start = conversions.to_integer(start, ctx)
    stop = conversions.to_integer(stop, ctx)
    by_spaces = conversions.to_boolean(by_spaces, ctx)

    if start == 0:
        raise ValueError("Start word cannot be zero")
    if start > 0:
        start -= 1  # one-based -> zero-based offset

    if stop == 0:
        stop = None  # zero is treated as "to the end"
    elif stop > 0:
        stop -= 1  # one-based -> zero-based offset

    words = __get_words(text, by_spaces)

    # re-combine selected words with a single space
    return ' '.join(words[start:stop])
python
def word_slice(ctx, text, start, stop=0, by_spaces=False): """ Extracts a substring spanning from start up to but not-including stop """ text = conversions.to_string(text, ctx) start = conversions.to_integer(start, ctx) stop = conversions.to_integer(stop, ctx) by_spaces = conversions.to_boolean(by_spaces, ctx) if start == 0: raise ValueError("Start word cannot be zero") elif start > 0: start -= 1 # convert to a zero-based offset if stop == 0: # zero is treated as no end stop = None elif stop > 0: stop -= 1 # convert to a zero-based offset words = __get_words(text, by_spaces) selection = operator.getitem(words, slice(start, stop)) # re-combine selected words with a single space return ' '.join(selection)
[ "def", "word_slice", "(", "ctx", ",", "text", ",", "start", ",", "stop", "=", "0", ",", "by_spaces", "=", "False", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "start", "=", "conversions", ".", "to_integer", "...
Extracts a substring spanning from start up to but not-including stop
[ "Extracts", "a", "substring", "spanning", "from", "start", "up", "to", "but", "not", "-", "including", "stop" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L114-L138
train
51,004
rapidpro/expressions
python/temba_expressions/functions/custom.py
regex_group
def regex_group(ctx, text, pattern, group_num):
    """
    Tries to match the text with the given pattern and returns the value of matching group
    """
    text = conversions.to_string(text, ctx)
    pattern = conversions.to_string(pattern, ctx)
    group_num = conversions.to_integer(group_num, ctx)

    flags = regex.UNICODE | regex.IGNORECASE | regex.MULTILINE | regex.V0
    match = regex.compile(pattern, flags).search(text)
    if match is None:
        return ""

    # group 0 is the whole match; anything beyond the captured groups is invalid
    if not (0 <= group_num <= len(match.groups())):
        raise ValueError("No such matching group %d" % group_num)

    return match.group(group_num)
python
def regex_group(ctx, text, pattern, group_num): """ Tries to match the text with the given pattern and returns the value of matching group """ text = conversions.to_string(text, ctx) pattern = conversions.to_string(pattern, ctx) group_num = conversions.to_integer(group_num, ctx) expression = regex.compile(pattern, regex.UNICODE | regex.IGNORECASE | regex.MULTILINE | regex.V0) match = expression.search(text) if not match: return "" if group_num < 0 or group_num > len(match.groups()): raise ValueError("No such matching group %d" % group_num) return match.group(group_num)
[ "def", "regex_group", "(", "ctx", ",", "text", ",", "pattern", ",", "group_num", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "pattern", "=", "conversions", ".", "to_string", "(", "pattern", ",", "ctx", ")", "gr...
Tries to match the text with the given pattern and returns the value of matching group
[ "Tries", "to", "match", "the", "text", "with", "the", "given", "pattern", "and", "returns", "the", "value", "of", "matching", "group" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L157-L174
train
51,005
rapidpro/expressions
python/temba_expressions/utils.py
decimal_round
def decimal_round(number, num_digits, rounding=ROUND_HALF_UP):
    """
    Rounding for decimals with support for negative digits
    """
    quantum = Decimal(10) ** -num_digits
    if num_digits < 0:
        # round at a position to the left of the decimal point
        return quantum * (number / quantum).to_integral_value(rounding)
    return number.quantize(quantum, rounding)
python
def decimal_round(number, num_digits, rounding=ROUND_HALF_UP): """ Rounding for decimals with support for negative digits """ exp = Decimal(10) ** -num_digits if num_digits >= 0: return number.quantize(exp, rounding) else: return exp * (number / exp).to_integral_value(rounding)
[ "def", "decimal_round", "(", "number", ",", "num_digits", ",", "rounding", "=", "ROUND_HALF_UP", ")", ":", "exp", "=", "Decimal", "(", "10", ")", "**", "-", "num_digits", "if", "num_digits", ">=", "0", ":", "return", "number", ".", "quantize", "(", "exp"...
Rounding for decimals with support for negative digits
[ "Rounding", "for", "decimals", "with", "support", "for", "negative", "digits" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/utils.py#L22-L31
train
51,006
rapidpro/expressions
python/temba_expressions/utils.py
parse_json_date
def parse_json_date(value):
    """
    Parses an ISO8601 formatted datetime from a string value
    """
    if not value:
        return None
    parsed = datetime.datetime.strptime(value, JSON_DATETIME_FORMAT)
    return parsed.replace(tzinfo=pytz.UTC)
python
def parse_json_date(value): """ Parses an ISO8601 formatted datetime from a string value """ if not value: return None return datetime.datetime.strptime(value, JSON_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
[ "def", "parse_json_date", "(", "value", ")", ":", "if", "not", "value", ":", "return", "None", "return", "datetime", ".", "datetime", ".", "strptime", "(", "value", ",", "JSON_DATETIME_FORMAT", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "UTC", ...
Parses an ISO8601 formatted datetime from a string value
[ "Parses", "an", "ISO8601", "formatted", "datetime", "from", "a", "string", "value" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/utils.py#L50-L57
train
51,007
rapidpro/expressions
python/temba_expressions/functions/excel.py
clean
def clean(ctx, text):
    """
    Removes all non-printable characters from a text string
    """
    text = conversions.to_string(text, ctx)
    # keep only characters from the printable range (code point 32 and up)
    return ''.join(ch for ch in text if ord(ch) >= 32)
python
def clean(ctx, text): """ Removes all non-printable characters from a text string """ text = conversions.to_string(text, ctx) return ''.join([c for c in text if ord(c) >= 32])
[ "def", "clean", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "return", "''", ".", "join", "(", "[", "c", "for", "c", "in", "text", "if", "ord", "(", "c", ")", ">=", "32", "]", ...
Removes all non-printable characters from a text string
[ "Removes", "all", "non", "-", "printable", "characters", "from", "a", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L22-L27
train
51,008
rapidpro/expressions
python/temba_expressions/functions/excel.py
concatenate
def concatenate(ctx, *text):
    """
    Joins text strings into one text string
    """
    return ''.join(conversions.to_string(arg, ctx) for arg in text)
python
def concatenate(ctx, *text): """ Joins text strings into one text string """ result = '' for arg in text: result += conversions.to_string(arg, ctx) return result
[ "def", "concatenate", "(", "ctx", ",", "*", "text", ")", ":", "result", "=", "''", "for", "arg", "in", "text", ":", "result", "+=", "conversions", ".", "to_string", "(", "arg", ",", "ctx", ")", "return", "result" ]
Joins text strings into one text string
[ "Joins", "text", "strings", "into", "one", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L37-L44
train
51,009
rapidpro/expressions
python/temba_expressions/functions/excel.py
fixed
def fixed(ctx, number, decimals=2, no_commas=False):
    """
    Formats the given number in decimal format using a period and commas
    """
    rounded = _round(ctx, number, decimals)
    if no_commas:
        return '{:f}'.format(rounded)
    return '{:,f}'.format(rounded)
python
def fixed(ctx, number, decimals=2, no_commas=False): """ Formats the given number in decimal format using a period and commas """ value = _round(ctx, number, decimals) format_str = '{:f}' if no_commas else '{:,f}' return format_str.format(value)
[ "def", "fixed", "(", "ctx", ",", "number", ",", "decimals", "=", "2", ",", "no_commas", "=", "False", ")", ":", "value", "=", "_round", "(", "ctx", ",", "number", ",", "decimals", ")", "format_str", "=", "'{:f}'", "if", "no_commas", "else", "'{:,f}'", ...
Formats the given number in decimal format using a period and commas
[ "Formats", "the", "given", "number", "in", "decimal", "format", "using", "a", "period", "and", "commas" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L47-L53
train
51,010
rapidpro/expressions
python/temba_expressions/functions/excel.py
left
def left(ctx, text, num_chars):
    """
    Returns the first characters in a text string
    """
    count = conversions.to_integer(num_chars, ctx)
    if count < 0:
        raise ValueError("Number of chars can't be negative")
    return conversions.to_string(text, ctx)[:count]
python
def left(ctx, text, num_chars): """ Returns the first characters in a text string """ num_chars = conversions.to_integer(num_chars, ctx) if num_chars < 0: raise ValueError("Number of chars can't be negative") return conversions.to_string(text, ctx)[0:num_chars]
[ "def", "left", "(", "ctx", ",", "text", ",", "num_chars", ")", ":", "num_chars", "=", "conversions", ".", "to_integer", "(", "num_chars", ",", "ctx", ")", "if", "num_chars", "<", "0", ":", "raise", "ValueError", "(", "\"Number of chars can't be negative\"", ...
Returns the first characters in a text string
[ "Returns", "the", "first", "characters", "in", "a", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L56-L63
train
51,011
rapidpro/expressions
python/temba_expressions/functions/excel.py
rept
def rept(ctx, text, number_times):
    """
    Repeats text a given number of times

    Raises ValueError if the number of repetitions is negative.
    """
    # Convert before comparing: the original compared the raw argument to 0,
    # which raises TypeError (not the intended ValueError) for non-numeric
    # inputs in Python 3, and is inconsistent with left()/right() which
    # convert first.
    number_times = conversions.to_integer(number_times, ctx)
    if number_times < 0:
        raise ValueError("Number of times can't be negative")
    return conversions.to_string(text, ctx) * number_times
python
def rept(ctx, text, number_times): """ Repeats text a given number of times """ if number_times < 0: raise ValueError("Number of times can't be negative") return conversions.to_string(text, ctx) * conversions.to_integer(number_times, ctx)
[ "def", "rept", "(", "ctx", ",", "text", ",", "number_times", ")", ":", "if", "number_times", "<", "0", ":", "raise", "ValueError", "(", "\"Number of times can't be negative\"", ")", "return", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "*"...
Repeats text a given number of times
[ "Repeats", "text", "a", "given", "number", "of", "times" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L87-L93
train
51,012
rapidpro/expressions
python/temba_expressions/functions/excel.py
right
def right(ctx, text, num_chars):
    """
    Returns the last characters in a text string
    """
    count = conversions.to_integer(num_chars, ctx)
    if count < 0:
        raise ValueError("Number of chars can't be negative")
    if count == 0:
        # [-0:] would return the whole string, so zero is special-cased
        return ''
    return conversions.to_string(text, ctx)[-count:]
python
def right(ctx, text, num_chars): """ Returns the last characters in a text string """ num_chars = conversions.to_integer(num_chars, ctx) if num_chars < 0: raise ValueError("Number of chars can't be negative") elif num_chars == 0: return '' else: return conversions.to_string(text, ctx)[-num_chars:]
[ "def", "right", "(", "ctx", ",", "text", ",", "num_chars", ")", ":", "num_chars", "=", "conversions", ".", "to_integer", "(", "num_chars", ",", "ctx", ")", "if", "num_chars", "<", "0", ":", "raise", "ValueError", "(", "\"Number of chars can't be negative\"", ...
Returns the last characters in a text string
[ "Returns", "the", "last", "characters", "in", "a", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L96-L106
train
51,013
rapidpro/expressions
python/temba_expressions/functions/excel.py
substitute
def substitute(ctx, text, old_text, new_text, instance_num=-1):
    """
    Substitutes new_text for old_text in a text string
    """
    text = conversions.to_string(text, ctx)
    old_text = conversions.to_string(old_text, ctx)
    new_text = conversions.to_string(new_text, ctx)

    if instance_num < 0:
        # negative instance number means replace every occurrence
        return text.replace(old_text, new_text)

    # replace only the nth (1-based) occurrence: split on old_text and
    # rejoin, substituting new_text at the requested position only
    pieces = text.split(old_text)
    result = pieces[0]
    for occurrence, piece in enumerate(pieces[1:], start=1):
        joiner = new_text if occurrence == instance_num else old_text
        result += joiner + piece
    return result
python
def substitute(ctx, text, old_text, new_text, instance_num=-1): """ Substitutes new_text for old_text in a text string """ text = conversions.to_string(text, ctx) old_text = conversions.to_string(old_text, ctx) new_text = conversions.to_string(new_text, ctx) if instance_num < 0: return text.replace(old_text, new_text) else: splits = text.split(old_text) output = splits[0] instance = 1 for split in splits[1:]: sep = new_text if instance == instance_num else old_text output += sep + split instance += 1 return output
[ "def", "substitute", "(", "ctx", ",", "text", ",", "old_text", ",", "new_text", ",", "instance_num", "=", "-", "1", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "old_text", "=", "conversions", ".", "to_string", ...
Substitutes new_text for old_text in a text string
[ "Substitutes", "new_text", "for", "old_text", "in", "a", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L109-L127
train
51,014
rapidpro/expressions
python/temba_expressions/functions/excel.py
_unicode
def _unicode(ctx, text):
    """
    Returns a numeric code for the first character in a text string
    """
    text = conversions.to_string(text, ctx)
    if not text:
        raise ValueError("Text can't be empty")
    return ord(text[0])
python
def _unicode(ctx, text): """ Returns a numeric code for the first character in a text string """ text = conversions.to_string(text, ctx) if len(text) == 0: raise ValueError("Text can't be empty") return ord(text[0])
[ "def", "_unicode", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "if", "len", "(", "text", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Text can't be empty\"", ")", "return", "ord"...
Returns a numeric code for the first character in a text string
[ "Returns", "a", "numeric", "code", "for", "the", "first", "character", "in", "a", "text", "string" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L137-L144
train
51,015
rapidpro/expressions
python/temba_expressions/functions/excel.py
date
def date(ctx, year, month, day):
    """
    Defines a date value
    """
    year = conversions.to_integer(year, ctx)
    month = conversions.to_integer(month, ctx)
    day = conversions.to_integer(day, ctx)
    return _date(year, month, day)
python
def date(ctx, year, month, day): """ Defines a date value """ return _date(conversions.to_integer(year, ctx), conversions.to_integer(month, ctx), conversions.to_integer(day, ctx))
[ "def", "date", "(", "ctx", ",", "year", ",", "month", ",", "day", ")", ":", "return", "_date", "(", "conversions", ".", "to_integer", "(", "year", ",", "ctx", ")", ",", "conversions", ".", "to_integer", "(", "month", ",", "ctx", ")", ",", "conversion...
Defines a date value
[ "Defines", "a", "date", "value" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L157-L161
train
51,016
rapidpro/expressions
python/temba_expressions/functions/excel.py
datedif
def datedif(ctx, start_date, end_date, unit):
    """
    Calculates the number of days, months, or years between two dates.
    """
    start_date = conversions.to_date(start_date, ctx)
    end_date = conversions.to_date(end_date, ctx)
    unit = conversions.to_string(unit, ctx).lower()

    if start_date > end_date:
        raise ValueError("Start date cannot be after end date")

    if unit == 'y':
        # complete years
        return relativedelta(end_date, start_date).years
    if unit == 'm':
        # complete months
        delta = relativedelta(end_date, start_date)
        return delta.years * 12 + delta.months
    if unit == 'd':
        # total days
        return (end_date - start_date).days
    if unit == 'md':
        # days, ignoring months and years
        return relativedelta(end_date, start_date).days
    if unit == 'ym':
        # months, ignoring years
        return relativedelta(end_date, start_date).months
    if unit == 'yd':
        # days, ignoring years
        return (end_date - start_date.replace(year=end_date.year)).days

    raise ValueError("Invalid unit value: %s" % unit)
python
def datedif(ctx, start_date, end_date, unit): """ Calculates the number of days, months, or years between two dates. """ start_date = conversions.to_date(start_date, ctx) end_date = conversions.to_date(end_date, ctx) unit = conversions.to_string(unit, ctx).lower() if start_date > end_date: raise ValueError("Start date cannot be after end date") if unit == 'y': return relativedelta(end_date, start_date).years elif unit == 'm': delta = relativedelta(end_date, start_date) return 12 * delta.years + delta.months elif unit == 'd': return (end_date - start_date).days elif unit == 'md': return relativedelta(end_date, start_date).days elif unit == 'ym': return relativedelta(end_date, start_date).months elif unit == 'yd': return (end_date - start_date.replace(year=end_date.year)).days raise ValueError("Invalid unit value: %s" % unit)
[ "def", "datedif", "(", "ctx", ",", "start_date", ",", "end_date", ",", "unit", ")", ":", "start_date", "=", "conversions", ".", "to_date", "(", "start_date", ",", "ctx", ")", "end_date", "=", "conversions", ".", "to_date", "(", "end_date", ",", "ctx", ")...
Calculates the number of days, months, or years between two dates.
[ "Calculates", "the", "number", "of", "days", "months", "or", "years", "between", "two", "dates", "." ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L164-L189
train
51,017
rapidpro/expressions
python/temba_expressions/functions/excel.py
edate
def edate(ctx, date, months):
    """
    Moves a date by the given number of months
    """
    offset = relativedelta(months=conversions.to_integer(months, ctx))
    return conversions.to_date_or_datetime(date, ctx) + offset
python
def edate(ctx, date, months): """ Moves a date by the given number of months """ return conversions.to_date_or_datetime(date, ctx) + relativedelta(months=conversions.to_integer(months, ctx))
[ "def", "edate", "(", "ctx", ",", "date", ",", "months", ")", ":", "return", "conversions", ".", "to_date_or_datetime", "(", "date", ",", "ctx", ")", "+", "relativedelta", "(", "months", "=", "conversions", ".", "to_integer", "(", "months", ",", "ctx", ")...
Moves a date by the given number of months
[ "Moves", "a", "date", "by", "the", "given", "number", "of", "months" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L213-L217
train
51,018
rapidpro/expressions
python/temba_expressions/functions/excel.py
time
def time(ctx, hours, minutes, seconds):
    """
    Defines a time value
    """
    hours = conversions.to_integer(hours, ctx)
    minutes = conversions.to_integer(minutes, ctx)
    seconds = conversions.to_integer(seconds, ctx)
    return _time(hours, minutes, seconds)
python
def time(ctx, hours, minutes, seconds): """ Defines a time value """ return _time(conversions.to_integer(hours, ctx), conversions.to_integer(minutes, ctx), conversions.to_integer(seconds, ctx))
[ "def", "time", "(", "ctx", ",", "hours", ",", "minutes", ",", "seconds", ")", ":", "return", "_time", "(", "conversions", ".", "to_integer", "(", "hours", ",", "ctx", ")", ",", "conversions", ".", "to_integer", "(", "minutes", ",", "ctx", ")", ",", "...
Defines a time value
[ "Defines", "a", "time", "value" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L255-L259
train
51,019
rapidpro/expressions
python/temba_expressions/functions/excel.py
_abs
def _abs(ctx, number):
    """
    Returns the absolute value of a number
    """
    value = conversions.to_decimal(number, ctx)
    return conversions.to_decimal(abs(value), ctx)
python
def _abs(ctx, number): """ Returns the absolute value of a number """ return conversions.to_decimal(abs(conversions.to_decimal(number, ctx)), ctx)
[ "def", "_abs", "(", "ctx", ",", "number", ")", ":", "return", "conversions", ".", "to_decimal", "(", "abs", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ")", ",", "ctx", ")" ]
Returns the absolute value of a number
[ "Returns", "the", "absolute", "value", "of", "a", "number" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L293-L297
train
51,020
rapidpro/expressions
python/temba_expressions/functions/excel.py
_int
def _int(ctx, number):
    """
    Rounds a number down to the nearest integer
    """
    floored = conversions.to_decimal(number, ctx).to_integral_value(ROUND_FLOOR)
    return conversions.to_integer(floored, ctx)
python
def _int(ctx, number): """ Rounds a number down to the nearest integer """ return conversions.to_integer(conversions.to_decimal(number, ctx).to_integral_value(ROUND_FLOOR), ctx)
[ "def", "_int", "(", "ctx", ",", "number", ")", ":", "return", "conversions", ".", "to_integer", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ".", "to_integral_value", "(", "ROUND_FLOOR", ")", ",", "ctx", ")" ]
Rounds a number down to the nearest integer
[ "Rounds", "a", "number", "down", "to", "the", "nearest", "integer" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L314-L318
train
51,021
rapidpro/expressions
python/temba_expressions/functions/excel.py
_max
def _max(ctx, *number):
    """
    Returns the maximum value of all arguments
    """
    if not number:
        raise ValueError("Wrong number of arguments")
    return max(conversions.to_decimal(arg, ctx) for arg in number)
python
def _max(ctx, *number): """ Returns the maximum value of all arguments """ if len(number) == 0: raise ValueError("Wrong number of arguments") result = conversions.to_decimal(number[0], ctx) for arg in number[1:]: arg = conversions.to_decimal(arg, ctx) if arg > result: result = arg return result
[ "def", "_max", "(", "ctx", ",", "*", "number", ")", ":", "if", "len", "(", "number", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Wrong number of arguments\"", ")", "result", "=", "conversions", ".", "to_decimal", "(", "number", "[", "0", "]", "...
Returns the maximum value of all arguments
[ "Returns", "the", "maximum", "value", "of", "all", "arguments" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L321-L333
train
51,022
rapidpro/expressions
python/temba_expressions/functions/excel.py
mod
def mod(ctx, number, divisor):
    """
    Returns the remainder after number is divided by divisor
    """
    number = conversions.to_decimal(number, ctx)
    divisor = conversions.to_decimal(divisor, ctx)
    # _int floors the quotient, so the remainder takes the divisor's sign
    quotient = _int(ctx, number / divisor)
    return number - divisor * quotient
python
def mod(ctx, number, divisor): """ Returns the remainder after number is divided by divisor """ number = conversions.to_decimal(number, ctx) divisor = conversions.to_decimal(divisor, ctx) return number - divisor * _int(ctx, number / divisor)
[ "def", "mod", "(", "ctx", ",", "number", ",", "divisor", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "divisor", "=", "conversions", ".", "to_decimal", "(", "divisor", ",", "ctx", ")", "return", "number", "...
Returns the remainder after number is divided by divisor
[ "Returns", "the", "remainder", "after", "number", "is", "divided", "by", "divisor" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L351-L357
train
51,023
rapidpro/expressions
python/temba_expressions/functions/excel.py
_power
def _power(ctx, number, power):
    """
    Returns the result of a number raised to a power
    """
    base = conversions.to_decimal(number, ctx)
    exponent = conversions.to_decimal(power, ctx)
    return decimal_pow(base, exponent)
python
def _power(ctx, number, power): """ Returns the result of a number raised to a power """ return decimal_pow(conversions.to_decimal(number, ctx), conversions.to_decimal(power, ctx))
[ "def", "_power", "(", "ctx", ",", "number", ",", "power", ")", ":", "return", "decimal_pow", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ",", "conversions", ".", "to_decimal", "(", "power", ",", "ctx", ")", ")" ]
Returns the result of a number raised to a power
[ "Returns", "the", "result", "of", "a", "number", "raised", "to", "a", "power" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L360-L364
train
51,024
rapidpro/expressions
python/temba_expressions/functions/excel.py
randbetween
def randbetween(ctx, bottom, top): """ Returns a random integer number between the numbers you specify """ bottom = conversions.to_integer(bottom, ctx) top = conversions.to_integer(top, ctx) return random.randint(bottom, top)
python
def randbetween(ctx, bottom, top): """ Returns a random integer number between the numbers you specify """ bottom = conversions.to_integer(bottom, ctx) top = conversions.to_integer(top, ctx) return random.randint(bottom, top)
[ "def", "randbetween", "(", "ctx", ",", "bottom", ",", "top", ")", ":", "bottom", "=", "conversions", ".", "to_integer", "(", "bottom", ",", "ctx", ")", "top", "=", "conversions", ".", "to_integer", "(", "top", ",", "ctx", ")", "return", "random", ".", ...
Returns a random integer number between the numbers you specify
[ "Returns", "a", "random", "integer", "number", "between", "the", "numbers", "you", "specify" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L374-L380
train
51,025
rapidpro/expressions
python/temba_expressions/functions/excel.py
_round
def _round(ctx, number, num_digits): """ Rounds a number to a specified number of digits """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_HALF_UP)
python
def _round(ctx, number, num_digits): """ Rounds a number to a specified number of digits """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_HALF_UP)
[ "def", "_round", "(", "ctx", ",", "number", ",", "num_digits", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "num_digits", "=", "conversions", ".", "to_integer", "(", "num_digits", ",", "ctx", ")", "return", "...
Rounds a number to a specified number of digits
[ "Rounds", "a", "number", "to", "a", "specified", "number", "of", "digits" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L383-L390
train
51,026
rapidpro/expressions
python/temba_expressions/functions/excel.py
rounddown
def rounddown(ctx, number, num_digits): """ Rounds a number down, toward zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_DOWN)
python
def rounddown(ctx, number, num_digits): """ Rounds a number down, toward zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_DOWN)
[ "def", "rounddown", "(", "ctx", ",", "number", ",", "num_digits", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "num_digits", "=", "conversions", ".", "to_integer", "(", "num_digits", ",", "ctx", ")", "return", ...
Rounds a number down, toward zero
[ "Rounds", "a", "number", "down", "toward", "zero" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L393-L400
train
51,027
rapidpro/expressions
python/temba_expressions/functions/excel.py
roundup
def roundup(ctx, number, num_digits): """ Rounds a number up, away from zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_UP)
python
def roundup(ctx, number, num_digits): """ Rounds a number up, away from zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_UP)
[ "def", "roundup", "(", "ctx", ",", "number", ",", "num_digits", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "num_digits", "=", "conversions", ".", "to_integer", "(", "num_digits", ",", "ctx", ")", "return", ...
Rounds a number up, away from zero
[ "Rounds", "a", "number", "up", "away", "from", "zero" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L403-L410
train
51,028
rapidpro/expressions
python/temba_expressions/functions/excel.py
_sum
def _sum(ctx, *number): """ Returns the sum of all arguments """ if len(number) == 0: raise ValueError("Wrong number of arguments") result = Decimal(0) for arg in number: result += conversions.to_decimal(arg, ctx) return result
python
def _sum(ctx, *number): """ Returns the sum of all arguments """ if len(number) == 0: raise ValueError("Wrong number of arguments") result = Decimal(0) for arg in number: result += conversions.to_decimal(arg, ctx) return result
[ "def", "_sum", "(", "ctx", ",", "*", "number", ")", ":", "if", "len", "(", "number", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Wrong number of arguments\"", ")", "result", "=", "Decimal", "(", "0", ")", "for", "arg", "in", "number", ":", "r...
Returns the sum of all arguments
[ "Returns", "the", "sum", "of", "all", "arguments" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L413-L423
train
51,029
rapidpro/expressions
python/temba_expressions/functions/excel.py
trunc
def trunc(ctx, number): """ Truncates a number to an integer by removing the fractional part of the number """ return conversions.to_integer(conversions.to_decimal(number, ctx).to_integral_value(ROUND_DOWN), ctx)
python
def trunc(ctx, number): """ Truncates a number to an integer by removing the fractional part of the number """ return conversions.to_integer(conversions.to_decimal(number, ctx).to_integral_value(ROUND_DOWN), ctx)
[ "def", "trunc", "(", "ctx", ",", "number", ")", ":", "return", "conversions", ".", "to_integer", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ".", "to_integral_value", "(", "ROUND_DOWN", ")", ",", "ctx", ")" ]
Truncates a number to an integer by removing the fractional part of the number
[ "Truncates", "a", "number", "to", "an", "integer", "by", "removing", "the", "fractional", "part", "of", "the", "number" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L426-L430
train
51,030
rapidpro/expressions
python/temba_expressions/functions/excel.py
_and
def _and(ctx, *logical): """ Returns TRUE if and only if all its arguments evaluate to TRUE """ for arg in logical: if not conversions.to_boolean(arg, ctx): return False return True
python
def _and(ctx, *logical): """ Returns TRUE if and only if all its arguments evaluate to TRUE """ for arg in logical: if not conversions.to_boolean(arg, ctx): return False return True
[ "def", "_and", "(", "ctx", ",", "*", "logical", ")", ":", "for", "arg", "in", "logical", ":", "if", "not", "conversions", ".", "to_boolean", "(", "arg", ",", "ctx", ")", ":", "return", "False", "return", "True" ]
Returns TRUE if and only if all its arguments evaluate to TRUE
[ "Returns", "TRUE", "if", "and", "only", "if", "all", "its", "arguments", "evaluate", "to", "TRUE" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L435-L442
train
51,031
rapidpro/expressions
python/temba_expressions/functions/excel.py
_if
def _if(ctx, logical_test, value_if_true=0, value_if_false=False): """ Returns one value if the condition evaluates to TRUE, and another value if it evaluates to FALSE """ return value_if_true if conversions.to_boolean(logical_test, ctx) else value_if_false
python
def _if(ctx, logical_test, value_if_true=0, value_if_false=False): """ Returns one value if the condition evaluates to TRUE, and another value if it evaluates to FALSE """ return value_if_true if conversions.to_boolean(logical_test, ctx) else value_if_false
[ "def", "_if", "(", "ctx", ",", "logical_test", ",", "value_if_true", "=", "0", ",", "value_if_false", "=", "False", ")", ":", "return", "value_if_true", "if", "conversions", ".", "to_boolean", "(", "logical_test", ",", "ctx", ")", "else", "value_if_false" ]
Returns one value if the condition evaluates to TRUE, and another value if it evaluates to FALSE
[ "Returns", "one", "value", "if", "the", "condition", "evaluates", "to", "TRUE", "and", "another", "value", "if", "it", "evaluates", "to", "FALSE" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L452-L456
train
51,032
rapidpro/expressions
python/temba_expressions/functions/excel.py
_or
def _or(ctx, *logical): """ Returns TRUE if any argument is TRUE """ for arg in logical: if conversions.to_boolean(arg, ctx): return True return False
python
def _or(ctx, *logical): """ Returns TRUE if any argument is TRUE """ for arg in logical: if conversions.to_boolean(arg, ctx): return True return False
[ "def", "_or", "(", "ctx", ",", "*", "logical", ")", ":", "for", "arg", "in", "logical", ":", "if", "conversions", ".", "to_boolean", "(", "arg", ",", "ctx", ")", ":", "return", "True", "return", "False" ]
Returns TRUE if any argument is TRUE
[ "Returns", "TRUE", "if", "any", "argument", "is", "TRUE" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L459-L466
train
51,033
rainwoodman/kdcount
kdcount/correlate.py
compute_sum_values
def compute_sum_values(i, j, data1, data2): """ Return the sum1_ij and sum2_ij values given the input indices and data instances. Notes ----- This is called in `Binning.update_sums` to compute the `sum1` and `sum2` contributions for indices `(i,j)` Parameters ---------- i,j : array_like the bin indices for these pairs data1, data2 : `points`, `field` instances the two `points` or `field` objects Returns ------- sum1_ij, sum2_ij : float, array_like (N,...) contributions to sum1, sum2 -- either a float or array of shape (N, ...) where N is the length of `i`, `j` """ sum1_ij = 1. for idx, d in zip([i,j], [data1, data2]): if isinstance(d, field): sum1_ij *= d.wvalue[idx] elif isinstance(d, points): sum1_ij *= d.weights[idx] else: raise NotImplementedError("data type not recognized") sum2_ij = data1.weights[i] * data2.weights[j] return sum1_ij, sum2_ij
python
def compute_sum_values(i, j, data1, data2): """ Return the sum1_ij and sum2_ij values given the input indices and data instances. Notes ----- This is called in `Binning.update_sums` to compute the `sum1` and `sum2` contributions for indices `(i,j)` Parameters ---------- i,j : array_like the bin indices for these pairs data1, data2 : `points`, `field` instances the two `points` or `field` objects Returns ------- sum1_ij, sum2_ij : float, array_like (N,...) contributions to sum1, sum2 -- either a float or array of shape (N, ...) where N is the length of `i`, `j` """ sum1_ij = 1. for idx, d in zip([i,j], [data1, data2]): if isinstance(d, field): sum1_ij *= d.wvalue[idx] elif isinstance(d, points): sum1_ij *= d.weights[idx] else: raise NotImplementedError("data type not recognized") sum2_ij = data1.weights[i] * data2.weights[j] return sum1_ij, sum2_ij
[ "def", "compute_sum_values", "(", "i", ",", "j", ",", "data1", ",", "data2", ")", ":", "sum1_ij", "=", "1.", "for", "idx", ",", "d", "in", "zip", "(", "[", "i", ",", "j", "]", ",", "[", "data1", ",", "data2", "]", ")", ":", "if", "isinstance", ...
Return the sum1_ij and sum2_ij values given the input indices and data instances. Notes ----- This is called in `Binning.update_sums` to compute the `sum1` and `sum2` contributions for indices `(i,j)` Parameters ---------- i,j : array_like the bin indices for these pairs data1, data2 : `points`, `field` instances the two `points` or `field` objects Returns ------- sum1_ij, sum2_ij : float, array_like (N,...) contributions to sum1, sum2 -- either a float or array of shape (N, ...) where N is the length of `i`, `j`
[ "Return", "the", "sum1_ij", "and", "sum2_ij", "values", "given", "the", "input", "indices", "and", "data", "instances", "." ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L46-L77
train
51,034
rainwoodman/kdcount
kdcount/correlate.py
Binning._setup
def _setup(self): """ Set the binning info we need from the `edges` """ dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')] dtype = numpy.dtype(dtype) self._info = numpy.empty(self.Ndim, dtype=dtype) self.min = self._info['min'] self.max = self._info['max'] self.N = self._info['N'] self.inv = self._info['inv'] self.spacing = self._info['spacing'] for i, dim in enumerate(self.dims): self.N[i] = len(self.edges[i])-1 self.min[i] = self.edges[i][0] self.max[i] = self.edges[i][-1] # determine the type of spacing self.spacing[i] = None lin_diff = numpy.diff(self.edges[i]) with numpy.errstate(divide='ignore', invalid='ignore'): log_diff = numpy.diff(numpy.log10(self.edges[i])) if numpy.allclose(lin_diff, lin_diff[0]): self.spacing[i] = 'linspace' self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i]) elif numpy.allclose(log_diff, log_diff[0]): self.spacing[i] = 'logspace' self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i]) self.shape = self.N + 2 # store Rmax self.Rmax = self.max[0]
python
def _setup(self): """ Set the binning info we need from the `edges` """ dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')] dtype = numpy.dtype(dtype) self._info = numpy.empty(self.Ndim, dtype=dtype) self.min = self._info['min'] self.max = self._info['max'] self.N = self._info['N'] self.inv = self._info['inv'] self.spacing = self._info['spacing'] for i, dim in enumerate(self.dims): self.N[i] = len(self.edges[i])-1 self.min[i] = self.edges[i][0] self.max[i] = self.edges[i][-1] # determine the type of spacing self.spacing[i] = None lin_diff = numpy.diff(self.edges[i]) with numpy.errstate(divide='ignore', invalid='ignore'): log_diff = numpy.diff(numpy.log10(self.edges[i])) if numpy.allclose(lin_diff, lin_diff[0]): self.spacing[i] = 'linspace' self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i]) elif numpy.allclose(log_diff, log_diff[0]): self.spacing[i] = 'logspace' self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i]) self.shape = self.N + 2 # store Rmax self.Rmax = self.max[0]
[ "def", "_setup", "(", "self", ")", ":", "dtype", "=", "[", "(", "'inv'", ",", "'f8'", ")", ",", "(", "'min'", ",", "'f8'", ")", ",", "(", "'max'", ",", "'f8'", ")", ",", "(", "'N'", ",", "'i4'", ")", ",", "(", "'spacing'", ",", "'object'", ")...
Set the binning info we need from the `edges`
[ "Set", "the", "binning", "info", "we", "need", "from", "the", "edges" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L135-L169
train
51,035
rainwoodman/kdcount
kdcount/correlate.py
Binning.linear
def linear(self, **paircoords): """ Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index """ N = len(paircoords[list(paircoords.keys())[0]]) integer = numpy.empty(N, ('i8', (self.Ndim,))).T # do each dimension for i, dim in enumerate(self.dims): if self.spacing[i] == 'linspace': x = paircoords[dim] - self.min[i] integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] == 'logspace': x = paircoords[dim].copy() x[x == 0] = self.min[i] * 0.9 x = numpy.log10(x / self.min[i]) integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] is None: edge = self.edges if self.Ndim == 1 else self.edges[i] integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left') return numpy.ravel_multi_index(integer, self.shape, mode='clip')
python
def linear(self, **paircoords): """ Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index """ N = len(paircoords[list(paircoords.keys())[0]]) integer = numpy.empty(N, ('i8', (self.Ndim,))).T # do each dimension for i, dim in enumerate(self.dims): if self.spacing[i] == 'linspace': x = paircoords[dim] - self.min[i] integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] == 'logspace': x = paircoords[dim].copy() x[x == 0] = self.min[i] * 0.9 x = numpy.log10(x / self.min[i]) integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] is None: edge = self.edges if self.Ndim == 1 else self.edges[i] integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left') return numpy.ravel_multi_index(integer, self.shape, mode='clip')
[ "def", "linear", "(", "self", ",", "*", "*", "paircoords", ")", ":", "N", "=", "len", "(", "paircoords", "[", "list", "(", "paircoords", ".", "keys", "(", ")", ")", "[", "0", "]", "]", ")", "integer", "=", "numpy", ".", "empty", "(", "N", ",", ...
Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index
[ "Linearize", "bin", "indices", "." ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L172-L208
train
51,036
rainwoodman/kdcount
kdcount/correlate.py
Binning.update_sums
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None): """ The main function that digitizes the pair counts, calls bincount for the appropriate `sum1` and `sum2` values, and adds them to the input arrays, will modify sum1, sum2, N, and centers_sum inplace. """ # the summation values for this (r,i,j) sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2) # digitize digr = self.digitize(r, i, j, data1, data2) if len(digr) == 3 and isinstance(digr[1], dict): dig, paircoords, weights = digr elif len(digr) == 2 and isinstance(digr[1], dict): dig, paircoords = digr weights = None else: dig = digr paircoords = None weights = None # sum 1 def add_one_channel(sum1c, sum1_ijc): if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1: sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size) else: for d in range(sum1c.shape[0]): sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size) if self.channels: if weights is None: raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels") sum1_ij = weights * sum1_ij # sum1_ij[ichannel, dig, dim] for ichannel in range(len(self.channels)): add_one_channel(sum1[ichannel], sum1_ij[ichannel]) else: # sum1_ij[dig, dim] add_one_channel(sum1, sum1_ij) # sum 2, if both data are not points if not numpy.isscalar(sum2): sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size) if N is not None: if not paircoords: raise RuntimeError("Bin center is requested but not returned by digitize") # update the mean coords self._update_mean_coords(dig, N, centers_sum, **paircoords)
python
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None): """ The main function that digitizes the pair counts, calls bincount for the appropriate `sum1` and `sum2` values, and adds them to the input arrays, will modify sum1, sum2, N, and centers_sum inplace. """ # the summation values for this (r,i,j) sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2) # digitize digr = self.digitize(r, i, j, data1, data2) if len(digr) == 3 and isinstance(digr[1], dict): dig, paircoords, weights = digr elif len(digr) == 2 and isinstance(digr[1], dict): dig, paircoords = digr weights = None else: dig = digr paircoords = None weights = None # sum 1 def add_one_channel(sum1c, sum1_ijc): if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1: sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size) else: for d in range(sum1c.shape[0]): sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size) if self.channels: if weights is None: raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels") sum1_ij = weights * sum1_ij # sum1_ij[ichannel, dig, dim] for ichannel in range(len(self.channels)): add_one_channel(sum1[ichannel], sum1_ij[ichannel]) else: # sum1_ij[dig, dim] add_one_channel(sum1, sum1_ij) # sum 2, if both data are not points if not numpy.isscalar(sum2): sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size) if N is not None: if not paircoords: raise RuntimeError("Bin center is requested but not returned by digitize") # update the mean coords self._update_mean_coords(dig, N, centers_sum, **paircoords)
[ "def", "update_sums", "(", "self", ",", "r", ",", "i", ",", "j", ",", "data1", ",", "data2", ",", "sum1", ",", "sum2", ",", "N", "=", "None", ",", "centers_sum", "=", "None", ")", ":", "# the summation values for this (r,i,j)", "sum1_ij", ",", "sum2_ij",...
The main function that digitizes the pair counts, calls bincount for the appropriate `sum1` and `sum2` values, and adds them to the input arrays, will modify sum1, sum2, N, and centers_sum inplace.
[ "The", "main", "function", "that", "digitizes", "the", "pair", "counts", "calls", "bincount", "for", "the", "appropriate", "sum1", "and", "sum2", "values", "and", "adds", "them", "to", "the", "input", "arrays" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L238-L291
train
51,037
rainwoodman/kdcount
kdcount/correlate.py
Binning.sum_shapes
def sum_shapes(self, data1, data2): """ Return the shapes of the summation arrays, given the input data and shape of the bins """ # the linear shape (put extra dimensions first) linearshape = [-1] + list(self.shape) # determine the full shape subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)] subshape = [] if len(subshapes) == 2: assert subshapes[0] == subshapes[1] subshape = subshapes[0] elif len(subshapes) == 1: subshape = subshapes[0] fullshape = subshape + list(self.shape) # prepend the shape for different channels if self.channels: fullshape = [len(self.channels)] + fullshape return linearshape, fullshape
python
def sum_shapes(self, data1, data2): """ Return the shapes of the summation arrays, given the input data and shape of the bins """ # the linear shape (put extra dimensions first) linearshape = [-1] + list(self.shape) # determine the full shape subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)] subshape = [] if len(subshapes) == 2: assert subshapes[0] == subshapes[1] subshape = subshapes[0] elif len(subshapes) == 1: subshape = subshapes[0] fullshape = subshape + list(self.shape) # prepend the shape for different channels if self.channels: fullshape = [len(self.channels)] + fullshape return linearshape, fullshape
[ "def", "sum_shapes", "(", "self", ",", "data1", ",", "data2", ")", ":", "# the linear shape (put extra dimensions first)", "linearshape", "=", "[", "-", "1", "]", "+", "list", "(", "self", ".", "shape", ")", "# determine the full shape", "subshapes", "=", "[", ...
Return the shapes of the summation arrays, given the input data and shape of the bins
[ "Return", "the", "shapes", "of", "the", "summation", "arrays", "given", "the", "input", "data", "and", "shape", "of", "the", "bins" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L293-L315
train
51,038
rainwoodman/kdcount
kdcount/correlate.py
Binning._update_mean_coords
def _update_mean_coords(self, dig, N, centers_sum, **paircoords): """ Update the mean coordinate sums """ if N is None or centers_sum is None: return N.flat[:] += utils.bincount(dig, 1., minlength=N.size) for i, dim in enumerate(self.dims): size = centers_sum[i].size centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
python
def _update_mean_coords(self, dig, N, centers_sum, **paircoords): """ Update the mean coordinate sums """ if N is None or centers_sum is None: return N.flat[:] += utils.bincount(dig, 1., minlength=N.size) for i, dim in enumerate(self.dims): size = centers_sum[i].size centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
[ "def", "_update_mean_coords", "(", "self", ",", "dig", ",", "N", ",", "centers_sum", ",", "*", "*", "paircoords", ")", ":", "if", "N", "is", "None", "or", "centers_sum", "is", "None", ":", "return", "N", ".", "flat", "[", ":", "]", "+=", "utils", "...
Update the mean coordinate sums
[ "Update", "the", "mean", "coordinate", "sums" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L317-L326
train
51,039
rainwoodman/kdcount
kdcount/correlate.py
paircount_queue.work
def work(self, i): """ Internal function that performs the pair-counting """ n1, n2 = self.p[i] # initialize the total arrays for this process sum1 = numpy.zeros_like(self.sum1g) sum2 = 1. if not self.pts_only: sum2 = numpy.zeros_like(self.sum2g) if self.compute_mean_coords: N = numpy.zeros_like(self.N) centers_sum = [numpy.zeros_like(c) for c in self.centers] else: N = None; centers_sum = None if self.bins.enable_fast_node_count: # field x points is not supported. # because it is more likely need to deal # with broadcasting sum1attrs = [ d.attr for d in self.data ] counts, sum1c = n1.count(n2, self.bins.edges, attrs=sum1attrs) sum1[..., :-1] = sum1c sum1[..., -1] = 0 else: def callback(r, i, j): # just call the binning function, passing the # sum arrays to fill in self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum) n1.enum(n2, self.bins.Rmax, process=callback) if not self.compute_mean_coords: return sum1, sum2 else: return sum1, sum2, N, centers_sum
python
def work(self, i): """ Internal function that performs the pair-counting """ n1, n2 = self.p[i] # initialize the total arrays for this process sum1 = numpy.zeros_like(self.sum1g) sum2 = 1. if not self.pts_only: sum2 = numpy.zeros_like(self.sum2g) if self.compute_mean_coords: N = numpy.zeros_like(self.N) centers_sum = [numpy.zeros_like(c) for c in self.centers] else: N = None; centers_sum = None if self.bins.enable_fast_node_count: # field x points is not supported. # because it is more likely need to deal # with broadcasting sum1attrs = [ d.attr for d in self.data ] counts, sum1c = n1.count(n2, self.bins.edges, attrs=sum1attrs) sum1[..., :-1] = sum1c sum1[..., -1] = 0 else: def callback(r, i, j): # just call the binning function, passing the # sum arrays to fill in self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum) n1.enum(n2, self.bins.Rmax, process=callback) if not self.compute_mean_coords: return sum1, sum2 else: return sum1, sum2, N, centers_sum
[ "def", "work", "(", "self", ",", "i", ")", ":", "n1", ",", "n2", "=", "self", ".", "p", "[", "i", "]", "# initialize the total arrays for this process", "sum1", "=", "numpy", ".", "zeros_like", "(", "self", ".", "sum1g", ")", "sum2", "=", "1.", "if", ...
Internal function that performs the pair-counting
[ "Internal", "function", "that", "performs", "the", "pair", "-", "counting" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L739-L778
train
51,040
rainwoodman/kdcount
kdcount/correlate.py
paircount_queue.reduce
def reduce(self, sum1, sum2, *args): """ The internal reduce function that sums the results from various processors """ self.sum1g[...] += sum1 if not self.pts_only: self.sum2g[...] += sum2 if self.compute_mean_coords: N, centers_sum = args self.N[...] += N for i in range(self.bins.Ndim): self.centers[i][...] += centers_sum[i]
python
def reduce(self, sum1, sum2, *args): """ The internal reduce function that sums the results from various processors """ self.sum1g[...] += sum1 if not self.pts_only: self.sum2g[...] += sum2 if self.compute_mean_coords: N, centers_sum = args self.N[...] += N for i in range(self.bins.Ndim): self.centers[i][...] += centers_sum[i]
[ "def", "reduce", "(", "self", ",", "sum1", ",", "sum2", ",", "*", "args", ")", ":", "self", ".", "sum1g", "[", "...", "]", "+=", "sum1", "if", "not", "self", ".", "pts_only", ":", "self", ".", "sum2g", "[", "...", "]", "+=", "sum2", "if", "self...
The internal reduce function that sums the results from various processors
[ "The", "internal", "reduce", "function", "that", "sums", "the", "results", "from", "various", "processors" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L780-L792
train
51,041
hammerlab/stanity
stanity/psis.py
psisloo
def psisloo(log_lik, **kwargs): r"""PSIS leave-one-out log predictive densities. Computes the log predictive densities given posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)` in input parameter `log_lik`. Returns a sum of the leave-one-out log predictive densities `loo`, individual leave-one-out log predictive density terms `loos` and an estimate of Pareto tail indeces `ks`. The estimates are unreliable if tail index ``k > 0.7`` (see more in the references listed in the module docstring). Additional keyword arguments are passed to the :meth:`psislw()` function (see the corresponding documentation). Parameters ---------- log_lik : ndarray Array of size n x m containing n posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)`. Returns ------- loo : scalar sum of the leave-one-out log predictive densities loos : ndarray individual leave-one-out log predictive density terms ks : ndarray estimated Pareto tail indeces """ # ensure overwrite flag in passed arguments kwargs['overwrite_lw'] = True # log raw weights from log_lik lw = -log_lik # compute Pareto smoothed log weights given raw log weights lw, ks = psislw(lw, **kwargs) # compute lw += log_lik loos = sumlogs(lw, axis=0) loo = loos.sum() return loo, loos, ks
python
def psisloo(log_lik, **kwargs): r"""PSIS leave-one-out log predictive densities. Computes the log predictive densities given posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)` in input parameter `log_lik`. Returns a sum of the leave-one-out log predictive densities `loo`, individual leave-one-out log predictive density terms `loos` and an estimate of Pareto tail indeces `ks`. The estimates are unreliable if tail index ``k > 0.7`` (see more in the references listed in the module docstring). Additional keyword arguments are passed to the :meth:`psislw()` function (see the corresponding documentation). Parameters ---------- log_lik : ndarray Array of size n x m containing n posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)`. Returns ------- loo : scalar sum of the leave-one-out log predictive densities loos : ndarray individual leave-one-out log predictive density terms ks : ndarray estimated Pareto tail indeces """ # ensure overwrite flag in passed arguments kwargs['overwrite_lw'] = True # log raw weights from log_lik lw = -log_lik # compute Pareto smoothed log weights given raw log weights lw, ks = psislw(lw, **kwargs) # compute lw += log_lik loos = sumlogs(lw, axis=0) loo = loos.sum() return loo, loos, ks
[ "def", "psisloo", "(", "log_lik", ",", "*", "*", "kwargs", ")", ":", "# ensure overwrite flag in passed arguments", "kwargs", "[", "'overwrite_lw'", "]", "=", "True", "# log raw weights from log_lik", "lw", "=", "-", "log_lik", "# compute Pareto smoothed log weights given...
r"""PSIS leave-one-out log predictive densities. Computes the log predictive densities given posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)` in input parameter `log_lik`. Returns a sum of the leave-one-out log predictive densities `loo`, individual leave-one-out log predictive density terms `loos` and an estimate of Pareto tail indeces `ks`. The estimates are unreliable if tail index ``k > 0.7`` (see more in the references listed in the module docstring). Additional keyword arguments are passed to the :meth:`psislw()` function (see the corresponding documentation). Parameters ---------- log_lik : ndarray Array of size n x m containing n posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)`. Returns ------- loo : scalar sum of the leave-one-out log predictive densities loos : ndarray individual leave-one-out log predictive density terms ks : ndarray estimated Pareto tail indeces
[ "r", "PSIS", "leave", "-", "one", "-", "out", "log", "predictive", "densities", "." ]
6c36abc207c4ce94f78968501dab839a56f35a41
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L69-L110
train
51,042
hammerlab/stanity
stanity/psis.py
gpinv
def gpinv(p, k, sigma): """Inverse Generalised Pareto distribution function.""" x = np.empty(p.shape) x.fill(np.nan) if sigma <= 0: return x ok = (p > 0) & (p < 1) if np.all(ok): if np.abs(k) < np.finfo(float).eps: np.negative(p, out=x) np.log1p(x, out=x) np.negative(x, out=x) else: np.negative(p, out=x) np.log1p(x, out=x) x *= -k np.expm1(x, out=x) x /= k x *= sigma else: if np.abs(k) < np.finfo(float).eps: # x[ok] = - np.log1p(-p[ok]) temp = p[ok] np.negative(temp, out=temp) np.log1p(temp, out=temp) np.negative(temp, out=temp) x[ok] = temp else: # x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k temp = p[ok] np.negative(temp, out=temp) np.log1p(temp, out=temp) temp *= -k np.expm1(temp, out=temp) temp /= k x[ok] = temp x *= sigma x[p == 0] = 0 if k >= 0: x[p == 1] = np.inf else: x[p == 1] = -sigma / k return x
python
def gpinv(p, k, sigma): """Inverse Generalised Pareto distribution function.""" x = np.empty(p.shape) x.fill(np.nan) if sigma <= 0: return x ok = (p > 0) & (p < 1) if np.all(ok): if np.abs(k) < np.finfo(float).eps: np.negative(p, out=x) np.log1p(x, out=x) np.negative(x, out=x) else: np.negative(p, out=x) np.log1p(x, out=x) x *= -k np.expm1(x, out=x) x /= k x *= sigma else: if np.abs(k) < np.finfo(float).eps: # x[ok] = - np.log1p(-p[ok]) temp = p[ok] np.negative(temp, out=temp) np.log1p(temp, out=temp) np.negative(temp, out=temp) x[ok] = temp else: # x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k temp = p[ok] np.negative(temp, out=temp) np.log1p(temp, out=temp) temp *= -k np.expm1(temp, out=temp) temp /= k x[ok] = temp x *= sigma x[p == 0] = 0 if k >= 0: x[p == 1] = np.inf else: x[p == 1] = -sigma / k return x
[ "def", "gpinv", "(", "p", ",", "k", ",", "sigma", ")", ":", "x", "=", "np", ".", "empty", "(", "p", ".", "shape", ")", "x", ".", "fill", "(", "np", ".", "nan", ")", "if", "sigma", "<=", "0", ":", "return", "x", "ok", "=", "(", "p", ">", ...
Inverse Generalised Pareto distribution function.
[ "Inverse", "Generalised", "Pareto", "distribution", "function", "." ]
6c36abc207c4ce94f78968501dab839a56f35a41
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L335-L377
train
51,043
hammerlab/stanity
stanity/psis.py
sumlogs
def sumlogs(x, axis=None, out=None): """Sum of vector where numbers are represented by their logarithms. Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that it works even when elements have large magnitude. """ maxx = x.max(axis=axis, keepdims=True) xnorm = x - maxx np.exp(xnorm, out=xnorm) out = np.sum(xnorm, axis=axis, out=out) if isinstance(out, np.ndarray): np.log(out, out=out) else: out = np.log(out) out += np.squeeze(maxx) return out
python
def sumlogs(x, axis=None, out=None): """Sum of vector where numbers are represented by their logarithms. Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that it works even when elements have large magnitude. """ maxx = x.max(axis=axis, keepdims=True) xnorm = x - maxx np.exp(xnorm, out=xnorm) out = np.sum(xnorm, axis=axis, out=out) if isinstance(out, np.ndarray): np.log(out, out=out) else: out = np.log(out) out += np.squeeze(maxx) return out
[ "def", "sumlogs", "(", "x", ",", "axis", "=", "None", ",", "out", "=", "None", ")", ":", "maxx", "=", "x", ".", "max", "(", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", "xnorm", "=", "x", "-", "maxx", "np", ".", "exp", "(", "xnorm"...
Sum of vector where numbers are represented by their logarithms. Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that it works even when elements have large magnitude.
[ "Sum", "of", "vector", "where", "numbers", "are", "represented", "by", "their", "logarithms", "." ]
6c36abc207c4ce94f78968501dab839a56f35a41
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L380-L396
train
51,044
rapidpro/expressions
python/temba_expressions/functions/__init__.py
FunctionManager.build_listing
def build_listing(self): """ Builds a listing of all functions sorted A-Z, with their names and descriptions """ def func_entry(name, func): args, varargs, defaults = self._get_arg_spec(func) # add regular arguments params = [{'name': str(a), 'optional': a in defaults, 'vararg': False} for a in args if a != 'ctx'] # add possible variable argument if varargs: params += [{'name': str(varargs), 'optional': False, 'vararg': True}] return {'name': str(name.upper()), 'description': str(func.__doc__).strip(), 'params': params} listing = [func_entry(f_name, f) for f_name, f in self._functions.items()] return sorted(listing, key=lambda l: l['name'])
python
def build_listing(self): """ Builds a listing of all functions sorted A-Z, with their names and descriptions """ def func_entry(name, func): args, varargs, defaults = self._get_arg_spec(func) # add regular arguments params = [{'name': str(a), 'optional': a in defaults, 'vararg': False} for a in args if a != 'ctx'] # add possible variable argument if varargs: params += [{'name': str(varargs), 'optional': False, 'vararg': True}] return {'name': str(name.upper()), 'description': str(func.__doc__).strip(), 'params': params} listing = [func_entry(f_name, f) for f_name, f in self._functions.items()] return sorted(listing, key=lambda l: l['name'])
[ "def", "build_listing", "(", "self", ")", ":", "def", "func_entry", "(", "name", ",", "func", ")", ":", "args", ",", "varargs", ",", "defaults", "=", "self", ".", "_get_arg_spec", "(", "func", ")", "# add regular arguments", "params", "=", "[", "{", "'na...
Builds a listing of all functions sorted A-Z, with their names and descriptions
[ "Builds", "a", "listing", "of", "all", "functions", "sorted", "A", "-", "Z", "with", "their", "names", "and", "descriptions" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/__init__.py#L83-L102
train
51,045
rapidpro/expressions
python/temba_expressions/functions/__init__.py
FunctionManager._get_arg_spec
def _get_arg_spec(func): """ Gets the argument spec of the given function, returning defaults as a dict of param names to values """ args, varargs, keywords, defaults = inspect.getargspec(func) # build a mapping from argument names to their default values, if any: if defaults is None: defaults = {} else: defaulted_args = args[-len(defaults):] defaults = {name: val for name, val in zip(defaulted_args, defaults)} return args, varargs, defaults
python
def _get_arg_spec(func): """ Gets the argument spec of the given function, returning defaults as a dict of param names to values """ args, varargs, keywords, defaults = inspect.getargspec(func) # build a mapping from argument names to their default values, if any: if defaults is None: defaults = {} else: defaulted_args = args[-len(defaults):] defaults = {name: val for name, val in zip(defaulted_args, defaults)} return args, varargs, defaults
[ "def", "_get_arg_spec", "(", "func", ")", ":", "args", ",", "varargs", ",", "keywords", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "func", ")", "# build a mapping from argument names to their default values, if any:", "if", "defaults", "is", "None", ":...
Gets the argument spec of the given function, returning defaults as a dict of param names to values
[ "Gets", "the", "argument", "spec", "of", "the", "given", "function", "returning", "defaults", "as", "a", "dict", "of", "param", "names", "to", "values" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/__init__.py#L105-L118
train
51,046
BlueBrain/nat
nat/runOCR.py
check_ocrmypdf
def check_ocrmypdf(input_file, output_file, *args, env=None): "Run ocrmypdf and confirmed that a valid file was created" p, out, err = run_ocrmypdf(input_file, output_file, *args, env=env) if p.returncode != 0: print('stdout\n======') print(out) print('stderr\n======') print(err) #assert p.returncode == 0 #assert os.path.exists(output_file), "Output file not created" #assert os.stat(output_file).st_size > 100, "PDF too small or empty" return output_file
python
def check_ocrmypdf(input_file, output_file, *args, env=None): "Run ocrmypdf and confirmed that a valid file was created" p, out, err = run_ocrmypdf(input_file, output_file, *args, env=env) if p.returncode != 0: print('stdout\n======') print(out) print('stderr\n======') print(err) #assert p.returncode == 0 #assert os.path.exists(output_file), "Output file not created" #assert os.stat(output_file).st_size > 100, "PDF too small or empty" return output_file
[ "def", "check_ocrmypdf", "(", "input_file", ",", "output_file", ",", "*", "args", ",", "env", "=", "None", ")", ":", "p", ",", "out", ",", "err", "=", "run_ocrmypdf", "(", "input_file", ",", "output_file", ",", "*", "args", ",", "env", "=", "env", ")...
Run ocrmypdf and confirmed that a valid file was created
[ "Run", "ocrmypdf", "and", "confirmed", "that", "a", "valid", "file", "was", "created" ]
0934f06e48e6efedf55a9617b15becae0d7b277c
https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/runOCR.py#L24-L36
train
51,047
BlueBrain/nat
nat/runOCR.py
run_ocrmypdf
def run_ocrmypdf(input_file, output_file, *args, env=None): "Run ocrmypdf and let caller deal with results" if env is None: env = os.environ p_args = OCRMYPDF + list(args) + [input_file, output_file] p = Popen( p_args, close_fds=True, stdout=PIPE, stderr=PIPE, universal_newlines=True, env=env) out, err = p.communicate() return p, out, err
python
def run_ocrmypdf(input_file, output_file, *args, env=None): "Run ocrmypdf and let caller deal with results" if env is None: env = os.environ p_args = OCRMYPDF + list(args) + [input_file, output_file] p = Popen( p_args, close_fds=True, stdout=PIPE, stderr=PIPE, universal_newlines=True, env=env) out, err = p.communicate() return p, out, err
[ "def", "run_ocrmypdf", "(", "input_file", ",", "output_file", ",", "*", "args", ",", "env", "=", "None", ")", ":", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "p_args", "=", "OCRMYPDF", "+", "list", "(", "args", ")", "+", "[",...
Run ocrmypdf and let caller deal with results
[ "Run", "ocrmypdf", "and", "let", "caller", "deal", "with", "results" ]
0934f06e48e6efedf55a9617b15becae0d7b277c
https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/runOCR.py#L39-L50
train
51,048
ttinies/sc2gameMapRepo
sc2maptool/mapRecord.py
standardizeMapName
def standardizeMapName(mapName): """pretty-fy the name for pysc2 map lookup""" #print("foreignName: %s (%s)"%(mapName, mapName in c.mapNameTranslations)) #if mapName in c.mapNameTranslations: # return c.mapNameTranslations[mapName] newName = os.path.basename(mapName) newName = newName.split(".")[0] newName = newName.split("(")[0] newName = re.sub("[LT]E+$", "", newName) newName = re.sub("-", "", newName) newName = re.sub(' ', '', newName, flags=re.UNICODE) foreignName = newName#bytes(mapName, 'utf-16') #print("foreignName: %s (%s)"%(foreignName, foreignName in c.mapNameTranslations)) if foreignName in c.mapNameTranslations: return c.mapNameTranslations[foreignName] return newName
python
def standardizeMapName(mapName): """pretty-fy the name for pysc2 map lookup""" #print("foreignName: %s (%s)"%(mapName, mapName in c.mapNameTranslations)) #if mapName in c.mapNameTranslations: # return c.mapNameTranslations[mapName] newName = os.path.basename(mapName) newName = newName.split(".")[0] newName = newName.split("(")[0] newName = re.sub("[LT]E+$", "", newName) newName = re.sub("-", "", newName) newName = re.sub(' ', '', newName, flags=re.UNICODE) foreignName = newName#bytes(mapName, 'utf-16') #print("foreignName: %s (%s)"%(foreignName, foreignName in c.mapNameTranslations)) if foreignName in c.mapNameTranslations: return c.mapNameTranslations[foreignName] return newName
[ "def", "standardizeMapName", "(", "mapName", ")", ":", "#print(\"foreignName: %s (%s)\"%(mapName, mapName in c.mapNameTranslations))", "#if mapName in c.mapNameTranslations:", "# return c.mapNameTranslations[mapName]", "newName", "=", "os", ".", "path", ".", "basename", "(", "m...
pretty-fy the name for pysc2 map lookup
[ "pretty", "-", "fy", "the", "name", "for", "pysc2", "map", "lookup" ]
3a215067fae8f86f6a3ffe37272fbd7a5461cfab
https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/mapRecord.py#L9-L24
train
51,049
lablup/backend.ai-common
src/ai/backend/common/utils.py
env_info
def env_info(): ''' Returns a string that contains the Python version and runtime path. ''' v = sys.version_info pyver = f'Python {v.major}.{v.minor}.{v.micro}' if v.releaselevel == 'alpha': pyver += 'a' if v.releaselevel == 'beta': pyver += 'b' if v.releaselevel == 'candidate': pyver += 'rc' if v.releaselevel != 'final': pyver += str(v.serial) return f'{pyver} (env: {sys.prefix})'
python
def env_info(): ''' Returns a string that contains the Python version and runtime path. ''' v = sys.version_info pyver = f'Python {v.major}.{v.minor}.{v.micro}' if v.releaselevel == 'alpha': pyver += 'a' if v.releaselevel == 'beta': pyver += 'b' if v.releaselevel == 'candidate': pyver += 'rc' if v.releaselevel != 'final': pyver += str(v.serial) return f'{pyver} (env: {sys.prefix})'
[ "def", "env_info", "(", ")", ":", "v", "=", "sys", ".", "version_info", "pyver", "=", "f'Python {v.major}.{v.minor}.{v.micro}'", "if", "v", ".", "releaselevel", "==", "'alpha'", ":", "pyver", "+=", "'a'", "if", "v", ".", "releaselevel", "==", "'beta'", ":", ...
Returns a string that contains the Python version and runtime path.
[ "Returns", "a", "string", "that", "contains", "the", "Python", "version", "and", "runtime", "path", "." ]
20b3a2551ee5bb3b88e7836471bc244a70ad0ae6
https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/utils.py#L17-L31
train
51,050
lablup/backend.ai-common
src/ai/backend/common/utils.py
dict2kvlist
def dict2kvlist(o): ''' Serializes a dict-like object into a generator of the flatten list of repeating key-value pairs. It is useful when using HMSET method in Redis. Example: >>> list(dict2kvlist({'a': 1, 'b': 2})) ['a', 1, 'b', 2] ''' return chain.from_iterable((k, v) for k, v in o.items())
python
def dict2kvlist(o): ''' Serializes a dict-like object into a generator of the flatten list of repeating key-value pairs. It is useful when using HMSET method in Redis. Example: >>> list(dict2kvlist({'a': 1, 'b': 2})) ['a', 1, 'b', 2] ''' return chain.from_iterable((k, v) for k, v in o.items())
[ "def", "dict2kvlist", "(", "o", ")", ":", "return", "chain", ".", "from_iterable", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "o", ".", "items", "(", ")", ")" ]
Serializes a dict-like object into a generator of the flatten list of repeating key-value pairs. It is useful when using HMSET method in Redis. Example: >>> list(dict2kvlist({'a': 1, 'b': 2})) ['a', 1, 'b', 2]
[ "Serializes", "a", "dict", "-", "like", "object", "into", "a", "generator", "of", "the", "flatten", "list", "of", "repeating", "key", "-", "value", "pairs", ".", "It", "is", "useful", "when", "using", "HMSET", "method", "in", "Redis", "." ]
20b3a2551ee5bb3b88e7836471bc244a70ad0ae6
https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/utils.py#L43-L52
train
51,051
ska-sa/katversion
katversion/build.py
setuptools_entry
def setuptools_entry(dist, keyword, value): """Setuptools entry point for setting version and baking it into package.""" # If 'use_katversion' is False, ignore the rest if not value: return # Enforce the version obtained by katversion, overriding user setting version = get_version() if dist.metadata.version is not None: s = "Ignoring explicit version='{0}' in setup.py, using '{1}' instead" warnings.warn(s.format(dist.metadata.version, version)) dist.metadata.version = version # Extend build_py command to bake version string into installed package ExistingCustomBuildPy = dist.cmdclass.get('build_py', object) class KatVersionBuildPy(AddVersionToInitBuildPy, ExistingCustomBuildPy): """First perform existing build_py and then bake in version string.""" dist.cmdclass['build_py'] = KatVersionBuildPy # Extend sdist command to bake version string into source package ExistingCustomSdist = dist.cmdclass.get('sdist', object) class KatVersionSdist(AddVersionToInitSdist, ExistingCustomSdist): """First perform existing sdist and then bake in version string.""" dist.cmdclass['sdist'] = KatVersionSdist
python
def setuptools_entry(dist, keyword, value): """Setuptools entry point for setting version and baking it into package.""" # If 'use_katversion' is False, ignore the rest if not value: return # Enforce the version obtained by katversion, overriding user setting version = get_version() if dist.metadata.version is not None: s = "Ignoring explicit version='{0}' in setup.py, using '{1}' instead" warnings.warn(s.format(dist.metadata.version, version)) dist.metadata.version = version # Extend build_py command to bake version string into installed package ExistingCustomBuildPy = dist.cmdclass.get('build_py', object) class KatVersionBuildPy(AddVersionToInitBuildPy, ExistingCustomBuildPy): """First perform existing build_py and then bake in version string.""" dist.cmdclass['build_py'] = KatVersionBuildPy # Extend sdist command to bake version string into source package ExistingCustomSdist = dist.cmdclass.get('sdist', object) class KatVersionSdist(AddVersionToInitSdist, ExistingCustomSdist): """First perform existing sdist and then bake in version string.""" dist.cmdclass['sdist'] = KatVersionSdist
[ "def", "setuptools_entry", "(", "dist", ",", "keyword", ",", "value", ")", ":", "# If 'use_katversion' is False, ignore the rest", "if", "not", "value", ":", "return", "# Enforce the version obtained by katversion, overriding user setting", "version", "=", "get_version", "(",...
Setuptools entry point for setting version and baking it into package.
[ "Setuptools", "entry", "point", "for", "setting", "version", "and", "baking", "it", "into", "package", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/build.py#L102-L122
train
51,052
beregond/super_state_machine
super_state_machine/utils.py
is_
def is_(self, state): """Check if machine is in given state.""" translator = self._meta['translator'] state = translator.translate(state) return self.actual_state == state
python
def is_(self, state): """Check if machine is in given state.""" translator = self._meta['translator'] state = translator.translate(state) return self.actual_state == state
[ "def", "is_", "(", "self", ",", "state", ")", ":", "translator", "=", "self", ".", "_meta", "[", "'translator'", "]", "state", "=", "translator", ".", "translate", "(", "state", ")", "return", "self", ".", "actual_state", "==", "state" ]
Check if machine is in given state.
[ "Check", "if", "machine", "is", "in", "given", "state", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L9-L13
train
51,053
beregond/super_state_machine
super_state_machine/utils.py
can_be_
def can_be_(self, state): """Check if machine can transit to given state.""" translator = self._meta['translator'] state = translator.translate(state) if self._meta['complete']: return True if self.actual_state is None: return True transitions = self._meta['transitions'][self.actual_state] return state in transitions
python
def can_be_(self, state): """Check if machine can transit to given state.""" translator = self._meta['translator'] state = translator.translate(state) if self._meta['complete']: return True if self.actual_state is None: return True transitions = self._meta['transitions'][self.actual_state] return state in transitions
[ "def", "can_be_", "(", "self", ",", "state", ")", ":", "translator", "=", "self", ".", "_meta", "[", "'translator'", "]", "state", "=", "translator", ".", "translate", "(", "state", ")", "if", "self", ".", "_meta", "[", "'complete'", "]", ":", "return"...
Check if machine can transit to given state.
[ "Check", "if", "machine", "can", "transit", "to", "given", "state", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L16-L28
train
51,054
beregond/super_state_machine
super_state_machine/utils.py
force_set
def force_set(self, state): """Set new state without checking if transition is allowed.""" translator = self._meta['translator'] state = translator.translate(state) attr = self._meta['state_attribute_name'] setattr(self, attr, state)
python
def force_set(self, state): """Set new state without checking if transition is allowed.""" translator = self._meta['translator'] state = translator.translate(state) attr = self._meta['state_attribute_name'] setattr(self, attr, state)
[ "def", "force_set", "(", "self", ",", "state", ")", ":", "translator", "=", "self", ".", "_meta", "[", "'translator'", "]", "state", "=", "translator", ".", "translate", "(", "state", ")", "attr", "=", "self", ".", "_meta", "[", "'state_attribute_name'", ...
Set new state without checking if transition is allowed.
[ "Set", "new", "state", "without", "checking", "if", "transition", "is", "allowed", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L31-L36
train
51,055
beregond/super_state_machine
super_state_machine/utils.py
set_
def set_(self, state): """Set new state for machine.""" if not self.can_be_(state): state = self._meta['translator'].translate(state) raise TransitionError( "Cannot transit from '{actual_value}' to '{value}'." .format(actual_value=self.actual_state.value, value=state.value) ) self.force_set(state)
python
def set_(self, state): """Set new state for machine.""" if not self.can_be_(state): state = self._meta['translator'].translate(state) raise TransitionError( "Cannot transit from '{actual_value}' to '{value}'." .format(actual_value=self.actual_state.value, value=state.value) ) self.force_set(state)
[ "def", "set_", "(", "self", ",", "state", ")", ":", "if", "not", "self", ".", "can_be_", "(", "state", ")", ":", "state", "=", "self", ".", "_meta", "[", "'translator'", "]", ".", "translate", "(", "state", ")", "raise", "TransitionError", "(", "\"Ca...
Set new state for machine.
[ "Set", "new", "state", "for", "machine", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L39-L48
train
51,056
beregond/super_state_machine
super_state_machine/utils.py
generate_getter
def generate_getter(value): """Generate getter for given value.""" @property @wraps(is_) def getter(self): return self.is_(value) return getter
python
def generate_getter(value): """Generate getter for given value.""" @property @wraps(is_) def getter(self): return self.is_(value) return getter
[ "def", "generate_getter", "(", "value", ")", ":", "@", "property", "@", "wraps", "(", "is_", ")", "def", "getter", "(", "self", ")", ":", "return", "self", ".", "is_", "(", "value", ")", "return", "getter" ]
Generate getter for given value.
[ "Generate", "getter", "for", "given", "value", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L64-L71
train
51,057
beregond/super_state_machine
super_state_machine/utils.py
generate_checker
def generate_checker(value): """Generate state checker for given value.""" @property @wraps(can_be_) def checker(self): return self.can_be_(value) return checker
python
def generate_checker(value): """Generate state checker for given value.""" @property @wraps(can_be_) def checker(self): return self.can_be_(value) return checker
[ "def", "generate_checker", "(", "value", ")", ":", "@", "property", "@", "wraps", "(", "can_be_", ")", "def", "checker", "(", "self", ")", ":", "return", "self", ".", "can_be_", "(", "value", ")", "return", "checker" ]
Generate state checker for given value.
[ "Generate", "state", "checker", "for", "given", "value", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L74-L81
train
51,058
beregond/super_state_machine
super_state_machine/utils.py
generate_setter
def generate_setter(value): """Generate setter for given value.""" @wraps(set_) def setter(self): self.set_(value) return setter
python
def generate_setter(value): """Generate setter for given value.""" @wraps(set_) def setter(self): self.set_(value) return setter
[ "def", "generate_setter", "(", "value", ")", ":", "@", "wraps", "(", "set_", ")", "def", "setter", "(", "self", ")", ":", "self", ".", "set_", "(", "value", ")", "return", "setter" ]
Generate setter for given value.
[ "Generate", "setter", "for", "given", "value", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L84-L90
train
51,059
beregond/super_state_machine
super_state_machine/utils.py
EnumValueTranslator.translate
def translate(self, value): """Translate value to enum instance. If value is already enum instance, check if this value belongs to base enum. """ if self._check_if_already_proper(value): return value try: return self.search_table[value] except KeyError: raise ValueError("Value {value} doesn't match any state.".format( value=value ))
python
def translate(self, value): """Translate value to enum instance. If value is already enum instance, check if this value belongs to base enum. """ if self._check_if_already_proper(value): return value try: return self.search_table[value] except KeyError: raise ValueError("Value {value} doesn't match any state.".format( value=value ))
[ "def", "translate", "(", "self", ",", "value", ")", ":", "if", "self", ".", "_check_if_already_proper", "(", "value", ")", ":", "return", "value", "try", ":", "return", "self", ".", "search_table", "[", "value", "]", "except", "KeyError", ":", "raise", "...
Translate value to enum instance. If value is already enum instance, check if this value belongs to base enum.
[ "Translate", "value", "to", "enum", "instance", "." ]
31ad527f4e6b7a01e315ce865735ca18957c223e
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L127-L142
train
51,060
vkosuri/dialogflow-lite
dialogflow_lite/dialogflow.py
Dialogflow._query
def _query(self, text): """ Takes natural language text and information as query parameters and returns information as JSON. """ params = ( ('v', self.api_version), ('query', text), ('lang', self.language), ('sessionId', self.session_id), ('timezone', self.timezone), ) # store query_response if required if self.query_response: self.previous_query_response = self.query_response self.query_response = result = self.session.get(url=self.query_url, params=params).json() return result
python
def _query(self, text): """ Takes natural language text and information as query parameters and returns information as JSON. """ params = ( ('v', self.api_version), ('query', text), ('lang', self.language), ('sessionId', self.session_id), ('timezone', self.timezone), ) # store query_response if required if self.query_response: self.previous_query_response = self.query_response self.query_response = result = self.session.get(url=self.query_url, params=params).json() return result
[ "def", "_query", "(", "self", ",", "text", ")", ":", "params", "=", "(", "(", "'v'", ",", "self", ".", "api_version", ")", ",", "(", "'query'", ",", "text", ")", ",", "(", "'lang'", ",", "self", ".", "language", ")", ",", "(", "'sessionId'", ",",...
Takes natural language text and information as query parameters and returns information as JSON.
[ "Takes", "natural", "language", "text", "and", "information", "as", "query", "parameters", "and", "returns", "information", "as", "JSON", "." ]
488d6ffb4128471e672c8304995514a3c8982edc
https://github.com/vkosuri/dialogflow-lite/blob/488d6ffb4128471e672c8304995514a3c8982edc/dialogflow_lite/dialogflow.py#L88-L105
train
51,061
MacHu-GWU/dataIO-project
dataIO/zzz_manual_install.py
is_venv
def is_venv(): """Check whether if this workspace is a virtualenv. """ dir_path = os.path.dirname(SRC) is_venv_flag = True if SYS_NAME == "Windows": executable_list = ["activate", "pip.exe", "python.exe"] elif SYS_NAME in ["Darwin", "Linux"]: executable_list = ["activate", "pip", "python"] for executable in executable_list: path = os.path.join(dir_path, BIN_SCRIPTS, executable) if not os.path.exists(path): is_venv_flag = False return is_venv_flag
python
def is_venv(): """Check whether if this workspace is a virtualenv. """ dir_path = os.path.dirname(SRC) is_venv_flag = True if SYS_NAME == "Windows": executable_list = ["activate", "pip.exe", "python.exe"] elif SYS_NAME in ["Darwin", "Linux"]: executable_list = ["activate", "pip", "python"] for executable in executable_list: path = os.path.join(dir_path, BIN_SCRIPTS, executable) if not os.path.exists(path): is_venv_flag = False return is_venv_flag
[ "def", "is_venv", "(", ")", ":", "dir_path", "=", "os", ".", "path", ".", "dirname", "(", "SRC", ")", "is_venv_flag", "=", "True", "if", "SYS_NAME", "==", "\"Windows\"", ":", "executable_list", "=", "[", "\"activate\"", ",", "\"pip.exe\"", ",", "\"python.e...
Check whether if this workspace is a virtualenv.
[ "Check", "whether", "if", "this", "workspace", "is", "a", "virtualenv", "." ]
7e1cc192b5e53426eed6dbd742918619b8fd60ab
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L47-L63
train
51,062
MacHu-GWU/dataIO-project
dataIO/zzz_manual_install.py
find_linux_venv_py_version
def find_linux_venv_py_version(): """Find python version name used in this virtualenv. For example: ``python2.7``, ``python3.4`` """ available_python_version = [ "python2.6", "python2.7", "python3.3", "python3.4", "python3.5", "python3.6", ] dir_path = os.path.dirname(SRC) for basename in os.listdir(os.path.join(dir_path, BIN_SCRIPTS)): for python_version in available_python_version: if python_version in basename: return python_version raise Exception("Can't find virtualenv python version!")
python
def find_linux_venv_py_version(): """Find python version name used in this virtualenv. For example: ``python2.7``, ``python3.4`` """ available_python_version = [ "python2.6", "python2.7", "python3.3", "python3.4", "python3.5", "python3.6", ] dir_path = os.path.dirname(SRC) for basename in os.listdir(os.path.join(dir_path, BIN_SCRIPTS)): for python_version in available_python_version: if python_version in basename: return python_version raise Exception("Can't find virtualenv python version!")
[ "def", "find_linux_venv_py_version", "(", ")", ":", "available_python_version", "=", "[", "\"python2.6\"", ",", "\"python2.7\"", ",", "\"python3.3\"", ",", "\"python3.4\"", ",", "\"python3.5\"", ",", "\"python3.6\"", ",", "]", "dir_path", "=", "os", ".", "path", "...
Find python version name used in this virtualenv. For example: ``python2.7``, ``python3.4``
[ "Find", "python", "version", "name", "used", "in", "this", "virtualenv", "." ]
7e1cc192b5e53426eed6dbd742918619b8fd60ab
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L66-L84
train
51,063
MacHu-GWU/dataIO-project
dataIO/zzz_manual_install.py
find_venv_DST
def find_venv_DST(): """Find where this package should be installed to in this virtualenv. For example: ``/path-to-venv/lib/python2.7/site-packages/package-name`` """ dir_path = os.path.dirname(SRC) if SYS_NAME == "Windows": DST = os.path.join(dir_path, "Lib", "site-packages", PKG_NAME) elif SYS_NAME in ["Darwin", "Linux"]: python_version = find_linux_venv_py_version() DST = os.path.join(dir_path, "lib", python_version, "site-packages", PKG_NAME) return DST
python
def find_venv_DST(): """Find where this package should be installed to in this virtualenv. For example: ``/path-to-venv/lib/python2.7/site-packages/package-name`` """ dir_path = os.path.dirname(SRC) if SYS_NAME == "Windows": DST = os.path.join(dir_path, "Lib", "site-packages", PKG_NAME) elif SYS_NAME in ["Darwin", "Linux"]: python_version = find_linux_venv_py_version() DST = os.path.join(dir_path, "lib", python_version, "site-packages", PKG_NAME) return DST
[ "def", "find_venv_DST", "(", ")", ":", "dir_path", "=", "os", ".", "path", ".", "dirname", "(", "SRC", ")", "if", "SYS_NAME", "==", "\"Windows\"", ":", "DST", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "\"Lib\"", ",", "\"site-packages\"...
Find where this package should be installed to in this virtualenv. For example: ``/path-to-venv/lib/python2.7/site-packages/package-name``
[ "Find", "where", "this", "package", "should", "be", "installed", "to", "in", "this", "virtualenv", "." ]
7e1cc192b5e53426eed6dbd742918619b8fd60ab
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L87-L100
train
51,064
MacHu-GWU/dataIO-project
dataIO/zzz_manual_install.py
find_DST
def find_DST(): """Find where this package should be installed to. """ if SYS_NAME == "Windows": return os.path.join(site.getsitepackages()[1], PKG_NAME) elif SYS_NAME in ["Darwin", "Linux"]: return os.path.join(site.getsitepackages()[0], PKG_NAME)
python
def find_DST(): """Find where this package should be installed to. """ if SYS_NAME == "Windows": return os.path.join(site.getsitepackages()[1], PKG_NAME) elif SYS_NAME in ["Darwin", "Linux"]: return os.path.join(site.getsitepackages()[0], PKG_NAME)
[ "def", "find_DST", "(", ")", ":", "if", "SYS_NAME", "==", "\"Windows\"", ":", "return", "os", ".", "path", ".", "join", "(", "site", ".", "getsitepackages", "(", ")", "[", "1", "]", ",", "PKG_NAME", ")", "elif", "SYS_NAME", "in", "[", "\"Darwin\"", "...
Find where this package should be installed to.
[ "Find", "where", "this", "package", "should", "be", "installed", "to", "." ]
7e1cc192b5e53426eed6dbd742918619b8fd60ab
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L103-L109
train
51,065
MacHu-GWU/dataIO-project
dataIO/zzz_manual_install.py
md5_of_file
def md5_of_file(abspath): """Md5 value of a file. """ chunk_size = 1024 * 1024 m = hashlib.md5() with open(abspath, "rb") as f: while True: data = f.read(chunk_size) if not data: break m.update(data) return m.hexdigest()
python
def md5_of_file(abspath): """Md5 value of a file. """ chunk_size = 1024 * 1024 m = hashlib.md5() with open(abspath, "rb") as f: while True: data = f.read(chunk_size) if not data: break m.update(data) return m.hexdigest()
[ "def", "md5_of_file", "(", "abspath", ")", ":", "chunk_size", "=", "1024", "*", "1024", "m", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "abspath", ",", "\"rb\"", ")", "as", "f", ":", "while", "True", ":", "data", "=", "f", ".", "r...
Md5 value of a file.
[ "Md5", "value", "of", "a", "file", "." ]
7e1cc192b5e53426eed6dbd742918619b8fd60ab
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L118-L129
train
51,066
MacHu-GWU/dataIO-project
dataIO/zzz_manual_install.py
check_need_install
def check_need_install(): """Check if installed package are exactly the same to this one. By checking md5 value of all files. """ need_install_flag = False for root, _, basename_list in os.walk(SRC): if os.path.basename(root) != "__pycache__": for basename in basename_list: src = os.path.join(root, basename) dst = os.path.join(root.replace(SRC, DST), basename) if os.path.exists(dst): if md5_of_file(src) != md5_of_file(dst): return True else: return True return need_install_flag
python
def check_need_install(): """Check if installed package are exactly the same to this one. By checking md5 value of all files. """ need_install_flag = False for root, _, basename_list in os.walk(SRC): if os.path.basename(root) != "__pycache__": for basename in basename_list: src = os.path.join(root, basename) dst = os.path.join(root.replace(SRC, DST), basename) if os.path.exists(dst): if md5_of_file(src) != md5_of_file(dst): return True else: return True return need_install_flag
[ "def", "check_need_install", "(", ")", ":", "need_install_flag", "=", "False", "for", "root", ",", "_", ",", "basename_list", "in", "os", ".", "walk", "(", "SRC", ")", ":", "if", "os", ".", "path", ".", "basename", "(", "root", ")", "!=", "\"__pycache_...
Check if installed package are exactly the same to this one. By checking md5 value of all files.
[ "Check", "if", "installed", "package", "are", "exactly", "the", "same", "to", "this", "one", ".", "By", "checking", "md5", "value", "of", "all", "files", "." ]
7e1cc192b5e53426eed6dbd742918619b8fd60ab
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L132-L147
train
51,067
ska-sa/katversion
katversion/version.py
is_git
def is_git(path): """Return True if this is a git repo.""" try: repo_dir = run_cmd(path, 'git', 'rev-parse', '--git-dir') return True if repo_dir else False except (OSError, RuntimeError): return False
python
def is_git(path): """Return True if this is a git repo.""" try: repo_dir = run_cmd(path, 'git', 'rev-parse', '--git-dir') return True if repo_dir else False except (OSError, RuntimeError): return False
[ "def", "is_git", "(", "path", ")", ":", "try", ":", "repo_dir", "=", "run_cmd", "(", "path", ",", "'git'", ",", "'rev-parse'", ",", "'--git-dir'", ")", "return", "True", "if", "repo_dir", "else", "False", "except", "(", "OSError", ",", "RuntimeError", ")...
Return True if this is a git repo.
[ "Return", "True", "if", "this", "is", "a", "git", "repo", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L45-L51
train
51,068
ska-sa/katversion
katversion/version.py
date_version
def date_version(scm=None): """Generate a version string based on the SCM type and the date.""" dt = str(time.strftime('%Y%m%d%H%M')) if scm: version = "0.0+unknown.{0}.{1}".format(scm, dt) else: version = "0.0+unknown." + dt return version
python
def date_version(scm=None): """Generate a version string based on the SCM type and the date.""" dt = str(time.strftime('%Y%m%d%H%M')) if scm: version = "0.0+unknown.{0}.{1}".format(scm, dt) else: version = "0.0+unknown." + dt return version
[ "def", "date_version", "(", "scm", "=", "None", ")", ":", "dt", "=", "str", "(", "time", ".", "strftime", "(", "'%Y%m%d%H%M'", ")", ")", "if", "scm", ":", "version", "=", "\"0.0+unknown.{0}.{1}\"", ".", "format", "(", "scm", ",", "dt", ")", "else", "...
Generate a version string based on the SCM type and the date.
[ "Generate", "a", "version", "string", "based", "on", "the", "SCM", "type", "and", "the", "date", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L63-L70
train
51,069
ska-sa/katversion
katversion/version.py
get_git_cleaned_branch_name
def get_git_cleaned_branch_name(path): """Get the git branch name of the current HEAD in path. The branch name is scrubbed to conform to PEP-440. PEP-440 Local version identifiers shall only consist out of: - ASCII letters ( [a-zA-Z] ) - ASCII digits ( [0-9] ) - periods ( . ) https://www.python.org/dev/peps/pep-0440/#local-version-identifiers Parameters ---------- path: str The path to run git commands in. """ # Get name of current branch (or 'HEAD' for a detached HEAD) branch_name = run_cmd(path, 'git', 'rev-parse', '--abbrev-ref', 'HEAD') branch_name = re.sub(r"[^A-Za-z0-9]+", ".", branch_name.strip()) return branch_name
python
def get_git_cleaned_branch_name(path): """Get the git branch name of the current HEAD in path. The branch name is scrubbed to conform to PEP-440. PEP-440 Local version identifiers shall only consist out of: - ASCII letters ( [a-zA-Z] ) - ASCII digits ( [0-9] ) - periods ( . ) https://www.python.org/dev/peps/pep-0440/#local-version-identifiers Parameters ---------- path: str The path to run git commands in. """ # Get name of current branch (or 'HEAD' for a detached HEAD) branch_name = run_cmd(path, 'git', 'rev-parse', '--abbrev-ref', 'HEAD') branch_name = re.sub(r"[^A-Za-z0-9]+", ".", branch_name.strip()) return branch_name
[ "def", "get_git_cleaned_branch_name", "(", "path", ")", ":", "# Get name of current branch (or 'HEAD' for a detached HEAD)", "branch_name", "=", "run_cmd", "(", "path", ",", "'git'", ",", "'rev-parse'", ",", "'--abbrev-ref'", ",", "'HEAD'", ")", "branch_name", "=", "re"...
Get the git branch name of the current HEAD in path. The branch name is scrubbed to conform to PEP-440. PEP-440 Local version identifiers shall only consist out of: - ASCII letters ( [a-zA-Z] ) - ASCII digits ( [0-9] ) - periods ( . ) https://www.python.org/dev/peps/pep-0440/#local-version-identifiers Parameters ---------- path: str The path to run git commands in.
[ "Get", "the", "git", "branch", "name", "of", "the", "current", "HEAD", "in", "path", ".", "The", "branch", "name", "is", "scrubbed", "to", "conform", "to", "PEP", "-", "440", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L73-L91
train
51,070
ska-sa/katversion
katversion/version.py
get_git_version
def get_git_version(path): """Get the GIT version.""" branch_name = get_git_cleaned_branch_name(path) # Determine whether working copy is dirty (i.e. contains modified files) mods = run_cmd(path, 'git', 'status', '--porcelain', '--untracked-files=no') dirty = '.dirty' if mods else '' # Get a list of all commits on branch, with corresponding branch/tag refs # Each line looks something like: "d3e4d42 (HEAD, master, tag: v0.1)" git_output = run_cmd(path, 'git', 'log', '--pretty="%h%d"') commits = git_output.strip().replace('"', '').split('\n') num_commits_since_branch = len(commits) # Short hash of the latest commit short_commit_name = commits[0].partition(' ')[0] # A valid version is sequence of dotted numbers optionally prefixed by 'v' valid_version = re.compile(r'^v?([\.\d]+)$') def tagged_version(commit): """First tag on commit that is valid version, as a list of numbers.""" refs = commit.partition(' ')[2] for ref in refs.lstrip('(').rstrip(')').split(', '): if ref.startswith('tag: '): tag = ref[5:].lower() found = valid_version.match(tag) if found: return [int(v) for v in found.group(1).split('.') if v] return [] # Walk back along branch and find first valid tagged version (or use 0.0) for commit in commits: version_numbers = tagged_version(commit) if version_numbers: break else: version_numbers = [0, 0] # It is a release if current commit has a version tag (and dir is clean) release = (commit == commits[0]) and not dirty if not release: # We are working towards the next (minor) release according to PEP 440 version_numbers[-1] += 1 version = '.'.join([str(v) for v in version_numbers]) if not release: # Development version contains extra embellishments version = ("%s.dev%d+%s.%s%s" % (version, num_commits_since_branch, branch_name, short_commit_name, dirty)) return version
python
def get_git_version(path): """Get the GIT version.""" branch_name = get_git_cleaned_branch_name(path) # Determine whether working copy is dirty (i.e. contains modified files) mods = run_cmd(path, 'git', 'status', '--porcelain', '--untracked-files=no') dirty = '.dirty' if mods else '' # Get a list of all commits on branch, with corresponding branch/tag refs # Each line looks something like: "d3e4d42 (HEAD, master, tag: v0.1)" git_output = run_cmd(path, 'git', 'log', '--pretty="%h%d"') commits = git_output.strip().replace('"', '').split('\n') num_commits_since_branch = len(commits) # Short hash of the latest commit short_commit_name = commits[0].partition(' ')[0] # A valid version is sequence of dotted numbers optionally prefixed by 'v' valid_version = re.compile(r'^v?([\.\d]+)$') def tagged_version(commit): """First tag on commit that is valid version, as a list of numbers.""" refs = commit.partition(' ')[2] for ref in refs.lstrip('(').rstrip(')').split(', '): if ref.startswith('tag: '): tag = ref[5:].lower() found = valid_version.match(tag) if found: return [int(v) for v in found.group(1).split('.') if v] return [] # Walk back along branch and find first valid tagged version (or use 0.0) for commit in commits: version_numbers = tagged_version(commit) if version_numbers: break else: version_numbers = [0, 0] # It is a release if current commit has a version tag (and dir is clean) release = (commit == commits[0]) and not dirty if not release: # We are working towards the next (minor) release according to PEP 440 version_numbers[-1] += 1 version = '.'.join([str(v) for v in version_numbers]) if not release: # Development version contains extra embellishments version = ("%s.dev%d+%s.%s%s" % (version, num_commits_since_branch, branch_name, short_commit_name, dirty)) return version
[ "def", "get_git_version", "(", "path", ")", ":", "branch_name", "=", "get_git_cleaned_branch_name", "(", "path", ")", "# Determine whether working copy is dirty (i.e. contains modified files)", "mods", "=", "run_cmd", "(", "path", ",", "'git'", ",", "'status'", ",", "'-...
Get the GIT version.
[ "Get", "the", "GIT", "version", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L94-L137
train
51,071
ska-sa/katversion
katversion/version.py
get_version_from_scm
def get_version_from_scm(path=None): """Get the current version string of this package using SCM tool. Parameters ---------- path : None or string, optional The SCM checkout path (default is current directory) Returns ------- version : string The version string for this package """ if is_git(path): return 'git', get_git_version(path) elif is_svn(path): return 'svn', get_svn_version(path) return None, None
python
def get_version_from_scm(path=None): """Get the current version string of this package using SCM tool. Parameters ---------- path : None or string, optional The SCM checkout path (default is current directory) Returns ------- version : string The version string for this package """ if is_git(path): return 'git', get_git_version(path) elif is_svn(path): return 'svn', get_svn_version(path) return None, None
[ "def", "get_version_from_scm", "(", "path", "=", "None", ")", ":", "if", "is_git", "(", "path", ")", ":", "return", "'git'", ",", "get_git_version", "(", "path", ")", "elif", "is_svn", "(", "path", ")", ":", "return", "'svn'", ",", "get_svn_version", "("...
Get the current version string of this package using SCM tool. Parameters ---------- path : None or string, optional The SCM checkout path (default is current directory) Returns ------- version : string The version string for this package
[ "Get", "the", "current", "version", "string", "of", "this", "package", "using", "SCM", "tool", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L147-L165
train
51,072
ska-sa/katversion
katversion/version.py
get_version_from_module
def get_version_from_module(module): """Use pkg_resources to get version of installed module by name.""" if module is not None: # Setup.py will not pass in a module, but creating __version__ from # __init__ will. module = str(module).split('.', 1)[0] try: package = pkg_resources.get_distribution(module) return package.version except pkg_resources.DistributionNotFound: # So there you have it the module is not installed. pass
python
def get_version_from_module(module): """Use pkg_resources to get version of installed module by name.""" if module is not None: # Setup.py will not pass in a module, but creating __version__ from # __init__ will. module = str(module).split('.', 1)[0] try: package = pkg_resources.get_distribution(module) return package.version except pkg_resources.DistributionNotFound: # So there you have it the module is not installed. pass
[ "def", "get_version_from_module", "(", "module", ")", ":", "if", "module", "is", "not", "None", ":", "# Setup.py will not pass in a module, but creating __version__ from", "# __init__ will.", "module", "=", "str", "(", "module", ")", ".", "split", "(", "'.'", ",", "...
Use pkg_resources to get version of installed module by name.
[ "Use", "pkg_resources", "to", "get", "version", "of", "installed", "module", "by", "name", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L168-L179
train
51,073
ska-sa/katversion
katversion/version.py
_must_decode
def _must_decode(value): """Copied from pkginfo 1.4.1, _compat module.""" if type(value) is bytes: try: return value.decode('utf-8') except UnicodeDecodeError: return value.decode('latin1') return value
python
def _must_decode(value): """Copied from pkginfo 1.4.1, _compat module.""" if type(value) is bytes: try: return value.decode('utf-8') except UnicodeDecodeError: return value.decode('latin1') return value
[ "def", "_must_decode", "(", "value", ")", ":", "if", "type", "(", "value", ")", "is", "bytes", ":", "try", ":", "return", "value", ".", "decode", "(", "'utf-8'", ")", "except", "UnicodeDecodeError", ":", "return", "value", ".", "decode", "(", "'latin1'",...
Copied from pkginfo 1.4.1, _compat module.
[ "Copied", "from", "pkginfo", "1", ".", "4", ".", "1", "_compat", "module", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L182-L189
train
51,074
ska-sa/katversion
katversion/version.py
get_version_from_unpacked_sdist
def get_version_from_unpacked_sdist(path): """Assume path points to unpacked source distribution and get version.""" # This is a condensed version of the relevant code in pkginfo 1.4.1 try: with open(os.path.join(path, 'PKG-INFO')) as f: data = f.read() except IOError: # Could not load path as an unpacked sdist as it had no PKG-INFO file return fp = StringIO(_must_decode(data)) msg = Parser().parse(fp) value = msg.get('Version') if value != 'UNKNOWN': return value
python
def get_version_from_unpacked_sdist(path): """Assume path points to unpacked source distribution and get version.""" # This is a condensed version of the relevant code in pkginfo 1.4.1 try: with open(os.path.join(path, 'PKG-INFO')) as f: data = f.read() except IOError: # Could not load path as an unpacked sdist as it had no PKG-INFO file return fp = StringIO(_must_decode(data)) msg = Parser().parse(fp) value = msg.get('Version') if value != 'UNKNOWN': return value
[ "def", "get_version_from_unpacked_sdist", "(", "path", ")", ":", "# This is a condensed version of the relevant code in pkginfo 1.4.1", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'PKG-INFO'", ")", ")", "as", "f", ":", "data...
Assume path points to unpacked source distribution and get version.
[ "Assume", "path", "points", "to", "unpacked", "source", "distribution", "and", "get", "version", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L192-L205
train
51,075
ska-sa/katversion
katversion/version.py
get_version_from_file
def get_version_from_file(path): """Find the VERSION_FILE and return its contents. Returns ------- version : string or None """ filename = os.path.join(path, VERSION_FILE) if not os.path.isfile(filename): # Look in the parent directory of path instead. filename = os.path.join(os.path.dirname(path), VERSION_FILE) if not os.path.isfile(filename): filename = '' if filename: with open(filename) as fh: version = fh.readline().strip() if version: return version
python
def get_version_from_file(path): """Find the VERSION_FILE and return its contents. Returns ------- version : string or None """ filename = os.path.join(path, VERSION_FILE) if not os.path.isfile(filename): # Look in the parent directory of path instead. filename = os.path.join(os.path.dirname(path), VERSION_FILE) if not os.path.isfile(filename): filename = '' if filename: with open(filename) as fh: version = fh.readline().strip() if version: return version
[ "def", "get_version_from_file", "(", "path", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "path", ",", "VERSION_FILE", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "# Look in the parent directory of path i...
Find the VERSION_FILE and return its contents. Returns ------- version : string or None
[ "Find", "the", "VERSION_FILE", "and", "return", "its", "contents", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L208-L227
train
51,076
ska-sa/katversion
katversion/version.py
normalised
def normalised(version): """Normalise a version string according to PEP 440, if possible.""" norm_version = pkg_resources.parse_version(version) if not isinstance(norm_version, tuple): # Let setuptools (>= 8) do the normalisation return str(norm_version) else: # Homegrown normalisation for older setuptools (< 8) public, sep, local = version.lower().partition('+') # Remove leading 'v' from public version if len(public) >= 2: if public[0] == 'v' and public[1] in '0123456789': public = public[1:] # Turn all chars except alphanumerics into periods in local version local = NON_ALPHANUMERIC.sub('.', local) return public + sep + local
python
def normalised(version): """Normalise a version string according to PEP 440, if possible.""" norm_version = pkg_resources.parse_version(version) if not isinstance(norm_version, tuple): # Let setuptools (>= 8) do the normalisation return str(norm_version) else: # Homegrown normalisation for older setuptools (< 8) public, sep, local = version.lower().partition('+') # Remove leading 'v' from public version if len(public) >= 2: if public[0] == 'v' and public[1] in '0123456789': public = public[1:] # Turn all chars except alphanumerics into periods in local version local = NON_ALPHANUMERIC.sub('.', local) return public + sep + local
[ "def", "normalised", "(", "version", ")", ":", "norm_version", "=", "pkg_resources", ".", "parse_version", "(", "version", ")", "if", "not", "isinstance", "(", "norm_version", ",", "tuple", ")", ":", "# Let setuptools (>= 8) do the normalisation", "return", "str", ...
Normalise a version string according to PEP 440, if possible.
[ "Normalise", "a", "version", "string", "according", "to", "PEP", "440", "if", "possible", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L230-L245
train
51,077
ska-sa/katversion
katversion/version.py
get_version
def get_version(path=None, module=None): """Return the version string. This function ensures that the version string complies with PEP 440. The format of our version string is: - for RELEASE builds: <major>.<minor> e.g. 0.1 2.4 - for DEVELOPMENT builds: <major>.<minor>.dev<num_branch_commits> \ +<branch_name>.g<short_git_sha>[.dirty] e.g. 1.1.dev34+new.shiny.feature.gfa973da 0.1.dev7+master.gb91ffa6.dirty - for UNKNOWN builds: 0.0+unknown.[<scm_type>.]<date> e.g. 0.0+unknown.svn.201402031023 0.0+unknown.201602081715 The <major>.<minor> substring for development builds will be that of the NEXT (minor) release, in order to allow proper Python version ordering. Parameters ---------- path : None or string, optional A file or directory to use to find the SCM or sdist checkout path (default is the current working directory) module : None or string, optional Get version via module name (e.g. __name__ variable), which takes precedence over path if provided (ignore otherwise) Returns ------- version: string A string representation of the package version """ # Check the module option first. version = get_version_from_module(module) if version: return normalised(version) # Turn path into a valid directory (default is current directory) if path is None: path = os.getcwd() path = os.path.abspath(path) if os.path.exists(path) and not os.path.isdir(path): path = os.path.dirname(path) if not os.path.isdir(path): raise ValueError('No such package source directory: %r' % (path,)) # Check for an sdist in the process of being installed by pip. version = get_version_from_unpacked_sdist(path) if version: return normalised(version) # Check the SCM. scm, version = get_version_from_scm(path) if version: return normalised(version) # Check if there is a katversion file in the given path. version = get_version_from_file(path) if version: return normalised(version) # None of the above got a version so we will make one up based on the date. return normalised(date_version(scm))
python
def get_version(path=None, module=None): """Return the version string. This function ensures that the version string complies with PEP 440. The format of our version string is: - for RELEASE builds: <major>.<minor> e.g. 0.1 2.4 - for DEVELOPMENT builds: <major>.<minor>.dev<num_branch_commits> \ +<branch_name>.g<short_git_sha>[.dirty] e.g. 1.1.dev34+new.shiny.feature.gfa973da 0.1.dev7+master.gb91ffa6.dirty - for UNKNOWN builds: 0.0+unknown.[<scm_type>.]<date> e.g. 0.0+unknown.svn.201402031023 0.0+unknown.201602081715 The <major>.<minor> substring for development builds will be that of the NEXT (minor) release, in order to allow proper Python version ordering. Parameters ---------- path : None or string, optional A file or directory to use to find the SCM or sdist checkout path (default is the current working directory) module : None or string, optional Get version via module name (e.g. __name__ variable), which takes precedence over path if provided (ignore otherwise) Returns ------- version: string A string representation of the package version """ # Check the module option first. version = get_version_from_module(module) if version: return normalised(version) # Turn path into a valid directory (default is current directory) if path is None: path = os.getcwd() path = os.path.abspath(path) if os.path.exists(path) and not os.path.isdir(path): path = os.path.dirname(path) if not os.path.isdir(path): raise ValueError('No such package source directory: %r' % (path,)) # Check for an sdist in the process of being installed by pip. version = get_version_from_unpacked_sdist(path) if version: return normalised(version) # Check the SCM. scm, version = get_version_from_scm(path) if version: return normalised(version) # Check if there is a katversion file in the given path. version = get_version_from_file(path) if version: return normalised(version) # None of the above got a version so we will make one up based on the date. return normalised(date_version(scm))
[ "def", "get_version", "(", "path", "=", "None", ",", "module", "=", "None", ")", ":", "# Check the module option first.", "version", "=", "get_version_from_module", "(", "module", ")", "if", "version", ":", "return", "normalised", "(", "version", ")", "# Turn pa...
Return the version string. This function ensures that the version string complies with PEP 440. The format of our version string is: - for RELEASE builds: <major>.<minor> e.g. 0.1 2.4 - for DEVELOPMENT builds: <major>.<minor>.dev<num_branch_commits> \ +<branch_name>.g<short_git_sha>[.dirty] e.g. 1.1.dev34+new.shiny.feature.gfa973da 0.1.dev7+master.gb91ffa6.dirty - for UNKNOWN builds: 0.0+unknown.[<scm_type>.]<date> e.g. 0.0+unknown.svn.201402031023 0.0+unknown.201602081715 The <major>.<minor> substring for development builds will be that of the NEXT (minor) release, in order to allow proper Python version ordering. Parameters ---------- path : None or string, optional A file or directory to use to find the SCM or sdist checkout path (default is the current working directory) module : None or string, optional Get version via module name (e.g. __name__ variable), which takes precedence over path if provided (ignore otherwise) Returns ------- version: string A string representation of the package version
[ "Return", "the", "version", "string", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L248-L321
train
51,078
ska-sa/katversion
katversion/version.py
_sane_version_list
def _sane_version_list(version): """Ensure the major and minor are int. Parameters ---------- version: list Version components Returns ------- version: list List of components where first two components has been sanitised """ v0 = str(version[0]) if v0: # Test if the major is a number. try: v0 = v0.lstrip("v").lstrip("V") # Handle the common case where tags have v before major. v0 = int(v0) except ValueError: v0 = None if v0 is None: version = [0, 0] + version else: version[0] = v0 try: # Test if the minor is a number. version[1] = int(version[1]) except ValueError: # Insert Minor 0. version = [version[0], 0] + version[1:] return version
python
def _sane_version_list(version): """Ensure the major and minor are int. Parameters ---------- version: list Version components Returns ------- version: list List of components where first two components has been sanitised """ v0 = str(version[0]) if v0: # Test if the major is a number. try: v0 = v0.lstrip("v").lstrip("V") # Handle the common case where tags have v before major. v0 = int(v0) except ValueError: v0 = None if v0 is None: version = [0, 0] + version else: version[0] = v0 try: # Test if the minor is a number. version[1] = int(version[1]) except ValueError: # Insert Minor 0. version = [version[0], 0] + version[1:] return version
[ "def", "_sane_version_list", "(", "version", ")", ":", "v0", "=", "str", "(", "version", "[", "0", "]", ")", "if", "v0", ":", "# Test if the major is a number.", "try", ":", "v0", "=", "v0", ".", "lstrip", "(", "\"v\"", ")", ".", "lstrip", "(", "\"V\""...
Ensure the major and minor are int. Parameters ---------- version: list Version components Returns ------- version: list List of components where first two components has been sanitised
[ "Ensure", "the", "major", "and", "minor", "are", "int", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L324-L360
train
51,079
ska-sa/katversion
katversion/version.py
get_version_list
def get_version_list(path=None, module=None): """Return the version information as a tuple. This uses get_version and breaks the string up. Would make more sense if the version was a tuple throughout katversion. """ major = 0 minor = 0 patch = '' # PEP440 calls this prerelease, postrelease or devrelease ver = get_version(path, module) if ver is not None: ver_segments = _sane_version_list(ver.split(".", 2)) major = ver_segments[0] minor = ver_segments[1] patch = ".".join(ver_segments[2:]) # Rejoin the . # Return None as first field, makes substitution easier in next step. return [None, major, minor, patch]
python
def get_version_list(path=None, module=None): """Return the version information as a tuple. This uses get_version and breaks the string up. Would make more sense if the version was a tuple throughout katversion. """ major = 0 minor = 0 patch = '' # PEP440 calls this prerelease, postrelease or devrelease ver = get_version(path, module) if ver is not None: ver_segments = _sane_version_list(ver.split(".", 2)) major = ver_segments[0] minor = ver_segments[1] patch = ".".join(ver_segments[2:]) # Rejoin the . # Return None as first field, makes substitution easier in next step. return [None, major, minor, patch]
[ "def", "get_version_list", "(", "path", "=", "None", ",", "module", "=", "None", ")", ":", "major", "=", "0", "minor", "=", "0", "patch", "=", "''", "# PEP440 calls this prerelease, postrelease or devrelease", "ver", "=", "get_version", "(", "path", ",", "modu...
Return the version information as a tuple. This uses get_version and breaks the string up. Would make more sense if the version was a tuple throughout katversion.
[ "Return", "the", "version", "information", "as", "a", "tuple", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L363-L381
train
51,080
ska-sa/katversion
katversion/version.py
build_info
def build_info(name, path=None, module=None): """Return the build info tuple.""" verlist = get_version_list(path, module) verlist[0] = name return tuple(verlist)
python
def build_info(name, path=None, module=None): """Return the build info tuple.""" verlist = get_version_list(path, module) verlist[0] = name return tuple(verlist)
[ "def", "build_info", "(", "name", ",", "path", "=", "None", ",", "module", "=", "None", ")", ":", "verlist", "=", "get_version_list", "(", "path", ",", "module", ")", "verlist", "[", "0", "]", "=", "name", "return", "tuple", "(", "verlist", ")" ]
Return the build info tuple.
[ "Return", "the", "build", "info", "tuple", "." ]
f507e46e6c5610aec89a08dd480c9b3721da0f8a
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L384-L388
train
51,081
rainwoodman/kdcount
kdcount/cluster.py
fof.find
def find(self, groupid): """ return all of the indices of particles of groupid """ return self.indices[self.offset[groupid] :self.offset[groupid]+ self.length[groupid]]
python
def find(self, groupid): """ return all of the indices of particles of groupid """ return self.indices[self.offset[groupid] :self.offset[groupid]+ self.length[groupid]]
[ "def", "find", "(", "self", ",", "groupid", ")", ":", "return", "self", ".", "indices", "[", "self", ".", "offset", "[", "groupid", "]", ":", "self", ".", "offset", "[", "groupid", "]", "+", "self", ".", "length", "[", "groupid", "]", "]" ]
return all of the indices of particles of groupid
[ "return", "all", "of", "the", "indices", "of", "particles", "of", "groupid" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L79-L82
train
51,082
rainwoodman/kdcount
kdcount/cluster.py
fof.sum
def sum(self, weights=None): """ return the sum of weights of each object """ if weights is None: weights = self.data.weights return utils.bincount(self.labels, weights, self.N)
python
def sum(self, weights=None): """ return the sum of weights of each object """ if weights is None: weights = self.data.weights return utils.bincount(self.labels, weights, self.N)
[ "def", "sum", "(", "self", ",", "weights", "=", "None", ")", ":", "if", "weights", "is", "None", ":", "weights", "=", "self", ".", "data", ".", "weights", "return", "utils", ".", "bincount", "(", "self", ".", "labels", ",", "weights", ",", "self", ...
return the sum of weights of each object
[ "return", "the", "sum", "of", "weights", "of", "each", "object" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L84-L88
train
51,083
rainwoodman/kdcount
kdcount/cluster.py
fof.center
def center(self, weights=None): """ return the center of each object """ if weights is None: weights = self.data.weights mass = utils.bincount(self.labels, weights, self.N) cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8') for d in range(self.data.pos.shape[-1]): cp[..., d] = utils.bincount(self.labels, weights * self.data.pos[..., d], self.N) cp[..., d] /= mass return cp
python
def center(self, weights=None): """ return the center of each object """ if weights is None: weights = self.data.weights mass = utils.bincount(self.labels, weights, self.N) cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8') for d in range(self.data.pos.shape[-1]): cp[..., d] = utils.bincount(self.labels, weights * self.data.pos[..., d], self.N) cp[..., d] /= mass return cp
[ "def", "center", "(", "self", ",", "weights", "=", "None", ")", ":", "if", "weights", "is", "None", ":", "weights", "=", "self", ".", "data", ".", "weights", "mass", "=", "utils", ".", "bincount", "(", "self", ".", "labels", ",", "weights", ",", "s...
return the center of each object
[ "return", "the", "center", "of", "each", "object" ]
483548f6d27a4f245cd5d98880b5f4edd6cc8dc1
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L90-L100
train
51,084
BlueBrain/nat
nat/paramSample.py
ParamSample.getParamValues
def getParamValues(self, paramName=None, paramId=None, useOnlyValids=True): """ Return the rows of sampleDF that are associated to the parameter specified in paramName. """ if not paramName is None: if not paramId is None: if getParameterTypeNameFromID(paramId) != paramName: raise ValueError("Parameters paramId and paramName " + "passed to ParamSample.getParamValues() are incompatible.") else: if paramId is None: raise ValueError("At least one of the attribute paramName and paramId " + "passed to ParamSample.getParamValues() most not be None.") paramName = getParameterTypeNameFromID(paramId) df = self.sampleDF if useOnlyValids: df = df[df["isValid"] == True] df.loc[:, "paramNames"] = [getParameterTypeNameFromID(param.typeId) for param in df["obj_parameter"]] return df[df["paramNames"] == paramName]
python
def getParamValues(self, paramName=None, paramId=None, useOnlyValids=True): """ Return the rows of sampleDF that are associated to the parameter specified in paramName. """ if not paramName is None: if not paramId is None: if getParameterTypeNameFromID(paramId) != paramName: raise ValueError("Parameters paramId and paramName " + "passed to ParamSample.getParamValues() are incompatible.") else: if paramId is None: raise ValueError("At least one of the attribute paramName and paramId " + "passed to ParamSample.getParamValues() most not be None.") paramName = getParameterTypeNameFromID(paramId) df = self.sampleDF if useOnlyValids: df = df[df["isValid"] == True] df.loc[:, "paramNames"] = [getParameterTypeNameFromID(param.typeId) for param in df["obj_parameter"]] return df[df["paramNames"] == paramName]
[ "def", "getParamValues", "(", "self", ",", "paramName", "=", "None", ",", "paramId", "=", "None", ",", "useOnlyValids", "=", "True", ")", ":", "if", "not", "paramName", "is", "None", ":", "if", "not", "paramId", "is", "None", ":", "if", "getParameterType...
Return the rows of sampleDF that are associated to the parameter specified in paramName.
[ "Return", "the", "rows", "of", "sampleDF", "that", "are", "associated", "to", "the", "parameter", "specified", "in", "paramName", "." ]
0934f06e48e6efedf55a9617b15becae0d7b277c
https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/paramSample.py#L365-L388
train
51,085
BlueBrain/nat
nat/paramSample.py
ParamSample.interpolate
def interpolate(self, interpValues): """ interpValues should be a dictionnary where the keys are the parameter names for the independant variable for which interpolation should be run and the values are the value to which the parameter should be interpolated. """ self.__operations.append(["interpolate", interpValues]) df = self.sampleDF self.interpValues = interpValues for interParamName, value in interpValues.items(): self.__report += "Interpolation of the parameters for independent variables '" \ + interParamName + "' at value " + str(value) + ".\n" for ind, (paramTrace, resType) in enumerate(zip(df["obj_parameter"], df["Result type"])): if resType == "numericalTrace" and interParamName in paramTrace.indepNames: val = paramTrace.getInterp1dValues(value, interParamName, statsToReturn=["mean"]) if isinstance(val, list): if len(val) == 1: val = val[0] else: raise ValueError("This case has not been implemented yet.") df.loc[ind, "Values"] = float(val)
python
def interpolate(self, interpValues): """ interpValues should be a dictionnary where the keys are the parameter names for the independant variable for which interpolation should be run and the values are the value to which the parameter should be interpolated. """ self.__operations.append(["interpolate", interpValues]) df = self.sampleDF self.interpValues = interpValues for interParamName, value in interpValues.items(): self.__report += "Interpolation of the parameters for independent variables '" \ + interParamName + "' at value " + str(value) + ".\n" for ind, (paramTrace, resType) in enumerate(zip(df["obj_parameter"], df["Result type"])): if resType == "numericalTrace" and interParamName in paramTrace.indepNames: val = paramTrace.getInterp1dValues(value, interParamName, statsToReturn=["mean"]) if isinstance(val, list): if len(val) == 1: val = val[0] else: raise ValueError("This case has not been implemented yet.") df.loc[ind, "Values"] = float(val)
[ "def", "interpolate", "(", "self", ",", "interpValues", ")", ":", "self", ".", "__operations", ".", "append", "(", "[", "\"interpolate\"", ",", "interpValues", "]", ")", "df", "=", "self", ".", "sampleDF", "self", ".", "interpValues", "=", "interpValues", ...
interpValues should be a dictionnary where the keys are the parameter names for the independant variable for which interpolation should be run and the values are the value to which the parameter should be interpolated.
[ "interpValues", "should", "be", "a", "dictionnary", "where", "the", "keys", "are", "the", "parameter", "names", "for", "the", "independant", "variable", "for", "which", "interpolation", "should", "be", "run", "and", "the", "values", "are", "the", "value", "to"...
0934f06e48e6efedf55a9617b15becae0d7b277c
https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/paramSample.py#L391-L412
train
51,086
hammerlab/stanity
stanity/psisloo.py
loo_compare
def loo_compare(psisloo1, psisloo2): """ Compares two models using pointwise approximate leave-one-out cross validation. For the method to be valid, the two models should have been fit on the same input data. Parameters ------------------- psisloo1 : Psisloo object for model1 psisloo2 : Psisloo object for model2 Returns ------------------- Dict with two values: diff: difference in elpd (estimated log predictive density) between two models, where a positive value indicates that model2 is a better fit than model1. se_diff: estimated standard error of the difference between model2 & model1. """ ## TODO: confirm that dimensions for psisloo1 & psisloo2 are the same loores = psisloo1.pointwise.join( psisloo2.pointwise, lsuffix = '_m1', rsuffix = '_m2') loores['pw_diff'] = loores.pointwise_elpd_m2 - loores.pointwise_elpd_m1 sum_elpd_diff = loores.apply(numpy.sum).pw_diff sd_elpd_diff = loores.apply(numpy.std).pw_diff elpd_diff = { 'diff' : sum_elpd_diff, 'se_diff' : math.sqrt(len(loores.pw_diff)) * sd_elpd_diff } return elpd_diff
python
def loo_compare(psisloo1, psisloo2): """ Compares two models using pointwise approximate leave-one-out cross validation. For the method to be valid, the two models should have been fit on the same input data. Parameters ------------------- psisloo1 : Psisloo object for model1 psisloo2 : Psisloo object for model2 Returns ------------------- Dict with two values: diff: difference in elpd (estimated log predictive density) between two models, where a positive value indicates that model2 is a better fit than model1. se_diff: estimated standard error of the difference between model2 & model1. """ ## TODO: confirm that dimensions for psisloo1 & psisloo2 are the same loores = psisloo1.pointwise.join( psisloo2.pointwise, lsuffix = '_m1', rsuffix = '_m2') loores['pw_diff'] = loores.pointwise_elpd_m2 - loores.pointwise_elpd_m1 sum_elpd_diff = loores.apply(numpy.sum).pw_diff sd_elpd_diff = loores.apply(numpy.std).pw_diff elpd_diff = { 'diff' : sum_elpd_diff, 'se_diff' : math.sqrt(len(loores.pw_diff)) * sd_elpd_diff } return elpd_diff
[ "def", "loo_compare", "(", "psisloo1", ",", "psisloo2", ")", ":", "## TODO: confirm that dimensions for psisloo1 & psisloo2 are the same", "loores", "=", "psisloo1", ".", "pointwise", ".", "join", "(", "psisloo2", ".", "pointwise", ",", "lsuffix", "=", "'_m1'", ",", ...
Compares two models using pointwise approximate leave-one-out cross validation. For the method to be valid, the two models should have been fit on the same input data. Parameters ------------------- psisloo1 : Psisloo object for model1 psisloo2 : Psisloo object for model2 Returns ------------------- Dict with two values: diff: difference in elpd (estimated log predictive density) between two models, where a positive value indicates that model2 is a better fit than model1. se_diff: estimated standard error of the difference between model2 & model1.
[ "Compares", "two", "models", "using", "pointwise", "approximate", "leave", "-", "one", "-", "out", "cross", "validation", ".", "For", "the", "method", "to", "be", "valid", "the", "two", "models", "should", "have", "been", "fit", "on", "the", "same", "input...
6c36abc207c4ce94f78968501dab839a56f35a41
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psisloo.py#L104-L143
train
51,087
hammerlab/stanity
stanity/psisloo.py
Psisloo.plot
def plot(self): """ Graphical summary of pointwise pareto-k importance-sampling indices Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis) """ seaborn.pointplot( y = self.pointwise.pareto_k, x = self.pointwise.index, join = False)
python
def plot(self): """ Graphical summary of pointwise pareto-k importance-sampling indices Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis) """ seaborn.pointplot( y = self.pointwise.pareto_k, x = self.pointwise.index, join = False)
[ "def", "plot", "(", "self", ")", ":", "seaborn", ".", "pointplot", "(", "y", "=", "self", ".", "pointwise", ".", "pareto_k", ",", "x", "=", "self", ".", "pointwise", ".", "index", ",", "join", "=", "False", ")" ]
Graphical summary of pointwise pareto-k importance-sampling indices Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis)
[ "Graphical", "summary", "of", "pointwise", "pareto", "-", "k", "importance", "-", "sampling", "indices" ]
6c36abc207c4ce94f78968501dab839a56f35a41
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psisloo.py#L65-L74
train
51,088
ornlneutronimaging/ImagingReso
ImagingReso/resonance.py
Resonance.get_isotopic_ratio
def get_isotopic_ratio(self, compound='', element=''): """returns the list of isotopes for the element of the compound defined with their stoichiometric values Parameters: =========== compound: string (default is empty). If empty, all the stoichiometric will be displayed element: string (default is same as compound). Raises: ======= ValueError if element is not defined in the stack """ _stack = self.stack compound = str(compound) if compound == '': _list_compounds = _stack.keys() list_all_dict = {} for _compound in _list_compounds: _compound = str(_compound) _list_element = _stack[_compound]['elements'] list_all_dict[_compound] = {} for _element in _list_element: list_all_dict[_compound][_element] = self.get_isotopic_ratio( compound=_compound, element=_element) return list_all_dict # checking compound is valid list_compounds = _stack.keys() if compound not in list_compounds: list_compounds_joined = ', '.join(list_compounds) raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined)) # checking element is valid if element == '': # we assume that the element and compounds names matched element = compound list_element = _stack[compound].keys() if element not in list_element: list_element_joined = ', '.join(list_element) raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined)) list_istopes = _stack[compound][element]['isotopes']['list'] list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio'] iso_ratio = zip(list_istopes, list_ratio) _stoichiometric_ratio = {} for _iso, _ratio in iso_ratio: _stoichiometric_ratio[_iso] = _ratio return _stoichiometric_ratio
python
def get_isotopic_ratio(self, compound='', element=''): """returns the list of isotopes for the element of the compound defined with their stoichiometric values Parameters: =========== compound: string (default is empty). If empty, all the stoichiometric will be displayed element: string (default is same as compound). Raises: ======= ValueError if element is not defined in the stack """ _stack = self.stack compound = str(compound) if compound == '': _list_compounds = _stack.keys() list_all_dict = {} for _compound in _list_compounds: _compound = str(_compound) _list_element = _stack[_compound]['elements'] list_all_dict[_compound] = {} for _element in _list_element: list_all_dict[_compound][_element] = self.get_isotopic_ratio( compound=_compound, element=_element) return list_all_dict # checking compound is valid list_compounds = _stack.keys() if compound not in list_compounds: list_compounds_joined = ', '.join(list_compounds) raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined)) # checking element is valid if element == '': # we assume that the element and compounds names matched element = compound list_element = _stack[compound].keys() if element not in list_element: list_element_joined = ', '.join(list_element) raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined)) list_istopes = _stack[compound][element]['isotopes']['list'] list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio'] iso_ratio = zip(list_istopes, list_ratio) _stoichiometric_ratio = {} for _iso, _ratio in iso_ratio: _stoichiometric_ratio[_iso] = _ratio return _stoichiometric_ratio
[ "def", "get_isotopic_ratio", "(", "self", ",", "compound", "=", "''", ",", "element", "=", "''", ")", ":", "_stack", "=", "self", ".", "stack", "compound", "=", "str", "(", "compound", ")", "if", "compound", "==", "''", ":", "_list_compounds", "=", "_s...
returns the list of isotopes for the element of the compound defined with their stoichiometric values Parameters: =========== compound: string (default is empty). If empty, all the stoichiometric will be displayed element: string (default is same as compound). Raises: ======= ValueError if element is not defined in the stack
[ "returns", "the", "list", "of", "isotopes", "for", "the", "element", "of", "the", "compound", "defined", "with", "their", "stoichiometric", "values" ]
2da5cd1f565b3128f59d86bcedfd9adc2b02218b
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L145-L196
train
51,089
ornlneutronimaging/ImagingReso
ImagingReso/resonance.py
Resonance.get_density
def get_density(self, compound='', element=''): """returns the list of isotopes for the element of the compound defined with their density Parameters: =========== compound: string (default is empty). If empty, all the stoichiometric will be displayed element: string (default is same as compound). Raises: ======= ValueError if element is not defined in the stack """ _stack = self.stack if compound == '': _list_compounds = _stack.keys() list_all_dict = {} for _compound in _list_compounds: _list_element = _stack[_compound]['elements'] list_all_dict[_compound] = {} for _element in _list_element: list_all_dict[_compound][_element] = self.get_density( compound=_compound, element=_element) return list_all_dict # checking compound is valid list_compounds = _stack.keys() if compound not in list_compounds: list_compounds_joined = ', '.join(list_compounds) raise ValueError("Compound '{}' could not be find in {}".format(compile, list_compounds_joined)) # checking element is valid if element == '': # we assume that the element and compounds names matched element = compound list_element = _stack[compound].keys() if element not in list_element: list_element_joined = ', '.join(list_element) raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined)) return _stack[compound][element]['density']['value']
python
def get_density(self, compound='', element=''): """returns the list of isotopes for the element of the compound defined with their density Parameters: =========== compound: string (default is empty). If empty, all the stoichiometric will be displayed element: string (default is same as compound). Raises: ======= ValueError if element is not defined in the stack """ _stack = self.stack if compound == '': _list_compounds = _stack.keys() list_all_dict = {} for _compound in _list_compounds: _list_element = _stack[_compound]['elements'] list_all_dict[_compound] = {} for _element in _list_element: list_all_dict[_compound][_element] = self.get_density( compound=_compound, element=_element) return list_all_dict # checking compound is valid list_compounds = _stack.keys() if compound not in list_compounds: list_compounds_joined = ', '.join(list_compounds) raise ValueError("Compound '{}' could not be find in {}".format(compile, list_compounds_joined)) # checking element is valid if element == '': # we assume that the element and compounds names matched element = compound list_element = _stack[compound].keys() if element not in list_element: list_element_joined = ', '.join(list_element) raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined)) return _stack[compound][element]['density']['value']
[ "def", "get_density", "(", "self", ",", "compound", "=", "''", ",", "element", "=", "''", ")", ":", "_stack", "=", "self", ".", "stack", "if", "compound", "==", "''", ":", "_list_compounds", "=", "_stack", ".", "keys", "(", ")", "list_all_dict", "=", ...
returns the list of isotopes for the element of the compound defined with their density Parameters: =========== compound: string (default is empty). If empty, all the stoichiometric will be displayed element: string (default is same as compound). Raises: ======= ValueError if element is not defined in the stack
[ "returns", "the", "list", "of", "isotopes", "for", "the", "element", "of", "the", "compound", "defined", "with", "their", "density" ]
2da5cd1f565b3128f59d86bcedfd9adc2b02218b
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L240-L281
train
51,090
ornlneutronimaging/ImagingReso
ImagingReso/resonance.py
Resonance.__calculate_atoms_per_cm3
def __calculate_atoms_per_cm3(self, used_lock=False): """calculate for each element, the atoms per cm3""" stack = self.stack _density_lock = self.density_lock for _name_of_compound in stack.keys(): if used_lock and _density_lock[_name_of_compound]: continue molar_mass_layer, atoms_per_cm3_layer = _utilities.get_atoms_per_cm3_of_layer( compound_dict=stack[_name_of_compound]) # Update layer molar mass stack[_name_of_compound]['molar_mass'] = {'value': molar_mass_layer, 'units': 'g/mol'} # Update atoms per cm3 stack[_name_of_compound]['atoms_per_cm3'] = atoms_per_cm3_layer for _index, _name_of_ele in enumerate(stack[_name_of_compound]['elements']): stack[_name_of_compound][_name_of_ele]['atoms_per_cm3'] = atoms_per_cm3_layer * \ stack[_name_of_compound][ 'stoichiometric_ratio'][_index] self.stack = stack
python
def __calculate_atoms_per_cm3(self, used_lock=False): """calculate for each element, the atoms per cm3""" stack = self.stack _density_lock = self.density_lock for _name_of_compound in stack.keys(): if used_lock and _density_lock[_name_of_compound]: continue molar_mass_layer, atoms_per_cm3_layer = _utilities.get_atoms_per_cm3_of_layer( compound_dict=stack[_name_of_compound]) # Update layer molar mass stack[_name_of_compound]['molar_mass'] = {'value': molar_mass_layer, 'units': 'g/mol'} # Update atoms per cm3 stack[_name_of_compound]['atoms_per_cm3'] = atoms_per_cm3_layer for _index, _name_of_ele in enumerate(stack[_name_of_compound]['elements']): stack[_name_of_compound][_name_of_ele]['atoms_per_cm3'] = atoms_per_cm3_layer * \ stack[_name_of_compound][ 'stoichiometric_ratio'][_index] self.stack = stack
[ "def", "__calculate_atoms_per_cm3", "(", "self", ",", "used_lock", "=", "False", ")", ":", "stack", "=", "self", ".", "stack", "_density_lock", "=", "self", ".", "density_lock", "for", "_name_of_compound", "in", "stack", ".", "keys", "(", ")", ":", "if", "...
calculate for each element, the atoms per cm3
[ "calculate", "for", "each", "element", "the", "atoms", "per", "cm3" ]
2da5cd1f565b3128f59d86bcedfd9adc2b02218b
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L396-L415
train
51,091
ornlneutronimaging/ImagingReso
ImagingReso/resonance.py
Resonance.__update_stack_with_isotopes_infos
def __update_stack_with_isotopes_infos(self, stack: dict): """retrieve the isotopes, isotopes file names, mass and atomic_ratio from each element in stack""" for _key in stack: _elements = stack[_key]['elements'] for _element in _elements: _dict = _utilities.get_isotope_dicts(element=_element, database=self.database) stack[_key][_element] = _dict stack = self.__fill_missing_keys(stack=stack) return stack
python
def __update_stack_with_isotopes_infos(self, stack: dict): """retrieve the isotopes, isotopes file names, mass and atomic_ratio from each element in stack""" for _key in stack: _elements = stack[_key]['elements'] for _element in _elements: _dict = _utilities.get_isotope_dicts(element=_element, database=self.database) stack[_key][_element] = _dict stack = self.__fill_missing_keys(stack=stack) return stack
[ "def", "__update_stack_with_isotopes_infos", "(", "self", ",", "stack", ":", "dict", ")", ":", "for", "_key", "in", "stack", ":", "_elements", "=", "stack", "[", "_key", "]", "[", "'elements'", "]", "for", "_element", "in", "_elements", ":", "_dict", "=", ...
retrieve the isotopes, isotopes file names, mass and atomic_ratio from each element in stack
[ "retrieve", "the", "isotopes", "isotopes", "file", "names", "mass", "and", "atomic_ratio", "from", "each", "element", "in", "stack" ]
2da5cd1f565b3128f59d86bcedfd9adc2b02218b
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L432-L441
train
51,092
ornlneutronimaging/ImagingReso
ImagingReso/resonance.py
Resonance.__update_layer_density
def __update_layer_density(self): """calculate or update the layer density""" _stack = self.stack _density_lock = self.density_lock list_compound = _stack.keys() for _key in list_compound: if _density_lock[_key]: continue _list_ratio = _stack[_key]['stoichiometric_ratio'] _list_density = [] for _element in _stack[_key]['elements']: _list_density.append(_stack[_key][_element]['density']['value']) _compound_density = _utilities.get_compound_density(list_density=_list_density, list_ratio=_list_ratio) _stack[_key]['density']['value'] = _compound_density self.stack = _stack
python
def __update_layer_density(self): """calculate or update the layer density""" _stack = self.stack _density_lock = self.density_lock list_compound = _stack.keys() for _key in list_compound: if _density_lock[_key]: continue _list_ratio = _stack[_key]['stoichiometric_ratio'] _list_density = [] for _element in _stack[_key]['elements']: _list_density.append(_stack[_key][_element]['density']['value']) _compound_density = _utilities.get_compound_density(list_density=_list_density, list_ratio=_list_ratio) _stack[_key]['density']['value'] = _compound_density self.stack = _stack
[ "def", "__update_layer_density", "(", "self", ")", ":", "_stack", "=", "self", ".", "stack", "_density_lock", "=", "self", ".", "density_lock", "list_compound", "=", "_stack", ".", "keys", "(", ")", "for", "_key", "in", "list_compound", ":", "if", "_density_...
calculate or update the layer density
[ "calculate", "or", "update", "the", "layer", "density" ]
2da5cd1f565b3128f59d86bcedfd9adc2b02218b
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L443-L460
train
51,093
ornlneutronimaging/ImagingReso
ImagingReso/resonance.py
Resonance.__update_molar_mass
def __update_molar_mass(self, compound='', element=''): """Re-calculate the molar mass of the element given due to stoichiometric changes Parameters: ========== compound: string (default is '') name of compound element: string (default is '') name of element """ _molar_mass_element = 0 list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio'] list_mass = self.stack[compound][element]['isotopes']['mass']['value'] ratio_mass = zip(list_ratio, list_mass) for _ratio, _mass in ratio_mass: _molar_mass_element += np.float(_ratio) * np.float(_mass) self.stack[compound][element]['molar_mass']['value'] = _molar_mass_element
python
def __update_molar_mass(self, compound='', element=''): """Re-calculate the molar mass of the element given due to stoichiometric changes Parameters: ========== compound: string (default is '') name of compound element: string (default is '') name of element """ _molar_mass_element = 0 list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio'] list_mass = self.stack[compound][element]['isotopes']['mass']['value'] ratio_mass = zip(list_ratio, list_mass) for _ratio, _mass in ratio_mass: _molar_mass_element += np.float(_ratio) * np.float(_mass) self.stack[compound][element]['molar_mass']['value'] = _molar_mass_element
[ "def", "__update_molar_mass", "(", "self", ",", "compound", "=", "''", ",", "element", "=", "''", ")", ":", "_molar_mass_element", "=", "0", "list_ratio", "=", "self", ".", "stack", "[", "compound", "]", "[", "element", "]", "[", "'isotopes'", "]", "[", ...
Re-calculate the molar mass of the element given due to stoichiometric changes Parameters: ========== compound: string (default is '') name of compound element: string (default is '') name of element
[ "Re", "-", "calculate", "the", "molar", "mass", "of", "the", "element", "given", "due", "to", "stoichiometric", "changes" ]
2da5cd1f565b3128f59d86bcedfd9adc2b02218b
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L498-L512
train
51,094
spockNinja/py-yaml-builder
yaml_builder/__init__.py
main
def main(): """Builds a yaml file""" parser = argparse.ArgumentParser(description='Compose a yaml file.') parser.add_argument( 'root', type=argparse.FileType('r'), help='The root yaml file to compose.' ) args = parser.parse_args() result = yaml.load(args.root, Loader=ComposeLoader) print(yaml.dump(result))
python
def main(): """Builds a yaml file""" parser = argparse.ArgumentParser(description='Compose a yaml file.') parser.add_argument( 'root', type=argparse.FileType('r'), help='The root yaml file to compose.' ) args = parser.parse_args() result = yaml.load(args.root, Loader=ComposeLoader) print(yaml.dump(result))
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Compose a yaml file.'", ")", "parser", ".", "add_argument", "(", "'root'", ",", "type", "=", "argparse", ".", "FileType", "(", "'r'", ")", ",", "help...
Builds a yaml file
[ "Builds", "a", "yaml", "file" ]
9a7fb3067afe107397cebd07d950dbb4238a8730
https://github.com/spockNinja/py-yaml-builder/blob/9a7fb3067afe107397cebd07d950dbb4238a8730/yaml_builder/__init__.py#L7-L20
train
51,095
rapidpro/expressions
python/temba_expressions/dates.py
DateParser._parse
def _parse(self, text, mode): """ Returns a date, datetime or time depending on what information is available """ if text is None or not text.strip(): return None # first try to parse as an ISO8601 date, if it doesn't work we'll try other options if len(text) >= 16: try: parsed = iso8601.parse_date(text, default_timezone=None) if not parsed.tzinfo: parsed = self._timezone.localize(parsed) return parsed except iso8601.ParseError: pass # split the text into numerical and text tokens tokens = regex.findall(r'([0-9]+|[^\W\d]+)', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0) # get the possibilities for each token token_possibilities = [] for token in tokens: possibilities = self._get_token_possibilities(token, mode) if len(possibilities) > 0: token_possibilities.append(possibilities) # see what valid sequences we can make sequences = self._get_possible_sequences(mode, len(token_possibilities), self._date_style) for sequence in sequences: match = OrderedDict() for c in range(len(sequence)): component = sequence[c] value = token_possibilities[c].get(component, None) match[component] = value if value is None: break else: # try to make a valid result from this and return if successful obj = self._make_result(match, self._now, self._timezone) if obj is not None: return obj return None
python
def _parse(self, text, mode): """ Returns a date, datetime or time depending on what information is available """ if text is None or not text.strip(): return None # first try to parse as an ISO8601 date, if it doesn't work we'll try other options if len(text) >= 16: try: parsed = iso8601.parse_date(text, default_timezone=None) if not parsed.tzinfo: parsed = self._timezone.localize(parsed) return parsed except iso8601.ParseError: pass # split the text into numerical and text tokens tokens = regex.findall(r'([0-9]+|[^\W\d]+)', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0) # get the possibilities for each token token_possibilities = [] for token in tokens: possibilities = self._get_token_possibilities(token, mode) if len(possibilities) > 0: token_possibilities.append(possibilities) # see what valid sequences we can make sequences = self._get_possible_sequences(mode, len(token_possibilities), self._date_style) for sequence in sequences: match = OrderedDict() for c in range(len(sequence)): component = sequence[c] value = token_possibilities[c].get(component, None) match[component] = value if value is None: break else: # try to make a valid result from this and return if successful obj = self._make_result(match, self._now, self._timezone) if obj is not None: return obj return None
[ "def", "_parse", "(", "self", ",", "text", ",", "mode", ")", ":", "if", "text", "is", "None", "or", "not", "text", ".", "strip", "(", ")", ":", "return", "None", "# first try to parse as an ISO8601 date, if it doesn't work we'll try other options", "if", "len", ...
Returns a date, datetime or time depending on what information is available
[ "Returns", "a", "date", "datetime", "or", "time", "depending", "on", "what", "information", "is", "available" ]
b03d91ec58fc328960bce90ecb5fa49dcf467627
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/dates.py#L98-L145
train
51,096
anteater/anteater
anteater/src/patch_scan.py
negative_report
def negative_report(binary_report, sha256hash, project, patch_file): """ If no match is made and file is clean """ report_url = binary_report['permalink'] scan_date = binary_report['scan_date'] logger.info('File scan date for %s shows a clean status on: %s', patch_file, scan_date) logger.info('Full report avaliable here: %s', report_url) logger.info('The following sha256 hash can be used in your %s.yaml file to suppress this scan:', project) logger.info('%s', sha256hash) with open(reports_dir + "binaries-" + project + ".log", "a") as gate_report: gate_report.write('Non Whitelisted Binary: {}\n'.format(patch_file)) gate_report.write('File scan date for {} shows a clean status on {}\n'.format(patch_file, scan_date)) gate_report.write('The following sha256 hash can be used in your {}.yaml file to suppress this scan:\n'.format(project)) gate_report.write('{}\n'.format(sha256hash))
python
def negative_report(binary_report, sha256hash, project, patch_file): """ If no match is made and file is clean """ report_url = binary_report['permalink'] scan_date = binary_report['scan_date'] logger.info('File scan date for %s shows a clean status on: %s', patch_file, scan_date) logger.info('Full report avaliable here: %s', report_url) logger.info('The following sha256 hash can be used in your %s.yaml file to suppress this scan:', project) logger.info('%s', sha256hash) with open(reports_dir + "binaries-" + project + ".log", "a") as gate_report: gate_report.write('Non Whitelisted Binary: {}\n'.format(patch_file)) gate_report.write('File scan date for {} shows a clean status on {}\n'.format(patch_file, scan_date)) gate_report.write('The following sha256 hash can be used in your {}.yaml file to suppress this scan:\n'.format(project)) gate_report.write('{}\n'.format(sha256hash))
[ "def", "negative_report", "(", "binary_report", ",", "sha256hash", ",", "project", ",", "patch_file", ")", ":", "report_url", "=", "binary_report", "[", "'permalink'", "]", "scan_date", "=", "binary_report", "[", "'scan_date'", "]", "logger", ".", "info", "(", ...
If no match is made and file is clean
[ "If", "no", "match", "is", "made", "and", "file", "is", "clean" ]
a980adbed8563ef92494f565acd371e91f50f155
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L243-L257
train
51,097
anteater/anteater
anteater/src/patch_scan.py
positive_report
def positive_report(binary_report, sha256hash, project, patch_file): """ If a Positive match is found """ failure = True report_url = binary_report['permalink'] scan_date = binary_report['scan_date'] logger.error("Virus Found!") logger.info('File scan date for %s shows a infected status on: %s', patch_file, scan_date) logger.info('Full report avaliable here: %s', report_url)
python
def positive_report(binary_report, sha256hash, project, patch_file): """ If a Positive match is found """ failure = True report_url = binary_report['permalink'] scan_date = binary_report['scan_date'] logger.error("Virus Found!") logger.info('File scan date for %s shows a infected status on: %s', patch_file, scan_date) logger.info('Full report avaliable here: %s', report_url)
[ "def", "positive_report", "(", "binary_report", ",", "sha256hash", ",", "project", ",", "patch_file", ")", ":", "failure", "=", "True", "report_url", "=", "binary_report", "[", "'permalink'", "]", "scan_date", "=", "binary_report", "[", "'scan_date'", "]", "logg...
If a Positive match is found
[ "If", "a", "Positive", "match", "is", "found" ]
a980adbed8563ef92494f565acd371e91f50f155
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L260-L269
train
51,098
RI-imaging/nrefocus
nrefocus/metrics.py
average_gradient
def average_gradient(data, *kwargs): """ Compute average gradient norm of an image """ return np.average(np.array(np.gradient(data))**2)
python
def average_gradient(data, *kwargs): """ Compute average gradient norm of an image """ return np.average(np.array(np.gradient(data))**2)
[ "def", "average_gradient", "(", "data", ",", "*", "kwargs", ")", ":", "return", "np", ".", "average", "(", "np", ".", "array", "(", "np", ".", "gradient", "(", "data", ")", ")", "**", "2", ")" ]
Compute average gradient norm of an image
[ "Compute", "average", "gradient", "norm", "of", "an", "image" ]
ad09aeecace609ab8f9effcb662d2b7d50826080
https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/metrics.py#L4-L7
train
51,099