repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
radzak/rtv-downloader
rtv/onetab.py
get_urls_from_onetab
def get_urls_from_onetab(onetab):
    """Get video urls from a link to the onetab shared page.

    Args:
        onetab (str): Link to a onetab shared page.

    Returns:
        list: List of links to the videos.
    """
    # timeout avoids hanging forever on an unresponsive host.
    html = requests.get(onetab, timeout=30).text
    soup = BeautifulSoup(html, 'lxml')
    # OneTab marks each link container with this exact inline style;
    # find_all is the modern bs4 spelling of the deprecated findAll.
    divs = soup.find_all('div', {'style': 'padding-left: 24px; '
                                          'padding-top: 8px; '
                                          'position: relative; '
                                          'font-size: 13px;'})
    return [div.find('a').attrs['href'] for div in divs]
python
def get_urls_from_onetab(onetab): """ Get video urls from a link to the onetab shared page. Args: onetab (str): Link to a onetab shared page. Returns: list: List of links to the videos. """ html = requests.get(onetab).text soup = BeautifulSoup(html, 'lxml') divs = soup.findAll('div', {'style': 'padding-left: 24px; ' 'padding-top: 8px; ' 'position: relative; ' 'font-size: 13px;'}) return [div.find('a').attrs['href'] for div in divs]
[ "def", "get_urls_from_onetab", "(", "onetab", ")", ":", "html", "=", "requests", ".", "get", "(", "onetab", ")", ".", "text", "soup", "=", "BeautifulSoup", "(", "html", ",", "'lxml'", ")", "divs", "=", "soup", ".", "findAll", "(", "'div'", ",", "{", ...
Get video urls from a link to the onetab shared page. Args: onetab (str): Link to a onetab shared page. Returns: list: List of links to the videos.
[ "Get", "video", "urls", "from", "a", "link", "to", "the", "onetab", "shared", "page", "." ]
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/onetab.py#L5-L24
lobocv/pyperform
pyperform/cprofile_parser.py
cProfileFuncStat.from_dict
def from_dict(cls, d):
    """Used to create instances of this class from a pstats dict.

    Args:
        d (dict): pstats mapping of (filename, lineno, name) ->
            (ncalls, non-recursive calls, total time, cumulative time
            [, subcall stats]).

    Returns:
        list: One cProfileFuncStat per entry in `d`.
    """
    stats = []
    # d.items() works on both Python 2 and 3; the original
    # iteritems() call is Python-2 only.
    for (filename, lineno, name), stat_values in d.items():
        if len(stat_values) == 5:
            ncalls, ncall_nr, total_time, cum_time, subcall_stats = stat_values
        else:
            # Leaf entries carry no sub-call statistics.
            ncalls, ncall_nr, total_time, cum_time = stat_values
            subcall_stats = None
        stat = cProfileFuncStat(filename, lineno, name, ncalls, ncall_nr,
                                total_time, cum_time, subcall_stats)
        stats.append(stat)
    return stats
python
def from_dict(cls, d): """Used to create an instance of this class from a pstats dict item""" stats = [] for (filename, lineno, name), stat_values in d.iteritems(): if len(stat_values) == 5: ncalls, ncall_nr, total_time, cum_time, subcall_stats = stat_values else: ncalls, ncall_nr, total_time, cum_time = stat_values subcall_stats = None stat = cProfileFuncStat(filename, lineno, name, ncalls, ncall_nr, total_time, cum_time, subcall_stats) stats.append(stat) return stats
[ "def", "from_dict", "(", "cls", ",", "d", ")", ":", "stats", "=", "[", "]", "for", "(", "filename", ",", "lineno", ",", "name", ")", ",", "stat_values", "in", "d", ".", "iteritems", "(", ")", ":", "if", "len", "(", "stat_values", ")", "==", "5", ...
Used to create an instance of this class from a pstats dict item
[ "Used", "to", "create", "an", "instance", "of", "this", "class", "from", "a", "pstats", "dict", "item" ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L62-L74
lobocv/pyperform
pyperform/cprofile_parser.py
cProfileFuncStat.to_dict
def to_dict(self):
    """Convert back to the pstats dictionary representation (used for
    saving back as pstats binary file)."""
    key = (self.filename, self.line_number, self.name)
    base = (self.ncalls, self.nonrecursive_calls,
            self.own_time_s, self.cummulative_time_s)
    if self.subcall is None:
        # No sub-call info: pstats stores a 4-tuple.
        return {key: base}
    if isinstance(self.subcall, dict):
        subcalls = self.subcall
    else:
        # A list of child stat objects: merge their dict forms.
        subcalls = {}
        for child in self.subcall:
            subcalls.update(child.to_dict())
    return {key: base + (subcalls,)}
python
def to_dict(self): """Convert back to the pstats dictionary representation (used for saving back as pstats binary file)""" if self.subcall is not None: if isinstance(self.subcall, dict): subcalls = self.subcall else: subcalls = {} for s in self.subcall: subcalls.update(s.to_dict()) return {(self.filename, self.line_number, self.name): \ (self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s, subcalls)} else: return {(self.filename, self.line_number, self.name): \ (self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s)}
[ "def", "to_dict", "(", "self", ")", ":", "if", "self", ".", "subcall", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "subcall", ",", "dict", ")", ":", "subcalls", "=", "self", ".", "subcall", "else", ":", "subcalls", "=", "{", "}"...
Convert back to the pstats dictionary representation (used for saving back as pstats binary file)
[ "Convert", "back", "to", "the", "pstats", "dictionary", "representation", "(", "used", "for", "saving", "back", "as", "pstats", "binary", "file", ")" ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L76-L89
lobocv/pyperform
pyperform/cprofile_parser.py
cProfileParser.exclude_functions
def exclude_functions(self, *funcs):
    """Excludes the contributions from the following functions."""
    # Flag every requested function, then recompute the total run time
    # from the stats that remain included.
    for func in funcs:
        func.exclude = True
    remaining = (s.own_time_s for s in self.stats if not s.exclude)
    cProfileFuncStat.run_time_s = sum(remaining)
python
def exclude_functions(self, *funcs): """ Excludes the contributions from the following functions. """ for f in funcs: f.exclude = True run_time_s = sum(0 if s.exclude else s.own_time_s for s in self.stats) cProfileFuncStat.run_time_s = run_time_s
[ "def", "exclude_functions", "(", "self", ",", "*", "funcs", ")", ":", "for", "f", "in", "funcs", ":", "f", ".", "exclude", "=", "True", "run_time_s", "=", "sum", "(", "0", "if", "s", ".", "exclude", "else", "s", ".", "own_time_s", "for", "s", "in",...
Excludes the contributions from the following functions.
[ "Excludes", "the", "contributions", "from", "the", "following", "functions", "." ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L111-L118
lobocv/pyperform
pyperform/cprofile_parser.py
cProfileParser.get_top
def get_top(self, stat, n):
    """Return the top n values when sorting by 'stat'."""
    ranked = sorted(self.stats,
                    key=lambda entry: getattr(entry, stat),
                    reverse=True)
    return ranked[:n]
python
def get_top(self, stat, n): """Return the top n values when sorting by 'stat'""" return sorted(self.stats, key=lambda x: getattr(x, stat), reverse=True)[:n]
[ "def", "get_top", "(", "self", ",", "stat", ",", "n", ")", ":", "return", "sorted", "(", "self", ".", "stats", ",", "key", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "stat", ")", ",", "reverse", "=", "True", ")", "[", ":", "n", "]" ]
Return the top n values when sorting by 'stat
[ "Return", "the", "top", "n", "values", "when", "sorting", "by", "stat" ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L120-L122
lobocv/pyperform
pyperform/cprofile_parser.py
cProfileParser.save_pstat
def save_pstat(self, path):
    """Save the modified pstats file."""
    # Merge every non-excluded stat entry back into a single pstats dict.
    merged = {}
    for entry in self.stats:
        if entry.exclude:
            continue
        merged.update(entry.to_dict())
    # pstats files are marshal-serialized dictionaries.
    with open(path, 'wb') as out:
        marshal.dump(merged, out)
python
def save_pstat(self, path): """ Save the modified pstats file """ stats = {} for s in self.stats: if not s.exclude: stats.update(s.to_dict()) with open(path, 'wb') as f: marshal.dump(stats, f)
[ "def", "save_pstat", "(", "self", ",", "path", ")", ":", "stats", "=", "{", "}", "for", "s", "in", "self", ".", "stats", ":", "if", "not", "s", ".", "exclude", ":", "stats", ".", "update", "(", "s", ".", "to_dict", "(", ")", ")", "with", "open"...
Save the modified pstats file
[ "Save", "the", "modified", "pstats", "file" ]
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L124-L134
paramono/duration
duration/__init__.py
safe_int
def safe_int(value):
    """Tries to convert a value to int; returns 0 if conversion failed."""
    try:
        result = int(value)
    except (TypeError, ValueError):
        # Unconvertible values fall back to zero.
        return 0
    if result < 0:
        raise NegativeDurationError(
            'Negative values in duration strings are not allowed!'
        )
    return result
python
def safe_int(value): """ Tries to convert a value to int; returns 0 if conversion failed """ try: result = int(value) if result < 0: raise NegativeDurationError( 'Negative values in duration strings are not allowed!' ) except NegativeDurationError as exc: raise exc except (TypeError, ValueError): result = 0 return result
[ "def", "safe_int", "(", "value", ")", ":", "try", ":", "result", "=", "int", "(", "value", ")", "if", "result", "<", "0", ":", "raise", "NegativeDurationError", "(", "'Negative values in duration strings are not allowed!'", ")", "except", "NegativeDurationError", ...
Tries to convert a value to int; returns 0 if conversion failed
[ "Tries", "to", "convert", "a", "value", "to", "int", ";", "returns", "0", "if", "conversion", "failed" ]
train
https://github.com/paramono/duration/blob/e0ef5bfa0eba4d9ea031695bb0e9a9fa4fa24d9a/duration/__init__.py#L18-L32
paramono/duration
duration/__init__.py
_parse
def _parse(value, strict=True):
    """Preliminary duration value parser.

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration value exceed allowed values.
    """
    # hh: prefix is optional; mm:ss is always required.
    pattern = r'(?:(?P<hours>\d+):)?(?P<minutes>\d+):(?P<seconds>\d+)'
    match = re.match(pattern, value)
    if match is None:
        raise ValueError('Invalid duration value: %s' % value)
    parts = tuple(
        safe_int(match.group(field))
        for field in ('hours', 'minutes', 'seconds')
    )
    check_tuple(parts, strict)
    return parts
python
def _parse(value, strict=True): """ Preliminary duration value parser strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration value exceed allowed values """ pattern = r'(?:(?P<hours>\d+):)?(?P<minutes>\d+):(?P<seconds>\d+)' match = re.match(pattern, value) if not match: raise ValueError('Invalid duration value: %s' % value) hours = safe_int(match.group('hours')) minutes = safe_int(match.group('minutes')) seconds = safe_int(match.group('seconds')) check_tuple((hours, minutes, seconds,), strict) return (hours, minutes, seconds,)
[ "def", "_parse", "(", "value", ",", "strict", "=", "True", ")", ":", "pattern", "=", "r'(?:(?P<hours>\\d+):)?(?P<minutes>\\d+):(?P<seconds>\\d+)'", "match", "=", "re", ".", "match", "(", "pattern", ",", "value", ")", "if", "not", "match", ":", "raise", "ValueE...
Preliminary duration value parser strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration value exceed allowed values
[ "Preliminary", "duration", "value", "parser" ]
train
https://github.com/paramono/duration/blob/e0ef5bfa0eba4d9ea031695bb0e9a9fa4fa24d9a/duration/__init__.py#L43-L60
paramono/duration
duration/__init__.py
to_iso8601
def to_iso8601(value, strict=True, force_int=True):
    """converts duration value to ISO8601 string

    accepts integers, hh:mm:ss or mm:ss strings, timedelta objects

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration string exceed allowed values
    """
    total = to_seconds(value, strict, force_int)

    # Split the total seconds into days/hours/minutes/seconds.
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    days = int(days)
    hours = int(hours)
    minutes = int(minutes)
    seconds = round(seconds, 6)

    date_part = '%sD' % days if days else ''

    time_part = 'T'
    # A unit is printed whenever it, or any larger unit, is non-zero.
    show_hours = bool(date_part or hours)
    if show_hours:
        time_part += '{:02}H'.format(hours)
    show_minutes = show_hours or bool(minutes)
    if show_minutes:
        time_part += '{:02}M'.format(minutes)

    if isinstance(seconds, int) or force_int:
        seconds_repr = '{:02}'.format(int(seconds))
    else:
        # 9 chars long w/leading 0, 6 digits after the decimal point.
        seconds_repr = '%09.6f' % seconds
    time_part += '{}S'.format(seconds_repr)

    return 'P' + date_part + time_part
python
def to_iso8601(value, strict=True, force_int=True): """ converts duration value to ISO8601 string accepts integers, hh:mm:ss or mm:ss strings, timedelta objects strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration string exceed allowed values """ # split seconds to larger units # seconds = value.total_seconds() seconds = to_seconds(value, strict, force_int) minutes, seconds = divmod(seconds, 60) hours, minutes = divmod(minutes, 60) days, hours = divmod(hours, 24) days, hours, minutes = map(int, (days, hours, minutes)) seconds = round(seconds, 6) # build date date = '' if days: date = '%sD' % days # build time time = 'T' # hours bigger_exists = date or hours if bigger_exists: time += '{:02}H'.format(hours) # minutes bigger_exists = bigger_exists or minutes if bigger_exists: time += '{:02}M'.format(minutes) # seconds if isinstance(seconds, int) or force_int: seconds = '{:02}'.format(int(seconds)) else: # 9 chars long w/leading 0, 6 digits after decimal seconds = '%09.6f' % seconds time += '{}S'.format(seconds) return 'P' + date + time
[ "def", "to_iso8601", "(", "value", ",", "strict", "=", "True", ",", "force_int", "=", "True", ")", ":", "# split seconds to larger units", "# seconds = value.total_seconds()", "seconds", "=", "to_seconds", "(", "value", ",", "strict", ",", "force_int", ")", "minut...
converts duration value to ISO8601 string accepts integers, hh:mm:ss or mm:ss strings, timedelta objects strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration string exceed allowed values
[ "converts", "duration", "value", "to", "ISO8601", "string", "accepts", "integers", "hh", ":", "mm", ":", "ss", "or", "mm", ":", "ss", "strings", "timedelta", "objects" ]
train
https://github.com/paramono/duration/blob/e0ef5bfa0eba4d9ea031695bb0e9a9fa4fa24d9a/duration/__init__.py#L92-L136
paramono/duration
duration/__init__.py
to_seconds
def to_seconds(value, strict=True, force_int=True):
    """converts duration value to integer seconds

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration value exceed allowed values
    """
    if isinstance(value, int):
        # A bare integer is already a number of seconds.
        return value
    if isinstance(value, timedelta):
        total = value.total_seconds()
        return int(round(total)) if force_int else total
    if isinstance(value, str):
        hours, minutes, seconds = _parse(value, strict)
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = value
    else:
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    if not (hours or minutes or seconds):
        raise ValueError('No hours, minutes or seconds found')
    return hours * 3600 + minutes * 60 + seconds
python
def to_seconds(value, strict=True, force_int=True): """ converts duration value to integer seconds strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration value exceed allowed values """ if isinstance(value, int): return value # assuming it's seconds elif isinstance(value, timedelta): seconds = value.total_seconds() if force_int: seconds = int(round(seconds)) return seconds elif isinstance(value, str): hours, minutes, seconds = _parse(value, strict) elif isinstance(value, tuple): check_tuple(value, strict) hours, minutes, seconds = value else: raise TypeError( 'Value %s (type %s) not supported' % ( value, type(value).__name__ ) ) if not (hours or minutes or seconds): raise ValueError('No hours, minutes or seconds found') result = hours*3600 + minutes*60 + seconds return result
[ "def", "to_seconds", "(", "value", ",", "strict", "=", "True", ",", "force_int", "=", "True", ")", ":", "if", "isinstance", "(", "value", ",", "int", ")", ":", "return", "value", "# assuming it's seconds", "elif", "isinstance", "(", "value", ",", "timedelt...
converts duration value to integer seconds strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration value exceed allowed values
[ "converts", "duration", "value", "to", "integer", "seconds" ]
train
https://github.com/paramono/duration/blob/e0ef5bfa0eba4d9ea031695bb0e9a9fa4fa24d9a/duration/__init__.py#L139-L169
paramono/duration
duration/__init__.py
to_timedelta
def to_timedelta(value, strict=True):
    """converts duration string to timedelta

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration string exceed allowed values
    """
    if isinstance(value, timedelta):
        # Already a timedelta: pass through unchanged.
        return value
    if isinstance(value, int):
        # A bare integer is interpreted as a number of seconds.
        return timedelta(seconds=value)
    if isinstance(value, str):
        hours, minutes, seconds = _parse(value, strict)
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = value
    else:
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
python
def to_timedelta(value, strict=True): """ converts duration string to timedelta strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration string exceed allowed values """ if isinstance(value, int): return timedelta(seconds=value) # assuming it's seconds elif isinstance(value, timedelta): return value elif isinstance(value, str): hours, minutes, seconds = _parse(value, strict) elif isinstance(value, tuple): check_tuple(value, strict) hours, minutes, seconds = value else: raise TypeError( 'Value %s (type %s) not supported' % ( value, type(value).__name__ ) ) return timedelta(hours=hours, minutes=minutes, seconds=seconds)
[ "def", "to_timedelta", "(", "value", ",", "strict", "=", "True", ")", ":", "if", "isinstance", "(", "value", ",", "int", ")", ":", "return", "timedelta", "(", "seconds", "=", "value", ")", "# assuming it's seconds", "elif", "isinstance", "(", "value", ",",...
converts duration string to timedelta strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration string exceed allowed values
[ "converts", "duration", "string", "to", "timedelta" ]
train
https://github.com/paramono/duration/blob/e0ef5bfa0eba4d9ea031695bb0e9a9fa4fa24d9a/duration/__init__.py#L172-L194
paramono/duration
duration/__init__.py
to_tuple
def to_tuple(value, strict=True, force_int=True):
    """converts duration value to tuple of integers

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration value exceed allowed values

    Raises:
        TypeError: if `value` is not an int, str, tuple or timedelta.
    """
    if isinstance(value, int):
        seconds = value
        minutes, seconds = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
    elif isinstance(value, str):
        hours, minutes, seconds = _fix_tuple(
            _parse(value, strict)
        )
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = _fix_tuple(value)
    elif isinstance(value, timedelta):
        seconds = value.total_seconds()
        if force_int:
            seconds = int(round(seconds))
        minutes, seconds = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
    else:
        # Mirror to_seconds()/to_timedelta(): reject unsupported types
        # explicitly instead of failing with UnboundLocalError below.
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    return (hours, minutes, seconds,)
python
def to_tuple(value, strict=True, force_int=True): """ converts duration value to tuple of integers strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration value exceed allowed values """ if isinstance(value, int): seconds = value minutes, seconds = divmod(seconds, 60) hours, minutes = divmod(minutes, 60) elif isinstance(value, str): hours, minutes, seconds = _fix_tuple( _parse(value, strict) ) elif isinstance(value, tuple): check_tuple(value, strict) hours, minutes, seconds = _fix_tuple(value) elif isinstance(value, timedelta): seconds = value.total_seconds() if force_int: seconds = int(round(seconds)) minutes, seconds = divmod(seconds, 60) hours, minutes = divmod(minutes, 60) return (hours, minutes, seconds,)
[ "def", "to_tuple", "(", "value", ",", "strict", "=", "True", ",", "force_int", "=", "True", ")", ":", "if", "isinstance", "(", "value", ",", "int", ")", ":", "seconds", "=", "value", "minutes", ",", "seconds", "=", "divmod", "(", "seconds", ",", "60"...
converts duration value to tuple of integers strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration value exceed allowed values
[ "converts", "duration", "value", "to", "tuple", "of", "integers" ]
train
https://github.com/paramono/duration/blob/e0ef5bfa0eba4d9ea031695bb0e9a9fa4fa24d9a/duration/__init__.py#L197-L222
tonybaloney/libcloud.api
libcloud_api/api/utils.py
name_url
def name_url(provider, cloud, method_name):
    """Get a URL for a method in a driver."""
    if '_' not in method_name:
        # Single-word names cannot be mapped to a REST action.
        return False

    # Convention for libcloud is ex_ are extended methods.
    extra = method_name.startswith('ex_')
    if extra:
        method_name = method_name.replace('ex_', '', 1)
    head = method_name.split('_')[0]

    # Try to semantically match the method name to a REST action.
    if head in get_sem_verbs:
        method = 'GET'
        for verb in get_sem_verbs:
            method_name = method_name.replace('%s_' % verb, '', 1)
    elif head in delete_sem_verbs:
        method = 'DELETE'
    elif head in put_sem_verbs:
        method = 'PUT'
    else:
        method = 'POST'

    prefix = 'extensions/' if extra else ''
    uri = '/%s/%s/%s%s' % (provider, cloud, prefix, method_name)
    return (method, uri)
python
def name_url(provider, cloud, method_name): """ Get a URL for a method in a driver """ snake_parts = method_name.split('_') if len(snake_parts) <= 1: return False # Convention for libcloud is ex_ are extended methods if snake_parts[0] == 'ex': extra = True method_name = method_name.replace('ex_', '', 1) else: extra = False snake_parts = method_name.split('_') # Try to semantically match the method name to a REST action if snake_parts[0] in get_sem_verbs: method = 'GET' for verb in get_sem_verbs: method_name = method_name.replace('%s_' % verb, '', 1) elif snake_parts[0] in delete_sem_verbs: method = 'DELETE' elif snake_parts[0] in put_sem_verbs: method = 'PUT' else: method = 'POST' uri = '/%s/%s/%s%s' % (provider, cloud, 'extensions/' if extra else '', method_name) return (method, uri)
[ "def", "name_url", "(", "provider", ",", "cloud", ",", "method_name", ")", ":", "snake_parts", "=", "method_name", ".", "split", "(", "'_'", ")", "if", "len", "(", "snake_parts", ")", "<=", "1", ":", "return", "False", "# Convention for libcloud is ex_ are ext...
Get a URL for a method in a driver
[ "Get", "a", "URL", "for", "a", "method", "in", "a", "driver" ]
train
https://github.com/tonybaloney/libcloud.api/blob/496999971bceb28d94afc9f134b5c83118837454/libcloud_api/api/utils.py#L9-L40
taddeus/wspy
frame.py
contains_frame
def contains_frame(data):
    """Read the frame length from the start of `data` and check if the
    data is long enough to contain the entire frame.

    Args:
        data: raw buffer (Python 2 str / Python 3 bytes) that may hold a
            partial WebSocket frame.

    Returns:
        bool: True if `data` holds at least one complete frame.
    """
    if len(data) < 2:
        # Not even the two mandatory header bytes are present.
        return False

    # Slice (rather than index) so this works on both Python 2 str and
    # Python 3 bytes, where indexing yields an int that struct rejects.
    b2 = struct.unpack('!B', data[1:2])[0]
    payload_len = b2 & 0x7F
    payload_start = 2

    if payload_len == 126:
        if len(data) > 4:
            payload_len = struct.unpack('!H', data[2:4])[0]
            payload_start = 4
    elif payload_len == 127:
        if len(data) > 12:
            payload_len = struct.unpack('!Q', data[4:12])[0]
            payload_start = 12

    return len(data) >= payload_len + payload_start
python
def contains_frame(data): """ Read the frame length from the start of `data` and check if the data is long enough to contain the entire frame. """ if len(data) < 2: return False b2 = struct.unpack('!B', data[1])[0] payload_len = b2 & 0x7F payload_start = 2 if payload_len == 126: if len(data) > 4: payload_len = struct.unpack('!H', data[2:4])[0] payload_start = 4 elif payload_len == 127: if len(data) > 12: payload_len = struct.unpack('!Q', data[4:12])[0] payload_start = 12 return len(data) >= payload_len + payload_start
[ "def", "contains_frame", "(", "data", ")", ":", "if", "len", "(", "data", ")", "<", "2", ":", "return", "False", "b2", "=", "struct", ".", "unpack", "(", "'!B'", ",", "data", "[", "1", "]", ")", "[", "0", "]", "payload_len", "=", "b2", "&", "0x...
Read the frame length from the start of `data` and check if the data is long enough to contain the entire frame.
[ "Read", "the", "frame", "length", "from", "the", "start", "of", "data", "and", "check", "if", "the", "data", "is", "long", "enough", "to", "contain", "the", "entire", "frame", "." ]
train
https://github.com/taddeus/wspy/blob/13f054a72442bb8dcc37b0ac011cab6025830d66/frame.py#L294-L317
taddeus/wspy
frame.py
mask
def mask(key, original):
    """Mask an octet string using the given masking key.

    The following masking algorithm is used, as defined in RFC 6455:

    for each octet:
        j = i MOD 4
        transformed-octet-i = original-octet-i XOR masking-key-octet-j

    Args:
        key: 4-octet masking key (bytes / Python 2 str / bytearray).
        original: payload octets to (un)mask.

    Returns:
        bytearray: the masked payload.

    Raises:
        ValueError: if `key` is not exactly 4 octets long.
    """
    if len(key) != 4:
        raise ValueError('invalid masking key "%s"' % key)

    # bytearray() yields integer octets on both Python 2 and 3, replacing
    # the Python-2-only map(ord, key) of the original.
    key = bytearray(key)
    masked = bytearray(original)

    # range() instead of the Python-2-only xrange().
    for i in range(len(masked)):
        masked[i] ^= key[i % 4]

    return masked
python
def mask(key, original): """ Mask an octet string using the given masking key. The following masking algorithm is used, as defined in RFC 6455: for each octet: j = i MOD 4 transformed-octet-i = original-octet-i XOR masking-key-octet-j """ if len(key) != 4: raise ValueError('invalid masking key "%s"' % key) key = map(ord, key) masked = bytearray(original) for i in xrange(len(masked)): masked[i] ^= key[i % 4] return masked
[ "def", "mask", "(", "key", ",", "original", ")", ":", "if", "len", "(", "key", ")", "!=", "4", ":", "raise", "ValueError", "(", "'invalid masking key \"%s\"'", "%", "key", ")", "key", "=", "map", "(", "ord", ",", "key", ")", "masked", "=", "bytearray...
Mask an octet string using the given masking key. The following masking algorithm is used, as defined in RFC 6455: for each octet: j = i MOD 4 transformed-octet-i = original-octet-i XOR masking-key-octet-j
[ "Mask", "an", "octet", "string", "using", "the", "given", "masking", "key", ".", "The", "following", "masking", "algorithm", "is", "used", "as", "defined", "in", "RFC", "6455", ":" ]
train
https://github.com/taddeus/wspy/blob/13f054a72442bb8dcc37b0ac011cab6025830d66/frame.py#L320-L338
taddeus/wspy
frame.py
Frame.pack
def pack(self):
    """Pack the frame into a string according to the following scheme:

    +-+-+-+-+-------+-+-------------+-------------------------------+
    |F|R|R|R| opcode|M| Payload len |    Extended payload length    |
    |I|S|S|S|  (4)  |A|     (7)     |             (16/64)           |
    |N|V|V|V|       |S|             |   (if payload len==126/127)   |
    | |1|2|3|       |K|             |                               |
    +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
    |     Extended payload length continued, if payload len == 127  |
    + - - - - - - - - - - - - - - - +-------------------------------+
    |                               |Masking-key, if MASK set to 1  |
    +-------------------------------+-------------------------------+
    | Masking-key (continued)       |          Payload Data         |
    +-------------------------------- - - - - - - - - - - - - - - - +
    :                     Payload Data continued ...                :
    + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
    |                     Payload Data continued ...                |
    +---------------------------------------------------------------+
    """
    first_byte = ((self.final << 7) | (self.rsv1 << 6) | (self.rsv2 << 5)
                  | (self.rsv3 << 4) | (self.opcode & 0xf))
    header = struct.pack('!B', first_byte)

    mask_bit = 0x80 if self.masking_key else 0
    payload_len = len(self.payload)

    # Choose the shortest length encoding that fits the payload.
    if payload_len <= 125:
        header += struct.pack('!B', mask_bit | payload_len)
    elif payload_len < (1 << 16):
        header += struct.pack('!BH', mask_bit | 126, payload_len)
    elif payload_len < (1 << 63):
        header += struct.pack('!BQ', mask_bit | 127, payload_len)
    else:
        # FIXME: RFC 6455 defines an action for this...
        raise Exception('the payload length is too damn high!')

    if mask_bit:
        return header + self.masking_key + self.mask_payload()

    return header + self.payload
python
def pack(self): """ Pack the frame into a string according to the following scheme: +-+-+-+-+-------+-+-------------+-------------------------------+ |F|R|R|R| opcode|M| Payload len | Extended payload length | |I|S|S|S| (4) |A| (7) | (16/64) | |N|V|V|V| |S| | (if payload len==126/127) | | |1|2|3| |K| | | +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - + | Extended payload length continued, if payload len == 127 | + - - - - - - - - - - - - - - - +-------------------------------+ | |Masking-key, if MASK set to 1 | +-------------------------------+-------------------------------+ | Masking-key (continued) | Payload Data | +-------------------------------- - - - - - - - - - - - - - - - + : Payload Data continued ... : + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + | Payload Data continued ... | +---------------------------------------------------------------+ """ header = struct.pack('!B', (self.final << 7) | (self.rsv1 << 6) | (self.rsv2 << 5) | (self.rsv3 << 4) | (self.opcode & 0xf)) mask = bool(self.masking_key) << 7 payload_len = len(self.payload) if payload_len <= 125: header += struct.pack('!B', mask | payload_len) elif payload_len < (1 << 16): header += struct.pack('!BH', mask | 126, payload_len) elif payload_len < (1 << 63): header += struct.pack('!BQ', mask | 127, payload_len) else: # FIXME: RFC 6455 defines an action for this... raise Exception('the payload length is too damn high!') if mask: return header + self.masking_key + self.mask_payload() return header + self.payload
[ "def", "pack", "(", "self", ")", ":", "header", "=", "struct", ".", "pack", "(", "'!B'", ",", "(", "self", ".", "final", "<<", "7", ")", "|", "(", "self", ".", "rsv1", "<<", "6", ")", "|", "(", "self", ".", "rsv2", "<<", "5", ")", "|", "(",...
Pack the frame into a string according to the following scheme: +-+-+-+-+-------+-+-------------+-------------------------------+ |F|R|R|R| opcode|M| Payload len | Extended payload length | |I|S|S|S| (4) |A| (7) | (16/64) | |N|V|V|V| |S| | (if payload len==126/127) | | |1|2|3| |K| | | +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - + | Extended payload length continued, if payload len == 127 | + - - - - - - - - - - - - - - - +-------------------------------+ | |Masking-key, if MASK set to 1 | +-------------------------------+-------------------------------+ | Masking-key (continued) | Payload Data | +-------------------------------- - - - - - - - - - - - - - - - + : Payload Data continued ... : + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + | Payload Data continued ... | +---------------------------------------------------------------+
[ "Pack", "the", "frame", "into", "a", "string", "according", "to", "the", "following", "scheme", ":" ]
train
https://github.com/taddeus/wspy/blob/13f054a72442bb8dcc37b0ac011cab6025830d66/frame.py#L70-L110
taddeus/wspy
frame.py
Frame.fragment
def fragment(self, fragment_size, mask=False):
    """Fragment the frame into a chain of fragment frames:
    - An initial frame with non-zero opcode
    - Zero or more frames with opcode = 0 and final = False
    - A final frame with opcode = 0 and final = True

    The first and last frame may be the same frame, having a non-zero
    opcode and final = True. Thus, this function returns a list
    containing at least a single frame.

    `fragment_size` indicates the maximum payload size of each fragment.
    The payload of the original frame is split into one or more parts,
    and each part is converted to a Frame instance.

    `mask` is a boolean (default False) indicating whether the payloads
    should be masked. If True, each frame is assigned a randomly
    generated masking key.
    """
    frames = []

    # range() instead of the Python-2-only xrange().
    for start in range(0, len(self.payload), fragment_size):
        payload = self.payload[start:start + fragment_size]
        frames.append(Frame(OPCODE_CONTINUATION, payload, mask=mask,
                            final=False))

    if not frames:
        # An empty payload must still produce one (empty) frame, as
        # documented above; the original crashed on frames[0] here.
        frames.append(Frame(OPCODE_CONTINUATION, self.payload[:0],
                            mask=mask, final=False))

    # First fragment carries the real opcode; last one ends the chain.
    frames[0].opcode = self.opcode
    frames[-1].final = True

    return frames
python
def fragment(self, fragment_size, mask=False): """ Fragment the frame into a chain of fragment frames: - An initial frame with non-zero opcode - Zero or more frames with opcode = 0 and final = False - A final frame with opcode = 0 and final = True The first and last frame may be the same frame, having a non-zero opcode and final = True. Thus, this function returns a list containing at least a single frame. `fragment_size` indicates the maximum payload size of each fragment. The payload of the original frame is split into one or more parts, and each part is converted to a Frame instance. `mask` is a boolean (default False) indicating whether the payloads should be masked. If True, each frame is assigned a randomly generated masking key. """ frames = [] for start in xrange(0, len(self.payload), fragment_size): payload = self.payload[start:start + fragment_size] frames.append(Frame(OPCODE_CONTINUATION, payload, mask=mask, final=False)) frames[0].opcode = self.opcode frames[-1].final = True return frames
[ "def", "fragment", "(", "self", ",", "fragment_size", ",", "mask", "=", "False", ")", ":", "frames", "=", "[", "]", "for", "start", "in", "xrange", "(", "0", ",", "len", "(", "self", ".", "payload", ")", ",", "fragment_size", ")", ":", "payload", "...
Fragment the frame into a chain of fragment frames: - An initial frame with non-zero opcode - Zero or more frames with opcode = 0 and final = False - A final frame with opcode = 0 and final = True The first and last frame may be the same frame, having a non-zero opcode and final = True. Thus, this function returns a list containing at least a single frame. `fragment_size` indicates the maximum payload size of each fragment. The payload of the original frame is split into one or more parts, and each part is converted to a Frame instance. `mask` is a boolean (default False) indicating whether the payloads should be masked. If True, each frame is assigned a randomly generated masking key.
[ "Fragment", "the", "frame", "into", "a", "chain", "of", "fragment", "frames", ":", "-", "An", "initial", "frame", "with", "non", "-", "zero", "opcode", "-", "Zero", "or", "more", "frames", "with", "opcode", "=", "0", "and", "final", "=", "False", "-", ...
train
https://github.com/taddeus/wspy/blob/13f054a72442bb8dcc37b0ac011cab6025830d66/frame.py#L115-L144
taddeus/wspy
frame.py
ControlFrame.unpack_close
def unpack_close(self): """ Unpack a close message into a status code and a reason. If no payload is given, the code is None and the reason is an empty string. """ if self.payload: code = struct.unpack('!H', str(self.payload[:2]))[0] reason = str(self.payload[2:]) else: code = None reason = '' return code, reason
python
def unpack_close(self): """ Unpack a close message into a status code and a reason. If no payload is given, the code is None and the reason is an empty string. """ if self.payload: code = struct.unpack('!H', str(self.payload[:2]))[0] reason = str(self.payload[2:]) else: code = None reason = '' return code, reason
[ "def", "unpack_close", "(", "self", ")", ":", "if", "self", ".", "payload", ":", "code", "=", "struct", ".", "unpack", "(", "'!H'", ",", "str", "(", "self", ".", "payload", "[", ":", "2", "]", ")", ")", "[", "0", "]", "reason", "=", "str", "(",...
Unpack a close message into a status code and a reason. If no payload is given, the code is None and the reason is an empty string.
[ "Unpack", "a", "close", "message", "into", "a", "status", "code", "and", "a", "reason", ".", "If", "no", "payload", "is", "given", "the", "code", "is", "None", "and", "the", "reason", "is", "an", "empty", "string", "." ]
train
https://github.com/taddeus/wspy/blob/13f054a72442bb8dcc37b0ac011cab6025830d66/frame.py#L198-L210
taddeus/wspy
frame.py
SocketReader.readn
def readn(self, n): """ Keep receiving data until exactly `n` bytes have been read. """ data = '' while len(data) < n: received = self.sock.recv(n - len(data)) if not len(received): raise socket.error('no data read from socket') data += received return data
python
def readn(self, n): """ Keep receiving data until exactly `n` bytes have been read. """ data = '' while len(data) < n: received = self.sock.recv(n - len(data)) if not len(received): raise socket.error('no data read from socket') data += received return data
[ "def", "readn", "(", "self", ",", "n", ")", ":", "data", "=", "''", "while", "len", "(", "data", ")", "<", "n", ":", "received", "=", "self", ".", "sock", ".", "recv", "(", "n", "-", "len", "(", "data", ")", ")", "if", "not", "len", "(", "r...
Keep receiving data until exactly `n` bytes have been read.
[ "Keep", "receiving", "data", "until", "exactly", "n", "bytes", "have", "been", "read", "." ]
train
https://github.com/taddeus/wspy/blob/13f054a72442bb8dcc37b0ac011cab6025830d66/frame.py#L277-L291
radzak/rtv-downloader
rtv/extractors/wp.py
Wp.quality_comparator
def quality_comparator(video_data): """Custom comparator used to choose the right format based on the resolution.""" def parse_resolution(res: str) -> Tuple[int, ...]: return tuple(map(int, res.split('x'))) raw_resolution = video_data['resolution'] resolution = parse_resolution(raw_resolution) return resolution
python
def quality_comparator(video_data): """Custom comparator used to choose the right format based on the resolution.""" def parse_resolution(res: str) -> Tuple[int, ...]: return tuple(map(int, res.split('x'))) raw_resolution = video_data['resolution'] resolution = parse_resolution(raw_resolution) return resolution
[ "def", "quality_comparator", "(", "video_data", ")", ":", "def", "parse_resolution", "(", "res", ":", "str", ")", "->", "Tuple", "[", "int", ",", "...", "]", ":", "return", "tuple", "(", "map", "(", "int", ",", "res", ".", "split", "(", "'x'", ")", ...
Custom comparator used to choose the right format based on the resolution.
[ "Custom", "comparator", "used", "to", "choose", "the", "right", "format", "based", "on", "the", "resolution", "." ]
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/extractors/wp.py#L56-L63
ianclegg/ntlmlib
ntlmlib/context.py
_mic_required
def _mic_required(target_info): """ Checks the MsvAvFlags field of the supplied TargetInfo structure to determine in the MIC flags is set :param target_info: The TargetInfo structure to check :return: a boolean value indicating that the MIC flag is set """ if target_info is not None and target_info[TargetInfo.NTLMSSP_AV_FLAGS] is not None: flags = struct.unpack('<I', target_info[TargetInfo.NTLMSSP_AV_FLAGS][1])[0] return bool(flags & 0x00000002)
python
def _mic_required(target_info): """ Checks the MsvAvFlags field of the supplied TargetInfo structure to determine in the MIC flags is set :param target_info: The TargetInfo structure to check :return: a boolean value indicating that the MIC flag is set """ if target_info is not None and target_info[TargetInfo.NTLMSSP_AV_FLAGS] is not None: flags = struct.unpack('<I', target_info[TargetInfo.NTLMSSP_AV_FLAGS][1])[0] return bool(flags & 0x00000002)
[ "def", "_mic_required", "(", "target_info", ")", ":", "if", "target_info", "is", "not", "None", "and", "target_info", "[", "TargetInfo", ".", "NTLMSSP_AV_FLAGS", "]", "is", "not", "None", ":", "flags", "=", "struct", ".", "unpack", "(", "'<I'", ",", "targe...
Checks the MsvAvFlags field of the supplied TargetInfo structure to determine in the MIC flags is set :param target_info: The TargetInfo structure to check :return: a boolean value indicating that the MIC flag is set
[ "Checks", "the", "MsvAvFlags", "field", "of", "the", "supplied", "TargetInfo", "structure", "to", "determine", "in", "the", "MIC", "flags", "is", "set", ":", "param", "target_info", ":", "The", "TargetInfo", "structure", "to", "check", ":", "return", ":", "a...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/context.py#L223-L231
ianclegg/ntlmlib
ntlmlib/context.py
NtlmContext.initialize_security_context
def initialize_security_context(self): """ Idiomatic Python implementation of initialize_security_context, implemented as a generator function using yield to both accept incoming and return outgoing authentication tokens :return: The response to be returned to the server """ # Generate the NTLM Negotiate Request negotiate_token = self._negotiate(self.flags) challenge_token = yield negotiate_token # Generate the Authenticate Response authenticate_token = self._challenge_response(negotiate_token, challenge_token) yield authenticate_token
python
def initialize_security_context(self): """ Idiomatic Python implementation of initialize_security_context, implemented as a generator function using yield to both accept incoming and return outgoing authentication tokens :return: The response to be returned to the server """ # Generate the NTLM Negotiate Request negotiate_token = self._negotiate(self.flags) challenge_token = yield negotiate_token # Generate the Authenticate Response authenticate_token = self._challenge_response(negotiate_token, challenge_token) yield authenticate_token
[ "def", "initialize_security_context", "(", "self", ")", ":", "# Generate the NTLM Negotiate Request", "negotiate_token", "=", "self", ".", "_negotiate", "(", "self", ".", "flags", ")", "challenge_token", "=", "yield", "negotiate_token", "# Generate the Authenticate Response...
Idiomatic Python implementation of initialize_security_context, implemented as a generator function using yield to both accept incoming and return outgoing authentication tokens :return: The response to be returned to the server
[ "Idiomatic", "Python", "implementation", "of", "initialize_security_context", "implemented", "as", "a", "generator", "function", "using", "yield", "to", "both", "accept", "incoming", "and", "return", "outgoing", "authentication", "tokens", ":", "return", ":", "The", ...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/context.py#L84-L96
ianclegg/ntlmlib
ntlmlib/context.py
NtlmContext.wrap_message
def wrap_message(self, message): """ Cryptographically signs and optionally encrypts the supplied message. The message is only encrypted if 'confidentiality' was negotiated, otherwise the message is left untouched. :return: A tuple containing the message signature and the optionally encrypted message """ if not self.is_established: raise Exception("Context has not been established") if self._wrapper is None: raise Exception("Neither sealing or signing have been negotiated") else: return self._wrapper.wrap(message)
python
def wrap_message(self, message): """ Cryptographically signs and optionally encrypts the supplied message. The message is only encrypted if 'confidentiality' was negotiated, otherwise the message is left untouched. :return: A tuple containing the message signature and the optionally encrypted message """ if not self.is_established: raise Exception("Context has not been established") if self._wrapper is None: raise Exception("Neither sealing or signing have been negotiated") else: return self._wrapper.wrap(message)
[ "def", "wrap_message", "(", "self", ",", "message", ")", ":", "if", "not", "self", ".", "is_established", ":", "raise", "Exception", "(", "\"Context has not been established\"", ")", "if", "self", ".", "_wrapper", "is", "None", ":", "raise", "Exception", "(", ...
Cryptographically signs and optionally encrypts the supplied message. The message is only encrypted if 'confidentiality' was negotiated, otherwise the message is left untouched. :return: A tuple containing the message signature and the optionally encrypted message
[ "Cryptographically", "signs", "and", "optionally", "encrypts", "the", "supplied", "message", ".", "The", "message", "is", "only", "encrypted", "if", "confidentiality", "was", "negotiated", "otherwise", "the", "message", "is", "left", "untouched", ".", ":", "return...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/context.py#L114-L125
ianclegg/ntlmlib
ntlmlib/context.py
NtlmContext.unwrap_message
def unwrap_message(self, message, signature): """ Verifies the supplied signature against the message and decrypts the message if 'confidentiality' was negotiated. A SignatureException is raised if the signature cannot be parsed or the version is unsupported A SequenceException is raised if the sequence number in the signature is incorrect A ChecksumException is raised if the in the signature checksum is invalid :return: The decrypted message """ if not self.is_established: raise Exception("Context has not been established") if self._wrapper is None: raise Exception("Neither sealing or signing have been negotiated") else: return self._wrapper.unwrap(message, signature)
python
def unwrap_message(self, message, signature): """ Verifies the supplied signature against the message and decrypts the message if 'confidentiality' was negotiated. A SignatureException is raised if the signature cannot be parsed or the version is unsupported A SequenceException is raised if the sequence number in the signature is incorrect A ChecksumException is raised if the in the signature checksum is invalid :return: The decrypted message """ if not self.is_established: raise Exception("Context has not been established") if self._wrapper is None: raise Exception("Neither sealing or signing have been negotiated") else: return self._wrapper.unwrap(message, signature)
[ "def", "unwrap_message", "(", "self", ",", "message", ",", "signature", ")", ":", "if", "not", "self", ".", "is_established", ":", "raise", "Exception", "(", "\"Context has not been established\"", ")", "if", "self", ".", "_wrapper", "is", "None", ":", "raise"...
Verifies the supplied signature against the message and decrypts the message if 'confidentiality' was negotiated. A SignatureException is raised if the signature cannot be parsed or the version is unsupported A SequenceException is raised if the sequence number in the signature is incorrect A ChecksumException is raised if the in the signature checksum is invalid :return: The decrypted message
[ "Verifies", "the", "supplied", "signature", "against", "the", "message", "and", "decrypts", "the", "message", "if", "confidentiality", "was", "negotiated", ".", "A", "SignatureException", "is", "raised", "if", "the", "signature", "cannot", "be", "parsed", "or", ...
train
https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/context.py#L127-L141
radzak/rtv-downloader
rtv/options.py
parse_options
def parse_options(): """ Parse command line arguments. Returns: options, args """ parser = argparse.ArgumentParser(description='Video downloader by radzak.', prog='RTVdownloader') urls_group = parser.add_mutually_exclusive_group(required=True) urls_group.add_argument('urls', type=str, metavar='URL', default=[], nargs='*', help='urls of sites containing videos you wish to download' ) urls_group.add_argument('-f', type=argparse.FileType('r'), dest='files', metavar='FILE', default=[], nargs='*', help='text file with urls of sites containing videos you ' 'wish to download ' ) urls_group.add_argument('-o', type=str, dest='onetabs', metavar='ONETAB', default=[], nargs='*', help='onetab links containing urls of the videos you wish to download' ) options = DEFAULT_OPTIONS # TODO: add dir option that defaults to the DEFAULT_OPTIONS['dl_path'] args = parser.parse_args() return options, args
python
def parse_options(): """ Parse command line arguments. Returns: options, args """ parser = argparse.ArgumentParser(description='Video downloader by radzak.', prog='RTVdownloader') urls_group = parser.add_mutually_exclusive_group(required=True) urls_group.add_argument('urls', type=str, metavar='URL', default=[], nargs='*', help='urls of sites containing videos you wish to download' ) urls_group.add_argument('-f', type=argparse.FileType('r'), dest='files', metavar='FILE', default=[], nargs='*', help='text file with urls of sites containing videos you ' 'wish to download ' ) urls_group.add_argument('-o', type=str, dest='onetabs', metavar='ONETAB', default=[], nargs='*', help='onetab links containing urls of the videos you wish to download' ) options = DEFAULT_OPTIONS # TODO: add dir option that defaults to the DEFAULT_OPTIONS['dl_path'] args = parser.parse_args() return options, args
[ "def", "parse_options", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Video downloader by radzak.'", ",", "prog", "=", "'RTVdownloader'", ")", "urls_group", "=", "parser", ".", "add_mutually_exclusive_group", "(", "requi...
Parse command line arguments. Returns: options, args
[ "Parse", "command", "line", "arguments", "." ]
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/options.py#L24-L67
radzak/rtv-downloader
rtv/extractors/tvpinfo.py
TvpInfo.get_article_url
def get_article_url(self): """ Get the url of the TVP Info article itself, not the url of the preview with the 'Przejdź do artykułu' hyperlink. Returns: (str): Url of the article with the video. """ html = requests.get(self.url).text soup = BeautifulSoup(html, 'lxml') div = soup.find('div', class_='more-back') if div: parsed_uri = urlparse(self.url) domain = '{uri.scheme}://{uri.netloc}'.format(uri=parsed_uri) suffix = div.find('a', href=True)['href'].strip() article_url = domain + suffix return article_url else: return self.url
python
def get_article_url(self): """ Get the url of the TVP Info article itself, not the url of the preview with the 'Przejdź do artykułu' hyperlink. Returns: (str): Url of the article with the video. """ html = requests.get(self.url).text soup = BeautifulSoup(html, 'lxml') div = soup.find('div', class_='more-back') if div: parsed_uri = urlparse(self.url) domain = '{uri.scheme}://{uri.netloc}'.format(uri=parsed_uri) suffix = div.find('a', href=True)['href'].strip() article_url = domain + suffix return article_url else: return self.url
[ "def", "get_article_url", "(", "self", ")", ":", "html", "=", "requests", ".", "get", "(", "self", ".", "url", ")", ".", "text", "soup", "=", "BeautifulSoup", "(", "html", ",", "'lxml'", ")", "div", "=", "soup", ".", "find", "(", "'div'", ",", "cla...
Get the url of the TVP Info article itself, not the url of the preview with the 'Przejdź do artykułu' hyperlink. Returns: (str): Url of the article with the video.
[ "Get", "the", "url", "of", "the", "TVP", "Info", "article", "itself", "not", "the", "url", "of", "the", "preview", "with", "the", "Przejdź", "do", "artykułu", "hyperlink", "." ]
train
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/extractors/tvpinfo.py#L39-L59
airysen/caimcaim
caimcaim/caimcaim.py
CAIMD.fit
def fit(self, X, y): """ Fit CAIM Parameters ---------- X : array-like, pandas dataframe, shape [n_samples, n_feature] Input array can contain missing values y: array-like, pandas dataframe, shape [n_samples] Target variable. Must be categorical. Returns ------- self """ self.split_scheme = dict() if isinstance(X, pd.DataFrame): # self.indx = X.index # self.columns = X.columns if isinstance(self._features, list): self.categorical = [X.columns.get_loc(label) for label in self._features] X = X.values y = y.values if self._features == 'auto': self.categorical = self.check_categorical(X, y) categorical = self.categorical print('Categorical', categorical) min_splits = np.unique(y).shape[0] for j in range(X.shape[1]): if j in categorical: continue xj = X[:, j] xj = xj[np.invert(np.isnan(xj))] new_index = xj.argsort() xj = xj[new_index] yj = y[new_index] allsplits = np.unique(xj)[1:-1].tolist() # potential split points global_caim = -1 mainscheme = [xj[0], xj[-1]] best_caim = 0 k = 1 while (k <= min_splits) or ((global_caim < best_caim) and (allsplits)): split_points = np.random.permutation(allsplits).tolist() best_scheme = None best_point = None best_caim = 0 k = k + 1 while split_points: scheme = mainscheme[:] sp = split_points.pop() scheme.append(sp) scheme.sort() c = self.get_caim(scheme, xj, yj) if c > best_caim: best_caim = c best_scheme = scheme best_point = sp if (k <= min_splits) or (best_caim > global_caim): mainscheme = best_scheme global_caim = best_caim try: allsplits.remove(best_point) except ValueError: raise NotEnoughPoints('The feature #' + str(j) + ' does not have' + ' enough unique values for discretization!' + ' Add it to categorical list!') self.split_scheme[j] = mainscheme print('#', j, ' GLOBAL CAIM ', global_caim) return self
python
def fit(self, X, y): """ Fit CAIM Parameters ---------- X : array-like, pandas dataframe, shape [n_samples, n_feature] Input array can contain missing values y: array-like, pandas dataframe, shape [n_samples] Target variable. Must be categorical. Returns ------- self """ self.split_scheme = dict() if isinstance(X, pd.DataFrame): # self.indx = X.index # self.columns = X.columns if isinstance(self._features, list): self.categorical = [X.columns.get_loc(label) for label in self._features] X = X.values y = y.values if self._features == 'auto': self.categorical = self.check_categorical(X, y) categorical = self.categorical print('Categorical', categorical) min_splits = np.unique(y).shape[0] for j in range(X.shape[1]): if j in categorical: continue xj = X[:, j] xj = xj[np.invert(np.isnan(xj))] new_index = xj.argsort() xj = xj[new_index] yj = y[new_index] allsplits = np.unique(xj)[1:-1].tolist() # potential split points global_caim = -1 mainscheme = [xj[0], xj[-1]] best_caim = 0 k = 1 while (k <= min_splits) or ((global_caim < best_caim) and (allsplits)): split_points = np.random.permutation(allsplits).tolist() best_scheme = None best_point = None best_caim = 0 k = k + 1 while split_points: scheme = mainscheme[:] sp = split_points.pop() scheme.append(sp) scheme.sort() c = self.get_caim(scheme, xj, yj) if c > best_caim: best_caim = c best_scheme = scheme best_point = sp if (k <= min_splits) or (best_caim > global_caim): mainscheme = best_scheme global_caim = best_caim try: allsplits.remove(best_point) except ValueError: raise NotEnoughPoints('The feature #' + str(j) + ' does not have' + ' enough unique values for discretization!' + ' Add it to categorical list!') self.split_scheme[j] = mainscheme print('#', j, ' GLOBAL CAIM ', global_caim) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "self", ".", "split_scheme", "=", "dict", "(", ")", "if", "isinstance", "(", "X", ",", "pd", ".", "DataFrame", ")", ":", "# self.indx = X.index", "# self.columns = X.columns", "if", "isinstance", "...
Fit CAIM Parameters ---------- X : array-like, pandas dataframe, shape [n_samples, n_feature] Input array can contain missing values y: array-like, pandas dataframe, shape [n_samples] Target variable. Must be categorical. Returns ------- self
[ "Fit", "CAIM", "Parameters", "----------", "X", ":", "array", "-", "like", "pandas", "dataframe", "shape", "[", "n_samples", "n_feature", "]", "Input", "array", "can", "contain", "missing", "values", "y", ":", "array", "-", "like", "pandas", "dataframe", "sh...
train
https://github.com/airysen/caimcaim/blob/82e3ce700da8c23ab6199524f646c790cce0b460/caimcaim/caimcaim.py#L63-L133
airysen/caimcaim
caimcaim/caimcaim.py
CAIMD.transform
def transform(self, X): """ Discretize X using a split scheme obtained with CAIM. Parameters ---------- X : array-like or pandas dataframe, shape [n_samples, n_features] Input array can contain missing values Returns ------- X_di : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ if isinstance(X, pd.DataFrame): self.indx = X.index self.columns = X.columns X = X.values X_di = X.copy() categorical = self.categorical scheme = self.split_scheme for j in range(X.shape[1]): if j in categorical: continue sh = scheme[j] sh[-1] = sh[-1] + 1 xj = X[:, j] # xi = xi[np.invert(np.isnan(xi))] for i in range(len(sh) - 1): ind = np.where((xj >= sh[i]) & (xj < sh[i + 1]))[0] X_di[ind, j] = i if hasattr(self, 'indx'): return pd.DataFrame(X_di, index=self.indx, columns=self.columns) return X_di
python
def transform(self, X): """ Discretize X using a split scheme obtained with CAIM. Parameters ---------- X : array-like or pandas dataframe, shape [n_samples, n_features] Input array can contain missing values Returns ------- X_di : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ if isinstance(X, pd.DataFrame): self.indx = X.index self.columns = X.columns X = X.values X_di = X.copy() categorical = self.categorical scheme = self.split_scheme for j in range(X.shape[1]): if j in categorical: continue sh = scheme[j] sh[-1] = sh[-1] + 1 xj = X[:, j] # xi = xi[np.invert(np.isnan(xi))] for i in range(len(sh) - 1): ind = np.where((xj >= sh[i]) & (xj < sh[i + 1]))[0] X_di[ind, j] = i if hasattr(self, 'indx'): return pd.DataFrame(X_di, index=self.indx, columns=self.columns) return X_di
[ "def", "transform", "(", "self", ",", "X", ")", ":", "if", "isinstance", "(", "X", ",", "pd", ".", "DataFrame", ")", ":", "self", ".", "indx", "=", "X", ".", "index", "self", ".", "columns", "=", "X", ".", "columns", "X", "=", "X", ".", "values...
Discretize X using a split scheme obtained with CAIM. Parameters ---------- X : array-like or pandas dataframe, shape [n_samples, n_features] Input array can contain missing values Returns ------- X_di : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input.
[ "Discretize", "X", "using", "a", "split", "scheme", "obtained", "with", "CAIM", ".", "Parameters", "----------", "X", ":", "array", "-", "like", "or", "pandas", "dataframe", "shape", "[", "n_samples", "n_features", "]", "Input", "array", "can", "contain", "m...
train
https://github.com/airysen/caimcaim/blob/82e3ce700da8c23ab6199524f646c790cce0b460/caimcaim/caimcaim.py#L135-L168
mk-fg/feedjack
feedjack/templatetags/html.py
prettyhtml
def prettyhtml(value, autoescape=None): 'Clean (and optionally escape) passed html of unsafe tags and attributes.' value = html_cleaner(value) return escape(value) if autoescape\ and not isinstance(value, SafeData) else mark_safe(value)
python
def prettyhtml(value, autoescape=None): 'Clean (and optionally escape) passed html of unsafe tags and attributes.' value = html_cleaner(value) return escape(value) if autoescape\ and not isinstance(value, SafeData) else mark_safe(value)
[ "def", "prettyhtml", "(", "value", ",", "autoescape", "=", "None", ")", ":", "value", "=", "html_cleaner", "(", "value", ")", "return", "escape", "(", "value", ")", "if", "autoescape", "and", "not", "isinstance", "(", "value", ",", "SafeData", ")", "else...
Clean (and optionally escape) passed html of unsafe tags and attributes.
[ "Clean", "(", "and", "optionally", "escape", ")", "passed", "html", "of", "unsafe", "tags", "and", "attributes", "." ]
train
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/templatetags/html.py#L19-L23
mk-fg/feedjack
feedjack/templatetags/html.py
hash
def hash(value, chars=None): 'Get N chars (default: all) of secure hash hexdigest of value.' value = hash_func(value).hexdigest() if chars: value = value[:chars] return mark_safe(value)
python
def hash(value, chars=None): 'Get N chars (default: all) of secure hash hexdigest of value.' value = hash_func(value).hexdigest() if chars: value = value[:chars] return mark_safe(value)
[ "def", "hash", "(", "value", ",", "chars", "=", "None", ")", ":", "value", "=", "hash_func", "(", "value", ")", ".", "hexdigest", "(", ")", "if", "chars", ":", "value", "=", "value", "[", ":", "chars", "]", "return", "mark_safe", "(", "value", ")" ...
Get N chars (default: all) of secure hash hexdigest of value.
[ "Get", "N", "chars", "(", "default", ":", "all", ")", "of", "secure", "hash", "hexdigest", "of", "value", "." ]
train
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/templatetags/html.py#L27-L31
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/readnoisecurves.py
read_noise_curve
def read_noise_curve(noise_curve, noise_type_in='ASD', noise_type_out='ASD', add_wd_noise=False, wd_noise='HB_wd_noise', wd_noise_type_in='ASD'): """Simple auxillary function that can read noise curves in. This function can read in noise curves from a provided file or those that are preinstalled with this installation. All pre-installed noise curves are in the form of an amplitude spectral density. Information on each one is found in each specific file. These are located in the `noise_curves` folder. Pre-installed really just means in the noise_curves folder. Therefore, curves can be added and called with only a string. Arguments: noise_curve (str): Either a file path to a noise curve or a str represented pre-loaded sensitivity curve. If using pre-loaded curve, choices are LPA (LISA Phase A), PL (Proposed LISA), CL (Classic LISA), CLLF (Classic LISA Low Frequency), PLCS (Proposed LISA Constant Slope), or PLHB (Proposed LISA Higher Break). See the arXiv paper above for the meaning behind each choice and a plot with each curve. noise_type_in/noise_type_out (str, optional): Type of noise input/output. Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`. add_wd_noise (bool, optional): If True, include wd noise. wd_noise (str, optional): File path to wd background noise or string representing those in the noise curves folder. Default is the Hiscock et al 2000 approximation of the Hils & Bender 1997 white dwarf background (`HB_wd_noise`). wd_noise_type_in (str, optional): Type of wd noise input. The output will be the same as ``noise_type_out``. Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`. Returns: (tuple of arrays): Frequency and amplitude arrays of type ``noise_type_out``. 
""" possible_noise_types = ['ASD', 'PSD', 'char_strain'] if noise_type_in not in possible_noise_types: raise ValueError('noise_type_in must be either ASD, PSD, or char_strain.') if noise_type_out not in possible_noise_types: raise ValueError('noise_type_out must be either ASD, PSD, or char_strain.') # find the noise curve file if noise_curve[-4:] == '.txt': noise = ascii.read(noise_curve) else: cfd = os.path.dirname(os.path.abspath(__file__)) noise = ascii.read(cfd + '/noise_curves/' + noise_curve + '.txt') # read it in f_n = np.asarray(noise['f']) amp_n = np.asarray(noise[noise_type_in]) if noise_type_in != noise_type_out: amp_n = globals()[noise_type_in.lower() + '_to_' + noise_type_out.lower()](f_n, amp_n) # add wd_noise if true if add_wd_noise: if wd_noise_type_in not in possible_noise_types: raise ValueError('wd_noise_type_in must be either ASD, PSD, or char_strain.') if wd_noise[-4:] == '.txt': wd_data = ascii.read(wd_noise) else: cfd = os.path.dirname(os.path.abspath(__file__)) wd_data = ascii.read(cfd + '/noise_curves/' + wd_noise + '.txt') f_n_wd = np.asarray(wd_data['f']) amp_n_wd = np.asarray(wd_data[wd_noise_type_in]) if wd_noise_type_in != noise_type_out: amp_n_wd = globals()[noise_type_in.lower() + '_to_' + noise_type_out.lower()](f_n_wd, amp_n_wd) f_n, amp_n = combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd) return f_n, amp_n
python
def read_noise_curve(noise_curve, noise_type_in='ASD', noise_type_out='ASD', add_wd_noise=False, wd_noise='HB_wd_noise', wd_noise_type_in='ASD'): """Simple auxillary function that can read noise curves in. This function can read in noise curves from a provided file or those that are preinstalled with this installation. All pre-installed noise curves are in the form of an amplitude spectral density. Information on each one is found in each specific file. These are located in the `noise_curves` folder. Pre-installed really just means in the noise_curves folder. Therefore, curves can be added and called with only a string. Arguments: noise_curve (str): Either a file path to a noise curve or a str represented pre-loaded sensitivity curve. If using pre-loaded curve, choices are LPA (LISA Phase A), PL (Proposed LISA), CL (Classic LISA), CLLF (Classic LISA Low Frequency), PLCS (Proposed LISA Constant Slope), or PLHB (Proposed LISA Higher Break). See the arXiv paper above for the meaning behind each choice and a plot with each curve. noise_type_in/noise_type_out (str, optional): Type of noise input/output. Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`. add_wd_noise (bool, optional): If True, include wd noise. wd_noise (str, optional): File path to wd background noise or string representing those in the noise curves folder. Default is the Hiscock et al 2000 approximation of the Hils & Bender 1997 white dwarf background (`HB_wd_noise`). wd_noise_type_in (str, optional): Type of wd noise input. The output will be the same as ``noise_type_out``. Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`. Returns: (tuple of arrays): Frequency and amplitude arrays of type ``noise_type_out``. 
""" possible_noise_types = ['ASD', 'PSD', 'char_strain'] if noise_type_in not in possible_noise_types: raise ValueError('noise_type_in must be either ASD, PSD, or char_strain.') if noise_type_out not in possible_noise_types: raise ValueError('noise_type_out must be either ASD, PSD, or char_strain.') # find the noise curve file if noise_curve[-4:] == '.txt': noise = ascii.read(noise_curve) else: cfd = os.path.dirname(os.path.abspath(__file__)) noise = ascii.read(cfd + '/noise_curves/' + noise_curve + '.txt') # read it in f_n = np.asarray(noise['f']) amp_n = np.asarray(noise[noise_type_in]) if noise_type_in != noise_type_out: amp_n = globals()[noise_type_in.lower() + '_to_' + noise_type_out.lower()](f_n, amp_n) # add wd_noise if true if add_wd_noise: if wd_noise_type_in not in possible_noise_types: raise ValueError('wd_noise_type_in must be either ASD, PSD, or char_strain.') if wd_noise[-4:] == '.txt': wd_data = ascii.read(wd_noise) else: cfd = os.path.dirname(os.path.abspath(__file__)) wd_data = ascii.read(cfd + '/noise_curves/' + wd_noise + '.txt') f_n_wd = np.asarray(wd_data['f']) amp_n_wd = np.asarray(wd_data[wd_noise_type_in]) if wd_noise_type_in != noise_type_out: amp_n_wd = globals()[noise_type_in.lower() + '_to_' + noise_type_out.lower()](f_n_wd, amp_n_wd) f_n, amp_n = combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd) return f_n, amp_n
[ "def", "read_noise_curve", "(", "noise_curve", ",", "noise_type_in", "=", "'ASD'", ",", "noise_type_out", "=", "'ASD'", ",", "add_wd_noise", "=", "False", ",", "wd_noise", "=", "'HB_wd_noise'", ",", "wd_noise_type_in", "=", "'ASD'", ")", ":", "possible_noise_types...
Simple auxillary function that can read noise curves in. This function can read in noise curves from a provided file or those that are preinstalled with this installation. All pre-installed noise curves are in the form of an amplitude spectral density. Information on each one is found in each specific file. These are located in the `noise_curves` folder. Pre-installed really just means in the noise_curves folder. Therefore, curves can be added and called with only a string. Arguments: noise_curve (str): Either a file path to a noise curve or a str represented pre-loaded sensitivity curve. If using pre-loaded curve, choices are LPA (LISA Phase A), PL (Proposed LISA), CL (Classic LISA), CLLF (Classic LISA Low Frequency), PLCS (Proposed LISA Constant Slope), or PLHB (Proposed LISA Higher Break). See the arXiv paper above for the meaning behind each choice and a plot with each curve. noise_type_in/noise_type_out (str, optional): Type of noise input/output. Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`. add_wd_noise (bool, optional): If True, include wd noise. wd_noise (str, optional): File path to wd background noise or string representing those in the noise curves folder. Default is the Hiscock et al 2000 approximation of the Hils & Bender 1997 white dwarf background (`HB_wd_noise`). wd_noise_type_in (str, optional): Type of wd noise input. The output will be the same as ``noise_type_out``. Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`. Returns: (tuple of arrays): Frequency and amplitude arrays of type ``noise_type_out``.
[ "Simple", "auxillary", "function", "that", "can", "read", "noise", "curves", "in", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/readnoisecurves.py#L102-L175
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/readnoisecurves.py
combine_with_wd_noise
def combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd): """Combine noise with wd noise. Combines noise and white dwarf background noise based on greater amplitude value at each noise curve step. Args: f_n (float array): Frequencies of noise curve. amp_n (float array): Amplitude values of noise curve. f_n_wd (float array): Frequencies of wd noise. amp_n_wd (float array): Amplitude values of wd noise. Returns: (tuple of float arrays): Amplitude values of combined noise curve. """ # interpolate wd noise amp_n_wd_interp = interpolate.interp1d(f_n_wd, amp_n_wd, bounds_error=False, fill_value=1e-30) # find points of wd noise amplitude at noise curve frequencies amp_n_wd = amp_n_wd_interp(f_n) # keep the greater value at each frequency amp_n = amp_n*(amp_n >= amp_n_wd) + amp_n_wd*(amp_n < amp_n_wd) return f_n, amp_n
python
def combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd): """Combine noise with wd noise. Combines noise and white dwarf background noise based on greater amplitude value at each noise curve step. Args: f_n (float array): Frequencies of noise curve. amp_n (float array): Amplitude values of noise curve. f_n_wd (float array): Frequencies of wd noise. amp_n_wd (float array): Amplitude values of wd noise. Returns: (tuple of float arrays): Amplitude values of combined noise curve. """ # interpolate wd noise amp_n_wd_interp = interpolate.interp1d(f_n_wd, amp_n_wd, bounds_error=False, fill_value=1e-30) # find points of wd noise amplitude at noise curve frequencies amp_n_wd = amp_n_wd_interp(f_n) # keep the greater value at each frequency amp_n = amp_n*(amp_n >= amp_n_wd) + amp_n_wd*(amp_n < amp_n_wd) return f_n, amp_n
[ "def", "combine_with_wd_noise", "(", "f_n", ",", "amp_n", ",", "f_n_wd", ",", "amp_n_wd", ")", ":", "# interpolate wd noise", "amp_n_wd_interp", "=", "interpolate", ".", "interp1d", "(", "f_n_wd", ",", "amp_n_wd", ",", "bounds_error", "=", "False", ",", "fill_va...
Combine noise with wd noise. Combines noise and white dwarf background noise based on greater amplitude value at each noise curve step. Args: f_n (float array): Frequencies of noise curve. amp_n (float array): Amplitude values of noise curve. f_n_wd (float array): Frequencies of wd noise. amp_n_wd (float array): Amplitude values of wd noise. Returns: (tuple of float arrays): Amplitude values of combined noise curve.
[ "Combine", "noise", "with", "wd", "noise", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/readnoisecurves.py#L178-L203
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/readnoisecurves.py
show_available_noise_curves
def show_available_noise_curves(return_curves=True, print_curves=False): """List available sensitivity curves This function lists the available sensitivity curve strings in noise_curves folder. Args: return_curves (bool, optional): If True, return a list of curve options. print_curves (bool, optional): If True, print each curve option. Returns: (optional list of str): List of curve options. Raises: ValueError: Both args are False. """ if return_curves is False and print_curves is False: raise ValueError("Both return curves and print_curves are False." + " You will not see the options") cfd = os.path.dirname(os.path.abspath(__file__)) curves = [curve.split('.')[0] for curve in os.listdir(cfd + '/noise_curves/')] if print_curves: for f in curves: print(f) if return_curves: return curves return
python
def show_available_noise_curves(return_curves=True, print_curves=False): """List available sensitivity curves This function lists the available sensitivity curve strings in noise_curves folder. Args: return_curves (bool, optional): If True, return a list of curve options. print_curves (bool, optional): If True, print each curve option. Returns: (optional list of str): List of curve options. Raises: ValueError: Both args are False. """ if return_curves is False and print_curves is False: raise ValueError("Both return curves and print_curves are False." + " You will not see the options") cfd = os.path.dirname(os.path.abspath(__file__)) curves = [curve.split('.')[0] for curve in os.listdir(cfd + '/noise_curves/')] if print_curves: for f in curves: print(f) if return_curves: return curves return
[ "def", "show_available_noise_curves", "(", "return_curves", "=", "True", ",", "print_curves", "=", "False", ")", ":", "if", "return_curves", "is", "False", "and", "print_curves", "is", "False", ":", "raise", "ValueError", "(", "\"Both return curves and print_curves ar...
List available sensitivity curves This function lists the available sensitivity curve strings in noise_curves folder. Args: return_curves (bool, optional): If True, return a list of curve options. print_curves (bool, optional): If True, print each curve option. Returns: (optional list of str): List of curve options. Raises: ValueError: Both args are False.
[ "List", "available", "sensitivity", "curves" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/readnoisecurves.py#L206-L232
hayd/ctox
ctox/subst.py
expand_curlys
def expand_curlys(s): """Takes string and returns list of options: Example ------- >>> expand_curlys("py{26, 27}") ["py26", "py27"] """ from functools import reduce curleys = list(re.finditer(r"{[^{}]*}", s)) return reduce(_replace_curly, reversed(curleys), [s])
python
def expand_curlys(s): """Takes string and returns list of options: Example ------- >>> expand_curlys("py{26, 27}") ["py26", "py27"] """ from functools import reduce curleys = list(re.finditer(r"{[^{}]*}", s)) return reduce(_replace_curly, reversed(curleys), [s])
[ "def", "expand_curlys", "(", "s", ")", ":", "from", "functools", "import", "reduce", "curleys", "=", "list", "(", "re", ".", "finditer", "(", "r\"{[^{}]*}\"", ",", "s", ")", ")", "return", "reduce", "(", "_replace_curly", ",", "reversed", "(", "curleys", ...
Takes string and returns list of options: Example ------- >>> expand_curlys("py{26, 27}") ["py26", "py27"]
[ "Takes", "string", "and", "returns", "list", "of", "options", ":" ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L33-L44
hayd/ctox
ctox/subst.py
_split_out_of_braces
def _split_out_of_braces(s): """Generator to split comma seperated string, but not split commas inside curly braces. >>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32")) >>>['py{26, 27}-django{15, 16}, py32'] """ prev = 0 for m in re.finditer(r"{[^{}]*}|\s*,\s*", s): if not m.group().startswith("{"): part = s[prev:m.start()] if part: yield s[prev:m.start()] prev = m.end() part = s[prev:] if part: yield part
python
def _split_out_of_braces(s): """Generator to split comma seperated string, but not split commas inside curly braces. >>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32")) >>>['py{26, 27}-django{15, 16}, py32'] """ prev = 0 for m in re.finditer(r"{[^{}]*}|\s*,\s*", s): if not m.group().startswith("{"): part = s[prev:m.start()] if part: yield s[prev:m.start()] prev = m.end() part = s[prev:] if part: yield part
[ "def", "_split_out_of_braces", "(", "s", ")", ":", "prev", "=", "0", "for", "m", "in", "re", ".", "finditer", "(", "r\"{[^{}]*}|\\s*,\\s*\"", ",", "s", ")", ":", "if", "not", "m", ".", "group", "(", ")", ".", "startswith", "(", "\"{\"", ")", ":", "...
Generator to split comma seperated string, but not split commas inside curly braces. >>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32")) >>>['py{26, 27}-django{15, 16}, py32']
[ "Generator", "to", "split", "comma", "seperated", "string", "but", "not", "split", "commas", "inside", "curly", "braces", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L69-L86
hayd/ctox
ctox/subst.py
expand_factor_conditions
def expand_factor_conditions(s, env): """If env matches the expanded factor then return value else return ''. Example ------- >>> s = 'py{33,34}: docformatter' >>> expand_factor_conditions(s, Env(name="py34", ...)) "docformatter" >>> expand_factor_conditions(s, Env(name="py26", ...)) "" """ try: factor, value = re.split(r'\s*\:\s*', s) except ValueError: return s if matches_factor_conditions(factor, env): return value else: return ''
python
def expand_factor_conditions(s, env): """If env matches the expanded factor then return value else return ''. Example ------- >>> s = 'py{33,34}: docformatter' >>> expand_factor_conditions(s, Env(name="py34", ...)) "docformatter" >>> expand_factor_conditions(s, Env(name="py26", ...)) "" """ try: factor, value = re.split(r'\s*\:\s*', s) except ValueError: return s if matches_factor_conditions(factor, env): return value else: return ''
[ "def", "expand_factor_conditions", "(", "s", ",", "env", ")", ":", "try", ":", "factor", ",", "value", "=", "re", ".", "split", "(", "r'\\s*\\:\\s*'", ",", "s", ")", "except", "ValueError", ":", "return", "s", "if", "matches_factor_conditions", "(", "facto...
If env matches the expanded factor then return value else return ''. Example ------- >>> s = 'py{33,34}: docformatter' >>> expand_factor_conditions(s, Env(name="py34", ...)) "docformatter" >>> expand_factor_conditions(s, Env(name="py26", ...)) ""
[ "If", "env", "matches", "the", "expanded", "factor", "then", "return", "value", "else", "return", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L89-L109
hayd/ctox
ctox/subst.py
matches_factor_conditions
def matches_factor_conditions(s, env): """"Returns True if py{33, 34} expanded is contained in env.name.""" env_labels = set(env.name.split('-')) labels = set(bash_expand(s)) return bool(labels & env_labels)
python
def matches_factor_conditions(s, env): """"Returns True if py{33, 34} expanded is contained in env.name.""" env_labels = set(env.name.split('-')) labels = set(bash_expand(s)) return bool(labels & env_labels)
[ "def", "matches_factor_conditions", "(", "s", ",", "env", ")", ":", "env_labels", "=", "set", "(", "env", ".", "name", ".", "split", "(", "'-'", ")", ")", "labels", "=", "set", "(", "bash_expand", "(", "s", ")", ")", "return", "bool", "(", "labels", ...
Returns True if py{33, 34} expanded is contained in env.name.
[ "Returns", "True", "if", "py", "{", "33", "34", "}", "expanded", "is", "contained", "in", "env", ".", "name", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L112-L116
hayd/ctox
ctox/subst.py
split_on
def split_on(s, sep=" "): """Split s by sep, unless it's inside a quote.""" pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
python
def split_on(s, sep=" "): """Split s by sep, unless it's inside a quote.""" pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
[ "def", "split_on", "(", "s", ",", "sep", "=", "\" \"", ")", ":", "pattern", "=", "'''((?:[^%s\"']|\"[^\"]*\"|'[^']*')+)'''", "%", "sep", "return", "[", "_strip_speechmarks", "(", "t", ")", "for", "t", "in", "re", ".", "split", "(", "pattern", ",", "s", "...
Split s by sep, unless it's inside a quote.
[ "Split", "s", "by", "sep", "unless", "it", "s", "inside", "a", "quote", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L119-L123
hayd/ctox
ctox/subst.py
replace_braces
def replace_braces(s, env): """Makes tox substitutions to s, with respect to environment env. Example ------- >>> replace_braces("echo {posargs:{env:USER:} passed no posargs}") "echo andy passed no posargs" Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""), the "{posargs:andy}" is replaced with "andy" (since no posargs were passed). """ def replace(m): return _replace_match(m, env) for _ in range(DEPTH): s = re.sub(r"{[^{}]*}", replace, s) return s
python
def replace_braces(s, env): """Makes tox substitutions to s, with respect to environment env. Example ------- >>> replace_braces("echo {posargs:{env:USER:} passed no posargs}") "echo andy passed no posargs" Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""), the "{posargs:andy}" is replaced with "andy" (since no posargs were passed). """ def replace(m): return _replace_match(m, env) for _ in range(DEPTH): s = re.sub(r"{[^{}]*}", replace, s) return s
[ "def", "replace_braces", "(", "s", ",", "env", ")", ":", "def", "replace", "(", "m", ")", ":", "return", "_replace_match", "(", "m", ",", "env", ")", "for", "_", "in", "range", "(", "DEPTH", ")", ":", "s", "=", "re", ".", "sub", "(", "r\"{[^{}]*}...
Makes tox substitutions to s, with respect to environment env. Example ------- >>> replace_braces("echo {posargs:{env:USER:} passed no posargs}") "echo andy passed no posargs" Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""), the "{posargs:andy}" is replaced with "andy" (since no posargs were passed).
[ "Makes", "tox", "substitutions", "to", "s", "with", "respect", "to", "environment", "env", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L133-L150
hayd/ctox
ctox/subst.py
_replace_match
def _replace_match(m, env): """Given a match object, having matched something inside curly braces, replace the contents if matches one of the supported tox-substitutions.""" # ditch the curly braces s = m.group()[1:-1].strip() try: # get the env attributes e.g. envpython or toxinidir. # Note: if you ask for a env methodname this will raise # later on... so don't do that. return getattr(env, s) except AttributeError: pass for r in [_replace_envvar, _replace_config, _replace_posargs]: try: return r(s, env) except ValueError: pass raise NotImplementedError("{%s} not understood in tox.ini file." % s)
python
def _replace_match(m, env): """Given a match object, having matched something inside curly braces, replace the contents if matches one of the supported tox-substitutions.""" # ditch the curly braces s = m.group()[1:-1].strip() try: # get the env attributes e.g. envpython or toxinidir. # Note: if you ask for a env methodname this will raise # later on... so don't do that. return getattr(env, s) except AttributeError: pass for r in [_replace_envvar, _replace_config, _replace_posargs]: try: return r(s, env) except ValueError: pass raise NotImplementedError("{%s} not understood in tox.ini file." % s)
[ "def", "_replace_match", "(", "m", ",", "env", ")", ":", "# ditch the curly braces", "s", "=", "m", ".", "group", "(", ")", "[", "1", ":", "-", "1", "]", ".", "strip", "(", ")", "try", ":", "# get the env attributes e.g. envpython or toxinidir.", "# Note: if...
Given a match object, having matched something inside curly braces, replace the contents if matches one of the supported tox-substitutions.
[ "Given", "a", "match", "object", "having", "matched", "something", "inside", "curly", "braces", "replace", "the", "contents", "if", "matches", "one", "of", "the", "supported", "tox", "-", "substitutions", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L153-L173
hayd/ctox
ctox/subst.py
_replace_envvar
def _replace_envvar(s, _): """env:KEY or env:KEY:DEFAULT""" e = s.split(":") if len(e) > 3 or len(e) == 1 or e[0] != "env": raise ValueError() elif len(e) == 2: # Note: this can/should raise a KeyError (according to spec). return os.environ[e[1]] else: # len(e) == 3 return os.environ.get(e[1], e[2])
python
def _replace_envvar(s, _): """env:KEY or env:KEY:DEFAULT""" e = s.split(":") if len(e) > 3 or len(e) == 1 or e[0] != "env": raise ValueError() elif len(e) == 2: # Note: this can/should raise a KeyError (according to spec). return os.environ[e[1]] else: # len(e) == 3 return os.environ.get(e[1], e[2])
[ "def", "_replace_envvar", "(", "s", ",", "_", ")", ":", "e", "=", "s", ".", "split", "(", "\":\"", ")", "if", "len", "(", "e", ")", ">", "3", "or", "len", "(", "e", ")", "==", "1", "or", "e", "[", "0", "]", "!=", "\"env\"", ":", "raise", ...
env:KEY or env:KEY:DEFAULT
[ "env", ":", "KEY", "or", "env", ":", "KEY", ":", "DEFAULT" ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L176-L185
hayd/ctox
ctox/subst.py
_replace_config
def _replace_config(s, env): """[sectionname]optionname""" m = re.match(r"\[(.*?)\](.*)", s) if m: section, option = m.groups() expanded = env.config.get(section, option) return '\n'.join([expand_factor_conditions(e, env) for e in expanded.split("\n")]) else: raise ValueError()
python
def _replace_config(s, env): """[sectionname]optionname""" m = re.match(r"\[(.*?)\](.*)", s) if m: section, option = m.groups() expanded = env.config.get(section, option) return '\n'.join([expand_factor_conditions(e, env) for e in expanded.split("\n")]) else: raise ValueError()
[ "def", "_replace_config", "(", "s", ",", "env", ")", ":", "m", "=", "re", ".", "match", "(", "r\"\\[(.*?)\\](.*)\"", ",", "s", ")", "if", "m", ":", "section", ",", "option", "=", "m", ".", "groups", "(", ")", "expanded", "=", "env", ".", "config", ...
[sectionname]optionname
[ "[", "sectionname", "]", "optionname" ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L188-L197
hayd/ctox
ctox/subst.py
_replace_posargs
def _replace_posargs(s, env): "posargs:DEFAULT" e = re.split(r'\s*\:\s*', s) if e and e[0] == "posargs": from ctox.main import positional_args return (" ".join(positional_args(env.options)) or (e[1] if len(e) > 1 else "")) else: raise ValueError()
python
def _replace_posargs(s, env): "posargs:DEFAULT" e = re.split(r'\s*\:\s*', s) if e and e[0] == "posargs": from ctox.main import positional_args return (" ".join(positional_args(env.options)) or (e[1] if len(e) > 1 else "")) else: raise ValueError()
[ "def", "_replace_posargs", "(", "s", ",", "env", ")", ":", "e", "=", "re", ".", "split", "(", "r'\\s*\\:\\s*'", ",", "s", ")", "if", "e", "and", "e", "[", "0", "]", "==", "\"posargs\"", ":", "from", "ctox", ".", "main", "import", "positional_args", ...
posargs:DEFAULT
[ "posargs", ":", "DEFAULT" ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L200-L208
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/csnr.py
csnr
def csnr(freqs, hc, hn, fmrg, fpeak, prefactor=1.0): """Calculate the SNR of a frequency domain waveform. SNRCalculation is a function that takes waveforms (frequencies and hcs) and a noise curve, and returns SNRs for all binary phases and the whole waveform. Arguments: freqs (1D or 2D array of floats): Frequencies corresponding to the waveforms. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. hc (1D or 2D array of floats): Characteristic strain of the waveforms. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. fmrg: (scalar float or 1D array of floats): Merger frequency of each binary separating inspiral from merger phase. (0.014/M) Shape is (num binaries,) if more than one binary. fpeak: (scalar float or 1D array of floats): Peak frequency of each binary separating merger from ringdown phase. (0.014/M) Shape is (num binaries,) if more than one binary. hn: (1D or 2D array of floats): Characteristic strain of the noise. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. prefactor (float, optional): Factor to multiply snr (not snr^2) integral values by. Default is 1.0. Returns: (dict): Dictionary with SNRs from each phase. 
""" cfd = os.path.dirname(os.path.abspath(__file__)) if 'phenomd.cpython-35m-darwin.so' in os.listdir(cfd): exec_call = cfd + '/phenomd.cpython-35m-darwin.so' else: exec_call = cfd + '/phenomd/phenomd.so' c_obj = ctypes.CDLL(exec_call) # check dimensionality remove_axis = False try: len(fmrg) except TypeError: remove_axis = True freqs, hc = np.array([freqs]), np.array([hc]) hn, fmrg, fpeak = np.array([hn]), np.array([fmrg]), np.array([fpeak]) # this implimentation in ctypes works with 1D arrays freqs_in = freqs.flatten() hc_in = hc.flatten() hn_in = hn.flatten() num_binaries, length_of_signal = hc.shape # prepare outout arrays snr_cast = ctypes.c_double*num_binaries snr_all = snr_cast() snr_ins = snr_cast() snr_mrg = snr_cast() snr_rd = snr_cast() # find SNR values c_obj.SNR_function(ctypes.byref(snr_all), ctypes.byref(snr_ins), ctypes.byref(snr_mrg), ctypes.byref(snr_rd), freqs_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), hc_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), hn_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), fmrg.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), fpeak.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.c_int(length_of_signal), ctypes.c_int(num_binaries)) # make into numpy arrays snr_all, snr_ins, = np.ctypeslib.as_array(snr_all), np.ctypeslib.as_array(snr_ins) snr_mrg, snr_rd = np.ctypeslib.as_array(snr_mrg), np.ctypeslib.as_array(snr_rd) # remove axis if one binary if remove_axis: snr_all, snr_ins, snr_mrg, snr_rd = snr_all[0], snr_ins[0], snr_mrg[0], snr_rd[0] # prepare output by multiplying by prefactor return ({'all': snr_all*prefactor, 'ins': snr_ins*prefactor, 'mrg': snr_mrg*prefactor, 'rd': snr_rd*prefactor})
python
def csnr(freqs, hc, hn, fmrg, fpeak, prefactor=1.0): """Calculate the SNR of a frequency domain waveform. SNRCalculation is a function that takes waveforms (frequencies and hcs) and a noise curve, and returns SNRs for all binary phases and the whole waveform. Arguments: freqs (1D or 2D array of floats): Frequencies corresponding to the waveforms. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. hc (1D or 2D array of floats): Characteristic strain of the waveforms. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. fmrg: (scalar float or 1D array of floats): Merger frequency of each binary separating inspiral from merger phase. (0.014/M) Shape is (num binaries,) if more than one binary. fpeak: (scalar float or 1D array of floats): Peak frequency of each binary separating merger from ringdown phase. (0.014/M) Shape is (num binaries,) if more than one binary. hn: (1D or 2D array of floats): Characteristic strain of the noise. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. prefactor (float, optional): Factor to multiply snr (not snr^2) integral values by. Default is 1.0. Returns: (dict): Dictionary with SNRs from each phase. 
""" cfd = os.path.dirname(os.path.abspath(__file__)) if 'phenomd.cpython-35m-darwin.so' in os.listdir(cfd): exec_call = cfd + '/phenomd.cpython-35m-darwin.so' else: exec_call = cfd + '/phenomd/phenomd.so' c_obj = ctypes.CDLL(exec_call) # check dimensionality remove_axis = False try: len(fmrg) except TypeError: remove_axis = True freqs, hc = np.array([freqs]), np.array([hc]) hn, fmrg, fpeak = np.array([hn]), np.array([fmrg]), np.array([fpeak]) # this implimentation in ctypes works with 1D arrays freqs_in = freqs.flatten() hc_in = hc.flatten() hn_in = hn.flatten() num_binaries, length_of_signal = hc.shape # prepare outout arrays snr_cast = ctypes.c_double*num_binaries snr_all = snr_cast() snr_ins = snr_cast() snr_mrg = snr_cast() snr_rd = snr_cast() # find SNR values c_obj.SNR_function(ctypes.byref(snr_all), ctypes.byref(snr_ins), ctypes.byref(snr_mrg), ctypes.byref(snr_rd), freqs_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), hc_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), hn_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), fmrg.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), fpeak.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.c_int(length_of_signal), ctypes.c_int(num_binaries)) # make into numpy arrays snr_all, snr_ins, = np.ctypeslib.as_array(snr_all), np.ctypeslib.as_array(snr_ins) snr_mrg, snr_rd = np.ctypeslib.as_array(snr_mrg), np.ctypeslib.as_array(snr_rd) # remove axis if one binary if remove_axis: snr_all, snr_ins, snr_mrg, snr_rd = snr_all[0], snr_ins[0], snr_mrg[0], snr_rd[0] # prepare output by multiplying by prefactor return ({'all': snr_all*prefactor, 'ins': snr_ins*prefactor, 'mrg': snr_mrg*prefactor, 'rd': snr_rd*prefactor})
[ "def", "csnr", "(", "freqs", ",", "hc", ",", "hn", ",", "fmrg", ",", "fpeak", ",", "prefactor", "=", "1.0", ")", ":", "cfd", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "if", "'phen...
Calculate the SNR of a frequency domain waveform. SNRCalculation is a function that takes waveforms (frequencies and hcs) and a noise curve, and returns SNRs for all binary phases and the whole waveform. Arguments: freqs (1D or 2D array of floats): Frequencies corresponding to the waveforms. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. hc (1D or 2D array of floats): Characteristic strain of the waveforms. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. fmrg: (scalar float or 1D array of floats): Merger frequency of each binary separating inspiral from merger phase. (0.014/M) Shape is (num binaries,) if more than one binary. fpeak: (scalar float or 1D array of floats): Peak frequency of each binary separating merger from ringdown phase. (0.014/M) Shape is (num binaries,) if more than one binary. hn: (1D or 2D array of floats): Characteristic strain of the noise. Shape is (num binaries, num_points) if 2D. Shape is (num_points,) if 1D for one binary. prefactor (float, optional): Factor to multiply snr (not snr^2) integral values by. Default is 1.0. Returns: (dict): Dictionary with SNRs from each phase.
[ "Calculate", "the", "SNR", "of", "a", "frequency", "domain", "waveform", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/csnr.py#L21-L102
limix/limix-core
limix_core/covar/cov3kronSumLR.py
Cov3KronSumLR.solve_t
def solve_t(self, Mt): """ Mt is dim_r x dim_c x d tensor """ if len(Mt.shape)==2: _Mt = Mt[:, :, sp.newaxis] else: _Mt = Mt LMt = vei_CoR_veX(_Mt, R=self.Lr(), C=self.Lc()) DMt = self.D()[:, :, sp.newaxis] * LMt WrDMtWc = vei_CoR_veX(DMt, R=self.Wr().T, C=self.Wc().T) ve_WrDMtWc = sp.reshape(WrDMtWc, (WrDMtWc.shape[0] * WrDMtWc.shape[1], _Mt.shape[2]), order='F') Hi_ve_WrDMtWc = la.cho_solve((self.H_chol(), True), ve_WrDMtWc) vei_HiveWrDMtWc = Hi_ve_WrDMtWc.reshape(WrDMtWc.shape, order = 'F') Wr_HiveWrDMtWc_Wc = vei_CoR_veX(vei_HiveWrDMtWc, R=self.Wr(), C=self.Wc()) DWrHiveWrDMtWcWc = self.D()[:,:,sp.newaxis] * Wr_HiveWrDMtWc_Wc RV = DMt - DWrHiveWrDMtWcWc RV = vei_CoR_veX(RV, R=self.Lr().T, C=self.Lc().T) if len(Mt.shape)==2: RV = RV[:, :, 0] return RV
python
def solve_t(self, Mt): """ Mt is dim_r x dim_c x d tensor """ if len(Mt.shape)==2: _Mt = Mt[:, :, sp.newaxis] else: _Mt = Mt LMt = vei_CoR_veX(_Mt, R=self.Lr(), C=self.Lc()) DMt = self.D()[:, :, sp.newaxis] * LMt WrDMtWc = vei_CoR_veX(DMt, R=self.Wr().T, C=self.Wc().T) ve_WrDMtWc = sp.reshape(WrDMtWc, (WrDMtWc.shape[0] * WrDMtWc.shape[1], _Mt.shape[2]), order='F') Hi_ve_WrDMtWc = la.cho_solve((self.H_chol(), True), ve_WrDMtWc) vei_HiveWrDMtWc = Hi_ve_WrDMtWc.reshape(WrDMtWc.shape, order = 'F') Wr_HiveWrDMtWc_Wc = vei_CoR_veX(vei_HiveWrDMtWc, R=self.Wr(), C=self.Wc()) DWrHiveWrDMtWcWc = self.D()[:,:,sp.newaxis] * Wr_HiveWrDMtWc_Wc RV = DMt - DWrHiveWrDMtWcWc RV = vei_CoR_veX(RV, R=self.Lr().T, C=self.Lc().T) if len(Mt.shape)==2: RV = RV[:, :, 0] return RV
[ "def", "solve_t", "(", "self", ",", "Mt", ")", ":", "if", "len", "(", "Mt", ".", "shape", ")", "==", "2", ":", "_Mt", "=", "Mt", "[", ":", ",", ":", ",", "sp", ".", "newaxis", "]", "else", ":", "_Mt", "=", "Mt", "LMt", "=", "vei_CoR_veX", "...
Mt is dim_r x dim_c x d tensor
[ "Mt", "is", "dim_r", "x", "dim_c", "x", "d", "tensor" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/covar/cov3kronSumLR.py#L307-L324
limix/limix-core
limix_core/covar/cov3kronSumLR.py
Cov3KronSumLR._O_dot
def _O_dot(self, Mt): """ Mt is dim_r x dim_c x d tensor """ DMt = self.D()[:, :, sp.newaxis] * Mt WrDMtWc = vei_CoR_veX(DMt, R=self.Wr().T, C=self.Wc().T) ve_WrDMtWc = sp.reshape(WrDMtWc, (WrDMtWc.shape[0] * WrDMtWc.shape[1], Mt.shape[2]), order='F') Hi_ve_WrDMtWc = la.cho_solve((self.H_chol(), True), ve_WrDMtWc) vei_HiveWrDMtWc = Hi_ve_WrDMtWc.reshape(WrDMtWc.shape, order = 'F') Wr_HiveWrDMtWc_Wc = vei_CoR_veX(vei_HiveWrDMtWc, R=self.Wr(), C=self.Wc()) DWrHiveWrDMtWcWc = self.D()[:,:,sp.newaxis] * Wr_HiveWrDMtWc_Wc RV = DMt - DWrHiveWrDMtWcWc return RV
python
def _O_dot(self, Mt): """ Mt is dim_r x dim_c x d tensor """ DMt = self.D()[:, :, sp.newaxis] * Mt WrDMtWc = vei_CoR_veX(DMt, R=self.Wr().T, C=self.Wc().T) ve_WrDMtWc = sp.reshape(WrDMtWc, (WrDMtWc.shape[0] * WrDMtWc.shape[1], Mt.shape[2]), order='F') Hi_ve_WrDMtWc = la.cho_solve((self.H_chol(), True), ve_WrDMtWc) vei_HiveWrDMtWc = Hi_ve_WrDMtWc.reshape(WrDMtWc.shape, order = 'F') Wr_HiveWrDMtWc_Wc = vei_CoR_veX(vei_HiveWrDMtWc, R=self.Wr(), C=self.Wc()) DWrHiveWrDMtWcWc = self.D()[:,:,sp.newaxis] * Wr_HiveWrDMtWc_Wc RV = DMt - DWrHiveWrDMtWcWc return RV
[ "def", "_O_dot", "(", "self", ",", "Mt", ")", ":", "DMt", "=", "self", ".", "D", "(", ")", "[", ":", ",", ":", ",", "sp", ".", "newaxis", "]", "*", "Mt", "WrDMtWc", "=", "vei_CoR_veX", "(", "DMt", ",", "R", "=", "self", ".", "Wr", "(", ")",...
Mt is dim_r x dim_c x d tensor
[ "Mt", "is", "dim_r", "x", "dim_c", "x", "d", "tensor" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/covar/cov3kronSumLR.py#L429-L441
limix/limix-core
limix_core/mean/linear.py
Linear.clearFixedEffect
def clearFixedEffect(self): """ erase all fixed effects """ self._A = [] self._F = [] self._B = [] self._A_identity = [] self._REML_term = [] self._n_terms = 0 self._n_fixed_effs = 0 self._n_fixed_effs_REML = 0 self.indicator = {'term':np.array([]), 'row':np.array([]), 'col':np.array([])} self.clear_cache('Fstar','Astar','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
python
def clearFixedEffect(self): """ erase all fixed effects """ self._A = [] self._F = [] self._B = [] self._A_identity = [] self._REML_term = [] self._n_terms = 0 self._n_fixed_effs = 0 self._n_fixed_effs_REML = 0 self.indicator = {'term':np.array([]), 'row':np.array([]), 'col':np.array([])} self.clear_cache('Fstar','Astar','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "clearFixedEffect", "(", "self", ")", ":", "self", ".", "_A", "=", "[", "]", "self", ".", "_F", "=", "[", "]", "self", ".", "_B", "=", "[", "]", "self", ".", "_A_identity", "=", "[", "]", "self", ".", "_REML_term", "=", "[", "]", "self",...
erase all fixed effects
[ "erase", "all", "fixed", "effects" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L152-L168
limix/limix-core
limix_core/mean/linear.py
Linear.addFixedEffect
def addFixedEffect(self,F=None,A=None, REML=True, index=None): """ set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, just append. """ if F is None: F = np.ones((self.N,1)) if A is None: A = np.eye(self.P) A_identity = True elif (A.shape == (self.P,self.P)) & (A==np.eye(self.P)).all(): A_identity = True else: A_identity = False assert F.shape[0]==self.N, "F dimension mismatch" assert A.shape[1]==self.P, "A dimension mismatch" if index is None or index==self.n_terms: self.F.append(F) self.A.append(A) self.A_identity.append(A_identity) self.REML_term.append(REML) # build B matrix and indicator self.B.append(np.zeros((F.shape[1],A.shape[0]))) self._n_terms+=1 self._update_indicator(F.shape[1],A.shape[0]) elif index >self.n_terms: raise Exception("index exceeds max index of terms") else: self._n_fixed_effs-=self.F[index].shape[1]*self.A[index].shape[0] if self.REML_term[index]: self._n_fixed_effs_REML-=self.F[index].shape[1]*self.A[index].shape[0] self.F[index] = F self.A[index] = A self.A_identity[index] = A_identity self.REML_term[index]=REML self.B[index] = np.zeros((F.shape[1],A.shape[0])) self._rebuild_indicator() self._n_fixed_effs+=F.shape[1]*A.shape[0] if REML: self._n_fixed_effs_REML+=F.shape[1]*A.shape[0] self.clear_cache('Fstar','Astar','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
python
def addFixedEffect(self,F=None,A=None, REML=True, index=None): """ set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, just append. """ if F is None: F = np.ones((self.N,1)) if A is None: A = np.eye(self.P) A_identity = True elif (A.shape == (self.P,self.P)) & (A==np.eye(self.P)).all(): A_identity = True else: A_identity = False assert F.shape[0]==self.N, "F dimension mismatch" assert A.shape[1]==self.P, "A dimension mismatch" if index is None or index==self.n_terms: self.F.append(F) self.A.append(A) self.A_identity.append(A_identity) self.REML_term.append(REML) # build B matrix and indicator self.B.append(np.zeros((F.shape[1],A.shape[0]))) self._n_terms+=1 self._update_indicator(F.shape[1],A.shape[0]) elif index >self.n_terms: raise Exception("index exceeds max index of terms") else: self._n_fixed_effs-=self.F[index].shape[1]*self.A[index].shape[0] if self.REML_term[index]: self._n_fixed_effs_REML-=self.F[index].shape[1]*self.A[index].shape[0] self.F[index] = F self.A[index] = A self.A_identity[index] = A_identity self.REML_term[index]=REML self.B[index] = np.zeros((F.shape[1],A.shape[0])) self._rebuild_indicator() self._n_fixed_effs+=F.shape[1]*A.shape[0] if REML: self._n_fixed_effs_REML+=F.shape[1]*A.shape[0] self.clear_cache('Fstar','Astar','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "addFixedEffect", "(", "self", ",", "F", "=", "None", ",", "A", "=", "None", ",", "REML", "=", "True", ",", "index", "=", "None", ")", ":", "if", "F", "is", "None", ":", "F", "=", "np", ".", "ones", "(", "(", "self", ".", "N", ",", "...
set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, just append.
[ "set", "sample", "and", "trait", "designs", "F", ":", "NxK", "sample", "design", "A", ":", "LxP", "sample", "design", "REML", ":", "REML", "for", "this", "term?", "index", ":", "index", "of", "which", "fixed", "effect", "to", "replace", ".", "If", "Non...
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L170-L217
limix/limix-core
limix_core/mean/linear.py
Linear.removeFixedEffect
def removeFixedEffect(self, index=None): """ set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, remove last term. """ if self._n_terms==0: pass if index is None or index==(self._n_terms-1): self._n_terms-=1 F = self._F.pop() #= self.F[:-1] A = self._A.pop() #= self.A[:-1] self._A_identity.pop() #= self.A_identity[:-1] REML_term = self._REML_term.pop()# = self.REML_term[:-1] self._B.pop()# = self.B[:-1] self._n_fixed_effs-=F.shape[1]*A.shape[0] if REML_term: self._n_fixed_effs_REML-=F.shape[1]*A.shape[0] pass elif index >= self.n_terms: raise Exception("index exceeds max index of terms") else: raise NotImplementedError("currently only last term can be removed") pass self._rebuild_indicator() self.clear_cache('Fstar','Astar','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
python
def removeFixedEffect(self, index=None): """ set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, remove last term. """ if self._n_terms==0: pass if index is None or index==(self._n_terms-1): self._n_terms-=1 F = self._F.pop() #= self.F[:-1] A = self._A.pop() #= self.A[:-1] self._A_identity.pop() #= self.A_identity[:-1] REML_term = self._REML_term.pop()# = self.REML_term[:-1] self._B.pop()# = self.B[:-1] self._n_fixed_effs-=F.shape[1]*A.shape[0] if REML_term: self._n_fixed_effs_REML-=F.shape[1]*A.shape[0] pass elif index >= self.n_terms: raise Exception("index exceeds max index of terms") else: raise NotImplementedError("currently only last term can be removed") pass self._rebuild_indicator() self.clear_cache('Fstar','Astar','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "removeFixedEffect", "(", "self", ",", "index", "=", "None", ")", ":", "if", "self", ".", "_n_terms", "==", "0", ":", "pass", "if", "index", "is", "None", "or", "index", "==", "(", "self", ".", "_n_terms", "-", "1", ")", ":", "self", ".", ...
set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, remove last term.
[ "set", "sample", "and", "trait", "designs", "F", ":", "NxK", "sample", "design", "A", ":", "LxP", "sample", "design", "REML", ":", "REML", "for", "this", "term?", "index", ":", "index", "of", "which", "fixed", "effect", "to", "replace", ".", "If", "Non...
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L219-L251
limix/limix-core
limix_core/mean/linear.py
Linear.Y
def Y(self,value): """ set phenotype """ self._N,self._P = value.shape self._Y = value self.clear_cache('Ystar1','Ystar','Yhat','LRLdiag_Yhat', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
python
def Y(self,value): """ set phenotype """ self._N,self._P = value.shape self._Y = value self.clear_cache('Ystar1','Ystar','Yhat','LRLdiag_Yhat', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "Y", "(", "self", ",", "value", ")", ":", "self", ".", "_N", ",", "self", ".", "_P", "=", "value", ".", "shape", "self", ".", "_Y", "=", "value", "self", ".", "clear_cache", "(", "'Ystar1'", ",", "'Ystar'", ",", "'Yhat'", ",", "'LRLdiag_Yhat...
set phenotype
[ "set", "phenotype" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L254-L259
limix/limix-core
limix_core/mean/linear.py
Linear.Lr
def Lr(self,value): """ set row rotation """ assert value.shape[0]==self._N, 'dimension mismatch' assert value.shape[1]==self._N, 'dimension mismatch' self._Lr = value self.clear_cache('Fstar','Ystar1','Ystar','Yhat','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
python
def Lr(self,value): """ set row rotation """ assert value.shape[0]==self._N, 'dimension mismatch' assert value.shape[1]==self._N, 'dimension mismatch' self._Lr = value self.clear_cache('Fstar','Ystar1','Ystar','Yhat','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "Lr", "(", "self", ",", "value", ")", ":", "assert", "value", ".", "shape", "[", "0", "]", "==", "self", ".", "_N", ",", "'dimension mismatch'", "assert", "value", ".", "shape", "[", "1", "]", "==", "self", ".", "_N", ",", "'dimension mismatch...
set row rotation
[ "set", "row", "rotation" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L262-L272
limix/limix-core
limix_core/mean/linear.py
Linear.Lc
def Lc(self,value): """ set col rotation """ assert value.shape[0]==self._P, 'Lc dimension mismatch' assert value.shape[1]==self._P, 'Lc dimension mismatch' self._Lc = value self.clear_cache('Astar','Ystar','Yhat','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
python
def Lc(self,value): """ set col rotation """ assert value.shape[0]==self._P, 'Lc dimension mismatch' assert value.shape[1]==self._P, 'Lc dimension mismatch' self._Lc = value self.clear_cache('Astar','Ystar','Yhat','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "Lc", "(", "self", ",", "value", ")", ":", "assert", "value", ".", "shape", "[", "0", "]", "==", "self", ".", "_P", ",", "'Lc dimension mismatch'", "assert", "value", ".", "shape", "[", "1", "]", "==", "self", ".", "_P", ",", "'Lc dimension mi...
set col rotation
[ "set", "col", "rotation" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L275-L283
limix/limix-core
limix_core/mean/linear.py
Linear.d
def d(self,value): """ set anisotropic scaling """ assert value.shape[0]==self._P*self._N, 'd dimension mismatch' self._d = value self.clear_cache('Yhat','Xhat','Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
python
def d(self,value): """ set anisotropic scaling """ assert value.shape[0]==self._P*self._N, 'd dimension mismatch' self._d = value self.clear_cache('Yhat','Xhat','Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "d", "(", "self", ",", "value", ")", ":", "assert", "value", ".", "shape", "[", "0", "]", "==", "self", ".", "_P", "*", "self", ".", "_N", ",", "'d dimension mismatch'", "self", ".", "_d", "=", "value", "self", ".", "clear_cache", "(", "'Yha...
set anisotropic scaling
[ "set", "anisotropic", "scaling" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L286-L292
limix/limix-core
limix_core/mean/linear.py
Linear.Zstar
def Zstar(self): """ predict the value of the fixed effect """ RV = self.Ystar().copy() for term_i in range(self.n_terms): if self.identity_trick and self.A_identity[term_i]: RV-=np.dot(self.Fstar()[term_i],self.B_hat()[term_i]) else: RV-=np.dot(self.Fstar()[term_i],np.dot(self.B_hat()[term_i],self.Astar()[term_i])) self.clear_cache('DLZ') return RV
python
def Zstar(self): """ predict the value of the fixed effect """ RV = self.Ystar().copy() for term_i in range(self.n_terms): if self.identity_trick and self.A_identity[term_i]: RV-=np.dot(self.Fstar()[term_i],self.B_hat()[term_i]) else: RV-=np.dot(self.Fstar()[term_i],np.dot(self.B_hat()[term_i],self.Astar()[term_i])) self.clear_cache('DLZ') return RV
[ "def", "Zstar", "(", "self", ")", ":", "RV", "=", "self", ".", "Ystar", "(", ")", ".", "copy", "(", ")", "for", "term_i", "in", "range", "(", "self", ".", "n_terms", ")", ":", "if", "self", ".", "identity_trick", "and", "self", ".", "A_identity", ...
predict the value of the fixed effect
[ "predict", "the", "value", "of", "the", "fixed", "effect" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L435-L444
limix/limix-core
limix_core/mean/linear.py
Linear.Areml_eigh
def Areml_eigh(self): """compute the eigenvalue decomposition of Astar""" s,U = LA.eigh(self.Areml(),lower=True) i_pos = (s>1e-10) s = s[i_pos] U = U[:,i_pos] return s,U
python
def Areml_eigh(self): """compute the eigenvalue decomposition of Astar""" s,U = LA.eigh(self.Areml(),lower=True) i_pos = (s>1e-10) s = s[i_pos] U = U[:,i_pos] return s,U
[ "def", "Areml_eigh", "(", "self", ")", ":", "s", ",", "U", "=", "LA", ".", "eigh", "(", "self", ".", "Areml", "(", ")", ",", "lower", "=", "True", ")", "i_pos", "=", "(", "s", ">", "1e-10", ")", "s", "=", "s", "[", "i_pos", "]", "U", "=", ...
compute the eigenvalue decomposition of Astar
[ "compute", "the", "eigenvalue", "decomposition", "of", "Astar" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L447-L453
limix/limix-core
limix_core/mean/linear.py
Linear.predict
def predict(self): """ predict the value of the fixed effect """ RV = np.zeros((self.N,self.P)) for term_i in range(self.n_terms): RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i])) return RV
python
def predict(self): """ predict the value of the fixed effect """ RV = np.zeros((self.N,self.P)) for term_i in range(self.n_terms): RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i])) return RV
[ "def", "predict", "(", "self", ")", ":", "RV", "=", "np", ".", "zeros", "(", "(", "self", ".", "N", ",", "self", ".", "P", ")", ")", "for", "term_i", "in", "range", "(", "self", ".", "n_terms", ")", ":", "RV", "+=", "np", ".", "dot", "(", "...
predict the value of the fixed effect
[ "predict", "the", "value", "of", "the", "fixed", "effect" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L520-L525
limix/limix-core
limix_core/mean/linear.py
Linear.getGradient
def getGradient(self,j): """ get rotated gradient for fixed effect i """ i = int(self.indicator['term'][j]) r = int(self.indicator['row'][j]) c = int(self.indicator['col'][j]) rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:]) return rv
python
def getGradient(self,j): """ get rotated gradient for fixed effect i """ i = int(self.indicator['term'][j]) r = int(self.indicator['row'][j]) c = int(self.indicator['col'][j]) rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:]) return rv
[ "def", "getGradient", "(", "self", ",", "j", ")", ":", "i", "=", "int", "(", "self", ".", "indicator", "[", "'term'", "]", "[", "j", "]", ")", "r", "=", "int", "(", "self", ".", "indicator", "[", "'row'", "]", "[", "j", "]", ")", "c", "=", ...
get rotated gradient for fixed effect i
[ "get", "rotated", "gradient", "for", "fixed", "effect", "i" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L533-L539
limix/limix-core
limix_core/mean/linear.py
Linear.XstarT_dot
def XstarT_dot(self,M): """ get dot product of Xhat and M """ if 0: #TODO: implement this properly pass else: RV = np.dot(self.Xstar().T,M) return RV
python
def XstarT_dot(self,M): """ get dot product of Xhat and M """ if 0: #TODO: implement this properly pass else: RV = np.dot(self.Xstar().T,M) return RV
[ "def", "XstarT_dot", "(", "self", ",", "M", ")", ":", "if", "0", ":", "#TODO: implement this properly", "pass", "else", ":", "RV", "=", "np", ".", "dot", "(", "self", ".", "Xstar", "(", ")", ".", "T", ",", "M", ")", "return", "RV" ]
get dot product of Xhat and M
[ "get", "dot", "product", "of", "Xhat", "and", "M" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L541-L548
limix/limix-core
limix_core/mean/linear.py
Linear.getResiduals
def getResiduals(self): """ regress out fixed effects and results residuals """ X = np.zeros((self.N*self.P,self.n_fixed_effs)) ip = 0 for i in range(self.n_terms): Ki = self.A[i].shape[0]*self.F[i].shape[1] X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i]) ip += Ki y = np.reshape(self.Y,(self.Y.size,1),order='F') RV = regressOut(y,X) RV = np.reshape(RV,self.Y.shape,order='F') return RV
python
def getResiduals(self): """ regress out fixed effects and results residuals """ X = np.zeros((self.N*self.P,self.n_fixed_effs)) ip = 0 for i in range(self.n_terms): Ki = self.A[i].shape[0]*self.F[i].shape[1] X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i]) ip += Ki y = np.reshape(self.Y,(self.Y.size,1),order='F') RV = regressOut(y,X) RV = np.reshape(RV,self.Y.shape,order='F') return RV
[ "def", "getResiduals", "(", "self", ")", ":", "X", "=", "np", ".", "zeros", "(", "(", "self", ".", "N", "*", "self", ".", "P", ",", "self", ".", "n_fixed_effs", ")", ")", "ip", "=", "0", "for", "i", "in", "range", "(", "self", ".", "n_terms", ...
regress out fixed effects and results residuals
[ "regress", "out", "fixed", "effects", "and", "results", "residuals" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L550-L561
limix/limix-core
limix_core/mean/linear.py
Linear.getParams
def getParams(self): """ get params """ rv = np.array([]) if self.n_terms>0: rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)]) return rv
python
def getParams(self): """ get params """ rv = np.array([]) if self.n_terms>0: rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)]) return rv
[ "def", "getParams", "(", "self", ")", ":", "rv", "=", "np", ".", "array", "(", "[", "]", ")", "if", "self", ".", "n_terms", ">", "0", ":", "rv", "=", "np", ".", "concatenate", "(", "[", "np", ".", "reshape", "(", "self", ".", "B", "[", "term_...
get params
[ "get", "params" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L567-L572
limix/limix-core
limix_core/mean/linear.py
Linear.setParams
def setParams(self,params): """ set params """ start = 0 for i in range(self.n_terms): n_effects = self.B[i].size self.B[i] = np.reshape(params[start:start+n_effects],self.B[i].shape, order='F') start += n_effects
python
def setParams(self,params): """ set params """ start = 0 for i in range(self.n_terms): n_effects = self.B[i].size self.B[i] = np.reshape(params[start:start+n_effects],self.B[i].shape, order='F') start += n_effects
[ "def", "setParams", "(", "self", ",", "params", ")", ":", "start", "=", "0", "for", "i", "in", "range", "(", "self", ".", "n_terms", ")", ":", "n_effects", "=", "self", ".", "B", "[", "i", "]", ".", "size", "self", ".", "B", "[", "i", "]", "=...
set params
[ "set", "params" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L574-L580
limix/limix-core
limix_core/mean/linear.py
Linear._set_toChange
def _set_toChange(x): """ set variables in list x toChange """ for key in list(x.keys()): self.toChange[key] = True
python
def _set_toChange(x): """ set variables in list x toChange """ for key in list(x.keys()): self.toChange[key] = True
[ "def", "_set_toChange", "(", "x", ")", ":", "for", "key", "in", "list", "(", "x", ".", "keys", "(", ")", ")", ":", "self", ".", "toChange", "[", "key", "]", "=", "True" ]
set variables in list x toChange
[ "set", "variables", "in", "list", "x", "toChange" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L590-L593
limix/limix-core
limix_core/mean/linear.py
Linear._update_indicator
def _update_indicator(self,K,L): """ update the indicator """ _update = {'term': self.n_terms*np.ones((K,L)).T.ravel(), 'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(), 'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()} for key in list(_update.keys()): self.indicator[key] = np.concatenate([self.indicator[key],_update[key]])
python
def _update_indicator(self,K,L): """ update the indicator """ _update = {'term': self.n_terms*np.ones((K,L)).T.ravel(), 'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(), 'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()} for key in list(_update.keys()): self.indicator[key] = np.concatenate([self.indicator[key],_update[key]])
[ "def", "_update_indicator", "(", "self", ",", "K", ",", "L", ")", ":", "_update", "=", "{", "'term'", ":", "self", ".", "n_terms", "*", "np", ".", "ones", "(", "(", "K", ",", "L", ")", ")", ".", "T", ".", "ravel", "(", ")", ",", "'row'", ":",...
update the indicator
[ "update", "the", "indicator" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L595-L601
limix/limix-core
limix_core/mean/linear.py
Linear._rebuild_indicator
def _rebuild_indicator(self): """ update the indicator """ indicator = {'term':np.array([]), 'row':np.array([]), 'col':np.array([])} for term in range(self.n_terms): L = self.A[term].shape[0] K = self.F[term].shape[1] _update = {'term': (term+1)*np.ones((K,L)).T.ravel(), 'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(), 'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()} for key in list(_update.keys()): indicator[key] = np.concatenate([indicator[key],_update[key]]) self.indicator = indicator
python
def _rebuild_indicator(self): """ update the indicator """ indicator = {'term':np.array([]), 'row':np.array([]), 'col':np.array([])} for term in range(self.n_terms): L = self.A[term].shape[0] K = self.F[term].shape[1] _update = {'term': (term+1)*np.ones((K,L)).T.ravel(), 'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(), 'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()} for key in list(_update.keys()): indicator[key] = np.concatenate([indicator[key],_update[key]]) self.indicator = indicator
[ "def", "_rebuild_indicator", "(", "self", ")", ":", "indicator", "=", "{", "'term'", ":", "np", ".", "array", "(", "[", "]", ")", ",", "'row'", ":", "np", ".", "array", "(", "[", "]", ")", ",", "'col'", ":", "np", ".", "array", "(", "[", "]", ...
update the indicator
[ "update", "the", "indicator" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L603-L617
limix/limix-core
limix_core/util/linalg.py
vei_CoR_veX
def vei_CoR_veX(X, C=None, R=None): """ Args: X: NxPxS tensor C: CxC row covariance (if None: C set to I_PP) R: NxN row covariance (if None: R set to I_NN) Returns: NxPxS tensor obtained as ve^{-1}((C \kron R) ve(X)) where ve(X) reshapes X as a NPxS matrix. """ _X = X.transpose((0,2,1)) if R is not None: RV = sp.tensordot(R, _X, (1,0)) else: RV = _X if C is not None: RV = sp.dot(RV, C.T) return RV.transpose((0,2,1))
python
def vei_CoR_veX(X, C=None, R=None): """ Args: X: NxPxS tensor C: CxC row covariance (if None: C set to I_PP) R: NxN row covariance (if None: R set to I_NN) Returns: NxPxS tensor obtained as ve^{-1}((C \kron R) ve(X)) where ve(X) reshapes X as a NPxS matrix. """ _X = X.transpose((0,2,1)) if R is not None: RV = sp.tensordot(R, _X, (1,0)) else: RV = _X if C is not None: RV = sp.dot(RV, C.T) return RV.transpose((0,2,1))
[ "def", "vei_CoR_veX", "(", "X", ",", "C", "=", "None", ",", "R", "=", "None", ")", ":", "_X", "=", "X", ".", "transpose", "(", "(", "0", ",", "2", ",", "1", ")", ")", "if", "R", "is", "not", "None", ":", "RV", "=", "sp", ".", "tensordot", ...
Args: X: NxPxS tensor C: CxC row covariance (if None: C set to I_PP) R: NxN row covariance (if None: R set to I_NN) Returns: NxPxS tensor obtained as ve^{-1}((C \kron R) ve(X)) where ve(X) reshapes X as a NPxS matrix.
[ "Args", ":", "X", ":", "NxPxS", "tensor", "C", ":", "CxC", "row", "covariance", "(", "if", "None", ":", "C", "set", "to", "I_PP", ")", "R", ":", "NxN", "row", "covariance", "(", "if", "None", ":", "R", "set", "to", "I_NN", ")", "Returns", ":", ...
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/util/linalg.py#L3-L17
jbm950/pygame_toolbox
pygame_toolbox/tilegame_tools/__init__.py
Tile.initialize_shade
def initialize_shade(self, shade_name, shade_color, alpha): """This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.1) """ # Create the pygame surface self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())] # Fill the surface with a solid color or an image if type(shade_color) == str: background = pygame.image.load(shade_color).convert() background = pygame.transform.scale(background, (self.image.get_width(), self.image.get_height())) self.shades[shade_name][1].blit(background, (0, 0)) # Otherwise the background should contain an rgb value else: self.shades[shade_name][1].fill(shade_color) # Set the alpha value for the shade self.shades[shade_name][1].set_alpha(alpha)
python
def initialize_shade(self, shade_name, shade_color, alpha): """This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.1) """ # Create the pygame surface self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())] # Fill the surface with a solid color or an image if type(shade_color) == str: background = pygame.image.load(shade_color).convert() background = pygame.transform.scale(background, (self.image.get_width(), self.image.get_height())) self.shades[shade_name][1].blit(background, (0, 0)) # Otherwise the background should contain an rgb value else: self.shades[shade_name][1].fill(shade_color) # Set the alpha value for the shade self.shades[shade_name][1].set_alpha(alpha)
[ "def", "initialize_shade", "(", "self", ",", "shade_name", ",", "shade_color", ",", "alpha", ")", ":", "# Create the pygame surface", "self", ".", "shades", "[", "shade_name", "]", "=", "[", "0", ",", "pygame", ".", "Surface", "(", "self", ".", "image", "....
This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.1)
[ "This", "method", "will", "create", "semi", "-", "transparent", "surfaces", "with", "a", "specified", "color", ".", "The", "surface", "can", "be", "toggled", "on", "and", "off", ".", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", "Inputs", ...
train
https://github.com/jbm950/pygame_toolbox/blob/3fe32145fc149e4dd0963c30a2b6a4dddd4fac0e/pygame_toolbox/tilegame_tools/__init__.py#L54-L85
jbm950/pygame_toolbox
pygame_toolbox/tilegame_tools/__init__.py
Tile.toggle_shade
def toggle_shade(self, shade): """This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.1) """ # First toggle the user specified shade if self.shades[shade][0]: self.shades[shade][0] = 0 else: self.shades[shade][0] = 1 # Now draw the image with the active shades self.image.blit(self.pic, (0, 0)) for key in self.shades: if self.shades[key][0]: self.image.blit(self.shades[key][1], (0, 0))
python
def toggle_shade(self, shade): """This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.1) """ # First toggle the user specified shade if self.shades[shade][0]: self.shades[shade][0] = 0 else: self.shades[shade][0] = 1 # Now draw the image with the active shades self.image.blit(self.pic, (0, 0)) for key in self.shades: if self.shades[key][0]: self.image.blit(self.shades[key][1], (0, 0))
[ "def", "toggle_shade", "(", "self", ",", "shade", ")", ":", "# First toggle the user specified shade", "if", "self", ".", "shades", "[", "shade", "]", "[", "0", "]", ":", "self", ".", "shades", "[", "shade", "]", "[", "0", "]", "=", "0", "else", ":", ...
This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.1)
[ "This", "method", "will", "overlay", "a", "semi", "-", "transparent", "shade", "on", "top", "of", "the", "tile", "s", "image", ".", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", "Inputs", ":", "shade", "-", "This", "will", "designate", ...
train
https://github.com/jbm950/pygame_toolbox/blob/3fe32145fc149e4dd0963c30a2b6a4dddd4fac0e/pygame_toolbox/tilegame_tools/__init__.py#L87-L108
jbm950/pygame_toolbox
pygame_toolbox/tilegame_tools/__init__.py
Tilelist.adjacent_tiles
def adjacent_tiles(self, tile, pattern): """This will return a list of the tiles adjacent to a given tile. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: tile - This is the tile object for which the method will find adjacent tiles. pattern - This will designate the pattern type that you want the method to return 'p' = plus sign 'x' = diagonal 'b' = box (doc string updated ver 0.1) """ # Initialize the list of tiles to return adj_tiles = [] # Find the row and column of the input tile for i in self: for j in i: if j == tile: row = self.index(i) column = self[row].index(j) # Define functions for the 2 distinct patterns def plus_sign(self, row, column): nonlocal adj_tiles if row - 1 >= 0: adj_tiles += [self[row - 1][column]] if row + 1 != len(self): adj_tiles += [self[row + 1][column]] if column - 1 >= 0: adj_tiles += [self[row][column - 1]] if column + 1 != len(self[row]): adj_tiles += [self[row][column + 1]] def diagonal(self, row, column): nonlocal adj_tiles if column - 1 >= 0: if row - 1 >= 0: adj_tiles += [self[row - 1][column - 1]] if row + 1 != len(self): adj_tiles += [self[row + 1][column - 1]] if column + 1 != len(self[row]): if row - 1 >= 0: adj_tiles += [self[row - 1][column + 1]] if row + 1 != len(self): adj_tiles += [self[row + 1][column + 1]] # Return the tiles that form a plus sign with the given input tile if pattern == 'p': plus_sign(self, row, column) # Return the tiles touching the four corners of the input tile elif pattern == 'x': diagonal(self, row, column) # Return all of the tiles surrounding the input tile elif pattern == 'b': plus_sign(self, row, column) diagonal(self, row, column) return adj_tiles
python
def adjacent_tiles(self, tile, pattern): """This will return a list of the tiles adjacent to a given tile. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: tile - This is the tile object for which the method will find adjacent tiles. pattern - This will designate the pattern type that you want the method to return 'p' = plus sign 'x' = diagonal 'b' = box (doc string updated ver 0.1) """ # Initialize the list of tiles to return adj_tiles = [] # Find the row and column of the input tile for i in self: for j in i: if j == tile: row = self.index(i) column = self[row].index(j) # Define functions for the 2 distinct patterns def plus_sign(self, row, column): nonlocal adj_tiles if row - 1 >= 0: adj_tiles += [self[row - 1][column]] if row + 1 != len(self): adj_tiles += [self[row + 1][column]] if column - 1 >= 0: adj_tiles += [self[row][column - 1]] if column + 1 != len(self[row]): adj_tiles += [self[row][column + 1]] def diagonal(self, row, column): nonlocal adj_tiles if column - 1 >= 0: if row - 1 >= 0: adj_tiles += [self[row - 1][column - 1]] if row + 1 != len(self): adj_tiles += [self[row + 1][column - 1]] if column + 1 != len(self[row]): if row - 1 >= 0: adj_tiles += [self[row - 1][column + 1]] if row + 1 != len(self): adj_tiles += [self[row + 1][column + 1]] # Return the tiles that form a plus sign with the given input tile if pattern == 'p': plus_sign(self, row, column) # Return the tiles touching the four corners of the input tile elif pattern == 'x': diagonal(self, row, column) # Return all of the tiles surrounding the input tile elif pattern == 'b': plus_sign(self, row, column) diagonal(self, row, column) return adj_tiles
[ "def", "adjacent_tiles", "(", "self", ",", "tile", ",", "pattern", ")", ":", "# Initialize the list of tiles to return", "adj_tiles", "=", "[", "]", "# Find the row and column of the input tile", "for", "i", "in", "self", ":", "for", "j", "in", "i", ":", "if", "...
This will return a list of the tiles adjacent to a given tile. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: tile - This is the tile object for which the method will find adjacent tiles. pattern - This will designate the pattern type that you want the method to return 'p' = plus sign 'x' = diagonal 'b' = box (doc string updated ver 0.1)
[ "This", "will", "return", "a", "list", "of", "the", "tiles", "adjacent", "to", "a", "given", "tile", ".", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", "Inputs", ":", "tile", "-", "This", "is", "the", "tile", "object", "for", "which", ...
train
https://github.com/jbm950/pygame_toolbox/blob/3fe32145fc149e4dd0963c30a2b6a4dddd4fac0e/pygame_toolbox/tilegame_tools/__init__.py#L118-L183
jbm950/pygame_toolbox
pygame_toolbox/tilegame_tools/__init__.py
Tilemap.set_offset
def set_offset(self, offset, mid=None): """This method will allow the menu to be placed anywhere in the open window instead of just the upper left corner. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: offset - This is the x,y tuple of the position that you want to move the screen to. mid - The offset will be treated as the value passed in instead of the top left pixel. 'x' (the x point in offset will be treated as the middle of the menu image) 'y' (the y point in offset will be treated as the middle of the menu image) 'c' (the offset will be treated as the center of the menu image) (doc string updated ver 0.1) """ ptg.BaseScreen.set_offset(self, offset, mid) for i in self.tilelist: for j in i: j.rect[0] += offset[0] j.rect[1] += offset[1]
python
def set_offset(self, offset, mid=None): """This method will allow the menu to be placed anywhere in the open window instead of just the upper left corner. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: offset - This is the x,y tuple of the position that you want to move the screen to. mid - The offset will be treated as the value passed in instead of the top left pixel. 'x' (the x point in offset will be treated as the middle of the menu image) 'y' (the y point in offset will be treated as the middle of the menu image) 'c' (the offset will be treated as the center of the menu image) (doc string updated ver 0.1) """ ptg.BaseScreen.set_offset(self, offset, mid) for i in self.tilelist: for j in i: j.rect[0] += offset[0] j.rect[1] += offset[1]
[ "def", "set_offset", "(", "self", ",", "offset", ",", "mid", "=", "None", ")", ":", "ptg", ".", "BaseScreen", ".", "set_offset", "(", "self", ",", "offset", ",", "mid", ")", "for", "i", "in", "self", ".", "tilelist", ":", "for", "j", "in", "i", "...
This method will allow the menu to be placed anywhere in the open window instead of just the upper left corner. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: offset - This is the x,y tuple of the position that you want to move the screen to. mid - The offset will be treated as the value passed in instead of the top left pixel. 'x' (the x point in offset will be treated as the middle of the menu image) 'y' (the y point in offset will be treated as the middle of the menu image) 'c' (the offset will be treated as the center of the menu image) (doc string updated ver 0.1)
[ "This", "method", "will", "allow", "the", "menu", "to", "be", "placed", "anywhere", "in", "the", "open", "window", "instead", "of", "just", "the", "upper", "left", "corner", ".", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", "Inputs", ":"...
train
https://github.com/jbm950/pygame_toolbox/blob/3fe32145fc149e4dd0963c30a2b6a4dddd4fac0e/pygame_toolbox/tilegame_tools/__init__.py#L234-L260
pyroscope/pyrobase
src/pyrobase/bencode.py
bread
def bread(stream): """ Decode a file or stream to an object. """ if hasattr(stream, "read"): return bdecode(stream.read()) else: handle = open(stream, "rb") try: return bdecode(handle.read()) finally: handle.close()
python
def bread(stream): """ Decode a file or stream to an object. """ if hasattr(stream, "read"): return bdecode(stream.read()) else: handle = open(stream, "rb") try: return bdecode(handle.read()) finally: handle.close()
[ "def", "bread", "(", "stream", ")", ":", "if", "hasattr", "(", "stream", ",", "\"read\"", ")", ":", "return", "bdecode", "(", "stream", ".", "read", "(", ")", ")", "else", ":", "handle", "=", "open", "(", "stream", ",", "\"rb\"", ")", "try", ":", ...
Decode a file or stream to an object.
[ "Decode", "a", "file", "or", "stream", "to", "an", "object", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L170-L180
pyroscope/pyrobase
src/pyrobase/bencode.py
bwrite
def bwrite(stream, obj): """ Encode a given object to a file or stream. """ handle = None if not hasattr(stream, "write"): stream = handle = open(stream, "wb") try: stream.write(bencode(obj)) finally: if handle: handle.close()
python
def bwrite(stream, obj): """ Encode a given object to a file or stream. """ handle = None if not hasattr(stream, "write"): stream = handle = open(stream, "wb") try: stream.write(bencode(obj)) finally: if handle: handle.close()
[ "def", "bwrite", "(", "stream", ",", "obj", ")", ":", "handle", "=", "None", "if", "not", "hasattr", "(", "stream", ",", "\"write\"", ")", ":", "stream", "=", "handle", "=", "open", "(", "stream", ",", "\"wb\"", ")", "try", ":", "stream", ".", "wri...
Encode a given object to a file or stream.
[ "Encode", "a", "given", "object", "to", "a", "file", "or", "stream", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L183-L193
pyroscope/pyrobase
src/pyrobase/bencode.py
Decoder.decode
def decode(self, check_trailer=False): # pylint: disable=I0011,R0912 """ Decode data in C{self.data} and return deserialized object. @param check_trailer: Raise error if trailing junk is found in data? @raise BencodeError: Invalid data. """ try: kind = self.data[self.offset] except IndexError: raise BencodeError("Unexpected end of data at offset %d/%d" % ( self.offset, len(self.data), )) if kind.isdigit(): # String try: end = self.data.find(':', self.offset) length = int(self.data[self.offset:end], 10) except (ValueError, TypeError): raise BencodeError("Bad string length at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) self.offset = end+length+1 obj = self.data[end+1:self.offset] if self.char_encoding: try: obj = obj.decode(self.char_encoding) except (UnicodeError, AttributeError): # deliver non-decodable string (byte arrays) as-is pass elif kind == 'i': # Integer try: end = self.data.find('e', self.offset+1) obj = int(self.data[self.offset+1:end], 10) except (ValueError, TypeError): raise BencodeError("Bad integer at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) self.offset = end+1 elif kind == 'l': # List self.offset += 1 obj = [] while self.data[self.offset:self.offset+1] != 'e': obj.append(self.decode()) self.offset += 1 elif kind == 'd': # Dict self.offset += 1 obj = {} while self.data[self.offset:self.offset+1] != 'e': key = self.decode() obj[key] = self.decode() self.offset += 1 else: raise BencodeError("Format error at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) if check_trailer and self.offset != len(self.data): raise BencodeError("Trailing data at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) return obj
python
def decode(self, check_trailer=False): # pylint: disable=I0011,R0912 """ Decode data in C{self.data} and return deserialized object. @param check_trailer: Raise error if trailing junk is found in data? @raise BencodeError: Invalid data. """ try: kind = self.data[self.offset] except IndexError: raise BencodeError("Unexpected end of data at offset %d/%d" % ( self.offset, len(self.data), )) if kind.isdigit(): # String try: end = self.data.find(':', self.offset) length = int(self.data[self.offset:end], 10) except (ValueError, TypeError): raise BencodeError("Bad string length at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) self.offset = end+length+1 obj = self.data[end+1:self.offset] if self.char_encoding: try: obj = obj.decode(self.char_encoding) except (UnicodeError, AttributeError): # deliver non-decodable string (byte arrays) as-is pass elif kind == 'i': # Integer try: end = self.data.find('e', self.offset+1) obj = int(self.data[self.offset+1:end], 10) except (ValueError, TypeError): raise BencodeError("Bad integer at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) self.offset = end+1 elif kind == 'l': # List self.offset += 1 obj = [] while self.data[self.offset:self.offset+1] != 'e': obj.append(self.decode()) self.offset += 1 elif kind == 'd': # Dict self.offset += 1 obj = {} while self.data[self.offset:self.offset+1] != 'e': key = self.decode() obj[key] = self.decode() self.offset += 1 else: raise BencodeError("Format error at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) if check_trailer and self.offset != len(self.data): raise BencodeError("Trailing data at offset %d (%r...)" % ( self.offset, self.data[self.offset:self.offset+32] )) return obj
[ "def", "decode", "(", "self", ",", "check_trailer", "=", "False", ")", ":", "# pylint: disable=I0011,R0912", "try", ":", "kind", "=", "self", ".", "data", "[", "self", ".", "offset", "]", "except", "IndexError", ":", "raise", "BencodeError", "(", "\"Unexpect...
Decode data in C{self.data} and return deserialized object. @param check_trailer: Raise error if trailing junk is found in data? @raise BencodeError: Invalid data.
[ "Decode", "data", "in", "C", "{", "self", ".", "data", "}", "and", "return", "deserialized", "object", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L45-L112
pyroscope/pyrobase
src/pyrobase/bencode.py
Encoder.encode
def encode(self, obj): """ Add the given object to the result. """ if isinstance(obj, int_like_types): self.result.append("i%de" % obj) elif isinstance(obj, string_types): self.result.extend([str(len(obj)), ':', str(obj)]) elif hasattr(obj, "__bencode__"): self.encode(obj.__bencode__()) elif hasattr(obj, "items"): # Dictionary self.result.append('d') for key, val in sorted(obj.items()): key = str(key) self.result.extend([str(len(key)), ':', key]) self.encode(val) self.result.append('e') else: # Treat as iterable try: items = iter(obj) except TypeError as exc: raise BencodeError("Unsupported non-iterable object %r of type %s (%s)" % ( obj, type(obj), exc )) else: self.result.append('l') for item in items: self.encode(item) self.result.append('e') return self.result
python
def encode(self, obj): """ Add the given object to the result. """ if isinstance(obj, int_like_types): self.result.append("i%de" % obj) elif isinstance(obj, string_types): self.result.extend([str(len(obj)), ':', str(obj)]) elif hasattr(obj, "__bencode__"): self.encode(obj.__bencode__()) elif hasattr(obj, "items"): # Dictionary self.result.append('d') for key, val in sorted(obj.items()): key = str(key) self.result.extend([str(len(key)), ':', key]) self.encode(val) self.result.append('e') else: # Treat as iterable try: items = iter(obj) except TypeError as exc: raise BencodeError("Unsupported non-iterable object %r of type %s (%s)" % ( obj, type(obj), exc )) else: self.result.append('l') for item in items: self.encode(item) self.result.append('e') return self.result
[ "def", "encode", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "int_like_types", ")", ":", "self", ".", "result", ".", "append", "(", "\"i%de\"", "%", "obj", ")", "elif", "isinstance", "(", "obj", ",", "string_types", ")", "...
Add the given object to the result.
[ "Add", "the", "given", "object", "to", "the", "result", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L124-L155
jesford/cluster-lensing
clusterlensing/clusters.py
calc_delta_c
def calc_delta_c(c200): """Calculate characteristic overdensity from concentration. Parameters ---------- c200 : ndarray or float Cluster concentration parameter. Returns ---------- ndarray or float Cluster characteristic overdensity, of same type as c200. """ top = (200. / 3.) * c200**3. bottom = np.log(1. + c200) - (c200 / (1. + c200)) return (top / bottom)
python
def calc_delta_c(c200): """Calculate characteristic overdensity from concentration. Parameters ---------- c200 : ndarray or float Cluster concentration parameter. Returns ---------- ndarray or float Cluster characteristic overdensity, of same type as c200. """ top = (200. / 3.) * c200**3. bottom = np.log(1. + c200) - (c200 / (1. + c200)) return (top / bottom)
[ "def", "calc_delta_c", "(", "c200", ")", ":", "top", "=", "(", "200.", "/", "3.", ")", "*", "c200", "**", "3.", "bottom", "=", "np", ".", "log", "(", "1.", "+", "c200", ")", "-", "(", "c200", "/", "(", "1.", "+", "c200", ")", ")", "return", ...
Calculate characteristic overdensity from concentration. Parameters ---------- c200 : ndarray or float Cluster concentration parameter. Returns ---------- ndarray or float Cluster characteristic overdensity, of same type as c200.
[ "Calculate", "characteristic", "overdensity", "from", "concentration", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/clusters.py#L32-L47
jesford/cluster-lensing
clusterlensing/clusters.py
ClusterEnsemble.show
def show(self, notebook=notebook_display): """Display cluster properties and scaling relation parameters.""" print("\nCluster Ensemble:") if notebook is True: display(self._df) elif notebook is False: print(self._df) self.massrich_parameters()
python
def show(self, notebook=notebook_display): """Display cluster properties and scaling relation parameters.""" print("\nCluster Ensemble:") if notebook is True: display(self._df) elif notebook is False: print(self._df) self.massrich_parameters()
[ "def", "show", "(", "self", ",", "notebook", "=", "notebook_display", ")", ":", "print", "(", "\"\\nCluster Ensemble:\"", ")", "if", "notebook", "is", "True", ":", "display", "(", "self", ".", "_df", ")", "elif", "notebook", "is", "False", ":", "print", ...
Display cluster properties and scaling relation parameters.
[ "Display", "cluster", "properties", "and", "scaling", "relation", "parameters", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/clusters.py#L407-L414
jesford/cluster-lensing
clusterlensing/clusters.py
ClusterEnsemble.calc_nfw
def calc_nfw(self, rbins, offsets=None, numTh=200, numRoff=200, numRinner=20, factorRouter=3): """Calculates Sigma and DeltaSigma profiles. Generates the surface mass density (sigma_nfw attribute of parent object) and differential surface mass density (deltasigma_nfw attribute of parent object) profiles of each cluster, assuming a spherical NFW model. Optionally includes the effect of cluster miscentering offsets. Parameters ---------- rbins : array_like Radial bins (in Mpc) for calculating cluster profiles. Should be 1D, optionally with astropy.units of Mpc. offsets : array_like, optional Parameter describing the width (in Mpc) of the Gaussian distribution of miscentering offsets. Should be 1D, optionally with astropy.units of Mpc. Other Parameters ------------------- numTh : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over theta, for calculating offset profiles (no effect for offsets=None). Default 200. numRoff : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over R_off, for calculating offset profiles (no effect for offsets=None). Default 200. numRinner : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins at r < min(rbins) to use for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma ever, and no effect for DeltaSigma if offsets=None). Default 20. factorRouter : int, optional Parameter to pass to SurfaceMassDensity(). Factor increase over number of rbins, at min(r) < r < max(r), of bins that will be used at for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma, and no effect for DeltaSigma if offsets=None). Default 3. 
""" if offsets is None: self._sigoffset = np.zeros(self.number) * units.Mpc else: self._sigoffset = utils.check_units_and_type(offsets, units.Mpc, num=self.number) self.rbins = utils.check_units_and_type(rbins, units.Mpc) rhoc = self._rho_crit.to(units.Msun / units.pc**2 / units.Mpc) smd = SurfaceMassDensity(self.rs, self.delta_c, rhoc, offsets=self._sigoffset, rbins=self.rbins, numTh=numTh, numRoff=numRoff, numRinner=numRinner, factorRouter=factorRouter) self.sigma_nfw = smd.sigma_nfw() self.deltasigma_nfw = smd.deltasigma_nfw()
python
def calc_nfw(self, rbins, offsets=None, numTh=200, numRoff=200, numRinner=20, factorRouter=3): """Calculates Sigma and DeltaSigma profiles. Generates the surface mass density (sigma_nfw attribute of parent object) and differential surface mass density (deltasigma_nfw attribute of parent object) profiles of each cluster, assuming a spherical NFW model. Optionally includes the effect of cluster miscentering offsets. Parameters ---------- rbins : array_like Radial bins (in Mpc) for calculating cluster profiles. Should be 1D, optionally with astropy.units of Mpc. offsets : array_like, optional Parameter describing the width (in Mpc) of the Gaussian distribution of miscentering offsets. Should be 1D, optionally with astropy.units of Mpc. Other Parameters ------------------- numTh : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over theta, for calculating offset profiles (no effect for offsets=None). Default 200. numRoff : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over R_off, for calculating offset profiles (no effect for offsets=None). Default 200. numRinner : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins at r < min(rbins) to use for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma ever, and no effect for DeltaSigma if offsets=None). Default 20. factorRouter : int, optional Parameter to pass to SurfaceMassDensity(). Factor increase over number of rbins, at min(r) < r < max(r), of bins that will be used at for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma, and no effect for DeltaSigma if offsets=None). Default 3. 
""" if offsets is None: self._sigoffset = np.zeros(self.number) * units.Mpc else: self._sigoffset = utils.check_units_and_type(offsets, units.Mpc, num=self.number) self.rbins = utils.check_units_and_type(rbins, units.Mpc) rhoc = self._rho_crit.to(units.Msun / units.pc**2 / units.Mpc) smd = SurfaceMassDensity(self.rs, self.delta_c, rhoc, offsets=self._sigoffset, rbins=self.rbins, numTh=numTh, numRoff=numRoff, numRinner=numRinner, factorRouter=factorRouter) self.sigma_nfw = smd.sigma_nfw() self.deltasigma_nfw = smd.deltasigma_nfw()
[ "def", "calc_nfw", "(", "self", ",", "rbins", ",", "offsets", "=", "None", ",", "numTh", "=", "200", ",", "numRoff", "=", "200", ",", "numRinner", "=", "20", ",", "factorRouter", "=", "3", ")", ":", "if", "offsets", "is", "None", ":", "self", ".", ...
Calculates Sigma and DeltaSigma profiles. Generates the surface mass density (sigma_nfw attribute of parent object) and differential surface mass density (deltasigma_nfw attribute of parent object) profiles of each cluster, assuming a spherical NFW model. Optionally includes the effect of cluster miscentering offsets. Parameters ---------- rbins : array_like Radial bins (in Mpc) for calculating cluster profiles. Should be 1D, optionally with astropy.units of Mpc. offsets : array_like, optional Parameter describing the width (in Mpc) of the Gaussian distribution of miscentering offsets. Should be 1D, optionally with astropy.units of Mpc. Other Parameters ------------------- numTh : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over theta, for calculating offset profiles (no effect for offsets=None). Default 200. numRoff : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over R_off, for calculating offset profiles (no effect for offsets=None). Default 200. numRinner : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins at r < min(rbins) to use for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma ever, and no effect for DeltaSigma if offsets=None). Default 20. factorRouter : int, optional Parameter to pass to SurfaceMassDensity(). Factor increase over number of rbins, at min(r) < r < max(r), of bins that will be used at for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma, and no effect for DeltaSigma if offsets=None). Default 3.
[ "Calculates", "Sigma", "and", "DeltaSigma", "profiles", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/clusters.py#L502-L562
limix/limix-core
limix_core/mean/base.py
Base.Y
def Y(self,value): """ set phenotype """ assert value.shape[1]==1, 'Dimension mismatch' self._N = value.shape[0] self._Y = value
python
def Y(self,value): """ set phenotype """ assert value.shape[1]==1, 'Dimension mismatch' self._N = value.shape[0] self._Y = value
[ "def", "Y", "(", "self", ",", "value", ")", ":", "assert", "value", ".", "shape", "[", "1", "]", "==", "1", ",", "'Dimension mismatch'", "self", ".", "_N", "=", "value", ".", "shape", "[", "0", "]", "self", ".", "_Y", "=", "value" ]
set phenotype
[ "set", "phenotype" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/base.py#L41-L45
limix/limix-core
limix_core/mean/base.py
Base.F
def F(self,value): """ set phenotype """ assert value.shape[0]==self._N, 'Dimension mismatch' self._K = value.shape[1] self._F = value self.clear_cache('predict','Yres')
python
def F(self,value): """ set phenotype """ assert value.shape[0]==self._N, 'Dimension mismatch' self._K = value.shape[1] self._F = value self.clear_cache('predict','Yres')
[ "def", "F", "(", "self", ",", "value", ")", ":", "assert", "value", ".", "shape", "[", "0", "]", "==", "self", ".", "_N", ",", "'Dimension mismatch'", "self", ".", "_K", "=", "value", ".", "shape", "[", "1", "]", "self", ".", "_F", "=", "value", ...
set phenotype
[ "set", "phenotype" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/base.py#L49-L54
limix/limix-core
limix_core/mean/base.py
Base.B
def B(self,value): """ set phenotype """ assert value.shape[0]==self._K, 'Dimension mismatch' assert value.shape[1]==1, 'Dimension mismatch' self._B = value self.clear_cache('predict','Yres')
python
def B(self,value): """ set phenotype """ assert value.shape[0]==self._K, 'Dimension mismatch' assert value.shape[1]==1, 'Dimension mismatch' self._B = value self.clear_cache('predict','Yres')
[ "def", "B", "(", "self", ",", "value", ")", ":", "assert", "value", ".", "shape", "[", "0", "]", "==", "self", ".", "_K", ",", "'Dimension mismatch'", "assert", "value", ".", "shape", "[", "1", "]", "==", "1", ",", "'Dimension mismatch'", "self", "."...
set phenotype
[ "set", "phenotype" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/base.py#L57-L62
ubccr/pinky
pinky/canonicalization/traverse.py
draw
def draw(molecule, TraversalType=SmilesTraversal): """(molecule)->canonical representation of a molecule Well, it's only canonical if the atom symorders are canonical, otherwise it's arbitrary. atoms must have a symorder attribute bonds must have a equiv_class attribute""" result = [] atoms = allAtoms = molecule.atoms visitedAtoms = {} # # Traverse all components of the graph to form # the output string while atoms: atom = _get_lowest_symorder(atoms) visitedAtoms[atom] = 1 visitedBonds = {} nextTraverse = TraversalType() atomsUsed, bondsUsed = [], [] _traverse(atom, nextTraverse, None, visitedAtoms, visitedBonds, atomsUsed, bondsUsed, TraversalType) atoms = [] for atom in allAtoms: if not visitedAtoms.has_key(atom): atoms.append(atom) assert nextTraverse.atoms == atomsUsed assert nextTraverse.bonds == bondsUsed, "%s %s"%( nextTraverse.bonds, bondsUsed) result.append((str(nextTraverse), atomsUsed, bondsUsed)) result.sort() fragments = [] for r in result: fragments.append(r[0]) return ".".join(fragments), result
python
def draw(molecule, TraversalType=SmilesTraversal): """(molecule)->canonical representation of a molecule Well, it's only canonical if the atom symorders are canonical, otherwise it's arbitrary. atoms must have a symorder attribute bonds must have a equiv_class attribute""" result = [] atoms = allAtoms = molecule.atoms visitedAtoms = {} # # Traverse all components of the graph to form # the output string while atoms: atom = _get_lowest_symorder(atoms) visitedAtoms[atom] = 1 visitedBonds = {} nextTraverse = TraversalType() atomsUsed, bondsUsed = [], [] _traverse(atom, nextTraverse, None, visitedAtoms, visitedBonds, atomsUsed, bondsUsed, TraversalType) atoms = [] for atom in allAtoms: if not visitedAtoms.has_key(atom): atoms.append(atom) assert nextTraverse.atoms == atomsUsed assert nextTraverse.bonds == bondsUsed, "%s %s"%( nextTraverse.bonds, bondsUsed) result.append((str(nextTraverse), atomsUsed, bondsUsed)) result.sort() fragments = [] for r in result: fragments.append(r[0]) return ".".join(fragments), result
[ "def", "draw", "(", "molecule", ",", "TraversalType", "=", "SmilesTraversal", ")", ":", "result", "=", "[", "]", "atoms", "=", "allAtoms", "=", "molecule", ".", "atoms", "visitedAtoms", "=", "{", "}", "#", "# Traverse all components of the graph to form", "# the...
(molecule)->canonical representation of a molecule Well, it's only canonical if the atom symorders are canonical, otherwise it's arbitrary. atoms must have a symorder attribute bonds must have a equiv_class attribute
[ "(", "molecule", ")", "-", ">", "canonical", "representation", "of", "a", "molecule", "Well", "it", "s", "only", "canonical", "if", "the", "atom", "symorders", "are", "canonical", "otherwise", "it", "s", "arbitrary", "." ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/canonicalization/traverse.py#L122-L163
all-umass/graphs
graphs/generators/rand.py
random_graph
def random_graph(out_degree): '''Random graph generator. Does not generate self-edges. out_degree : array-like of ints, controlling the out degree of each vertex. ''' n = len(out_degree) out_degree = np.asarray(out_degree, dtype=int) if (out_degree >= n).any(): raise ValueError('Cannot have degree >= num_vertices') row = np.repeat(np.arange(n), out_degree) weights = np.ones_like(row, dtype=float) # Generate random edges from 0 to n-2, then shift by one to avoid self-edges. col = np.concatenate([np.random.choice(n-1, d, replace=False) for d in out_degree]) col[col >= row] += 1 adj = coo_matrix((weights, (row, col)), shape=(n, n)) return Graph.from_adj_matrix(adj)
python
def random_graph(out_degree): '''Random graph generator. Does not generate self-edges. out_degree : array-like of ints, controlling the out degree of each vertex. ''' n = len(out_degree) out_degree = np.asarray(out_degree, dtype=int) if (out_degree >= n).any(): raise ValueError('Cannot have degree >= num_vertices') row = np.repeat(np.arange(n), out_degree) weights = np.ones_like(row, dtype=float) # Generate random edges from 0 to n-2, then shift by one to avoid self-edges. col = np.concatenate([np.random.choice(n-1, d, replace=False) for d in out_degree]) col[col >= row] += 1 adj = coo_matrix((weights, (row, col)), shape=(n, n)) return Graph.from_adj_matrix(adj)
[ "def", "random_graph", "(", "out_degree", ")", ":", "n", "=", "len", "(", "out_degree", ")", "out_degree", "=", "np", ".", "asarray", "(", "out_degree", ",", "dtype", "=", "int", ")", "if", "(", "out_degree", ">=", "n", ")", ".", "any", "(", ")", "...
Random graph generator. Does not generate self-edges. out_degree : array-like of ints, controlling the out degree of each vertex.
[ "Random", "graph", "generator", ".", "Does", "not", "generate", "self", "-", "edges", ".", "out_degree", ":", "array", "-", "like", "of", "ints", "controlling", "the", "out", "degree", "of", "each", "vertex", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/generators/rand.py#L10-L25
all-umass/graphs
graphs/construction/geometric.py
delaunay_graph
def delaunay_graph(X, weighted=False): '''Delaunay triangulation graph. ''' e1, e2 = _delaunay_edges(X) pairs = np.column_stack((e1, e2)) w = paired_distances(X[e1], X[e2]) if weighted else None return Graph.from_edge_pairs(pairs, num_vertices=X.shape[0], symmetric=True, weights=w)
python
def delaunay_graph(X, weighted=False): '''Delaunay triangulation graph. ''' e1, e2 = _delaunay_edges(X) pairs = np.column_stack((e1, e2)) w = paired_distances(X[e1], X[e2]) if weighted else None return Graph.from_edge_pairs(pairs, num_vertices=X.shape[0], symmetric=True, weights=w)
[ "def", "delaunay_graph", "(", "X", ",", "weighted", "=", "False", ")", ":", "e1", ",", "e2", "=", "_delaunay_edges", "(", "X", ")", "pairs", "=", "np", ".", "column_stack", "(", "(", "e1", ",", "e2", ")", ")", "w", "=", "paired_distances", "(", "X"...
Delaunay triangulation graph.
[ "Delaunay", "triangulation", "graph", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/geometric.py#L16-L23
all-umass/graphs
graphs/construction/geometric.py
urquhart_graph
def urquhart_graph(X, weighted=False): '''Urquhart graph: made from the 2 shortest edges of each Delaunay triangle. ''' e1, e2 = _delaunay_edges(X) w = paired_distances(X[e1], X[e2]) mask = np.ones_like(w, dtype=bool) bad_inds = w.reshape((-1, 3)).argmax(axis=1) + np.arange(0, len(e1), 3) mask[bad_inds] = False weights = w[mask] if weighted else None pairs = np.column_stack((e1[mask], e2[mask])) return Graph.from_edge_pairs(pairs, num_vertices=X.shape[0], symmetric=True, weights=weights)
python
def urquhart_graph(X, weighted=False): '''Urquhart graph: made from the 2 shortest edges of each Delaunay triangle. ''' e1, e2 = _delaunay_edges(X) w = paired_distances(X[e1], X[e2]) mask = np.ones_like(w, dtype=bool) bad_inds = w.reshape((-1, 3)).argmax(axis=1) + np.arange(0, len(e1), 3) mask[bad_inds] = False weights = w[mask] if weighted else None pairs = np.column_stack((e1[mask], e2[mask])) return Graph.from_edge_pairs(pairs, num_vertices=X.shape[0], symmetric=True, weights=weights)
[ "def", "urquhart_graph", "(", "X", ",", "weighted", "=", "False", ")", ":", "e1", ",", "e2", "=", "_delaunay_edges", "(", "X", ")", "w", "=", "paired_distances", "(", "X", "[", "e1", "]", ",", "X", "[", "e2", "]", ")", "mask", "=", "np", ".", "...
Urquhart graph: made from the 2 shortest edges of each Delaunay triangle.
[ "Urquhart", "graph", ":", "made", "from", "the", "2", "shortest", "edges", "of", "each", "Delaunay", "triangle", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/geometric.py#L26-L38
limix/limix-core
limix_core/util/psd_solve.py
psd_solver.solve
def solve(self,b,overwrite_b=False,check_finite=True): """ solve A \ b """ if self._s is not None: res = self._U.T.dot(b) res /= self._s[:,np.newaxis] res = self._U.dot(res) elif self._chol is not None: res = la.cho_solve((self._chol,self._lower),b=b,overwrite_b=overwrite_b,check_finite=check_finite) else: res = np.zeros(b.shape) return res
python
def solve(self,b,overwrite_b=False,check_finite=True): """ solve A \ b """ if self._s is not None: res = self._U.T.dot(b) res /= self._s[:,np.newaxis] res = self._U.dot(res) elif self._chol is not None: res = la.cho_solve((self._chol,self._lower),b=b,overwrite_b=overwrite_b,check_finite=check_finite) else: res = np.zeros(b.shape) return res
[ "def", "solve", "(", "self", ",", "b", ",", "overwrite_b", "=", "False", ",", "check_finite", "=", "True", ")", ":", "if", "self", ".", "_s", "is", "not", "None", ":", "res", "=", "self", ".", "_U", ".", "T", ".", "dot", "(", "b", ")", "res", ...
solve A \ b
[ "solve", "A", "\\", "b" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/util/psd_solve.py#L26-L38
limix/limix-core
limix_core/util/psd_solve.py
psd_solver_any.solve
def solve(self,b,overwrite_b=False,check_finite=True, p=None): """ solve A \ b """ if p is None: assert b.shape[:2]==(len(self.solver),self.dof_any) solution = np.empty(b.shape) #This is trivially parallelizable: for p in range(self.P): solution[p] = self.solver[p].solve(b=b[p]) return solution else: return self.solver[p].solve(b=b)
python
def solve(self,b,overwrite_b=False,check_finite=True, p=None): """ solve A \ b """ if p is None: assert b.shape[:2]==(len(self.solver),self.dof_any) solution = np.empty(b.shape) #This is trivially parallelizable: for p in range(self.P): solution[p] = self.solver[p].solve(b=b[p]) return solution else: return self.solver[p].solve(b=b)
[ "def", "solve", "(", "self", ",", "b", ",", "overwrite_b", "=", "False", ",", "check_finite", "=", "True", ",", "p", "=", "None", ")", ":", "if", "p", "is", "None", ":", "assert", "b", ".", "shape", "[", ":", "2", "]", "==", "(", "len", "(", ...
solve A \ b
[ "solve", "A", "\\", "b" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/util/psd_solve.py#L54-L66
limix/limix-core
limix_core/util/psd_solve.py
PsdSolverKron.solve
def solve(self, b_any, b, check_finite=True, p=None): """ solve A \ b """ #assert b.shape[:2]==(len(self.solver),self.dof_any) if self.schur_solver is None and self.A_any_solver is None: assert ( (b is None) or (b.shape[0]==0) ) and ( (b_any is None) or (b_any.shape[0]==0) ), "shape missmatch" return b, b_any elif self.schur_solver is None: assert (b is None) or (b.shape[0]==0), "shape missmatch" solution_any = self.A_any_solver.solve(b=b_any,p=p) return b,solution_any elif self.A_any_solver is None: assert (b_any is None) or (b_any.shape[0]==0), "shape missmatch" solution = self.schur_solver.solve(b=b, check_finite=check_finite) return solution, b_any else: assert p is None, "p is not None" cross_term = np.tensordot(self.DinvC,b_any,axes=([0,1],[0,1])) solution = self.schur_solver.solve(b=(b - cross_term), check_finite=check_finite) solution_any = self.A_any_solver.solve(b=b_any, check_finite=check_finite, p=p) solution_any -= self.DinvC.dot(solution) return solution, solution_any
python
def solve(self, b_any, b, check_finite=True, p=None): """ solve A \ b """ #assert b.shape[:2]==(len(self.solver),self.dof_any) if self.schur_solver is None and self.A_any_solver is None: assert ( (b is None) or (b.shape[0]==0) ) and ( (b_any is None) or (b_any.shape[0]==0) ), "shape missmatch" return b, b_any elif self.schur_solver is None: assert (b is None) or (b.shape[0]==0), "shape missmatch" solution_any = self.A_any_solver.solve(b=b_any,p=p) return b,solution_any elif self.A_any_solver is None: assert (b_any is None) or (b_any.shape[0]==0), "shape missmatch" solution = self.schur_solver.solve(b=b, check_finite=check_finite) return solution, b_any else: assert p is None, "p is not None" cross_term = np.tensordot(self.DinvC,b_any,axes=([0,1],[0,1])) solution = self.schur_solver.solve(b=(b - cross_term), check_finite=check_finite) solution_any = self.A_any_solver.solve(b=b_any, check_finite=check_finite, p=p) solution_any -= self.DinvC.dot(solution) return solution, solution_any
[ "def", "solve", "(", "self", ",", "b_any", ",", "b", ",", "check_finite", "=", "True", ",", "p", "=", "None", ")", ":", "#assert b.shape[:2]==(len(self.solver),self.dof_any)", "if", "self", ".", "schur_solver", "is", "None", "and", "self", ".", "A_any_solver",...
solve A \ b
[ "solve", "A", "\\", "b" ]
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/util/psd_solve.py#L96-L120
pyroscope/pyrobase
src/pyrobase/templating.py
preparse
def preparse(template_text, lookup=None): """ Do any special processing of a template, including recognizing the templating language and resolving file: references, then return an appropriate wrapper object. Currently Tempita and Python string interpolation are supported. `lookup` is an optional callable that resolves any ambiguous template path. """ # First, try to resolve file: references to their contents template_path = None try: is_file = template_text.startswith("file:") except (AttributeError, TypeError): pass # not a string else: if is_file: template_path = template_text[5:] if template_path.startswith('/'): template_path = '/' + template_path.lstrip('/') elif template_path.startswith('~'): template_path = os.path.expanduser(template_path) elif lookup: template_path = lookup(template_path) with closing(open(template_path, "r")) as handle: template_text = handle.read().rstrip() if hasattr(template_text, "__engine__"): # Already preparsed template = template_text else: if template_text.startswith("{{"): import tempita # only on demand template = tempita.Template(template_text, name=template_path) template.__engine__ = "tempita" else: template = InterpolationTemplate(template_text) template.__file__ = template_path template.__text__ = template_text return template
python
def preparse(template_text, lookup=None): """ Do any special processing of a template, including recognizing the templating language and resolving file: references, then return an appropriate wrapper object. Currently Tempita and Python string interpolation are supported. `lookup` is an optional callable that resolves any ambiguous template path. """ # First, try to resolve file: references to their contents template_path = None try: is_file = template_text.startswith("file:") except (AttributeError, TypeError): pass # not a string else: if is_file: template_path = template_text[5:] if template_path.startswith('/'): template_path = '/' + template_path.lstrip('/') elif template_path.startswith('~'): template_path = os.path.expanduser(template_path) elif lookup: template_path = lookup(template_path) with closing(open(template_path, "r")) as handle: template_text = handle.read().rstrip() if hasattr(template_text, "__engine__"): # Already preparsed template = template_text else: if template_text.startswith("{{"): import tempita # only on demand template = tempita.Template(template_text, name=template_path) template.__engine__ = "tempita" else: template = InterpolationTemplate(template_text) template.__file__ = template_path template.__text__ = template_text return template
[ "def", "preparse", "(", "template_text", ",", "lookup", "=", "None", ")", ":", "# First, try to resolve file: references to their contents", "template_path", "=", "None", "try", ":", "is_file", "=", "template_text", ".", "startswith", "(", "\"file:\"", ")", "except", ...
Do any special processing of a template, including recognizing the templating language and resolving file: references, then return an appropriate wrapper object. Currently Tempita and Python string interpolation are supported. `lookup` is an optional callable that resolves any ambiguous template path.
[ "Do", "any", "special", "processing", "of", "a", "template", "including", "recognizing", "the", "templating", "language", "and", "resolving", "file", ":", "references", "then", "return", "an", "appropriate", "wrapper", "object", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/templating.py#L61-L102
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/genconutils/readout.py
FileReadOut.hdf5_read_out
def hdf5_read_out(self): """Read out an hdf5 file. Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess` and reads it out to an HDF5 file. """ with h5py.File(self.WORKING_DIRECTORY + '/' + self.output_file_name, 'w') as f: header = f.create_group('header') header.attrs['Title'] = 'Generated SNR Out' header.attrs['Author'] = 'Generator by: Michael Katz' header.attrs['Date/Time'] = str(datetime.datetime.now()) for which in ['x', 'y']: header.attrs[which + 'val_name'] = getattr(self, which + 'val_name') header.attrs['num_' + which + '_pts'] = getattr(self, 'num_' + which) ecc = 'eccentricity' in self.__dict__ if ecc: name_list = ['observation_time', 'start_frequency', 'start_separation' 'eccentricity'] else: name_list = ['spin_1', 'spin_2', 'spin', 'end_time'] name_list += ['total_mass', 'mass_ratio', 'start_time', 'luminosity_distance', 'comoving_distance', 'redshift'] for name in name_list: if name != self.xval_name and name != self.yval_name: try: getattr(self, name) header.attrs[name] = getattr(self, name) except AttributeError: pass if self.added_note != '': header.attrs['Added note'] = self.added_note data = f.create_group('data') # read out x,y values in compressed data set dset = data.create_dataset(self.x_col_name, data=self.xvals, dtype='float64', chunks=True, compression='gzip', compression_opts=9) dset = data.create_dataset(self.y_col_name, data=self.yvals, dtype='float64', chunks=True, compression='gzip', compression_opts=9) # read out all datasets for key in self.output_dict.keys(): dset = data.create_dataset(key, data=self.output_dict[key], dtype='float64', chunks=True, compression='gzip', compression_opts=9)
python
def hdf5_read_out(self): """Read out an hdf5 file. Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess` and reads it out to an HDF5 file. """ with h5py.File(self.WORKING_DIRECTORY + '/' + self.output_file_name, 'w') as f: header = f.create_group('header') header.attrs['Title'] = 'Generated SNR Out' header.attrs['Author'] = 'Generator by: Michael Katz' header.attrs['Date/Time'] = str(datetime.datetime.now()) for which in ['x', 'y']: header.attrs[which + 'val_name'] = getattr(self, which + 'val_name') header.attrs['num_' + which + '_pts'] = getattr(self, 'num_' + which) ecc = 'eccentricity' in self.__dict__ if ecc: name_list = ['observation_time', 'start_frequency', 'start_separation' 'eccentricity'] else: name_list = ['spin_1', 'spin_2', 'spin', 'end_time'] name_list += ['total_mass', 'mass_ratio', 'start_time', 'luminosity_distance', 'comoving_distance', 'redshift'] for name in name_list: if name != self.xval_name and name != self.yval_name: try: getattr(self, name) header.attrs[name] = getattr(self, name) except AttributeError: pass if self.added_note != '': header.attrs['Added note'] = self.added_note data = f.create_group('data') # read out x,y values in compressed data set dset = data.create_dataset(self.x_col_name, data=self.xvals, dtype='float64', chunks=True, compression='gzip', compression_opts=9) dset = data.create_dataset(self.y_col_name, data=self.yvals, dtype='float64', chunks=True, compression='gzip', compression_opts=9) # read out all datasets for key in self.output_dict.keys(): dset = data.create_dataset(key, data=self.output_dict[key], dtype='float64', chunks=True, compression='gzip', compression_opts=9)
[ "def", "hdf5_read_out", "(", "self", ")", ":", "with", "h5py", ".", "File", "(", "self", ".", "WORKING_DIRECTORY", "+", "'/'", "+", "self", ".", "output_file_name", ",", "'w'", ")", "as", "f", ":", "header", "=", "f", ".", "create_group", "(", "'header...
Read out an hdf5 file. Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess` and reads it out to an HDF5 file.
[ "Read", "out", "an", "hdf5", "file", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/genconutils/readout.py#L68-L122
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/genconutils/readout.py
FileReadOut.txt_read_out
def txt_read_out(self): """Read out txt file. Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess` and reads it out to a txt file. """ header = '#Generated SNR Out\n' header += '#Generator by: Michael Katz\n' header += '#Date/Time: {}\n'.format(datetime.datetime.now()) for which in ['x', 'y']: header += '#' + which + 'val_name: {}\n'.format(getattr(self, which + 'val_name')) header += '#num_' + which + '_pts: {}\n'.format(getattr(self, 'num_' + which)) ecc = 'eccentricity' in self.__dict__ if ecc: name_list = ['observation_time', 'start_frequency', 'start_separation' 'eccentricity'] else: name_list = ['spin_1', 'spin_2', 'spin', 'end_time'] name_list += ['total_mass', 'mass_ratio', 'start_time', 'luminosity_distance', 'comoving_distance', 'redshift'] for name in name_list: if name != self.xval_name and name != self.yval_name: try: getattr(self, name) header += '#{}: {}\n'.format(name, getattr(self, name)) except AttributeError: pass if self.added_note != '': header += '#Added note: ' + self.added_note + '\n' else: header += '#Added note: None\n' header += '#--------------------\n' header += self.x_col_name + '\t' header += self.y_col_name + '\t' for key in self.output_dict.keys(): header += key + '\t' # read out x,y and the data x_and_y = np.asarray([self.xvals, self.yvals]) snr_out = np.asarray([self.output_dict[key] for key in self.output_dict.keys()]).T data_out = np.concatenate([x_and_y.T, snr_out], axis=1) np.savetxt(self.WORKING_DIRECTORY + '/' + self.output_file_name, data_out, delimiter='\t', header=header, comments='') return
python
def txt_read_out(self): """Read out txt file. Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess` and reads it out to a txt file. """ header = '#Generated SNR Out\n' header += '#Generator by: Michael Katz\n' header += '#Date/Time: {}\n'.format(datetime.datetime.now()) for which in ['x', 'y']: header += '#' + which + 'val_name: {}\n'.format(getattr(self, which + 'val_name')) header += '#num_' + which + '_pts: {}\n'.format(getattr(self, 'num_' + which)) ecc = 'eccentricity' in self.__dict__ if ecc: name_list = ['observation_time', 'start_frequency', 'start_separation' 'eccentricity'] else: name_list = ['spin_1', 'spin_2', 'spin', 'end_time'] name_list += ['total_mass', 'mass_ratio', 'start_time', 'luminosity_distance', 'comoving_distance', 'redshift'] for name in name_list: if name != self.xval_name and name != self.yval_name: try: getattr(self, name) header += '#{}: {}\n'.format(name, getattr(self, name)) except AttributeError: pass if self.added_note != '': header += '#Added note: ' + self.added_note + '\n' else: header += '#Added note: None\n' header += '#--------------------\n' header += self.x_col_name + '\t' header += self.y_col_name + '\t' for key in self.output_dict.keys(): header += key + '\t' # read out x,y and the data x_and_y = np.asarray([self.xvals, self.yvals]) snr_out = np.asarray([self.output_dict[key] for key in self.output_dict.keys()]).T data_out = np.concatenate([x_and_y.T, snr_out], axis=1) np.savetxt(self.WORKING_DIRECTORY + '/' + self.output_file_name, data_out, delimiter='\t', header=header, comments='') return
[ "def", "txt_read_out", "(", "self", ")", ":", "header", "=", "'#Generated SNR Out\\n'", "header", "+=", "'#Generator by: Michael Katz\\n'", "header", "+=", "'#Date/Time: {}\\n'", ".", "format", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "for", ...
Read out txt file. Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess` and reads it out to a txt file.
[ "Read", "out", "txt", "file", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/genconutils/readout.py#L124-L180
shawalli/psycopg2-pgevents
psycopg2_pgevents/trigger.py
trigger_function_installed
def trigger_function_installed(connection: connection): """Test whether or not the psycopg2-pgevents trigger function is installed. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. Returns ------- bool True if the trigger function is installed, otherwise False. """ installed = False log('Checking if trigger function installed...', logger_name=_LOGGER_NAME) try: execute(connection, "SELECT pg_get_functiondef('public.psycopg2_pgevents_create_event'::regproc);") installed = True except ProgrammingError as e: if e.args: error_stdout = e.args[0].splitlines() error = error_stdout.pop(0) if error.endswith('does not exist'): # Trigger function not installed pass else: # Some other exception; re-raise raise e else: # Some other exception; re-raise raise e log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME) return installed
python
def trigger_function_installed(connection: connection): """Test whether or not the psycopg2-pgevents trigger function is installed. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. Returns ------- bool True if the trigger function is installed, otherwise False. """ installed = False log('Checking if trigger function installed...', logger_name=_LOGGER_NAME) try: execute(connection, "SELECT pg_get_functiondef('public.psycopg2_pgevents_create_event'::regproc);") installed = True except ProgrammingError as e: if e.args: error_stdout = e.args[0].splitlines() error = error_stdout.pop(0) if error.endswith('does not exist'): # Trigger function not installed pass else: # Some other exception; re-raise raise e else: # Some other exception; re-raise raise e log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME) return installed
[ "def", "trigger_function_installed", "(", "connection", ":", "connection", ")", ":", "installed", "=", "False", "log", "(", "'Checking if trigger function installed...'", ",", "logger_name", "=", "_LOGGER_NAME", ")", "try", ":", "execute", "(", "connection", ",", "\...
Test whether or not the psycopg2-pgevents trigger function is installed. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. Returns ------- bool True if the trigger function is installed, otherwise False.
[ "Test", "whether", "or", "not", "the", "psycopg2", "-", "pgevents", "trigger", "function", "is", "installed", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/trigger.py#L82-L119
shawalli/psycopg2-pgevents
psycopg2_pgevents/trigger.py
trigger_installed
def trigger_installed(connection: connection, table: str, schema: str='public'): """Test whether or not a psycopg2-pgevents trigger is installed for a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table whose trigger-existence will be checked. schema: str Schema to which the table belongs. Returns ------- bool True if the trigger is installed, otherwise False. """ installed = False log('Checking if {}.{} trigger installed...'.format(schema, table), logger_name=_LOGGER_NAME) statement = SELECT_TRIGGER_STATEMENT.format( table=table, schema=schema ) result = execute(connection, statement) if result: installed = True log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME) return installed
python
def trigger_installed(connection: connection, table: str, schema: str='public'): """Test whether or not a psycopg2-pgevents trigger is installed for a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table whose trigger-existence will be checked. schema: str Schema to which the table belongs. Returns ------- bool True if the trigger is installed, otherwise False. """ installed = False log('Checking if {}.{} trigger installed...'.format(schema, table), logger_name=_LOGGER_NAME) statement = SELECT_TRIGGER_STATEMENT.format( table=table, schema=schema ) result = execute(connection, statement) if result: installed = True log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME) return installed
[ "def", "trigger_installed", "(", "connection", ":", "connection", ",", "table", ":", "str", ",", "schema", ":", "str", "=", "'public'", ")", ":", "installed", "=", "False", "log", "(", "'Checking if {}.{} trigger installed...'", ".", "format", "(", "schema", "...
Test whether or not a psycopg2-pgevents trigger is installed for a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table whose trigger-existence will be checked. schema: str Schema to which the table belongs. Returns ------- bool True if the trigger is installed, otherwise False.
[ "Test", "whether", "or", "not", "a", "psycopg2", "-", "pgevents", "trigger", "is", "installed", "for", "a", "table", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/trigger.py#L122-L155
shawalli/psycopg2-pgevents
psycopg2_pgevents/trigger.py
install_trigger_function
def install_trigger_function(connection: connection, overwrite: bool=False) -> None: """Install the psycopg2-pgevents trigger function against the database. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. overwrite: bool Whether or not to overwrite existing installation of psycopg2-pgevents trigger function, if existing installation is found. Returns ------- None """ prior_install = False if not overwrite: prior_install = trigger_function_installed(connection) if not prior_install: log('Installing trigger function...', logger_name=_LOGGER_NAME) execute(connection, INSTALL_TRIGGER_FUNCTION_STATEMENT) else: log('Trigger function already installed; skipping...', logger_name=_LOGGER_NAME)
python
def install_trigger_function(connection: connection, overwrite: bool=False) -> None: """Install the psycopg2-pgevents trigger function against the database. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. overwrite: bool Whether or not to overwrite existing installation of psycopg2-pgevents trigger function, if existing installation is found. Returns ------- None """ prior_install = False if not overwrite: prior_install = trigger_function_installed(connection) if not prior_install: log('Installing trigger function...', logger_name=_LOGGER_NAME) execute(connection, INSTALL_TRIGGER_FUNCTION_STATEMENT) else: log('Trigger function already installed; skipping...', logger_name=_LOGGER_NAME)
[ "def", "install_trigger_function", "(", "connection", ":", "connection", ",", "overwrite", ":", "bool", "=", "False", ")", "->", "None", ":", "prior_install", "=", "False", "if", "not", "overwrite", ":", "prior_install", "=", "trigger_function_installed", "(", "...
Install the psycopg2-pgevents trigger function against the database. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. overwrite: bool Whether or not to overwrite existing installation of psycopg2-pgevents trigger function, if existing installation is found. Returns ------- None
[ "Install", "the", "psycopg2", "-", "pgevents", "trigger", "function", "against", "the", "database", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/trigger.py#L158-L184
shawalli/psycopg2-pgevents
psycopg2_pgevents/trigger.py
uninstall_trigger_function
def uninstall_trigger_function(connection: connection, force: bool=False) -> None: """Uninstall the psycopg2-pgevents trigger function from the database. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. force: bool If True, force the un-registration even if dependent triggers are still installed. If False, if there are any dependent triggers for the trigger function, the un-registration will fail. Returns ------- None """ modifier = '' if force: modifier = 'CASCADE' log('Uninstalling trigger function (cascade={})...'.format(force), logger_name=_LOGGER_NAME) statement = UNINSTALL_TRIGGER_FUNCTION_STATEMENT.format(modifier=modifier) execute(connection, statement)
python
def uninstall_trigger_function(connection: connection, force: bool=False) -> None: """Uninstall the psycopg2-pgevents trigger function from the database. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. force: bool If True, force the un-registration even if dependent triggers are still installed. If False, if there are any dependent triggers for the trigger function, the un-registration will fail. Returns ------- None """ modifier = '' if force: modifier = 'CASCADE' log('Uninstalling trigger function (cascade={})...'.format(force), logger_name=_LOGGER_NAME) statement = UNINSTALL_TRIGGER_FUNCTION_STATEMENT.format(modifier=modifier) execute(connection, statement)
[ "def", "uninstall_trigger_function", "(", "connection", ":", "connection", ",", "force", ":", "bool", "=", "False", ")", "->", "None", ":", "modifier", "=", "''", "if", "force", ":", "modifier", "=", "'CASCADE'", "log", "(", "'Uninstalling trigger function (casc...
Uninstall the psycopg2-pgevents trigger function from the database. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. force: bool If True, force the un-registration even if dependent triggers are still installed. If False, if there are any dependent triggers for the trigger function, the un-registration will fail. Returns ------- None
[ "Uninstall", "the", "psycopg2", "-", "pgevents", "trigger", "function", "from", "the", "database", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/trigger.py#L187-L211
shawalli/psycopg2-pgevents
psycopg2_pgevents/trigger.py
install_trigger
def install_trigger(connection: connection, table: str, schema: str='public', overwrite: bool=False) -> None: """Install a psycopg2-pgevents trigger against a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table for which the trigger should be installed. schema: str Schema to which the table belongs. overwrite: bool Whether or not to overwrite existing installation of trigger for the given table, if existing installation is found. Returns ------- None """ prior_install = False if not overwrite: prior_install = trigger_installed(connection, table, schema) if not prior_install: log('Installing {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME) statement = INSTALL_TRIGGER_STATEMENT.format( schema=schema, table=table ) execute(connection, statement) else: log('{}.{} trigger already installed; skipping...'.format(schema, table), logger_name=_LOGGER_NAME)
python
def install_trigger(connection: connection, table: str, schema: str='public', overwrite: bool=False) -> None: """Install a psycopg2-pgevents trigger against a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table for which the trigger should be installed. schema: str Schema to which the table belongs. overwrite: bool Whether or not to overwrite existing installation of trigger for the given table, if existing installation is found. Returns ------- None """ prior_install = False if not overwrite: prior_install = trigger_installed(connection, table, schema) if not prior_install: log('Installing {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME) statement = INSTALL_TRIGGER_STATEMENT.format( schema=schema, table=table ) execute(connection, statement) else: log('{}.{} trigger already installed; skipping...'.format(schema, table), logger_name=_LOGGER_NAME)
[ "def", "install_trigger", "(", "connection", ":", "connection", ",", "table", ":", "str", ",", "schema", ":", "str", "=", "'public'", ",", "overwrite", ":", "bool", "=", "False", ")", "->", "None", ":", "prior_install", "=", "False", "if", "not", "overwr...
Install a psycopg2-pgevents trigger against a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table for which the trigger should be installed. schema: str Schema to which the table belongs. overwrite: bool Whether or not to overwrite existing installation of trigger for the given table, if existing installation is found. Returns ------- None
[ "Install", "a", "psycopg2", "-", "pgevents", "trigger", "against", "a", "table", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/trigger.py#L214-L248
shawalli/psycopg2-pgevents
psycopg2_pgevents/trigger.py
uninstall_trigger
def uninstall_trigger(connection: connection, table: str, schema: str='public') -> None: """Uninstall a psycopg2-pgevents trigger from a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table for which the trigger should be uninstalled. schema: str Schema to which the table belongs. Returns ------- None """ log('Uninstalling {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME) statement = UNINSTALL_TRIGGER_STATEMENT.format( schema=schema, table=table ) execute(connection, statement)
python
def uninstall_trigger(connection: connection, table: str, schema: str='public') -> None: """Uninstall a psycopg2-pgevents trigger from a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table for which the trigger should be uninstalled. schema: str Schema to which the table belongs. Returns ------- None """ log('Uninstalling {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME) statement = UNINSTALL_TRIGGER_STATEMENT.format( schema=schema, table=table ) execute(connection, statement)
[ "def", "uninstall_trigger", "(", "connection", ":", "connection", ",", "table", ":", "str", ",", "schema", ":", "str", "=", "'public'", ")", "->", "None", ":", "log", "(", "'Uninstalling {}.{} trigger...'", ".", "format", "(", "schema", ",", "table", ")", ...
Uninstall a psycopg2-pgevents trigger from a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostGreSQL database. table: str Table for which the trigger should be uninstalled. schema: str Schema to which the table belongs. Returns ------- None
[ "Uninstall", "a", "psycopg2", "-", "pgevents", "trigger", "from", "a", "table", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/trigger.py#L251-L274
mozilla/funfactory
funfactory/utils.py
absolutify
def absolutify(url): """Takes a URL and prepends the SITE_URL""" site_url = getattr(settings, 'SITE_URL', False) # If we don't define it explicitly if not site_url: protocol = settings.PROTOCOL hostname = settings.DOMAIN port = settings.PORT if (protocol, port) in (('https://', 443), ('http://', 80)): site_url = ''.join(map(str, (protocol, hostname))) else: site_url = ''.join(map(str, (protocol, hostname, ':', port))) return site_url + url
python
def absolutify(url): """Takes a URL and prepends the SITE_URL""" site_url = getattr(settings, 'SITE_URL', False) # If we don't define it explicitly if not site_url: protocol = settings.PROTOCOL hostname = settings.DOMAIN port = settings.PORT if (protocol, port) in (('https://', 443), ('http://', 80)): site_url = ''.join(map(str, (protocol, hostname))) else: site_url = ''.join(map(str, (protocol, hostname, ':', port))) return site_url + url
[ "def", "absolutify", "(", "url", ")", ":", "site_url", "=", "getattr", "(", "settings", ",", "'SITE_URL'", ",", "False", ")", "# If we don't define it explicitly", "if", "not", "site_url", ":", "protocol", "=", "settings", ".", "PROTOCOL", "hostname", "=", "se...
Takes a URL and prepends the SITE_URL
[ "Takes", "a", "URL", "and", "prepends", "the", "SITE_URL" ]
train
https://github.com/mozilla/funfactory/blob/c9bbf1c534eaa15641265bc75fa87afca52b7dd6/funfactory/utils.py#L9-L23
inveniosoftware/invenio-config
invenio_config/utils.py
create_config_loader
def create_config_loader(config=None, env_prefix='APP'): """Create a default configuration loader. A configuration loader takes a Flask application and keyword arguments and updates the Flask application's configuration as it sees fit. This default configuration loader will load configuration in the following order: 1. Load configuration from ``invenio_config.module`` entry points group, following the alphabetical ascending order in case of multiple entry points defined. For example, the config of an app with entry point name ``10_app`` will be loaded after the config of an app with entry point name ``00_app``. 2. Load configuration from ``config`` module if provided as argument. 3. Load configuration from the instance folder: ``<app.instance_path>/<app.name>.cfg``. 4. Load configuration keyword arguments provided. 5. Load configuration from environment variables with the prefix ``env_prefix``. If no secret key has been set a warning will be issued. :param config: Either an import string to a module with configuration or alternatively the module itself. :param env_prefix: Environment variable prefix to import configuration from. :return: A callable with the method signature ``config_loader(app, **kwargs)``. .. versionadded:: 1.0.0 """ def _config_loader(app, **kwargs_config): InvenioConfigEntryPointModule(app=app) if config: InvenioConfigModule(app=app, module=config) InvenioConfigInstanceFolder(app=app) app.config.update(**kwargs_config) InvenioConfigEnvironment(app=app, prefix='{0}_'.format(env_prefix)) InvenioConfigDefault(app=app) return _config_loader
python
def create_config_loader(config=None, env_prefix='APP'): """Create a default configuration loader. A configuration loader takes a Flask application and keyword arguments and updates the Flask application's configuration as it sees fit. This default configuration loader will load configuration in the following order: 1. Load configuration from ``invenio_config.module`` entry points group, following the alphabetical ascending order in case of multiple entry points defined. For example, the config of an app with entry point name ``10_app`` will be loaded after the config of an app with entry point name ``00_app``. 2. Load configuration from ``config`` module if provided as argument. 3. Load configuration from the instance folder: ``<app.instance_path>/<app.name>.cfg``. 4. Load configuration keyword arguments provided. 5. Load configuration from environment variables with the prefix ``env_prefix``. If no secret key has been set a warning will be issued. :param config: Either an import string to a module with configuration or alternatively the module itself. :param env_prefix: Environment variable prefix to import configuration from. :return: A callable with the method signature ``config_loader(app, **kwargs)``. .. versionadded:: 1.0.0 """ def _config_loader(app, **kwargs_config): InvenioConfigEntryPointModule(app=app) if config: InvenioConfigModule(app=app, module=config) InvenioConfigInstanceFolder(app=app) app.config.update(**kwargs_config) InvenioConfigEnvironment(app=app, prefix='{0}_'.format(env_prefix)) InvenioConfigDefault(app=app) return _config_loader
[ "def", "create_config_loader", "(", "config", "=", "None", ",", "env_prefix", "=", "'APP'", ")", ":", "def", "_config_loader", "(", "app", ",", "*", "*", "kwargs_config", ")", ":", "InvenioConfigEntryPointModule", "(", "app", "=", "app", ")", "if", "config",...
Create a default configuration loader. A configuration loader takes a Flask application and keyword arguments and updates the Flask application's configuration as it sees fit. This default configuration loader will load configuration in the following order: 1. Load configuration from ``invenio_config.module`` entry points group, following the alphabetical ascending order in case of multiple entry points defined. For example, the config of an app with entry point name ``10_app`` will be loaded after the config of an app with entry point name ``00_app``. 2. Load configuration from ``config`` module if provided as argument. 3. Load configuration from the instance folder: ``<app.instance_path>/<app.name>.cfg``. 4. Load configuration keyword arguments provided. 5. Load configuration from environment variables with the prefix ``env_prefix``. If no secret key has been set a warning will be issued. :param config: Either an import string to a module with configuration or alternatively the module itself. :param env_prefix: Environment variable prefix to import configuration from. :return: A callable with the method signature ``config_loader(app, **kwargs)``. .. versionadded:: 1.0.0
[ "Create", "a", "default", "configuration", "loader", "." ]
train
https://github.com/inveniosoftware/invenio-config/blob/8d1e63ac045cd9c58a3399c6b58845e6daa06102/invenio_config/utils.py#L20-L62
inveniosoftware/invenio-config
invenio_config/utils.py
create_conf_loader
def create_conf_loader(*args, **kwargs): # pragma: no cover """Create a default configuration loader. .. deprecated:: 1.0.0b1 Use :func:`create_config_loader` instead. This function will be removed in version 1.0.1. """ import warnings warnings.warn( '"create_conf_loader" has been renamed to "create_config_loader".', DeprecationWarning ) return create_config_loader(*args, **kwargs)
python
def create_conf_loader(*args, **kwargs): # pragma: no cover """Create a default configuration loader. .. deprecated:: 1.0.0b1 Use :func:`create_config_loader` instead. This function will be removed in version 1.0.1. """ import warnings warnings.warn( '"create_conf_loader" has been renamed to "create_config_loader".', DeprecationWarning ) return create_config_loader(*args, **kwargs)
[ "def", "create_conf_loader", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "import", "warnings", "warnings", ".", "warn", "(", "'\"create_conf_loader\" has been renamed to \"create_config_loader\".'", ",", "DeprecationWarning", ")", "return", ...
Create a default configuration loader. .. deprecated:: 1.0.0b1 Use :func:`create_config_loader` instead. This function will be removed in version 1.0.1.
[ "Create", "a", "default", "configuration", "loader", "." ]
train
https://github.com/inveniosoftware/invenio-config/blob/8d1e63ac045cd9c58a3399c6b58845e6daa06102/invenio_config/utils.py#L65-L77
gautammishra/lyft-rides-python-sdk
lyft_rides/request.py
Request._build_headers
def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise LyftIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
python
def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise LyftIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
[ "def", "_build_headers", "(", "self", ",", "method", ",", "auth_session", ")", ":", "token_type", "=", "auth_session", ".", "token_type", "token", "=", "auth_session", ".", "oauth2credential", ".", "access_token", "if", "not", "self", ".", "_authorization_headers_...
Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid.
[ "Create", "headers", "for", "the", "request", ".", "Parameters", "method", "(", "str", ")", "HTTP", "method", "(", "e", ".", "g", ".", "POST", ")", ".", "auth_session", "(", "Session", ")", "The", "Session", "object", "containing", "OAuth", "2", ".", "...
train
https://github.com/gautammishra/lyft-rides-python-sdk/blob/b6d96a0fceaf7dc3425153c418a8e25c57803431/lyft_rides/request.py#L131-L160
gautammishra/lyft-rides-python-sdk
lyft_rides/request.py
Request._authorization_headers_valid
def _authorization_headers_valid(self, token_type, token): """Verify authorization headers for a request. Parameters token_type (str) Type of token to access resources. token (str) Server Token or OAuth 2.0 Access Token. Returns (bool) True iff token_type and token are valid. """ if token_type not in http.VALID_TOKEN_TYPES: return False allowed_chars = ascii_letters + digits + '_' + '-' + '=' + '/' + '+' # True if token only contains allowed_chars return all(characters in allowed_chars for characters in token)
python
def _authorization_headers_valid(self, token_type, token): """Verify authorization headers for a request. Parameters token_type (str) Type of token to access resources. token (str) Server Token or OAuth 2.0 Access Token. Returns (bool) True iff token_type and token are valid. """ if token_type not in http.VALID_TOKEN_TYPES: return False allowed_chars = ascii_letters + digits + '_' + '-' + '=' + '/' + '+' # True if token only contains allowed_chars return all(characters in allowed_chars for characters in token)
[ "def", "_authorization_headers_valid", "(", "self", ",", "token_type", ",", "token", ")", ":", "if", "token_type", "not", "in", "http", ".", "VALID_TOKEN_TYPES", ":", "return", "False", "allowed_chars", "=", "ascii_letters", "+", "digits", "+", "'_'", "+", "'-...
Verify authorization headers for a request. Parameters token_type (str) Type of token to access resources. token (str) Server Token or OAuth 2.0 Access Token. Returns (bool) True iff token_type and token are valid.
[ "Verify", "authorization", "headers", "for", "a", "request", ".", "Parameters", "token_type", "(", "str", ")", "Type", "of", "token", "to", "access", "resources", ".", "token", "(", "str", ")", "Server", "Token", "or", "OAuth", "2", ".", "0", "Access", "...
train
https://github.com/gautammishra/lyft-rides-python-sdk/blob/b6d96a0fceaf7dc3425153c418a8e25c57803431/lyft_rides/request.py#L162-L179