id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
234,500
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogObject._get_formatter
def _get_formatter(self, json): ''' Return the proper log formatter @param json: Boolean value ''' if json: return jsonlogger.JsonFormatter() else: return logging.Formatter(self.format_string)
python
def _get_formatter(self, json): ''' Return the proper log formatter @param json: Boolean value ''' if json: return jsonlogger.JsonFormatter() else: return logging.Formatter(self.format_string)
[ "def", "_get_formatter", "(", "self", ",", "json", ")", ":", "if", "json", ":", "return", "jsonlogger", ".", "JsonFormatter", "(", ")", "else", ":", "return", "logging", ".", "Formatter", "(", "self", ".", "format_string", ")" ]
Return the proper log formatter @param json: Boolean value
[ "Return", "the", "proper", "log", "formatter" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L180-L189
234,501
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogObject.debug
def debug(self, message, extra=None):
    '''
    Writes a debug message to the log

    @param message: The message to write
    @param extra: The extras object to pass in
    '''
    # Fix: the default used to be a shared mutable dict ({}), which is
    # a classic Python pitfall; use None as the sentinel so every call
    # gets its own fresh dict.
    if extra is None:
        extra = {}
    # Only emit when DEBUG is at or above the configured threshold.
    if self.level_dict['DEBUG'] >= self.level_dict[self.log_level]:
        extras = self.add_extras(extra, "DEBUG")
        self._write_message(message, extras)
        self.fire_callbacks('DEBUG', message, extra)
python
def debug(self, message, extra={}): ''' Writes an error message to the log @param message: The message to write @param extra: The extras object to pass in ''' if self.level_dict['DEBUG'] >= self.level_dict[self.log_level]: extras = self.add_extras(extra, "DEBUG") self._write_message(message, extras) self.fire_callbacks('DEBUG', message, extra)
[ "def", "debug", "(", "self", ",", "message", ",", "extra", "=", "{", "}", ")", ":", "if", "self", ".", "level_dict", "[", "'DEBUG'", "]", ">=", "self", ".", "level_dict", "[", "self", ".", "log_level", "]", ":", "extras", "=", "self", ".", "add_ext...
Writes an error message to the log @param message: The message to write @param extra: The extras object to pass in
[ "Writes", "an", "error", "message", "to", "the", "log" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L191-L201
234,502
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogObject._write_message
def _write_message(self, message, extra): ''' Writes the log output @param message: The message to write @param extra: The potential object to write ''' if not self.json: self._write_standard(message, extra) else: self._write_json(message, extra)
python
def _write_message(self, message, extra): ''' Writes the log output @param message: The message to write @param extra: The potential object to write ''' if not self.json: self._write_standard(message, extra) else: self._write_json(message, extra)
[ "def", "_write_message", "(", "self", ",", "message", ",", "extra", ")", ":", "if", "not", "self", ".", "json", ":", "self", ".", "_write_standard", "(", "message", ",", "extra", ")", "else", ":", "self", ".", "_write_json", "(", "message", ",", "extra...
Writes the log output @param message: The message to write @param extra: The potential object to write
[ "Writes", "the", "log", "output" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L260-L269
234,503
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogObject._write_standard
def _write_standard(self, message, extra): ''' Writes a standard log statement @param message: The message to write @param extra: The object to pull defaults from ''' level = extra['level'] if self.include_extra: del extra['timestamp'] del extra['level'] del extra['logger'] if len(extra) > 0: message += " " + str(extra) if level == 'INFO': self.logger.info(message) elif level == 'DEBUG': self.logger.debug(message) elif level == 'WARNING': self.logger.warning(message) elif level == 'ERROR': self.logger.error(message) elif level == 'CRITICAL': self.logger.critical(message) else: self.logger.debug(message)
python
def _write_standard(self, message, extra): ''' Writes a standard log statement @param message: The message to write @param extra: The object to pull defaults from ''' level = extra['level'] if self.include_extra: del extra['timestamp'] del extra['level'] del extra['logger'] if len(extra) > 0: message += " " + str(extra) if level == 'INFO': self.logger.info(message) elif level == 'DEBUG': self.logger.debug(message) elif level == 'WARNING': self.logger.warning(message) elif level == 'ERROR': self.logger.error(message) elif level == 'CRITICAL': self.logger.critical(message) else: self.logger.debug(message)
[ "def", "_write_standard", "(", "self", ",", "message", ",", "extra", ")", ":", "level", "=", "extra", "[", "'level'", "]", "if", "self", ".", "include_extra", ":", "del", "extra", "[", "'timestamp'", "]", "del", "extra", "[", "'level'", "]", "del", "ex...
Writes a standard log statement @param message: The message to write @param extra: The object to pull defaults from
[ "Writes", "a", "standard", "log", "statement" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L271-L297
234,504
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogObject._write_json
def _write_json(self, message, extra): ''' The JSON logger doesn't obey log levels @param message: The message to write @param extra: The object to write ''' self.logger.info(message, extra=extra)
python
def _write_json(self, message, extra): ''' The JSON logger doesn't obey log levels @param message: The message to write @param extra: The object to write ''' self.logger.info(message, extra=extra)
[ "def", "_write_json", "(", "self", ",", "message", ",", "extra", ")", ":", "self", ".", "logger", ".", "info", "(", "message", ",", "extra", "=", "extra", ")" ]
The JSON logger doesn't obey log levels @param message: The message to write @param extra: The object to write
[ "The", "JSON", "logger", "doesn", "t", "obey", "log", "levels" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L299-L306
234,505
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogObject.add_extras
def add_extras(self, dict, level):
    '''
    Adds the log level to the dict object

    Returns a deep copy of the supplied dict with 'level', 'timestamp'
    and 'logger' filled in when absent; the caller's dict is untouched.
    '''
    enriched = copy.deepcopy(dict)
    # Suppliers are callables so defaults are computed lazily, only
    # when the key is actually missing (matches the original branches).
    defaults = (
        ('level', lambda: level),
        ('timestamp', self._get_time),
        ('logger', lambda: self.name),
    )
    for key, supplier in defaults:
        if key not in enriched:
            enriched[key] = supplier()
    return enriched
python
def add_extras(self, dict, level): ''' Adds the log level to the dict object ''' my_copy = copy.deepcopy(dict) if 'level' not in my_copy: my_copy['level'] = level if 'timestamp' not in my_copy: my_copy['timestamp'] = self._get_time() if 'logger' not in my_copy: my_copy['logger'] = self.name return my_copy
[ "def", "add_extras", "(", "self", ",", "dict", ",", "level", ")", ":", "my_copy", "=", "copy", ".", "deepcopy", "(", "dict", ")", "if", "'level'", "not", "in", "my_copy", ":", "my_copy", "[", "'level'", "]", "=", "level", "if", "'timestamp'", "not", ...
Adds the log level to the dict object
[ "Adds", "the", "log", "level", "to", "the", "dict", "object" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L315-L326
234,506
istresearch/scrapy-cluster
crawler/crawling/log_retry_middleware.py
LogRetryMiddleware._increment_504_stat
def _increment_504_stat(self, request): ''' Increments the 504 stat counters @param request: The scrapy request in the spider ''' for key in self.stats_dict: if key == 'lifetime': unique = request.url + str(time.time()) self.stats_dict[key].increment(unique) else: self.stats_dict[key].increment() self.logger.debug("Incremented status_code '504' stats")
python
def _increment_504_stat(self, request): ''' Increments the 504 stat counters @param request: The scrapy request in the spider ''' for key in self.stats_dict: if key == 'lifetime': unique = request.url + str(time.time()) self.stats_dict[key].increment(unique) else: self.stats_dict[key].increment() self.logger.debug("Incremented status_code '504' stats")
[ "def", "_increment_504_stat", "(", "self", ",", "request", ")", ":", "for", "key", "in", "self", ".", "stats_dict", ":", "if", "key", "==", "'lifetime'", ":", "unique", "=", "request", ".", "url", "+", "str", "(", "time", ".", "time", "(", ")", ")", ...
Increments the 504 stat counters @param request: The scrapy request in the spider
[ "Increments", "the", "504", "stat", "counters" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/log_retry_middleware.py#L149-L161
234,507
istresearch/scrapy-cluster
utils/scutils/redis_throttled_queue.py
RedisThrottledQueue.clear
def clear(self):
    '''
    Clears all data associated with the throttled queue
    '''
    # Drop both throttle bookkeeping keys, then empty the backing queue.
    for redis_key in (self.window_key, self.moderate_key):
        self.redis_conn.delete(redis_key)
    self.queue.clear()
python
def clear(self): ''' Clears all data associated with the throttled queue ''' self.redis_conn.delete(self.window_key) self.redis_conn.delete(self.moderate_key) self.queue.clear()
[ "def", "clear", "(", "self", ")", ":", "self", ".", "redis_conn", ".", "delete", "(", "self", ".", "window_key", ")", "self", ".", "redis_conn", ".", "delete", "(", "self", ".", "moderate_key", ")", "self", ".", "queue", ".", "clear", "(", ")" ]
Clears all data associated with the throttled queue
[ "Clears", "all", "data", "associated", "with", "the", "throttled", "queue" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_throttled_queue.py#L76-L82
234,508
istresearch/scrapy-cluster
utils/scutils/redis_throttled_queue.py
RedisThrottledQueue.pop
def pop(self, *args):
    '''
    Non-blocking from throttled queue standpoint, tries to return a
    queue pop request, only will return a request if the given time
    window has not been exceeded

    @return: The item if the throttle limit has not been hit,
    otherwise None
    '''
    # Guard clause: outside the throttle window nothing is returned.
    if not self.allowed():
        return None
    # Count pops toward the elastic kick-in threshold (capped at limit).
    if self.elastic_kick_in < self.limit:
        self.elastic_kick_in += 1
    return self.queue.pop(*args)
python
def pop(self, *args): ''' Non-blocking from throttled queue standpoint, tries to return a queue pop request, only will return a request if the given time window has not been exceeded @return: The item if the throttle limit has not been hit, otherwise None ''' if self.allowed(): if self.elastic_kick_in < self.limit: self.elastic_kick_in += 1 return self.queue.pop(*args) else: return None
[ "def", "pop", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "allowed", "(", ")", ":", "if", "self", ".", "elastic_kick_in", "<", "self", ".", "limit", ":", "self", ".", "elastic_kick_in", "+=", "1", "return", "self", ".", "queue", "."...
Non-blocking from throttled queue standpoint, tries to return a queue pop request, only will return a request if the given time window has not been exceeded @return: The item if the throttle limit has not been hit, otherwise None
[ "Non", "-", "blocking", "from", "throttled", "queue", "standpoint", "tries", "to", "return", "a", "queue", "pop", "request", "only", "will", "return", "a", "request", "if", "the", "given", "time", "window", "has", "not", "been", "exceeded" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_throttled_queue.py#L90-L104
234,509
istresearch/scrapy-cluster
utils/scutils/redis_throttled_queue.py
RedisThrottledQueue.allowed
def allowed(self): ''' Check to see if the pop request is allowed @return: True means the maximum was not been reached for the current time window, thus allowing what ever operation follows ''' # Expire old keys (hits) expires = time.time() - self.window self.redis_conn.zremrangebyscore(self.window_key, '-inf', expires) # check if we are hitting too fast for moderation if self.moderation: with self.redis_conn.pipeline() as pipe: try: pipe.watch(self.moderate_key) # ---- LOCK # from this point onward if no errors are raised we # successfully incremented the counter curr_time = time.time() if self.is_moderated(curr_time, pipe) and not \ self.check_elastic(): return False # passed the moderation limit, now check time window # If we have less keys than max, update out moderate key if self.test_hits(): # this is a valid transaction, set the new time pipe.multi() pipe.set(name=self.moderate_key, value=str(curr_time), ex=int(self.window * 2)) pipe.execute() return True except WatchError: # watch was changed, another thread just incremented # the value return False # If we currently have more keys than max, # then limit the action else: return self.test_hits() return False
python
def allowed(self): ''' Check to see if the pop request is allowed @return: True means the maximum was not been reached for the current time window, thus allowing what ever operation follows ''' # Expire old keys (hits) expires = time.time() - self.window self.redis_conn.zremrangebyscore(self.window_key, '-inf', expires) # check if we are hitting too fast for moderation if self.moderation: with self.redis_conn.pipeline() as pipe: try: pipe.watch(self.moderate_key) # ---- LOCK # from this point onward if no errors are raised we # successfully incremented the counter curr_time = time.time() if self.is_moderated(curr_time, pipe) and not \ self.check_elastic(): return False # passed the moderation limit, now check time window # If we have less keys than max, update out moderate key if self.test_hits(): # this is a valid transaction, set the new time pipe.multi() pipe.set(name=self.moderate_key, value=str(curr_time), ex=int(self.window * 2)) pipe.execute() return True except WatchError: # watch was changed, another thread just incremented # the value return False # If we currently have more keys than max, # then limit the action else: return self.test_hits() return False
[ "def", "allowed", "(", "self", ")", ":", "# Expire old keys (hits)", "expires", "=", "time", ".", "time", "(", ")", "-", "self", ".", "window", "self", ".", "redis_conn", ".", "zremrangebyscore", "(", "self", ".", "window_key", ",", "'-inf'", ",", "expires...
Check to see if the pop request is allowed @return: True means the maximum was not been reached for the current time window, thus allowing what ever operation follows
[ "Check", "to", "see", "if", "the", "pop", "request", "is", "allowed" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_throttled_queue.py#L112-L157
234,510
istresearch/scrapy-cluster
utils/scutils/redis_throttled_queue.py
RedisThrottledQueue.check_elastic
def check_elastic(self):
    '''
    Checks if we need to break moderation in order to maintain our
    desired throttle limit

    @return: True if we need to break moderation
    '''
    # Elastic mode only applies once the kick-in counter has reached
    # the limit; otherwise moderation stands as-is.
    if not self.elastic or self.elastic_kick_in != self.limit:
        return False
    # Break moderation when the hit window lags the limit by more than
    # the configured buffer.
    hits_in_window = self.redis_conn.zcard(self.window_key)
    return self.limit - hits_in_window > self.elastic_buffer
python
def check_elastic(self): ''' Checks if we need to break moderation in order to maintain our desired throttle limit @return: True if we need to break moderation ''' if self.elastic and self.elastic_kick_in == self.limit: value = self.redis_conn.zcard(self.window_key) if self.limit - value > self.elastic_buffer: return True return False
[ "def", "check_elastic", "(", "self", ")", ":", "if", "self", ".", "elastic", "and", "self", ".", "elastic_kick_in", "==", "self", ".", "limit", ":", "value", "=", "self", ".", "redis_conn", ".", "zcard", "(", "self", ".", "window_key", ")", "if", "self...
Checks if we need to break moderation in order to maintain our desired throttle limit @return: True if we need to break moderation
[ "Checks", "if", "we", "need", "to", "break", "moderation", "in", "order", "to", "maintain", "our", "desired", "throttle", "limit" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_throttled_queue.py#L159-L170
234,511
istresearch/scrapy-cluster
utils/scutils/redis_throttled_queue.py
RedisThrottledQueue.is_moderated
def is_moderated(self, curr_time, pipe):
    '''
    Tests to see if the moderation limit is not exceeded

    @return: True if the moderation limit is exceeded
    '''
    # A missing moderate key means it expired; treat the last hit as
    # the epoch (0.0) so the operation is allowed through.
    raw = pipe.get(self.moderate_key)
    last_time = 0.0 if raw is None else float(raw)
    # Moderated when the previous hit is closer than the moderation gap.
    return (curr_time - last_time) < self.moderation
python
def is_moderated(self, curr_time, pipe): ''' Tests to see if the moderation limit is not exceeded @return: True if the moderation limit is exceeded ''' # get key, otherwise default the moderate key expired and # we dont care value = pipe.get(self.moderate_key) if value is None: value = 0.0 else: value = float(value) # check moderation difference if (curr_time - value) < self.moderation: return True return False
[ "def", "is_moderated", "(", "self", ",", "curr_time", ",", "pipe", ")", ":", "# get key, otherwise default the moderate key expired and", "# we dont care", "value", "=", "pipe", ".", "get", "(", "self", ".", "moderate_key", ")", "if", "value", "is", "None", ":", ...
Tests to see if the moderation limit is not exceeded @return: True if the moderation limit is exceeded
[ "Tests", "to", "see", "if", "the", "moderation", "limit", "is", "not", "exceeded" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_throttled_queue.py#L172-L190
234,512
istresearch/scrapy-cluster
redis-monitor/plugins/info_monitor.py
InfoMonitor._get_bin
def _get_bin(self, key):
    '''
    Returns a binned dictionary based on redis zscore

    @return: dict mapping negated score -> list of decoded members
    '''
    # zscan_iter does not yield members in score order, so bucket each
    # member under its score first.
    binned = {}
    for packed, score in self.redis_conn.zscan_iter(key):
        # score is negated in redis
        binned.setdefault(-score, []).append(ujson.loads(packed))
    return binned
python
def _get_bin(self, key): ''' Returns a binned dictionary based on redis zscore @return: The sorted dict ''' # keys based on score sortedDict = {} # this doesnt return them in order, need to bin first for item in self.redis_conn.zscan_iter(key): my_item = ujson.loads(item[0]) # score is negated in redis my_score = -item[1] if my_score not in sortedDict: sortedDict[my_score] = [] sortedDict[my_score].append(my_item) return sortedDict
[ "def", "_get_bin", "(", "self", ",", "key", ")", ":", "# keys based on score", "sortedDict", "=", "{", "}", "# this doesnt return them in order, need to bin first", "for", "item", "in", "self", ".", "redis_conn", ".", "zscan_iter", "(", "key", ")", ":", "my_item",...
Returns a binned dictionary based on redis zscore @return: The sorted dict
[ "Returns", "a", "binned", "dictionary", "based", "on", "redis", "zscore" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/info_monitor.py#L60-L79
234,513
istresearch/scrapy-cluster
redis-monitor/plugins/info_monitor.py
InfoMonitor._build_appid_info
def _build_appid_info(self, master, dict):
    '''
    Builds the appid info object

    @param master: the master dict
    @param dict: the dict object received
    @return: the appid info object
    '''
    master['total_crawlids'] = 0
    master['total_pending'] = 0
    master['total_domains'] = 0
    master['crawlids'] = {}
    master['appid'] = dict['appid']
    master['spiderid'] = dict['spiderid']
    # used for finding total count of domains
    domain_dict = {}
    # get all domain queues
    match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
    for key in self.redis_conn.scan_iter(match=match_string):
        # queue keys look like "<spiderid>:<domain>:queue"
        domain = key.split(":")[1]
        sortedDict = self._get_bin(key)
        # now iterate through binned dict
        for score in sortedDict:
            for item in sortedDict[score]:
                if 'meta' in item:
                    item = item['meta']
                # only count requests belonging to the requested appid
                if item['appid'] == dict['appid']:
                    crawlid = item['crawlid']
                    # add new crawlid to master dict
                    if crawlid not in master['crawlids']:
                        master['crawlids'][crawlid] = {
                            'total': 0,
                            'domains': {},
                            'distinct_domains': 0
                        }
                        if 'expires' in item and item['expires'] != 0:
                            master['crawlids'][crawlid]['expires'] = item['expires']
                        master['total_crawlids'] += 1
                    master['crawlids'][crawlid]['total'] = master['crawlids'][crawlid]['total'] + 1
                    # first time this domain is seen for this crawlid
                    if domain not in master['crawlids'][crawlid]['domains']:
                        master['crawlids'][crawlid]['domains'][domain] = {
                            'total': 0,
                            # sentinel extremes; real priorities overwrite below
                            'high_priority': -9999,
                            'low_priority': 9999,
                        }
                        master['crawlids'][crawlid]['distinct_domains'] += 1
                        domain_dict[domain] = True
                    master['crawlids'][crawlid]['domains'][domain]['total'] = master['crawlids'][crawlid]['domains'][domain]['total'] + 1
                    # track the widest priority range seen for the domain
                    if item['priority'] > master['crawlids'][crawlid]['domains'][domain]['high_priority']:
                        master['crawlids'][crawlid]['domains'][domain]['high_priority'] = item['priority']
                    if item['priority'] < master['crawlids'][crawlid]['domains'][domain]['low_priority']:
                        master['crawlids'][crawlid]['domains'][domain]['low_priority'] = item['priority']
                    master['total_pending'] += 1
    master['total_domains'] = len(domain_dict)
    return master
python
def _build_appid_info(self, master, dict): ''' Builds the appid info object @param master: the master dict @param dict: the dict object received @return: the appid info object ''' master['total_crawlids'] = 0 master['total_pending'] = 0 master['total_domains'] = 0 master['crawlids'] = {} master['appid'] = dict['appid'] master['spiderid'] = dict['spiderid'] # used for finding total count of domains domain_dict = {} # get all domain queues match_string = '{sid}:*:queue'.format(sid=dict['spiderid']) for key in self.redis_conn.scan_iter(match=match_string): domain = key.split(":")[1] sortedDict = self._get_bin(key) # now iterate through binned dict for score in sortedDict: for item in sortedDict[score]: if 'meta' in item: item = item['meta'] if item['appid'] == dict['appid']: crawlid = item['crawlid'] # add new crawlid to master dict if crawlid not in master['crawlids']: master['crawlids'][crawlid] = { 'total': 0, 'domains': {}, 'distinct_domains': 0 } if 'expires' in item and item['expires'] != 0: master['crawlids'][crawlid]['expires'] = item['expires'] master['total_crawlids'] += 1 master['crawlids'][crawlid]['total'] = master['crawlids'][crawlid]['total'] + 1 if domain not in master['crawlids'][crawlid]['domains']: master['crawlids'][crawlid]['domains'][domain] = { 'total': 0, 'high_priority': -9999, 'low_priority': 9999, } master['crawlids'][crawlid]['distinct_domains'] += 1 domain_dict[domain] = True master['crawlids'][crawlid]['domains'][domain]['total'] = master['crawlids'][crawlid]['domains'][domain]['total'] + 1 if item['priority'] > master['crawlids'][crawlid]['domains'][domain]['high_priority']: master['crawlids'][crawlid]['domains'][domain]['high_priority'] = item['priority'] if item['priority'] < master['crawlids'][crawlid]['domains'][domain]['low_priority']: master['crawlids'][crawlid]['domains'][domain]['low_priority'] = item['priority'] master['total_pending'] += 1 master['total_domains'] = len(domain_dict) return master
[ "def", "_build_appid_info", "(", "self", ",", "master", ",", "dict", ")", ":", "master", "[", "'total_crawlids'", "]", "=", "0", "master", "[", "'total_pending'", "]", "=", "0", "master", "[", "'total_domains'", "]", "=", "0", "master", "[", "'crawlids'", ...
Builds the appid info object @param master: the master dict @param dict: the dict object received @return: the appid info object
[ "Builds", "the", "appid", "info", "object" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/info_monitor.py#L81-L150
234,514
istresearch/scrapy-cluster
redis-monitor/plugins/info_monitor.py
InfoMonitor._build_crawlid_info
def _build_crawlid_info(self, master, dict):
    '''
    Builds the crawlid info object

    @param master: the master dict
    @param dict: the dict object received
    @return: the crawlid info object
    '''
    master['total_pending'] = 0
    master['total_domains'] = 0
    master['appid'] = dict['appid']
    master['crawlid'] = dict['crawlid']
    master['spiderid'] = dict['spiderid']
    master['domains'] = {}
    timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(sid=dict['spiderid'],
                                                     aid=dict['appid'],
                                                     cid=dict['crawlid'])
    # surface the crawl expiration, if one was registered
    if self.redis_conn.exists(timeout_key):
        master['expires'] = self.redis_conn.get(timeout_key)
    # get all domain queues
    match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
    for key in self.redis_conn.scan_iter(match=match_string):
        # queue keys look like "<spiderid>:<domain>:queue"
        domain = key.split(":")[1]
        sortedDict = self._get_bin(key)
        # now iterate through binned dict
        for score in sortedDict:
            for item in sortedDict[score]:
                if 'meta' in item:
                    item = item['meta']
                # only count requests for this exact appid + crawlid
                if item['appid'] == dict['appid'] and item['crawlid'] == dict['crawlid']:
                    if domain not in master['domains']:
                        master['domains'][domain] = {}
                        master['domains'][domain]['total'] = 0
                        # sentinel extremes; real priorities overwrite below
                        master['domains'][domain]['high_priority'] = -9999
                        master['domains'][domain]['low_priority'] = 9999
                        master['total_domains'] = master['total_domains'] + 1
                    master['domains'][domain]['total'] = master['domains'][domain]['total'] + 1
                    # track the widest priority range seen for the domain
                    if item['priority'] > master['domains'][domain]['high_priority']:
                        master['domains'][domain]['high_priority'] = item['priority']
                    if item['priority'] < master['domains'][domain]['low_priority']:
                        master['domains'][domain]['low_priority'] = item['priority']
                    master['total_pending'] = master['total_pending'] + 1
    return master
python
def _build_crawlid_info(self, master, dict): ''' Builds the crawlid info object @param master: the master dict @param dict: the dict object received @return: the crawlid info object ''' master['total_pending'] = 0 master['total_domains'] = 0 master['appid'] = dict['appid'] master['crawlid'] = dict['crawlid'] master['spiderid'] = dict['spiderid'] master['domains'] = {} timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(sid=dict['spiderid'], aid=dict['appid'], cid=dict['crawlid']) if self.redis_conn.exists(timeout_key): master['expires'] = self.redis_conn.get(timeout_key) # get all domain queues match_string = '{sid}:*:queue'.format(sid=dict['spiderid']) for key in self.redis_conn.scan_iter(match=match_string): domain = key.split(":")[1] sortedDict = self._get_bin(key) # now iterate through binned dict for score in sortedDict: for item in sortedDict[score]: if 'meta' in item: item = item['meta'] if item['appid'] == dict['appid'] and item['crawlid'] == dict['crawlid']: if domain not in master['domains']: master['domains'][domain] = {} master['domains'][domain]['total'] = 0 master['domains'][domain]['high_priority'] = -9999 master['domains'][domain]['low_priority'] = 9999 master['total_domains'] = master['total_domains'] + 1 master['domains'][domain]['total'] = master['domains'][domain]['total'] + 1 if item['priority'] > master['domains'][domain]['high_priority']: master['domains'][domain]['high_priority'] = item['priority'] if item['priority'] < master['domains'][domain]['low_priority']: master['domains'][domain]['low_priority'] = item['priority'] master['total_pending'] = master['total_pending'] + 1 return master
[ "def", "_build_crawlid_info", "(", "self", ",", "master", ",", "dict", ")", ":", "master", "[", "'total_pending'", "]", "=", "0", "master", "[", "'total_domains'", "]", "=", "0", "master", "[", "'appid'", "]", "=", "dict", "[", "'appid'", "]", "master", ...
Builds the crawlid info object @param master: the master dict @param dict: the dict object received @return: the crawlid info object
[ "Builds", "the", "crawlid", "info", "object" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/info_monitor.py#L152-L202
234,515
istresearch/scrapy-cluster
redis-monitor/plugins/stop_monitor.py
StopMonitor._purge_crawl
def _purge_crawl(self, spiderid, appid, crawlid): ''' Wrapper for purging the crawlid from the queues @param spiderid: the spider id @param appid: the app id @param crawlid: the crawl id @return: The number of requests purged ''' # purge three times to try to make sure everything is cleaned total = self._mini_purge(spiderid, appid, crawlid) total = total + self._mini_purge(spiderid, appid, crawlid) total = total + self._mini_purge(spiderid, appid, crawlid) return total
python
def _purge_crawl(self, spiderid, appid, crawlid): ''' Wrapper for purging the crawlid from the queues @param spiderid: the spider id @param appid: the app id @param crawlid: the crawl id @return: The number of requests purged ''' # purge three times to try to make sure everything is cleaned total = self._mini_purge(spiderid, appid, crawlid) total = total + self._mini_purge(spiderid, appid, crawlid) total = total + self._mini_purge(spiderid, appid, crawlid) return total
[ "def", "_purge_crawl", "(", "self", ",", "spiderid", ",", "appid", ",", "crawlid", ")", ":", "# purge three times to try to make sure everything is cleaned", "total", "=", "self", ".", "_mini_purge", "(", "spiderid", ",", "appid", ",", "crawlid", ")", "total", "="...
Wrapper for purging the crawlid from the queues @param spiderid: the spider id @param appid: the app id @param crawlid: the crawl id @return: The number of requests purged
[ "Wrapper", "for", "purging", "the", "crawlid", "from", "the", "queues" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stop_monitor.py#L73-L87
234,516
istresearch/scrapy-cluster
redis-monitor/plugins/stop_monitor.py
StopMonitor._mini_purge
def _mini_purge(self, spiderid, appid, crawlid):
    '''
    Actually purges the crawlid from the queue

    @param spiderid: the spider id
    @param appid: the app id
    @param crawlid: the crawl id
    @return: The number of requests purged
    '''
    purged = 0
    # using scan for speed vs keys
    queue_pattern = '{sid}:*:queue'.format(sid=spiderid)
    for queue_key in self.redis_conn.scan_iter(match=queue_pattern):
        for member, _score in self.redis_conn.zscan_iter(queue_key):
            payload = ujson.loads(member)
            if 'meta' in payload:
                payload = payload['meta']
            if payload['appid'] == appid and payload['crawlid'] == crawlid:
                # remove the raw serialized member, not the decoded dict
                self.redis_conn.zrem(queue_key, member)
                purged = purged + 1
    return purged
python
def _mini_purge(self, spiderid, appid, crawlid): ''' Actually purges the crawlid from the queue @param spiderid: the spider id @param appid: the app id @param crawlid: the crawl id @return: The number of requests purged ''' total_purged = 0 match_string = '{sid}:*:queue'.format(sid=spiderid) # using scan for speed vs keys for key in self.redis_conn.scan_iter(match=match_string): for item in self.redis_conn.zscan_iter(key): item_key = item[0] item = ujson.loads(item_key) if 'meta' in item: item = item['meta'] if item['appid'] == appid and item['crawlid'] == crawlid: self.redis_conn.zrem(key, item_key) total_purged = total_purged + 1 return total_purged
[ "def", "_mini_purge", "(", "self", ",", "spiderid", ",", "appid", ",", "crawlid", ")", ":", "total_purged", "=", "0", "match_string", "=", "'{sid}:*:queue'", ".", "format", "(", "sid", "=", "spiderid", ")", "# using scan for speed vs keys", "for", "key", "in",...
Actually purges the crawlid from the queue @param spiderid: the spider id @param appid: the app id @param crawlid: the crawl id @return: The number of requests purged
[ "Actually", "purges", "the", "crawlid", "from", "the", "queue" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stop_monitor.py#L89-L113
234,517
istresearch/scrapy-cluster
rest/rest_service.py
log_call
def log_call(call_name): """Log the API call to the logger.""" def decorator(f): @wraps(f) def wrapper(*args, **kw): instance = args[0] instance.logger.info(call_name, {"content": request.get_json()}) return f(*args, **kw) return wrapper return decorator
python
def log_call(call_name): def decorator(f): @wraps(f) def wrapper(*args, **kw): instance = args[0] instance.logger.info(call_name, {"content": request.get_json()}) return f(*args, **kw) return wrapper return decorator
[ "def", "log_call", "(", "call_name", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "instance", "=", "args", "[", "0", "]", "instance", ".", "log...
Log the API call to the logger.
[ "Log", "the", "API", "call", "to", "the", "logger", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L38-L47
234,518
istresearch/scrapy-cluster
rest/rest_service.py
error_catch
def error_catch(f): """Handle unexpected errors within the rest function.""" @wraps(f) def wrapper(*args, **kw): instance = args[0] try: result = f(*args, **kw) if isinstance(result, tuple): return jsonify(result[0]), result[1] else: return jsonify(result), 200 except Exception as e: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.UNKNOWN_ERROR) log_dict = deepcopy(ret_dict) log_dict['error']['cause'] = e.message log_dict['error']['exception'] = str(e) log_dict['error']['ex'] = traceback.format_exc() instance.logger.error("Uncaught Exception Thrown", log_dict) return jsonify(ret_dict), 500 return wrapper
python
def error_catch(f): @wraps(f) def wrapper(*args, **kw): instance = args[0] try: result = f(*args, **kw) if isinstance(result, tuple): return jsonify(result[0]), result[1] else: return jsonify(result), 200 except Exception as e: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.UNKNOWN_ERROR) log_dict = deepcopy(ret_dict) log_dict['error']['cause'] = e.message log_dict['error']['exception'] = str(e) log_dict['error']['ex'] = traceback.format_exc() instance.logger.error("Uncaught Exception Thrown", log_dict) return jsonify(ret_dict), 500 return wrapper
[ "def", "error_catch", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "instance", "=", "args", "[", "0", "]", "try", ":", "result", "=", "f", "(", "*", "args", ",", "*", "*...
Handle unexpected errors within the rest function.
[ "Handle", "unexpected", "errors", "within", "the", "rest", "function", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L50-L71
234,519
istresearch/scrapy-cluster
rest/rest_service.py
validate_json
def validate_json(f): """Validate that the call is JSON.""" @wraps(f) def wrapper(*args, **kw): instance = args[0] try: if request.get_json() is None: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.MUST_JSON) instance.logger.error(instance.MUST_JSON) return jsonify(ret_dict), 400 except BadRequest: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.MUST_JSON) instance.logger.error(instance.MUST_JSON) return jsonify(ret_dict), 400 instance.logger.debug("JSON is valid") return f(*args, **kw) return wrapper
python
def validate_json(f): @wraps(f) def wrapper(*args, **kw): instance = args[0] try: if request.get_json() is None: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.MUST_JSON) instance.logger.error(instance.MUST_JSON) return jsonify(ret_dict), 400 except BadRequest: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.MUST_JSON) instance.logger.error(instance.MUST_JSON) return jsonify(ret_dict), 400 instance.logger.debug("JSON is valid") return f(*args, **kw) return wrapper
[ "def", "validate_json", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "instance", "=", "args", "[", "0", "]", "try", ":", "if", "request", ".", "get_json", "(", ")", "is", ...
Validate that the call is JSON.
[ "Validate", "that", "the", "call", "is", "JSON", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L74-L94
234,520
istresearch/scrapy-cluster
rest/rest_service.py
validate_schema
def validate_schema(schema_name): """Validate the JSON against a required schema_name.""" def decorator(f): @wraps(f) def wrapper(*args, **kw): instance = args[0] try: instance.validator(instance.schemas[schema_name]).validate(request.get_json()) except ValidationError, e: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.BAD_SCHEMA, e.message) instance.logger.error("Invalid Schema", ret_dict) return jsonify(ret_dict), 400 instance.logger.debug("Schema is valid") return f(*args, **kw) return wrapper return decorator
python
def validate_schema(schema_name): def decorator(f): @wraps(f) def wrapper(*args, **kw): instance = args[0] try: instance.validator(instance.schemas[schema_name]).validate(request.get_json()) except ValidationError, e: ret_dict = instance._create_ret_object(instance.FAILURE, None, True, instance.BAD_SCHEMA, e.message) instance.logger.error("Invalid Schema", ret_dict) return jsonify(ret_dict), 400 instance.logger.debug("Schema is valid") return f(*args, **kw) return wrapper return decorator
[ "def", "validate_schema", "(", "schema_name", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "instance", "=", "args", "[", "0", "]", "try", ":", ...
Validate the JSON against a required schema_name.
[ "Validate", "the", "JSON", "against", "a", "required", "schema_name", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L97-L115
234,521
istresearch/scrapy-cluster
rest/rest_service.py
RestService._load_schemas
def _load_schemas(self): """Loads any schemas for JSON validation""" for filename in os.listdir(self.settings['SCHEMA_DIR']): if filename[-4:] == 'json': name = filename[:-5] with open(self.settings['SCHEMA_DIR'] + filename) as the_file: self.schemas[name] = json.load(the_file) self.logger.debug("Successfully loaded " + filename + " schema")
python
def _load_schemas(self): for filename in os.listdir(self.settings['SCHEMA_DIR']): if filename[-4:] == 'json': name = filename[:-5] with open(self.settings['SCHEMA_DIR'] + filename) as the_file: self.schemas[name] = json.load(the_file) self.logger.debug("Successfully loaded " + filename + " schema")
[ "def", "_load_schemas", "(", "self", ")", ":", "for", "filename", "in", "os", ".", "listdir", "(", "self", ".", "settings", "[", "'SCHEMA_DIR'", "]", ")", ":", "if", "filename", "[", "-", "4", ":", "]", "==", "'json'", ":", "name", "=", "filename", ...
Loads any schemas for JSON validation
[ "Loads", "any", "schemas", "for", "JSON", "validation" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L198-L205
234,522
istresearch/scrapy-cluster
rest/rest_service.py
RestService._spawn_redis_connection_thread
def _spawn_redis_connection_thread(self): """Spawns a redis connection thread""" self.logger.debug("Spawn redis connection thread") self.redis_connected = False self._redis_thread = Thread(target=self._setup_redis) self._redis_thread.setDaemon(True) self._redis_thread.start()
python
def _spawn_redis_connection_thread(self): self.logger.debug("Spawn redis connection thread") self.redis_connected = False self._redis_thread = Thread(target=self._setup_redis) self._redis_thread.setDaemon(True) self._redis_thread.start()
[ "def", "_spawn_redis_connection_thread", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Spawn redis connection thread\"", ")", "self", ".", "redis_connected", "=", "False", "self", ".", "_redis_thread", "=", "Thread", "(", "target", "=", "se...
Spawns a redis connection thread
[ "Spawns", "a", "redis", "connection", "thread" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L228-L234
234,523
istresearch/scrapy-cluster
rest/rest_service.py
RestService._spawn_kafka_connection_thread
def _spawn_kafka_connection_thread(self): """Spawns a kafka connection thread""" self.logger.debug("Spawn kafka connection thread") self.kafka_connected = False self._kafka_thread = Thread(target=self._setup_kafka) self._kafka_thread.setDaemon(True) self._kafka_thread.start()
python
def _spawn_kafka_connection_thread(self): self.logger.debug("Spawn kafka connection thread") self.kafka_connected = False self._kafka_thread = Thread(target=self._setup_kafka) self._kafka_thread.setDaemon(True) self._kafka_thread.start()
[ "def", "_spawn_kafka_connection_thread", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Spawn kafka connection thread\"", ")", "self", ".", "kafka_connected", "=", "False", "self", ".", "_kafka_thread", "=", "Thread", "(", "target", "=", "se...
Spawns a kafka connection thread
[ "Spawns", "a", "kafka", "connection", "thread" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L236-L242
234,524
istresearch/scrapy-cluster
rest/rest_service.py
RestService._consumer_loop
def _consumer_loop(self): """The main consumer loop""" self.logger.debug("running main consumer thread") while not self.closed: if self.kafka_connected: self._process_messages() time.sleep(self.settings['KAFKA_CONSUMER_SLEEP_TIME'])
python
def _consumer_loop(self): self.logger.debug("running main consumer thread") while not self.closed: if self.kafka_connected: self._process_messages() time.sleep(self.settings['KAFKA_CONSUMER_SLEEP_TIME'])
[ "def", "_consumer_loop", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"running main consumer thread\"", ")", "while", "not", "self", ".", "closed", ":", "if", "self", ".", "kafka_connected", ":", "self", ".", "_process_messages", "(", "...
The main consumer loop
[ "The", "main", "consumer", "loop" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L251-L257
234,525
istresearch/scrapy-cluster
rest/rest_service.py
RestService._process_messages
def _process_messages(self): """Processes messages received from kafka""" try: for message in self.consumer: try: if message is None: self.logger.debug("no message") break loaded_dict = json.loads(message.value) self.logger.debug("got valid kafka message") with self.uuids_lock: if 'uuid' in loaded_dict: if loaded_dict['uuid'] in self.uuids and \ self.uuids[loaded_dict['uuid']] != 'poll': self.logger.debug("Found Kafka message from request") self.uuids[loaded_dict['uuid']] = loaded_dict else: self.logger.debug("Got poll result") self._send_result_to_redis(loaded_dict) else: self.logger.debug("Got message not intended for this process") except ValueError: extras = {} if message is not None: extras["data"] = message.value self.logger.warning('Unparseable JSON Received from kafka', extra=extras) self._check_kafka_disconnect() except OffsetOutOfRangeError: # consumer has no idea where they are self.consumer.seek_to_end() self.logger.error("Kafka offset out of range error")
python
def _process_messages(self): try: for message in self.consumer: try: if message is None: self.logger.debug("no message") break loaded_dict = json.loads(message.value) self.logger.debug("got valid kafka message") with self.uuids_lock: if 'uuid' in loaded_dict: if loaded_dict['uuid'] in self.uuids and \ self.uuids[loaded_dict['uuid']] != 'poll': self.logger.debug("Found Kafka message from request") self.uuids[loaded_dict['uuid']] = loaded_dict else: self.logger.debug("Got poll result") self._send_result_to_redis(loaded_dict) else: self.logger.debug("Got message not intended for this process") except ValueError: extras = {} if message is not None: extras["data"] = message.value self.logger.warning('Unparseable JSON Received from kafka', extra=extras) self._check_kafka_disconnect() except OffsetOutOfRangeError: # consumer has no idea where they are self.consumer.seek_to_end() self.logger.error("Kafka offset out of range error")
[ "def", "_process_messages", "(", "self", ")", ":", "try", ":", "for", "message", "in", "self", ".", "consumer", ":", "try", ":", "if", "message", "is", "None", ":", "self", ".", "logger", ".", "debug", "(", "\"no message\"", ")", "break", "loaded_dict", ...
Processes messages received from kafka
[ "Processes", "messages", "received", "from", "kafka" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L259-L293
234,526
istresearch/scrapy-cluster
rest/rest_service.py
RestService._send_result_to_redis
def _send_result_to_redis(self, result): """Sends the result of a poll to redis to be used potentially by another process @param result: the result retrieved from kafka""" if self.redis_connected: self.logger.debug("Sending result to redis") try: key = "rest:poll:{u}".format(u=result['uuid']) self.redis_conn.set(key, json.dumps(result)) except ConnectionError: self.logger.error("Lost connection to Redis") self._spawn_redis_connection_thread() else: self.logger.warning("Unable to send result to redis, not connected")
python
def _send_result_to_redis(self, result): if self.redis_connected: self.logger.debug("Sending result to redis") try: key = "rest:poll:{u}".format(u=result['uuid']) self.redis_conn.set(key, json.dumps(result)) except ConnectionError: self.logger.error("Lost connection to Redis") self._spawn_redis_connection_thread() else: self.logger.warning("Unable to send result to redis, not connected")
[ "def", "_send_result_to_redis", "(", "self", ",", "result", ")", ":", "if", "self", ".", "redis_connected", ":", "self", ".", "logger", ".", "debug", "(", "\"Sending result to redis\"", ")", "try", ":", "key", "=", "\"rest:poll:{u}\"", ".", "format", "(", "u...
Sends the result of a poll to redis to be used potentially by another process @param result: the result retrieved from kafka
[ "Sends", "the", "result", "of", "a", "poll", "to", "redis", "to", "be", "used", "potentially", "by", "another", "process" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L295-L309
234,527
istresearch/scrapy-cluster
rest/rest_service.py
RestService._check_kafka_disconnect
def _check_kafka_disconnect(self): """Checks the kafka connection is still valid""" for node_id in self.consumer._client._conns: conn = self.consumer._client._conns[node_id] if conn.state == ConnectionStates.DISCONNECTED or \ conn.state == ConnectionStates.DISCONNECTING: self._spawn_kafka_connection_thread() break
python
def _check_kafka_disconnect(self): for node_id in self.consumer._client._conns: conn = self.consumer._client._conns[node_id] if conn.state == ConnectionStates.DISCONNECTED or \ conn.state == ConnectionStates.DISCONNECTING: self._spawn_kafka_connection_thread() break
[ "def", "_check_kafka_disconnect", "(", "self", ")", ":", "for", "node_id", "in", "self", ".", "consumer", ".", "_client", ".", "_conns", ":", "conn", "=", "self", ".", "consumer", ".", "_client", ".", "_conns", "[", "node_id", "]", "if", "conn", ".", "...
Checks the kafka connection is still valid
[ "Checks", "the", "kafka", "connection", "is", "still", "valid" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L311-L318
234,528
istresearch/scrapy-cluster
rest/rest_service.py
RestService._heartbeat_loop
def _heartbeat_loop(self): """A main run loop thread to do work""" self.logger.debug("running main heartbeat thread") while not self.closed: time.sleep(self.settings['SLEEP_TIME']) self._report_self()
python
def _heartbeat_loop(self): self.logger.debug("running main heartbeat thread") while not self.closed: time.sleep(self.settings['SLEEP_TIME']) self._report_self()
[ "def", "_heartbeat_loop", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"running main heartbeat thread\"", ")", "while", "not", "self", ".", "closed", ":", "time", ".", "sleep", "(", "self", ".", "settings", "[", "'SLEEP_TIME'", "]", "...
A main run loop thread to do work
[ "A", "main", "run", "loop", "thread", "to", "do", "work" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L320-L325
234,529
istresearch/scrapy-cluster
rest/rest_service.py
RestService._setup_redis
def _setup_redis(self): """Returns a Redis Client""" if not self.closed: try: self.logger.debug("Creating redis connection to host " + str(self.settings['REDIS_HOST'])) self.redis_conn = redis.StrictRedis(host=self.settings['REDIS_HOST'], port=self.settings['REDIS_PORT'], db=self.settings['REDIS_DB']) self.redis_conn.info() self.redis_connected = True self.logger.info("Successfully connected to redis") except KeyError as e: self.logger.error('Missing setting named ' + str(e), {'ex': traceback.format_exc()}) except: self.logger.error("Couldn't initialize redis client.", {'ex': traceback.format_exc()}) raise
python
def _setup_redis(self): if not self.closed: try: self.logger.debug("Creating redis connection to host " + str(self.settings['REDIS_HOST'])) self.redis_conn = redis.StrictRedis(host=self.settings['REDIS_HOST'], port=self.settings['REDIS_PORT'], db=self.settings['REDIS_DB']) self.redis_conn.info() self.redis_connected = True self.logger.info("Successfully connected to redis") except KeyError as e: self.logger.error('Missing setting named ' + str(e), {'ex': traceback.format_exc()}) except: self.logger.error("Couldn't initialize redis client.", {'ex': traceback.format_exc()}) raise
[ "def", "_setup_redis", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "try", ":", "self", ".", "logger", ".", "debug", "(", "\"Creating redis connection to host \"", "+", "str", "(", "self", ".", "settings", "[", "'REDIS_HOST'", "]", ")", ...
Returns a Redis Client
[ "Returns", "a", "Redis", "Client" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L347-L365
234,530
istresearch/scrapy-cluster
rest/rest_service.py
RestService._setup_kafka
def _setup_kafka(self): """ Sets up kafka connections """ # close older connections if self.consumer is not None: self.logger.debug("Closing existing kafka consumer") self.consumer.close() self.consumer = None if self.producer is not None: self.logger.debug("Closing existing kafka producer") self.producer.flush() self.producer.close(timeout=10) self.producer = None # create new connections self._consumer_thread = None self.logger.debug("Creating kafka connections") self.consumer = self._create_consumer() if not self.closed: self.logger.debug("Kafka Conumer created") self.producer = self._create_producer() if not self.closed: self.logger.debug("Kafka Producer created") if not self.closed: self.kafka_connected = True self.logger.info("Connected successfully to Kafka") self._spawn_kafka_consumer_thread()
python
def _setup_kafka(self): # close older connections if self.consumer is not None: self.logger.debug("Closing existing kafka consumer") self.consumer.close() self.consumer = None if self.producer is not None: self.logger.debug("Closing existing kafka producer") self.producer.flush() self.producer.close(timeout=10) self.producer = None # create new connections self._consumer_thread = None self.logger.debug("Creating kafka connections") self.consumer = self._create_consumer() if not self.closed: self.logger.debug("Kafka Conumer created") self.producer = self._create_producer() if not self.closed: self.logger.debug("Kafka Producer created") if not self.closed: self.kafka_connected = True self.logger.info("Connected successfully to Kafka") self._spawn_kafka_consumer_thread()
[ "def", "_setup_kafka", "(", "self", ")", ":", "# close older connections", "if", "self", ".", "consumer", "is", "not", "None", ":", "self", ".", "logger", ".", "debug", "(", "\"Closing existing kafka consumer\"", ")", "self", ".", "consumer", ".", "close", "("...
Sets up kafka connections
[ "Sets", "up", "kafka", "connections" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L367-L395
234,531
istresearch/scrapy-cluster
rest/rest_service.py
RestService._create_consumer
def _create_consumer(self): """Tries to establing the Kafka consumer connection""" if not self.closed: try: self.logger.debug("Creating new kafka consumer using brokers: " + str(self.settings['KAFKA_HOSTS']) + ' and topic ' + self.settings['KAFKA_TOPIC_PREFIX'] + ".outbound_firehose") return KafkaConsumer( self.settings['KAFKA_TOPIC_PREFIX'] + ".outbound_firehose", group_id=None, bootstrap_servers=self.settings['KAFKA_HOSTS'], consumer_timeout_ms=self.settings['KAFKA_CONSUMER_TIMEOUT'], auto_offset_reset=self.settings['KAFKA_CONSUMER_AUTO_OFFSET_RESET'], auto_commit_interval_ms=self.settings['KAFKA_CONSUMER_COMMIT_INTERVAL_MS'], enable_auto_commit=self.settings['KAFKA_CONSUMER_AUTO_COMMIT_ENABLE'], max_partition_fetch_bytes=self.settings['KAFKA_CONSUMER_FETCH_MESSAGE_MAX_BYTES']) except KeyError as e: self.logger.error('Missing setting named ' + str(e), {'ex': traceback.format_exc()}) except: self.logger.error("Couldn't initialize kafka consumer for topic", {'ex': traceback.format_exc()}) raise
python
def _create_consumer(self): if not self.closed: try: self.logger.debug("Creating new kafka consumer using brokers: " + str(self.settings['KAFKA_HOSTS']) + ' and topic ' + self.settings['KAFKA_TOPIC_PREFIX'] + ".outbound_firehose") return KafkaConsumer( self.settings['KAFKA_TOPIC_PREFIX'] + ".outbound_firehose", group_id=None, bootstrap_servers=self.settings['KAFKA_HOSTS'], consumer_timeout_ms=self.settings['KAFKA_CONSUMER_TIMEOUT'], auto_offset_reset=self.settings['KAFKA_CONSUMER_AUTO_OFFSET_RESET'], auto_commit_interval_ms=self.settings['KAFKA_CONSUMER_COMMIT_INTERVAL_MS'], enable_auto_commit=self.settings['KAFKA_CONSUMER_AUTO_COMMIT_ENABLE'], max_partition_fetch_bytes=self.settings['KAFKA_CONSUMER_FETCH_MESSAGE_MAX_BYTES']) except KeyError as e: self.logger.error('Missing setting named ' + str(e), {'ex': traceback.format_exc()}) except: self.logger.error("Couldn't initialize kafka consumer for topic", {'ex': traceback.format_exc()}) raise
[ "def", "_create_consumer", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "try", ":", "self", ".", "logger", ".", "debug", "(", "\"Creating new kafka consumer using brokers: \"", "+", "str", "(", "self", ".", "settings", "[", "'KAFKA_HOSTS'", ...
Tries to establing the Kafka consumer connection
[ "Tries", "to", "establing", "the", "Kafka", "consumer", "connection" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L398-L422
234,532
istresearch/scrapy-cluster
rest/rest_service.py
RestService.run
def run(self): """Main flask run loop""" self.logger.info("Running main flask method on port " + str(self.settings['FLASK_PORT'])) self.app.run(host='0.0.0.0', port=self.settings['FLASK_PORT'])
python
def run(self): self.logger.info("Running main flask method on port " + str(self.settings['FLASK_PORT'])) self.app.run(host='0.0.0.0', port=self.settings['FLASK_PORT'])
[ "def", "run", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Running main flask method on port \"", "+", "str", "(", "self", ".", "settings", "[", "'FLASK_PORT'", "]", ")", ")", "self", ".", "app", ".", "run", "(", "host", "=", "'0....
Main flask run loop
[ "Main", "flask", "run", "loop" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L445-L449
234,533
istresearch/scrapy-cluster
rest/rest_service.py
RestService._create_ret_object
def _create_ret_object(self, status=SUCCESS, data=None, error=False, error_message=None, error_cause=None): """ Create generic reponse objects. :param str status: The SUCCESS or FAILURE of the request :param obj data: The data to return :param bool error: Set to True to add Error response :param str error_message: The generic error message :param str error_cause: The cause of the error :returns: A dictionary of values """ ret = {} if status == self.FAILURE: ret['status'] = self.FAILURE else: ret['status'] = self.SUCCESS ret['data'] = data if error: ret['error'] = {} if error_message is not None: ret['error']['message'] = error_message if error_cause is not None: ret['error']['cause'] = error_cause else: ret['error'] = None return ret
python
def _create_ret_object(self, status=SUCCESS, data=None, error=False, error_message=None, error_cause=None): ret = {} if status == self.FAILURE: ret['status'] = self.FAILURE else: ret['status'] = self.SUCCESS ret['data'] = data if error: ret['error'] = {} if error_message is not None: ret['error']['message'] = error_message if error_cause is not None: ret['error']['cause'] = error_cause else: ret['error'] = None return ret
[ "def", "_create_ret_object", "(", "self", ",", "status", "=", "SUCCESS", ",", "data", "=", "None", ",", "error", "=", "False", ",", "error_message", "=", "None", ",", "error_cause", "=", "None", ")", ":", "ret", "=", "{", "}", "if", "status", "==", "...
Create generic reponse objects. :param str status: The SUCCESS or FAILURE of the request :param obj data: The data to return :param bool error: Set to True to add Error response :param str error_message: The generic error message :param str error_cause: The cause of the error :returns: A dictionary of values
[ "Create", "generic", "reponse", "objects", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L451-L478
234,534
istresearch/scrapy-cluster
rest/rest_service.py
RestService._close_thread
def _close_thread(self, thread, thread_name): """Closes daemon threads @param thread: the thread to close @param thread_name: a human readable name of the thread """ if thread is not None and thread.isAlive(): self.logger.debug("Waiting for {} thread to close".format(thread_name)) thread.join(timeout=self.settings['DAEMON_THREAD_JOIN_TIMEOUT']) if thread.isAlive(): self.logger.warn("{} daemon thread unable to be shutdown" " within timeout".format(thread_name))
python
def _close_thread(self, thread, thread_name): if thread is not None and thread.isAlive(): self.logger.debug("Waiting for {} thread to close".format(thread_name)) thread.join(timeout=self.settings['DAEMON_THREAD_JOIN_TIMEOUT']) if thread.isAlive(): self.logger.warn("{} daemon thread unable to be shutdown" " within timeout".format(thread_name))
[ "def", "_close_thread", "(", "self", ",", "thread", ",", "thread_name", ")", ":", "if", "thread", "is", "not", "None", "and", "thread", ".", "isAlive", "(", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Waiting for {} thread to close\"", ".", "for...
Closes daemon threads @param thread: the thread to close @param thread_name: a human readable name of the thread
[ "Closes", "daemon", "threads" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L480-L491
234,535
istresearch/scrapy-cluster
rest/rest_service.py
RestService.close
def close(self): """ Cleans up anything from the process """ self.logger.info("Closing Rest Service") self.closed = True # close threads self._close_thread(self._redis_thread, "Redis setup") self._close_thread(self._heartbeat_thread, "Heartbeat") self._close_thread(self._kafka_thread, "Kafka setup") self._close_thread(self._consumer_thread, "Consumer") # close kafka if self.consumer is not None: self.logger.debug("Closing kafka consumer") self.consumer.close() if self.producer is not None: self.logger.debug("Closing kafka producer") self.producer.close(timeout=10)
python
def close(self): self.logger.info("Closing Rest Service") self.closed = True # close threads self._close_thread(self._redis_thread, "Redis setup") self._close_thread(self._heartbeat_thread, "Heartbeat") self._close_thread(self._kafka_thread, "Kafka setup") self._close_thread(self._consumer_thread, "Consumer") # close kafka if self.consumer is not None: self.logger.debug("Closing kafka consumer") self.consumer.close() if self.producer is not None: self.logger.debug("Closing kafka producer") self.producer.close(timeout=10)
[ "def", "close", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Closing Rest Service\"", ")", "self", ".", "closed", "=", "True", "# close threads", "self", ".", "_close_thread", "(", "self", ".", "_redis_thread", ",", "\"Redis setup\"", "...
Cleans up anything from the process
[ "Cleans", "up", "anything", "from", "the", "process" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L493-L512
234,536
istresearch/scrapy-cluster
rest/rest_service.py
RestService._calculate_health
def _calculate_health(self): """Returns a string representation of the node health @returns: GREEN if fully connected, YELLOW if partially connected, RED if not connected """ if self.redis_connected and self.kafka_connected: return "GREEN" elif self.redis_connected or self.kafka_connected: return "YELLOW" else: return "RED"
python
def _calculate_health(self): if self.redis_connected and self.kafka_connected: return "GREEN" elif self.redis_connected or self.kafka_connected: return "YELLOW" else: return "RED"
[ "def", "_calculate_health", "(", "self", ")", ":", "if", "self", ".", "redis_connected", "and", "self", ".", "kafka_connected", ":", "return", "\"GREEN\"", "elif", "self", ".", "redis_connected", "or", "self", ".", "kafka_connected", ":", "return", "\"YELLOW\"",...
Returns a string representation of the node health @returns: GREEN if fully connected, YELLOW if partially connected, RED if not connected
[ "Returns", "a", "string", "representation", "of", "the", "node", "health" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L514-L525
234,537
istresearch/scrapy-cluster
rest/rest_service.py
RestService._feed_to_kafka
def _feed_to_kafka(self, json_item): """Sends a request to Kafka :param json_item: The json item to send :returns: A boolean indicating whther the data was sent successfully or not """ @MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False) def _feed(json_item): try: self.logger.debug("Sending json to kafka at " + str(self.settings['KAFKA_PRODUCER_TOPIC'])) future = self.producer.send(self.settings['KAFKA_PRODUCER_TOPIC'], json_item) future.add_callback(self._kafka_success) future.add_errback(self._kafka_failure) self.producer.flush() return True except Exception as e: self.logger.error("Lost connection to Kafka") self._spawn_kafka_connection_thread() return False return _feed(json_item)
python
def _feed_to_kafka(self, json_item): @MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False) def _feed(json_item): try: self.logger.debug("Sending json to kafka at " + str(self.settings['KAFKA_PRODUCER_TOPIC'])) future = self.producer.send(self.settings['KAFKA_PRODUCER_TOPIC'], json_item) future.add_callback(self._kafka_success) future.add_errback(self._kafka_failure) self.producer.flush() return True except Exception as e: self.logger.error("Lost connection to Kafka") self._spawn_kafka_connection_thread() return False return _feed(json_item)
[ "def", "_feed_to_kafka", "(", "self", ",", "json_item", ")", ":", "@", "MethodTimer", ".", "timeout", "(", "self", ".", "settings", "[", "'KAFKA_FEED_TIMEOUT'", "]", ",", "False", ")", "def", "_feed", "(", "json_item", ")", ":", "try", ":", "self", ".", ...
Sends a request to Kafka :param json_item: The json item to send :returns: A boolean indicating whther the data was sent successfully or not
[ "Sends", "a", "request", "to", "Kafka" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L540-L565
234,538
istresearch/scrapy-cluster
rest/rest_service.py
RestService._decorate_routes
def _decorate_routes(self): """ Decorates the routes to use within the flask app """ self.logger.debug("Decorating routes") # self.app.add_url_rule('/', 'catch', self.catch, methods=['GET'], # defaults={'path': ''}) self.app.add_url_rule('/<path:path>', 'catch', self.catch, methods=['GET', 'POST'], defaults={'path': ''}) self.app.add_url_rule('/', 'index', self.index, methods=['POST', 'GET']) self.app.add_url_rule('/feed', 'feed', self.feed, methods=['POST']) self.app.add_url_rule('/poll', 'poll', self.poll, methods=['POST'])
python
def _decorate_routes(self): self.logger.debug("Decorating routes") # self.app.add_url_rule('/', 'catch', self.catch, methods=['GET'], # defaults={'path': ''}) self.app.add_url_rule('/<path:path>', 'catch', self.catch, methods=['GET', 'POST'], defaults={'path': ''}) self.app.add_url_rule('/', 'index', self.index, methods=['POST', 'GET']) self.app.add_url_rule('/feed', 'feed', self.feed, methods=['POST']) self.app.add_url_rule('/poll', 'poll', self.poll, methods=['POST'])
[ "def", "_decorate_routes", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Decorating routes\"", ")", "# self.app.add_url_rule('/', 'catch', self.catch, methods=['GET'],", "# defaults={'path': ''})", "self", ".", "app", ".", "add_url...
Decorates the routes to use within the flask app
[ "Decorates", "the", "routes", "to", "use", "within", "the", "flask", "app" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L569-L583
234,539
istresearch/scrapy-cluster
rest/rest_service.py
RestService.poll
def poll(self): """Retrieves older requests that may not make it back quick enough""" if self.redis_connected: json_item = request.get_json() result = None try: key = "rest:poll:{u}".format(u=json_item['poll_id']) result = self.redis_conn.get(key) if result is not None: result = json.loads(result) self.logger.debug("Found previous poll") self.redis_conn.delete(key) return self._create_ret_object(self.SUCCESS, result) else: self.logger.debug("poll key does not exist") return self._create_ret_object(self.FAILURE, None, True, "Could not find matching poll_id"), 404 except ConnectionError: self.logger.error("Lost connection to Redis") self._spawn_redis_connection_thread() except ValueError: extras = { "value": result } self.logger.warning('Unparseable JSON Received from redis', extra=extras) self.redis_conn.delete(key) return self._create_ret_object(self.FAILURE, None, True, "Unparseable JSON Received " "from redis"), 500 self.logger.warn("Unable to poll redis, not connected") return self._create_ret_object(self.FAILURE, None, True, "Unable to connect to Redis"), 500
python
def poll(self): if self.redis_connected: json_item = request.get_json() result = None try: key = "rest:poll:{u}".format(u=json_item['poll_id']) result = self.redis_conn.get(key) if result is not None: result = json.loads(result) self.logger.debug("Found previous poll") self.redis_conn.delete(key) return self._create_ret_object(self.SUCCESS, result) else: self.logger.debug("poll key does not exist") return self._create_ret_object(self.FAILURE, None, True, "Could not find matching poll_id"), 404 except ConnectionError: self.logger.error("Lost connection to Redis") self._spawn_redis_connection_thread() except ValueError: extras = { "value": result } self.logger.warning('Unparseable JSON Received from redis', extra=extras) self.redis_conn.delete(key) return self._create_ret_object(self.FAILURE, None, True, "Unparseable JSON Received " "from redis"), 500 self.logger.warn("Unable to poll redis, not connected") return self._create_ret_object(self.FAILURE, None, True, "Unable to connect to Redis"), 500
[ "def", "poll", "(", "self", ")", ":", "if", "self", ".", "redis_connected", ":", "json_item", "=", "request", ".", "get_json", "(", ")", "result", "=", "None", "try", ":", "key", "=", "\"rest:poll:{u}\"", ".", "format", "(", "u", "=", "json_item", "[",...
Retrieves older requests that may not make it back quick enough
[ "Retrieves", "older", "requests", "that", "may", "not", "make", "it", "back", "quick", "enough" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L661-L695
234,540
vimalloc/flask-jwt-extended
flask_jwt_extended/utils.py
create_refresh_token
def create_refresh_token(identity, expires_delta=None, user_claims=None): """ Creates a new refresh token. :param identity: The identity of this token, which can be any data that is json serializable. It can also be a python object, in which case you can use the :meth:`~flask_jwt_extended.JWTManager.user_identity_loader` to define a callback function that will be used to pull a json serializable identity out of the object. :param expires_delta: A `datetime.timedelta` for how long this token should last before it expires. Set to False to disable expiration. If this is None, it will use the 'JWT_REFRESH_TOKEN_EXPIRES` config value (see :ref:`Configuration Options`) :param user_claims: Optionnal JSON serializable to override user claims. :return: An encoded refresh token """ jwt_manager = _get_jwt_manager() return jwt_manager._create_refresh_token(identity, expires_delta, user_claims)
python
def create_refresh_token(identity, expires_delta=None, user_claims=None): jwt_manager = _get_jwt_manager() return jwt_manager._create_refresh_token(identity, expires_delta, user_claims)
[ "def", "create_refresh_token", "(", "identity", ",", "expires_delta", "=", "None", ",", "user_claims", "=", "None", ")", ":", "jwt_manager", "=", "_get_jwt_manager", "(", ")", "return", "jwt_manager", ".", "_create_refresh_token", "(", "identity", ",", "expires_de...
Creates a new refresh token. :param identity: The identity of this token, which can be any data that is json serializable. It can also be a python object, in which case you can use the :meth:`~flask_jwt_extended.JWTManager.user_identity_loader` to define a callback function that will be used to pull a json serializable identity out of the object. :param expires_delta: A `datetime.timedelta` for how long this token should last before it expires. Set to False to disable expiration. If this is None, it will use the 'JWT_REFRESH_TOKEN_EXPIRES` config value (see :ref:`Configuration Options`) :param user_claims: Optionnal JSON serializable to override user claims. :return: An encoded refresh token
[ "Creates", "a", "new", "refresh", "token", "." ]
569d3b89eb5d2586d0cff4581a346229c623cefc
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/flask_jwt_extended/utils.py#L160-L179
234,541
vimalloc/flask-jwt-extended
examples/database_blacklist/blacklist_helpers.py
is_token_revoked
def is_token_revoked(decoded_token): """ Checks if the given token is revoked or not. Because we are adding all the tokens that we create into this database, if the token is not present in the database we are going to consider it revoked, as we don't know where it was created. """ jti = decoded_token['jti'] try: token = TokenBlacklist.query.filter_by(jti=jti).one() return token.revoked except NoResultFound: return True
python
def is_token_revoked(decoded_token): jti = decoded_token['jti'] try: token = TokenBlacklist.query.filter_by(jti=jti).one() return token.revoked except NoResultFound: return True
[ "def", "is_token_revoked", "(", "decoded_token", ")", ":", "jti", "=", "decoded_token", "[", "'jti'", "]", "try", ":", "token", "=", "TokenBlacklist", ".", "query", ".", "filter_by", "(", "jti", "=", "jti", ")", ".", "one", "(", ")", "return", "token", ...
Checks if the given token is revoked or not. Because we are adding all the tokens that we create into this database, if the token is not present in the database we are going to consider it revoked, as we don't know where it was created.
[ "Checks", "if", "the", "given", "token", "is", "revoked", "or", "not", ".", "Because", "we", "are", "adding", "all", "the", "tokens", "that", "we", "create", "into", "this", "database", "if", "the", "token", "is", "not", "present", "in", "the", "database...
569d3b89eb5d2586d0cff4581a346229c623cefc
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/examples/database_blacklist/blacklist_helpers.py#L42-L54
234,542
vimalloc/flask-jwt-extended
examples/database_blacklist/blacklist_helpers.py
revoke_token
def revoke_token(token_id, user): """ Revokes the given token. Raises a TokenNotFound error if the token does not exist in the database """ try: token = TokenBlacklist.query.filter_by(id=token_id, user_identity=user).one() token.revoked = True db.session.commit() except NoResultFound: raise TokenNotFound("Could not find the token {}".format(token_id))
python
def revoke_token(token_id, user): try: token = TokenBlacklist.query.filter_by(id=token_id, user_identity=user).one() token.revoked = True db.session.commit() except NoResultFound: raise TokenNotFound("Could not find the token {}".format(token_id))
[ "def", "revoke_token", "(", "token_id", ",", "user", ")", ":", "try", ":", "token", "=", "TokenBlacklist", ".", "query", ".", "filter_by", "(", "id", "=", "token_id", ",", "user_identity", "=", "user", ")", ".", "one", "(", ")", "token", ".", "revoked"...
Revokes the given token. Raises a TokenNotFound error if the token does not exist in the database
[ "Revokes", "the", "given", "token", ".", "Raises", "a", "TokenNotFound", "error", "if", "the", "token", "does", "not", "exist", "in", "the", "database" ]
569d3b89eb5d2586d0cff4581a346229c623cefc
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/examples/database_blacklist/blacklist_helpers.py#L65-L75
234,543
vimalloc/flask-jwt-extended
examples/database_blacklist/blacklist_helpers.py
prune_database
def prune_database(): """ Delete tokens that have expired from the database. How (and if) you call this is entirely up you. You could expose it to an endpoint that only administrators could call, you could run it as a cron, set it up with flask cli, etc. """ now = datetime.now() expired = TokenBlacklist.query.filter(TokenBlacklist.expires < now).all() for token in expired: db.session.delete(token) db.session.commit()
python
def prune_database(): now = datetime.now() expired = TokenBlacklist.query.filter(TokenBlacklist.expires < now).all() for token in expired: db.session.delete(token) db.session.commit()
[ "def", "prune_database", "(", ")", ":", "now", "=", "datetime", ".", "now", "(", ")", "expired", "=", "TokenBlacklist", ".", "query", ".", "filter", "(", "TokenBlacklist", ".", "expires", "<", "now", ")", ".", "all", "(", ")", "for", "token", "in", "...
Delete tokens that have expired from the database. How (and if) you call this is entirely up you. You could expose it to an endpoint that only administrators could call, you could run it as a cron, set it up with flask cli, etc.
[ "Delete", "tokens", "that", "have", "expired", "from", "the", "database", "." ]
569d3b89eb5d2586d0cff4581a346229c623cefc
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/examples/database_blacklist/blacklist_helpers.py#L91-L103
234,544
vimalloc/flask-jwt-extended
flask_jwt_extended/view_decorators.py
verify_jwt_in_request
def verify_jwt_in_request(): """ Ensure that the requester has a valid access token. This does not check the freshness of the access token. Raises an appropiate exception there is no token or if the token is invalid. """ if request.method not in config.exempt_methods: jwt_data = _decode_jwt_from_request(request_type='access') ctx_stack.top.jwt = jwt_data verify_token_claims(jwt_data) _load_user(jwt_data[config.identity_claim_key])
python
def verify_jwt_in_request(): if request.method not in config.exempt_methods: jwt_data = _decode_jwt_from_request(request_type='access') ctx_stack.top.jwt = jwt_data verify_token_claims(jwt_data) _load_user(jwt_data[config.identity_claim_key])
[ "def", "verify_jwt_in_request", "(", ")", ":", "if", "request", ".", "method", "not", "in", "config", ".", "exempt_methods", ":", "jwt_data", "=", "_decode_jwt_from_request", "(", "request_type", "=", "'access'", ")", "ctx_stack", ".", "top", ".", "jwt", "=", ...
Ensure that the requester has a valid access token. This does not check the freshness of the access token. Raises an appropiate exception there is no token or if the token is invalid.
[ "Ensure", "that", "the", "requester", "has", "a", "valid", "access", "token", ".", "This", "does", "not", "check", "the", "freshness", "of", "the", "access", "token", ".", "Raises", "an", "appropiate", "exception", "there", "is", "no", "token", "or", "if",...
569d3b89eb5d2586d0cff4581a346229c623cefc
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/flask_jwt_extended/view_decorators.py#L24-L34
234,545
vimalloc/flask-jwt-extended
flask_jwt_extended/view_decorators.py
verify_fresh_jwt_in_request
def verify_fresh_jwt_in_request(): """ Ensure that the requester has a valid and fresh access token. Raises an appropiate exception if there is no token, the token is invalid, or the token is not marked as fresh. """ if request.method not in config.exempt_methods: jwt_data = _decode_jwt_from_request(request_type='access') ctx_stack.top.jwt = jwt_data fresh = jwt_data['fresh'] if isinstance(fresh, bool): if not fresh: raise FreshTokenRequired('Fresh token required') else: now = timegm(datetime.utcnow().utctimetuple()) if fresh < now: raise FreshTokenRequired('Fresh token required') verify_token_claims(jwt_data) _load_user(jwt_data[config.identity_claim_key])
python
def verify_fresh_jwt_in_request(): if request.method not in config.exempt_methods: jwt_data = _decode_jwt_from_request(request_type='access') ctx_stack.top.jwt = jwt_data fresh = jwt_data['fresh'] if isinstance(fresh, bool): if not fresh: raise FreshTokenRequired('Fresh token required') else: now = timegm(datetime.utcnow().utctimetuple()) if fresh < now: raise FreshTokenRequired('Fresh token required') verify_token_claims(jwt_data) _load_user(jwt_data[config.identity_claim_key])
[ "def", "verify_fresh_jwt_in_request", "(", ")", ":", "if", "request", ".", "method", "not", "in", "config", ".", "exempt_methods", ":", "jwt_data", "=", "_decode_jwt_from_request", "(", "request_type", "=", "'access'", ")", "ctx_stack", ".", "top", ".", "jwt", ...
Ensure that the requester has a valid and fresh access token. Raises an appropiate exception if there is no token, the token is invalid, or the token is not marked as fresh.
[ "Ensure", "that", "the", "requester", "has", "a", "valid", "and", "fresh", "access", "token", ".", "Raises", "an", "appropiate", "exception", "if", "there", "is", "no", "token", "the", "token", "is", "invalid", "or", "the", "token", "is", "not", "marked", ...
569d3b89eb5d2586d0cff4581a346229c623cefc
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/flask_jwt_extended/view_decorators.py#L58-L76
234,546
vimalloc/flask-jwt-extended
flask_jwt_extended/view_decorators.py
jwt_optional
def jwt_optional(fn): """ A decorator to optionally protect a Flask endpoint If an access token in present in the request, this will call the endpoint with :func:`~flask_jwt_extended.get_jwt_identity` having the identity of the access token. If no access token is present in the request, this endpoint will still be called, but :func:`~flask_jwt_extended.get_jwt_identity` will return `None` instead. If there is an invalid access token in the request (expired, tampered with, etc), this will still call the appropriate error handler instead of allowing the endpoint to be called as if there is no access token in the request. """ @wraps(fn) def wrapper(*args, **kwargs): verify_jwt_in_request_optional() return fn(*args, **kwargs) return wrapper
python
def jwt_optional(fn): @wraps(fn) def wrapper(*args, **kwargs): verify_jwt_in_request_optional() return fn(*args, **kwargs) return wrapper
[ "def", "jwt_optional", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "verify_jwt_in_request_optional", "(", ")", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ...
A decorator to optionally protect a Flask endpoint If an access token in present in the request, this will call the endpoint with :func:`~flask_jwt_extended.get_jwt_identity` having the identity of the access token. If no access token is present in the request, this endpoint will still be called, but :func:`~flask_jwt_extended.get_jwt_identity` will return `None` instead. If there is an invalid access token in the request (expired, tampered with, etc), this will still call the appropriate error handler instead of allowing the endpoint to be called as if there is no access token in the request.
[ "A", "decorator", "to", "optionally", "protect", "a", "Flask", "endpoint" ]
569d3b89eb5d2586d0cff4581a346229c623cefc
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/flask_jwt_extended/view_decorators.py#L107-L125
234,547
MycroftAI/mycroft-precise
precise/model.py
load_precise_model
def load_precise_model(model_name: str) -> Any: """Loads a Keras model from file, handling custom loss function""" if not model_name.endswith('.net'): print('Warning: Unknown model type, ', model_name) inject_params(model_name) return load_keras().models.load_model(model_name)
python
def load_precise_model(model_name: str) -> Any: if not model_name.endswith('.net'): print('Warning: Unknown model type, ', model_name) inject_params(model_name) return load_keras().models.load_model(model_name)
[ "def", "load_precise_model", "(", "model_name", ":", "str", ")", "->", "Any", ":", "if", "not", "model_name", ".", "endswith", "(", "'.net'", ")", ":", "print", "(", "'Warning: Unknown model type, '", ",", "model_name", ")", "inject_params", "(", "model_name", ...
Loads a Keras model from file, handling custom loss function
[ "Loads", "a", "Keras", "model", "from", "file", "handling", "custom", "loss", "function" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/model.py#L42-L48
234,548
MycroftAI/mycroft-precise
precise/model.py
create_model
def create_model(model_name: Optional[str], params: ModelParams) -> 'Sequential': """ Load or create a precise model Args: model_name: Name of model params: Parameters used to create the model Returns: model: Loaded Keras model """ if model_name and isfile(model_name): print('Loading from ' + model_name + '...') model = load_precise_model(model_name) else: from keras.layers.core import Dense from keras.layers.recurrent import GRU from keras.models import Sequential model = Sequential() model.add(GRU( params.recurrent_units, activation='linear', input_shape=(pr.n_features, pr.feature_size), dropout=params.dropout, name='net' )) model.add(Dense(1, activation='sigmoid')) load_keras() metrics = ['accuracy'] + params.extra_metrics * [false_pos, false_neg] set_loss_bias(params.loss_bias) for i in model.layers[:params.freeze_till]: i.trainable = False model.compile('rmsprop', weighted_log_loss, metrics=(not params.skip_acc) * metrics) return model
python
def create_model(model_name: Optional[str], params: ModelParams) -> 'Sequential': if model_name and isfile(model_name): print('Loading from ' + model_name + '...') model = load_precise_model(model_name) else: from keras.layers.core import Dense from keras.layers.recurrent import GRU from keras.models import Sequential model = Sequential() model.add(GRU( params.recurrent_units, activation='linear', input_shape=(pr.n_features, pr.feature_size), dropout=params.dropout, name='net' )) model.add(Dense(1, activation='sigmoid')) load_keras() metrics = ['accuracy'] + params.extra_metrics * [false_pos, false_neg] set_loss_bias(params.loss_bias) for i in model.layers[:params.freeze_till]: i.trainable = False model.compile('rmsprop', weighted_log_loss, metrics=(not params.skip_acc) * metrics) return model
[ "def", "create_model", "(", "model_name", ":", "Optional", "[", "str", "]", ",", "params", ":", "ModelParams", ")", "->", "'Sequential'", ":", "if", "model_name", "and", "isfile", "(", "model_name", ")", ":", "print", "(", "'Loading from '", "+", "model_name...
Load or create a precise model Args: model_name: Name of model params: Parameters used to create the model Returns: model: Loaded Keras model
[ "Load", "or", "create", "a", "precise", "model" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/model.py#L51-L83
234,549
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
GeneratedTrainer.layer_with
def layer_with(self, sample: np.ndarray, value: int) -> np.ndarray: """Create an identical 2d array where the second row is filled with value""" b = np.full((2, len(sample)), value, dtype=float) b[0] = sample return b
python
def layer_with(self, sample: np.ndarray, value: int) -> np.ndarray: b = np.full((2, len(sample)), value, dtype=float) b[0] = sample return b
[ "def", "layer_with", "(", "self", ",", "sample", ":", "np", ".", "ndarray", ",", "value", ":", "int", ")", "->", "np", ".", "ndarray", ":", "b", "=", "np", ".", "full", "(", "(", "2", ",", "len", "(", "sample", ")", ")", ",", "value", ",", "d...
Create an identical 2d array where the second row is filled with value
[ "Create", "an", "identical", "2d", "array", "where", "the", "second", "row", "is", "filled", "with", "value" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L116-L120
234,550
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
GeneratedTrainer.generate_wakeword_pieces
def generate_wakeword_pieces(self, volume): """Generates chunks of audio that represent the wakeword stream""" while True: target = 1 if random() > 0.5 else 0 it = self.pos_files_it if target else self.neg_files_it sample_file = next(it) yield self.layer_with(self.normalize_volume_to(load_audio(sample_file), volume), target) yield self.layer_with(np.zeros(int(pr.sample_rate * (0.5 + 2.0 * random()))), 0)
python
def generate_wakeword_pieces(self, volume): while True: target = 1 if random() > 0.5 else 0 it = self.pos_files_it if target else self.neg_files_it sample_file = next(it) yield self.layer_with(self.normalize_volume_to(load_audio(sample_file), volume), target) yield self.layer_with(np.zeros(int(pr.sample_rate * (0.5 + 2.0 * random()))), 0)
[ "def", "generate_wakeword_pieces", "(", "self", ",", "volume", ")", ":", "while", "True", ":", "target", "=", "1", "if", "random", "(", ")", ">", "0.5", "else", "0", "it", "=", "self", ".", "pos_files_it", "if", "target", "else", "self", ".", "neg_file...
Generates chunks of audio that represent the wakeword stream
[ "Generates", "chunks", "of", "audio", "that", "represent", "the", "wakeword", "stream" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L122-L129
234,551
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
GeneratedTrainer.chunk_audio_pieces
def chunk_audio_pieces(self, pieces, chunk_size): """Convert chunks of audio into a series of equally sized pieces""" left_over = np.array([]) for piece in pieces: if left_over.size == 0: combined = piece else: combined = np.concatenate([left_over, piece], axis=-1) for chunk in chunk_audio(combined.T, chunk_size): yield chunk.T left_over = piece[-(len(piece) % chunk_size):]
python
def chunk_audio_pieces(self, pieces, chunk_size): left_over = np.array([]) for piece in pieces: if left_over.size == 0: combined = piece else: combined = np.concatenate([left_over, piece], axis=-1) for chunk in chunk_audio(combined.T, chunk_size): yield chunk.T left_over = piece[-(len(piece) % chunk_size):]
[ "def", "chunk_audio_pieces", "(", "self", ",", "pieces", ",", "chunk_size", ")", ":", "left_over", "=", "np", ".", "array", "(", "[", "]", ")", "for", "piece", "in", "pieces", ":", "if", "left_over", ".", "size", "==", "0", ":", "combined", "=", "pie...
Convert chunks of audio into a series of equally sized pieces
[ "Convert", "chunks", "of", "audio", "into", "a", "series", "of", "equally", "sized", "pieces" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L131-L141
234,552
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
GeneratedTrainer.calc_volume
def calc_volume(self, sample: np.ndarray): """Find the RMS of the audio""" return sqrt(np.mean(np.square(sample)))
python
def calc_volume(self, sample: np.ndarray): return sqrt(np.mean(np.square(sample)))
[ "def", "calc_volume", "(", "self", ",", "sample", ":", "np", ".", "ndarray", ")", ":", "return", "sqrt", "(", "np", ".", "mean", "(", "np", ".", "square", "(", "sample", ")", ")", ")" ]
Find the RMS of the audio
[ "Find", "the", "RMS", "of", "the", "audio" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L143-L145
234,553
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
GeneratedTrainer.max_run_length
def max_run_length(x: np.ndarray, val: int): """Finds the maximum continuous length of the given value in the sequence""" if x.size == 0: return 0 else: y = np.array(x[1:] != x[:-1]) i = np.append(np.where(y), len(x) - 1) run_lengths = np.diff(np.append(-1, i)) run_length_values = x[i] return max([rl for rl, v in zip(run_lengths, run_length_values) if v == val], default=0)
python
def max_run_length(x: np.ndarray, val: int): if x.size == 0: return 0 else: y = np.array(x[1:] != x[:-1]) i = np.append(np.where(y), len(x) - 1) run_lengths = np.diff(np.append(-1, i)) run_length_values = x[i] return max([rl for rl, v in zip(run_lengths, run_length_values) if v == val], default=0)
[ "def", "max_run_length", "(", "x", ":", "np", ".", "ndarray", ",", "val", ":", "int", ")", ":", "if", "x", ".", "size", "==", "0", ":", "return", "0", "else", ":", "y", "=", "np", ".", "array", "(", "x", "[", "1", ":", "]", "!=", "x", "[", ...
Finds the maximum continuous length of the given value in the sequence
[ "Finds", "the", "maximum", "continuous", "length", "of", "the", "given", "value", "in", "the", "sequence" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L156-L165
234,554
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
GeneratedTrainer.samples_to_batches
def samples_to_batches(samples: Iterable, batch_size: int): """Chunk a series of network inputs and outputs into larger batches""" it = iter(samples) while True: with suppress(StopIteration): batch_in, batch_out = [], [] for i in range(batch_size): sample_in, sample_out = next(it) batch_in.append(sample_in) batch_out.append(sample_out) if not batch_in: raise StopIteration yield np.array(batch_in), np.array(batch_out)
python
def samples_to_batches(samples: Iterable, batch_size: int): it = iter(samples) while True: with suppress(StopIteration): batch_in, batch_out = [], [] for i in range(batch_size): sample_in, sample_out = next(it) batch_in.append(sample_in) batch_out.append(sample_out) if not batch_in: raise StopIteration yield np.array(batch_in), np.array(batch_out)
[ "def", "samples_to_batches", "(", "samples", ":", "Iterable", ",", "batch_size", ":", "int", ")", ":", "it", "=", "iter", "(", "samples", ")", "while", "True", ":", "with", "suppress", "(", "StopIteration", ")", ":", "batch_in", ",", "batch_out", "=", "[...
Chunk a series of network inputs and outputs into larger batches
[ "Chunk", "a", "series", "of", "network", "inputs", "and", "outputs", "into", "larger", "batches" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L203-L215
234,555
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
GeneratedTrainer.run
def run(self): """Train the model on randomly generated batches""" _, test_data = self.data.load(train=False, test=True) try: self.model.fit_generator( self.samples_to_batches(self.generate_samples(), self.args.batch_size), steps_per_epoch=self.args.steps_per_epoch, epochs=self.epoch + self.args.epochs, validation_data=test_data, callbacks=self.callbacks, initial_epoch=self.epoch ) finally: self.model.save(self.args.model) save_params(self.args.model)
python
def run(self): _, test_data = self.data.load(train=False, test=True) try: self.model.fit_generator( self.samples_to_batches(self.generate_samples(), self.args.batch_size), steps_per_epoch=self.args.steps_per_epoch, epochs=self.epoch + self.args.epochs, validation_data=test_data, callbacks=self.callbacks, initial_epoch=self.epoch ) finally: self.model.save(self.args.model) save_params(self.args.model)
[ "def", "run", "(", "self", ")", ":", "_", ",", "test_data", "=", "self", ".", "data", ".", "load", "(", "train", "=", "False", ",", "test", "=", "True", ")", "try", ":", "self", ".", "model", ".", "fit_generator", "(", "self", ".", "samples_to_batc...
Train the model on randomly generated batches
[ "Train", "the", "model", "on", "randomly", "generated", "batches" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L226-L237
234,556
MycroftAI/mycroft-precise
precise/train_data.py
TrainData.from_both
def from_both(cls, tags_file: str, tags_folder: str, folder: str) -> 'TrainData': """Load data from both a database and a structured folder""" return cls.from_tags(tags_file, tags_folder) + cls.from_folder(folder)
python
def from_both(cls, tags_file: str, tags_folder: str, folder: str) -> 'TrainData': return cls.from_tags(tags_file, tags_folder) + cls.from_folder(folder)
[ "def", "from_both", "(", "cls", ",", "tags_file", ":", "str", ",", "tags_folder", ":", "str", ",", "folder", ":", "str", ")", "->", "'TrainData'", ":", "return", "cls", ".", "from_tags", "(", "tags_file", ",", "tags_folder", ")", "+", "cls", ".", "from...
Load data from both a database and a structured folder
[ "Load", "data", "from", "both", "a", "database", "and", "a", "structured", "folder" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/train_data.py#L113-L115
234,557
MycroftAI/mycroft-precise
precise/train_data.py
TrainData.load_inhibit
def load_inhibit(self, train=True, test=True) -> tuple: """Generate data with inhibitory inputs created from wake word samples""" def loader(kws: list, nkws: list): from precise.params import pr inputs = np.empty((0, pr.n_features, pr.feature_size)) outputs = np.zeros((len(kws), 1)) for f in kws: if not isfile(f): continue new_vec = load_vector(f, vectorize_inhibit) inputs = np.concatenate([inputs, new_vec]) return self.merge((inputs, outputs), self.__load_files(kws, nkws)) return self.__load(loader, train, test)
python
def load_inhibit(self, train=True, test=True) -> tuple: def loader(kws: list, nkws: list): from precise.params import pr inputs = np.empty((0, pr.n_features, pr.feature_size)) outputs = np.zeros((len(kws), 1)) for f in kws: if not isfile(f): continue new_vec = load_vector(f, vectorize_inhibit) inputs = np.concatenate([inputs, new_vec]) return self.merge((inputs, outputs), self.__load_files(kws, nkws)) return self.__load(loader, train, test)
[ "def", "load_inhibit", "(", "self", ",", "train", "=", "True", ",", "test", "=", "True", ")", "->", "tuple", ":", "def", "loader", "(", "kws", ":", "list", ",", "nkws", ":", "list", ")", ":", "from", "precise", ".", "params", "import", "pr", "input...
Generate data with inhibitory inputs created from wake word samples
[ "Generate", "data", "with", "inhibitory", "inputs", "created", "from", "wake", "word", "samples" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/train_data.py#L126-L141
234,558
MycroftAI/mycroft-precise
precise/train_data.py
TrainData.parse_args
def parse_args(parser: ArgumentParser) -> Any: """Return parsed args from parser, adding options for train data inputs""" extra_usage = ''' :folder str Folder to load wav files from :-tf --tags-folder str {folder} Specify a different folder to load file ids in tags file from :-tg --tags-file str - Text file to load tags from where each line is <file_id> TAB (wake-word|not-wake-word) and {folder}/<file_id>.wav exists ''' add_to_parser(parser, extra_usage) args = parser.parse_args() args.tags_folder = args.tags_folder.format(folder=args.folder) return args
python
def parse_args(parser: ArgumentParser) -> Any: extra_usage = ''' :folder str Folder to load wav files from :-tf --tags-folder str {folder} Specify a different folder to load file ids in tags file from :-tg --tags-file str - Text file to load tags from where each line is <file_id> TAB (wake-word|not-wake-word) and {folder}/<file_id>.wav exists ''' add_to_parser(parser, extra_usage) args = parser.parse_args() args.tags_folder = args.tags_folder.format(folder=args.folder) return args
[ "def", "parse_args", "(", "parser", ":", "ArgumentParser", ")", "->", "Any", ":", "extra_usage", "=", "'''\n :folder str\n Folder to load wav files from\n \n :-tf --tags-folder str {folder}\n Specify a different folder to load fi...
Return parsed args from parser, adding options for train data inputs
[ "Return", "parsed", "args", "from", "parser", "adding", "options", "for", "train", "data", "inputs" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/train_data.py#L148-L167
234,559
MycroftAI/mycroft-precise
precise/vectorization.py
vectorize_raw
def vectorize_raw(audio: np.ndarray) -> np.ndarray: """Turns audio into feature vectors, without clipping for length""" if len(audio) == 0: raise InvalidAudio('Cannot vectorize empty audio!') return vectorizers[pr.vectorizer](audio)
python
def vectorize_raw(audio: np.ndarray) -> np.ndarray: if len(audio) == 0: raise InvalidAudio('Cannot vectorize empty audio!') return vectorizers[pr.vectorizer](audio)
[ "def", "vectorize_raw", "(", "audio", ":", "np", ".", "ndarray", ")", "->", "np", ".", "ndarray", ":", "if", "len", "(", "audio", ")", "==", "0", ":", "raise", "InvalidAudio", "(", "'Cannot vectorize empty audio!'", ")", "return", "vectorizers", "[", "pr",...
Turns audio into feature vectors, without clipping for length
[ "Turns", "audio", "into", "feature", "vectors", "without", "clipping", "for", "length" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/vectorization.py#L42-L46
234,560
MycroftAI/mycroft-precise
precise/vectorization.py
vectorize_inhibit
def vectorize_inhibit(audio: np.ndarray) -> np.ndarray: """ Returns an array of inputs generated from the wake word audio that shouldn't cause an activation """ def samp(x): return int(pr.sample_rate * x) inputs = [] for offset in range(samp(inhibit_t), samp(inhibit_dist_t), samp(inhibit_hop_t)): if len(audio) - offset < samp(pr.buffer_t / 2.): break inputs.append(vectorize(audio[:-offset])) return np.array(inputs) if inputs else np.empty((0, pr.n_features, pr.feature_size))
python
def vectorize_inhibit(audio: np.ndarray) -> np.ndarray: def samp(x): return int(pr.sample_rate * x) inputs = [] for offset in range(samp(inhibit_t), samp(inhibit_dist_t), samp(inhibit_hop_t)): if len(audio) - offset < samp(pr.buffer_t / 2.): break inputs.append(vectorize(audio[:-offset])) return np.array(inputs) if inputs else np.empty((0, pr.n_features, pr.feature_size))
[ "def", "vectorize_inhibit", "(", "audio", ":", "np", ".", "ndarray", ")", "->", "np", ".", "ndarray", ":", "def", "samp", "(", "x", ")", ":", "return", "int", "(", "pr", ".", "sample_rate", "*", "x", ")", "inputs", "=", "[", "]", "for", "offset", ...
Returns an array of inputs generated from the wake word audio that shouldn't cause an activation
[ "Returns", "an", "array", "of", "inputs", "generated", "from", "the", "wake", "word", "audio", "that", "shouldn", "t", "cause", "an", "activation" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/vectorization.py#L83-L97
234,561
MycroftAI/mycroft-precise
runner/precise_runner/runner.py
TriggerDetector.update
def update(self, prob): # type: (float) -> bool """Returns whether the new prediction caused an activation""" chunk_activated = prob > 1.0 - self.sensitivity if chunk_activated or self.activation < 0: self.activation += 1 has_activated = self.activation > self.trigger_level if has_activated or chunk_activated and self.activation < 0: self.activation = -(8 * 2048) // self.chunk_size if has_activated: return True elif self.activation > 0: self.activation -= 1 return False
python
def update(self, prob): # type: (float) -> bool chunk_activated = prob > 1.0 - self.sensitivity if chunk_activated or self.activation < 0: self.activation += 1 has_activated = self.activation > self.trigger_level if has_activated or chunk_activated and self.activation < 0: self.activation = -(8 * 2048) // self.chunk_size if has_activated: return True elif self.activation > 0: self.activation -= 1 return False
[ "def", "update", "(", "self", ",", "prob", ")", ":", "# type: (float) -> bool", "chunk_activated", "=", "prob", ">", "1.0", "-", "self", ".", "sensitivity", "if", "chunk_activated", "or", "self", ".", "activation", "<", "0", ":", "self", ".", "activation", ...
Returns whether the new prediction caused an activation
[ "Returns", "whether", "the", "new", "prediction", "caused", "an", "activation" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/runner/precise_runner/runner.py#L107-L122
234,562
MycroftAI/mycroft-precise
runner/precise_runner/runner.py
PreciseRunner.start
def start(self): """Start listening from stream""" if self.stream is None: from pyaudio import PyAudio, paInt16 self.pa = PyAudio() self.stream = self.pa.open( 16000, 1, paInt16, True, frames_per_buffer=self.chunk_size ) self._wrap_stream_read(self.stream) self.engine.start() self.running = True self.is_paused = False self.thread = Thread(target=self._handle_predictions) self.thread.daemon = True self.thread.start()
python
def start(self): if self.stream is None: from pyaudio import PyAudio, paInt16 self.pa = PyAudio() self.stream = self.pa.open( 16000, 1, paInt16, True, frames_per_buffer=self.chunk_size ) self._wrap_stream_read(self.stream) self.engine.start() self.running = True self.is_paused = False self.thread = Thread(target=self._handle_predictions) self.thread.daemon = True self.thread.start()
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "stream", "is", "None", ":", "from", "pyaudio", "import", "PyAudio", ",", "paInt16", "self", ".", "pa", "=", "PyAudio", "(", ")", "self", ".", "stream", "=", "self", ".", "pa", ".", "open", ...
Start listening from stream
[ "Start", "listening", "from", "stream" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/runner/precise_runner/runner.py#L172-L188
234,563
MycroftAI/mycroft-precise
runner/precise_runner/runner.py
PreciseRunner.stop
def stop(self): """Stop listening and close stream""" if self.thread: self.running = False if isinstance(self.stream, ReadWriteStream): self.stream.write(b'\0' * self.chunk_size) self.thread.join() self.thread = None self.engine.stop() if self.pa: self.pa.terminate() self.stream.stop_stream() self.stream = self.pa = None
python
def stop(self): if self.thread: self.running = False if isinstance(self.stream, ReadWriteStream): self.stream.write(b'\0' * self.chunk_size) self.thread.join() self.thread = None self.engine.stop() if self.pa: self.pa.terminate() self.stream.stop_stream() self.stream = self.pa = None
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "thread", ":", "self", ".", "running", "=", "False", "if", "isinstance", "(", "self", ".", "stream", ",", "ReadWriteStream", ")", ":", "self", ".", "stream", ".", "write", "(", "b'\\0'", "*", ...
Stop listening and close stream
[ "Stop", "listening", "and", "close", "stream" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/runner/precise_runner/runner.py#L190-L204
234,564
MycroftAI/mycroft-precise
runner/precise_runner/runner.py
PreciseRunner._handle_predictions
def _handle_predictions(self): """Continuously check Precise process output""" while self.running: chunk = self.stream.read(self.chunk_size) if self.is_paused: continue prob = self.engine.get_prediction(chunk) self.on_prediction(prob) if self.detector.update(prob): self.on_activation()
python
def _handle_predictions(self): while self.running: chunk = self.stream.read(self.chunk_size) if self.is_paused: continue prob = self.engine.get_prediction(chunk) self.on_prediction(prob) if self.detector.update(prob): self.on_activation()
[ "def", "_handle_predictions", "(", "self", ")", ":", "while", "self", ".", "running", ":", "chunk", "=", "self", ".", "stream", ".", "read", "(", "self", ".", "chunk_size", ")", "if", "self", ".", "is_paused", ":", "continue", "prob", "=", "self", ".",...
Continuously check Precise process output
[ "Continuously", "check", "Precise", "process", "output" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/runner/precise_runner/runner.py#L212-L223
234,565
MycroftAI/mycroft-precise
precise/stats.py
Stats.calc_filenames
def calc_filenames(self, is_correct: bool, actual_output: bool, threshold=0.5) -> list: """Find a list of files with the given classification""" return [ filename for output, target, filename in zip(self.outputs, self.targets, self.filenames) if ((output > threshold) == bool(target)) == is_correct and actual_output == bool(output > threshold) ]
python
def calc_filenames(self, is_correct: bool, actual_output: bool, threshold=0.5) -> list: return [ filename for output, target, filename in zip(self.outputs, self.targets, self.filenames) if ((output > threshold) == bool(target)) == is_correct and actual_output == bool(output > threshold) ]
[ "def", "calc_filenames", "(", "self", ",", "is_correct", ":", "bool", ",", "actual_output", ":", "bool", ",", "threshold", "=", "0.5", ")", "->", "list", ":", "return", "[", "filename", "for", "output", ",", "target", ",", "filename", "in", "zip", "(", ...
Find a list of files with the given classification
[ "Find", "a", "list", "of", "files", "with", "the", "given", "classification" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/stats.py#L90-L96
234,566
MycroftAI/mycroft-precise
precise/scripts/graph.py
get_thresholds
def get_thresholds(points=100, power=3) -> list: """Run a function with a series of thresholds between 0 and 1""" return [(i / (points + 1)) ** power for i in range(1, points + 1)]
python
def get_thresholds(points=100, power=3) -> list: return [(i / (points + 1)) ** power for i in range(1, points + 1)]
[ "def", "get_thresholds", "(", "points", "=", "100", ",", "power", "=", "3", ")", "->", "list", ":", "return", "[", "(", "i", "/", "(", "points", "+", "1", ")", ")", "**", "power", "for", "i", "in", "range", "(", "1", ",", "points", "+", "1", ...
Run a function with a series of thresholds between 0 and 1
[ "Run", "a", "function", "with", "a", "series", "of", "thresholds", "between", "0", "and", "1" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/graph.py#L57-L59
234,567
MycroftAI/mycroft-precise
precise/scripts/graph.py
CachedDataLoader.load_for
def load_for(self, model: str) -> Tuple[list, list]: """Injects the model parameters, reloading if they changed, and returning the data""" inject_params(model) if self.prev_cache != pr.vectorization_md5_hash(): self.prev_cache = pr.vectorization_md5_hash() self.data = self.loader() return self.data
python
def load_for(self, model: str) -> Tuple[list, list]: inject_params(model) if self.prev_cache != pr.vectorization_md5_hash(): self.prev_cache = pr.vectorization_md5_hash() self.data = self.loader() return self.data
[ "def", "load_for", "(", "self", ",", "model", ":", "str", ")", "->", "Tuple", "[", "list", ",", "list", "]", ":", "inject_params", "(", "model", ")", "if", "self", ".", "prev_cache", "!=", "pr", ".", "vectorization_md5_hash", "(", ")", ":", "self", "...
Injects the model parameters, reloading if they changed, and returning the data
[ "Injects", "the", "model", "parameters", "reloading", "if", "they", "changed", "and", "returning", "the", "data" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/graph.py#L75-L81
234,568
MycroftAI/mycroft-precise
precise/util.py
buffer_to_audio
def buffer_to_audio(buffer: bytes) -> np.ndarray: """Convert a raw mono audio byte string to numpy array of floats""" return np.fromstring(buffer, dtype='<i2').astype(np.float32, order='C') / 32768.0
python
def buffer_to_audio(buffer: bytes) -> np.ndarray: return np.fromstring(buffer, dtype='<i2').astype(np.float32, order='C') / 32768.0
[ "def", "buffer_to_audio", "(", "buffer", ":", "bytes", ")", "->", "np", ".", "ndarray", ":", "return", "np", ".", "fromstring", "(", "buffer", ",", "dtype", "=", "'<i2'", ")", ".", "astype", "(", "np", ".", "float32", ",", "order", "=", "'C'", ")", ...
Convert a raw mono audio byte string to numpy array of floats
[ "Convert", "a", "raw", "mono", "audio", "byte", "string", "to", "numpy", "array", "of", "floats" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/util.py#L31-L33
234,569
MycroftAI/mycroft-precise
precise/util.py
find_wavs
def find_wavs(folder: str) -> Tuple[List[str], List[str]]: """Finds wake-word and not-wake-word wavs in folder""" return (glob_all(join(folder, 'wake-word'), '*.wav'), glob_all(join(folder, 'not-wake-word'), '*.wav'))
python
def find_wavs(folder: str) -> Tuple[List[str], List[str]]: return (glob_all(join(folder, 'wake-word'), '*.wav'), glob_all(join(folder, 'not-wake-word'), '*.wav'))
[ "def", "find_wavs", "(", "folder", ":", "str", ")", "->", "Tuple", "[", "List", "[", "str", "]", ",", "List", "[", "str", "]", "]", ":", "return", "(", "glob_all", "(", "join", "(", "folder", ",", "'wake-word'", ")", ",", "'*.wav'", ")", ",", "gl...
Finds wake-word and not-wake-word wavs in folder
[ "Finds", "wake", "-", "word", "and", "not", "-", "wake", "-", "word", "wavs", "in", "folder" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/util.py#L98-L101
234,570
MycroftAI/mycroft-precise
precise/scripts/convert.py
convert
def convert(model_path: str, out_file: str): """ Converts an HD5F file from Keras to a .pb for use with TensorFlow Args: model_path: location of Keras model out_file: location to write protobuf """ print('Converting', model_path, 'to', out_file, '...') import tensorflow as tf from precise.model import load_precise_model from keras import backend as K out_dir, filename = split(out_file) out_dir = out_dir or '.' os.makedirs(out_dir, exist_ok=True) K.set_learning_phase(0) model = load_precise_model(model_path) out_name = 'net_output' tf.identity(model.output, name=out_name) print('Output node name:', out_name) print('Output folder:', out_dir) sess = K.get_session() # Write the graph in human readable tf.train.write_graph(sess.graph.as_graph_def(), out_dir, filename + 'txt', as_text=True) print('Saved readable graph to:', filename + 'txt') # Write the graph in binary .pb file from tensorflow.python.framework import graph_util from tensorflow.python.framework import graph_io cgraph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out_name]) graph_io.write_graph(cgraph, out_dir, filename, as_text=False) if isfile(model_path + '.params'): copyfile(model_path + '.params', out_file + '.params') print('Saved graph to:', filename) del sess
python
def convert(model_path: str, out_file: str): print('Converting', model_path, 'to', out_file, '...') import tensorflow as tf from precise.model import load_precise_model from keras import backend as K out_dir, filename = split(out_file) out_dir = out_dir or '.' os.makedirs(out_dir, exist_ok=True) K.set_learning_phase(0) model = load_precise_model(model_path) out_name = 'net_output' tf.identity(model.output, name=out_name) print('Output node name:', out_name) print('Output folder:', out_dir) sess = K.get_session() # Write the graph in human readable tf.train.write_graph(sess.graph.as_graph_def(), out_dir, filename + 'txt', as_text=True) print('Saved readable graph to:', filename + 'txt') # Write the graph in binary .pb file from tensorflow.python.framework import graph_util from tensorflow.python.framework import graph_io cgraph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out_name]) graph_io.write_graph(cgraph, out_dir, filename, as_text=False) if isfile(model_path + '.params'): copyfile(model_path + '.params', out_file + '.params') print('Saved graph to:', filename) del sess
[ "def", "convert", "(", "model_path", ":", "str", ",", "out_file", ":", "str", ")", ":", "print", "(", "'Converting'", ",", "model_path", ",", "'to'", ",", "out_file", ",", "'...'", ")", "import", "tensorflow", "as", "tf", "from", "precise", ".", "model",...
Converts an HD5F file from Keras to a .pb for use with TensorFlow Args: model_path: location of Keras model out_file: location to write protobuf
[ "Converts", "an", "HD5F", "file", "from", "Keras", "to", "a", ".", "pb", "for", "use", "with", "TensorFlow" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/convert.py#L32-L76
234,571
MycroftAI/mycroft-precise
precise/params.py
inject_params
def inject_params(model_name: str) -> ListenerParams: """Set the global listener params to a saved model""" params_file = model_name + '.params' try: with open(params_file) as f: pr.__dict__.update(compatibility_params, **json.load(f)) except (OSError, ValueError, TypeError): if isfile(model_name): print('Warning: Failed to load parameters from ' + params_file) return pr
python
def inject_params(model_name: str) -> ListenerParams: params_file = model_name + '.params' try: with open(params_file) as f: pr.__dict__.update(compatibility_params, **json.load(f)) except (OSError, ValueError, TypeError): if isfile(model_name): print('Warning: Failed to load parameters from ' + params_file) return pr
[ "def", "inject_params", "(", "model_name", ":", "str", ")", "->", "ListenerParams", ":", "params_file", "=", "model_name", "+", "'.params'", "try", ":", "with", "open", "(", "params_file", ")", "as", "f", ":", "pr", ".", "__dict__", ".", "update", "(", "...
Set the global listener params to a saved model
[ "Set", "the", "global", "listener", "params", "to", "a", "saved", "model" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/params.py#L96-L105
234,572
MycroftAI/mycroft-precise
precise/params.py
save_params
def save_params(model_name: str): """Save current global listener params to a file""" with open(model_name + '.params', 'w') as f: json.dump(pr.__dict__, f)
python
def save_params(model_name: str): with open(model_name + '.params', 'w') as f: json.dump(pr.__dict__, f)
[ "def", "save_params", "(", "model_name", ":", "str", ")", ":", "with", "open", "(", "model_name", "+", "'.params'", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "pr", ".", "__dict__", ",", "f", ")" ]
Save current global listener params to a file
[ "Save", "current", "global", "listener", "params", "to", "a", "file" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/params.py#L108-L111
234,573
MycroftAI/mycroft-precise
precise/scripts/train_incremental.py
IncrementalTrainer.retrain
def retrain(self): """Train for a session, pulling in any new data from the filesystem""" folder = TrainData.from_folder(self.args.folder) train_data, test_data = folder.load(True, not self.args.no_validation) train_data = TrainData.merge(train_data, self.sampled_data) test_data = TrainData.merge(test_data, self.test) train_inputs, train_outputs = train_data print() try: self.listener.runner.model.fit( train_inputs, train_outputs, self.args.batch_size, self.epoch + self.args.epochs, validation_data=test_data, callbacks=self.callbacks, initial_epoch=self.epoch ) finally: self.listener.runner.model.save(self.args.model)
python
def retrain(self): folder = TrainData.from_folder(self.args.folder) train_data, test_data = folder.load(True, not self.args.no_validation) train_data = TrainData.merge(train_data, self.sampled_data) test_data = TrainData.merge(test_data, self.test) train_inputs, train_outputs = train_data print() try: self.listener.runner.model.fit( train_inputs, train_outputs, self.args.batch_size, self.epoch + self.args.epochs, validation_data=test_data, callbacks=self.callbacks, initial_epoch=self.epoch ) finally: self.listener.runner.model.save(self.args.model)
[ "def", "retrain", "(", "self", ")", ":", "folder", "=", "TrainData", ".", "from_folder", "(", "self", ".", "args", ".", "folder", ")", "train_data", ",", "test_data", "=", "folder", ".", "load", "(", "True", ",", "not", "self", ".", "args", ".", "no_...
Train for a session, pulling in any new data from the filesystem
[ "Train", "for", "a", "session", "pulling", "in", "any", "new", "data", "from", "the", "filesystem" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_incremental.py#L95-L110
234,574
MycroftAI/mycroft-precise
precise/scripts/train_incremental.py
IncrementalTrainer.train_on_audio
def train_on_audio(self, fn: str): """Run through a single audio file""" save_test = random() > 0.8 audio = load_audio(fn) num_chunks = len(audio) // self.args.chunk_size self.listener.clear() for i, chunk in enumerate(chunk_audio(audio, self.args.chunk_size)): print('\r' + str(i * 100. / num_chunks) + '%', end='', flush=True) self.audio_buffer = np.concatenate((self.audio_buffer[len(chunk):], chunk)) conf = self.listener.update(chunk) if conf > self.args.threshold: self.samples_since_train += 1 name = splitext(basename(fn))[0] + '-' + str(i) + '.wav' name = join(self.args.folder, 'test' if save_test else '', 'not-wake-word', 'generated', name) save_audio(name, self.audio_buffer) print() print('Saved to:', name) if not save_test and self.samples_since_train >= self.args.delay_samples and \ self.args.epochs > 0: self.samples_since_train = 0 self.retrain()
python
def train_on_audio(self, fn: str): save_test = random() > 0.8 audio = load_audio(fn) num_chunks = len(audio) // self.args.chunk_size self.listener.clear() for i, chunk in enumerate(chunk_audio(audio, self.args.chunk_size)): print('\r' + str(i * 100. / num_chunks) + '%', end='', flush=True) self.audio_buffer = np.concatenate((self.audio_buffer[len(chunk):], chunk)) conf = self.listener.update(chunk) if conf > self.args.threshold: self.samples_since_train += 1 name = splitext(basename(fn))[0] + '-' + str(i) + '.wav' name = join(self.args.folder, 'test' if save_test else '', 'not-wake-word', 'generated', name) save_audio(name, self.audio_buffer) print() print('Saved to:', name) if not save_test and self.samples_since_train >= self.args.delay_samples and \ self.args.epochs > 0: self.samples_since_train = 0 self.retrain()
[ "def", "train_on_audio", "(", "self", ",", "fn", ":", "str", ")", ":", "save_test", "=", "random", "(", ")", ">", "0.8", "audio", "=", "load_audio", "(", "fn", ")", "num_chunks", "=", "len", "(", "audio", ")", "//", "self", ".", "args", ".", "chunk...
Run through a single audio file
[ "Run", "through", "a", "single", "audio", "file" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_incremental.py#L112-L136
234,575
MycroftAI/mycroft-precise
precise/scripts/train_incremental.py
IncrementalTrainer.run
def run(self): """ Begin reading through audio files, saving false activations and retraining when necessary """ for fn in glob_all(self.args.random_data_folder, '*.wav'): if fn in self.trained_fns: print('Skipping ' + fn + '...') continue print('Starting file ' + fn + '...') self.train_on_audio(fn) print('\r100% ') self.trained_fns.append(fn) save_trained_fns(self.trained_fns, self.args.model)
python
def run(self): for fn in glob_all(self.args.random_data_folder, '*.wav'): if fn in self.trained_fns: print('Skipping ' + fn + '...') continue print('Starting file ' + fn + '...') self.train_on_audio(fn) print('\r100% ') self.trained_fns.append(fn) save_trained_fns(self.trained_fns, self.args.model)
[ "def", "run", "(", "self", ")", ":", "for", "fn", "in", "glob_all", "(", "self", ".", "args", ".", "random_data_folder", ",", "'*.wav'", ")", ":", "if", "fn", "in", "self", ".", "trained_fns", ":", "print", "(", "'Skipping '", "+", "fn", "+", "'...'"...
Begin reading through audio files, saving false activations and retraining when necessary
[ "Begin", "reading", "through", "audio", "files", "saving", "false", "activations", "and", "retraining", "when", "necessary" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_incremental.py#L138-L153
234,576
MycroftAI/mycroft-precise
precise/network_runner.py
TensorFlowRunner.predict
def predict(self, inputs: np.ndarray) -> np.ndarray: """Run on multiple inputs""" return self.sess.run(self.out_var, {self.inp_var: inputs})
python
def predict(self, inputs: np.ndarray) -> np.ndarray: return self.sess.run(self.out_var, {self.inp_var: inputs})
[ "def", "predict", "(", "self", ",", "inputs", ":", "np", ".", "ndarray", ")", "->", "np", ".", "ndarray", ":", "return", "self", ".", "sess", ".", "run", "(", "self", ".", "out_var", ",", "{", "self", ".", "inp_var", ":", "inputs", "}", ")" ]
Run on multiple inputs
[ "Run", "on", "multiple", "inputs" ]
e17cebdd171906dbd8a16e282d8a7966fba2eeba
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/network_runner.py#L61-L63
234,577
ofek/bit
bit/network/rates.py
satoshi_to_currency
def satoshi_to_currency(num, currency): """Converts a given number of satoshi to another currency as a formatted string rounded down to the proper number of decimal places. :param num: The number of satoshi. :type num: ``int`` :param currency: One of the :ref:`supported currencies`. :type currency: ``str`` :rtype: ``str`` """ return '{:f}'.format( Decimal( num / Decimal(EXCHANGE_RATES[currency]()) ).quantize( Decimal('0.' + '0' * CURRENCY_PRECISION[currency]), rounding=ROUND_DOWN ).normalize() )
python
def satoshi_to_currency(num, currency): return '{:f}'.format( Decimal( num / Decimal(EXCHANGE_RATES[currency]()) ).quantize( Decimal('0.' + '0' * CURRENCY_PRECISION[currency]), rounding=ROUND_DOWN ).normalize() )
[ "def", "satoshi_to_currency", "(", "num", ",", "currency", ")", ":", "return", "'{:f}'", ".", "format", "(", "Decimal", "(", "num", "/", "Decimal", "(", "EXCHANGE_RATES", "[", "currency", "]", "(", ")", ")", ")", ".", "quantize", "(", "Decimal", "(", "...
Converts a given number of satoshi to another currency as a formatted string rounded down to the proper number of decimal places. :param num: The number of satoshi. :type num: ``int`` :param currency: One of the :ref:`supported currencies`. :type currency: ``str`` :rtype: ``str``
[ "Converts", "a", "given", "number", "of", "satoshi", "to", "another", "currency", "as", "a", "formatted", "string", "rounded", "down", "to", "the", "proper", "number", "of", "decimal", "places", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/network/rates.py#L645-L662
234,578
ofek/bit
bit/network/services.py
NetworkAPI.get_balance
def get_balance(cls, address): """Gets the balance of an address in satoshi. :param address: The address in question. :type address: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``int`` """ for api_call in cls.GET_BALANCE_MAIN: try: return api_call(address) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
python
def get_balance(cls, address): for api_call in cls.GET_BALANCE_MAIN: try: return api_call(address) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
[ "def", "get_balance", "(", "cls", ",", "address", ")", ":", "for", "api_call", "in", "cls", ".", "GET_BALANCE_MAIN", ":", "try", ":", "return", "api_call", "(", "address", ")", "except", "cls", ".", "IGNORED_ERRORS", ":", "pass", "raise", "ConnectionError", ...
Gets the balance of an address in satoshi. :param address: The address in question. :type address: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``int``
[ "Gets", "the", "balance", "of", "an", "address", "in", "satoshi", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/network/services.py#L343-L358
234,579
ofek/bit
bit/network/services.py
NetworkAPI.get_transactions
def get_transactions(cls, address): """Gets the ID of all transactions related to an address. :param address: The address in question. :type address: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``list`` of ``str`` """ for api_call in cls.GET_TRANSACTIONS_MAIN: try: return api_call(address) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
python
def get_transactions(cls, address): for api_call in cls.GET_TRANSACTIONS_MAIN: try: return api_call(address) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
[ "def", "get_transactions", "(", "cls", ",", "address", ")", ":", "for", "api_call", "in", "cls", ".", "GET_TRANSACTIONS_MAIN", ":", "try", ":", "return", "api_call", "(", "address", ")", "except", "cls", ".", "IGNORED_ERRORS", ":", "pass", "raise", "Connecti...
Gets the ID of all transactions related to an address. :param address: The address in question. :type address: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``list`` of ``str``
[ "Gets", "the", "ID", "of", "all", "transactions", "related", "to", "an", "address", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/network/services.py#L379-L394
234,580
ofek/bit
bit/network/services.py
NetworkAPI.get_unspent
def get_unspent(cls, address): """Gets all unspent transaction outputs belonging to an address. :param address: The address in question. :type address: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``list`` of :class:`~bit.network.meta.Unspent` """ for api_call in cls.GET_UNSPENT_MAIN: try: return api_call(address) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
python
def get_unspent(cls, address): for api_call in cls.GET_UNSPENT_MAIN: try: return api_call(address) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
[ "def", "get_unspent", "(", "cls", ",", "address", ")", ":", "for", "api_call", "in", "cls", ".", "GET_UNSPENT_MAIN", ":", "try", ":", "return", "api_call", "(", "address", ")", "except", "cls", ".", "IGNORED_ERRORS", ":", "pass", "raise", "ConnectionError", ...
Gets all unspent transaction outputs belonging to an address. :param address: The address in question. :type address: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``list`` of :class:`~bit.network.meta.Unspent`
[ "Gets", "all", "unspent", "transaction", "outputs", "belonging", "to", "an", "address", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/network/services.py#L452-L467
234,581
ofek/bit
bit/network/services.py
NetworkAPI.broadcast_tx
def broadcast_tx(cls, tx_hex): # pragma: no cover """Broadcasts a transaction to the blockchain. :param tx_hex: A signed transaction in hex form. :type tx_hex: ``str`` :raises ConnectionError: If all API services fail. """ success = None for api_call in cls.BROADCAST_TX_MAIN: try: success = api_call(tx_hex) if not success: continue return except cls.IGNORED_ERRORS: pass if success is False: raise ConnectionError('Transaction broadcast failed, or ' 'Unspents were already used.') raise ConnectionError('All APIs are unreachable.')
python
def broadcast_tx(cls, tx_hex): # pragma: no cover success = None for api_call in cls.BROADCAST_TX_MAIN: try: success = api_call(tx_hex) if not success: continue return except cls.IGNORED_ERRORS: pass if success is False: raise ConnectionError('Transaction broadcast failed, or ' 'Unspents were already used.') raise ConnectionError('All APIs are unreachable.')
[ "def", "broadcast_tx", "(", "cls", ",", "tx_hex", ")", ":", "# pragma: no cover", "success", "=", "None", "for", "api_call", "in", "cls", ".", "BROADCAST_TX_MAIN", ":", "try", ":", "success", "=", "api_call", "(", "tx_hex", ")", "if", "not", "success", ":"...
Broadcasts a transaction to the blockchain. :param tx_hex: A signed transaction in hex form. :type tx_hex: ``str`` :raises ConnectionError: If all API services fail.
[ "Broadcasts", "a", "transaction", "to", "the", "blockchain", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/network/services.py#L489-L511
234,582
ofek/bit
bit/transaction.py
calculate_preimages
def calculate_preimages(tx_obj, inputs_parameters): """Calculates preimages for provided transaction structure and input values. :param tx_obj: The transaction object used to calculate preimage from using a transaction digest algorithm, such as BIP-143 for Segwit inputs. This transaction object must hence have scriptCodes filled into the corresponding scriptSigs in the inputs. :type tx_obj: :object:`~bit.transaction.TxObj` :param inputs_parameters: A list of tuples with input index as integer, hash type as integer and a boolean flag to denote if the input is spending from a Segwit output. For example: [(0, 1, True), (2, 1, False), (...)] :type inputs_parameters: A `list` of `tuple` """ # Tx object data: input_count = int_to_varint(len(tx_obj.TxIn)) output_count = int_to_varint(len(tx_obj.TxOut)) output_block = b''.join([bytes(o) for o in tx_obj.TxOut]) hashPrevouts = double_sha256(b''.join([i.txid+i.txindex for i in tx_obj.TxIn])) hashSequence = double_sha256(b''.join([i.sequence for i in tx_obj.TxIn])) hashOutputs = double_sha256(output_block) preimages = [] for input_index, hash_type, segwit_input in inputs_parameters: # We can only handle hashType == 1: if hash_type != HASH_TYPE: raise ValueError('Bit only support hashType of value 1.') # Calculate prehashes: if segwit_input: # BIP-143 preimage: hashed = sha256( tx_obj.version + hashPrevouts + hashSequence + tx_obj.TxIn[input_index].txid + tx_obj.TxIn[input_index].txindex + tx_obj.TxIn[input_index].script_sig_len + # scriptCode length tx_obj.TxIn[input_index].script_sig + # scriptCode (includes amount) tx_obj.TxIn[input_index].sequence + hashOutputs + tx_obj.locktime + hash_type ) else: hashed = sha256( tx_obj.version + input_count + b''.join(ti.txid + ti.txindex + OP_0 + ti.sequence for ti in islice(tx_obj.TxIn, input_index)) + tx_obj.TxIn[input_index].txid + tx_obj.TxIn[input_index].txindex + tx_obj.TxIn[input_index].script_sig_len + # scriptCode length tx_obj.TxIn[input_index].script_sig + # scriptCode 
tx_obj.TxIn[input_index].sequence + b''.join(ti.txid + ti.txindex + OP_0 + ti.sequence for ti in islice(tx_obj.TxIn, input_index + 1, None)) + output_count + output_block + tx_obj.locktime + hash_type ) preimages.append(hashed) return preimages
python
def calculate_preimages(tx_obj, inputs_parameters): # Tx object data: input_count = int_to_varint(len(tx_obj.TxIn)) output_count = int_to_varint(len(tx_obj.TxOut)) output_block = b''.join([bytes(o) for o in tx_obj.TxOut]) hashPrevouts = double_sha256(b''.join([i.txid+i.txindex for i in tx_obj.TxIn])) hashSequence = double_sha256(b''.join([i.sequence for i in tx_obj.TxIn])) hashOutputs = double_sha256(output_block) preimages = [] for input_index, hash_type, segwit_input in inputs_parameters: # We can only handle hashType == 1: if hash_type != HASH_TYPE: raise ValueError('Bit only support hashType of value 1.') # Calculate prehashes: if segwit_input: # BIP-143 preimage: hashed = sha256( tx_obj.version + hashPrevouts + hashSequence + tx_obj.TxIn[input_index].txid + tx_obj.TxIn[input_index].txindex + tx_obj.TxIn[input_index].script_sig_len + # scriptCode length tx_obj.TxIn[input_index].script_sig + # scriptCode (includes amount) tx_obj.TxIn[input_index].sequence + hashOutputs + tx_obj.locktime + hash_type ) else: hashed = sha256( tx_obj.version + input_count + b''.join(ti.txid + ti.txindex + OP_0 + ti.sequence for ti in islice(tx_obj.TxIn, input_index)) + tx_obj.TxIn[input_index].txid + tx_obj.TxIn[input_index].txindex + tx_obj.TxIn[input_index].script_sig_len + # scriptCode length tx_obj.TxIn[input_index].script_sig + # scriptCode tx_obj.TxIn[input_index].sequence + b''.join(ti.txid + ti.txindex + OP_0 + ti.sequence for ti in islice(tx_obj.TxIn, input_index + 1, None)) + output_count + output_block + tx_obj.locktime + hash_type ) preimages.append(hashed) return preimages
[ "def", "calculate_preimages", "(", "tx_obj", ",", "inputs_parameters", ")", ":", "# Tx object data:", "input_count", "=", "int_to_varint", "(", "len", "(", "tx_obj", ".", "TxIn", ")", ")", "output_count", "=", "int_to_varint", "(", "len", "(", "tx_obj", ".", "...
Calculates preimages for provided transaction structure and input values. :param tx_obj: The transaction object used to calculate preimage from using a transaction digest algorithm, such as BIP-143 for Segwit inputs. This transaction object must hence have scriptCodes filled into the corresponding scriptSigs in the inputs. :type tx_obj: :object:`~bit.transaction.TxObj` :param inputs_parameters: A list of tuples with input index as integer, hash type as integer and a boolean flag to denote if the input is spending from a Segwit output. For example: [(0, 1, True), (2, 1, False), (...)] :type inputs_parameters: A `list` of `tuple`
[ "Calculates", "preimages", "for", "provided", "transaction", "structure", "and", "input", "values", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/transaction.py#L517-L582
234,583
ofek/bit
bit/wallet.py
BaseKey.verify
def verify(self, signature, data): """Verifies some data was signed by this private key. :param signature: The signature to verify. :type signature: ``bytes`` :param data: The data that was supposedly signed. :type data: ``bytes`` :rtype: ``bool`` """ return self._pk.public_key.verify(signature, data)
python
def verify(self, signature, data): return self._pk.public_key.verify(signature, data)
[ "def", "verify", "(", "self", ",", "signature", ",", "data", ")", ":", "return", "self", ".", "_pk", ".", "public_key", ".", "verify", "(", "signature", ",", "data", ")" ]
Verifies some data was signed by this private key. :param signature: The signature to verify. :type signature: ``bytes`` :param data: The data that was supposedly signed. :type data: ``bytes`` :rtype: ``bool``
[ "Verifies", "some", "data", "was", "signed", "by", "this", "private", "key", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/wallet.py#L89-L98
234,584
ofek/bit
bit/wallet.py
PrivateKey.get_transactions
def get_transactions(self): """Fetches transaction history. :rtype: ``list`` of ``str`` transaction IDs """ self.transactions[:] = NetworkAPI.get_transactions(self.address) if self.segwit_address: self.transactions += NetworkAPI.get_transactions(self.segwit_address) return self.transactions
python
def get_transactions(self): self.transactions[:] = NetworkAPI.get_transactions(self.address) if self.segwit_address: self.transactions += NetworkAPI.get_transactions(self.segwit_address) return self.transactions
[ "def", "get_transactions", "(", "self", ")", ":", "self", ".", "transactions", "[", ":", "]", "=", "NetworkAPI", ".", "get_transactions", "(", "self", ".", "address", ")", "if", "self", ".", "segwit_address", ":", "self", ".", "transactions", "+=", "Networ...
Fetches transaction history. :rtype: ``list`` of ``str`` transaction IDs
[ "Fetches", "transaction", "history", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/wallet.py#L246-L254
234,585
ofek/bit
bit/wallet.py
MultiSig.address
def address(self): """The public address you share with others to receive funds.""" if self._address is None: self._address = multisig_to_address(self.public_keys, self.m, version=self.version) return self._address
python
def address(self): if self._address is None: self._address = multisig_to_address(self.public_keys, self.m, version=self.version) return self._address
[ "def", "address", "(", "self", ")", ":", "if", "self", ".", "_address", "is", "None", ":", "self", ".", "_address", "=", "multisig_to_address", "(", "self", ".", "public_keys", ",", "self", ".", "m", ",", "version", "=", "self", ".", "version", ")", ...
The public address you share with others to receive funds.
[ "The", "public", "address", "you", "share", "with", "others", "to", "receive", "funds", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/wallet.py#L925-L930
234,586
ofek/bit
bit/base32.py
bech32_polymod
def bech32_polymod(values): """Internal function that computes the Bech32 checksum.""" generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3] chk = 1 for value in values: top = chk >> 25 chk = (chk & 0x1ffffff) << 5 ^ value for i in range(5): chk ^= generator[i] if ((top >> i) & 1) else 0 return chk
python
def bech32_polymod(values): generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3] chk = 1 for value in values: top = chk >> 25 chk = (chk & 0x1ffffff) << 5 ^ value for i in range(5): chk ^= generator[i] if ((top >> i) & 1) else 0 return chk
[ "def", "bech32_polymod", "(", "values", ")", ":", "generator", "=", "[", "0x3b6a57b2", ",", "0x26508e6d", ",", "0x1ea119fa", ",", "0x3d4233dd", ",", "0x2a1462b3", "]", "chk", "=", "1", "for", "value", "in", "values", ":", "top", "=", "chk", ">>", "25", ...
Internal function that computes the Bech32 checksum.
[ "Internal", "function", "that", "computes", "the", "Bech32", "checksum", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/base32.py#L26-L35
234,587
ofek/bit
bit/base32.py
bech32_hrp_expand
def bech32_hrp_expand(hrp): """Expand the HRP into values for checksum computation.""" return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
python
def bech32_hrp_expand(hrp): return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
[ "def", "bech32_hrp_expand", "(", "hrp", ")", ":", "return", "[", "ord", "(", "x", ")", ">>", "5", "for", "x", "in", "hrp", "]", "+", "[", "0", "]", "+", "[", "ord", "(", "x", ")", "&", "31", "for", "x", "in", "hrp", "]" ]
Expand the HRP into values for checksum computation.
[ "Expand", "the", "HRP", "into", "values", "for", "checksum", "computation", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/base32.py#L38-L40
234,588
ofek/bit
bit/base32.py
bech32_create_checksum
def bech32_create_checksum(hrp, data): """Compute the checksum values given HRP and data.""" values = bech32_hrp_expand(hrp) + data polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1 return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
python
def bech32_create_checksum(hrp, data): values = bech32_hrp_expand(hrp) + data polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1 return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
[ "def", "bech32_create_checksum", "(", "hrp", ",", "data", ")", ":", "values", "=", "bech32_hrp_expand", "(", "hrp", ")", "+", "data", "polymod", "=", "bech32_polymod", "(", "values", "+", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ...
Compute the checksum values given HRP and data.
[ "Compute", "the", "checksum", "values", "given", "HRP", "and", "data", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/base32.py#L48-L52
234,589
ofek/bit
bit/base32.py
bech32_encode
def bech32_encode(hrp, data): """Compute a Bech32 string given HRP and data values.""" combined = data + bech32_create_checksum(hrp, data) return hrp + '1' + ''.join([CHARSET[d] for d in combined])
python
def bech32_encode(hrp, data): combined = data + bech32_create_checksum(hrp, data) return hrp + '1' + ''.join([CHARSET[d] for d in combined])
[ "def", "bech32_encode", "(", "hrp", ",", "data", ")", ":", "combined", "=", "data", "+", "bech32_create_checksum", "(", "hrp", ",", "data", ")", "return", "hrp", "+", "'1'", "+", "''", ".", "join", "(", "[", "CHARSET", "[", "d", "]", "for", "d", "i...
Compute a Bech32 string given HRP and data values.
[ "Compute", "a", "Bech32", "string", "given", "HRP", "and", "data", "values", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/base32.py#L55-L58
234,590
ofek/bit
bit/base32.py
convertbits
def convertbits(data, frombits, tobits, pad=True): """General power-of-2 base conversion.""" acc = 0 bits = 0 ret = [] maxv = (1 << tobits) - 1 max_acc = (1 << (frombits + tobits - 1)) - 1 for value in data: if value < 0 or (value >> frombits): return None acc = ((acc << frombits) | value) & max_acc bits += frombits while bits >= tobits: bits -= tobits ret.append((acc >> bits) & maxv) if pad: if bits: ret.append((acc << (tobits - bits)) & maxv) elif bits >= frombits or ((acc << (tobits - bits)) & maxv): return None return ret
python
def convertbits(data, frombits, tobits, pad=True): acc = 0 bits = 0 ret = [] maxv = (1 << tobits) - 1 max_acc = (1 << (frombits + tobits - 1)) - 1 for value in data: if value < 0 or (value >> frombits): return None acc = ((acc << frombits) | value) & max_acc bits += frombits while bits >= tobits: bits -= tobits ret.append((acc >> bits) & maxv) if pad: if bits: ret.append((acc << (tobits - bits)) & maxv) elif bits >= frombits or ((acc << (tobits - bits)) & maxv): return None return ret
[ "def", "convertbits", "(", "data", ",", "frombits", ",", "tobits", ",", "pad", "=", "True", ")", ":", "acc", "=", "0", "bits", "=", "0", "ret", "=", "[", "]", "maxv", "=", "(", "1", "<<", "tobits", ")", "-", "1", "max_acc", "=", "(", "1", "<<...
General power-of-2 base conversion.
[ "General", "power", "-", "of", "-", "2", "base", "conversion", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/base32.py#L79-L99
234,591
ofek/bit
bit/base32.py
decode
def decode(addr): """Decode a segwit address.""" hrpgot, data = bech32_decode(addr) # if hrpgot != hrp: if hrpgot not in BECH32_VERSION_SET: return (None, None) decoded = convertbits(data[1:], 5, 8, False) if decoded is None or len(decoded) < 2 or len(decoded) > 40: return (None, None) if data[0] > 16: return (None, None) if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32: return (None, None) return (data[0], decoded)
python
def decode(addr): hrpgot, data = bech32_decode(addr) # if hrpgot != hrp: if hrpgot not in BECH32_VERSION_SET: return (None, None) decoded = convertbits(data[1:], 5, 8, False) if decoded is None or len(decoded) < 2 or len(decoded) > 40: return (None, None) if data[0] > 16: return (None, None) if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32: return (None, None) return (data[0], decoded)
[ "def", "decode", "(", "addr", ")", ":", "hrpgot", ",", "data", "=", "bech32_decode", "(", "addr", ")", "# if hrpgot != hrp:", "if", "hrpgot", "not", "in", "BECH32_VERSION_SET", ":", "return", "(", "None", ",", "None", ")", "decoded", "=", "convertbits", "(...
Decode a segwit address.
[ "Decode", "a", "segwit", "address", "." ]
20fc0e7047946c1f28f868008d99d659905c1af6
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/base32.py#L103-L116
234,592
nok/sklearn-porter
sklearn_porter/utils/Shell.py
Shell._run
def _run(method, cmd, cwd=None, shell=True, universal_newlines=True, stderr=STDOUT): """Internal wrapper for `call` amd `check_output`""" if not cmd: error_msg = 'Passed empty text or list' raise AttributeError(error_msg) if isinstance(cmd, six.string_types): cmd = str(cmd) if shell: if isinstance(cmd, list): cmd = ' '.join(cmd) else: if isinstance(cmd, str): cmd = cmd.strip().split() out = method(cmd, shell=shell, cwd=cwd, stderr=stderr, universal_newlines=universal_newlines) if isinstance(out, bytes): out = out.decode('utf-8') return str(out).strip()
python
def _run(method, cmd, cwd=None, shell=True, universal_newlines=True, stderr=STDOUT): if not cmd: error_msg = 'Passed empty text or list' raise AttributeError(error_msg) if isinstance(cmd, six.string_types): cmd = str(cmd) if shell: if isinstance(cmd, list): cmd = ' '.join(cmd) else: if isinstance(cmd, str): cmd = cmd.strip().split() out = method(cmd, shell=shell, cwd=cwd, stderr=stderr, universal_newlines=universal_newlines) if isinstance(out, bytes): out = out.decode('utf-8') return str(out).strip()
[ "def", "_run", "(", "method", ",", "cmd", ",", "cwd", "=", "None", ",", "shell", "=", "True", ",", "universal_newlines", "=", "True", ",", "stderr", "=", "STDOUT", ")", ":", "if", "not", "cmd", ":", "error_msg", "=", "'Passed empty text or list'", "raise...
Internal wrapper for `call` amd `check_output`
[ "Internal", "wrapper", "for", "call", "amd", "check_output" ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/utils/Shell.py#L13-L31
234,593
nok/sklearn-porter
sklearn_porter/utils/Shell.py
Shell.call
def call(cmd, shell=True, cwd=None, universal_newlines=True, stderr=STDOUT): """Just execute a specific command.""" return Shell._run(call, cmd, shell=shell, cwd=cwd, stderr=stderr, universal_newlines=universal_newlines)
python
def call(cmd, shell=True, cwd=None, universal_newlines=True, stderr=STDOUT): return Shell._run(call, cmd, shell=shell, cwd=cwd, stderr=stderr, universal_newlines=universal_newlines)
[ "def", "call", "(", "cmd", ",", "shell", "=", "True", ",", "cwd", "=", "None", ",", "universal_newlines", "=", "True", ",", "stderr", "=", "STDOUT", ")", ":", "return", "Shell", ".", "_run", "(", "call", ",", "cmd", ",", "shell", "=", "shell", ",",...
Just execute a specific command.
[ "Just", "execute", "a", "specific", "command", "." ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/utils/Shell.py#L34-L37
234,594
nok/sklearn-porter
sklearn_porter/utils/Logging.py
Logging.get_logger
def get_logger(name, level=0): """Setup a logging instance""" level = 0 if not isinstance(level, int) else level level = 0 if level < 0 else level level = 4 if level > 4 else level console = logging.StreamHandler() level = [logging.NOTSET, logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG][level] console.setLevel(level) formatter = logging.Formatter(LOGGING_FORMAT) console.setFormatter(formatter) logging.getLogger('').addHandler(console) return logging.getLogger(name)
python
def get_logger(name, level=0): level = 0 if not isinstance(level, int) else level level = 0 if level < 0 else level level = 4 if level > 4 else level console = logging.StreamHandler() level = [logging.NOTSET, logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG][level] console.setLevel(level) formatter = logging.Formatter(LOGGING_FORMAT) console.setFormatter(formatter) logging.getLogger('').addHandler(console) return logging.getLogger(name)
[ "def", "get_logger", "(", "name", ",", "level", "=", "0", ")", ":", "level", "=", "0", "if", "not", "isinstance", "(", "level", ",", "int", ")", "else", "level", "level", "=", "0", "if", "level", "<", "0", "else", "level", "level", "=", "4", "if"...
Setup a logging instance
[ "Setup", "a", "logging", "instance" ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/utils/Logging.py#L12-L24
234,595
nok/sklearn-porter
sklearn_porter/Template.py
Template.indent
def indent(self, text, n_indents=1, skipping=False): """ Indent text with single spaces. Parameters ---------- :param text : string The text which get a specific indentation. :param n_indents : int, default: 1 The number of indentations. :param skipping : boolean, default: False Whether to skip the initial indentation. Returns ------- return : string The indented text. """ lines = text.splitlines() space = self.TEMPLATES.get(self.target_language).get('indent', ' ') # Single line: if len(lines) == 1: if skipping: return text.strip() return n_indents * space + text.strip() # Multiple lines: indented_lines = [] for idx, line in enumerate(lines): if skipping and idx is 0: indented_lines.append(line) else: line = n_indents * space + line indented_lines.append(line) indented_text = '\n'.join(indented_lines) return indented_text
python
def indent(self, text, n_indents=1, skipping=False): lines = text.splitlines() space = self.TEMPLATES.get(self.target_language).get('indent', ' ') # Single line: if len(lines) == 1: if skipping: return text.strip() return n_indents * space + text.strip() # Multiple lines: indented_lines = [] for idx, line in enumerate(lines): if skipping and idx is 0: indented_lines.append(line) else: line = n_indents * space + line indented_lines.append(line) indented_text = '\n'.join(indented_lines) return indented_text
[ "def", "indent", "(", "self", ",", "text", ",", "n_indents", "=", "1", ",", "skipping", "=", "False", ")", ":", "lines", "=", "text", ".", "splitlines", "(", ")", "space", "=", "self", ".", "TEMPLATES", ".", "get", "(", "self", ".", "target_language"...
Indent text with single spaces. Parameters ---------- :param text : string The text which get a specific indentation. :param n_indents : int, default: 1 The number of indentations. :param skipping : boolean, default: False Whether to skip the initial indentation. Returns ------- return : string The indented text.
[ "Indent", "text", "with", "single", "spaces", "." ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/Template.py#L25-L61
234,596
nok/sklearn-porter
sklearn_porter/Template.py
Template.temp
def temp(self, name, templates=None, n_indents=None, skipping=False): """ Get specific template of chosen programming language. Parameters ---------- :param param name : string The key name of the template. :param param templates : string, default: None The template with placeholders. :param param n_indents : int, default: None The number of indentations. :param param skipping : bool, default: False Whether to skip the initial indentation. Returns ------- return : string The wanted template string. """ if templates is None: templates = self.TEMPLATES.get(self.target_language) keys = name.split('.') key = keys.pop(0).lower() template = templates.get(key, None) if template is not None: if isinstance(template, str): if n_indents is not None: template = self.indent(template, n_indents, skipping) return template else: keys = '.'.join(keys) return self.temp(keys, template, skipping=False) else: class_name = self.__class__.__name__ estimator_type = getattr(self, 'estimator_type') if \ hasattr(self, 'estimator_type') else 'classifier' path = join(dirname(__file__), 'estimator', estimator_type, class_name, 'templates', self.target_language, name + '.txt') if isfile(path): with open(path, 'r') as file_: template = file_.read() if n_indents is not None: template = self.indent(template, n_indents, skipping) return template else: err = "Template '{}' wasn't found.".format(name) raise AttributeError(err)
python
def temp(self, name, templates=None, n_indents=None, skipping=False): if templates is None: templates = self.TEMPLATES.get(self.target_language) keys = name.split('.') key = keys.pop(0).lower() template = templates.get(key, None) if template is not None: if isinstance(template, str): if n_indents is not None: template = self.indent(template, n_indents, skipping) return template else: keys = '.'.join(keys) return self.temp(keys, template, skipping=False) else: class_name = self.__class__.__name__ estimator_type = getattr(self, 'estimator_type') if \ hasattr(self, 'estimator_type') else 'classifier' path = join(dirname(__file__), 'estimator', estimator_type, class_name, 'templates', self.target_language, name + '.txt') if isfile(path): with open(path, 'r') as file_: template = file_.read() if n_indents is not None: template = self.indent(template, n_indents, skipping) return template else: err = "Template '{}' wasn't found.".format(name) raise AttributeError(err)
[ "def", "temp", "(", "self", ",", "name", ",", "templates", "=", "None", ",", "n_indents", "=", "None", ",", "skipping", "=", "False", ")", ":", "if", "templates", "is", "None", ":", "templates", "=", "self", ".", "TEMPLATES", ".", "get", "(", "self",...
Get specific template of chosen programming language. Parameters ---------- :param param name : string The key name of the template. :param param templates : string, default: None The template with placeholders. :param param n_indents : int, default: None The number of indentations. :param param skipping : bool, default: False Whether to skip the initial indentation. Returns ------- return : string The wanted template string.
[ "Get", "specific", "template", "of", "chosen", "programming", "language", "." ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/Template.py#L63-L112
234,597
nok/sklearn-porter
sklearn_porter/utils/Environment.py
Environment.read_sklearn_version
def read_sklearn_version(): """Determine the installed version of sklearn""" from sklearn import __version__ as sklearn_ver sklearn_ver = str(sklearn_ver).split('.') sklearn_ver = [int(v) for v in sklearn_ver] major, minor = sklearn_ver[0], sklearn_ver[1] patch = sklearn_ver[2] if len(sklearn_ver) >= 3 else 0 return major, minor, patch
python
def read_sklearn_version(): from sklearn import __version__ as sklearn_ver sklearn_ver = str(sklearn_ver).split('.') sklearn_ver = [int(v) for v in sklearn_ver] major, minor = sklearn_ver[0], sklearn_ver[1] patch = sklearn_ver[2] if len(sklearn_ver) >= 3 else 0 return major, minor, patch
[ "def", "read_sklearn_version", "(", ")", ":", "from", "sklearn", "import", "__version__", "as", "sklearn_ver", "sklearn_ver", "=", "str", "(", "sklearn_ver", ")", ".", "split", "(", "'.'", ")", "sklearn_ver", "=", "[", "int", "(", "v", ")", "for", "v", "...
Determine the installed version of sklearn
[ "Determine", "the", "installed", "version", "of", "sklearn" ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/utils/Environment.py#L60-L67
234,598
nok/sklearn-porter
sklearn_porter/utils/Environment.py
Environment._platform_is_windows
def _platform_is_windows(platform=sys.platform): """Is the current OS a Windows?""" matched = platform in ('cygwin', 'win32', 'win64') if matched: error_msg = "Windows isn't supported yet" raise OSError(error_msg) return matched
python
def _platform_is_windows(platform=sys.platform): matched = platform in ('cygwin', 'win32', 'win64') if matched: error_msg = "Windows isn't supported yet" raise OSError(error_msg) return matched
[ "def", "_platform_is_windows", "(", "platform", "=", "sys", ".", "platform", ")", ":", "matched", "=", "platform", "in", "(", "'cygwin'", ",", "'win32'", ",", "'win64'", ")", "if", "matched", ":", "error_msg", "=", "\"Windows isn't supported yet\"", "raise", "...
Is the current OS a Windows?
[ "Is", "the", "current", "OS", "a", "Windows?" ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/utils/Environment.py#L70-L76
234,599
nok/sklearn-porter
sklearn_porter/utils/Environment.py
Environment.check_deps
def check_deps(deps):
    """check whether specific requirements are available.

    Accepts a single dependency name or a list of them; raises
    SystemError for the first one that isn't available.
    """
    if not isinstance(deps, list):
        deps = [deps]
    availability = list(Environment.has_apps(deps))
    if not all(availability):
        # dict() deduplicates repeated names, keeping the last result.
        status = dict(zip(deps, availability))
        for name, is_available in status.items():
            if not is_available:
                raise SystemError(
                    "The required application/dependency '{0}'"
                    " isn't available.".format(name))
python
def check_deps(deps): if not isinstance(deps, list): deps = [deps] checks = list(Environment.has_apps(deps)) if not all(checks): for name, available in list(dict(zip(deps, checks)).items()): if not available: error_msg = "The required application/dependency '{0}'" \ " isn't available.".format(name) raise SystemError(error_msg)
[ "def", "check_deps", "(", "deps", ")", ":", "if", "not", "isinstance", "(", "deps", ",", "list", ")", ":", "deps", "=", "[", "deps", "]", "checks", "=", "list", "(", "Environment", ".", "has_apps", "(", "deps", ")", ")", "if", "not", "all", "(", ...
check whether specific requirements are available.
[ "check", "whether", "specific", "requirements", "are", "available", "." ]
04673f768310bde31f9747a68a5e070592441ef2
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/utils/Environment.py#L94-L104