id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
234,400
hellock/icrawler
icrawler/feeder.py
Feeder.worker_exec
def worker_exec(self, **kwargs): """Target function of workers""" self.feed(**kwargs) self.logger.info('thread {} exit'.format(current_thread().name))
python
def worker_exec(self, **kwargs):
    """Target function of feeder workers: run feed() then log exit."""
    # All keyword arguments are forwarded untouched to feed().
    self.feed(**kwargs)
    thread_name = current_thread().name
    self.logger.info('thread {} exit'.format(thread_name))
[ "def", "worker_exec", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "feed", "(", "*", "*", "kwargs", ")", "self", ".", "logger", ".", "info", "(", "'thread {} exit'", ".", "format", "(", "current_thread", "(", ")", ".", "name", ")", ...
Target function of workers
[ "Target", "function", "of", "workers" ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/feeder.py#L41-L44
234,401
hellock/icrawler
icrawler/feeder.py
SimpleSEFeeder.feed
def feed(self, url_template, keyword, offset, max_num, page_step): """Feed urls once Args: url_template: A string with parameters replaced with "{}". keyword: A string indicating the searching keyword. offset: An integer indicating the starting index. max_num: An integer indicating the max number of images to be crawled. page_step: An integer added to offset after each iteration. """ for i in range(offset, offset + max_num, page_step): url = url_template.format(keyword, i) self.out_queue.put(url) self.logger.debug('put url to url_queue: {}'.format(url))
python
def feed(self, url_template, keyword, offset, max_num, page_step):
    """Feed search-page urls once onto the output queue.

    Args:
        url_template: A string with two "{}" placeholders (keyword, offset).
        keyword: A string indicating the searching keyword.
        offset: An integer indicating the starting index.
        max_num: An integer indicating the max number of images to be crawled.
        page_step: An integer added to offset after each iteration.
    """
    stop = offset + max_num
    for page_offset in range(offset, stop, page_step):
        page_url = url_template.format(keyword, page_offset)
        self.out_queue.put(page_url)
        self.logger.debug('put url to url_queue: {}'.format(page_url))
[ "def", "feed", "(", "self", ",", "url_template", ",", "keyword", ",", "offset", ",", "max_num", ",", "page_step", ")", ":", "for", "i", "in", "range", "(", "offset", ",", "offset", "+", "max_num", ",", "page_step", ")", ":", "url", "=", "url_template",...
Feed urls once Args: url_template: A string with parameters replaced with "{}". keyword: A string indicating the searching keyword. offset: An integer indicating the starting index. max_num: An integer indicating the max number of images to be crawled. page_step: An integer added to offset after each iteration.
[ "Feed", "urls", "once" ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/feeder.py#L79-L92
234,402
hellock/icrawler
icrawler/utils/thread_pool.py
ThreadPool.connect
def connect(self, component): """Connect two ThreadPools. The ``in_queue`` of the second pool will be set as the ``out_queue`` of the current pool, thus all the output will be input to the second pool. Args: component (ThreadPool): the ThreadPool to be connected. Returns: ThreadPool: the modified second ThreadPool. """ if not isinstance(component, ThreadPool): raise TypeError('"component" must be a ThreadPool object') component.in_queue = self.out_queue return component
python
def connect(self, component):
    """Chain two ThreadPools together.

    The downstream pool's ``in_queue`` is rebound to this pool's
    ``out_queue``, so everything produced here flows into ``component``.

    Args:
        component (ThreadPool): the ThreadPool to be connected.

    Returns:
        ThreadPool: the modified second ThreadPool.

    Raises:
        TypeError: if ``component`` is not a ThreadPool.
    """
    if isinstance(component, ThreadPool):
        component.in_queue = self.out_queue
        return component
    raise TypeError('"component" must be a ThreadPool object')
[ "def", "connect", "(", "self", ",", "component", ")", ":", "if", "not", "isinstance", "(", "component", ",", "ThreadPool", ")", ":", "raise", "TypeError", "(", "'\"component\" must be a ThreadPool object'", ")", "component", ".", "in_queue", "=", "self", ".", ...
Connect two ThreadPools. The ``in_queue`` of the second pool will be set as the ``out_queue`` of the current pool, thus all the output will be input to the second pool. Args: component (ThreadPool): the ThreadPool to be connected. Returns: ThreadPool: the modified second ThreadPool.
[ "Connect", "two", "ThreadPools", "." ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/utils/thread_pool.py#L85-L99
234,403
hellock/icrawler
icrawler/downloader.py
Downloader.set_file_idx_offset
def set_file_idx_offset(self, file_idx_offset=0): """Set offset of file index. Args: file_idx_offset: It can be either an integer or 'auto'. If set to an integer, the filename will start from ``file_idx_offset`` + 1. If set to ``'auto'``, the filename will start from existing max file index plus 1. """ if isinstance(file_idx_offset, int): self.file_idx_offset = file_idx_offset elif file_idx_offset == 'auto': self.file_idx_offset = self.storage.max_file_idx() else: raise ValueError('"file_idx_offset" must be an integer or `auto`')
python
def set_file_idx_offset(self, file_idx_offset=0):
    """Set offset of file index.

    Args:
        file_idx_offset: Either an integer or 'auto'. An integer makes
            filenames start from ``file_idx_offset`` + 1; ``'auto'`` starts
            from the existing max file index in storage plus 1.

    Raises:
        ValueError: if the argument is neither an int nor 'auto'.
    """
    if isinstance(file_idx_offset, int):
        self.file_idx_offset = file_idx_offset
        return
    if file_idx_offset == 'auto':
        self.file_idx_offset = self.storage.max_file_idx()
        return
    raise ValueError('"file_idx_offset" must be an integer or `auto`')
[ "def", "set_file_idx_offset", "(", "self", ",", "file_idx_offset", "=", "0", ")", ":", "if", "isinstance", "(", "file_idx_offset", ",", "int", ")", ":", "self", ".", "file_idx_offset", "=", "file_idx_offset", "elif", "file_idx_offset", "==", "'auto'", ":", "se...
Set offset of file index. Args: file_idx_offset: It can be either an integer or 'auto'. If set to an integer, the filename will start from ``file_idx_offset`` + 1. If set to ``'auto'``, the filename will start from existing max file index plus 1.
[ "Set", "offset", "of", "file", "index", "." ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/downloader.py#L45-L59
234,404
hellock/icrawler
icrawler/downloader.py
Downloader.get_filename
def get_filename(self, task, default_ext): """Set the path where the image will be saved. The default strategy is to use an increasing 6-digit number as the filename. You can override this method if you want to set custom naming rules. The file extension is kept if it can be obtained from the url, otherwise ``default_ext`` is used as extension. Args: task (dict): The task dict got from ``task_queue``. Output: Filename with extension. """ url_path = urlparse(task['file_url'])[2] extension = url_path.split('.')[-1] if '.' in url_path else default_ext file_idx = self.fetched_num + self.file_idx_offset return '{:06d}.{}'.format(file_idx, extension)
python
def get_filename(self, task, default_ext):
    """Return the filename under which the image will be saved.

    The default strategy is an increasing, zero-padded 6-digit index.
    Override this method for custom naming rules. The extension is kept
    when the url's final path segment has one, otherwise ``default_ext``
    is used.

    Args:
        task (dict): The task dict got from ``task_queue``; must contain
            'file_url'.
        default_ext: Fallback extension when none is found in the url.

    Returns:
        str: Filename with extension, e.g. '000001.jpg'.
    """
    url_path = urlparse(task['file_url'])[2]
    # Only the last path segment may carry the extension; a dot inside a
    # directory name (e.g. '/v1.2/img') must not be mistaken for one —
    # the old whole-path check produced extensions containing '/'.
    basename = url_path.rsplit('/', 1)[-1]
    if '.' in basename:
        extension = basename.rsplit('.', 1)[-1]
    else:
        extension = default_ext
    file_idx = self.fetched_num + self.file_idx_offset
    return '{:06d}.{}'.format(file_idx, extension)
[ "def", "get_filename", "(", "self", ",", "task", ",", "default_ext", ")", ":", "url_path", "=", "urlparse", "(", "task", "[", "'file_url'", "]", ")", "[", "2", "]", "extension", "=", "url_path", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "i...
Set the path where the image will be saved. The default strategy is to use an increasing 6-digit number as the filename. You can override this method if you want to set custom naming rules. The file extension is kept if it can be obtained from the url, otherwise ``default_ext`` is used as extension. Args: task (dict): The task dict got from ``task_queue``. Output: Filename with extension.
[ "Set", "the", "path", "where", "the", "image", "will", "be", "saved", "." ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/downloader.py#L61-L78
234,405
hellock/icrawler
icrawler/downloader.py
Downloader.reach_max_num
def reach_max_num(self): """Check if downloaded images reached max num. Returns: bool: if downloaded images reached max num. """ if self.signal.get('reach_max_num'): return True if self.max_num > 0 and self.fetched_num >= self.max_num: return True else: return False
python
def reach_max_num(self):
    """Check whether the number of downloaded images reached the cap.

    Returns True either when another worker already raised the shared
    'reach_max_num' signal, or when ``fetched_num`` hit ``max_num``.
    A non-positive ``max_num`` means no limit.

    Returns:
        bool: if downloaded images reached max num.
    """
    if self.signal.get('reach_max_num'):
        return True
    return self.max_num > 0 and self.fetched_num >= self.max_num
[ "def", "reach_max_num", "(", "self", ")", ":", "if", "self", ".", "signal", ".", "get", "(", "'reach_max_num'", ")", ":", "return", "True", "if", "self", ".", "max_num", ">", "0", "and", "self", ".", "fetched_num", ">=", "self", ".", "max_num", ":", ...
Check if downloaded images reached max num. Returns: bool: if downloaded images reached max num.
[ "Check", "if", "downloaded", "images", "reached", "max", "num", "." ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/downloader.py#L80-L91
234,406
hellock/icrawler
icrawler/downloader.py
Downloader.download
def download(self, task, default_ext, timeout=5, max_retry=3, overwrite=False, **kwargs): """Download the image and save it to the corresponding path. Args: task (dict): The task dict got from ``task_queue``. timeout (int): Timeout of making requests for downloading images. max_retry (int): the max retry times if the request fails. **kwargs: reserved arguments for overriding. """ file_url = task['file_url'] task['success'] = False task['filename'] = None retry = max_retry if not overwrite: with self.lock: self.fetched_num += 1 filename = self.get_filename(task, default_ext) if self.storage.exists(filename): self.logger.info('skip downloading file %s', filename) return self.fetched_num -= 1 while retry > 0 and not self.signal.get('reach_max_num'): try: response = self.session.get(file_url, timeout=timeout) except Exception as e: self.logger.error('Exception caught when downloading file %s, ' 'error: %s, remaining retry times: %d', file_url, e, retry - 1) else: if self.reach_max_num(): self.signal.set(reach_max_num=True) break elif response.status_code != 200: self.logger.error('Response status code %d, file %s', response.status_code, file_url) break elif not self.keep_file(task, response, **kwargs): break with self.lock: self.fetched_num += 1 filename = self.get_filename(task, default_ext) self.logger.info('image #%s\t%s', self.fetched_num, file_url) self.storage.write(filename, response.content) task['success'] = True task['filename'] = filename break finally: retry -= 1
python
def download(self, task, default_ext, timeout=5, max_retry=3, overwrite=False, **kwargs):
    """Download the image and save it to the corresponding path.

    Args:
        task (dict): The task dict got from ``task_queue``; must contain
            'file_url'. On return, 'success' and 'filename' are filled in.
        default_ext: Extension used when none can be derived from the url.
        timeout (int): Timeout of making requests for downloading images.
        max_retry (int): the max retry times if the request fails.
        overwrite (bool): when False, urls whose target filename already
            exists in storage are skipped.
        **kwargs: reserved arguments for overriding (forwarded to
            ``keep_file``).
    """
    file_url = task['file_url']
    task['success'] = False
    task['filename'] = None
    retry = max_retry
    if not overwrite:
        with self.lock:
            # Tentatively claim the next index so the would-be filename can
            # be computed; roll the counter back if we do download after all.
            self.fetched_num += 1
            filename = self.get_filename(task, default_ext)
            if self.storage.exists(filename):
                self.logger.info('skip downloading file %s', filename)
                return
            self.fetched_num -= 1
    # Retry loop: stop early if any worker has flagged 'reach_max_num'.
    while retry > 0 and not self.signal.get('reach_max_num'):
        try:
            response = self.session.get(file_url, timeout=timeout)
        except Exception as e:
            # Network failure: log and fall through to the next retry.
            self.logger.error('Exception caught when downloading file %s, '
                              'error: %s, remaining retry times: %d',
                              file_url, e, retry - 1)
        else:
            if self.reach_max_num():
                # Broadcast the stop condition to the other workers.
                self.signal.set(reach_max_num=True)
                break
            elif response.status_code != 200:
                self.logger.error('Response status code %d, file %s',
                                  response.status_code, file_url)
                break
            elif not self.keep_file(task, response, **kwargs):
                # Filtered out (e.g. by size constraints) — give up, no retry.
                break
            with self.lock:
                # Claim the index under the lock so concurrent workers get
                # distinct filenames.
                self.fetched_num += 1
                filename = self.get_filename(task, default_ext)
            self.logger.info('image #%s\t%s', self.fetched_num, file_url)
            self.storage.write(filename, response.content)
            task['success'] = True
            task['filename'] = filename
            break
        finally:
            # Runs on success, handled exception, and every break above.
            retry -= 1
[ "def", "download", "(", "self", ",", "task", ",", "default_ext", ",", "timeout", "=", "5", ",", "max_retry", "=", "3", ",", "overwrite", "=", "False", ",", "*", "*", "kwargs", ")", ":", "file_url", "=", "task", "[", "'file_url'", "]", "task", "[", ...
Download the image and save it to the corresponding path. Args: task (dict): The task dict got from ``task_queue``. timeout (int): Timeout of making requests for downloading images. max_retry (int): the max retry times if the request fails. **kwargs: reserved arguments for overriding.
[ "Download", "the", "image", "and", "save", "it", "to", "the", "corresponding", "path", "." ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/downloader.py#L96-L151
234,407
hellock/icrawler
icrawler/downloader.py
ImageDownloader.keep_file
def keep_file(self, task, response, min_size=None, max_size=None): """Decide whether to keep the image Compare image size with ``min_size`` and ``max_size`` to decide. Args: response (Response): response of requests. min_size (tuple or None): minimum size of required images. max_size (tuple or None): maximum size of required images. Returns: bool: whether to keep the image. """ try: img = Image.open(BytesIO(response.content)) except (IOError, OSError): return False task['img_size'] = img.size if min_size and not self._size_gt(img.size, min_size): return False if max_size and not self._size_lt(img.size, max_size): return False return True
python
def keep_file(self, task, response, min_size=None, max_size=None):
    """Decide whether to keep the downloaded image.

    Decodes the response body and compares the image size with
    ``min_size`` and ``max_size``.

    Args:
        task (dict): the task dict; ``img_size`` is recorded on it.
        response (Response): response of requests.
        min_size (tuple or None): minimum size of required images.
        max_size (tuple or None): maximum size of required images.

    Returns:
        bool: whether to keep the image.
    """
    try:
        img = Image.open(BytesIO(response.content))
    except (IOError, OSError):
        # body is not a decodable image
        return False
    width_height = img.size
    task['img_size'] = width_height
    if min_size and not self._size_gt(width_height, min_size):
        return False
    if max_size and not self._size_lt(width_height, max_size):
        return False
    return True
[ "def", "keep_file", "(", "self", ",", "task", ",", "response", ",", "min_size", "=", "None", ",", "max_size", "=", "None", ")", ":", "try", ":", "img", "=", "Image", ".", "open", "(", "BytesIO", "(", "response", ".", "content", ")", ")", "except", ...
Decide whether to keep the image Compare image size with ``min_size`` and ``max_size`` to decide. Args: response (Response): response of requests. min_size (tuple or None): minimum size of required images. max_size (tuple or None): maximum size of required images. Returns: bool: whether to keep the image.
[ "Decide", "whether", "to", "keep", "the", "image" ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/downloader.py#L232-L253
234,408
hellock/icrawler
icrawler/crawler.py
Crawler.set_logger
def set_logger(self, log_level=logging.INFO): """Configure the logger with log_level.""" logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=log_level, stream=sys.stderr) self.logger = logging.getLogger(__name__) logging.getLogger('requests').setLevel(logging.WARNING)
python
def set_logger(self, log_level=logging.INFO):
    """Configure the logger with log_level."""
    fmt = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
    logging.basicConfig(format=fmt, level=log_level, stream=sys.stderr)
    self.logger = logging.getLogger(__name__)
    # requests is chatty below WARNING; silence its INFO/DEBUG output
    logging.getLogger('requests').setLevel(logging.WARNING)
[ "def", "set_logger", "(", "self", ",", "log_level", "=", "logging", ".", "INFO", ")", ":", "logging", ".", "basicConfig", "(", "format", "=", "'%(asctime)s - %(levelname)s - %(name)s - %(message)s'", ",", "level", "=", "log_level", ",", "stream", "=", "sys", "."...
Configure the logger with log_level.
[ "Configure", "the", "logger", "with", "log_level", "." ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/crawler.py#L76-L83
234,409
hellock/icrawler
icrawler/crawler.py
Crawler.set_storage
def set_storage(self, storage): """Set storage backend for downloader For full list of storage backend supported, please see :mod:`storage`. Args: storage (dict or BaseStorage): storage backend configuration or instance """ if isinstance(storage, BaseStorage): self.storage = storage elif isinstance(storage, dict): if 'backend' not in storage and 'root_dir' in storage: storage['backend'] = 'FileSystem' try: backend_cls = getattr(storage_package, storage['backend']) except AttributeError: try: backend_cls = import_module(storage['backend']) except ImportError: self.logger.error('cannot find backend module %s', storage['backend']) sys.exit() kwargs = storage.copy() del kwargs['backend'] self.storage = backend_cls(**kwargs) else: raise TypeError('"storage" must be a storage object or dict')
python
def set_storage(self, storage):
    """Set storage backend for downloader

    For full list of storage backend supported, please see :mod:`storage`.

    Args:
        storage (dict or BaseStorage): storage backend configuration or
            instance
    """
    if isinstance(storage, BaseStorage):
        self.storage = storage
        return
    if not isinstance(storage, dict):
        raise TypeError('"storage" must be a storage object or dict')
    # a bare root_dir implies the filesystem backend
    if 'backend' not in storage and 'root_dir' in storage:
        storage['backend'] = 'FileSystem'
    try:
        # first look the backend up among the bundled storage classes
        backend_cls = getattr(storage_package, storage['backend'])
    except AttributeError:
        try:
            # fall back to treating it as an importable module path
            backend_cls = import_module(storage['backend'])
        except ImportError:
            self.logger.error('cannot find backend module %s',
                              storage['backend'])
            sys.exit()
    kwargs = dict(storage)
    kwargs.pop('backend')
    self.storage = backend_cls(**kwargs)
[ "def", "set_storage", "(", "self", ",", "storage", ")", ":", "if", "isinstance", "(", "storage", ",", "BaseStorage", ")", ":", "self", ".", "storage", "=", "storage", "elif", "isinstance", "(", "storage", ",", "dict", ")", ":", "if", "'backend'", "not", ...
Set storage backend for downloader For full list of storage backend supported, please see :mod:`storage`. Args: storage (dict or BaseStorage): storage backend configuration or instance
[ "Set", "storage", "backend", "for", "downloader" ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/crawler.py#L95-L122
234,410
hellock/icrawler
icrawler/crawler.py
Crawler.set_session
def set_session(self, headers=None): """Init session with default or custom headers Args: headers: A dict of headers (default None, thus using the default header to init the session) """ if headers is None: headers = { 'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)' ' AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/48.0.2564.116 Safari/537.36') } elif not isinstance(headers, dict): raise TypeError('"headers" must be a dict object') self.session = Session(self.proxy_pool) self.session.headers.update(headers)
python
def set_session(self, headers=None):
    """Init session with default or custom headers

    Args:
        headers: A dict of headers (default None, thus using the default
            header to init the session)
    """
    default_headers = {
        'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)'
                       ' AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/48.0.2564.116 Safari/537.36')
    }
    if headers is None:
        headers = default_headers
    elif not isinstance(headers, dict):
        raise TypeError('"headers" must be a dict object')
    self.session = Session(self.proxy_pool)
    self.session.headers.update(headers)
[ "def", "set_session", "(", "self", ",", "headers", "=", "None", ")", ":", "if", "headers", "is", "None", ":", "headers", "=", "{", "'User-Agent'", ":", "(", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)'", "' AppleWebKit/537.36 (KHTML, like Gecko) '", "'Chrome/48.0....
Init session with default or custom headers Args: headers: A dict of headers (default None, thus using the default header to init the session)
[ "Init", "session", "with", "default", "or", "custom", "headers" ]
38c925758fd3d3e568d3ecc993f77bc0acfa4788
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/crawler.py#L134-L152
234,411
pmorissette/bt
bt/core.py
Node.value
def value(self): """ Current value of the Node """ if self.root.stale: self.root.update(self.root.now, None) return self._value
python
def value(self):
    """
    Current value of the Node
    """
    root = self.root
    if root.stale:
        # refresh the whole tree before reading the cached value
        root.update(root.now, None)
    return self._value
[ "def", "value", "(", "self", ")", ":", "if", "self", ".", "root", ".", "stale", ":", "self", ".", "root", ".", "update", "(", "self", ".", "root", ".", "now", ",", "None", ")", "return", "self", ".", "_value" ]
Current value of the Node
[ "Current", "value", "of", "the", "Node" ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L175-L181
234,412
pmorissette/bt
bt/core.py
Node.members
def members(self): """ Node members. Members include current node as well as Node's children. """ res = [self] for c in list(self.children.values()): res.extend(c.members) return res
python
def members(self):
    """
    Node members. Members include current node as well as Node's children.
    """
    found = [self]
    for child in list(self.children.values()):
        found += child.members
    return found
[ "def", "members", "(", "self", ")", ":", "res", "=", "[", "self", "]", "for", "c", "in", "list", "(", "self", ".", "children", ".", "values", "(", ")", ")", ":", "res", ".", "extend", "(", "c", ".", "members", ")", "return", "res" ]
Node members. Members include current node as well as Node's children.
[ "Node", "members", ".", "Members", "include", "current", "node", "as", "well", "as", "Node", "s", "children", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L229-L237
234,413
pmorissette/bt
bt/core.py
StrategyBase.universe
def universe(self): """ Data universe available at the current time. Universe contains the data passed in when creating a Backtest. Use this data to determine strategy logic. """ # avoid windowing every time # if calling and on same date return # cached value if self.now == self._last_chk: return self._funiverse else: self._last_chk = self.now self._funiverse = self._universe.loc[:self.now] return self._funiverse
python
def universe(self):
    """
    Data universe available at the current time.
    Universe contains the data passed in when creating a Backtest.
    Use this data to determine strategy logic.
    """
    # Window the data at most once per date: reuse the cached slice when
    # called again on the same date.
    if self.now == self._last_chk:
        return self._funiverse
    self._last_chk = self.now
    self._funiverse = self._universe.loc[:self.now]
    return self._funiverse
[ "def", "universe", "(", "self", ")", ":", "# avoid windowing every time", "# if calling and on same date return", "# cached value", "if", "self", ".", "now", "==", "self", ".", "_last_chk", ":", "return", "self", ".", "_funiverse", "else", ":", "self", ".", "_last...
Data universe available at the current time. Universe contains the data passed in when creating a Backtest. Use this data to determine strategy logic.
[ "Data", "universe", "available", "at", "the", "current", "time", ".", "Universe", "contains", "the", "data", "passed", "in", "when", "creating", "a", "Backtest", ".", "Use", "this", "data", "to", "determine", "strategy", "logic", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L374-L388
234,414
pmorissette/bt
bt/core.py
StrategyBase.outlays
def outlays(self): """ Returns a DataFrame of outlays for each child SecurityBase """ return pd.DataFrame({x.name: x.outlays for x in self.securities})
python
def outlays(self):
    """
    Returns a DataFrame of outlays for each child SecurityBase
    """
    per_security = {sec.name: sec.outlays for sec in self.securities}
    return pd.DataFrame(per_security)
[ "def", "outlays", "(", "self", ")", ":", "return", "pd", ".", "DataFrame", "(", "{", "x", ".", "name", ":", "x", ".", "outlays", "for", "x", "in", "self", ".", "securities", "}", ")" ]
Returns a DataFrame of outlays for each child SecurityBase
[ "Returns", "a", "DataFrame", "of", "outlays", "for", "each", "child", "SecurityBase" ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L398-L402
234,415
pmorissette/bt
bt/core.py
StrategyBase.setup
def setup(self, universe): """ Setup strategy with universe. This will speed up future calculations and updates. """ # save full universe in case we need it self._original_data = universe # determine if needs paper trading # and setup if so if self is not self.parent: self._paper_trade = True self._paper_amount = 1000000 paper = deepcopy(self) paper.parent = paper paper.root = paper paper._paper_trade = False paper.setup(self._original_data) paper.adjust(self._paper_amount) self._paper = paper # setup universe funiverse = universe if self._universe_tickers is not None: # if we have universe_tickers defined, limit universe to # those tickers valid_filter = list(set(universe.columns) .intersection(self._universe_tickers)) funiverse = universe[valid_filter].copy() # if we have strat children, we will need to create their columns # in the new universe if self._has_strat_children: for c in self._strat_children: funiverse[c] = np.nan # must create to avoid pandas warning funiverse = pd.DataFrame(funiverse) self._universe = funiverse # holds filtered universe self._funiverse = funiverse self._last_chk = None # We're not bankrupt yet self.bankrupt = False # setup internal data self.data = pd.DataFrame(index=funiverse.index, columns=['price', 'value', 'cash', 'fees'], data=0.0) self._prices = self.data['price'] self._values = self.data['value'] self._cash = self.data['cash'] self._fees = self.data['fees'] # setup children as well - use original universe here - don't want to # pollute with potential strategy children in funiverse if self.children is not None: [c.setup(universe) for c in self._childrenv]
python
def setup(self, universe):
    """
    Setup strategy with universe. This will speed up future calculations
    and updates.

    Args:
        universe (pd.DataFrame): full data universe; columns are tickers.
            (assumed from the .columns / DataFrame usage below — confirm)
    """
    # save full universe in case we need it
    self._original_data = universe

    # determine if needs paper trading
    # and setup if so
    if self is not self.parent:
        # non-root strategy: mirror it with an independent "paper" copy
        # that trades a fixed notional amount to derive a price series
        self._paper_trade = True
        self._paper_amount = 1000000

        paper = deepcopy(self)
        paper.parent = paper
        paper.root = paper
        paper._paper_trade = False
        # recursive setup on the copy; terminates because the copy is
        # its own parent, so this branch is skipped inside it
        paper.setup(self._original_data)
        paper.adjust(self._paper_amount)
        self._paper = paper

    # setup universe
    funiverse = universe

    if self._universe_tickers is not None:
        # if we have universe_tickers defined, limit universe to
        # those tickers
        valid_filter = list(set(universe.columns)
                            .intersection(self._universe_tickers))

        funiverse = universe[valid_filter].copy()

        # if we have strat children, we will need to create their columns
        # in the new universe
        if self._has_strat_children:
            for c in self._strat_children:
                funiverse[c] = np.nan

        # must create to avoid pandas warning
        funiverse = pd.DataFrame(funiverse)

    self._universe = funiverse
    # holds filtered universe
    self._funiverse = funiverse
    # invalidate the universe-window cache used by the `universe` property
    self._last_chk = None

    # We're not bankrupt yet
    self.bankrupt = False

    # setup internal data
    self.data = pd.DataFrame(index=funiverse.index,
                             columns=['price', 'value', 'cash', 'fees'],
                             data=0.0)

    self._prices = self.data['price']
    self._values = self.data['value']
    self._cash = self.data['cash']
    self._fees = self.data['fees']

    # setup children as well - use original universe here - don't want to
    # pollute with potential strategy children in funiverse
    if self.children is not None:
        [c.setup(universe) for c in self._childrenv]
[ "def", "setup", "(", "self", ",", "universe", ")", ":", "# save full universe in case we need it", "self", ".", "_original_data", "=", "universe", "# determine if needs paper trading", "# and setup if so", "if", "self", "is", "not", "self", ".", "parent", ":", "self",...
Setup strategy with universe. This will speed up future calculations and updates.
[ "Setup", "strategy", "with", "universe", ".", "This", "will", "speed", "up", "future", "calculations", "and", "updates", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L418-L481
234,416
pmorissette/bt
bt/core.py
StrategyBase.update
def update(self, date, data=None, inow=None): """ Update strategy. Updates prices, values, weight, etc. """ # resolve stale state self.root.stale = False # update helpers on date change # also set newpt flag newpt = False if self.now == 0: newpt = True elif date != self.now: self._net_flows = 0 self._last_price = self._price self._last_value = self._value self._last_fee = 0.0 newpt = True # update now self.now = date if inow is None: if self.now == 0: inow = 0 else: inow = self.data.index.get_loc(date) # update children if any and calculate value val = self._capital # default if no children if self.children is not None: for c in self._childrenv: # avoid useless update call if c._issec and not c._needupdate: continue c.update(date, data, inow) val += c.value if self.root == self: if (val < 0) and not self.bankrupt: # Declare a bankruptcy self.bankrupt = True self.flatten() # update data if this value is different or # if now has changed - avoid all this if not since it # won't change if newpt or self._value != val: self._value = val self._values.values[inow] = val bottom = self._last_value + self._net_flows if bottom != 0: ret = self._value / (self._last_value + self._net_flows) - 1 else: if self._value == 0: ret = 0 else: raise ZeroDivisionError( 'Could not update %s. Last value ' 'was %s and net flows were %s. Current' 'value is %s. Therefore, ' 'we are dividing by zero to obtain the return ' 'for the period.' 
% (self.name, self._last_value, self._net_flows, self._value)) self._price = self._last_price * (1 + ret) self._prices.values[inow] = self._price # update children weights if self.children is not None: for c in self._childrenv: # avoid useless update call if c._issec and not c._needupdate: continue if val != 0: c._weight = c.value / val else: c._weight = 0.0 # if we have strategy children, we will need to update them in universe if self._has_strat_children: for c in self._strat_children: # TODO: optimize ".loc" here as well self._universe.loc[date, c] = self.children[c].price # Cash should track the unallocated capital at the end of the day, so # we should update it every time we call "update". # Same for fees self._cash.values[inow] = self._capital self._fees.values[inow] = self._last_fee # update paper trade if necessary if newpt and self._paper_trade: self._paper.update(date) self._paper.run() self._paper.update(date) # update price self._price = self._paper.price self._prices.values[inow] = self._price
python
def update(self, date, data=None, inow=None): # resolve stale state self.root.stale = False # update helpers on date change # also set newpt flag newpt = False if self.now == 0: newpt = True elif date != self.now: self._net_flows = 0 self._last_price = self._price self._last_value = self._value self._last_fee = 0.0 newpt = True # update now self.now = date if inow is None: if self.now == 0: inow = 0 else: inow = self.data.index.get_loc(date) # update children if any and calculate value val = self._capital # default if no children if self.children is not None: for c in self._childrenv: # avoid useless update call if c._issec and not c._needupdate: continue c.update(date, data, inow) val += c.value if self.root == self: if (val < 0) and not self.bankrupt: # Declare a bankruptcy self.bankrupt = True self.flatten() # update data if this value is different or # if now has changed - avoid all this if not since it # won't change if newpt or self._value != val: self._value = val self._values.values[inow] = val bottom = self._last_value + self._net_flows if bottom != 0: ret = self._value / (self._last_value + self._net_flows) - 1 else: if self._value == 0: ret = 0 else: raise ZeroDivisionError( 'Could not update %s. Last value ' 'was %s and net flows were %s. Current' 'value is %s. Therefore, ' 'we are dividing by zero to obtain the return ' 'for the period.' 
% (self.name, self._last_value, self._net_flows, self._value)) self._price = self._last_price * (1 + ret) self._prices.values[inow] = self._price # update children weights if self.children is not None: for c in self._childrenv: # avoid useless update call if c._issec and not c._needupdate: continue if val != 0: c._weight = c.value / val else: c._weight = 0.0 # if we have strategy children, we will need to update them in universe if self._has_strat_children: for c in self._strat_children: # TODO: optimize ".loc" here as well self._universe.loc[date, c] = self.children[c].price # Cash should track the unallocated capital at the end of the day, so # we should update it every time we call "update". # Same for fees self._cash.values[inow] = self._capital self._fees.values[inow] = self._last_fee # update paper trade if necessary if newpt and self._paper_trade: self._paper.update(date) self._paper.run() self._paper.update(date) # update price self._price = self._paper.price self._prices.values[inow] = self._price
[ "def", "update", "(", "self", ",", "date", ",", "data", "=", "None", ",", "inow", "=", "None", ")", ":", "# resolve stale state", "self", ".", "root", ".", "stale", "=", "False", "# update helpers on date change", "# also set newpt flag", "newpt", "=", "False"...
Update strategy. Updates prices, values, weight, etc.
[ "Update", "strategy", ".", "Updates", "prices", "values", "weight", "etc", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L484-L586
234,417
pmorissette/bt
bt/core.py
StrategyBase.adjust
def adjust(self, amount, update=True, flow=True, fee=0.0): """ Adjust capital - used to inject capital to a Strategy. This injection of capital will have no effect on the children. Args: * amount (float): Amount to adjust by. * update (bool): Force update? * flow (bool): Is this adjustment a flow? A flow will not have an impact on the performance (price index). Example of flows are simply capital injections (say a monthly contribution to a portfolio). This should not be reflected in the returns. A non-flow (flow=False) does impact performance. A good example of this is a commission, or a dividend. """ # adjust capital self._capital += amount self._last_fee += fee # if flow - increment net_flows - this will not affect # performance. Commissions and other fees are not flows since # they have a performance impact if flow: self._net_flows += amount if update: # indicates that data is now stale and must # be updated before access self.root.stale = True
python
def adjust(self, amount, update=True, flow=True, fee=0.0): # adjust capital self._capital += amount self._last_fee += fee # if flow - increment net_flows - this will not affect # performance. Commissions and other fees are not flows since # they have a performance impact if flow: self._net_flows += amount if update: # indicates that data is now stale and must # be updated before access self.root.stale = True
[ "def", "adjust", "(", "self", ",", "amount", ",", "update", "=", "True", ",", "flow", "=", "True", ",", "fee", "=", "0.0", ")", ":", "# adjust capital", "self", ".", "_capital", "+=", "amount", "self", ".", "_last_fee", "+=", "fee", "# if flow - incremen...
Adjust capital - used to inject capital to a Strategy. This injection of capital will have no effect on the children. Args: * amount (float): Amount to adjust by. * update (bool): Force update? * flow (bool): Is this adjustment a flow? A flow will not have an impact on the performance (price index). Example of flows are simply capital injections (say a monthly contribution to a portfolio). This should not be reflected in the returns. A non-flow (flow=False) does impact performance. A good example of this is a commission, or a dividend.
[ "Adjust", "capital", "-", "used", "to", "inject", "capital", "to", "a", "Strategy", ".", "This", "injection", "of", "capital", "will", "have", "no", "effect", "on", "the", "children", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L589-L618
234,418
pmorissette/bt
bt/core.py
StrategyBase.allocate
def allocate(self, amount, child=None, update=True): """ Allocate capital to Strategy. By default, capital is allocated recursively down the children, proportionally to the children's weights. If a child is specified, capital will be allocated to that specific child. Allocation also have a side-effect. They will deduct the same amount from the parent's "account" to offset the allocation. If there is remaining capital after allocation, it will remain in Strategy. Args: * amount (float): Amount to allocate. * child (str): If specified, allocation will be directed to child only. Specified by name. * update (bool): Force update. """ # allocate to child if child is not None: if child not in self.children: c = SecurityBase(child) c.setup(self._universe) # update to bring up to speed c.update(self.now) # add child to tree self._add_child(c) # allocate to child self.children[child].allocate(amount) # allocate to self else: # adjust parent's capital # no need to update now - avoids repetition if self.parent == self: self.parent.adjust(-amount, update=False, flow=True) else: # do NOT set as flow - parent will be another strategy # and therefore should not incur flow self.parent.adjust(-amount, update=False, flow=False) # adjust self's capital self.adjust(amount, update=False, flow=True) # push allocation down to children if any # use _weight to avoid triggering an update if self.children is not None: [c.allocate(amount * c._weight, update=False) for c in self._childrenv] # mark as stale if update requested if update: self.root.stale = True
python
def allocate(self, amount, child=None, update=True): # allocate to child if child is not None: if child not in self.children: c = SecurityBase(child) c.setup(self._universe) # update to bring up to speed c.update(self.now) # add child to tree self._add_child(c) # allocate to child self.children[child].allocate(amount) # allocate to self else: # adjust parent's capital # no need to update now - avoids repetition if self.parent == self: self.parent.adjust(-amount, update=False, flow=True) else: # do NOT set as flow - parent will be another strategy # and therefore should not incur flow self.parent.adjust(-amount, update=False, flow=False) # adjust self's capital self.adjust(amount, update=False, flow=True) # push allocation down to children if any # use _weight to avoid triggering an update if self.children is not None: [c.allocate(amount * c._weight, update=False) for c in self._childrenv] # mark as stale if update requested if update: self.root.stale = True
[ "def", "allocate", "(", "self", ",", "amount", ",", "child", "=", "None", ",", "update", "=", "True", ")", ":", "# allocate to child", "if", "child", "is", "not", "None", ":", "if", "child", "not", "in", "self", ".", "children", ":", "c", "=", "Secur...
Allocate capital to Strategy. By default, capital is allocated recursively down the children, proportionally to the children's weights. If a child is specified, capital will be allocated to that specific child. Allocation also have a side-effect. They will deduct the same amount from the parent's "account" to offset the allocation. If there is remaining capital after allocation, it will remain in Strategy. Args: * amount (float): Amount to allocate. * child (str): If specified, allocation will be directed to child only. Specified by name. * update (bool): Force update.
[ "Allocate", "capital", "to", "Strategy", ".", "By", "default", "capital", "is", "allocated", "recursively", "down", "the", "children", "proportionally", "to", "the", "children", "s", "weights", ".", "If", "a", "child", "is", "specified", "capital", "will", "be...
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L621-L673
234,419
pmorissette/bt
bt/core.py
StrategyBase.rebalance
def rebalance(self, weight, child, base=np.nan, update=True): """ Rebalance a child to a given weight. This is a helper method to simplify code logic. This method is used when we want to se the weight of a particular child to a set amount. It is similar to allocate, but it calculates the appropriate allocation based on the current weight. Args: * weight (float): The target weight. Usually between -1.0 and 1.0. * child (str): child to allocate to - specified by name. * base (float): If specified, this is the base amount all weight delta calculations will be based off of. This is useful when we determine a set of weights and want to rebalance each child given these new weights. However, as we iterate through each child and call this method, the base (which is by default the current value) will change. Therefore, we can set this base to the original value before the iteration to ensure the proper allocations are made. * update (bool): Force update? """ # if weight is 0 - we want to close child if weight == 0: if child in self.children: return self.close(child) else: return # if no base specified use self's value if np.isnan(base): base = self.value # else make sure we have child if child not in self.children: c = SecurityBase(child) c.setup(self._universe) # update child to bring up to speed c.update(self.now) self._add_child(c) # allocate to child # figure out weight delta c = self.children[child] delta = weight - c.weight c.allocate(delta * base)
python
def rebalance(self, weight, child, base=np.nan, update=True): # if weight is 0 - we want to close child if weight == 0: if child in self.children: return self.close(child) else: return # if no base specified use self's value if np.isnan(base): base = self.value # else make sure we have child if child not in self.children: c = SecurityBase(child) c.setup(self._universe) # update child to bring up to speed c.update(self.now) self._add_child(c) # allocate to child # figure out weight delta c = self.children[child] delta = weight - c.weight c.allocate(delta * base)
[ "def", "rebalance", "(", "self", ",", "weight", ",", "child", ",", "base", "=", "np", ".", "nan", ",", "update", "=", "True", ")", ":", "# if weight is 0 - we want to close child", "if", "weight", "==", "0", ":", "if", "child", "in", "self", ".", "childr...
Rebalance a child to a given weight. This is a helper method to simplify code logic. This method is used when we want to se the weight of a particular child to a set amount. It is similar to allocate, but it calculates the appropriate allocation based on the current weight. Args: * weight (float): The target weight. Usually between -1.0 and 1.0. * child (str): child to allocate to - specified by name. * base (float): If specified, this is the base amount all weight delta calculations will be based off of. This is useful when we determine a set of weights and want to rebalance each child given these new weights. However, as we iterate through each child and call this method, the base (which is by default the current value) will change. Therefore, we can set this base to the original value before the iteration to ensure the proper allocations are made. * update (bool): Force update?
[ "Rebalance", "a", "child", "to", "a", "given", "weight", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L676-L722
234,420
pmorissette/bt
bt/core.py
StrategyBase.flatten
def flatten(self): """ Close all child positions. """ # go right to base alloc [c.allocate(-c.value) for c in self._childrenv if c.value != 0]
python
def flatten(self): # go right to base alloc [c.allocate(-c.value) for c in self._childrenv if c.value != 0]
[ "def", "flatten", "(", "self", ")", ":", "# go right to base alloc", "[", "c", ".", "allocate", "(", "-", "c", ".", "value", ")", "for", "c", "in", "self", ".", "_childrenv", "if", "c", ".", "value", "!=", "0", "]" ]
Close all child positions.
[ "Close", "all", "child", "positions", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L740-L745
234,421
pmorissette/bt
bt/core.py
SecurityBase.setup
def setup(self, universe): """ Setup Security with universe. Speeds up future runs. Args: * universe (DataFrame): DataFrame of prices with security's name as one of the columns. """ # if we already have all the prices, we will store them to speed up # future updates try: prices = universe[self.name] except KeyError: prices = None # setup internal data if prices is not None: self._prices = prices self.data = pd.DataFrame(index=universe.index, columns=['value', 'position'], data=0.0) self._prices_set = True else: self.data = pd.DataFrame(index=universe.index, columns=['price', 'value', 'position']) self._prices = self.data['price'] self._prices_set = False self._values = self.data['value'] self._positions = self.data['position'] # add _outlay self.data['outlay'] = 0. self._outlays = self.data['outlay']
python
def setup(self, universe): # if we already have all the prices, we will store them to speed up # future updates try: prices = universe[self.name] except KeyError: prices = None # setup internal data if prices is not None: self._prices = prices self.data = pd.DataFrame(index=universe.index, columns=['value', 'position'], data=0.0) self._prices_set = True else: self.data = pd.DataFrame(index=universe.index, columns=['price', 'value', 'position']) self._prices = self.data['price'] self._prices_set = False self._values = self.data['value'] self._positions = self.data['position'] # add _outlay self.data['outlay'] = 0. self._outlays = self.data['outlay']
[ "def", "setup", "(", "self", ",", "universe", ")", ":", "# if we already have all the prices, we will store them to speed up", "# future updates", "try", ":", "prices", "=", "universe", "[", "self", ".", "name", "]", "except", "KeyError", ":", "prices", "=", "None",...
Setup Security with universe. Speeds up future runs. Args: * universe (DataFrame): DataFrame of prices with security's name as one of the columns.
[ "Setup", "Security", "with", "universe", ".", "Speeds", "up", "future", "runs", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L893-L927
234,422
pmorissette/bt
bt/core.py
SecurityBase.update
def update(self, date, data=None, inow=None): """ Update security with a given date and optionally, some data. This will update price, value, weight, etc. """ # filter for internal calls when position has not changed - nothing to # do. Internal calls (stale root calls) have None data. Also want to # make sure date has not changed, because then we do indeed want to # update. if date == self.now and self._last_pos == self._position: return if inow is None: if date == 0: inow = 0 else: inow = self.data.index.get_loc(date) # date change - update price if date != self.now: # update now self.now = date if self._prices_set: self._price = self._prices.values[inow] # traditional data update elif data is not None: prc = data[self.name] self._price = prc self._prices.values[inow] = prc self._positions.values[inow] = self._position self._last_pos = self._position if np.isnan(self._price): if self._position == 0: self._value = 0 else: raise Exception( 'Position is open (non-zero) and latest price is NaN ' 'for security %s. Cannot update node value.' % self.name) else: self._value = self._position * self._price * self.multiplier self._values.values[inow] = self._value if self._weight == 0 and self._position == 0: self._needupdate = False # save outlay to outlays if self._outlay != 0: self._outlays.values[inow] = self._outlay # reset outlay back to 0 self._outlay = 0
python
def update(self, date, data=None, inow=None): # filter for internal calls when position has not changed - nothing to # do. Internal calls (stale root calls) have None data. Also want to # make sure date has not changed, because then we do indeed want to # update. if date == self.now and self._last_pos == self._position: return if inow is None: if date == 0: inow = 0 else: inow = self.data.index.get_loc(date) # date change - update price if date != self.now: # update now self.now = date if self._prices_set: self._price = self._prices.values[inow] # traditional data update elif data is not None: prc = data[self.name] self._price = prc self._prices.values[inow] = prc self._positions.values[inow] = self._position self._last_pos = self._position if np.isnan(self._price): if self._position == 0: self._value = 0 else: raise Exception( 'Position is open (non-zero) and latest price is NaN ' 'for security %s. Cannot update node value.' % self.name) else: self._value = self._position * self._price * self.multiplier self._values.values[inow] = self._value if self._weight == 0 and self._position == 0: self._needupdate = False # save outlay to outlays if self._outlay != 0: self._outlays.values[inow] = self._outlay # reset outlay back to 0 self._outlay = 0
[ "def", "update", "(", "self", ",", "date", ",", "data", "=", "None", ",", "inow", "=", "None", ")", ":", "# filter for internal calls when position has not changed - nothing to", "# do. Internal calls (stale root calls) have None data. Also want to", "# make sure date has not cha...
Update security with a given date and optionally, some data. This will update price, value, weight, etc.
[ "Update", "security", "with", "a", "given", "date", "and", "optionally", "some", "data", ".", "This", "will", "update", "price", "value", "weight", "etc", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L930-L983
234,423
pmorissette/bt
bt/core.py
Algo.name
def name(self): """ Algo name. """ if self._name is None: self._name = self.__class__.__name__ return self._name
python
def name(self): if self._name is None: self._name = self.__class__.__name__ return self._name
[ "def", "name", "(", "self", ")", ":", "if", "self", ".", "_name", "is", "None", ":", "self", ".", "_name", "=", "self", ".", "__class__", ".", "__name__", "return", "self", ".", "_name" ]
Algo name.
[ "Algo", "name", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L1217-L1223
234,424
pmorissette/bt
docs/source/_themes/klink/klink/__init__.py
convert_notebooks
def convert_notebooks(): """ Converts IPython Notebooks to proper .rst files and moves static content to the _static directory. """ convert_status = call(['ipython', 'nbconvert', '--to', 'rst', '*.ipynb']) if convert_status != 0: raise SystemError('Conversion failed! Status was %s' % convert_status) notebooks = [x for x in os.listdir('.') if '.ipynb' in x and os.path.isfile(x)] names = [os.path.splitext(x)[0] for x in notebooks] for i in range(len(notebooks)): name = names[i] notebook = notebooks[i] print('processing %s (%s)' % (name, notebook)) # move static files sdir = '%s_files' % name statics = os.listdir(sdir) statics = [os.path.join(sdir, x) for x in statics] [shutil.copy(x, '_static/') for x in statics] shutil.rmtree(sdir) # rename static dir in rst file rst_file = '%s.rst' % name print('REsT file is %s' % rst_file) data = None with open(rst_file, 'r') as f: data = f.read() if data is not None: with open(rst_file, 'w') as f: data = re.sub('%s' % sdir, '_static', data) f.write(data) # add special tags lines = None with open(rst_file, 'r') as f: lines = f.readlines() if lines is not None: n = len(lines) i = 0 rawWatch = False while i < n: line = lines[i] # add class tags to images for css formatting if 'image::' in line: lines.insert(i + 1, ' :class: pynb\n') n += 1 elif 'parsed-literal::' in line: lines.insert(i + 1, ' :class: pynb-result\n') n += 1 elif 'raw:: html' in line: rawWatch = True if rawWatch: if '<div' in line: line = line.replace('<div', '<div class="pynb-result"') lines[i] = line rawWatch = False i += 1 with open(rst_file, 'w') as f: f.writelines(lines)
python
def convert_notebooks(): convert_status = call(['ipython', 'nbconvert', '--to', 'rst', '*.ipynb']) if convert_status != 0: raise SystemError('Conversion failed! Status was %s' % convert_status) notebooks = [x for x in os.listdir('.') if '.ipynb' in x and os.path.isfile(x)] names = [os.path.splitext(x)[0] for x in notebooks] for i in range(len(notebooks)): name = names[i] notebook = notebooks[i] print('processing %s (%s)' % (name, notebook)) # move static files sdir = '%s_files' % name statics = os.listdir(sdir) statics = [os.path.join(sdir, x) for x in statics] [shutil.copy(x, '_static/') for x in statics] shutil.rmtree(sdir) # rename static dir in rst file rst_file = '%s.rst' % name print('REsT file is %s' % rst_file) data = None with open(rst_file, 'r') as f: data = f.read() if data is not None: with open(rst_file, 'w') as f: data = re.sub('%s' % sdir, '_static', data) f.write(data) # add special tags lines = None with open(rst_file, 'r') as f: lines = f.readlines() if lines is not None: n = len(lines) i = 0 rawWatch = False while i < n: line = lines[i] # add class tags to images for css formatting if 'image::' in line: lines.insert(i + 1, ' :class: pynb\n') n += 1 elif 'parsed-literal::' in line: lines.insert(i + 1, ' :class: pynb-result\n') n += 1 elif 'raw:: html' in line: rawWatch = True if rawWatch: if '<div' in line: line = line.replace('<div', '<div class="pynb-result"') lines[i] = line rawWatch = False i += 1 with open(rst_file, 'w') as f: f.writelines(lines)
[ "def", "convert_notebooks", "(", ")", ":", "convert_status", "=", "call", "(", "[", "'ipython'", ",", "'nbconvert'", ",", "'--to'", ",", "'rst'", ",", "'*.ipynb'", "]", ")", "if", "convert_status", "!=", "0", ":", "raise", "SystemError", "(", "'Conversion fa...
Converts IPython Notebooks to proper .rst files and moves static content to the _static directory.
[ "Converts", "IPython", "Notebooks", "to", "proper", ".", "rst", "files", "and", "moves", "static", "content", "to", "the", "_static", "directory", "." ]
0363e6fa100d9392dd18e32e3d8379d5e83c28fa
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/docs/source/_themes/klink/klink/__init__.py#L7-L76
234,425
istresearch/scrapy-cluster
utils/scutils/settings_wrapper.py
SettingsWrapper.load
def load(self, local='localsettings.py', default='settings.py'): ''' Load the settings dict @param local: The local settings filename to use @param default: The default settings module to read @return: A dict of the loaded settings ''' self._load_defaults(default) self._load_custom(local) return self.settings()
python
def load(self, local='localsettings.py', default='settings.py'): ''' Load the settings dict @param local: The local settings filename to use @param default: The default settings module to read @return: A dict of the loaded settings ''' self._load_defaults(default) self._load_custom(local) return self.settings()
[ "def", "load", "(", "self", ",", "local", "=", "'localsettings.py'", ",", "default", "=", "'settings.py'", ")", ":", "self", ".", "_load_defaults", "(", "default", ")", "self", ".", "_load_custom", "(", "local", ")", "return", "self", ".", "settings", "(",...
Load the settings dict @param local: The local settings filename to use @param default: The default settings module to read @return: A dict of the loaded settings
[ "Load", "the", "settings", "dict" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/settings_wrapper.py#L29-L40
234,426
istresearch/scrapy-cluster
utils/scutils/settings_wrapper.py
SettingsWrapper._load_defaults
def _load_defaults(self, default='settings.py'): ''' Load the default settings ''' if default[-3:] == '.py': default = default[:-3] self.my_settings = {} try: settings = importlib.import_module(default) self.my_settings = self._convert_to_dict(settings) except ImportError: log.warning("No default settings found")
python
def _load_defaults(self, default='settings.py'): ''' Load the default settings ''' if default[-3:] == '.py': default = default[:-3] self.my_settings = {} try: settings = importlib.import_module(default) self.my_settings = self._convert_to_dict(settings) except ImportError: log.warning("No default settings found")
[ "def", "_load_defaults", "(", "self", ",", "default", "=", "'settings.py'", ")", ":", "if", "default", "[", "-", "3", ":", "]", "==", "'.py'", ":", "default", "=", "default", "[", ":", "-", "3", "]", "self", ".", "my_settings", "=", "{", "}", "try"...
Load the default settings
[ "Load", "the", "default", "settings" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/settings_wrapper.py#L70-L82
234,427
istresearch/scrapy-cluster
utils/scutils/settings_wrapper.py
SettingsWrapper._load_custom
def _load_custom(self, settings_name='localsettings.py'): ''' Load the user defined settings, overriding the defaults ''' if settings_name[-3:] == '.py': settings_name = settings_name[:-3] new_settings = {} try: settings = importlib.import_module(settings_name) new_settings = self._convert_to_dict(settings) except ImportError: log.info("No override settings found") for key in new_settings: if key in self.my_settings: item = new_settings[key] if isinstance(item, dict) and \ isinstance(self.my_settings[key], dict): for key2 in item: self.my_settings[key][key2] = item[key2] else: self.my_settings[key] = item else: self.my_settings[key] = new_settings[key]
python
def _load_custom(self, settings_name='localsettings.py'): ''' Load the user defined settings, overriding the defaults ''' if settings_name[-3:] == '.py': settings_name = settings_name[:-3] new_settings = {} try: settings = importlib.import_module(settings_name) new_settings = self._convert_to_dict(settings) except ImportError: log.info("No override settings found") for key in new_settings: if key in self.my_settings: item = new_settings[key] if isinstance(item, dict) and \ isinstance(self.my_settings[key], dict): for key2 in item: self.my_settings[key][key2] = item[key2] else: self.my_settings[key] = item else: self.my_settings[key] = new_settings[key]
[ "def", "_load_custom", "(", "self", ",", "settings_name", "=", "'localsettings.py'", ")", ":", "if", "settings_name", "[", "-", "3", ":", "]", "==", "'.py'", ":", "settings_name", "=", "settings_name", "[", ":", "-", "3", "]", "new_settings", "=", "{", "...
Load the user defined settings, overriding the defaults
[ "Load", "the", "user", "defined", "settings", "overriding", "the", "defaults" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/settings_wrapper.py#L84-L109
234,428
istresearch/scrapy-cluster
utils/scutils/settings_wrapper.py
SettingsWrapper._convert_to_dict
def _convert_to_dict(self, setting): ''' Converts a settings file into a dictionary, ignoring python defaults @param setting: A loaded setting module ''' the_dict = {} set = dir(setting) for key in set: if key in self.ignore: continue value = getattr(setting, key) the_dict[key] = value return the_dict
python
def _convert_to_dict(self, setting): ''' Converts a settings file into a dictionary, ignoring python defaults @param setting: A loaded setting module ''' the_dict = {} set = dir(setting) for key in set: if key in self.ignore: continue value = getattr(setting, key) the_dict[key] = value return the_dict
[ "def", "_convert_to_dict", "(", "self", ",", "setting", ")", ":", "the_dict", "=", "{", "}", "set", "=", "dir", "(", "setting", ")", "for", "key", "in", "set", ":", "if", "key", "in", "self", ".", "ignore", ":", "continue", "value", "=", "getattr", ...
Converts a settings file into a dictionary, ignoring python defaults @param setting: A loaded setting module
[ "Converts", "a", "settings", "file", "into", "a", "dictionary", "ignoring", "python", "defaults" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/settings_wrapper.py#L111-L125
234,429
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.load_domain_config
def load_domain_config(self, loaded_config): ''' Loads the domain_config and sets up queue_dict @param loaded_config: the yaml loaded config dict from zookeeper ''' self.domain_config = {} # vetting process to ensure correct configs if loaded_config: if 'domains' in loaded_config: for domain in loaded_config['domains']: item = loaded_config['domains'][domain] # check valid if 'window' in item and 'hits' in item: self.logger.debug("Added domain {dom} to loaded config" .format(dom=domain)) self.domain_config[domain] = item if 'blacklist' in loaded_config: self.black_domains = loaded_config['blacklist'] self.config_flag = True
python
def load_domain_config(self, loaded_config): ''' Loads the domain_config and sets up queue_dict @param loaded_config: the yaml loaded config dict from zookeeper ''' self.domain_config = {} # vetting process to ensure correct configs if loaded_config: if 'domains' in loaded_config: for domain in loaded_config['domains']: item = loaded_config['domains'][domain] # check valid if 'window' in item and 'hits' in item: self.logger.debug("Added domain {dom} to loaded config" .format(dom=domain)) self.domain_config[domain] = item if 'blacklist' in loaded_config: self.black_domains = loaded_config['blacklist'] self.config_flag = True
[ "def", "load_domain_config", "(", "self", ",", "loaded_config", ")", ":", "self", ".", "domain_config", "=", "{", "}", "# vetting process to ensure correct configs", "if", "loaded_config", ":", "if", "'domains'", "in", "loaded_config", ":", "for", "domain", "in", ...
Loads the domain_config and sets up queue_dict @param loaded_config: the yaml loaded config dict from zookeeper
[ "Loads", "the", "domain_config", "and", "sets", "up", "queue_dict" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L130-L149
234,430
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.update_domain_queues
def update_domain_queues(self): ''' Check to update existing queues already in memory new queues are created elsewhere ''' for key in self.domain_config: final_key = "{name}:{domain}:queue".format( name=self.spider.name, domain=key) # we already have a throttled queue for this domain, update it to new settings if final_key in self.queue_dict: self.queue_dict[final_key][0].window = float(self.domain_config[key]['window']) self.logger.debug("Updated queue {q} with new config" .format(q=final_key)) # if scale is applied, scale back; otherwise use updated hits if 'scale' in self.domain_config[key]: # round to int hits = int(self.domain_config[key]['hits'] * self.fit_scale( self.domain_config[key]['scale'])) self.queue_dict[final_key][0].limit = float(hits) else: self.queue_dict[final_key][0].limit = float(self.domain_config[key]['hits'])
python
def update_domain_queues(self): ''' Check to update existing queues already in memory new queues are created elsewhere ''' for key in self.domain_config: final_key = "{name}:{domain}:queue".format( name=self.spider.name, domain=key) # we already have a throttled queue for this domain, update it to new settings if final_key in self.queue_dict: self.queue_dict[final_key][0].window = float(self.domain_config[key]['window']) self.logger.debug("Updated queue {q} with new config" .format(q=final_key)) # if scale is applied, scale back; otherwise use updated hits if 'scale' in self.domain_config[key]: # round to int hits = int(self.domain_config[key]['hits'] * self.fit_scale( self.domain_config[key]['scale'])) self.queue_dict[final_key][0].limit = float(hits) else: self.queue_dict[final_key][0].limit = float(self.domain_config[key]['hits'])
[ "def", "update_domain_queues", "(", "self", ")", ":", "for", "key", "in", "self", ".", "domain_config", ":", "final_key", "=", "\"{name}:{domain}:queue\"", ".", "format", "(", "name", "=", "self", ".", "spider", ".", "name", ",", "domain", "=", "key", ")",...
Check to update existing queues already in memory new queues are created elsewhere
[ "Check", "to", "update", "existing", "queues", "already", "in", "memory", "new", "queues", "are", "created", "elsewhere" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L151-L172
234,431
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.create_queues
def create_queues(self): ''' Updates the in memory list of the redis queues Creates new throttled queue instances if it does not have them ''' # new config could have loaded between scrapes newConf = self.check_config() self.queue_keys = self.redis_conn.keys(self.spider.name + ":*:queue") for key in self.queue_keys: # build final queue key, depending on type and ip bools throttle_key = "" if self.add_type: throttle_key = self.spider.name + ":" if self.add_ip: throttle_key = throttle_key + self.my_ip + ":" # add the tld from the key `type:tld:queue` the_domain = re.split(':', key)[1] throttle_key = throttle_key + the_domain if key not in self.queue_dict or newConf: self.logger.debug("Added new Throttled Queue {q}" .format(q=key)) q = RedisPriorityQueue(self.redis_conn, key, encoding=ujson) # use default window and hits if the_domain not in self.domain_config: # this is now a tuple, all access needs to use [0] to get # the object, use [1] to get the time self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn, q, self.window, self.hits, self.moderated, throttle_key, throttle_key, True), time.time()] # use custom window and hits else: window = self.domain_config[the_domain]['window'] hits = self.domain_config[the_domain]['hits'] # adjust the crawl rate based on the scale if exists if 'scale' in self.domain_config[the_domain]: hits = int(hits * self.fit_scale(self.domain_config[the_domain]['scale'])) self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn, q, window, hits, self.moderated, throttle_key, throttle_key, True), time.time()]
python
def create_queues(self): ''' Updates the in memory list of the redis queues Creates new throttled queue instances if it does not have them ''' # new config could have loaded between scrapes newConf = self.check_config() self.queue_keys = self.redis_conn.keys(self.spider.name + ":*:queue") for key in self.queue_keys: # build final queue key, depending on type and ip bools throttle_key = "" if self.add_type: throttle_key = self.spider.name + ":" if self.add_ip: throttle_key = throttle_key + self.my_ip + ":" # add the tld from the key `type:tld:queue` the_domain = re.split(':', key)[1] throttle_key = throttle_key + the_domain if key not in self.queue_dict or newConf: self.logger.debug("Added new Throttled Queue {q}" .format(q=key)) q = RedisPriorityQueue(self.redis_conn, key, encoding=ujson) # use default window and hits if the_domain not in self.domain_config: # this is now a tuple, all access needs to use [0] to get # the object, use [1] to get the time self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn, q, self.window, self.hits, self.moderated, throttle_key, throttle_key, True), time.time()] # use custom window and hits else: window = self.domain_config[the_domain]['window'] hits = self.domain_config[the_domain]['hits'] # adjust the crawl rate based on the scale if exists if 'scale' in self.domain_config[the_domain]: hits = int(hits * self.fit_scale(self.domain_config[the_domain]['scale'])) self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn, q, window, hits, self.moderated, throttle_key, throttle_key, True), time.time()]
[ "def", "create_queues", "(", "self", ")", ":", "# new config could have loaded between scrapes", "newConf", "=", "self", ".", "check_config", "(", ")", "self", ".", "queue_keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "self", ".", "spider", ".", "na...
Updates the in memory list of the redis queues Creates new throttled queue instances if it does not have them
[ "Updates", "the", "in", "memory", "list", "of", "the", "redis", "queues", "Creates", "new", "throttled", "queue", "instances", "if", "it", "does", "not", "have", "them" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L202-L248
234,432
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.expire_queues
def expire_queues(self): ''' Expires old queue_dict keys that have not been used in a long time. Prevents slow memory build up when crawling lots of different domains ''' curr_time = time.time() for key in list(self.queue_dict): diff = curr_time - self.queue_dict[key][1] if diff > self.queue_timeout: self.logger.debug("Expiring domain queue key " + key) del self.queue_dict[key] if key in self.queue_keys: self.queue_keys.remove(key)
python
def expire_queues(self): ''' Expires old queue_dict keys that have not been used in a long time. Prevents slow memory build up when crawling lots of different domains ''' curr_time = time.time() for key in list(self.queue_dict): diff = curr_time - self.queue_dict[key][1] if diff > self.queue_timeout: self.logger.debug("Expiring domain queue key " + key) del self.queue_dict[key] if key in self.queue_keys: self.queue_keys.remove(key)
[ "def", "expire_queues", "(", "self", ")", ":", "curr_time", "=", "time", ".", "time", "(", ")", "for", "key", "in", "list", "(", "self", ".", "queue_dict", ")", ":", "diff", "=", "curr_time", "-", "self", ".", "queue_dict", "[", "key", "]", "[", "1...
Expires old queue_dict keys that have not been used in a long time. Prevents slow memory build up when crawling lots of different domains
[ "Expires", "old", "queue_dict", "keys", "that", "have", "not", "been", "used", "in", "a", "long", "time", ".", "Prevents", "slow", "memory", "build", "up", "when", "crawling", "lots", "of", "different", "domains" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L250-L262
234,433
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.update_ipaddress
def update_ipaddress(self): ''' Updates the scheduler so it knows its own ip address ''' # assign local ip in case of exception self.old_ip = self.my_ip self.my_ip = '127.0.0.1' try: obj = urllib.request.urlopen(settings.get('PUBLIC_IP_URL', 'http://ip.42.pl/raw')) results = self.ip_regex.findall(obj.read()) if len(results) > 0: self.my_ip = results[0] else: raise IOError("Could not get valid IP Address") obj.close() self.logger.debug("Current public ip: {ip}".format(ip=self.my_ip)) except IOError: self.logger.error("Could not reach out to get public ip") pass if self.old_ip != self.my_ip: self.logger.info("Changed Public IP: {old} -> {new}".format( old=self.old_ip, new=self.my_ip))
python
def update_ipaddress(self): ''' Updates the scheduler so it knows its own ip address ''' # assign local ip in case of exception self.old_ip = self.my_ip self.my_ip = '127.0.0.1' try: obj = urllib.request.urlopen(settings.get('PUBLIC_IP_URL', 'http://ip.42.pl/raw')) results = self.ip_regex.findall(obj.read()) if len(results) > 0: self.my_ip = results[0] else: raise IOError("Could not get valid IP Address") obj.close() self.logger.debug("Current public ip: {ip}".format(ip=self.my_ip)) except IOError: self.logger.error("Could not reach out to get public ip") pass if self.old_ip != self.my_ip: self.logger.info("Changed Public IP: {old} -> {new}".format( old=self.old_ip, new=self.my_ip))
[ "def", "update_ipaddress", "(", "self", ")", ":", "# assign local ip in case of exception", "self", ".", "old_ip", "=", "self", ".", "my_ip", "self", ".", "my_ip", "=", "'127.0.0.1'", "try", ":", "obj", "=", "urllib", ".", "request", ".", "urlopen", "(", "se...
Updates the scheduler so it knows its own ip address
[ "Updates", "the", "scheduler", "so", "it", "knows", "its", "own", "ip", "address" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L275-L298
234,434
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.is_blacklisted
def is_blacklisted(self, appid, crawlid): ''' Checks the redis blacklist for crawls that should not be propagated either from expiring or stopped @return: True if the appid crawlid combo is blacklisted ''' key_check = '{appid}||{crawlid}'.format(appid=appid, crawlid=crawlid) redis_key = self.spider.name + ":blacklist" return self.redis_conn.sismember(redis_key, key_check)
python
def is_blacklisted(self, appid, crawlid): ''' Checks the redis blacklist for crawls that should not be propagated either from expiring or stopped @return: True if the appid crawlid combo is blacklisted ''' key_check = '{appid}||{crawlid}'.format(appid=appid, crawlid=crawlid) redis_key = self.spider.name + ":blacklist" return self.redis_conn.sismember(redis_key, key_check)
[ "def", "is_blacklisted", "(", "self", ",", "appid", ",", "crawlid", ")", ":", "key_check", "=", "'{appid}||{crawlid}'", ".", "format", "(", "appid", "=", "appid", ",", "crawlid", "=", "crawlid", ")", "redis_key", "=", "self", ".", "spider", ".", "name", ...
Checks the redis blacklist for crawls that should not be propagated either from expiring or stopped @return: True if the appid crawlid combo is blacklisted
[ "Checks", "the", "redis", "blacklist", "for", "crawls", "that", "should", "not", "be", "propagated", "either", "from", "expiring", "or", "stopped" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L375-L384
234,435
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.enqueue_request
def enqueue_request(self, request): ''' Pushes a request from the spider into the proper throttled queue ''' if not request.dont_filter and self.dupefilter.request_seen(request): self.logger.debug("Request not added back to redis") return req_dict = self.request_to_dict(request) if not self.is_blacklisted(req_dict['meta']['appid'], req_dict['meta']['crawlid']): # grab the tld of the request ex_res = self.extract(req_dict['url']) key = "{sid}:{dom}.{suf}:queue".format( sid=req_dict['meta']['spiderid'], dom=ex_res.domain, suf=ex_res.suffix) curr_time = time.time() domain = "{d}.{s}".format(d=ex_res.domain, s=ex_res.suffix) # allow only if we want all requests or we want # everything but blacklisted domains # insert if crawl never expires (0) or time < expires if (self.backlog_blacklist or (not self.backlog_blacklist and domain not in self.black_domains)) and \ (req_dict['meta']['expires'] == 0 or curr_time < req_dict['meta']['expires']): # we may already have the queue in memory if key in self.queue_keys: self.queue_dict[key][0].push(req_dict, req_dict['meta']['priority']) else: # shoving into a new redis queue, negative b/c of sorted sets # this will populate ourself and other schedulers when # they call create_queues self.redis_conn.zadd(key, ujson.dumps(req_dict), -req_dict['meta']['priority']) self.logger.debug("Crawlid: '{id}' Appid: '{appid}' added to queue" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid'])) else: self.logger.debug("Crawlid: '{id}' Appid: '{appid}' expired" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid'])) else: self.logger.debug("Crawlid: '{id}' Appid: '{appid}' blacklisted" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid']))
python
def enqueue_request(self, request): ''' Pushes a request from the spider into the proper throttled queue ''' if not request.dont_filter and self.dupefilter.request_seen(request): self.logger.debug("Request not added back to redis") return req_dict = self.request_to_dict(request) if not self.is_blacklisted(req_dict['meta']['appid'], req_dict['meta']['crawlid']): # grab the tld of the request ex_res = self.extract(req_dict['url']) key = "{sid}:{dom}.{suf}:queue".format( sid=req_dict['meta']['spiderid'], dom=ex_res.domain, suf=ex_res.suffix) curr_time = time.time() domain = "{d}.{s}".format(d=ex_res.domain, s=ex_res.suffix) # allow only if we want all requests or we want # everything but blacklisted domains # insert if crawl never expires (0) or time < expires if (self.backlog_blacklist or (not self.backlog_blacklist and domain not in self.black_domains)) and \ (req_dict['meta']['expires'] == 0 or curr_time < req_dict['meta']['expires']): # we may already have the queue in memory if key in self.queue_keys: self.queue_dict[key][0].push(req_dict, req_dict['meta']['priority']) else: # shoving into a new redis queue, negative b/c of sorted sets # this will populate ourself and other schedulers when # they call create_queues self.redis_conn.zadd(key, ujson.dumps(req_dict), -req_dict['meta']['priority']) self.logger.debug("Crawlid: '{id}' Appid: '{appid}' added to queue" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid'])) else: self.logger.debug("Crawlid: '{id}' Appid: '{appid}' expired" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid'])) else: self.logger.debug("Crawlid: '{id}' Appid: '{appid}' blacklisted" .format(appid=req_dict['meta']['appid'], id=req_dict['meta']['crawlid']))
[ "def", "enqueue_request", "(", "self", ",", "request", ")", ":", "if", "not", "request", ".", "dont_filter", "and", "self", ".", "dupefilter", ".", "request_seen", "(", "request", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Request not added back ...
Pushes a request from the spider into the proper throttled queue
[ "Pushes", "a", "request", "from", "the", "spider", "into", "the", "proper", "throttled", "queue" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L386-L436
234,436
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.request_to_dict
def request_to_dict(self, request): ''' Convert Request object to a dict. modified from scrapy.utils.reqser ''' req_dict = { # urls should be safe (safe_string_url) 'url': to_unicode(request.url), 'method': request.method, 'headers': dict(request.headers), 'body': request.body, 'cookies': request.cookies, 'meta': request.meta, '_encoding': request._encoding, 'priority': request.priority, 'dont_filter': request.dont_filter, # callback/errback are assumed to be a bound instance of the spider 'callback': None if request.callback is None else request.callback.__name__, 'errback': None if request.errback is None else request.errback.__name__, } return req_dict
python
def request_to_dict(self, request): ''' Convert Request object to a dict. modified from scrapy.utils.reqser ''' req_dict = { # urls should be safe (safe_string_url) 'url': to_unicode(request.url), 'method': request.method, 'headers': dict(request.headers), 'body': request.body, 'cookies': request.cookies, 'meta': request.meta, '_encoding': request._encoding, 'priority': request.priority, 'dont_filter': request.dont_filter, # callback/errback are assumed to be a bound instance of the spider 'callback': None if request.callback is None else request.callback.__name__, 'errback': None if request.errback is None else request.errback.__name__, } return req_dict
[ "def", "request_to_dict", "(", "self", ",", "request", ")", ":", "req_dict", "=", "{", "# urls should be safe (safe_string_url)", "'url'", ":", "to_unicode", "(", "request", ".", "url", ")", ",", "'method'", ":", "request", ".", "method", ",", "'headers'", ":"...
Convert Request object to a dict. modified from scrapy.utils.reqser
[ "Convert", "Request", "object", "to", "a", "dict", ".", "modified", "from", "scrapy", ".", "utils", ".", "reqser" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L438-L458
234,437
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.find_item
def find_item(self): ''' Finds an item from the throttled queues ''' random.shuffle(self.queue_keys) count = 0 while count <= self.item_retries: for key in self.queue_keys: # skip if the whole domain has been blacklisted in zookeeper if key.split(':')[1] in self.black_domains: continue # the throttled queue only returns an item if it is allowed item = self.queue_dict[key][0].pop() if item: # update timeout and return self.queue_dict[key][1] = time.time() return item count = count + 1 return None
python
def find_item(self): ''' Finds an item from the throttled queues ''' random.shuffle(self.queue_keys) count = 0 while count <= self.item_retries: for key in self.queue_keys: # skip if the whole domain has been blacklisted in zookeeper if key.split(':')[1] in self.black_domains: continue # the throttled queue only returns an item if it is allowed item = self.queue_dict[key][0].pop() if item: # update timeout and return self.queue_dict[key][1] = time.time() return item count = count + 1 return None
[ "def", "find_item", "(", "self", ")", ":", "random", ".", "shuffle", "(", "self", ".", "queue_keys", ")", "count", "=", "0", "while", "count", "<=", "self", ".", "item_retries", ":", "for", "key", "in", "self", ".", "queue_keys", ":", "# skip if the whol...
Finds an item from the throttled queues
[ "Finds", "an", "item", "from", "the", "throttled", "queues" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L460-L482
234,438
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.next_request
def next_request(self): ''' Logic to handle getting a new url request, from a bunch of different queues ''' t = time.time() # update the redis queues every so often if t - self.update_time > self.update_interval: self.update_time = t self.create_queues() self.expire_queues() # update the ip address every so often if t - self.update_ip_time > self.ip_update_interval: self.update_ip_time = t self.update_ipaddress() self.report_self() item = self.find_item() if item: self.logger.debug("Found url to crawl {url}" \ .format(url=item['url'])) try: req = Request(item['url']) except ValueError: # need absolute url # need better url validation here req = Request('http://' + item['url']) try: if 'callback' in item and item['callback'] is not None: req.callback = getattr(self.spider, item['callback']) except AttributeError: self.logger.warn("Unable to find callback method") try: if 'errback' in item and item['errback'] is not None: req.errback = getattr(self.spider, item['errback']) except AttributeError: self.logger.warn("Unable to find errback method") if 'meta' in item: item = item['meta'] # defaults not in schema if 'curdepth' not in item: item['curdepth'] = 0 if "retry_times" not in item: item['retry_times'] = 0 for key in list(item.keys()): req.meta[key] = item[key] # extra check to add items to request if 'useragent' in item and item['useragent'] is not None: req.headers['User-Agent'] = item['useragent'] if 'cookie' in item and item['cookie'] is not None: if isinstance(item['cookie'], dict): req.cookies = item['cookie'] elif isinstance(item['cookie'], basestring): req.cookies = self.parse_cookie(item['cookie']) return req return None
python
def next_request(self): ''' Logic to handle getting a new url request, from a bunch of different queues ''' t = time.time() # update the redis queues every so often if t - self.update_time > self.update_interval: self.update_time = t self.create_queues() self.expire_queues() # update the ip address every so often if t - self.update_ip_time > self.ip_update_interval: self.update_ip_time = t self.update_ipaddress() self.report_self() item = self.find_item() if item: self.logger.debug("Found url to crawl {url}" \ .format(url=item['url'])) try: req = Request(item['url']) except ValueError: # need absolute url # need better url validation here req = Request('http://' + item['url']) try: if 'callback' in item and item['callback'] is not None: req.callback = getattr(self.spider, item['callback']) except AttributeError: self.logger.warn("Unable to find callback method") try: if 'errback' in item and item['errback'] is not None: req.errback = getattr(self.spider, item['errback']) except AttributeError: self.logger.warn("Unable to find errback method") if 'meta' in item: item = item['meta'] # defaults not in schema if 'curdepth' not in item: item['curdepth'] = 0 if "retry_times" not in item: item['retry_times'] = 0 for key in list(item.keys()): req.meta[key] = item[key] # extra check to add items to request if 'useragent' in item and item['useragent'] is not None: req.headers['User-Agent'] = item['useragent'] if 'cookie' in item and item['cookie'] is not None: if isinstance(item['cookie'], dict): req.cookies = item['cookie'] elif isinstance(item['cookie'], basestring): req.cookies = self.parse_cookie(item['cookie']) return req return None
[ "def", "next_request", "(", "self", ")", ":", "t", "=", "time", ".", "time", "(", ")", "# update the redis queues every so often", "if", "t", "-", "self", ".", "update_time", ">", "self", ".", "update_interval", ":", "self", ".", "update_time", "=", "t", "...
Logic to handle getting a new url request, from a bunch of different queues
[ "Logic", "to", "handle", "getting", "a", "new", "url", "request", "from", "a", "bunch", "of", "different", "queues" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L484-L548
234,439
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
DistributedScheduler.parse_cookie
def parse_cookie(self, string): ''' Parses a cookie string like returned in a Set-Cookie header @param string: The cookie string @return: the cookie dict ''' results = re.findall('([^=]+)=([^\;]+);?\s?', string) my_dict = {} for item in results: my_dict[item[0]] = item[1] return my_dict
python
def parse_cookie(self, string): ''' Parses a cookie string like returned in a Set-Cookie header @param string: The cookie string @return: the cookie dict ''' results = re.findall('([^=]+)=([^\;]+);?\s?', string) my_dict = {} for item in results: my_dict[item[0]] = item[1] return my_dict
[ "def", "parse_cookie", "(", "self", ",", "string", ")", ":", "results", "=", "re", ".", "findall", "(", "'([^=]+)=([^\\;]+);?\\s?'", ",", "string", ")", "my_dict", "=", "{", "}", "for", "item", "in", "results", ":", "my_dict", "[", "item", "[", "0", "]...
Parses a cookie string like returned in a Set-Cookie header @param string: The cookie string @return: the cookie dict
[ "Parses", "a", "cookie", "string", "like", "returned", "in", "a", "Set", "-", "Cookie", "header" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L550-L561
234,440
istresearch/scrapy-cluster
crawler/crawling/pipelines.py
KafkaPipeline._clean_item
def _clean_item(self, item): ''' Cleans the item to be logged ''' item_copy = dict(item) del item_copy['body'] del item_copy['links'] del item_copy['response_headers'] del item_copy['request_headers'] del item_copy['status_code'] del item_copy['status_msg'] item_copy['action'] = 'ack' item_copy['logger'] = self.logger.name item_copy return item_copy
python
def _clean_item(self, item): ''' Cleans the item to be logged ''' item_copy = dict(item) del item_copy['body'] del item_copy['links'] del item_copy['response_headers'] del item_copy['request_headers'] del item_copy['status_code'] del item_copy['status_msg'] item_copy['action'] = 'ack' item_copy['logger'] = self.logger.name item_copy return item_copy
[ "def", "_clean_item", "(", "self", ",", "item", ")", ":", "item_copy", "=", "dict", "(", "item", ")", "del", "item_copy", "[", "'body'", "]", "del", "item_copy", "[", "'links'", "]", "del", "item_copy", "[", "'response_headers'", "]", "del", "item_copy", ...
Cleans the item to be logged
[ "Cleans", "the", "item", "to", "be", "logged" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/pipelines.py#L138-L153
234,441
istresearch/scrapy-cluster
crawler/crawling/pipelines.py
KafkaPipeline._kafka_success
def _kafka_success(self, item, spider, response): ''' Callback for successful send ''' item['success'] = True item = self._clean_item(item) item['spiderid'] = spider.name self.logger.info("Sent page to Kafka", item)
python
def _kafka_success(self, item, spider, response): ''' Callback for successful send ''' item['success'] = True item = self._clean_item(item) item['spiderid'] = spider.name self.logger.info("Sent page to Kafka", item)
[ "def", "_kafka_success", "(", "self", ",", "item", ",", "spider", ",", "response", ")", ":", "item", "[", "'success'", "]", "=", "True", "item", "=", "self", ".", "_clean_item", "(", "item", ")", "item", "[", "'spiderid'", "]", "=", "spider", ".", "n...
Callback for successful send
[ "Callback", "for", "successful", "send" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/pipelines.py#L155-L162
234,442
istresearch/scrapy-cluster
crawler/crawling/pipelines.py
KafkaPipeline._kafka_failure
def _kafka_failure(self, item, spider, response): ''' Callback for failed send ''' item['success'] = False item['exception'] = traceback.format_exc() item['spiderid'] = spider.name item = self._clean_item(item) self.logger.error("Failed to send page to Kafka", item)
python
def _kafka_failure(self, item, spider, response): ''' Callback for failed send ''' item['success'] = False item['exception'] = traceback.format_exc() item['spiderid'] = spider.name item = self._clean_item(item) self.logger.error("Failed to send page to Kafka", item)
[ "def", "_kafka_failure", "(", "self", ",", "item", ",", "spider", ",", "response", ")", ":", "item", "[", "'success'", "]", "=", "False", "item", "[", "'exception'", "]", "=", "traceback", ".", "format_exc", "(", ")", "item", "[", "'spiderid'", "]", "=...
Callback for failed send
[ "Callback", "for", "failed", "send" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/pipelines.py#L165-L173
234,443
istresearch/scrapy-cluster
crawler/crawling/spiders/redis_spider.py
RedisSpider.reconstruct_headers
def reconstruct_headers(self, response): """ Purpose of this method is to reconstruct the headers dictionary that is normally passed in with a "response" object from scrapy. Args: response: A scrapy response object Returns: A dictionary that mirrors the "response.headers" dictionary that is normally within a response object Raises: None Reason: Originally, there was a bug where the json.dumps() did not properly serialize the headers. This method is a way to circumvent the known issue """ header_dict = {} # begin reconstructing headers from scratch... for key in list(response.headers.keys()): key_item_list = [] key_list = response.headers.getlist(key) for item in key_list: key_item_list.append(item) header_dict[key] = key_item_list return header_dict
python
def reconstruct_headers(self, response): header_dict = {} # begin reconstructing headers from scratch... for key in list(response.headers.keys()): key_item_list = [] key_list = response.headers.getlist(key) for item in key_list: key_item_list.append(item) header_dict[key] = key_item_list return header_dict
[ "def", "reconstruct_headers", "(", "self", ",", "response", ")", ":", "header_dict", "=", "{", "}", "# begin reconstructing headers from scratch...", "for", "key", "in", "list", "(", "response", ".", "headers", ".", "keys", "(", ")", ")", ":", "key_item_list", ...
Purpose of this method is to reconstruct the headers dictionary that is normally passed in with a "response" object from scrapy. Args: response: A scrapy response object Returns: A dictionary that mirrors the "response.headers" dictionary that is normally within a response object Raises: None Reason: Originally, there was a bug where the json.dumps() did not properly serialize the headers. This method is a way to circumvent the known issue
[ "Purpose", "of", "this", "method", "is", "to", "reconstruct", "the", "headers", "dictionary", "that", "is", "normally", "passed", "in", "with", "a", "response", "object", "from", "scrapy", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/spiders/redis_spider.py#L36-L61
234,444
istresearch/scrapy-cluster
kafka-monitor/plugins/scraper_handler.py
ScraperHandler.handle
def handle(self, dict): ''' Processes a vaild crawl request @param dict: a valid dictionary object ''' # format key ex_res = self.extract(dict['url']) key = "{sid}:{dom}.{suf}:queue".format( sid=dict['spiderid'], dom=ex_res.domain, suf=ex_res.suffix) val = ujson.dumps(dict) # shortcut to shove stuff into the priority queue self.redis_conn.zadd(key, val, -dict['priority']) # if timeout crawl, add value to redis if 'expires' in dict and dict['expires'] != 0: key = "timeout:{sid}:{appid}:{crawlid}".format( sid=dict['spiderid'], appid=dict['appid'], crawlid=dict['crawlid']) self.redis_conn.set(key, dict['expires']) # log success dict['parsed'] = True dict['valid'] = True self.logger.info('Added crawl to Redis', extra=dict)
python
def handle(self, dict): ''' Processes a vaild crawl request @param dict: a valid dictionary object ''' # format key ex_res = self.extract(dict['url']) key = "{sid}:{dom}.{suf}:queue".format( sid=dict['spiderid'], dom=ex_res.domain, suf=ex_res.suffix) val = ujson.dumps(dict) # shortcut to shove stuff into the priority queue self.redis_conn.zadd(key, val, -dict['priority']) # if timeout crawl, add value to redis if 'expires' in dict and dict['expires'] != 0: key = "timeout:{sid}:{appid}:{crawlid}".format( sid=dict['spiderid'], appid=dict['appid'], crawlid=dict['crawlid']) self.redis_conn.set(key, dict['expires']) # log success dict['parsed'] = True dict['valid'] = True self.logger.info('Added crawl to Redis', extra=dict)
[ "def", "handle", "(", "self", ",", "dict", ")", ":", "# format key", "ex_res", "=", "self", ".", "extract", "(", "dict", "[", "'url'", "]", ")", "key", "=", "\"{sid}:{dom}.{suf}:queue\"", ".", "format", "(", "sid", "=", "dict", "[", "'spiderid'", "]", ...
Processes a vaild crawl request @param dict: a valid dictionary object
[ "Processes", "a", "vaild", "crawl", "request" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/plugins/scraper_handler.py#L31-L60
234,445
istresearch/scrapy-cluster
utils/scutils/method_timer.py
MethodTimer.timeout
def timeout(timeout_time, default): ''' Decorate a method so it is required to execute in a given time period, or return a default value. ''' def timeout_function(f): def f2(*args): def timeout_handler(signum, frame): raise MethodTimer.DecoratorTimeout() old_handler = signal.signal(signal.SIGALRM, timeout_handler) # triger alarm in timeout_time seconds signal.alarm(timeout_time) try: retval = f(*args) except MethodTimer.DecoratorTimeout: return default finally: signal.signal(signal.SIGALRM, old_handler) signal.alarm(0) return retval return f2 return timeout_function
python
def timeout(timeout_time, default): ''' Decorate a method so it is required to execute in a given time period, or return a default value. ''' def timeout_function(f): def f2(*args): def timeout_handler(signum, frame): raise MethodTimer.DecoratorTimeout() old_handler = signal.signal(signal.SIGALRM, timeout_handler) # triger alarm in timeout_time seconds signal.alarm(timeout_time) try: retval = f(*args) except MethodTimer.DecoratorTimeout: return default finally: signal.signal(signal.SIGALRM, old_handler) signal.alarm(0) return retval return f2 return timeout_function
[ "def", "timeout", "(", "timeout_time", ",", "default", ")", ":", "def", "timeout_function", "(", "f", ")", ":", "def", "f2", "(", "*", "args", ")", ":", "def", "timeout_handler", "(", "signum", ",", "frame", ")", ":", "raise", "MethodTimer", ".", "Deco...
Decorate a method so it is required to execute in a given time period, or return a default value.
[ "Decorate", "a", "method", "so", "it", "is", "required", "to", "execute", "in", "a", "given", "time", "period", "or", "return", "a", "default", "value", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/method_timer.py#L33-L55
234,446
istresearch/scrapy-cluster
crawler/crawling/redis_stats_middleware.py
RedisStatsMiddleware._setup_stats_status_codes
def _setup_stats_status_codes(self, spider_name): ''' Sets up the status code stats collectors ''' self.stats_dict[spider_name] = { 'status_codes': {} } self.stats_dict[spider_name]['status_codes'] = {} hostname = self._get_hostname() # we chose to handle 504's here as well as in the middleware # in case the middleware is disabled for status_code in self.settings['STATS_RESPONSE_CODES']: temp_key = 'stats:crawler:{h}:{n}:{s}'.format( h=hostname, n=spider_name, s=status_code) self.stats_dict[spider_name]['status_codes'][status_code] = {} for item in self.settings['STATS_TIMES']: try: time = getattr(StatsCollector, item) self.stats_dict[spider_name]['status_codes'][status_code][time] = StatsCollector \ .get_rolling_time_window( redis_conn=self.redis_conn, key='{k}:{t}'.format(k=temp_key, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.logger.debug("Set up status code {s}, {n} spider,"\ " host {h} Stats Collector '{i}'"\ .format(h=hostname, n=spider_name, s=status_code, i=item)) except AttributeError as e: self.logger.warning("Unable to find Stats Time '{s}'"\ .format(s=item)) total = StatsCollector.get_hll_counter(redis_conn=self.redis_conn, key='{k}:lifetime'.format(k=temp_key), cycle_time=self.settings['STATS_CYCLE'], roll=False) self.logger.debug("Set up status code {s}, {n} spider,"\ "host {h} Stats Collector 'lifetime'"\ .format(h=hostname, n=spider_name, s=status_code)) self.stats_dict[spider_name]['status_codes'][status_code]['lifetime'] = total
python
def _setup_stats_status_codes(self, spider_name): ''' Sets up the status code stats collectors ''' self.stats_dict[spider_name] = { 'status_codes': {} } self.stats_dict[spider_name]['status_codes'] = {} hostname = self._get_hostname() # we chose to handle 504's here as well as in the middleware # in case the middleware is disabled for status_code in self.settings['STATS_RESPONSE_CODES']: temp_key = 'stats:crawler:{h}:{n}:{s}'.format( h=hostname, n=spider_name, s=status_code) self.stats_dict[spider_name]['status_codes'][status_code] = {} for item in self.settings['STATS_TIMES']: try: time = getattr(StatsCollector, item) self.stats_dict[spider_name]['status_codes'][status_code][time] = StatsCollector \ .get_rolling_time_window( redis_conn=self.redis_conn, key='{k}:{t}'.format(k=temp_key, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.logger.debug("Set up status code {s}, {n} spider,"\ " host {h} Stats Collector '{i}'"\ .format(h=hostname, n=spider_name, s=status_code, i=item)) except AttributeError as e: self.logger.warning("Unable to find Stats Time '{s}'"\ .format(s=item)) total = StatsCollector.get_hll_counter(redis_conn=self.redis_conn, key='{k}:lifetime'.format(k=temp_key), cycle_time=self.settings['STATS_CYCLE'], roll=False) self.logger.debug("Set up status code {s}, {n} spider,"\ "host {h} Stats Collector 'lifetime'"\ .format(h=hostname, n=spider_name, s=status_code)) self.stats_dict[spider_name]['status_codes'][status_code]['lifetime'] = total
[ "def", "_setup_stats_status_codes", "(", "self", ",", "spider_name", ")", ":", "self", ".", "stats_dict", "[", "spider_name", "]", "=", "{", "'status_codes'", ":", "{", "}", "}", "self", ".", "stats_dict", "[", "spider_name", "]", "[", "'status_codes'", "]",...
Sets up the status code stats collectors
[ "Sets", "up", "the", "status", "code", "stats", "collectors" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/redis_stats_middleware.py#L66-L104
234,447
istresearch/scrapy-cluster
redis-monitor/plugins/kafka_base_monitor.py
KafkaBaseMonitor.setup
def setup(self, settings): ''' Setup the handler @param settings: The loaded settings file ''' self.producer = self._create_producer(settings) self.topic_prefix = settings['KAFKA_TOPIC_PREFIX'] self.use_appid_topics = settings['KAFKA_APPID_TOPICS'] self.logger.debug("Successfully connected to Kafka in {name}" .format(name=self.__class__.__name__))
python
def setup(self, settings): ''' Setup the handler @param settings: The loaded settings file ''' self.producer = self._create_producer(settings) self.topic_prefix = settings['KAFKA_TOPIC_PREFIX'] self.use_appid_topics = settings['KAFKA_APPID_TOPICS'] self.logger.debug("Successfully connected to Kafka in {name}" .format(name=self.__class__.__name__))
[ "def", "setup", "(", "self", ",", "settings", ")", ":", "self", ".", "producer", "=", "self", ".", "_create_producer", "(", "settings", ")", "self", ".", "topic_prefix", "=", "settings", "[", "'KAFKA_TOPIC_PREFIX'", "]", "self", ".", "use_appid_topics", "=",...
Setup the handler @param settings: The loaded settings file
[ "Setup", "the", "handler" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/kafka_base_monitor.py#L19-L31
234,448
istresearch/scrapy-cluster
redis-monitor/plugins/kafka_base_monitor.py
KafkaBaseMonitor._send_to_kafka
def _send_to_kafka(self, master): ''' Sends the message back to Kafka @param master: the final dict to send @returns: True if successfully sent to kafka ''' appid_topic = "{prefix}.outbound_{appid}".format( prefix=self.topic_prefix, appid=master['appid']) firehose_topic = "{prefix}.outbound_firehose".format( prefix=self.topic_prefix) try: # dont want logger in outbound kafka message if self.use_appid_topics: f1 = self.producer.send(appid_topic, master) f1.add_callback(self._kafka_success) f1.add_errback(self._kafka_failure) f2 = self.producer.send(firehose_topic, master) f2.add_callback(self._kafka_success) f2.add_errback(self._kafka_failure) return True except Exception as ex: message = "An exception '{0}' occured while sending a message " \ "to kafka. Arguments:\n{1!r}" \ .format(type(ex).__name__, ex.args) self.logger.error(message) return False
python
def _send_to_kafka(self, master): ''' Sends the message back to Kafka @param master: the final dict to send @returns: True if successfully sent to kafka ''' appid_topic = "{prefix}.outbound_{appid}".format( prefix=self.topic_prefix, appid=master['appid']) firehose_topic = "{prefix}.outbound_firehose".format( prefix=self.topic_prefix) try: # dont want logger in outbound kafka message if self.use_appid_topics: f1 = self.producer.send(appid_topic, master) f1.add_callback(self._kafka_success) f1.add_errback(self._kafka_failure) f2 = self.producer.send(firehose_topic, master) f2.add_callback(self._kafka_success) f2.add_errback(self._kafka_failure) return True except Exception as ex: message = "An exception '{0}' occured while sending a message " \ "to kafka. Arguments:\n{1!r}" \ .format(type(ex).__name__, ex.args) self.logger.error(message) return False
[ "def", "_send_to_kafka", "(", "self", ",", "master", ")", ":", "appid_topic", "=", "\"{prefix}.outbound_{appid}\"", ".", "format", "(", "prefix", "=", "self", ".", "topic_prefix", ",", "appid", "=", "master", "[", "'appid'", "]", ")", "firehose_topic", "=", ...
Sends the message back to Kafka @param master: the final dict to send @returns: True if successfully sent to kafka
[ "Sends", "the", "message", "back", "to", "Kafka" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/kafka_base_monitor.py#L66-L94
234,449
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor._load_plugins
def _load_plugins(self): ''' Sets up all plugins, defaults and settings.py ''' plugins = self.settings['PLUGINS'] self.plugins_dict = {} for key in plugins: # skip loading the plugin if its value is None if plugins[key] is None: continue # valid plugin, import and setup self.logger.debug("Trying to load plugin {cls}".format(cls=key)) the_class = self._import_class(key) instance = the_class() instance._set_logger(self.logger) if not self.unit_test: instance.setup(self.settings) the_schema = None with open(self.settings['PLUGIN_DIR'] + instance.schema) as the_file: the_schema = json.load(the_file) mini = {} mini['instance'] = instance mini['schema'] = the_schema self.logger.debug("Successfully loaded plugin {cls}".format(cls=key)) self.plugins_dict[plugins[key]] = mini self.plugins_dict = OrderedDict(sorted(list(self.plugins_dict.items()), key=lambda t: t[0]))
python
def _load_plugins(self): ''' Sets up all plugins, defaults and settings.py ''' plugins = self.settings['PLUGINS'] self.plugins_dict = {} for key in plugins: # skip loading the plugin if its value is None if plugins[key] is None: continue # valid plugin, import and setup self.logger.debug("Trying to load plugin {cls}".format(cls=key)) the_class = self._import_class(key) instance = the_class() instance._set_logger(self.logger) if not self.unit_test: instance.setup(self.settings) the_schema = None with open(self.settings['PLUGIN_DIR'] + instance.schema) as the_file: the_schema = json.load(the_file) mini = {} mini['instance'] = instance mini['schema'] = the_schema self.logger.debug("Successfully loaded plugin {cls}".format(cls=key)) self.plugins_dict[plugins[key]] = mini self.plugins_dict = OrderedDict(sorted(list(self.plugins_dict.items()), key=lambda t: t[0]))
[ "def", "_load_plugins", "(", "self", ")", ":", "plugins", "=", "self", ".", "settings", "[", "'PLUGINS'", "]", "self", ".", "plugins_dict", "=", "{", "}", "for", "key", "in", "plugins", ":", "# skip loading the plugin if its value is None", "if", "plugins", "[...
Sets up all plugins, defaults and settings.py
[ "Sets", "up", "all", "plugins", "defaults", "and", "settings", ".", "py" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L62-L91
234,450
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor._setup_stats
def _setup_stats(self): ''' Sets up the stats collection ''' self.stats_dict = {} redis_conn = redis.Redis(host=self.settings['REDIS_HOST'], port=self.settings['REDIS_PORT'], db=self.settings.get('REDIS_DB')) try: redis_conn.info() self.logger.debug("Connected to Redis in StatsCollector Setup") self.redis_conn = redis_conn except ConnectionError: self.logger.warn("Failed to connect to Redis in StatsCollector" " Setup, no stats will be collected") return if self.settings['STATS_TOTAL']: self._setup_stats_total(redis_conn) if self.settings['STATS_PLUGINS']: self._setup_stats_plugins(redis_conn)
python
def _setup_stats(self): ''' Sets up the stats collection ''' self.stats_dict = {} redis_conn = redis.Redis(host=self.settings['REDIS_HOST'], port=self.settings['REDIS_PORT'], db=self.settings.get('REDIS_DB')) try: redis_conn.info() self.logger.debug("Connected to Redis in StatsCollector Setup") self.redis_conn = redis_conn except ConnectionError: self.logger.warn("Failed to connect to Redis in StatsCollector" " Setup, no stats will be collected") return if self.settings['STATS_TOTAL']: self._setup_stats_total(redis_conn) if self.settings['STATS_PLUGINS']: self._setup_stats_plugins(redis_conn)
[ "def", "_setup_stats", "(", "self", ")", ":", "self", ".", "stats_dict", "=", "{", "}", "redis_conn", "=", "redis", ".", "Redis", "(", "host", "=", "self", ".", "settings", "[", "'REDIS_HOST'", "]", ",", "port", "=", "self", ".", "settings", "[", "'R...
Sets up the stats collection
[ "Sets", "up", "the", "stats", "collection" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L118-L141
234,451
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor._setup_stats_total
def _setup_stats_total(self, redis_conn): ''' Sets up the total stats collectors @param redis_conn: the redis connection ''' self.stats_dict['total'] = {} self.stats_dict['fail'] = {} temp_key1 = 'stats:kafka-monitor:total' temp_key2 = 'stats:kafka-monitor:fail' for item in self.settings['STATS_TIMES']: try: time = getattr(StatsCollector, item) self.stats_dict['total'][time] = StatsCollector \ .get_rolling_time_window( redis_conn=redis_conn, key='{k}:{t}'.format(k=temp_key1, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.stats_dict['fail'][time] = StatsCollector \ .get_rolling_time_window( redis_conn=redis_conn, key='{k}:{t}'.format(k=temp_key2, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.logger.debug("Set up total/fail Stats Collector '{i}'"\ .format(i=item)) except AttributeError as e: self.logger.warning("Unable to find Stats Time '{s}'"\ .format(s=item)) total1 = StatsCollector.get_hll_counter(redis_conn=redis_conn, key='{k}:lifetime'.format(k=temp_key1), cycle_time=self.settings['STATS_CYCLE'], roll=False) total2 = StatsCollector.get_hll_counter(redis_conn=redis_conn, key='{k}:lifetime'.format(k=temp_key2), cycle_time=self.settings['STATS_CYCLE'], roll=False) self.logger.debug("Set up total/fail Stats Collector 'lifetime'") self.stats_dict['total']['lifetime'] = total1 self.stats_dict['fail']['lifetime'] = total2
python
def _setup_stats_total(self, redis_conn): ''' Sets up the total stats collectors @param redis_conn: the redis connection ''' self.stats_dict['total'] = {} self.stats_dict['fail'] = {} temp_key1 = 'stats:kafka-monitor:total' temp_key2 = 'stats:kafka-monitor:fail' for item in self.settings['STATS_TIMES']: try: time = getattr(StatsCollector, item) self.stats_dict['total'][time] = StatsCollector \ .get_rolling_time_window( redis_conn=redis_conn, key='{k}:{t}'.format(k=temp_key1, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.stats_dict['fail'][time] = StatsCollector \ .get_rolling_time_window( redis_conn=redis_conn, key='{k}:{t}'.format(k=temp_key2, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.logger.debug("Set up total/fail Stats Collector '{i}'"\ .format(i=item)) except AttributeError as e: self.logger.warning("Unable to find Stats Time '{s}'"\ .format(s=item)) total1 = StatsCollector.get_hll_counter(redis_conn=redis_conn, key='{k}:lifetime'.format(k=temp_key1), cycle_time=self.settings['STATS_CYCLE'], roll=False) total2 = StatsCollector.get_hll_counter(redis_conn=redis_conn, key='{k}:lifetime'.format(k=temp_key2), cycle_time=self.settings['STATS_CYCLE'], roll=False) self.logger.debug("Set up total/fail Stats Collector 'lifetime'") self.stats_dict['total']['lifetime'] = total1 self.stats_dict['fail']['lifetime'] = total2
[ "def", "_setup_stats_total", "(", "self", ",", "redis_conn", ")", ":", "self", ".", "stats_dict", "[", "'total'", "]", "=", "{", "}", "self", ".", "stats_dict", "[", "'fail'", "]", "=", "{", "}", "temp_key1", "=", "'stats:kafka-monitor:total'", "temp_key2", ...
Sets up the total stats collectors @param redis_conn: the redis connection
[ "Sets", "up", "the", "total", "stats", "collectors" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L143-L183
234,452
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor._main_loop
def _main_loop(self): ''' Continuous loop that reads from a kafka topic and tries to validate incoming messages ''' self.logger.debug("Processing messages") old_time = 0 while True: self._process_messages() if self.settings['STATS_DUMP'] != 0: new_time = int(old_div(time.time(), self.settings['STATS_DUMP'])) # only log every X seconds if new_time != old_time: self._dump_stats() old_time = new_time self._report_self() time.sleep(self.settings['SLEEP_TIME'])
python
def _main_loop(self): ''' Continuous loop that reads from a kafka topic and tries to validate incoming messages ''' self.logger.debug("Processing messages") old_time = 0 while True: self._process_messages() if self.settings['STATS_DUMP'] != 0: new_time = int(old_div(time.time(), self.settings['STATS_DUMP'])) # only log every X seconds if new_time != old_time: self._dump_stats() old_time = new_time self._report_self() time.sleep(self.settings['SLEEP_TIME'])
[ "def", "_main_loop", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Processing messages\"", ")", "old_time", "=", "0", "while", "True", ":", "self", ".", "_process_messages", "(", ")", "if", "self", ".", "settings", "[", "'STATS_DUMP'"...
Continuous loop that reads from a kafka topic and tries to validate incoming messages
[ "Continuous", "loop", "that", "reads", "from", "a", "kafka", "topic", "and", "tries", "to", "validate", "incoming", "messages" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L247-L264
234,453
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor._dump_stats
def _dump_stats(self): ''' Dumps the stats out ''' extras = {} if 'total' in self.stats_dict: self.logger.debug("Compiling total/fail dump stats") for key in self.stats_dict['total']: final = 'total_{t}'.format(t=key) extras[final] = self.stats_dict['total'][key].value() for key in self.stats_dict['fail']: final = 'fail_{t}'.format(t=key) extras[final] = self.stats_dict['fail'][key].value() if 'plugins' in self.stats_dict: self.logger.debug("Compiling plugin dump stats") for name in self.stats_dict['plugins']: for key in self.stats_dict['plugins'][name]: final = 'plugin_{n}_{t}'.format(n=name, t=key) extras[final] = self.stats_dict['plugins'][name][key].value() if not self.logger.json: self.logger.info('Kafka Monitor Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Kafka Monitor Stats Dump', extra=extras)
python
def _dump_stats(self): ''' Dumps the stats out ''' extras = {} if 'total' in self.stats_dict: self.logger.debug("Compiling total/fail dump stats") for key in self.stats_dict['total']: final = 'total_{t}'.format(t=key) extras[final] = self.stats_dict['total'][key].value() for key in self.stats_dict['fail']: final = 'fail_{t}'.format(t=key) extras[final] = self.stats_dict['fail'][key].value() if 'plugins' in self.stats_dict: self.logger.debug("Compiling plugin dump stats") for name in self.stats_dict['plugins']: for key in self.stats_dict['plugins'][name]: final = 'plugin_{n}_{t}'.format(n=name, t=key) extras[final] = self.stats_dict['plugins'][name][key].value() if not self.logger.json: self.logger.info('Kafka Monitor Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Kafka Monitor Stats Dump', extra=extras)
[ "def", "_dump_stats", "(", "self", ")", ":", "extras", "=", "{", "}", "if", "'total'", "in", "self", ".", "stats_dict", ":", "self", ".", "logger", ".", "debug", "(", "\"Compiling total/fail dump stats\"", ")", "for", "key", "in", "self", ".", "stats_dict"...
Dumps the stats out
[ "Dumps", "the", "stats", "out" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L367-L392
234,454
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor.run
def run(self): ''' Set up and run ''' self._setup_kafka() self._load_plugins() self._setup_stats() self._main_loop()
python
def run(self): ''' Set up and run ''' self._setup_kafka() self._load_plugins() self._setup_stats() self._main_loop()
[ "def", "run", "(", "self", ")", ":", "self", ".", "_setup_kafka", "(", ")", "self", ".", "_load_plugins", "(", ")", "self", ".", "_setup_stats", "(", ")", "self", ".", "_main_loop", "(", ")" ]
Set up and run
[ "Set", "up", "and", "run" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L394-L401
234,455
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor._report_self
def _report_self(self): ''' Reports the kafka monitor uuid to redis ''' key = "stats:kafka-monitor:self:{m}:{u}".format( m=socket.gethostname(), u=self.my_uuid) self.redis_conn.set(key, time.time()) self.redis_conn.expire(key, self.settings['HEARTBEAT_TIMEOUT'])
python
def _report_self(self): ''' Reports the kafka monitor uuid to redis ''' key = "stats:kafka-monitor:self:{m}:{u}".format( m=socket.gethostname(), u=self.my_uuid) self.redis_conn.set(key, time.time()) self.redis_conn.expire(key, self.settings['HEARTBEAT_TIMEOUT'])
[ "def", "_report_self", "(", "self", ")", ":", "key", "=", "\"stats:kafka-monitor:self:{m}:{u}\"", ".", "format", "(", "m", "=", "socket", ".", "gethostname", "(", ")", ",", "u", "=", "self", ".", "my_uuid", ")", "self", ".", "redis_conn", ".", "set", "("...
Reports the kafka monitor uuid to redis
[ "Reports", "the", "kafka", "monitor", "uuid", "to", "redis" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L403-L411
234,456
istresearch/scrapy-cluster
kafka-monitor/kafka_monitor.py
KafkaMonitor.feed
def feed(self, json_item): ''' Feeds a json item into the Kafka topic @param json_item: The loaded json object ''' @MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False) def _feed(json_item): producer = self._create_producer() topic = self.settings['KAFKA_INCOMING_TOPIC'] if not self.logger.json: self.logger.info('Feeding JSON into {0}\n{1}'.format( topic, json.dumps(json_item, indent=4))) else: self.logger.info('Feeding JSON into {0}\n'.format(topic), extra={'value': json_item}) if producer is not None: producer.send(topic, json_item) producer.flush() producer.close(timeout=10) return True else: return False result = _feed(json_item) if result: self.logger.info("Successfully fed item to Kafka") else: self.logger.error("Failed to feed item into Kafka")
python
def feed(self, json_item): ''' Feeds a json item into the Kafka topic @param json_item: The loaded json object ''' @MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False) def _feed(json_item): producer = self._create_producer() topic = self.settings['KAFKA_INCOMING_TOPIC'] if not self.logger.json: self.logger.info('Feeding JSON into {0}\n{1}'.format( topic, json.dumps(json_item, indent=4))) else: self.logger.info('Feeding JSON into {0}\n'.format(topic), extra={'value': json_item}) if producer is not None: producer.send(topic, json_item) producer.flush() producer.close(timeout=10) return True else: return False result = _feed(json_item) if result: self.logger.info("Successfully fed item to Kafka") else: self.logger.error("Failed to feed item into Kafka")
[ "def", "feed", "(", "self", ",", "json_item", ")", ":", "@", "MethodTimer", ".", "timeout", "(", "self", ".", "settings", "[", "'KAFKA_FEED_TIMEOUT'", "]", ",", "False", ")", "def", "_feed", "(", "json_item", ")", ":", "producer", "=", "self", ".", "_c...
Feeds a json item into the Kafka topic @param json_item: The loaded json object
[ "Feeds", "a", "json", "item", "into", "the", "Kafka", "topic" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L413-L443
234,457
istresearch/scrapy-cluster
redis-monitor/plugins/expire_monitor.py
ExpireMonitor.check_precondition
def check_precondition(self, key, value): ''' Override to check for timeout ''' timeout = float(value) curr_time = self.get_current_time() if curr_time > timeout: return True return False
python
def check_precondition(self, key, value): ''' Override to check for timeout ''' timeout = float(value) curr_time = self.get_current_time() if curr_time > timeout: return True return False
[ "def", "check_precondition", "(", "self", ",", "key", ",", "value", ")", ":", "timeout", "=", "float", "(", "value", ")", "curr_time", "=", "self", ".", "get_current_time", "(", ")", "if", "curr_time", ">", "timeout", ":", "return", "True", "return", "Fa...
Override to check for timeout
[ "Override", "to", "check", "for", "timeout" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/expire_monitor.py#L18-L26
234,458
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
StatsCollector.get_time_window
def get_time_window(self, redis_conn=None, host='localhost', port=6379, key='time_window_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): ''' Generate a new TimeWindow Useful for collecting number of hits generated between certain times @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep ''' counter = TimeWindow(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
python
def get_time_window(self, redis_conn=None, host='localhost', port=6379, key='time_window_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): ''' Generate a new TimeWindow Useful for collecting number of hits generated between certain times @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep ''' counter = TimeWindow(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
[ "def", "get_time_window", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ",", "key", "=", "'time_window_counter'", ",", "cycle_time", "=", "5", ",", "start_time", "=", "None", ",", "window", "=", "...
Generate a new TimeWindow Useful for collecting number of hits generated between certain times @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep
[ "Generate", "a", "new", "TimeWindow", "Useful", "for", "collecting", "number", "of", "hits", "generated", "between", "certain", "times" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L43-L67
234,459
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
StatsCollector.get_rolling_time_window
def get_rolling_time_window(self, redis_conn=None, host='localhost', port=6379, key='rolling_time_window_counter', cycle_time=5, window=SECONDS_1_HOUR): ''' Generate a new RollingTimeWindow Useful for collect data about the number of hits in the past X seconds @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param window: the number of seconds behind now() to keep data for ''' counter = RollingTimeWindow(key=key, cycle_time=cycle_time, window=window) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
python
def get_rolling_time_window(self, redis_conn=None, host='localhost', port=6379, key='rolling_time_window_counter', cycle_time=5, window=SECONDS_1_HOUR): ''' Generate a new RollingTimeWindow Useful for collect data about the number of hits in the past X seconds @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param window: the number of seconds behind now() to keep data for ''' counter = RollingTimeWindow(key=key, cycle_time=cycle_time, window=window) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
[ "def", "get_rolling_time_window", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ",", "key", "=", "'rolling_time_window_counter'", ",", "cycle_time", "=", "5", ",", "window", "=", "SECONDS_1_HOUR", ")", ...
Generate a new RollingTimeWindow Useful for collect data about the number of hits in the past X seconds @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param window: the number of seconds behind now() to keep data for
[ "Generate", "a", "new", "RollingTimeWindow", "Useful", "for", "collect", "data", "about", "the", "number", "of", "hits", "in", "the", "past", "X", "seconds" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L70-L87
234,460
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
StatsCollector.get_counter
def get_counter(self, redis_conn=None, host='localhost', port=6379, key='counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12, start_at=0): ''' Generate a new Counter Useful for generic distributed counters @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep @param start_at: The integer to start counting at ''' counter = Counter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
python
def get_counter(self, redis_conn=None, host='localhost', port=6379, key='counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12, start_at=0): ''' Generate a new Counter Useful for generic distributed counters @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep @param start_at: The integer to start counting at ''' counter = Counter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
[ "def", "get_counter", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ",", "key", "=", "'counter'", ",", "cycle_time", "=", "5", ",", "start_time", "=", "None", ",", "window", "=", "SECONDS_1_HOUR",...
Generate a new Counter Useful for generic distributed counters @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep @param start_at: The integer to start counting at
[ "Generate", "a", "new", "Counter", "Useful", "for", "generic", "distributed", "counters" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L90-L114
234,461
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
StatsCollector.get_unique_counter
def get_unique_counter(self, redis_conn=None, host='localhost', port=6379, key='unique_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): ''' Generate a new UniqueCounter. Useful for exactly counting unique objects @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep ''' counter = UniqueCounter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
python
def get_unique_counter(self, redis_conn=None, host='localhost', port=6379, key='unique_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): ''' Generate a new UniqueCounter. Useful for exactly counting unique objects @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep ''' counter = UniqueCounter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
[ "def", "get_unique_counter", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ",", "key", "=", "'unique_counter'", ",", "cycle_time", "=", "5", ",", "start_time", "=", "None", ",", "window", "=", "SE...
Generate a new UniqueCounter. Useful for exactly counting unique objects @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep
[ "Generate", "a", "new", "UniqueCounter", ".", "Useful", "for", "exactly", "counting", "unique", "objects" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L117-L140
234,462
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
StatsCollector.get_hll_counter
def get_hll_counter(self, redis_conn=None, host='localhost', port=6379, key='hyperloglog_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): ''' Generate a new HyperLogLogCounter. Useful for approximating extremely large counts of unique items @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep ''' counter = HyperLogLogCounter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
python
def get_hll_counter(self, redis_conn=None, host='localhost', port=6379, key='hyperloglog_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): ''' Generate a new HyperLogLogCounter. Useful for approximating extremely large counts of unique items @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep ''' counter = HyperLogLogCounter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
[ "def", "get_hll_counter", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ",", "key", "=", "'hyperloglog_counter'", ",", "cycle_time", "=", "5", ",", "start_time", "=", "None", ",", "window", "=", "...
Generate a new HyperLogLogCounter. Useful for approximating extremely large counts of unique items @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep
[ "Generate", "a", "new", "HyperLogLogCounter", ".", "Useful", "for", "approximating", "extremely", "large", "counts", "of", "unique", "items" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L143-L167
234,463
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
AbstractCounter.setup
def setup(self, redis_conn=None, host='localhost', port=6379): ''' Set up the redis connection ''' if redis_conn is None: if host is not None and port is not None: self.redis_conn = redis.Redis(host=host, port=port) else: raise Exception("Please specify some form of connection " "to Redis") else: self.redis_conn = redis_conn self.redis_conn.info()
python
def setup(self, redis_conn=None, host='localhost', port=6379): ''' Set up the redis connection ''' if redis_conn is None: if host is not None and port is not None: self.redis_conn = redis.Redis(host=host, port=port) else: raise Exception("Please specify some form of connection " "to Redis") else: self.redis_conn = redis_conn self.redis_conn.info()
[ "def", "setup", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ")", ":", "if", "redis_conn", "is", "None", ":", "if", "host", "is", "not", "None", "and", "port", "is", "not", "None", ":", "se...
Set up the redis connection
[ "Set", "up", "the", "redis", "connection" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L206-L219
234,464
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
ThreadedCounter.setup
def setup(self, redis_conn=None, host='localhost', port=6379): ''' Set up the counting manager class @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port ''' AbstractCounter.setup(self, redis_conn=redis_conn, host=host, port=port) self._threaded_start()
python
def setup(self, redis_conn=None, host='localhost', port=6379): ''' Set up the counting manager class @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port ''' AbstractCounter.setup(self, redis_conn=redis_conn, host=host, port=port) self._threaded_start()
[ "def", "setup", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ")", ":", "AbstractCounter", ".", "setup", "(", "self", ",", "redis_conn", "=", "redis_conn", ",", "host", "=", "host", ",", "port",...
Set up the counting manager class @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port
[ "Set", "up", "the", "counting", "manager", "class" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L300-L311
234,465
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
ThreadedCounter._threaded_start
def _threaded_start(self): ''' Spawns a worker thread to do the expiration checks ''' self.active = True self.thread = Thread(target=self._main_loop) self.thread.setDaemon(True) self.thread.start()
python
def _threaded_start(self): ''' Spawns a worker thread to do the expiration checks ''' self.active = True self.thread = Thread(target=self._main_loop) self.thread.setDaemon(True) self.thread.start()
[ "def", "_threaded_start", "(", "self", ")", ":", "self", ".", "active", "=", "True", "self", ".", "thread", "=", "Thread", "(", "target", "=", "self", ".", "_main_loop", ")", "self", ".", "thread", ".", "setDaemon", "(", "True", ")", "self", ".", "th...
Spawns a worker thread to do the expiration checks
[ "Spawns", "a", "worker", "thread", "to", "do", "the", "expiration", "checks" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L313-L320
234,466
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
ThreadedCounter._main_loop
def _main_loop(self): ''' Main loop for the stats collector ''' while self.active: self.expire() if self.roll and self.is_expired(): self.start_time = self.start_time + self.window self._set_key() self.purge_old() time.sleep(self.cycle_time) self._clean_up()
python
def _main_loop(self): ''' Main loop for the stats collector ''' while self.active: self.expire() if self.roll and self.is_expired(): self.start_time = self.start_time + self.window self._set_key() self.purge_old() time.sleep(self.cycle_time) self._clean_up()
[ "def", "_main_loop", "(", "self", ")", ":", "while", "self", ".", "active", ":", "self", ".", "expire", "(", ")", "if", "self", ".", "roll", "and", "self", ".", "is_expired", "(", ")", ":", "self", ".", "start_time", "=", "self", ".", "start_time", ...
Main loop for the stats collector
[ "Main", "loop", "for", "the", "stats", "collector" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L329-L340
234,467
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
ThreadedCounter._set_key
def _set_key(self): ''' sets the final key to be used currently ''' if self.roll: self.date = time.strftime(self.date_format, time.gmtime(self.start_time)) self.final_key = '{}:{}'.format(self.key, self.date) else: self.final_key = self.key
python
def _set_key(self): ''' sets the final key to be used currently ''' if self.roll: self.date = time.strftime(self.date_format, time.gmtime(self.start_time)) self.final_key = '{}:{}'.format(self.key, self.date) else: self.final_key = self.key
[ "def", "_set_key", "(", "self", ")", ":", "if", "self", ".", "roll", ":", "self", ".", "date", "=", "time", ".", "strftime", "(", "self", ".", "date_format", ",", "time", ".", "gmtime", "(", "self", ".", "start_time", ")", ")", "self", ".", "final_...
sets the final key to be used currently
[ "sets", "the", "final", "key", "to", "be", "used", "currently" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L348-L358
234,468
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
ThreadedCounter.is_expired
def is_expired(self): ''' Returns true if the time is beyond the window ''' if self.window is not None: return (self._time() - self.start_time) >= self.window return False
python
def is_expired(self): ''' Returns true if the time is beyond the window ''' if self.window is not None: return (self._time() - self.start_time) >= self.window return False
[ "def", "is_expired", "(", "self", ")", ":", "if", "self", ".", "window", "is", "not", "None", ":", "return", "(", "self", ".", "_time", "(", ")", "-", "self", ".", "start_time", ")", ">=", "self", ".", "window", "return", "False" ]
Returns true if the time is beyond the window
[ "Returns", "true", "if", "the", "time", "is", "beyond", "the", "window" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L360-L366
234,469
istresearch/scrapy-cluster
utils/scutils/stats_collector.py
ThreadedCounter.purge_old
def purge_old(self): ''' Removes keys that are beyond our keep_max limit ''' if self.keep_max is not None: keys = self.redis_conn.keys(self.get_key() + ':*') keys.sort(reverse=True) while len(keys) > self.keep_max: key = keys.pop() self.redis_conn.delete(key)
python
def purge_old(self): ''' Removes keys that are beyond our keep_max limit ''' if self.keep_max is not None: keys = self.redis_conn.keys(self.get_key() + ':*') keys.sort(reverse=True) while len(keys) > self.keep_max: key = keys.pop() self.redis_conn.delete(key)
[ "def", "purge_old", "(", "self", ")", ":", "if", "self", ".", "keep_max", "is", "not", "None", ":", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "self", ".", "get_key", "(", ")", "+", "':*'", ")", "keys", ".", "sort", "(", "reverse", ...
Removes keys that are beyond our keep_max limit
[ "Removes", "keys", "that", "are", "beyond", "our", "keep_max", "limit" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L368-L377
234,470
istresearch/scrapy-cluster
utils/scutils/redis_queue.py
Base._encode_item
def _encode_item(self, item): ''' Encode an item object @requires: The object be serializable ''' if self.encoding.__name__ == 'pickle': return self.encoding.dumps(item, protocol=-1) else: return self.encoding.dumps(item)
python
def _encode_item(self, item): ''' Encode an item object @requires: The object be serializable ''' if self.encoding.__name__ == 'pickle': return self.encoding.dumps(item, protocol=-1) else: return self.encoding.dumps(item)
[ "def", "_encode_item", "(", "self", ",", "item", ")", ":", "if", "self", ".", "encoding", ".", "__name__", "==", "'pickle'", ":", "return", "self", ".", "encoding", ".", "dumps", "(", "item", ",", "protocol", "=", "-", "1", ")", "else", ":", "return"...
Encode an item object @requires: The object be serializable
[ "Encode", "an", "item", "object" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_queue.py#L35-L44
234,471
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.threaded_start
def threaded_start(self, no_init=False): ''' Spawns a worker thread to set up the zookeeper connection ''' thread = Thread(target=self.init_connections, kwargs={ 'no_init': no_init}) thread.setDaemon(True) thread.start() thread.join()
python
def threaded_start(self, no_init=False): ''' Spawns a worker thread to set up the zookeeper connection ''' thread = Thread(target=self.init_connections, kwargs={ 'no_init': no_init}) thread.setDaemon(True) thread.start() thread.join()
[ "def", "threaded_start", "(", "self", ",", "no_init", "=", "False", ")", ":", "thread", "=", "Thread", "(", "target", "=", "self", ".", "init_connections", ",", "kwargs", "=", "{", "'no_init'", ":", "no_init", "}", ")", "thread", ".", "setDaemon", "(", ...
Spawns a worker thread to set up the zookeeper connection
[ "Spawns", "a", "worker", "thread", "to", "set", "up", "the", "zookeeper", "connection" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L77-L85
234,472
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.init_connections
def init_connections(self, no_init=False): ''' Sets up the initial Kazoo Client and watches ''' success = False self.set_valid(False) if not no_init: if self.zoo_client: self.zoo_client.remove_listener(self.state_listener) self.old_data = '' self.old_pointed = '' while not success: try: if self.zoo_client is None: self.zoo_client = KazooClient(hosts=self.hosts) self.zoo_client.start() else: # self.zoo_client.stop() self.zoo_client._connection.connection_stopped.set() self.zoo_client.close() self.zoo_client = KazooClient(hosts=self.hosts) self.zoo_client.start() except Exception as e: log.error("ZKWatcher Exception: " + e.message) sleep(1) continue self.setup() success = self.update_file(self.my_file) sleep(5) else: self.setup() self.update_file(self.my_file)
python
def init_connections(self, no_init=False): ''' Sets up the initial Kazoo Client and watches ''' success = False self.set_valid(False) if not no_init: if self.zoo_client: self.zoo_client.remove_listener(self.state_listener) self.old_data = '' self.old_pointed = '' while not success: try: if self.zoo_client is None: self.zoo_client = KazooClient(hosts=self.hosts) self.zoo_client.start() else: # self.zoo_client.stop() self.zoo_client._connection.connection_stopped.set() self.zoo_client.close() self.zoo_client = KazooClient(hosts=self.hosts) self.zoo_client.start() except Exception as e: log.error("ZKWatcher Exception: " + e.message) sleep(1) continue self.setup() success = self.update_file(self.my_file) sleep(5) else: self.setup() self.update_file(self.my_file)
[ "def", "init_connections", "(", "self", ",", "no_init", "=", "False", ")", ":", "success", "=", "False", "self", ".", "set_valid", "(", "False", ")", "if", "not", "no_init", ":", "if", "self", ".", "zoo_client", ":", "self", ".", "zoo_client", ".", "re...
Sets up the initial Kazoo Client and watches
[ "Sets", "up", "the", "initial", "Kazoo", "Client", "and", "watches" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L87-L121
234,473
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.setup
def setup(self): ''' Ensures the path to the watched file exists and we have a state listener ''' self.zoo_client.add_listener(self.state_listener) if self.ensure: self.zoo_client.ensure_path(self.my_file)
python
def setup(self): ''' Ensures the path to the watched file exists and we have a state listener ''' self.zoo_client.add_listener(self.state_listener) if self.ensure: self.zoo_client.ensure_path(self.my_file)
[ "def", "setup", "(", "self", ")", ":", "self", ".", "zoo_client", ".", "add_listener", "(", "self", ".", "state_listener", ")", "if", "self", ".", "ensure", ":", "self", ".", "zoo_client", ".", "ensure_path", "(", "self", ".", "my_file", ")" ]
Ensures the path to the watched file exists and we have a state listener
[ "Ensures", "the", "path", "to", "the", "watched", "file", "exists", "and", "we", "have", "a", "state", "listener" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L123-L131
234,474
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.state_listener
def state_listener(self, state): ''' Restarts the session if we get anything besides CONNECTED ''' if state == KazooState.SUSPENDED: self.set_valid(False) self.call_error(self.BAD_CONNECTION) elif state == KazooState.LOST and not self.do_not_restart: self.threaded_start() elif state == KazooState.CONNECTED: # This is going to throw a SUSPENDED kazoo error # which will cause the sessions to be wiped and re established. # Used b/c of massive connection pool issues self.zoo_client.stop()
python
def state_listener(self, state): ''' Restarts the session if we get anything besides CONNECTED ''' if state == KazooState.SUSPENDED: self.set_valid(False) self.call_error(self.BAD_CONNECTION) elif state == KazooState.LOST and not self.do_not_restart: self.threaded_start() elif state == KazooState.CONNECTED: # This is going to throw a SUSPENDED kazoo error # which will cause the sessions to be wiped and re established. # Used b/c of massive connection pool issues self.zoo_client.stop()
[ "def", "state_listener", "(", "self", ",", "state", ")", ":", "if", "state", "==", "KazooState", ".", "SUSPENDED", ":", "self", ".", "set_valid", "(", "False", ")", "self", ".", "call_error", "(", "self", ".", "BAD_CONNECTION", ")", "elif", "state", "=="...
Restarts the session if we get anything besides CONNECTED
[ "Restarts", "the", "session", "if", "we", "get", "anything", "besides", "CONNECTED" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L133-L146
234,475
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.close
def close(self, kill_restart=True): ''' Use when you would like to close everything down @param kill_restart= Prevent kazoo restarting from occurring ''' self.do_not_restart = kill_restart self.zoo_client.stop() self.zoo_client.close()
python
def close(self, kill_restart=True): ''' Use when you would like to close everything down @param kill_restart= Prevent kazoo restarting from occurring ''' self.do_not_restart = kill_restart self.zoo_client.stop() self.zoo_client.close()
[ "def", "close", "(", "self", ",", "kill_restart", "=", "True", ")", ":", "self", ".", "do_not_restart", "=", "kill_restart", "self", ".", "zoo_client", ".", "stop", "(", ")", "self", ".", "zoo_client", ".", "close", "(", ")" ]
Use when you would like to close everything down @param kill_restart= Prevent kazoo restarting from occurring
[ "Use", "when", "you", "would", "like", "to", "close", "everything", "down" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L166-L173
234,476
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.get_file_contents
def get_file_contents(self, pointer=False): ''' Gets any file contents you care about. Defaults to the main file @param pointer: The the contents of the file pointer, not the pointed at file @return: A string of the contents ''' if self.pointer: if pointer: return self.old_pointed else: return self.old_data else: return self.old_data
python
def get_file_contents(self, pointer=False): ''' Gets any file contents you care about. Defaults to the main file @param pointer: The the contents of the file pointer, not the pointed at file @return: A string of the contents ''' if self.pointer: if pointer: return self.old_pointed else: return self.old_data else: return self.old_data
[ "def", "get_file_contents", "(", "self", ",", "pointer", "=", "False", ")", ":", "if", "self", ".", "pointer", ":", "if", "pointer", ":", "return", "self", ".", "old_pointed", "else", ":", "return", "self", ".", "old_data", "else", ":", "return", "self",...
Gets any file contents you care about. Defaults to the main file @param pointer: The the contents of the file pointer, not the pointed at file @return: A string of the contents
[ "Gets", "any", "file", "contents", "you", "care", "about", ".", "Defaults", "to", "the", "main", "file" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L175-L188
234,477
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.update_file
def update_file(self, path): ''' Updates the file watcher and calls the appropriate method for results @return: False if we need to keep trying the connection ''' try: # grab the file result, stat = self.zoo_client.get(path, watch=self.watch_file) except ZookeeperError: self.set_valid(False) self.call_error(self.INVALID_GET) return False if self.pointer: if result is not None and len(result) > 0: self.pointed_at_expired = False # file is a pointer, go update and watch other file self.point_path = result if self.compare_pointer(result): self.update_pointed() else: self.pointed_at_expired = True self.old_pointed = '' self.old_data = '' self.set_valid(False) self.call_error(self.INVALID_PATH) else: # file is not a pointer, return contents if self.compare_data(result): self.call_config(result) self.set_valid(True) return True
python
def update_file(self, path): ''' Updates the file watcher and calls the appropriate method for results @return: False if we need to keep trying the connection ''' try: # grab the file result, stat = self.zoo_client.get(path, watch=self.watch_file) except ZookeeperError: self.set_valid(False) self.call_error(self.INVALID_GET) return False if self.pointer: if result is not None and len(result) > 0: self.pointed_at_expired = False # file is a pointer, go update and watch other file self.point_path = result if self.compare_pointer(result): self.update_pointed() else: self.pointed_at_expired = True self.old_pointed = '' self.old_data = '' self.set_valid(False) self.call_error(self.INVALID_PATH) else: # file is not a pointer, return contents if self.compare_data(result): self.call_config(result) self.set_valid(True) return True
[ "def", "update_file", "(", "self", ",", "path", ")", ":", "try", ":", "# grab the file", "result", ",", "stat", "=", "self", ".", "zoo_client", ".", "get", "(", "path", ",", "watch", "=", "self", ".", "watch_file", ")", "except", "ZookeeperError", ":", ...
Updates the file watcher and calls the appropriate method for results @return: False if we need to keep trying the connection
[ "Updates", "the", "file", "watcher", "and", "calls", "the", "appropriate", "method", "for", "results" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L197-L229
234,478
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.update_pointed
def update_pointed(self): ''' Grabs the latest file contents based on the pointer uri ''' # only grab file if our pointer is still good (not None) if not self.pointed_at_expired: try: conf_string, stat2 = self.zoo_client.get(self.point_path, watch=self.watch_pointed) except ZookeeperError: self.old_data = '' self.set_valid(False) self.pointed_at_expired = True self.call_error(self.INVALID_PATH) return if self.compare_data(conf_string): self.call_config(conf_string) self.set_valid(True)
python
def update_pointed(self): ''' Grabs the latest file contents based on the pointer uri ''' # only grab file if our pointer is still good (not None) if not self.pointed_at_expired: try: conf_string, stat2 = self.zoo_client.get(self.point_path, watch=self.watch_pointed) except ZookeeperError: self.old_data = '' self.set_valid(False) self.pointed_at_expired = True self.call_error(self.INVALID_PATH) return if self.compare_data(conf_string): self.call_config(conf_string) self.set_valid(True)
[ "def", "update_pointed", "(", "self", ")", ":", "# only grab file if our pointer is still good (not None)", "if", "not", "self", ".", "pointed_at_expired", ":", "try", ":", "conf_string", ",", "stat2", "=", "self", ".", "zoo_client", ".", "get", "(", "self", ".", ...
Grabs the latest file contents based on the pointer uri
[ "Grabs", "the", "latest", "file", "contents", "based", "on", "the", "pointer", "uri" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L237-L255
234,479
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
ZookeeperWatcher.set_valid
def set_valid(self, boolean): ''' Sets the state and calls the change if needed @param bool: The state (true or false) ''' old_state = self.is_valid() self.valid_file = boolean if old_state != self.valid_file: self.call_valid(self.valid_file)
python
def set_valid(self, boolean): ''' Sets the state and calls the change if needed @param bool: The state (true or false) ''' old_state = self.is_valid() self.valid_file = boolean if old_state != self.valid_file: self.call_valid(self.valid_file)
[ "def", "set_valid", "(", "self", ",", "boolean", ")", ":", "old_state", "=", "self", ".", "is_valid", "(", ")", "self", ".", "valid_file", "=", "boolean", "if", "old_state", "!=", "self", ".", "valid_file", ":", "self", ".", "call_valid", "(", "self", ...
Sets the state and calls the change if needed @param bool: The state (true or false)
[ "Sets", "the", "state", "and", "calls", "the", "change", "if", "needed" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L257-L266
234,480
istresearch/scrapy-cluster
kafka-monitor/plugins/zookeeper_handler.py
ZookeeperHandler.setup
def setup(self, settings): ''' Setup redis and tldextract ''' self.extract = tldextract.TLDExtract() self.redis_conn = redis.Redis(host=settings['REDIS_HOST'], port=settings['REDIS_PORT'], db=settings.get('REDIS_DB')) try: self.redis_conn.info() self.logger.debug("Connected to Redis in ZookeeperHandler") except ConnectionError: self.logger.error("Failed to connect to Redis in ZookeeperHandler") # plugin is essential to functionality sys.exit(1)
python
def setup(self, settings): ''' Setup redis and tldextract ''' self.extract = tldextract.TLDExtract() self.redis_conn = redis.Redis(host=settings['REDIS_HOST'], port=settings['REDIS_PORT'], db=settings.get('REDIS_DB')) try: self.redis_conn.info() self.logger.debug("Connected to Redis in ZookeeperHandler") except ConnectionError: self.logger.error("Failed to connect to Redis in ZookeeperHandler") # plugin is essential to functionality sys.exit(1)
[ "def", "setup", "(", "self", ",", "settings", ")", ":", "self", ".", "extract", "=", "tldextract", ".", "TLDExtract", "(", ")", "self", ".", "redis_conn", "=", "redis", ".", "Redis", "(", "host", "=", "settings", "[", "'REDIS_HOST'", "]", ",", "port", ...
Setup redis and tldextract
[ "Setup", "redis", "and", "tldextract" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/plugins/zookeeper_handler.py#L14-L29
234,481
istresearch/scrapy-cluster
redis-monitor/plugins/base_monitor.py
BaseMonitor.get_log_dict
def get_log_dict(self, action, appid, spiderid=None, uuid=None, crawlid=None): ''' Returns a basic dictionary for logging @param action: the action taken by the redis monitor @param spiderid: the spider id @param appid: the application id @param uuid: a unique id of the request @param crawlid: a unique crawl id of the request ''' extras = {} extras['action'] = action extras['appid'] = appid if spiderid is not None: extras['spiderid'] = spiderid if uuid is not None: extras['uuid'] = uuid if crawlid is not None: extras['crawlid'] = crawlid return extras
python
def get_log_dict(self, action, appid, spiderid=None, uuid=None, crawlid=None): ''' Returns a basic dictionary for logging @param action: the action taken by the redis monitor @param spiderid: the spider id @param appid: the application id @param uuid: a unique id of the request @param crawlid: a unique crawl id of the request ''' extras = {} extras['action'] = action extras['appid'] = appid if spiderid is not None: extras['spiderid'] = spiderid if uuid is not None: extras['uuid'] = uuid if crawlid is not None: extras['crawlid'] = crawlid return extras
[ "def", "get_log_dict", "(", "self", ",", "action", ",", "appid", ",", "spiderid", "=", "None", ",", "uuid", "=", "None", ",", "crawlid", "=", "None", ")", ":", "extras", "=", "{", "}", "extras", "[", "'action'", "]", "=", "action", "extras", "[", "...
Returns a basic dictionary for logging @param action: the action taken by the redis monitor @param spiderid: the spider id @param appid: the application id @param uuid: a unique id of the request @param crawlid: a unique crawl id of the request
[ "Returns", "a", "basic", "dictionary", "for", "logging" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/base_monitor.py#L71-L91
234,482
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._load_plugins
def _load_plugins(self): ''' Sets up all plugins and defaults ''' plugins = self.settings['PLUGINS'] self.plugins_dict = {} for key in plugins: # skip loading the plugin if its value is None if plugins[key] is None: continue # valid plugin, import and setup self.logger.debug("Trying to load plugin {cls}" .format(cls=key)) the_class = self.import_class(key) instance = the_class() instance.redis_conn = self.redis_conn instance._set_logger(self.logger) if not self.unit_test: instance.setup(self.settings) the_regex = instance.regex mini = {} mini['instance'] = instance if the_regex is None: raise ImportError() # continue mini['regex'] = the_regex self.plugins_dict[plugins[key]] = mini self.plugins_dict = OrderedDict(sorted(list(self.plugins_dict.items()), key=lambda t: t[0]))
python
def _load_plugins(self): ''' Sets up all plugins and defaults ''' plugins = self.settings['PLUGINS'] self.plugins_dict = {} for key in plugins: # skip loading the plugin if its value is None if plugins[key] is None: continue # valid plugin, import and setup self.logger.debug("Trying to load plugin {cls}" .format(cls=key)) the_class = self.import_class(key) instance = the_class() instance.redis_conn = self.redis_conn instance._set_logger(self.logger) if not self.unit_test: instance.setup(self.settings) the_regex = instance.regex mini = {} mini['instance'] = instance if the_regex is None: raise ImportError() # continue mini['regex'] = the_regex self.plugins_dict[plugins[key]] = mini self.plugins_dict = OrderedDict(sorted(list(self.plugins_dict.items()), key=lambda t: t[0]))
[ "def", "_load_plugins", "(", "self", ")", ":", "plugins", "=", "self", ".", "settings", "[", "'PLUGINS'", "]", "self", ".", "plugins_dict", "=", "{", "}", "for", "key", "in", "plugins", ":", "# skip loading the plugin if its value is None", "if", "plugins", "[...
Sets up all plugins and defaults
[ "Sets", "up", "all", "plugins", "and", "defaults" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L87-L119
234,483
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._main_loop
def _main_loop(self): ''' The internal while true main loop for the redis monitor ''' self.logger.debug("Running main loop") old_time = 0 while True: for plugin_key in self.plugins_dict: obj = self.plugins_dict[plugin_key] self._process_plugin(obj) if self.settings['STATS_DUMP'] != 0: new_time = int(old_div(time.time(), self.settings['STATS_DUMP'])) # only log every X seconds if new_time != old_time: self._dump_stats() if self.settings['STATS_DUMP_CRAWL']: self._dump_crawl_stats() if self.settings['STATS_DUMP_QUEUE']: self._dump_queue_stats() old_time = new_time self._report_self() time.sleep(self.settings['SLEEP_TIME'])
python
def _main_loop(self): ''' The internal while true main loop for the redis monitor ''' self.logger.debug("Running main loop") old_time = 0 while True: for plugin_key in self.plugins_dict: obj = self.plugins_dict[plugin_key] self._process_plugin(obj) if self.settings['STATS_DUMP'] != 0: new_time = int(old_div(time.time(), self.settings['STATS_DUMP'])) # only log every X seconds if new_time != old_time: self._dump_stats() if self.settings['STATS_DUMP_CRAWL']: self._dump_crawl_stats() if self.settings['STATS_DUMP_QUEUE']: self._dump_queue_stats() old_time = new_time self._report_self() time.sleep(self.settings['SLEEP_TIME'])
[ "def", "_main_loop", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Running main loop\"", ")", "old_time", "=", "0", "while", "True", ":", "for", "plugin_key", "in", "self", ".", "plugins_dict", ":", "obj", "=", "self", ".", "plugins...
The internal while true main loop for the redis monitor
[ "The", "internal", "while", "true", "main", "loop", "for", "the", "redis", "monitor" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L127-L152
234,484
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._process_plugin
def _process_plugin(self, plugin): ''' Logic to handle each plugin that is active @param plugin: a plugin dict object ''' instance = plugin['instance'] regex = plugin['regex'] for key in self.redis_conn.scan_iter(match=regex): # acquire lock lock = self._create_lock_object(key) try: if lock.acquire(blocking=False): val = self.redis_conn.get(key) self._process_key_val(instance, key, val) except Exception: self.logger.error(traceback.format_exc()) self._increment_fail_stat('{k}:{v}'.format(k=key, v=val)) self._process_failures(key) # remove lock regardless of if exception or was handled ok if lock._held: self.logger.debug("releasing lock") lock.release()
python
def _process_plugin(self, plugin): ''' Logic to handle each plugin that is active @param plugin: a plugin dict object ''' instance = plugin['instance'] regex = plugin['regex'] for key in self.redis_conn.scan_iter(match=regex): # acquire lock lock = self._create_lock_object(key) try: if lock.acquire(blocking=False): val = self.redis_conn.get(key) self._process_key_val(instance, key, val) except Exception: self.logger.error(traceback.format_exc()) self._increment_fail_stat('{k}:{v}'.format(k=key, v=val)) self._process_failures(key) # remove lock regardless of if exception or was handled ok if lock._held: self.logger.debug("releasing lock") lock.release()
[ "def", "_process_plugin", "(", "self", ",", "plugin", ")", ":", "instance", "=", "plugin", "[", "'instance'", "]", "regex", "=", "plugin", "[", "'regex'", "]", "for", "key", "in", "self", ".", "redis_conn", ".", "scan_iter", "(", "match", "=", "regex", ...
Logic to handle each plugin that is active @param plugin: a plugin dict object
[ "Logic", "to", "handle", "each", "plugin", "that", "is", "active" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L154-L179
234,485
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._create_lock_object
def _create_lock_object(self, key): ''' Returns a lock object, split for testing ''' return redis_lock.Lock(self.redis_conn, key, expire=self.settings['REDIS_LOCK_EXPIRATION'], auto_renewal=True)
python
def _create_lock_object(self, key): ''' Returns a lock object, split for testing ''' return redis_lock.Lock(self.redis_conn, key, expire=self.settings['REDIS_LOCK_EXPIRATION'], auto_renewal=True)
[ "def", "_create_lock_object", "(", "self", ",", "key", ")", ":", "return", "redis_lock", ".", "Lock", "(", "self", ".", "redis_conn", ",", "key", ",", "expire", "=", "self", ".", "settings", "[", "'REDIS_LOCK_EXPIRATION'", "]", ",", "auto_renewal", "=", "T...
Returns a lock object, split for testing
[ "Returns", "a", "lock", "object", "split", "for", "testing" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L181-L187
234,486
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._process_failures
def _process_failures(self, key): ''' Handles the retrying of the failed key ''' if self.settings['RETRY_FAILURES']: self.logger.debug("going to retry failure") # get the current failure count failkey = self._get_fail_key(key) current = self.redis_conn.get(failkey) if current is None: current = 0 else: current = int(current) if current < self.settings['RETRY_FAILURES_MAX']: self.logger.debug("Incr fail key") current += 1 self.redis_conn.set(failkey, current) else: self.logger.error("Could not process action within" " failure limit") self.redis_conn.delete(failkey) self.redis_conn.delete(key)
python
def _process_failures(self, key): ''' Handles the retrying of the failed key ''' if self.settings['RETRY_FAILURES']: self.logger.debug("going to retry failure") # get the current failure count failkey = self._get_fail_key(key) current = self.redis_conn.get(failkey) if current is None: current = 0 else: current = int(current) if current < self.settings['RETRY_FAILURES_MAX']: self.logger.debug("Incr fail key") current += 1 self.redis_conn.set(failkey, current) else: self.logger.error("Could not process action within" " failure limit") self.redis_conn.delete(failkey) self.redis_conn.delete(key)
[ "def", "_process_failures", "(", "self", ",", "key", ")", ":", "if", "self", ".", "settings", "[", "'RETRY_FAILURES'", "]", ":", "self", ".", "logger", ".", "debug", "(", "\"going to retry failure\"", ")", "# get the current failure count", "failkey", "=", "self...
Handles the retrying of the failed key
[ "Handles", "the", "retrying", "of", "the", "failed", "key" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L195-L216
234,487
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._setup_stats
def _setup_stats(self): ''' Sets up the stats ''' # stats setup self.stats_dict = {} if self.settings['STATS_TOTAL']: self._setup_stats_total() if self.settings['STATS_PLUGINS']: self._setup_stats_plugins()
python
def _setup_stats(self): ''' Sets up the stats ''' # stats setup self.stats_dict = {} if self.settings['STATS_TOTAL']: self._setup_stats_total() if self.settings['STATS_PLUGINS']: self._setup_stats_plugins()
[ "def", "_setup_stats", "(", "self", ")", ":", "# stats setup", "self", ".", "stats_dict", "=", "{", "}", "if", "self", ".", "settings", "[", "'STATS_TOTAL'", "]", ":", "self", ".", "_setup_stats_total", "(", ")", "if", "self", ".", "settings", "[", "'STA...
Sets up the stats
[ "Sets", "up", "the", "stats" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L239-L250
234,488
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._setup_stats_plugins
def _setup_stats_plugins(self): ''' Sets up the plugin stats collectors ''' self.stats_dict['plugins'] = {} for key in self.plugins_dict: plugin_name = self.plugins_dict[key]['instance'].__class__.__name__ temp_key = 'stats:redis-monitor:{p}'.format(p=plugin_name) self.stats_dict['plugins'][plugin_name] = {} for item in self.settings['STATS_TIMES']: try: time = getattr(StatsCollector, item) self.stats_dict['plugins'][plugin_name][time] = StatsCollector \ .get_rolling_time_window( redis_conn=self.redis_conn, key='{k}:{t}'.format(k=temp_key, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.logger.debug("Set up {p} plugin Stats Collector '{i}'"\ .format(p=plugin_name, i=item)) except AttributeError as e: self.logger.warning("Unable to find Stats Time '{s}'"\ .format(s=item)) total = StatsCollector.get_hll_counter(redis_conn=self.redis_conn, key='{k}:lifetime'.format(k=temp_key), cycle_time=self.settings['STATS_CYCLE'], roll=False) self.logger.debug("Set up {p} plugin Stats Collector 'lifetime'"\ .format(p=plugin_name)) self.stats_dict['plugins'][plugin_name]['lifetime'] = total
python
def _setup_stats_plugins(self): ''' Sets up the plugin stats collectors ''' self.stats_dict['plugins'] = {} for key in self.plugins_dict: plugin_name = self.plugins_dict[key]['instance'].__class__.__name__ temp_key = 'stats:redis-monitor:{p}'.format(p=plugin_name) self.stats_dict['plugins'][plugin_name] = {} for item in self.settings['STATS_TIMES']: try: time = getattr(StatsCollector, item) self.stats_dict['plugins'][plugin_name][time] = StatsCollector \ .get_rolling_time_window( redis_conn=self.redis_conn, key='{k}:{t}'.format(k=temp_key, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.logger.debug("Set up {p} plugin Stats Collector '{i}'"\ .format(p=plugin_name, i=item)) except AttributeError as e: self.logger.warning("Unable to find Stats Time '{s}'"\ .format(s=item)) total = StatsCollector.get_hll_counter(redis_conn=self.redis_conn, key='{k}:lifetime'.format(k=temp_key), cycle_time=self.settings['STATS_CYCLE'], roll=False) self.logger.debug("Set up {p} plugin Stats Collector 'lifetime'"\ .format(p=plugin_name)) self.stats_dict['plugins'][plugin_name]['lifetime'] = total
[ "def", "_setup_stats_plugins", "(", "self", ")", ":", "self", ".", "stats_dict", "[", "'plugins'", "]", "=", "{", "}", "for", "key", "in", "self", ".", "plugins_dict", ":", "plugin_name", "=", "self", ".", "plugins_dict", "[", "key", "]", "[", "'instance...
Sets up the plugin stats collectors
[ "Sets", "up", "the", "plugin", "stats", "collectors" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L292-L322
234,489
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._dump_crawl_stats
def _dump_crawl_stats(self): ''' Dumps flattened crawling stats so the spiders do not have to ''' extras = {} spiders = {} spider_set = set() total_spider_count = 0 keys = self.redis_conn.keys('stats:crawler:*:*:*') for key in keys: # we only care about the spider elements = key.split(":") spider = elements[3] if spider not in spiders: spiders[spider] = 0 if len(elements) == 6: # got a time based stat response = elements[4] end = elements[5] final = '{s}_{r}_{e}'.format(s=spider, r=response, e=end) if end == 'lifetime': value = self.redis_conn.execute_command("PFCOUNT", key) else: value = self.redis_conn.zcard(key) extras[final] = value elif len(elements) == 5: # got a spider identifier spiders[spider] += 1 total_spider_count += 1 spider_set.add(spider) else: self.logger.warn("Unknown crawler stat key", {"key":key}) # simple counts extras['unique_spider_count'] = len(spider_set) extras['total_spider_count'] = total_spider_count for spider in spiders: extras['{k}_spider_count'.format(k=spider)] = spiders[spider] if not self.logger.json: self.logger.info('Crawler Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Crawler Stats Dump', extra=extras)
python
def _dump_crawl_stats(self): ''' Dumps flattened crawling stats so the spiders do not have to ''' extras = {} spiders = {} spider_set = set() total_spider_count = 0 keys = self.redis_conn.keys('stats:crawler:*:*:*') for key in keys: # we only care about the spider elements = key.split(":") spider = elements[3] if spider not in spiders: spiders[spider] = 0 if len(elements) == 6: # got a time based stat response = elements[4] end = elements[5] final = '{s}_{r}_{e}'.format(s=spider, r=response, e=end) if end == 'lifetime': value = self.redis_conn.execute_command("PFCOUNT", key) else: value = self.redis_conn.zcard(key) extras[final] = value elif len(elements) == 5: # got a spider identifier spiders[spider] += 1 total_spider_count += 1 spider_set.add(spider) else: self.logger.warn("Unknown crawler stat key", {"key":key}) # simple counts extras['unique_spider_count'] = len(spider_set) extras['total_spider_count'] = total_spider_count for spider in spiders: extras['{k}_spider_count'.format(k=spider)] = spiders[spider] if not self.logger.json: self.logger.info('Crawler Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Crawler Stats Dump', extra=extras)
[ "def", "_dump_crawl_stats", "(", "self", ")", ":", "extras", "=", "{", "}", "spiders", "=", "{", "}", "spider_set", "=", "set", "(", ")", "total_spider_count", "=", "0", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'stats:crawler:*:*:*'", "...
Dumps flattened crawling stats so the spiders do not have to
[ "Dumps", "flattened", "crawling", "stats", "so", "the", "spiders", "do", "not", "have", "to" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L398-L451
234,490
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor._dump_queue_stats
def _dump_queue_stats(self): ''' Dumps basic info about the queue lengths for the spider types ''' extras = {} keys = self.redis_conn.keys('*:*:queue') total_backlog = 0 for key in keys: elements = key.split(":") spider = elements[0] domain = elements[1] spider = 'queue_' + spider if spider not in extras: extras[spider] = {} extras[spider]['spider_backlog'] = 0 extras[spider]['num_domains'] = 0 count = self.redis_conn.zcard(key) total_backlog += count extras[spider]['spider_backlog'] += count extras[spider]['num_domains'] += 1 extras['total_backlog'] = total_backlog if not self.logger.json: self.logger.info('Queue Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Queue Stats Dump', extra=extras)
python
def _dump_queue_stats(self): ''' Dumps basic info about the queue lengths for the spider types ''' extras = {} keys = self.redis_conn.keys('*:*:queue') total_backlog = 0 for key in keys: elements = key.split(":") spider = elements[0] domain = elements[1] spider = 'queue_' + spider if spider not in extras: extras[spider] = {} extras[spider]['spider_backlog'] = 0 extras[spider]['num_domains'] = 0 count = self.redis_conn.zcard(key) total_backlog += count extras[spider]['spider_backlog'] += count extras[spider]['num_domains'] += 1 extras['total_backlog'] = total_backlog if not self.logger.json: self.logger.info('Queue Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Queue Stats Dump', extra=extras)
[ "def", "_dump_queue_stats", "(", "self", ")", ":", "extras", "=", "{", "}", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'*:*:queue'", ")", "total_backlog", "=", "0", "for", "key", "in", "keys", ":", "elements", "=", "key", ".", "split", ...
Dumps basic info about the queue lengths for the spider types
[ "Dumps", "basic", "info", "about", "the", "queue", "lengths", "for", "the", "spider", "types" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L453-L482
234,491
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
RedisMonitor.close
def close(self): ''' Closes the Redis Monitor and plugins ''' for plugin_key in self.plugins_dict: obj = self.plugins_dict[plugin_key] instance = obj['instance'] instance.close()
python
def close(self): ''' Closes the Redis Monitor and plugins ''' for plugin_key in self.plugins_dict: obj = self.plugins_dict[plugin_key] instance = obj['instance'] instance.close()
[ "def", "close", "(", "self", ")", ":", "for", "plugin_key", "in", "self", ".", "plugins_dict", ":", "obj", "=", "self", ".", "plugins_dict", "[", "plugin_key", "]", "instance", "=", "obj", "[", "'instance'", "]", "instance", ".", "close", "(", ")" ]
Closes the Redis Monitor and plugins
[ "Closes", "the", "Redis", "Monitor", "and", "plugins" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L494-L501
234,492
istresearch/scrapy-cluster
redis-monitor/plugins/stats_monitor.py
StatsMonitor.get_all_stats
def get_all_stats(self): ''' Gather all stats objects ''' self.logger.debug("Gathering all stats") the_dict = {} the_dict['kafka-monitor'] = self.get_kafka_monitor_stats() the_dict['redis-monitor'] = self.get_redis_monitor_stats() the_dict['crawler'] = self.get_crawler_stats() the_dict['rest'] = self.get_rest_stats() return the_dict
python
def get_all_stats(self): ''' Gather all stats objects ''' self.logger.debug("Gathering all stats") the_dict = {} the_dict['kafka-monitor'] = self.get_kafka_monitor_stats() the_dict['redis-monitor'] = self.get_redis_monitor_stats() the_dict['crawler'] = self.get_crawler_stats() the_dict['rest'] = self.get_rest_stats() return the_dict
[ "def", "get_all_stats", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Gathering all stats\"", ")", "the_dict", "=", "{", "}", "the_dict", "[", "'kafka-monitor'", "]", "=", "self", ".", "get_kafka_monitor_stats", "(", ")", "the_dict", "[...
Gather all stats objects
[ "Gather", "all", "stats", "objects" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stats_monitor.py#L69-L80
234,493
istresearch/scrapy-cluster
redis-monitor/plugins/stats_monitor.py
StatsMonitor._get_plugin_stats
def _get_plugin_stats(self, name): ''' Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats ''' the_dict = {} keys = self.redis_conn.keys('stats:{n}:*'.format(n=name)) for key in keys: # break down key elements = key.split(":") main = elements[2] end = elements[3] if main == 'total' or main == 'fail': if main not in the_dict: the_dict[main] = {} the_dict[main][end] = self._get_key_value(key, end == 'lifetime') elif main == 'self': if 'nodes' not in the_dict: # main is self, end is machine, true_tail is uuid the_dict['nodes'] = {} true_tail = elements[4] if end not in the_dict['nodes']: the_dict['nodes'][end] = [] the_dict['nodes'][end].append(true_tail) else: if 'plugins' not in the_dict: the_dict['plugins'] = {} if main not in the_dict['plugins']: the_dict['plugins'][main] = {} the_dict['plugins'][main][end] = self._get_key_value(key, end == 'lifetime') return the_dict
python
def _get_plugin_stats(self, name): ''' Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats ''' the_dict = {} keys = self.redis_conn.keys('stats:{n}:*'.format(n=name)) for key in keys: # break down key elements = key.split(":") main = elements[2] end = elements[3] if main == 'total' or main == 'fail': if main not in the_dict: the_dict[main] = {} the_dict[main][end] = self._get_key_value(key, end == 'lifetime') elif main == 'self': if 'nodes' not in the_dict: # main is self, end is machine, true_tail is uuid the_dict['nodes'] = {} true_tail = elements[4] if end not in the_dict['nodes']: the_dict['nodes'][end] = [] the_dict['nodes'][end].append(true_tail) else: if 'plugins' not in the_dict: the_dict['plugins'] = {} if main not in the_dict['plugins']: the_dict['plugins'][main] = {} the_dict['plugins'][main][end] = self._get_key_value(key, end == 'lifetime') return the_dict
[ "def", "_get_plugin_stats", "(", "self", ",", "name", ")", ":", "the_dict", "=", "{", "}", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'stats:{n}:*'", ".", "format", "(", "n", "=", "name", ")", ")", "for", "key", "in", "keys", ":", "...
Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats
[ "Used", "for", "getting", "stats", "for", "Plugin", "based", "stuff", "like", "Kafka", "Monitor", "and", "Redis", "Monitor" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stats_monitor.py#L109-L146
234,494
istresearch/scrapy-cluster
redis-monitor/plugins/stats_monitor.py
StatsMonitor._get_key_value
def _get_key_value(self, key, is_hll=False): ''' Returns the proper key value for the stats @param key: the redis key @param is_hll: the key is a HyperLogLog, else is a sorted set ''' if is_hll: # get hll value return self.redis_conn.execute_command("PFCOUNT", key) else: # get zcard value return self.redis_conn.zcard(key)
python
def _get_key_value(self, key, is_hll=False): ''' Returns the proper key value for the stats @param key: the redis key @param is_hll: the key is a HyperLogLog, else is a sorted set ''' if is_hll: # get hll value return self.redis_conn.execute_command("PFCOUNT", key) else: # get zcard value return self.redis_conn.zcard(key)
[ "def", "_get_key_value", "(", "self", ",", "key", ",", "is_hll", "=", "False", ")", ":", "if", "is_hll", ":", "# get hll value", "return", "self", ".", "redis_conn", ".", "execute_command", "(", "\"PFCOUNT\"", ",", "key", ")", "else", ":", "# get zcard value...
Returns the proper key value for the stats @param key: the redis key @param is_hll: the key is a HyperLogLog, else is a sorted set
[ "Returns", "the", "proper", "key", "value", "for", "the", "stats" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stats_monitor.py#L148-L160
234,495
istresearch/scrapy-cluster
redis-monitor/plugins/stats_monitor.py
StatsMonitor.get_crawler_stats
def get_crawler_stats(self): ''' Gather crawler stats @return: A dict of stats ''' self.logger.debug("Gathering crawler stats") the_dict = {} the_dict['spiders'] = self.get_spider_stats()['spiders'] the_dict['machines'] = self.get_machine_stats()['machines'] the_dict['queue'] = self.get_queue_stats()['queues'] return the_dict
python
def get_crawler_stats(self): ''' Gather crawler stats @return: A dict of stats ''' self.logger.debug("Gathering crawler stats") the_dict = {} the_dict['spiders'] = self.get_spider_stats()['spiders'] the_dict['machines'] = self.get_machine_stats()['machines'] the_dict['queue'] = self.get_queue_stats()['queues'] return the_dict
[ "def", "get_crawler_stats", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Gathering crawler stats\"", ")", "the_dict", "=", "{", "}", "the_dict", "[", "'spiders'", "]", "=", "self", ".", "get_spider_stats", "(", ")", "[", "'spiders'", ...
Gather crawler stats @return: A dict of stats
[ "Gather", "crawler", "stats" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stats_monitor.py#L246-L259
234,496
istresearch/scrapy-cluster
redis-monitor/plugins/stats_monitor.py
StatsMonitor.get_queue_stats
def get_queue_stats(self): ''' Gather queue stats @return: A dict of stats ''' self.logger.debug("Gathering queue based stats") the_dict = {} keys = self.redis_conn.keys('*:*:queue') total_backlog = 0 for key in keys: elements = key.split(":") spider = elements[0] domain = elements[1] spider = 'queue_' + spider if spider not in the_dict: the_dict[spider] = { 'spider_backlog': 0, 'num_domains': 0, 'domains': [] } count = self.redis_conn.zcard(key) total_backlog += count the_dict[spider]['spider_backlog'] += count the_dict[spider]['num_domains'] += 1 the_dict[spider]['domains'].append({'domain': domain, 'backlog': count}) the_dict['total_backlog'] = total_backlog ret_dict = { 'queues': the_dict } return ret_dict
python
def get_queue_stats(self): ''' Gather queue stats @return: A dict of stats ''' self.logger.debug("Gathering queue based stats") the_dict = {} keys = self.redis_conn.keys('*:*:queue') total_backlog = 0 for key in keys: elements = key.split(":") spider = elements[0] domain = elements[1] spider = 'queue_' + spider if spider not in the_dict: the_dict[spider] = { 'spider_backlog': 0, 'num_domains': 0, 'domains': [] } count = self.redis_conn.zcard(key) total_backlog += count the_dict[spider]['spider_backlog'] += count the_dict[spider]['num_domains'] += 1 the_dict[spider]['domains'].append({'domain': domain, 'backlog': count}) the_dict['total_backlog'] = total_backlog ret_dict = { 'queues': the_dict } return ret_dict
[ "def", "get_queue_stats", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Gathering queue based stats\"", ")", "the_dict", "=", "{", "}", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'*:*:queue'", ")", "total_backlog", "=", ...
Gather queue stats @return: A dict of stats
[ "Gather", "queue", "stats" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stats_monitor.py#L261-L297
234,497
istresearch/scrapy-cluster
crawler/config/file_pusher.py
main
def main(): ''' A manual configuration file pusher for the crawlers. This will update Zookeeper with the contents of the file specified in the args. ''' import argparse from kazoo.client import KazooClient parser = argparse.ArgumentParser( description="Crawler config file pusher to Zookeeper") parser.add_argument('-f', '--file', action='store', required=True, help="The yaml file to use") parser.add_argument('-i', '--id', action='store', default="all", help="The crawler id to use in zookeeper") parser.add_argument('-p', '--path', action='store', default="/scrapy-cluster/crawler/", help="The zookeeper path to use") parser.add_argument('-w', '--wipe', action='store_const', const=True, help="Remove the current config") parser.add_argument('-z', '--zoo-keeper', action='store', required=True, help="The Zookeeper connection <host>:<port>") args = vars(parser.parse_args()) filename = args['file'] id = args['id'] wipe = args['wipe'] zoo = args['zoo_keeper'] path = args['path'] zk = KazooClient(hosts=zoo) zk.start() # ensure path exists zk.ensure_path(path) bytes = open(filename, 'rb').read() if zk.exists(path): # push the conf file if not zk.exists(path + id) and not wipe: print("creaing conf node") zk.create(path + id, bytes) elif not wipe: print("updating conf file") zk.set(path + id, bytes) if wipe: zk.set(path + id, None) zk.stop()
python
def main(): ''' A manual configuration file pusher for the crawlers. This will update Zookeeper with the contents of the file specified in the args. ''' import argparse from kazoo.client import KazooClient parser = argparse.ArgumentParser( description="Crawler config file pusher to Zookeeper") parser.add_argument('-f', '--file', action='store', required=True, help="The yaml file to use") parser.add_argument('-i', '--id', action='store', default="all", help="The crawler id to use in zookeeper") parser.add_argument('-p', '--path', action='store', default="/scrapy-cluster/crawler/", help="The zookeeper path to use") parser.add_argument('-w', '--wipe', action='store_const', const=True, help="Remove the current config") parser.add_argument('-z', '--zoo-keeper', action='store', required=True, help="The Zookeeper connection <host>:<port>") args = vars(parser.parse_args()) filename = args['file'] id = args['id'] wipe = args['wipe'] zoo = args['zoo_keeper'] path = args['path'] zk = KazooClient(hosts=zoo) zk.start() # ensure path exists zk.ensure_path(path) bytes = open(filename, 'rb').read() if zk.exists(path): # push the conf file if not zk.exists(path + id) and not wipe: print("creaing conf node") zk.create(path + id, bytes) elif not wipe: print("updating conf file") zk.set(path + id, bytes) if wipe: zk.set(path + id, None) zk.stop()
[ "def", "main", "(", ")", ":", "import", "argparse", "from", "kazoo", ".", "client", "import", "KazooClient", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Crawler config file pusher to Zookeeper\"", ")", "parser", ".", "add_argument",...
A manual configuration file pusher for the crawlers. This will update Zookeeper with the contents of the file specified in the args.
[ "A", "manual", "configuration", "file", "pusher", "for", "the", "crawlers", ".", "This", "will", "update", "Zookeeper", "with", "the", "contents", "of", "the", "file", "specified", "in", "the", "args", "." ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/config/file_pusher.py#L5-L54
234,498
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogCallbackMixin.is_subdict
def is_subdict(self, a,b): ''' Return True if a is a subdict of b ''' return all((k in b and b[k]==v) for k,v in a.iteritems())
python
def is_subdict(self, a,b): ''' Return True if a is a subdict of b ''' return all((k in b and b[k]==v) for k,v in a.iteritems())
[ "def", "is_subdict", "(", "self", ",", "a", ",", "b", ")", ":", "return", "all", "(", "(", "k", "in", "b", "and", "b", "[", "k", "]", "==", "v", ")", "for", "k", ",", "v", "in", "a", ".", "iteritems", "(", ")", ")" ]
Return True if a is a subdict of b
[ "Return", "True", "if", "a", "is", "a", "subdict", "of", "b" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L62-L66
234,499
istresearch/scrapy-cluster
utils/scutils/log_factory.py
LogObject._check_log_level
def _check_log_level(self, level): ''' Ensures a valid log level @param level: the asked for level ''' if level not in list(self.level_dict.keys()): self.log_level = 'DEBUG' self.logger.warn("Unknown log level '{lev}', defaulting to DEBUG" .format(lev=level))
python
def _check_log_level(self, level): ''' Ensures a valid log level @param level: the asked for level ''' if level not in list(self.level_dict.keys()): self.log_level = 'DEBUG' self.logger.warn("Unknown log level '{lev}', defaulting to DEBUG" .format(lev=level))
[ "def", "_check_log_level", "(", "self", ",", "level", ")", ":", "if", "level", "not", "in", "list", "(", "self", ".", "level_dict", ".", "keys", "(", ")", ")", ":", "self", ".", "log_level", "=", "'DEBUG'", "self", ".", "logger", ".", "warn", "(", ...
Ensures a valid log level @param level: the asked for level
[ "Ensures", "a", "valid", "log", "level" ]
13aaed2349af5d792d6bcbfcadc5563158aeb599
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L169-L178