repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
rapidpro/dash
dash/orgs/context_processors.py
set_org_processor
def set_org_processor(request):
    """
    Simple context processor that automatically sets 'org' on the context if
    it is present in the request.

    Also exposes the most recently created active pattern ('P') and banner
    ('B') backgrounds for that org.
    """
    org = getattr(request, "org", None)
    if not org:
        return dict()

    # Both lookups share the same active-background base queryset; "newest"
    # is approximated by the highest primary key.
    active = org.backgrounds.filter(is_active=True)
    pattern_bg = active.filter(background_type="P").order_by("-pk").first()
    banner_bg = active.filter(background_type="B").order_by("-pk").first()
    return dict(org=org, pattern_bg=pattern_bg, banner_bg=banner_bg)
python
def set_org_processor(request): """ Simple context processor that automatically sets 'org' on the context if it is present in the request. """ if getattr(request, "org", None): org = request.org pattern_bg = org.backgrounds.filter(is_active=True, background_type="P") pattern_bg = pattern_bg.order_by("-pk").first() banner_bg = org.backgrounds.filter(is_active=True, background_type="B") banner_bg = banner_bg.order_by("-pk").first() return dict(org=org, pattern_bg=pattern_bg, banner_bg=banner_bg) else: return dict()
[ "def", "set_org_processor", "(", "request", ")", ":", "if", "getattr", "(", "request", ",", "\"org\"", ",", "None", ")", ":", "org", "=", "request", ".", "org", "pattern_bg", "=", "org", ".", "backgrounds", ".", "filter", "(", "is_active", "=", "True", ...
Simple context processor that automatically sets 'org' on the context if it is present in the request.
[ "Simple", "context", "processor", "that", "automatically", "sets", "org", "on", "the", "context", "if", "it", "is", "present", "in", "the", "request", "." ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/orgs/context_processors.py#L68-L82
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver._getresult
def _getresult(self, captcha_id, timeout=None):
    """
    Poll until a captcha `captcha_id` has been solved, or the poll times out.

    The timeout defaults to `self.waittime` seconds, unless overridden by
    `timeout` (which is in seconds). Polling is done every 8 seconds.

    Returns the solver response on success; raises
    `exc.CaptchaSolverFailure` if the captcha is not solved within the
    timeout.
    """
    # Original had a no-op `timeout = timeout` here; removed.
    if not timeout:
        timeout = self.waittime

    poll_interval = 8
    start = time.time()
    # Hoist the loop-invariant poll count out of the loop.
    total_polls = int(timeout / poll_interval) + 1

    for poll_num in range(total_polls):
        self.log.info("Sleeping %s seconds (poll %s of %s, elapsed %0.2fs of %0.2f).",
            poll_interval,
            poll_num,
            total_polls,
            (time.time() - start),
            timeout,
            )
        time.sleep(poll_interval)

        try:
            resp = self.doGet('result',
                {
                    'action' : 'get',
                    'key'    : self.api_key,
                    'json'   : True,
                    'id'     : captcha_id,
                }
            )
            self.log.info("Call returned success!")
            return resp
        except exc.CaptchaNotReady:
            self.log.info("Captcha not ready. Waiting longer.")

    raise exc.CaptchaSolverFailure("Solving captcha timed out after %s seconds!" % (time.time() - start, ))
python
def _getresult(self, captcha_id, timeout=None): """ Poll until a captcha `captcha_id` has been solved, or the poll times out. The timeout is the default 60 seconds, unless overridden by `timeout` (which is in seconds). Polling is done every 8 seconds. """ timeout = timeout if not timeout: timeout = self.waittime poll_interval = 8 start = time.time() for x in range(int(timeout / poll_interval)+1): self.log.info("Sleeping %s seconds (poll %s of %s, elapsed %0.2fs of %0.2f).", poll_interval, x, int(timeout / poll_interval)+1, (time.time() - start), timeout, ) time.sleep(poll_interval) try: resp = self.doGet('result', { 'action' : 'get', 'key' : self.api_key, 'json' : True, 'id' : captcha_id, } ) self.log.info("Call returned success!") return resp except exc.CaptchaNotReady: self.log.info("Captcha not ready. Waiting longer.") raise exc.CaptchaSolverFailure("Solving captcha timed out after %s seconds!" % (time.time() - start, ))
[ "def", "_getresult", "(", "self", ",", "captcha_id", ",", "timeout", "=", "None", ")", ":", "timeout", "=", "timeout", "if", "not", "timeout", ":", "timeout", "=", "self", ".", "waittime", "poll_interval", "=", "8", "start", "=", "time", ".", "time", "...
Poll until a captcha `captcha_id` has been solved, or the poll times out. The timeout is the default 60 seconds, unless overridden by `timeout` (which is in seconds). Polling is done every 8 seconds.
[ "Poll", "until", "a", "captcha", "captcha_id", "has", "been", "solved", "or", "the", "poll", "times", "out", ".", "The", "timeout", "is", "the", "default", "60", "seconds", "unless", "overridden", "by", "timeout", "(", "which", "is", "in", "seconds", ")", ...
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L140-L182
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver._submit
def _submit(self, pathfile, filedata, filename):
    '''
    Submit either a file from disk, or an in-memory file to the solver
    service, and return the request ID associated with the new captcha task.

    Raises ValueError when neither a valid path nor file bytes are given,
    and `exc.CaptchaSolverFailure` when the upload HTTP request fails.
    '''
    if pathfile and os.path.exists(pathfile):
        # Read the file eagerly inside a context manager so the handle is
        # closed deterministically (the original leaked the open handle).
        with open(pathfile, 'rb') as fp:
            files = {'file' : (os.path.basename(pathfile), io.BytesIO(fp.read()))}
    elif filedata:
        assert filename
        files = {'file' : (filename, io.BytesIO(filedata))}
    else:
        raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!")

    payload = {
            'key'    : self.api_key,
            'method' : 'post',
            'json'   : True,
        }

    self.log.info("Uploading to 2Captcha.com.")

    url = self.getUrlFor('input', {})
    request = requests.post(url, files=files, data=payload)

    if not request.ok:
        raise exc.CaptchaSolverFailure("Posting captcha to solve failed!")

    resp_json = json.loads(request.text)
    return self._process_response(resp_json)
python
def _submit(self, pathfile, filedata, filename): ''' Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task. ''' if pathfile and os.path.exists(pathfile): files = {'file': open(pathfile, 'rb')} elif filedata: assert filename files = {'file' : (filename, io.BytesIO(filedata))} else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") payload = { 'key' : self.api_key, 'method' : 'post', 'json' : True, } self.log.info("Uploading to 2Captcha.com.") url = self.getUrlFor('input', {}) request = requests.post(url, files=files, data=payload) if not request.ok: raise exc.CaptchaSolverFailure("Posting captcha to solve failed!") resp_json = json.loads(request.text) return self._process_response(resp_json)
[ "def", "_submit", "(", "self", ",", "pathfile", ",", "filedata", ",", "filename", ")", ":", "if", "pathfile", "and", "os", ".", "path", ".", "exists", "(", "pathfile", ")", ":", "files", "=", "{", "'file'", ":", "open", "(", "pathfile", ",", "'rb'", ...
Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task.
[ "Submit", "either", "a", "file", "from", "disk", "or", "a", "in", "-", "memory", "file", "to", "the", "solver", "service", "and", "return", "the", "request", "ID", "associated", "with", "the", "new", "captcha", "task", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L184-L213
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver.solve_simple_captcha
def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None):
    """
    Upload an image (from disk or a bytearray), and then block until the
    captcha has been solved. Return value is the captcha result.

    Either `pathfile` OR `filedata` AND `filename` should be specified.

    Failure will result in a subclass of WebRequest.CaptchaSolverFailure
    being thrown.
    """
    # Upload first, then block polling for the solution.
    submitted_id = self._submit(pathfile, filedata, filename)
    return self._getresult(captcha_id=submitted_id)
python
def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None): """ Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata AND filename should be specified. Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown. """ captcha_id = self._submit(pathfile, filedata, filename) return self._getresult(captcha_id=captcha_id)
[ "def", "solve_simple_captcha", "(", "self", ",", "pathfile", "=", "None", ",", "filedata", "=", "None", ",", "filename", "=", "None", ")", ":", "captcha_id", "=", "self", ".", "_submit", "(", "pathfile", ",", "filedata", ",", "filename", ")", "return", "...
Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata AND filename should be specified. Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown.
[ "Upload", "a", "image", "(", "from", "disk", "or", "a", "bytearray", ")", "and", "then", "block", "until", "the", "captcha", "has", "been", "solved", ".", "Return", "value", "is", "the", "captcha", "result", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L216-L229
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver.solve_recaptcha
def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60):
    '''
    Solve a recaptcha on page `page_url` with the input value `google_key`.

    Timeout is `timeout` seconds, defaulting to 15 minutes (900 seconds).

    Return value is either the `g-recaptcha-response` value, or an
    exception is raised (generally `CaptchaSolverFailure`).
    '''
    # The solver worker connects back through our own SOCKS proxy so it
    # appears to come from this host's WAN address.
    proxy = SocksProxy.ProxyLauncher([TWOCAPTCHA_IP])
    try:
        captcha_id = self.doGet('input',
            {
                'key'       : self.api_key,
                'method'    : "userrecaptcha",
                'googlekey' : google_key,
                'pageurl'   : page_url,
                'proxy'     : proxy.get_wan_address(),
                'proxytype' : "SOCKS5",
                'json'      : True,
            }
        )
        # Allow 15 minutes for the solution
        # I've been seeing times up to 160+ seconds in testing.
        return self._getresult(captcha_id=captcha_id, timeout=timeout)
    finally:
        # Always tear the proxy down, even if submission/polling raised.
        proxy.stop()
python
def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60): ''' Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`) ''' proxy = SocksProxy.ProxyLauncher([TWOCAPTCHA_IP]) try: captcha_id = self.doGet('input', { 'key' : self.api_key, 'method' : "userrecaptcha", 'googlekey' : google_key, 'pageurl' : page_url, 'proxy' : proxy.get_wan_address(), 'proxytype' : "SOCKS5", 'json' : True, } ) # Allow 15 minutes for the solution # I've been seeing times up to 160+ seconds in testing. return self._getresult(captcha_id=captcha_id, timeout=timeout) finally: proxy.stop()
[ "def", "solve_recaptcha", "(", "self", ",", "google_key", ",", "page_url", ",", "timeout", "=", "15", "*", "60", ")", ":", "proxy", "=", "SocksProxy", ".", "ProxyLauncher", "(", "[", "TWOCAPTCHA_IP", "]", ")", "try", ":", "captcha_id", "=", "self", ".", ...
Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`)
[ "Solve", "a", "recaptcha", "on", "page", "page_url", "with", "the", "input", "value", "google_key", ".", "Timeout", "is", "timeout", "seconds", "defaulting", "to", "60", "seconds", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L231-L260
jjjake/iamine
iamine/api.py
search
def search(query=None, params=None, callback=None, mine_ids=None, info_only=None,
           **kwargs):
    r"""Mine Archive.org search results.

    :param query: (optional) The Archive.org search query to yield results
                  for. Refer to https://archive.org/advancedsearch.php#raw
                  for help formatting your query. If no query is given, all
                  indexed items will be mined!
    :type query: str

    :param params: (optional) The URL parameters to send with each request
                   sent to the Archive.org Advancedsearch Api.
    :type params: dict

    :param callback: (optional) A callback function to be called on each
                     :py:class:`aiohttp.client.ClientResponse`.

    :param mine_ids: (optional) By default, ``search`` mines through search
                     results. To mine through the item metadata for each
                     item returned by your query instead, set ``mine_ids``
                     to ``True``.
    :type mine_ids: bool

    :param info_only: (optional) Set to ``True`` to return information
                      about your query rather than mining any metadata or
                      search results.
    :type info_only: bool

    :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
    """
    # NOTE: the docstring is now a raw string -- the original non-raw
    # ``\*\*kwargs`` escapes trigger invalid-escape SyntaxWarnings on
    # modern Python.
    query = query if query else '(*:*)'  # empty/None query mines everything
    params = params if params else {}
    mine_ids = bool(mine_ids)
    info_only = bool(info_only)

    miner = SearchMiner(**kwargs)

    if info_only:
        # Only report query metadata (response header + hit count); no mining.
        params = miner.get_search_params(query, params)
        r = miner.get_search_info(params)
        search_info = r.get('responseHeader')
        search_info['numFound'] = r.get('response', {}).get('numFound', 0)
        return search_info

    try:
        # Stop mining cleanly on Ctrl-C.
        miner.loop.add_signal_handler(signal.SIGINT, miner.close)
        miner.loop.run_until_complete(
            miner.search(query, params=params, callback=callback, mine_ids=mine_ids))
    except RuntimeError:
        # Loop already stopped/closed (e.g. after SIGINT) -- nothing to do.
        pass
python
def search(query=None, params=None, callback=None, mine_ids=None, info_only=None, **kwargs): """Mine Archive.org search results. :param query: (optional) The Archive.org search query to yield results for. Refer to https://archive.org/advancedsearch.php#raw for help formatting your query. If no query is given, all indexed items will be mined! :type query: str :param params: (optional) The URL parameters to send with each request sent to the Archive.org Advancedsearch Api. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param mine_ids: (optional) By default, ``search`` mines through search results. To mine through the item metadata for each item returned by your query instead, set ``mine_ids`` to ``True``. :type mine_ids: bool :param info_only: (optional) Set to ``True`` to return information about your query rather than mining any metadata or search results. :type info_only: bool :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes. """ query = '(*:*)' if not query else query params = params if params else {} mine_ids = True if mine_ids else False info_only = True if info_only else False miner = SearchMiner(**kwargs) if info_only: params = miner.get_search_params(query, params) r = miner.get_search_info(params) search_info = r.get('responseHeader') search_info['numFound'] = r.get('response', {}).get('numFound', 0) return search_info try: miner.loop.add_signal_handler(signal.SIGINT, miner.close) miner.loop.run_until_complete( miner.search(query, params=params, callback=callback, mine_ids=mine_ids)) except RuntimeError: pass
[ "def", "search", "(", "query", "=", "None", ",", "params", "=", "None", ",", "callback", "=", "None", ",", "mine_ids", "=", "None", ",", "info_only", "=", "None", ",", "*", "*", "kwargs", ")", ":", "query", "=", "'(*:*)'", "if", "not", "query", "el...
Mine Archive.org search results. :param query: (optional) The Archive.org search query to yield results for. Refer to https://archive.org/advancedsearch.php#raw for help formatting your query. If no query is given, all indexed items will be mined! :type query: str :param params: (optional) The URL parameters to send with each request sent to the Archive.org Advancedsearch Api. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param mine_ids: (optional) By default, ``search`` mines through search results. To mine through the item metadata for each item returned by your query instead, set ``mine_ids`` to ``True``. :type mine_ids: bool :param info_only: (optional) Set to ``True`` to return information about your query rather than mining any metadata or search results. :type info_only: bool :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
[ "Mine", "Archive", ".", "org", "search", "results", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/api.py#L8-L56
jjjake/iamine
iamine/api.py
mine_urls
def mine_urls(urls, params=None, callback=None, **kwargs):
    r"""Concurrently retrieve URLs.

    :param urls: A set of URLs to concurrently retrieve.
    :type urls: iterable

    :param params: (optional) The URL parameters to send with each request.
    :type params: dict

    :param callback: (optional) A callback function to be called on each
                     :py:class:`aiohttp.client.ClientResponse`.

    :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
    """
    miner = Miner(**kwargs)
    try:
        # Stop mining cleanly on Ctrl-C.
        miner.loop.add_signal_handler(signal.SIGINT, miner.close)
        miner.loop.run_until_complete(miner.mine_urls(urls, params, callback))
    except RuntimeError:
        # Loop already stopped/closed (e.g. after SIGINT) -- nothing to do.
        pass
python
def mine_urls(urls, params=None, callback=None, **kwargs): """Concurrently retrieve URLs. :param urls: A set of URLs to concurrently retrieve. :type urls: iterable :param params: (optional) The URL parameters to send with each request. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes. """ miner = Miner(**kwargs) try: miner.loop.add_signal_handler(signal.SIGINT, miner.close) miner.loop.run_until_complete(miner.mine_urls(urls, params, callback)) except RuntimeError: pass
[ "def", "mine_urls", "(", "urls", ",", "params", "=", "None", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "miner", "=", "Miner", "(", "*", "*", "kwargs", ")", "try", ":", "miner", ".", "loop", ".", "add_signal_handler", "(", "si...
Concurrently retrieve URLs. :param urls: A set of URLs to concurrently retrieve. :type urls: iterable :param params: (optional) The URL parameters to send with each request. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
[ "Concurrently", "retrieve", "URLs", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/api.py#L59-L79
jjjake/iamine
iamine/api.py
mine_items
def mine_items(identifiers, params=None, callback=None, **kwargs):
    r"""Concurrently retrieve metadata from Archive.org items.

    :param identifiers: A set of Archive.org item identifiers to mine.
    :type identifiers: iterable

    :param params: (optional) The URL parameters to send with each request.
    :type params: dict

    :param callback: (optional) A callback function to be called on each
                     :py:class:`aiohttp.client.ClientResponse`.

    :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
    """
    miner = ItemMiner(**kwargs)
    try:
        miner.loop.run_until_complete(miner.mine_items(identifiers, params, callback))
    except RuntimeError:
        # NOTE(review): unlike the sibling miners this closes the loop on
        # RuntimeError instead of passing, and installs no SIGINT handler --
        # presumably intentional, but worth confirming.
        miner.loop.close()
python
def mine_items(identifiers, params=None, callback=None, **kwargs): """Concurrently retrieve metadata from Archive.org items. :param identifiers: A set of Archive.org item identifiers to mine. :type identifiers: iterable :param params: (optional) The URL parameters to send with each request. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes. """ miner = ItemMiner(**kwargs) try: miner.loop.run_until_complete(miner.mine_items(identifiers, params, callback)) except RuntimeError: miner.loop.close()
[ "def", "mine_items", "(", "identifiers", ",", "params", "=", "None", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "miner", "=", "ItemMiner", "(", "*", "*", "kwargs", ")", "try", ":", "miner", ".", "loop", ".", "run_until_complete", ...
Concurrently retrieve metadata from Archive.org items. :param identifiers: A set of Archive.org item identifiers to mine. :type identifiers: iterable :param params: (optional) The URL parameters to send with each request. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
[ "Concurrently", "retrieve", "metadata", "from", "Archive", ".", "org", "items", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/api.py#L82-L101
jjjake/iamine
iamine/api.py
configure
def configure(username=None, password=None, overwrite=None, config_file=None):
    """Configure IA Mine with your Archive.org credentials."""
    # Prompt interactively for any credential not supplied by the caller.
    if not username:
        username = input('Email address: ')
    if not password:
        password = getpass('Password: ')
    saved_path = write_config_file(username, password, overwrite, config_file)
    print('\nConfig saved to: {}'.format(saved_path))
python
def configure(username=None, password=None, overwrite=None, config_file=None): """Configure IA Mine with your Archive.org credentials.""" username = input('Email address: ') if not username else username password = getpass('Password: ') if not password else password _config_file = write_config_file(username, password, overwrite, config_file) print('\nConfig saved to: {}'.format(_config_file))
[ "def", "configure", "(", "username", "=", "None", ",", "password", "=", "None", ",", "overwrite", "=", "None", ",", "config_file", "=", "None", ")", ":", "username", "=", "input", "(", "'Email address: '", ")", "if", "not", "username", "else", "username", ...
Configure IA Mine with your Archive.org credentials.
[ "Configure", "IA", "Mine", "with", "your", "Archive", ".", "org", "credentials", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/api.py#L104-L109
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.__get_time_range
def __get_time_range(self, startDate, endDate):
    """Return a (startDate, endDate) pair of date strings.

    Missing bounds default to last week's Monday (start) and the Friday
    four days later (end).
    """
    today = date.today()
    # Monday of the previous calendar week.
    default_start = today - timedelta(days=today.weekday(), weeks=1)
    default_end = default_start + timedelta(days=4)
    if not startDate:
        startDate = str(default_start)
    if not endDate:
        endDate = str(default_end)
    return startDate, endDate
python
def __get_time_range(self, startDate, endDate): """Return time range """ today = date.today() start_date = today - timedelta(days=today.weekday(), weeks=1) end_date = start_date + timedelta(days=4) startDate = startDate if startDate else str(start_date) endDate = endDate if endDate else str(end_date) return startDate, endDate
[ "def", "__get_time_range", "(", "self", ",", "startDate", ",", "endDate", ")", ":", "today", "=", "date", ".", "today", "(", ")", "start_date", "=", "today", "-", "timedelta", "(", "days", "=", "today", ".", "weekday", "(", ")", ",", "weeks", "=", "1...
Return time range
[ "Return", "time", "range" ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L23-L33
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_current_info
def get_current_info(self, symbolList, columns=None):
    """get_current_info() uses the yahoo.finance.quotes datatable to get all
    of the stock information presented in the main table on a typical stock
    page and a bunch of data from the key statistics page.
    """
    query = self.select('yahoo.finance.quotes', columns)
    return query.where(['symbol', 'in', symbolList])
python
def get_current_info(self, symbolList, columns=None): """get_current_info() uses the yahoo.finance.quotes datatable to get all of the stock information presented in the main table on a typical stock page and a bunch of data from the key statistics page. """ response = self.select('yahoo.finance.quotes',columns).where(['symbol','in',symbolList]) return response
[ "def", "get_current_info", "(", "self", ",", "symbolList", ",", "columns", "=", "None", ")", ":", "response", "=", "self", ".", "select", "(", "'yahoo.finance.quotes'", ",", "columns", ")", ".", "where", "(", "[", "'symbol'", ",", "'in'", ",", "symbolList"...
get_current_info() uses the yahoo.finance.quotes datatable to get all of the stock information presented in the main table on a typical stock page and a bunch of data from the key statistics page.
[ "get_current_info", "()", "uses", "the", "yahoo", ".", "finance", ".", "quotes", "datatable", "to", "get", "all", "of", "the", "stock", "information", "presented", "in", "the", "main", "table", "on", "a", "typical", "stock", "page", "and", "a", "bunch", "o...
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L35-L40
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_news_feed
def get_news_feed(self, symbol):
    """get_news_feed() uses the rss data table to get rss feeds under the
    Headlines and Financial Blogs headings on a typical stock page.
    """
    feed_url = 'http://finance.yahoo.com/rss/headline?s={0}'.format(symbol)
    query = self.select('rss', ['title', 'link', 'description'], limit=2)
    return query.where(['url', '=', feed_url])
python
def get_news_feed(self, symbol): """get_news_feed() uses the rss data table to get rss feeds under the Headlines and Financial Blogs headings on a typical stock page. """ rss_url='http://finance.yahoo.com/rss/headline?s={0}'.format(symbol) response = self.select('rss',['title','link','description'],limit=2).where(['url','=',rss_url]) return response
[ "def", "get_news_feed", "(", "self", ",", "symbol", ")", ":", "rss_url", "=", "'http://finance.yahoo.com/rss/headline?s={0}'", ".", "format", "(", "symbol", ")", "response", "=", "self", ".", "select", "(", "'rss'", ",", "[", "'title'", ",", "'link'", ",", "...
get_news_feed() uses the rss data table to get rss feeds under the Headlines and Financial Blogs headings on a typical stock page.
[ "get_news_feed", "()", "uses", "the", "rss", "data", "table", "to", "get", "rss", "feeds", "under", "the", "Headlines", "and", "Financial", "Blogs", "headings", "on", "a", "typical", "stock", "page", "." ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L42-L47
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_historical_info
def get_historical_info(self, symbol, items=None, startDate=None, endDate=None, limit=None):
    """get_historical_info() uses the csv datatable to retrieve all
    available historical data on a typical historical prices page.
    """
    # Fill in default start/end dates when the caller omits them.
    startDate, endDate = self.__get_time_range(startDate, endDate)
    query = self.select('yahoo.finance.historicaldata', items, limit)
    return query.where(
        ['symbol', '=', symbol],
        ['startDate', '=', startDate],
        ['endDate', '=', endDate],
    )
python
def get_historical_info(self, symbol,items=None, startDate=None, endDate=None, limit=None): """get_historical_info() uses the csv datatable to retrieve all available historical data on a typical historical prices page """ startDate, endDate = self.__get_time_range(startDate, endDate) response = self.select('yahoo.finance.historicaldata',items,limit).where(['symbol','=',symbol],['startDate','=',startDate],['endDate','=',endDate]) return response
[ "def", "get_historical_info", "(", "self", ",", "symbol", ",", "items", "=", "None", ",", "startDate", "=", "None", ",", "endDate", "=", "None", ",", "limit", "=", "None", ")", ":", "startDate", ",", "endDate", "=", "self", ".", "__get_time_range", "(", ...
get_historical_info() uses the csv datatable to retrieve all available historical data on a typical historical prices page
[ "get_historical_info", "()", "uses", "the", "csv", "datatable", "to", "retrieve", "all", "available", "historical", "data", "on", "a", "typical", "historical", "prices", "page" ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L49-L54
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_options_info
def get_options_info(self, symbol, items=None, expiration=''):
    """get_options_info() uses the yahoo.finance.options table to retrieve
    call and put options from the options page.

    When `expiration` is empty, no expiration filter is applied.
    """
    response = self.select('yahoo.finance.options',items).where(['symbol','=',symbol],[] if not expiration else ['expiration','=',expiration])
    return response
python
def get_options_info(self, symbol, items=None, expiration=''): """get_options_data() uses the yahoo.finance.options table to retrieve call and put options from the options page. """ response = self.select('yahoo.finance.options',items).where(['symbol','=',symbol],[] if not expiration else ['expiration','=',expiration]) return response
[ "def", "get_options_info", "(", "self", ",", "symbol", ",", "items", "=", "None", ",", "expiration", "=", "''", ")", ":", "response", "=", "self", ".", "select", "(", "'yahoo.finance.options'", ",", "items", ")", ".", "where", "(", "[", "'symbol'", ",", ...
get_options_data() uses the yahoo.finance.options table to retrieve call and put options from the options page.
[ "get_options_data", "()", "uses", "the", "yahoo", ".", "finance", ".", "options", "table", "to", "retrieve", "call", "and", "put", "options", "from", "the", "options", "page", "." ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L56-L60
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_industry_index
def get_industry_index(self, index_id, items=None):
    """Retrieves all symbols that belong to an industry."""
    query = self.select('yahoo.finance.industry', items)
    return query.where(['id', '=', index_id])
python
def get_industry_index(self, index_id,items=None): """retrieves all symbols that belong to an industry. """ response = self.select('yahoo.finance.industry',items).where(['id','=',index_id]) return response
[ "def", "get_industry_index", "(", "self", ",", "index_id", ",", "items", "=", "None", ")", ":", "response", "=", "self", ".", "select", "(", "'yahoo.finance.industry'", ",", "items", ")", ".", "where", "(", "[", "'id'", ",", "'='", ",", "index_id", "]", ...
retrieves all symbols that belong to an industry.
[ "retrieves", "all", "symbols", "that", "belong", "to", "an", "industry", "." ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L68-L72
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_xchange_rate
def get_xchange_rate(self, pairs, items=None):
    """Retrieves currency exchange rate data for given pair(s).

    Accepts both where pair='eurusd, gbpusd'
    and where pair in ('eurusd', 'gpbusd, usdaud')
    """
    query = self.select('yahoo.finance.xchange', items)
    return query.where(['pair', 'in', pairs])
python
def get_xchange_rate(self, pairs, items=None): """Retrieves currency exchange rate data for given pair(s). Accepts both where pair='eurusd, gbpusd' and where pair in ('eurusd', 'gpbusd, usdaud') """ response = self.select('yahoo.finance.xchange', items).where(['pair', 'in', pairs]) return response
[ "def", "get_xchange_rate", "(", "self", ",", "pairs", ",", "items", "=", "None", ")", ":", "response", "=", "self", ".", "select", "(", "'yahoo.finance.xchange'", ",", "items", ")", ".", "where", "(", "[", "'pair'", ",", "'in'", ",", "pairs", "]", ")",...
Retrieves currency exchange rate data for given pair(s). Accepts both where pair='eurusd, gbpusd' and where pair in ('eurusd', 'gpbusd, usdaud')
[ "Retrieves", "currency", "exchange", "rate", "data", "for", "given", "pair", "(", "s", ")", ".", "Accepts", "both", "where", "pair", "=", "eurusd", "gbpusd", "and", "where", "pair", "in", "(", "eurusd", "gpbusd", "usdaud", ")" ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L74-L79
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_dividendhistory
def get_dividendhistory(self, symbol, startDate, endDate, items=None): """Retrieves divident history """ startDate, endDate = self.__get_time_range(startDate, endDate) response = self.select('yahoo.finance.dividendhistory', items).where(['symbol', '=', symbol], ['startDate', '=', startDate], ['endDate', '=', endDate]) return response
python
def get_dividendhistory(self, symbol, startDate, endDate, items=None): """Retrieves divident history """ startDate, endDate = self.__get_time_range(startDate, endDate) response = self.select('yahoo.finance.dividendhistory', items).where(['symbol', '=', symbol], ['startDate', '=', startDate], ['endDate', '=', endDate]) return response
[ "def", "get_dividendhistory", "(", "self", ",", "symbol", ",", "startDate", ",", "endDate", ",", "items", "=", "None", ")", ":", "startDate", ",", "endDate", "=", "self", ".", "__get_time_range", "(", "startDate", ",", "endDate", ")", "response", "=", "sel...
Retrieves divident history
[ "Retrieves", "divident", "history" ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L81-L86
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
StockRetriever.get_symbols
def get_symbols(self, name): """Retrieves all symbols belonging to a company """ url = "http://autoc.finance.yahoo.com/autoc?query={0}&callback=YAHOO.Finance.SymbolSuggest.ssCallback".format(name) response = requests.get(url) json_data = re.match("YAHOO\.Finance\.SymbolSuggest.ssCallback\((.*)\)", response.text) try: json_data = json_data.groups()[0] except (Exception,) as e: print(e) json_data = '{"results": "Webservice seems to be down"}' return type('response', (requests.Response,),{ 'text' : json_data, 'content': json_data.encode(), 'status_code': response.status_code, 'reason': response.reason, 'encoding': response.encoding, 'apparent_encoding': response.apparent_encoding, 'cookies': response.cookies, 'headers': response.headers, 'json': lambda : json.loads(json_data), 'url': response.url })
python
def get_symbols(self, name): """Retrieves all symbols belonging to a company """ url = "http://autoc.finance.yahoo.com/autoc?query={0}&callback=YAHOO.Finance.SymbolSuggest.ssCallback".format(name) response = requests.get(url) json_data = re.match("YAHOO\.Finance\.SymbolSuggest.ssCallback\((.*)\)", response.text) try: json_data = json_data.groups()[0] except (Exception,) as e: print(e) json_data = '{"results": "Webservice seems to be down"}' return type('response', (requests.Response,),{ 'text' : json_data, 'content': json_data.encode(), 'status_code': response.status_code, 'reason': response.reason, 'encoding': response.encoding, 'apparent_encoding': response.apparent_encoding, 'cookies': response.cookies, 'headers': response.headers, 'json': lambda : json.loads(json_data), 'url': response.url })
[ "def", "get_symbols", "(", "self", ",", "name", ")", ":", "url", "=", "\"http://autoc.finance.yahoo.com/autoc?query={0}&callback=YAHOO.Finance.SymbolSuggest.ssCallback\"", ".", "format", "(", "name", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "json...
Retrieves all symbols belonging to a company
[ "Retrieves", "all", "symbols", "belonging", "to", "a", "company" ]
train
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L94-L119
etalab/cada
setup.py
pip
def pip(filename): '''Parse pip requirement file and transform it to setuptools requirements''' requirements = [] for line in open(os.path.join('requirements', filename)).readlines(): match = RE_REQUIREMENT.match(line) if match: requirements.extend(pip(match.group('filename'))) else: requirements.append(line) return requirements
python
def pip(filename): '''Parse pip requirement file and transform it to setuptools requirements''' requirements = [] for line in open(os.path.join('requirements', filename)).readlines(): match = RE_REQUIREMENT.match(line) if match: requirements.extend(pip(match.group('filename'))) else: requirements.append(line) return requirements
[ "def", "pip", "(", "filename", ")", ":", "requirements", "=", "[", "]", "for", "line", "in", "open", "(", "os", ".", "path", ".", "join", "(", "'requirements'", ",", "filename", ")", ")", ".", "readlines", "(", ")", ":", "match", "=", "RE_REQUIREMENT...
Parse pip requirement file and transform it to setuptools requirements
[ "Parse", "pip", "requirement", "file", "and", "transform", "it", "to", "setuptools", "requirements" ]
train
https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/setup.py#L38-L47
jasonbot/arcrest
arcrest/geometry.py
pointlist
def pointlist(points, sr): """Convert a list of the form [[x, y] ...] to a list of Point instances with the given x, y coordinates.""" assert all(isinstance(pt, Point) or len(pt) == 2 for pt in points), "Point(s) not in [x, y] form" return [coord if isinstance(coord, Point) else Point(coord[0], coord[1], sr) for coord in points]
python
def pointlist(points, sr): """Convert a list of the form [[x, y] ...] to a list of Point instances with the given x, y coordinates.""" assert all(isinstance(pt, Point) or len(pt) == 2 for pt in points), "Point(s) not in [x, y] form" return [coord if isinstance(coord, Point) else Point(coord[0], coord[1], sr) for coord in points]
[ "def", "pointlist", "(", "points", ",", "sr", ")", ":", "assert", "all", "(", "isinstance", "(", "pt", ",", "Point", ")", "or", "len", "(", "pt", ")", "==", "2", "for", "pt", "in", "points", ")", ",", "\"Point(s) not in [x, y] form\"", "return", "[", ...
Convert a list of the form [[x, y] ...] to a list of Point instances with the given x, y coordinates.
[ "Convert", "a", "list", "of", "the", "form", "[[", "x", "y", "]", "...", "]", "to", "a", "list", "of", "Point", "instances", "with", "the", "given", "x", "y", "coordinates", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/geometry.py#L10-L17
jasonbot/arcrest
arcrest/geometry.py
fromJson
def fromJson(struct, attributes=None): "Convert a JSON struct to a Geometry based on its structure" if isinstance(struct, basestring): struct = json.loads(struct) indicative_attributes = { 'x': Point, 'wkid': SpatialReference, 'paths': Polyline, 'rings': Polygon, 'points': Multipoint, 'xmin': Envelope } # bbox string if isinstance(struct, basestring) and len(struct.split(',')) == 4: return Envelope(*map(float, struct.split(','))) # Look for telltale attributes in the dict if isinstance(struct, dict): for key, cls in indicative_attributes.iteritems(): if key in struct: ret = cls.fromJson(dict((str(key), value) for (key, value) in struct.iteritems())) if attributes: ret.attributes = dict((str(key.lower()), val) for (key, val) in attributes.iteritems()) return ret raise ValueError("Unconvertible to geometry")
python
def fromJson(struct, attributes=None): "Convert a JSON struct to a Geometry based on its structure" if isinstance(struct, basestring): struct = json.loads(struct) indicative_attributes = { 'x': Point, 'wkid': SpatialReference, 'paths': Polyline, 'rings': Polygon, 'points': Multipoint, 'xmin': Envelope } # bbox string if isinstance(struct, basestring) and len(struct.split(',')) == 4: return Envelope(*map(float, struct.split(','))) # Look for telltale attributes in the dict if isinstance(struct, dict): for key, cls in indicative_attributes.iteritems(): if key in struct: ret = cls.fromJson(dict((str(key), value) for (key, value) in struct.iteritems())) if attributes: ret.attributes = dict((str(key.lower()), val) for (key, val) in attributes.iteritems()) return ret raise ValueError("Unconvertible to geometry")
[ "def", "fromJson", "(", "struct", ",", "attributes", "=", "None", ")", ":", "if", "isinstance", "(", "struct", ",", "basestring", ")", ":", "struct", "=", "json", ".", "loads", "(", "struct", ")", "indicative_attributes", "=", "{", "'x'", ":", "Point", ...
Convert a JSON struct to a Geometry based on its structure
[ "Convert", "a", "JSON", "struct", "to", "a", "Geometry", "based", "on", "its", "structure" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/geometry.py#L573-L599
jasonbot/arcrest
arcrest/geometry.py
fromGeoJson
def fromGeoJson(struct, attributes=None): "Convert a GeoJSON-like struct to a Geometry based on its structure" if isinstance(struct, basestring): struct = json.loads(struct) type_map = { 'Point': Point, 'MultiLineString': Polyline, 'LineString': Polyline, 'Polygon': Polygon, 'MultiPolygon': Polygon, 'MultiPoint': Multipoint, 'Box': Envelope } if struct['type'] == "Feature": return fromGeoJson(struct, struct.get('properties', None)) elif struct['type'] == "FeatureCollection": sr = None if 'crs' in struct: sr = SpatialReference(struct['crs']['properties']['code']) members = map(fromGeoJson, struct['members']) for member in members: member.spatialReference = sr return members else: return map(fromGeoJson, struct['members']) elif struct['type'] in type_map and hasattr(type_map[struct['type']], 'fromGeoJson'): instances = type_map[struct['type']].fromGeoJson(struct) i = [] assert instances is not None, "GeoJson conversion returned a Null geom" for instance in instances: if 'properties' in struct: instance.attributes = struct['properties'].copy() if '@esri.sr' in instance.attributes: instance.spatialReference = SpatialReference.fromJson( instance.attributes['@esri.sr']) del instance.attributes['@esri.sr'] if attributes: if not hasattr(instance, 'attributes'): instance.attributes = {} for k, v in attributes.iteritems(): instance.attributes[k] = v i.append(instance) if i: if len(i) > 1: return i return i[0] raise ValueError("Unconvertible to geometry")
python
def fromGeoJson(struct, attributes=None): "Convert a GeoJSON-like struct to a Geometry based on its structure" if isinstance(struct, basestring): struct = json.loads(struct) type_map = { 'Point': Point, 'MultiLineString': Polyline, 'LineString': Polyline, 'Polygon': Polygon, 'MultiPolygon': Polygon, 'MultiPoint': Multipoint, 'Box': Envelope } if struct['type'] == "Feature": return fromGeoJson(struct, struct.get('properties', None)) elif struct['type'] == "FeatureCollection": sr = None if 'crs' in struct: sr = SpatialReference(struct['crs']['properties']['code']) members = map(fromGeoJson, struct['members']) for member in members: member.spatialReference = sr return members else: return map(fromGeoJson, struct['members']) elif struct['type'] in type_map and hasattr(type_map[struct['type']], 'fromGeoJson'): instances = type_map[struct['type']].fromGeoJson(struct) i = [] assert instances is not None, "GeoJson conversion returned a Null geom" for instance in instances: if 'properties' in struct: instance.attributes = struct['properties'].copy() if '@esri.sr' in instance.attributes: instance.spatialReference = SpatialReference.fromJson( instance.attributes['@esri.sr']) del instance.attributes['@esri.sr'] if attributes: if not hasattr(instance, 'attributes'): instance.attributes = {} for k, v in attributes.iteritems(): instance.attributes[k] = v i.append(instance) if i: if len(i) > 1: return i return i[0] raise ValueError("Unconvertible to geometry")
[ "def", "fromGeoJson", "(", "struct", ",", "attributes", "=", "None", ")", ":", "if", "isinstance", "(", "struct", ",", "basestring", ")", ":", "struct", "=", "json", ".", "loads", "(", "struct", ")", "type_map", "=", "{", "'Point'", ":", "Point", ",", ...
Convert a GeoJSON-like struct to a Geometry based on its structure
[ "Convert", "a", "GeoJSON", "-", "like", "struct", "to", "a", "Geometry", "based", "on", "its", "structure" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/geometry.py#L601-L648
jasonbot/arcrest
arcrest/geometry.py
SpatialReference.name
def name(): "Get/view the name for the well known ID of a Projection" if self.wkid in projected: return projected[self.wkid] elif self.wkid in geographic: return geographic[self.wkid] else: raise KeyError("Not a known WKID.")
python
def name(): "Get/view the name for the well known ID of a Projection" if self.wkid in projected: return projected[self.wkid] elif self.wkid in geographic: return geographic[self.wkid] else: raise KeyError("Not a known WKID.")
[ "def", "name", "(", ")", ":", "if", "self", ".", "wkid", "in", "projected", ":", "return", "projected", "[", "self", ".", "wkid", "]", "elif", "self", ".", "wkid", "in", "geographic", ":", "return", "geographic", "[", "self", ".", "wkid", "]", "else"...
Get/view the name for the well known ID of a Projection
[ "Get", "/", "view", "the", "name", "for", "the", "well", "known", "ID", "of", "a", "Projection" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/geometry.py#L130-L137
jasonbot/arcrest
arcrest/geometry.py
Polygon.contains
def contains(self, pt): "Tests if the provided point is in the polygon." if isinstance(pt, Point): ptx, pty = pt.x, pt.y assert (self.spatialReference is None or \ self.spatialReference.wkid is None) or \ (pt.spatialReference is None or \ pt.spatialReference.wkid is None) or \ self.spatialReference == pt.spatialReference, \ "Spatial references do not match." else: ptx, pty = pt in_shape = False # Ported nearly line-for-line from the Javascript API for ring in self._json_rings: for idx in range(len(ring)): idxp1 = idx + 1 if idxp1 >= len(ring): idxp1 -= len(ring) pi, pj = ring[idx], ring[idxp1] # Divide-by-zero checks if (pi[1] == pj[1]) and pty >= min((pi[1], pj[1])): if ptx >= max((pi[0], pj[0])): in_shape = not in_shape elif (pi[0] == pj[0]) and pty >= min((pi[0], pj[0])): if ptx >= max((pi[1], pj[1])): in_shape = not in_shape elif (((pi[1] < pty and pj[1] >= pty) or (pj[1] < pty and pi[1] >= pty)) and (pi[0] + (pty - pi[1]) / (pj[1] - pi[1]) * (pj[0] - pi[0]) < ptx)): in_shape = not in_shape return in_shape
python
def contains(self, pt): "Tests if the provided point is in the polygon." if isinstance(pt, Point): ptx, pty = pt.x, pt.y assert (self.spatialReference is None or \ self.spatialReference.wkid is None) or \ (pt.spatialReference is None or \ pt.spatialReference.wkid is None) or \ self.spatialReference == pt.spatialReference, \ "Spatial references do not match." else: ptx, pty = pt in_shape = False # Ported nearly line-for-line from the Javascript API for ring in self._json_rings: for idx in range(len(ring)): idxp1 = idx + 1 if idxp1 >= len(ring): idxp1 -= len(ring) pi, pj = ring[idx], ring[idxp1] # Divide-by-zero checks if (pi[1] == pj[1]) and pty >= min((pi[1], pj[1])): if ptx >= max((pi[0], pj[0])): in_shape = not in_shape elif (pi[0] == pj[0]) and pty >= min((pi[0], pj[0])): if ptx >= max((pi[1], pj[1])): in_shape = not in_shape elif (((pi[1] < pty and pj[1] >= pty) or (pj[1] < pty and pi[1] >= pty)) and (pi[0] + (pty - pi[1]) / (pj[1] - pi[1]) * (pj[0] - pi[0]) < ptx)): in_shape = not in_shape return in_shape
[ "def", "contains", "(", "self", ",", "pt", ")", ":", "if", "isinstance", "(", "pt", ",", "Point", ")", ":", "ptx", ",", "pty", "=", "pt", ".", "x", ",", "pt", ".", "y", "assert", "(", "self", ".", "spatialReference", "is", "None", "or", "self", ...
Tests if the provided point is in the polygon.
[ "Tests", "if", "the", "provided", "point", "is", "in", "the", "polygon", "." ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/geometry.py#L391-L423
jasonbot/arcrest
arcrest/geometry.py
Envelope.bbox
def bbox(self): "Return the envelope as a Bound Box string compatible with (bb) params" return ",".join(str(attr) for attr in (self.xmin, self.ymin, self.xmax, self.ymax))
python
def bbox(self): "Return the envelope as a Bound Box string compatible with (bb) params" return ",".join(str(attr) for attr in (self.xmin, self.ymin, self.xmax, self.ymax))
[ "def", "bbox", "(", "self", ")", ":", "return", "\",\"", ".", "join", "(", "str", "(", "attr", ")", "for", "attr", "in", "(", "self", ".", "xmin", ",", "self", ".", "ymin", ",", "self", ".", "xmax", ",", "self", ".", "ymax", ")", ")" ]
Return the envelope as a Bound Box string compatible with (bb) params
[ "Return", "the", "envelope", "as", "a", "Bound", "Box", "string", "compatible", "with", "(", "bb", ")", "params" ]
train
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/geometry.py#L561-L564
carver/web3utils.py
web3utils/chainstate.py
stalecheck
def stalecheck(web3, **kwargs): ''' Use to require that a function will run only of the blockchain is recently updated. If the chain is old, raise a StaleBlockchain exception Define how stale the chain can be with keyword arguments from datetime.timedelta, like stalecheck(web3, days=2) Turn off the staleness check at runtime with: wrapped_func(..., assertfresh=False) ''' allowable_delay = datetime.timedelta(**kwargs).total_seconds() def decorator(func): def wrapper(*args, assertfresh=True, **kwargs): if assertfresh: last_block = web3.eth.getBlock('latest') if not isfresh(last_block, allowable_delay): raise StaleBlockchain(last_block, allowable_delay) return func(*args, **kwargs) return wrapper return decorator
python
def stalecheck(web3, **kwargs): ''' Use to require that a function will run only of the blockchain is recently updated. If the chain is old, raise a StaleBlockchain exception Define how stale the chain can be with keyword arguments from datetime.timedelta, like stalecheck(web3, days=2) Turn off the staleness check at runtime with: wrapped_func(..., assertfresh=False) ''' allowable_delay = datetime.timedelta(**kwargs).total_seconds() def decorator(func): def wrapper(*args, assertfresh=True, **kwargs): if assertfresh: last_block = web3.eth.getBlock('latest') if not isfresh(last_block, allowable_delay): raise StaleBlockchain(last_block, allowable_delay) return func(*args, **kwargs) return wrapper return decorator
[ "def", "stalecheck", "(", "web3", ",", "*", "*", "kwargs", ")", ":", "allowable_delay", "=", "datetime", ".", "timedelta", "(", "*", "*", "kwargs", ")", ".", "total_seconds", "(", ")", "def", "decorator", "(", "func", ")", ":", "def", "wrapper", "(", ...
Use to require that a function will run only of the blockchain is recently updated. If the chain is old, raise a StaleBlockchain exception Define how stale the chain can be with keyword arguments from datetime.timedelta, like stalecheck(web3, days=2) Turn off the staleness check at runtime with: wrapped_func(..., assertfresh=False)
[ "Use", "to", "require", "that", "a", "function", "will", "run", "only", "of", "the", "blockchain", "is", "recently", "updated", "." ]
train
https://github.com/carver/web3utils.py/blob/81aa6b55f64dc857c604d5d071a37e0de6cd63ab/web3utils/chainstate.py#L10-L32
noxdafox/vminspect
vminspect/vulnscan.py
VulnScanner.scan
def scan(self, concurrency=1): """Iterates over the applications installed within the disk and queries the CVE DB to determine whether they are vulnerable. Concurrency controls the amount of concurrent queries against the CVE DB. For each vulnerable application the method yields a namedtuple: VulnApp(name -> application name version -> application version vulnerabilities) -> list of Vulnerabilities Vulnerability(id -> CVE Id summary) -> brief description of the vulnerability """ self.logger.debug("Scanning FS content.") with ThreadPoolExecutor(max_workers=concurrency) as executor: results = executor.map(self.query_vulnerabilities, self.applications()) for report in results: application, vulnerabilities = report vulnerabilities = list(lookup_vulnerabilities(application.version, vulnerabilities)) if vulnerabilities: yield VulnApp(application.name, application.version, vulnerabilities)
python
def scan(self, concurrency=1): """Iterates over the applications installed within the disk and queries the CVE DB to determine whether they are vulnerable. Concurrency controls the amount of concurrent queries against the CVE DB. For each vulnerable application the method yields a namedtuple: VulnApp(name -> application name version -> application version vulnerabilities) -> list of Vulnerabilities Vulnerability(id -> CVE Id summary) -> brief description of the vulnerability """ self.logger.debug("Scanning FS content.") with ThreadPoolExecutor(max_workers=concurrency) as executor: results = executor.map(self.query_vulnerabilities, self.applications()) for report in results: application, vulnerabilities = report vulnerabilities = list(lookup_vulnerabilities(application.version, vulnerabilities)) if vulnerabilities: yield VulnApp(application.name, application.version, vulnerabilities)
[ "def", "scan", "(", "self", ",", "concurrency", "=", "1", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Scanning FS content.\"", ")", "with", "ThreadPoolExecutor", "(", "max_workers", "=", "concurrency", ")", "as", "executor", ":", "results", "=", ...
Iterates over the applications installed within the disk and queries the CVE DB to determine whether they are vulnerable. Concurrency controls the amount of concurrent queries against the CVE DB. For each vulnerable application the method yields a namedtuple: VulnApp(name -> application name version -> application version vulnerabilities) -> list of Vulnerabilities Vulnerability(id -> CVE Id summary) -> brief description of the vulnerability
[ "Iterates", "over", "the", "applications", "installed", "within", "the", "disk", "and", "queries", "the", "CVE", "DB", "to", "determine", "whether", "they", "are", "vulnerable", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/vulnscan.py#L68-L99
bdastur/spam
pyansible/examples/simple_commands.py
execute_ping
def execute_ping(host_list, remote_user, remote_pass, sudo=False, sudo_user=None, sudo_pass=None): ''' Execute ls on some hosts ''' runner = spam.ansirunner.AnsibleRunner() result, failed_hosts = runner.ansible_perform_operation( host_list=host_list, remote_user=remote_user, remote_pass=remote_pass, sudo=sudo, sudo_pass=sudo_pass, sudo_user=sudo_user, module="ping") print result, failed_hosts dark_hosts = runner.ansible_get_dark_hosts(result) print "dark hosts: ", dark_hosts
python
def execute_ping(host_list, remote_user, remote_pass, sudo=False, sudo_user=None, sudo_pass=None): ''' Execute ls on some hosts ''' runner = spam.ansirunner.AnsibleRunner() result, failed_hosts = runner.ansible_perform_operation( host_list=host_list, remote_user=remote_user, remote_pass=remote_pass, sudo=sudo, sudo_pass=sudo_pass, sudo_user=sudo_user, module="ping") print result, failed_hosts dark_hosts = runner.ansible_get_dark_hosts(result) print "dark hosts: ", dark_hosts
[ "def", "execute_ping", "(", "host_list", ",", "remote_user", ",", "remote_pass", ",", "sudo", "=", "False", ",", "sudo_user", "=", "None", ",", "sudo_pass", "=", "None", ")", ":", "runner", "=", "spam", ".", "ansirunner", ".", "AnsibleRunner", "(", ")", ...
Execute ls on some hosts
[ "Execute", "ls", "on", "some", "hosts" ]
train
https://github.com/bdastur/spam/blob/3c363302412d15bdb391f62bf90348243e456af2/pyansible/examples/simple_commands.py#L11-L28
bdastur/spam
pyansible/examples/simple_commands.py
execute_ls
def execute_ls(host_list, remote_user, remote_pass): ''' Execute any adhoc command on the hosts. ''' runner = spam.ansirunner.AnsibleRunner() result, failed_hosts = runner.ansible_perform_operation( host_list=host_list, remote_user=remote_user, remote_pass=remote_pass, module="command", module_args="ls -1") print "Result: ", result
python
def execute_ls(host_list, remote_user, remote_pass): ''' Execute any adhoc command on the hosts. ''' runner = spam.ansirunner.AnsibleRunner() result, failed_hosts = runner.ansible_perform_operation( host_list=host_list, remote_user=remote_user, remote_pass=remote_pass, module="command", module_args="ls -1") print "Result: ", result
[ "def", "execute_ls", "(", "host_list", ",", "remote_user", ",", "remote_pass", ")", ":", "runner", "=", "spam", ".", "ansirunner", ".", "AnsibleRunner", "(", ")", "result", ",", "failed_hosts", "=", "runner", ".", "ansible_perform_operation", "(", "host_list", ...
Execute any adhoc command on the hosts.
[ "Execute", "any", "adhoc", "command", "on", "the", "hosts", "." ]
train
https://github.com/bdastur/spam/blob/3c363302412d15bdb391f62bf90348243e456af2/pyansible/examples/simple_commands.py#L31-L43
bdastur/spam
pyansible/examples/simple_commands.py
main
def main(): ''' Simple examples ''' args = parse_arguments() if args.askpass: password = getpass.getpass("Password: ") else: password = None if args.asksudopass: sudo = True sudo_pass = getpass.getpass("Sudo password[default ssh password]: ") if len(sudo_pass) == 0: sudo_pass = password sudo_user = 'root' else: sudo = False sudo_pass = None sudo_user = None if not args.username: username = getpass.getuser() else: username = args.username host_list = args.hosts os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False" execute_ping(host_list, username, password, sudo=sudo, sudo_user=sudo_user, sudo_pass=sudo_pass)
python
def main(): ''' Simple examples ''' args = parse_arguments() if args.askpass: password = getpass.getpass("Password: ") else: password = None if args.asksudopass: sudo = True sudo_pass = getpass.getpass("Sudo password[default ssh password]: ") if len(sudo_pass) == 0: sudo_pass = password sudo_user = 'root' else: sudo = False sudo_pass = None sudo_user = None if not args.username: username = getpass.getuser() else: username = args.username host_list = args.hosts os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False" execute_ping(host_list, username, password, sudo=sudo, sudo_user=sudo_user, sudo_pass=sudo_pass)
[ "def", "main", "(", ")", ":", "args", "=", "parse_arguments", "(", ")", "if", "args", ".", "askpass", ":", "password", "=", "getpass", ".", "getpass", "(", "\"Password: \"", ")", "else", ":", "password", "=", "None", "if", "args", ".", "asksudopass", "...
Simple examples
[ "Simple", "examples" ]
train
https://github.com/bdastur/spam/blob/3c363302412d15bdb391f62bf90348243e456af2/pyansible/examples/simple_commands.py#L105-L135
AndrewWalker/ccsyspath
ccsyspath/paths.py
compiler_preprocessor_verbose
def compiler_preprocessor_verbose(compiler, extraflags): """Capture the compiler preprocessor stage in verbose mode """ lines = [] with open(os.devnull, 'r') as devnull: cmd = [compiler, '-E'] cmd += extraflags cmd += ['-', '-v'] p = Popen(cmd, stdin=devnull, stdout=PIPE, stderr=PIPE) p.wait() p.stdout.close() lines = p.stderr.read() lines = lines.decode('utf-8') lines = lines.splitlines() return lines
python
def compiler_preprocessor_verbose(compiler, extraflags): """Capture the compiler preprocessor stage in verbose mode """ lines = [] with open(os.devnull, 'r') as devnull: cmd = [compiler, '-E'] cmd += extraflags cmd += ['-', '-v'] p = Popen(cmd, stdin=devnull, stdout=PIPE, stderr=PIPE) p.wait() p.stdout.close() lines = p.stderr.read() lines = lines.decode('utf-8') lines = lines.splitlines() return lines
[ "def", "compiler_preprocessor_verbose", "(", "compiler", ",", "extraflags", ")", ":", "lines", "=", "[", "]", "with", "open", "(", "os", ".", "devnull", ",", "'r'", ")", "as", "devnull", ":", "cmd", "=", "[", "compiler", ",", "'-E'", "]", "cmd", "+=", ...
Capture the compiler preprocessor stage in verbose mode
[ "Capture", "the", "compiler", "preprocessor", "stage", "in", "verbose", "mode" ]
train
https://github.com/AndrewWalker/ccsyspath/blob/1ba558edcc5eb70efdc0416d0c186f2254bd4398/ccsyspath/paths.py#L6-L20
HDI-Project/RDT
rdt/transformers/number.py
NumberTransformer.transform
def transform(self, col): """Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ out = pd.DataFrame(index=col.index) out[self.col_name] = col.apply(self.get_val, axis=1) if self.subtype == 'int': out[self.col_name] = out[self.col_name].astype(int) return out
python
def transform(self, col): """Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ out = pd.DataFrame(index=col.index) out[self.col_name] = col.apply(self.get_val, axis=1) if self.subtype == 'int': out[self.col_name] = out[self.col_name].astype(int) return out
[ "def", "transform", "(", "self", ",", "col", ")", ":", "out", "=", "pd", ".", "DataFrame", "(", "index", "=", "col", ".", "index", ")", "out", "[", "self", ".", "col_name", "]", "=", "col", ".", "apply", "(", "self", ".", "get_val", ",", "axis", ...
Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
[ "Prepare", "the", "transformer", "to", "convert", "data", "and", "return", "the", "processed", "table", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/number.py#L31-L47
HDI-Project/RDT
rdt/transformers/number.py
NumberTransformer.reverse_transform
def reverse_transform(self, col): """Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ output = pd.DataFrame(index=col.index) output[self.col_name] = col.apply(self.safe_round, axis=1) if self.subtype == 'int': output[self.col_name] = output[self.col_name].astype(int) return output
python
def reverse_transform(self, col): """Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame """ output = pd.DataFrame(index=col.index) output[self.col_name] = col.apply(self.safe_round, axis=1) if self.subtype == 'int': output[self.col_name] = output[self.col_name].astype(int) return output
[ "def", "reverse_transform", "(", "self", ",", "col", ")", ":", "output", "=", "pd", ".", "DataFrame", "(", "index", "=", "col", ".", "index", ")", "output", "[", "self", ".", "col_name", "]", "=", "col", ".", "apply", "(", "self", ".", "safe_round", ...
Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
[ "Converts", "data", "back", "into", "original", "format", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/number.py#L49-L65
HDI-Project/RDT
rdt/transformers/number.py
NumberTransformer.get_val
def get_val(self, x): """Converts to int.""" try: if self.subtype == 'integer': return int(round(x[self.col_name])) else: if np.isnan(x[self.col_name]): return self.default_val return x[self.col_name] except (ValueError, TypeError): return self.default_val
python
def get_val(self, x): """Converts to int.""" try: if self.subtype == 'integer': return int(round(x[self.col_name])) else: if np.isnan(x[self.col_name]): return self.default_val return x[self.col_name] except (ValueError, TypeError): return self.default_val
[ "def", "get_val", "(", "self", ",", "x", ")", ":", "try", ":", "if", "self", ".", "subtype", "==", "'integer'", ":", "return", "int", "(", "round", "(", "x", "[", "self", ".", "col_name", "]", ")", ")", "else", ":", "if", "np", ".", "isnan", "(...
Converts to int.
[ "Converts", "to", "int", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/number.py#L82-L94
HDI-Project/RDT
rdt/transformers/number.py
NumberTransformer.safe_round
def safe_round(self, x): """Returns a converter that takes in a value and turns it into an integer, if necessary. Args: col_name(str): Name of the column. subtype(str): Numeric subtype of the values. Returns: function """ val = x[self.col_name] if np.isposinf(val): val = sys.maxsize elif np.isneginf(val): val = -sys.maxsize if np.isnan(val): val = self.default_val if self.subtype == 'integer': return int(round(val)) return val
python
def safe_round(self, x): """Returns a converter that takes in a value and turns it into an integer, if necessary. Args: col_name(str): Name of the column. subtype(str): Numeric subtype of the values. Returns: function """ val = x[self.col_name] if np.isposinf(val): val = sys.maxsize elif np.isneginf(val): val = -sys.maxsize if np.isnan(val): val = self.default_val if self.subtype == 'integer': return int(round(val)) return val
[ "def", "safe_round", "(", "self", ",", "x", ")", ":", "val", "=", "x", "[", "self", ".", "col_name", "]", "if", "np", ".", "isposinf", "(", "val", ")", ":", "val", "=", "sys", ".", "maxsize", "elif", "np", ".", "isneginf", "(", "val", ")", ":",...
Returns a converter that takes in a value and turns it into an integer, if necessary. Args: col_name(str): Name of the column. subtype(str): Numeric subtype of the values. Returns: function
[ "Returns", "a", "converter", "that", "takes", "in", "a", "value", "and", "turns", "it", "into", "an", "integer", "if", "necessary", "." ]
train
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/number.py#L96-L120
bdastur/spam
pyansible/plugins/rados.py
Rados.rados_df
def rados_df(self, host_list=None, remote_user=None, remote_pass=None): ''' Invoked the rados df command and return output to user ''' result, failed_hosts = self.runner.ansible_perform_operation( host_list=host_list, remote_user=remote_user, remote_pass=remote_pass, module="command", module_args="rados df") parsed_result = self.rados_parse_df(result) return parsed_result
python
def rados_df(self, host_list=None, remote_user=None, remote_pass=None): ''' Invoked the rados df command and return output to user ''' result, failed_hosts = self.runner.ansible_perform_operation( host_list=host_list, remote_user=remote_user, remote_pass=remote_pass, module="command", module_args="rados df") parsed_result = self.rados_parse_df(result) return parsed_result
[ "def", "rados_df", "(", "self", ",", "host_list", "=", "None", ",", "remote_user", "=", "None", ",", "remote_pass", "=", "None", ")", ":", "result", ",", "failed_hosts", "=", "self", ".", "runner", ".", "ansible_perform_operation", "(", "host_list", "=", "...
Invoked the rados df command and return output to user
[ "Invoked", "the", "rados", "df", "command", "and", "return", "output", "to", "user" ]
train
https://github.com/bdastur/spam/blob/3c363302412d15bdb391f62bf90348243e456af2/pyansible/plugins/rados.py#L22-L38
bdastur/spam
pyansible/plugins/rados.py
Rados.rados_parse_df
def rados_parse_df(self, result): ''' Parse the result from ansirunner module and save it as a json object ''' parsed_results = [] HEADING = r".*(pool name) *(category) *(KB) *(objects) *(clones)" + \ " *(degraded) *(unfound) *(rd) *(rd KB) *(wr) *(wr KB)" HEADING_RE = re.compile(HEADING, re.IGNORECASE) dict_keys = ["pool_name", "category", "size_kb", "objects", "clones", "degraded", "unfound", "rd", "rd_kb", "wr", "wr_kb"] if result['contacted'].keys(): for node in result['contacted'].keys(): df_result = {} nodeobj = result['contacted'][node] df_output = nodeobj['stdout'] for line in df_output.splitlines(): print "Line: ", line # Skip the heading line. reobj = HEADING_RE.match(line) if not reobj: row = line.split() if len(row) != len(dict_keys): print "line not match: ", line continue key_count = 0 for column in row: df_result[dict_keys[key_count]] = column key_count += 1 print "df_result: ", df_result parsed_results.append(df_result) nodeobj['parsed_results'] = parsed_results return result
python
def rados_parse_df(self, result): ''' Parse the result from ansirunner module and save it as a json object ''' parsed_results = [] HEADING = r".*(pool name) *(category) *(KB) *(objects) *(clones)" + \ " *(degraded) *(unfound) *(rd) *(rd KB) *(wr) *(wr KB)" HEADING_RE = re.compile(HEADING, re.IGNORECASE) dict_keys = ["pool_name", "category", "size_kb", "objects", "clones", "degraded", "unfound", "rd", "rd_kb", "wr", "wr_kb"] if result['contacted'].keys(): for node in result['contacted'].keys(): df_result = {} nodeobj = result['contacted'][node] df_output = nodeobj['stdout'] for line in df_output.splitlines(): print "Line: ", line # Skip the heading line. reobj = HEADING_RE.match(line) if not reobj: row = line.split() if len(row) != len(dict_keys): print "line not match: ", line continue key_count = 0 for column in row: df_result[dict_keys[key_count]] = column key_count += 1 print "df_result: ", df_result parsed_results.append(df_result) nodeobj['parsed_results'] = parsed_results return result
[ "def", "rados_parse_df", "(", "self", ",", "result", ")", ":", "parsed_results", "=", "[", "]", "HEADING", "=", "r\".*(pool name) *(category) *(KB) *(objects) *(clones)\"", "+", "\" *(degraded) *(unfound) *(rd) *(rd KB) *(wr) *(wr KB)\"", "HEADING_RE", "=", "re", ".", "comp...
Parse the result from ansirunner module and save it as a json object
[ "Parse", "the", "result", "from", "ansirunner", "module", "and", "save", "it", "as", "a", "json", "object" ]
train
https://github.com/bdastur/spam/blob/3c363302412d15bdb391f62bf90348243e456af2/pyansible/plugins/rados.py#L40-L81
erans/fabric-gce-tools
fabric_gce_tools/__init__.py
update_roles_gce
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None): """ Dynamically update fabric's roles by using assigning the tags associated with each machine in Google Compute Engine. use_cache - will store a local cache in ~/.gcetools/ cache_expiration - cache expiration in seconds (default: 1 day) cache_path - the path to store instances data (default: ~/.gcetools/instances) group_name - optional managed instance group to use instead of the global instance pool region - gce region name (such as `us-central1`) for a regional managed instance group zone - gce zone name (such as `us-central1-a`) for a zone managed instance group How to use: - Call 'update_roles_gce' at the end of your fabfile.py (it will run each time you run fabric). - On each function use the regular @roles decorator and set the role to the name of one of the tags associated with the instances you wish to work with """ data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone) roles = _get_roles(data) env.roledefs.update(roles) _data_loaded = True return INSTANCES_CACHE
python
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None): """ Dynamically update fabric's roles by using assigning the tags associated with each machine in Google Compute Engine. use_cache - will store a local cache in ~/.gcetools/ cache_expiration - cache expiration in seconds (default: 1 day) cache_path - the path to store instances data (default: ~/.gcetools/instances) group_name - optional managed instance group to use instead of the global instance pool region - gce region name (such as `us-central1`) for a regional managed instance group zone - gce zone name (such as `us-central1-a`) for a zone managed instance group How to use: - Call 'update_roles_gce' at the end of your fabfile.py (it will run each time you run fabric). - On each function use the regular @roles decorator and set the role to the name of one of the tags associated with the instances you wish to work with """ data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone) roles = _get_roles(data) env.roledefs.update(roles) _data_loaded = True return INSTANCES_CACHE
[ "def", "update_roles_gce", "(", "use_cache", "=", "True", ",", "cache_expiration", "=", "86400", ",", "cache_path", "=", "\"~/.gcetools/instances\"", ",", "group_name", "=", "None", ",", "region", "=", "None", ",", "zone", "=", "None", ")", ":", "data", "=",...
Dynamically update fabric's roles by using assigning the tags associated with each machine in Google Compute Engine. use_cache - will store a local cache in ~/.gcetools/ cache_expiration - cache expiration in seconds (default: 1 day) cache_path - the path to store instances data (default: ~/.gcetools/instances) group_name - optional managed instance group to use instead of the global instance pool region - gce region name (such as `us-central1`) for a regional managed instance group zone - gce zone name (such as `us-central1-a`) for a zone managed instance group How to use: - Call 'update_roles_gce' at the end of your fabfile.py (it will run each time you run fabric). - On each function use the regular @roles decorator and set the role to the name of one of the tags associated with the instances you wish to work with
[ "Dynamically", "update", "fabric", "s", "roles", "by", "using", "assigning", "the", "tags", "associated", "with", "each", "machine", "in", "Google", "Compute", "Engine", "." ]
train
https://github.com/erans/fabric-gce-tools/blob/0c9af7a683db47e203d4e487fa8610da6459ca83/fabric_gce_tools/__init__.py#L205-L228
guilhermef/tornado-eventsource
tornado_eventsource/event_source_client.py
eventsource_connect
def eventsource_connect(url, io_loop=None, callback=None, connect_timeout=None): """Client-side eventsource support. Takes a url and returns a Future whose result is a `EventSourceClient`. """ if io_loop is None: io_loop = IOLoop.current() if isinstance(url, httpclient.HTTPRequest): assert connect_timeout is None request = url # Copy and convert the headers dict/object (see comments in # AsyncHTTPClient.fetch) request.headers = httputil.HTTPHeaders(request.headers) else: request = httpclient.HTTPRequest( url, connect_timeout=connect_timeout, headers=httputil.HTTPHeaders({ "Accept-Encoding": "identity" }) ) request = httpclient._RequestProxy( request, httpclient.HTTPRequest._DEFAULTS) conn = EventSourceClient(io_loop, request) if callback is not None: io_loop.add_future(conn.connect_future, callback) return conn.connect_future
python
def eventsource_connect(url, io_loop=None, callback=None, connect_timeout=None): """Client-side eventsource support. Takes a url and returns a Future whose result is a `EventSourceClient`. """ if io_loop is None: io_loop = IOLoop.current() if isinstance(url, httpclient.HTTPRequest): assert connect_timeout is None request = url # Copy and convert the headers dict/object (see comments in # AsyncHTTPClient.fetch) request.headers = httputil.HTTPHeaders(request.headers) else: request = httpclient.HTTPRequest( url, connect_timeout=connect_timeout, headers=httputil.HTTPHeaders({ "Accept-Encoding": "identity" }) ) request = httpclient._RequestProxy( request, httpclient.HTTPRequest._DEFAULTS) conn = EventSourceClient(io_loop, request) if callback is not None: io_loop.add_future(conn.connect_future, callback) return conn.connect_future
[ "def", "eventsource_connect", "(", "url", ",", "io_loop", "=", "None", ",", "callback", "=", "None", ",", "connect_timeout", "=", "None", ")", ":", "if", "io_loop", "is", "None", ":", "io_loop", "=", "IOLoop", ".", "current", "(", ")", "if", "isinstance"...
Client-side eventsource support. Takes a url and returns a Future whose result is a `EventSourceClient`.
[ "Client", "-", "side", "eventsource", "support", "." ]
train
https://github.com/guilhermef/tornado-eventsource/blob/230ca1f106269ed76749fb97941ee1a74a55051a/tornado_eventsource/event_source_client.py#L149-L177
guilhermef/tornado-eventsource
tornado_eventsource/event_source_client.py
EventSourceClient.handle_stream
def handle_stream(self, message): """ Acts on message reception :param message: string of an incoming message parse all the fields and builds an Event object that is passed to the callback function """ logging.debug("handle_stream(...)") event = Event() for line in message.strip().splitlines(): (field, value) = line.split(":", 1) field = field.strip() if field == "event": event.name = value.lstrip() elif field == "data": value = value.lstrip() if event.data is None: event.data = value else: event.data = "%s\n%s" % (event.data, value) elif field == "id": event.id = value.lstrip() self.last_event_id = event.id elif field == "retry": try: self.retry_timeout = int(value) event.retry = self.retry_timeout logging.info("timeout reset: %s" % (value,)) except ValueError: pass elif field == "": logging.debug("received comment: %s" % (value,)) else: raise Exception("Unknown field !") self.events.append(event)
python
def handle_stream(self, message): """ Acts on message reception :param message: string of an incoming message parse all the fields and builds an Event object that is passed to the callback function """ logging.debug("handle_stream(...)") event = Event() for line in message.strip().splitlines(): (field, value) = line.split(":", 1) field = field.strip() if field == "event": event.name = value.lstrip() elif field == "data": value = value.lstrip() if event.data is None: event.data = value else: event.data = "%s\n%s" % (event.data, value) elif field == "id": event.id = value.lstrip() self.last_event_id = event.id elif field == "retry": try: self.retry_timeout = int(value) event.retry = self.retry_timeout logging.info("timeout reset: %s" % (value,)) except ValueError: pass elif field == "": logging.debug("received comment: %s" % (value,)) else: raise Exception("Unknown field !") self.events.append(event)
[ "def", "handle_stream", "(", "self", ",", "message", ")", ":", "logging", ".", "debug", "(", "\"handle_stream(...)\"", ")", "event", "=", "Event", "(", ")", "for", "line", "in", "message", ".", "strip", "(", ")", ".", "splitlines", "(", ")", ":", "(", ...
Acts on message reception :param message: string of an incoming message parse all the fields and builds an Event object that is passed to the callback function
[ "Acts", "on", "message", "reception", ":", "param", "message", ":", "string", "of", "an", "incoming", "message" ]
train
https://github.com/guilhermef/tornado-eventsource/blob/230ca1f106269ed76749fb97941ee1a74a55051a/tornado_eventsource/event_source_client.py#L110-L146
lukaskubis/crayon
crayon.py
printout
def printout(*args, **kwargs): """ Print function with extra options for formating text in terminals. """ # TODO(Lukas): conflicts with function names color = kwargs.pop('color', {}) style = kwargs.pop('style', {}) prefx = kwargs.pop('prefix', '') suffx = kwargs.pop('suffix', '') ind = kwargs.pop('indent', 0) print_args = [] for arg in args: arg = str(arg) arg = colorize(arg, **color) arg = stylize(arg, **style) arg = prefix(arg, prefx) arg = indent(arg, ind) arg += str(suffx) print_args.append(arg) print(*print_args, **kwargs)
python
def printout(*args, **kwargs): """ Print function with extra options for formating text in terminals. """ # TODO(Lukas): conflicts with function names color = kwargs.pop('color', {}) style = kwargs.pop('style', {}) prefx = kwargs.pop('prefix', '') suffx = kwargs.pop('suffix', '') ind = kwargs.pop('indent', 0) print_args = [] for arg in args: arg = str(arg) arg = colorize(arg, **color) arg = stylize(arg, **style) arg = prefix(arg, prefx) arg = indent(arg, ind) arg += str(suffx) print_args.append(arg) print(*print_args, **kwargs)
[ "def", "printout", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO(Lukas): conflicts with function names", "color", "=", "kwargs", ".", "pop", "(", "'color'", ",", "{", "}", ")", "style", "=", "kwargs", ".", "pop", "(", "'style'", ",", "{", ...
Print function with extra options for formating text in terminals.
[ "Print", "function", "with", "extra", "options", "for", "formating", "text", "in", "terminals", "." ]
train
https://github.com/lukaskubis/crayon/blob/7b6926000e08ad029049419b564e34bc735d0e6c/crayon.py#L34-L56
lukaskubis/crayon
crayon.py
colorize
def colorize(txt, fg=None, bg=None): """ Print escape codes to set the terminal color. fg and bg are indices into the color palette for the foreground and background colors. """ setting = '' setting += _SET_FG.format(fg) if fg else '' setting += _SET_BG.format(bg) if bg else '' return setting + str(txt) + _STYLE_RESET
python
def colorize(txt, fg=None, bg=None): """ Print escape codes to set the terminal color. fg and bg are indices into the color palette for the foreground and background colors. """ setting = '' setting += _SET_FG.format(fg) if fg else '' setting += _SET_BG.format(bg) if bg else '' return setting + str(txt) + _STYLE_RESET
[ "def", "colorize", "(", "txt", ",", "fg", "=", "None", ",", "bg", "=", "None", ")", ":", "setting", "=", "''", "setting", "+=", "_SET_FG", ".", "format", "(", "fg", ")", "if", "fg", "else", "''", "setting", "+=", "_SET_BG", ".", "format", "(", "b...
Print escape codes to set the terminal color. fg and bg are indices into the color palette for the foreground and background colors.
[ "Print", "escape", "codes", "to", "set", "the", "terminal", "color", "." ]
train
https://github.com/lukaskubis/crayon/blob/7b6926000e08ad029049419b564e34bc735d0e6c/crayon.py#L59-L70
lukaskubis/crayon
crayon.py
stylize
def stylize(txt, bold=False, underline=False): """ Changes style of the text. """ setting = '' setting += _SET_BOLD if bold is True else '' setting += _SET_UNDERLINE if underline is True else '' return setting + str(txt) + _STYLE_RESET
python
def stylize(txt, bold=False, underline=False): """ Changes style of the text. """ setting = '' setting += _SET_BOLD if bold is True else '' setting += _SET_UNDERLINE if underline is True else '' return setting + str(txt) + _STYLE_RESET
[ "def", "stylize", "(", "txt", ",", "bold", "=", "False", ",", "underline", "=", "False", ")", ":", "setting", "=", "''", "setting", "+=", "_SET_BOLD", "if", "bold", "is", "True", "else", "''", "setting", "+=", "_SET_UNDERLINE", "if", "underline", "is", ...
Changes style of the text.
[ "Changes", "style", "of", "the", "text", "." ]
train
https://github.com/lukaskubis/crayon/blob/7b6926000e08ad029049419b564e34bc735d0e6c/crayon.py#L73-L81
lukaskubis/crayon
crayon.py
indent
def indent(txt, spacing=4): """ Indent given text using custom spacing, default is set to 4. """ return prefix(str(txt), ''.join([' ' for _ in range(spacing)]))
python
def indent(txt, spacing=4): """ Indent given text using custom spacing, default is set to 4. """ return prefix(str(txt), ''.join([' ' for _ in range(spacing)]))
[ "def", "indent", "(", "txt", ",", "spacing", "=", "4", ")", ":", "return", "prefix", "(", "str", "(", "txt", ")", ",", "''", ".", "join", "(", "[", "' '", "for", "_", "in", "range", "(", "spacing", ")", "]", ")", ")" ]
Indent given text using custom spacing, default is set to 4.
[ "Indent", "given", "text", "using", "custom", "spacing", "default", "is", "set", "to", "4", "." ]
train
https://github.com/lukaskubis/crayon/blob/7b6926000e08ad029049419b564e34bc735d0e6c/crayon.py#L84-L88
lukaskubis/crayon
crayon.py
rgb
def rgb(red, green, blue): """ Calculate the palette index of a color in the 6x6x6 color cube. The red, green and blue arguments may range from 0 to 5. """ for value in (red, green, blue): if value not in range(6): raise ColorError('Value must be within 0-5, was {}.'.format(value)) return 16 + (red * 36) + (green * 6) + blue
python
def rgb(red, green, blue): """ Calculate the palette index of a color in the 6x6x6 color cube. The red, green and blue arguments may range from 0 to 5. """ for value in (red, green, blue): if value not in range(6): raise ColorError('Value must be within 0-5, was {}.'.format(value)) return 16 + (red * 36) + (green * 6) + blue
[ "def", "rgb", "(", "red", ",", "green", ",", "blue", ")", ":", "for", "value", "in", "(", "red", ",", "green", ",", "blue", ")", ":", "if", "value", "not", "in", "range", "(", "6", ")", ":", "raise", "ColorError", "(", "'Value must be within 0-5, was...
Calculate the palette index of a color in the 6x6x6 color cube. The red, green and blue arguments may range from 0 to 5.
[ "Calculate", "the", "palette", "index", "of", "a", "color", "in", "the", "6x6x6", "color", "cube", "." ]
train
https://github.com/lukaskubis/crayon/blob/7b6926000e08ad029049419b564e34bc735d0e6c/crayon.py#L98-L107
fake-name/WebRequest
WebRequest/HeaderParseMonkeyPatch.py
isUTF8Strict
def isUTF8Strict(data): # pragma: no cover - Only used when cchardet is missing. ''' Check if all characters in a bytearray are decodable using UTF-8. ''' try: decoded = data.decode('UTF-8') except UnicodeDecodeError: return False else: for ch in decoded: if 0xD800 <= ord(ch) <= 0xDFFF: return False return True
python
def isUTF8Strict(data): # pragma: no cover - Only used when cchardet is missing. ''' Check if all characters in a bytearray are decodable using UTF-8. ''' try: decoded = data.decode('UTF-8') except UnicodeDecodeError: return False else: for ch in decoded: if 0xD800 <= ord(ch) <= 0xDFFF: return False return True
[ "def", "isUTF8Strict", "(", "data", ")", ":", "# pragma: no cover - Only used when cchardet is missing.", "try", ":", "decoded", "=", "data", ".", "decode", "(", "'UTF-8'", ")", "except", "UnicodeDecodeError", ":", "return", "False", "else", ":", "for", "ch", "in"...
Check if all characters in a bytearray are decodable using UTF-8.
[ "Check", "if", "all", "characters", "in", "a", "bytearray", "are", "decodable", "using", "UTF", "-", "8", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/HeaderParseMonkeyPatch.py#L15-L28
fake-name/WebRequest
WebRequest/HeaderParseMonkeyPatch.py
decode_headers
def decode_headers(header_list): ''' Decode a list of headers. Takes a list of bytestrings, returns a list of unicode strings. The character set for each bytestring is individually decoded. ''' decoded_headers = [] for header in header_list: if cchardet: inferred = cchardet.detect(header) if inferred and inferred['confidence'] > 0.8: # print("Parsing headers!", header) decoded_headers.append(header.decode(inferred['encoding'])) else: decoded_headers.append(header.decode('iso-8859-1')) else: # pragma: no cover # All bytes are < 127 (e.g. ASCII) if all([char & 0x80 == 0 for char in header]): decoded_headers.append(header.decode("us-ascii")) elif isUTF8Strict(header): decoded_headers.append(header.decode("utf-8")) else: decoded_headers.append(header.decode('iso-8859-1')) return decoded_headers
python
def decode_headers(header_list): ''' Decode a list of headers. Takes a list of bytestrings, returns a list of unicode strings. The character set for each bytestring is individually decoded. ''' decoded_headers = [] for header in header_list: if cchardet: inferred = cchardet.detect(header) if inferred and inferred['confidence'] > 0.8: # print("Parsing headers!", header) decoded_headers.append(header.decode(inferred['encoding'])) else: decoded_headers.append(header.decode('iso-8859-1')) else: # pragma: no cover # All bytes are < 127 (e.g. ASCII) if all([char & 0x80 == 0 for char in header]): decoded_headers.append(header.decode("us-ascii")) elif isUTF8Strict(header): decoded_headers.append(header.decode("utf-8")) else: decoded_headers.append(header.decode('iso-8859-1')) return decoded_headers
[ "def", "decode_headers", "(", "header_list", ")", ":", "decoded_headers", "=", "[", "]", "for", "header", "in", "header_list", ":", "if", "cchardet", ":", "inferred", "=", "cchardet", ".", "detect", "(", "header", ")", "if", "inferred", "and", "inferred", ...
Decode a list of headers. Takes a list of bytestrings, returns a list of unicode strings. The character set for each bytestring is individually decoded.
[ "Decode", "a", "list", "of", "headers", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/HeaderParseMonkeyPatch.py#L30-L56
fake-name/WebRequest
WebRequest/HeaderParseMonkeyPatch.py
parse_headers
def parse_headers(fp, _class=http.client.HTTPMessage): """Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. Note: Monkey-patched version to try to more intelligently determine header encoding """ headers = [] while True: line = fp.readline(http.client._MAXLINE + 1) if len(line) > http.client._MAXLINE: raise http.client.LineTooLong("header line") headers.append(line) if len(headers) > http.client._MAXHEADERS: raise HTTPException("got more than %d headers" % http.client._MAXHEADERS) if line in (b'\r\n', b'\n', b''): break decoded_headers = decode_headers(headers) hstring = ''.join(decoded_headers) return email.parser.Parser(_class=_class).parsestr(hstring)
python
def parse_headers(fp, _class=http.client.HTTPMessage): """Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. Note: Monkey-patched version to try to more intelligently determine header encoding """ headers = [] while True: line = fp.readline(http.client._MAXLINE + 1) if len(line) > http.client._MAXLINE: raise http.client.LineTooLong("header line") headers.append(line) if len(headers) > http.client._MAXHEADERS: raise HTTPException("got more than %d headers" % http.client._MAXHEADERS) if line in (b'\r\n', b'\n', b''): break decoded_headers = decode_headers(headers) hstring = ''.join(decoded_headers) return email.parser.Parser(_class=_class).parsestr(hstring)
[ "def", "parse_headers", "(", "fp", ",", "_class", "=", "http", ".", "client", ".", "HTTPMessage", ")", ":", "headers", "=", "[", "]", "while", "True", ":", "line", "=", "fp", ".", "readline", "(", "http", ".", "client", ".", "_MAXLINE", "+", "1", "...
Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. Note: Monkey-patched version to try to more intelligently determine header encoding
[ "Parses", "only", "RFC2822", "headers", "from", "a", "file", "pointer", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/HeaderParseMonkeyPatch.py#L59-L87
cs50/lib50
lib50/_api.py
push
def push(tool, slug, config_loader, prompt=lambda included, excluded: True): """ Push to github.com/org/repo=username/slug if tool exists. Returns username, commit hash """ check_dependencies() org, (included, excluded) = connect(slug, config_loader) with authenticate(org) as user, prepare(tool, slug, user, included): if prompt(included, excluded): return upload(slug, user, tool) else: raise Error(_("No files were submitted."))
python
def push(tool, slug, config_loader, prompt=lambda included, excluded: True): """ Push to github.com/org/repo=username/slug if tool exists. Returns username, commit hash """ check_dependencies() org, (included, excluded) = connect(slug, config_loader) with authenticate(org) as user, prepare(tool, slug, user, included): if prompt(included, excluded): return upload(slug, user, tool) else: raise Error(_("No files were submitted."))
[ "def", "push", "(", "tool", ",", "slug", ",", "config_loader", ",", "prompt", "=", "lambda", "included", ",", "excluded", ":", "True", ")", ":", "check_dependencies", "(", ")", "org", ",", "(", "included", ",", "excluded", ")", "=", "connect", "(", "sl...
Push to github.com/org/repo=username/slug if tool exists. Returns username, commit hash
[ "Push", "to", "github", ".", "com", "/", "org", "/", "repo", "=", "username", "/", "slug", "if", "tool", "exists", ".", "Returns", "username", "commit", "hash" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L42-L55
cs50/lib50
lib50/_api.py
local
def local(tool, slug, config_loader, offline=False): """ Create/update local copy of github.com/org/repo/branch. Returns path to local copy """ # Parse slug slug = Slug(slug, offline=offline) local_path = Path(LOCAL_PATH).expanduser() / slug.org / slug.repo git = Git(f"-C {shlex.quote(str(local_path))}") if not local_path.exists(): _run(Git()(f"init {shlex.quote(str(local_path))}")) _run(git(f"remote add origin https://github.com/{slug.org}/{slug.repo}")) if not offline: # Get latest version of checks _run(git(f"fetch origin {slug.branch}")) # Ensure that local copy of the repo is identical to remote copy _run(git(f"checkout -f -B {slug.branch} origin/{slug.branch}")) _run(git(f"reset --hard HEAD")) problem_path = (local_path / slug.problem).absolute() if not problem_path.exists(): raise InvalidSlugError(_("{} does not exist at {}/{}").format(slug.problem, slug.org, slug.repo)) # Get config try: with open(problem_path / ".cs50.yaml") as f: try: config = config_loader.load(f.read()) except InvalidConfigError: raise InvalidSlugError( _("Invalid slug for {}. Did you mean something else?").format(tool)) except FileNotFoundError: raise InvalidSlugError(_("Invalid slug. Did you mean something else?")) return problem_path
python
def local(tool, slug, config_loader, offline=False): """ Create/update local copy of github.com/org/repo/branch. Returns path to local copy """ # Parse slug slug = Slug(slug, offline=offline) local_path = Path(LOCAL_PATH).expanduser() / slug.org / slug.repo git = Git(f"-C {shlex.quote(str(local_path))}") if not local_path.exists(): _run(Git()(f"init {shlex.quote(str(local_path))}")) _run(git(f"remote add origin https://github.com/{slug.org}/{slug.repo}")) if not offline: # Get latest version of checks _run(git(f"fetch origin {slug.branch}")) # Ensure that local copy of the repo is identical to remote copy _run(git(f"checkout -f -B {slug.branch} origin/{slug.branch}")) _run(git(f"reset --hard HEAD")) problem_path = (local_path / slug.problem).absolute() if not problem_path.exists(): raise InvalidSlugError(_("{} does not exist at {}/{}").format(slug.problem, slug.org, slug.repo)) # Get config try: with open(problem_path / ".cs50.yaml") as f: try: config = config_loader.load(f.read()) except InvalidConfigError: raise InvalidSlugError( _("Invalid slug for {}. Did you mean something else?").format(tool)) except FileNotFoundError: raise InvalidSlugError(_("Invalid slug. Did you mean something else?")) return problem_path
[ "def", "local", "(", "tool", ",", "slug", ",", "config_loader", ",", "offline", "=", "False", ")", ":", "# Parse slug", "slug", "=", "Slug", "(", "slug", ",", "offline", "=", "offline", ")", "local_path", "=", "Path", "(", "LOCAL_PATH", ")", ".", "expa...
Create/update local copy of github.com/org/repo/branch. Returns path to local copy
[ "Create", "/", "update", "local", "copy", "of", "github", ".", "com", "/", "org", "/", "repo", "/", "branch", ".", "Returns", "path", "to", "local", "copy" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L58-L97
cs50/lib50
lib50/_api.py
working_area
def working_area(files, name=""): """ Copy all files to a temporary directory (the working area) Optionally names the working area name Returns path to the working area """ with tempfile.TemporaryDirectory() as dir: dir = Path(Path(dir) / name) dir.mkdir(exist_ok=True) for f in files: dest = (dir / f).absolute() dest.parent.mkdir(parents=True, exist_ok=True) shutil.copy(f, dest) yield dir
python
def working_area(files, name=""): """ Copy all files to a temporary directory (the working area) Optionally names the working area name Returns path to the working area """ with tempfile.TemporaryDirectory() as dir: dir = Path(Path(dir) / name) dir.mkdir(exist_ok=True) for f in files: dest = (dir / f).absolute() dest.parent.mkdir(parents=True, exist_ok=True) shutil.copy(f, dest) yield dir
[ "def", "working_area", "(", "files", ",", "name", "=", "\"\"", ")", ":", "with", "tempfile", ".", "TemporaryDirectory", "(", ")", "as", "dir", ":", "dir", "=", "Path", "(", "Path", "(", "dir", ")", "/", "name", ")", "dir", ".", "mkdir", "(", "exist...
Copy all files to a temporary directory (the working area) Optionally names the working area name Returns path to the working area
[ "Copy", "all", "files", "to", "a", "temporary", "directory", "(", "the", "working", "area", ")", "Optionally", "names", "the", "working", "area", "name", "Returns", "path", "to", "the", "working", "area" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L101-L115
cs50/lib50
lib50/_api.py
cd
def cd(dest): """ Temporarily cd into a directory""" origin = os.getcwd() try: os.chdir(dest) yield dest finally: os.chdir(origin)
python
def cd(dest): """ Temporarily cd into a directory""" origin = os.getcwd() try: os.chdir(dest) yield dest finally: os.chdir(origin)
[ "def", "cd", "(", "dest", ")", ":", "origin", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "chdir", "(", "dest", ")", "yield", "dest", "finally", ":", "os", ".", "chdir", "(", "origin", ")" ]
Temporarily cd into a directory
[ "Temporarily", "cd", "into", "a", "directory" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L119-L126
cs50/lib50
lib50/_api.py
files
def files(patterns, require_tags=("require",), include_tags=("include",), exclude_tags=("exclude",), root=".", always_exclude=("**/.git*", "**/.lfs*", "**/.c9*", "**/.~c9*")): """ Takes a list of lib50._config.TaggedValue returns which files should be included and excluded from `root`. Any pattern tagged with a tag from include_tags will be included from require_tags can only be a file, that will then be included. MissingFilesError is raised if missing from exclude_tags will be excluded Any pattern in always_exclude will always be excluded. """ require_tags = list(require_tags) include_tags = list(include_tags) exclude_tags = list(exclude_tags) # Ensure every tag starts with ! for tags in [require_tags, include_tags, exclude_tags]: for i, tag in enumerate(tags): tags[i] = tag if tag.startswith("!") else "!" + tag with cd(root): # Include everything by default included = _glob("*") excluded = set() if patterns: missing_files = [] # Per line in files for pattern in patterns: # Include all files that are tagged with !require if pattern.tag in require_tags: file = str(Path(pattern.value)) if not Path(file).exists(): missing_files.append(file) else: try: excluded.remove(file) except KeyError: pass else: included.add(file) # Include all files that are tagged with !include elif pattern.tag in include_tags: new_included = _glob(pattern.value) excluded -= new_included included.update(new_included) # Exclude all files that are tagged with !exclude elif pattern.tag in exclude_tags: new_excluded = _glob(pattern.value) included -= new_excluded excluded.update(new_excluded) if missing_files: raise MissingFilesError(missing_files) # Exclude all files that match a pattern from always_exclude for line in always_exclude: included -= _glob(line) # Exclude any files that are not valid utf8 invalid = set() for file in included: try: file.encode("utf8") except UnicodeEncodeError: excluded.add(file.encode("utf8", "replace").decode()) invalid.add(file) included -= invalid return included, 
excluded
python
def files(patterns, require_tags=("require",), include_tags=("include",), exclude_tags=("exclude",), root=".", always_exclude=("**/.git*", "**/.lfs*", "**/.c9*", "**/.~c9*")): """ Takes a list of lib50._config.TaggedValue returns which files should be included and excluded from `root`. Any pattern tagged with a tag from include_tags will be included from require_tags can only be a file, that will then be included. MissingFilesError is raised if missing from exclude_tags will be excluded Any pattern in always_exclude will always be excluded. """ require_tags = list(require_tags) include_tags = list(include_tags) exclude_tags = list(exclude_tags) # Ensure every tag starts with ! for tags in [require_tags, include_tags, exclude_tags]: for i, tag in enumerate(tags): tags[i] = tag if tag.startswith("!") else "!" + tag with cd(root): # Include everything by default included = _glob("*") excluded = set() if patterns: missing_files = [] # Per line in files for pattern in patterns: # Include all files that are tagged with !require if pattern.tag in require_tags: file = str(Path(pattern.value)) if not Path(file).exists(): missing_files.append(file) else: try: excluded.remove(file) except KeyError: pass else: included.add(file) # Include all files that are tagged with !include elif pattern.tag in include_tags: new_included = _glob(pattern.value) excluded -= new_included included.update(new_included) # Exclude all files that are tagged with !exclude elif pattern.tag in exclude_tags: new_excluded = _glob(pattern.value) included -= new_excluded excluded.update(new_excluded) if missing_files: raise MissingFilesError(missing_files) # Exclude all files that match a pattern from always_exclude for line in always_exclude: included -= _glob(line) # Exclude any files that are not valid utf8 invalid = set() for file in included: try: file.encode("utf8") except UnicodeEncodeError: excluded.add(file.encode("utf8", "replace").decode()) invalid.add(file) included -= invalid return included, 
excluded
[ "def", "files", "(", "patterns", ",", "require_tags", "=", "(", "\"require\"", ",", ")", ",", "include_tags", "=", "(", "\"include\"", ",", ")", ",", "exclude_tags", "=", "(", "\"exclude\"", ",", ")", ",", "root", "=", "\".\"", ",", "always_exclude", "="...
Takes a list of lib50._config.TaggedValue returns which files should be included and excluded from `root`. Any pattern tagged with a tag from include_tags will be included from require_tags can only be a file, that will then be included. MissingFilesError is raised if missing from exclude_tags will be excluded Any pattern in always_exclude will always be excluded.
[ "Takes", "a", "list", "of", "lib50", ".", "_config", ".", "TaggedValue", "returns", "which", "files", "should", "be", "included", "and", "excluded", "from", "root", ".", "Any", "pattern", "tagged", "with", "a", "tag", "from", "include_tags", "will", "be", ...
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L129-L202
cs50/lib50
lib50/_api.py
connect
def connect(slug, config_loader): """ Ensure .cs50.yaml and tool key exists, raises Error otherwise Check that all required files as per .cs50.yaml are present Returns tool specific portion of .cs50.yaml """ with ProgressBar(_("Connecting")): # Parse slug slug = Slug(slug) # Get .cs50.yaml try: config = config_loader.load(_get_content(slug.org, slug.repo, slug.branch, slug.problem / ".cs50.yaml")) except InvalidConfigError: raise InvalidSlugError(_("Invalid slug for {}. Did you mean something else?").format(config_loader.tool)) print("WTF!!!!", config) if not config: raise InvalidSlugError(_("Invalid slug for {}. Did you mean something else?").format(config_loader.tool)) # If config of tool is just a truthy value, config should be empty if not isinstance(config, dict): config = {} org = config.get("org", config_loader.tool) included, excluded = files(config.get("files")) # Check that at least 1 file is staged if not included: raise Error(_("No files in this directory are expected for submission.")) return org, (included, excluded)
python
def connect(slug, config_loader): """ Ensure .cs50.yaml and tool key exists, raises Error otherwise Check that all required files as per .cs50.yaml are present Returns tool specific portion of .cs50.yaml """ with ProgressBar(_("Connecting")): # Parse slug slug = Slug(slug) # Get .cs50.yaml try: config = config_loader.load(_get_content(slug.org, slug.repo, slug.branch, slug.problem / ".cs50.yaml")) except InvalidConfigError: raise InvalidSlugError(_("Invalid slug for {}. Did you mean something else?").format(config_loader.tool)) print("WTF!!!!", config) if not config: raise InvalidSlugError(_("Invalid slug for {}. Did you mean something else?").format(config_loader.tool)) # If config of tool is just a truthy value, config should be empty if not isinstance(config, dict): config = {} org = config.get("org", config_loader.tool) included, excluded = files(config.get("files")) # Check that at least 1 file is staged if not included: raise Error(_("No files in this directory are expected for submission.")) return org, (included, excluded)
[ "def", "connect", "(", "slug", ",", "config_loader", ")", ":", "with", "ProgressBar", "(", "_", "(", "\"Connecting\"", ")", ")", ":", "# Parse slug", "slug", "=", "Slug", "(", "slug", ")", "# Get .cs50.yaml", "try", ":", "config", "=", "config_loader", "."...
Ensure .cs50.yaml and tool key exists, raises Error otherwise Check that all required files as per .cs50.yaml are present Returns tool specific portion of .cs50.yaml
[ "Ensure", ".", "cs50", ".", "yaml", "and", "tool", "key", "exists", "raises", "Error", "otherwise", "Check", "that", "all", "required", "files", "as", "per", ".", "cs50", ".", "yaml", "are", "present", "Returns", "tool", "specific", "portion", "of", ".", ...
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L205-L238
cs50/lib50
lib50/_api.py
authenticate
def authenticate(org): """ Authenticate with GitHub via SSH if possible Otherwise authenticate via HTTPS Returns an authenticated User """ with ProgressBar(_("Authenticating")) as progress_bar: user = _authenticate_ssh(org) progress_bar.stop() if user is None: # SSH auth failed, fallback to HTTPS with _authenticate_https(org) as user: yield user else: yield user
python
def authenticate(org): """ Authenticate with GitHub via SSH if possible Otherwise authenticate via HTTPS Returns an authenticated User """ with ProgressBar(_("Authenticating")) as progress_bar: user = _authenticate_ssh(org) progress_bar.stop() if user is None: # SSH auth failed, fallback to HTTPS with _authenticate_https(org) as user: yield user else: yield user
[ "def", "authenticate", "(", "org", ")", ":", "with", "ProgressBar", "(", "_", "(", "\"Authenticating\"", ")", ")", "as", "progress_bar", ":", "user", "=", "_authenticate_ssh", "(", "org", ")", "progress_bar", ".", "stop", "(", ")", "if", "user", "is", "N...
Authenticate with GitHub via SSH if possible Otherwise authenticate via HTTPS Returns an authenticated User
[ "Authenticate", "with", "GitHub", "via", "SSH", "if", "possible", "Otherwise", "authenticate", "via", "HTTPS", "Returns", "an", "authenticated", "User" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L242-L256
cs50/lib50
lib50/_api.py
prepare
def prepare(tool, branch, user, included): """ Prepare git for pushing Check that there are no permission errors Add necessities to git config Stage files Stage files via lfs if necessary Check that atleast one file is staged """ with ProgressBar(_("Preparing")) as progress_bar, working_area(included) as area: Git.working_area = f"-C {area}" git = Git(Git.working_area) # Clone just .git folder try: _run(git.set(Git.cache)(f"clone --bare {user.repo} .git")) except Error: raise Error(_("Looks like {} isn't enabled for your account yet. " "Go to https://cs50.me/authorize and make sure you accept any pending invitations!".format(tool))) _run(git("config --bool core.bare false")) _run(git(f"config --path core.worktree {area}")) try: _run(git("checkout --force {} .gitattributes".format(branch))) except Error: pass # Set user name/email in repo config _run(git(f"config user.email {shlex.quote(user.email)}")) _run(git(f"config user.name {shlex.quote(user.name)}")) # Switch to branch without checkout _run(git(f"symbolic-ref HEAD refs/heads/{branch}")) # Git add all included files for f in included: _run(git(f"add {f}")) # Remove gitattributes from included if Path(".gitattributes").exists() and ".gitattributes" in included: included.remove(".gitattributes") # Add any oversized files through git-lfs _lfs_add(included, git) progress_bar.stop() yield
python
def prepare(tool, branch, user, included): """ Prepare git for pushing Check that there are no permission errors Add necessities to git config Stage files Stage files via lfs if necessary Check that atleast one file is staged """ with ProgressBar(_("Preparing")) as progress_bar, working_area(included) as area: Git.working_area = f"-C {area}" git = Git(Git.working_area) # Clone just .git folder try: _run(git.set(Git.cache)(f"clone --bare {user.repo} .git")) except Error: raise Error(_("Looks like {} isn't enabled for your account yet. " "Go to https://cs50.me/authorize and make sure you accept any pending invitations!".format(tool))) _run(git("config --bool core.bare false")) _run(git(f"config --path core.worktree {area}")) try: _run(git("checkout --force {} .gitattributes".format(branch))) except Error: pass # Set user name/email in repo config _run(git(f"config user.email {shlex.quote(user.email)}")) _run(git(f"config user.name {shlex.quote(user.name)}")) # Switch to branch without checkout _run(git(f"symbolic-ref HEAD refs/heads/{branch}")) # Git add all included files for f in included: _run(git(f"add {f}")) # Remove gitattributes from included if Path(".gitattributes").exists() and ".gitattributes" in included: included.remove(".gitattributes") # Add any oversized files through git-lfs _lfs_add(included, git) progress_bar.stop() yield
[ "def", "prepare", "(", "tool", ",", "branch", ",", "user", ",", "included", ")", ":", "with", "ProgressBar", "(", "_", "(", "\"Preparing\"", ")", ")", "as", "progress_bar", ",", "working_area", "(", "included", ")", "as", "area", ":", "Git", ".", "work...
Prepare git for pushing Check that there are no permission errors Add necessities to git config Stage files Stage files via lfs if necessary Check that atleast one file is staged
[ "Prepare", "git", "for", "pushing", "Check", "that", "there", "are", "no", "permission", "errors", "Add", "necessities", "to", "git", "config", "Stage", "files", "Stage", "files", "via", "lfs", "if", "necessary", "Check", "that", "atleast", "one", "file", "i...
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L260-L306
cs50/lib50
lib50/_api.py
upload
def upload(branch, user, tool): """ Commit + push to branch Returns username, commit hash """ with ProgressBar(_("Uploading")): language = os.environ.get("LANGUAGE") commit_message = [_("automated commit by {}").format(tool)] # If LANGUAGE environment variable is set, we need to communicate # this to any remote tool via the commit message. if language: commit_message.append(f"[{language}]") commit_message = " ".join(commit_message) # Commit + push git = Git(Git.working_area) _run(git(f"commit -m {shlex.quote(commit_message)} --allow-empty")) _run(git.set(Git.cache)(f"push origin {branch}")) commit_hash = _run(git("rev-parse HEAD")) return user.name, commit_hash
python
def upload(branch, user, tool): """ Commit + push to branch Returns username, commit hash """ with ProgressBar(_("Uploading")): language = os.environ.get("LANGUAGE") commit_message = [_("automated commit by {}").format(tool)] # If LANGUAGE environment variable is set, we need to communicate # this to any remote tool via the commit message. if language: commit_message.append(f"[{language}]") commit_message = " ".join(commit_message) # Commit + push git = Git(Git.working_area) _run(git(f"commit -m {shlex.quote(commit_message)} --allow-empty")) _run(git.set(Git.cache)(f"push origin {branch}")) commit_hash = _run(git("rev-parse HEAD")) return user.name, commit_hash
[ "def", "upload", "(", "branch", ",", "user", ",", "tool", ")", ":", "with", "ProgressBar", "(", "_", "(", "\"Uploading\"", ")", ")", ":", "language", "=", "os", ".", "environ", ".", "get", "(", "\"LANGUAGE\"", ")", "commit_message", "=", "[", "_", "(...
Commit + push to branch Returns username, commit hash
[ "Commit", "+", "push", "to", "branch", "Returns", "username", "commit", "hash" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L309-L330
cs50/lib50
lib50/_api.py
check_dependencies
def check_dependencies(): """ Check that dependencies are installed: - require git 2.7+, so that credential-cache--daemon ignores SIGHUP https://github.com/git/git/blob/v2.7.0/credential-cache--daemon.c """ # Check that git is installed if not shutil.which("git"): raise Error(_("You don't have git. Install git, then re-run!")) # Check that git --version > 2.7 version = subprocess.check_output(["git", "--version"]).decode("utf-8") matches = re.search(r"^git version (\d+\.\d+\.\d+).*$", version) if not matches or pkg_resources.parse_version(matches.group(1)) < pkg_resources.parse_version("2.7.0"): raise Error(_("You have an old version of git. Install version 2.7 or later, then re-run!"))
python
def check_dependencies(): """ Check that dependencies are installed: - require git 2.7+, so that credential-cache--daemon ignores SIGHUP https://github.com/git/git/blob/v2.7.0/credential-cache--daemon.c """ # Check that git is installed if not shutil.which("git"): raise Error(_("You don't have git. Install git, then re-run!")) # Check that git --version > 2.7 version = subprocess.check_output(["git", "--version"]).decode("utf-8") matches = re.search(r"^git version (\d+\.\d+\.\d+).*$", version) if not matches or pkg_resources.parse_version(matches.group(1)) < pkg_resources.parse_version("2.7.0"): raise Error(_("You have an old version of git. Install version 2.7 or later, then re-run!"))
[ "def", "check_dependencies", "(", ")", ":", "# Check that git is installed", "if", "not", "shutil", ".", "which", "(", "\"git\"", ")", ":", "raise", "Error", "(", "_", "(", "\"You don't have git. Install git, then re-run!\"", ")", ")", "# Check that git --version > 2.7"...
Check that dependencies are installed: - require git 2.7+, so that credential-cache--daemon ignores SIGHUP https://github.com/git/git/blob/v2.7.0/credential-cache--daemon.c
[ "Check", "that", "dependencies", "are", "installed", ":", "-", "require", "git", "2", ".", "7", "+", "so", "that", "credential", "-", "cache", "--", "daemon", "ignores", "SIGHUP", "https", ":", "//", "github", ".", "com", "/", "git", "/", "git", "/", ...
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L333-L348
cs50/lib50
lib50/_api.py
_run
def _run(command, quiet=False, timeout=None): """Run a command, returns command output.""" try: with _spawn(command, quiet, timeout) as child: command_output = child.read().strip().replace("\r\n", "\n") except pexpect.TIMEOUT: logger.info(f"command {command} timed out") raise Error() return command_output
python
def _run(command, quiet=False, timeout=None): """Run a command, returns command output.""" try: with _spawn(command, quiet, timeout) as child: command_output = child.read().strip().replace("\r\n", "\n") except pexpect.TIMEOUT: logger.info(f"command {command} timed out") raise Error() return command_output
[ "def", "_run", "(", "command", ",", "quiet", "=", "False", ",", "timeout", "=", "None", ")", ":", "try", ":", "with", "_spawn", "(", "command", ",", "quiet", ",", "timeout", ")", "as", "child", ":", "command_output", "=", "child", ".", "read", "(", ...
Run a command, returns command output.
[ "Run", "a", "command", "returns", "command", "output", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L526-L535
cs50/lib50
lib50/_api.py
_glob
def _glob(pattern, skip_dirs=False): """Glob pattern, expand directories, return all files that matched.""" # Implicit recursive iff no / in pattern and starts with * if "/" not in pattern and pattern.startswith("*"): files = glob.glob(f"**/{pattern}", recursive=True) else: files = glob.glob(pattern, recursive=True) # Expand dirs all_files = set() for file in files: if os.path.isdir(file) and not skip_dirs: all_files.update(set(f for f in _glob(f"{file}/**/*", skip_dirs=True) if not os.path.isdir(f))) else: all_files.add(file) # Normalize all files return {str(Path(f)) for f in all_files}
python
def _glob(pattern, skip_dirs=False): """Glob pattern, expand directories, return all files that matched.""" # Implicit recursive iff no / in pattern and starts with * if "/" not in pattern and pattern.startswith("*"): files = glob.glob(f"**/{pattern}", recursive=True) else: files = glob.glob(pattern, recursive=True) # Expand dirs all_files = set() for file in files: if os.path.isdir(file) and not skip_dirs: all_files.update(set(f for f in _glob(f"{file}/**/*", skip_dirs=True) if not os.path.isdir(f))) else: all_files.add(file) # Normalize all files return {str(Path(f)) for f in all_files}
[ "def", "_glob", "(", "pattern", ",", "skip_dirs", "=", "False", ")", ":", "# Implicit recursive iff no / in pattern and starts with *", "if", "\"/\"", "not", "in", "pattern", "and", "pattern", ".", "startswith", "(", "\"*\"", ")", ":", "files", "=", "glob", ".",...
Glob pattern, expand directories, return all files that matched.
[ "Glob", "pattern", "expand", "directories", "return", "all", "files", "that", "matched", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L538-L555
cs50/lib50
lib50/_api.py
_get_content
def _get_content(org, repo, branch, filepath): """Get all content from org/repo/branch/filepath at GitHub.""" url = "https://github.com/{}/{}/raw/{}/{}".format(org, repo, branch, filepath) r = requests.get(url) if not r.ok: if r.status_code == 404: raise InvalidSlugError(_("Invalid slug. Did you mean to submit something else?")) else: raise Error(_("Could not connect to GitHub.")) return r.content
python
def _get_content(org, repo, branch, filepath): """Get all content from org/repo/branch/filepath at GitHub.""" url = "https://github.com/{}/{}/raw/{}/{}".format(org, repo, branch, filepath) r = requests.get(url) if not r.ok: if r.status_code == 404: raise InvalidSlugError(_("Invalid slug. Did you mean to submit something else?")) else: raise Error(_("Could not connect to GitHub.")) return r.content
[ "def", "_get_content", "(", "org", ",", "repo", ",", "branch", ",", "filepath", ")", ":", "url", "=", "\"https://github.com/{}/{}/raw/{}/{}\"", ".", "format", "(", "org", ",", "repo", ",", "branch", ",", "filepath", ")", "r", "=", "requests", ".", "get", ...
Get all content from org/repo/branch/filepath at GitHub.
[ "Get", "all", "content", "from", "org", "/", "repo", "/", "branch", "/", "filepath", "at", "GitHub", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L558-L567
cs50/lib50
lib50/_api.py
_lfs_add
def _lfs_add(files, git): """ Add any oversized files with lfs. Throws error if a file is bigger than 2GB or git-lfs is not installed. """ # Check for large files > 100 MB (and huge files > 2 GB) # https://help.github.com/articles/conditions-for-large-files/ # https://help.github.com/articles/about-git-large-file-storage/ larges, huges = [], [] for file in files: size = os.path.getsize(file) if size > (100 * 1024 * 1024): larges.append(file) elif size > (2 * 1024 * 1024 * 1024): huges.append(file) # Raise Error if a file is >2GB if huges: raise Error(_("These files are too large to be submitted:\n{}\n" "Remove these files from your directory " "and then re-run {}!").format("\n".join(huges), org)) # Add large files (>100MB) with git-lfs if larges: # Raise Error if git-lfs not installed if not shutil.which("git-lfs"): raise Error(_("These files are too large to be submitted:\n{}\n" "Install git-lfs (or remove these files from your directory) " "and then re-run!").format("\n".join(larges))) # Install git-lfs for this repo _run(git("lfs install --local")) # For pre-push hook _run(git("config credential.helper cache")) # Rm previously added file, have lfs track file, add file again for large in larges: _run(git("rm --cached {}".format(shlex.quote(large)))) _run(git("lfs track {}".format(shlex.quote(large)))) _run(git("add {}".format(shlex.quote(large)))) _run(git("add --force .gitattributes"))
python
def _lfs_add(files, git): """ Add any oversized files with lfs. Throws error if a file is bigger than 2GB or git-lfs is not installed. """ # Check for large files > 100 MB (and huge files > 2 GB) # https://help.github.com/articles/conditions-for-large-files/ # https://help.github.com/articles/about-git-large-file-storage/ larges, huges = [], [] for file in files: size = os.path.getsize(file) if size > (100 * 1024 * 1024): larges.append(file) elif size > (2 * 1024 * 1024 * 1024): huges.append(file) # Raise Error if a file is >2GB if huges: raise Error(_("These files are too large to be submitted:\n{}\n" "Remove these files from your directory " "and then re-run {}!").format("\n".join(huges), org)) # Add large files (>100MB) with git-lfs if larges: # Raise Error if git-lfs not installed if not shutil.which("git-lfs"): raise Error(_("These files are too large to be submitted:\n{}\n" "Install git-lfs (or remove these files from your directory) " "and then re-run!").format("\n".join(larges))) # Install git-lfs for this repo _run(git("lfs install --local")) # For pre-push hook _run(git("config credential.helper cache")) # Rm previously added file, have lfs track file, add file again for large in larges: _run(git("rm --cached {}".format(shlex.quote(large)))) _run(git("lfs track {}".format(shlex.quote(large)))) _run(git("add {}".format(shlex.quote(large)))) _run(git("add --force .gitattributes"))
[ "def", "_lfs_add", "(", "files", ",", "git", ")", ":", "# Check for large files > 100 MB (and huge files > 2 GB)", "# https://help.github.com/articles/conditions-for-large-files/", "# https://help.github.com/articles/about-git-large-file-storage/", "larges", ",", "huges", "=", "[", "...
Add any oversized files with lfs. Throws error if a file is bigger than 2GB or git-lfs is not installed.
[ "Add", "any", "oversized", "files", "with", "lfs", ".", "Throws", "error", "if", "a", "file", "is", "bigger", "than", "2GB", "or", "git", "-", "lfs", "is", "not", "installed", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L570-L611
cs50/lib50
lib50/_api.py
_authenticate_ssh
def _authenticate_ssh(org): """Try authenticating via ssh, if succesful yields a User, otherwise raises Error.""" # Try to get username from git config username = os.environ.get(f"{org.upper()}_USERNAME") # Require ssh-agent child = pexpect.spawn("ssh -T git@github.com", encoding="utf8") # GitHub prints 'Hi {username}!...' when attempting to get shell access i = child.expect(["Hi (.+)! You've successfully authenticated", "Enter passphrase for key", "Permission denied", "Are you sure you want to continue connecting"]) child.close() if i == 0: if username is None: username = child.match.groups()[0] else: return None return User(name=username, repo=f"git@github.com:{org}/{username}")
python
def _authenticate_ssh(org): """Try authenticating via ssh, if succesful yields a User, otherwise raises Error.""" # Try to get username from git config username = os.environ.get(f"{org.upper()}_USERNAME") # Require ssh-agent child = pexpect.spawn("ssh -T git@github.com", encoding="utf8") # GitHub prints 'Hi {username}!...' when attempting to get shell access i = child.expect(["Hi (.+)! You've successfully authenticated", "Enter passphrase for key", "Permission denied", "Are you sure you want to continue connecting"]) child.close() if i == 0: if username is None: username = child.match.groups()[0] else: return None return User(name=username, repo=f"git@github.com:{org}/{username}")
[ "def", "_authenticate_ssh", "(", "org", ")", ":", "# Try to get username from git config", "username", "=", "os", ".", "environ", ".", "get", "(", "f\"{org.upper()}_USERNAME\"", ")", "# Require ssh-agent", "child", "=", "pexpect", ".", "spawn", "(", "\"ssh -T git@gith...
Try authenticating via ssh, if succesful yields a User, otherwise raises Error.
[ "Try", "authenticating", "via", "ssh", "if", "succesful", "yields", "a", "User", "otherwise", "raises", "Error", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L614-L634
cs50/lib50
lib50/_api.py
_authenticate_https
def _authenticate_https(org): """Try authenticating via HTTPS, if succesful yields User, otherwise raises Error.""" _CREDENTIAL_SOCKET.parent.mkdir(mode=0o700, exist_ok=True) try: Git.cache = f"-c credential.helper= -c credential.helper='cache --socket {_CREDENTIAL_SOCKET}'" git = Git(Git.cache) # Get credentials from cache if possible with _spawn(git("credential fill"), quiet=True) as child: child.sendline("protocol=https") child.sendline("host=github.com") child.sendline("") i = child.expect(["Username for '.+'", "Password for '.+'", "username=([^\r]+)\r\npassword=([^\r]+)\r\n"]) if i == 2: username, password = child.match.groups() else: username = password = None child.close() child.exitstatus = 0 # No credentials found, need to ask user if password is None: username = _prompt_username(_("GitHub username: ")) password = _prompt_password(_("GitHub password: ")) # Check if credentials are correct res = requests.get("https://api.github.com/user", auth=(username, password)) # Check for 2-factor authentication https://developer.github.com/v3/auth/#working-with-two-factor-authentication if "X-GitHub-OTP" in res.headers: raise Error("Looks like you have two-factor authentication enabled!" " Please generate a personal access token and use it as your password." 
" See https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line for more info.") if res.status_code != 200: logger.info(res.headers) logger.info(res.text) raise Error(_("Invalid username and/or password.") if res.status_code == 401 else _("Could not authenticate user.")) # Canonicalize (capitalization of) username, # Especially if user logged in via email address username = res.json()["login"] # Credentials are correct, best cache them with _spawn(git("-c credentialcache.ignoresighup=true credential approve"), quiet=True) as child: child.sendline("protocol=https") child.sendline("host=github.com") child.sendline(f"path={org}/{username}") child.sendline(f"username={username}") child.sendline(f"password={password}") child.sendline("") yield User(name=username, repo=f"https://{username}@github.com/{org}/{username}") except BaseException: # Some error occured while this context manager is active, best forget credentials. logout() raise
python
def _authenticate_https(org): """Try authenticating via HTTPS, if succesful yields User, otherwise raises Error.""" _CREDENTIAL_SOCKET.parent.mkdir(mode=0o700, exist_ok=True) try: Git.cache = f"-c credential.helper= -c credential.helper='cache --socket {_CREDENTIAL_SOCKET}'" git = Git(Git.cache) # Get credentials from cache if possible with _spawn(git("credential fill"), quiet=True) as child: child.sendline("protocol=https") child.sendline("host=github.com") child.sendline("") i = child.expect(["Username for '.+'", "Password for '.+'", "username=([^\r]+)\r\npassword=([^\r]+)\r\n"]) if i == 2: username, password = child.match.groups() else: username = password = None child.close() child.exitstatus = 0 # No credentials found, need to ask user if password is None: username = _prompt_username(_("GitHub username: ")) password = _prompt_password(_("GitHub password: ")) # Check if credentials are correct res = requests.get("https://api.github.com/user", auth=(username, password)) # Check for 2-factor authentication https://developer.github.com/v3/auth/#working-with-two-factor-authentication if "X-GitHub-OTP" in res.headers: raise Error("Looks like you have two-factor authentication enabled!" " Please generate a personal access token and use it as your password." 
" See https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line for more info.") if res.status_code != 200: logger.info(res.headers) logger.info(res.text) raise Error(_("Invalid username and/or password.") if res.status_code == 401 else _("Could not authenticate user.")) # Canonicalize (capitalization of) username, # Especially if user logged in via email address username = res.json()["login"] # Credentials are correct, best cache them with _spawn(git("-c credentialcache.ignoresighup=true credential approve"), quiet=True) as child: child.sendline("protocol=https") child.sendline("host=github.com") child.sendline(f"path={org}/{username}") child.sendline(f"username={username}") child.sendline(f"password={password}") child.sendline("") yield User(name=username, repo=f"https://{username}@github.com/{org}/{username}") except BaseException: # Some error occured while this context manager is active, best forget credentials. logout() raise
[ "def", "_authenticate_https", "(", "org", ")", ":", "_CREDENTIAL_SOCKET", ".", "parent", ".", "mkdir", "(", "mode", "=", "0o700", ",", "exist_ok", "=", "True", ")", "try", ":", "Git", ".", "cache", "=", "f\"-c credential.helper= -c credential.helper='cache --socke...
Try authenticating via HTTPS, if succesful yields User, otherwise raises Error.
[ "Try", "authenticating", "via", "HTTPS", "if", "succesful", "yields", "User", "otherwise", "raises", "Error", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L638-L697
cs50/lib50
lib50/_api.py
_prompt_username
def _prompt_username(prompt="Username: ", prefill=None): """Prompt the user for username.""" if prefill: readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt).strip() except EOFError: print() finally: readline.set_startup_hook()
python
def _prompt_username(prompt="Username: ", prefill=None): """Prompt the user for username.""" if prefill: readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt).strip() except EOFError: print() finally: readline.set_startup_hook()
[ "def", "_prompt_username", "(", "prompt", "=", "\"Username: \"", ",", "prefill", "=", "None", ")", ":", "if", "prefill", ":", "readline", ".", "set_startup_hook", "(", "lambda", ":", "readline", ".", "insert_text", "(", "prefill", ")", ")", "try", ":", "re...
Prompt the user for username.
[ "Prompt", "the", "user", "for", "username", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L700-L710
cs50/lib50
lib50/_api.py
_prompt_password
def _prompt_password(prompt="Password: "): """Prompt the user for password, printing asterisks for each character""" fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) tty.setraw(fd) print(prompt, end="", flush=True) password = [] try: while True: ch = sys.stdin.buffer.read(1)[0] if ch in (ord("\r"), ord("\n"), 4): # If user presses Enter or ctrl-d print("\r") break elif ch == 127: # DEL try: password.pop() except IndexError: pass else: print("\b \b", end="", flush=True) elif ch == 3: # ctrl-c print("^C", end="", flush=True) raise KeyboardInterrupt else: password.append(ch) print("*", end="", flush=True) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return bytes(password).decode()
python
def _prompt_password(prompt="Password: "): """Prompt the user for password, printing asterisks for each character""" fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) tty.setraw(fd) print(prompt, end="", flush=True) password = [] try: while True: ch = sys.stdin.buffer.read(1)[0] if ch in (ord("\r"), ord("\n"), 4): # If user presses Enter or ctrl-d print("\r") break elif ch == 127: # DEL try: password.pop() except IndexError: pass else: print("\b \b", end="", flush=True) elif ch == 3: # ctrl-c print("^C", end="", flush=True) raise KeyboardInterrupt else: password.append(ch) print("*", end="", flush=True) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return bytes(password).decode()
[ "def", "_prompt_password", "(", "prompt", "=", "\"Password: \"", ")", ":", "fd", "=", "sys", ".", "stdin", ".", "fileno", "(", ")", "old_settings", "=", "termios", ".", "tcgetattr", "(", "fd", ")", "tty", ".", "setraw", "(", "fd", ")", "print", "(", ...
Prompt the user for password, printing asterisks for each character
[ "Prompt", "the", "user", "for", "password", "printing", "asterisks", "for", "each", "character" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L713-L743
cs50/lib50
lib50/_api.py
Slug._check_endings
def _check_endings(self): """Check begin/end of slug, raises Error if malformed.""" if self.slug.startswith("/") and self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading and trailing slashes?".format(self.slug.strip("/")))) elif self.slug.startswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading slash?".format(self.slug.strip("/")))) elif self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the trailing slash?".format(self.slug.strip("/"))))
python
def _check_endings(self): """Check begin/end of slug, raises Error if malformed.""" if self.slug.startswith("/") and self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading and trailing slashes?".format(self.slug.strip("/")))) elif self.slug.startswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the leading slash?".format(self.slug.strip("/")))) elif self.slug.endswith("/"): raise InvalidSlugError( _("Invalid slug. Did you mean {}, without the trailing slash?".format(self.slug.strip("/"))))
[ "def", "_check_endings", "(", "self", ")", ":", "if", "self", ".", "slug", ".", "startswith", "(", "\"/\"", ")", "and", "self", ".", "slug", ".", "endswith", "(", "\"/\"", ")", ":", "raise", "InvalidSlugError", "(", "_", "(", "\"Invalid slug. Did you mean ...
Check begin/end of slug, raises Error if malformed.
[ "Check", "begin", "/", "end", "of", "slug", "raises", "Error", "if", "malformed", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L420-L430
cs50/lib50
lib50/_api.py
Slug._get_branches
def _get_branches(self): """Get branches from org/repo.""" if self.offline: local_path = Path(LOCAL_PATH).expanduser() / self.org / self.repo get_refs = f"git -C {shlex.quote(str(local_path))} show-ref --heads" else: get_refs = f"git ls-remote --heads https://github.com/{self.org}/{self.repo}" try: # Parse get_refs output for the actual branch names return (line.split()[1].replace("refs/heads/", "") for line in _run(get_refs, timeout=3).split("\n")) except Error: return []
python
def _get_branches(self): """Get branches from org/repo.""" if self.offline: local_path = Path(LOCAL_PATH).expanduser() / self.org / self.repo get_refs = f"git -C {shlex.quote(str(local_path))} show-ref --heads" else: get_refs = f"git ls-remote --heads https://github.com/{self.org}/{self.repo}" try: # Parse get_refs output for the actual branch names return (line.split()[1].replace("refs/heads/", "") for line in _run(get_refs, timeout=3).split("\n")) except Error: return []
[ "def", "_get_branches", "(", "self", ")", ":", "if", "self", ".", "offline", ":", "local_path", "=", "Path", "(", "LOCAL_PATH", ")", ".", "expanduser", "(", ")", "/", "self", ".", "org", "/", "self", ".", "repo", "get_refs", "=", "f\"git -C {shlex.quote(...
Get branches from org/repo.
[ "Get", "branches", "from", "org", "/", "repo", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L432-L443
cs50/lib50
lib50/_api.py
ProgressBar.stop
def stop(self): """Stop the progress bar.""" if self._progressing: self._progressing = False self._thread.join()
python
def stop(self): """Stop the progress bar.""" if self._progressing: self._progressing = False self._thread.join()
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_progressing", ":", "self", ".", "_progressing", "=", "False", "self", ".", "_thread", ".", "join", "(", ")" ]
Stop the progress bar.
[ "Stop", "the", "progress", "bar", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L456-L460
jjjake/iamine
iamine/urls.py
make_url
def make_url(path, protocol=None, hosts=None): """Make an URL given a path, and optionally, a protocol and set of hosts to select from randomly. :param path: The Archive.org path. :type path: str :param protocol: (optional) The HTTP protocol to use. "https://" is used by default. :type protocol: str :param hosts: (optional) A set of hosts. A host will be chosen at random. The default host is "archive.org". :type hosts: iterable :rtype: str :returns: An Absolute URI. """ protocol = 'https://' if not protocol else protocol host = hosts[random.randrange(len(hosts))] if hosts else 'archive.org' return protocol + host + path.strip()
python
def make_url(path, protocol=None, hosts=None): """Make an URL given a path, and optionally, a protocol and set of hosts to select from randomly. :param path: The Archive.org path. :type path: str :param protocol: (optional) The HTTP protocol to use. "https://" is used by default. :type protocol: str :param hosts: (optional) A set of hosts. A host will be chosen at random. The default host is "archive.org". :type hosts: iterable :rtype: str :returns: An Absolute URI. """ protocol = 'https://' if not protocol else protocol host = hosts[random.randrange(len(hosts))] if hosts else 'archive.org' return protocol + host + path.strip()
[ "def", "make_url", "(", "path", ",", "protocol", "=", "None", ",", "hosts", "=", "None", ")", ":", "protocol", "=", "'https://'", "if", "not", "protocol", "else", "protocol", "host", "=", "hosts", "[", "random", ".", "randrange", "(", "len", "(", "host...
Make an URL given a path, and optionally, a protocol and set of hosts to select from randomly. :param path: The Archive.org path. :type path: str :param protocol: (optional) The HTTP protocol to use. "https://" is used by default. :type protocol: str :param hosts: (optional) A set of hosts. A host will be chosen at random. The default host is "archive.org". :type hosts: iterable :rtype: str :returns: An Absolute URI.
[ "Make", "an", "URL", "given", "a", "path", "and", "optionally", "a", "protocol", "and", "set", "of", "hosts", "to", "select", "from", "randomly", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/urls.py#L4-L24
jjjake/iamine
iamine/urls.py
metadata_urls
def metadata_urls(identifiers, protocol=None, hosts=None): """An Archive.org metadata URL generator. :param identifiers: A set of Archive.org identifiers for which to make metadata URLs. :type identifiers: iterable :param protocol: (optional) The HTTP protocol to use. "https://" is used by default. :type protocol: str :param hosts: (optional) A set of hosts. A host will be chosen at random. The default host is "archive.org". :type hosts: iterable :returns: A generator yielding Archive.org metadata URLs. """ for identifier in identifiers: path = '/metadata/{}'.format(identifier) url = make_url(path, protocol, hosts) yield url
python
def metadata_urls(identifiers, protocol=None, hosts=None): """An Archive.org metadata URL generator. :param identifiers: A set of Archive.org identifiers for which to make metadata URLs. :type identifiers: iterable :param protocol: (optional) The HTTP protocol to use. "https://" is used by default. :type protocol: str :param hosts: (optional) A set of hosts. A host will be chosen at random. The default host is "archive.org". :type hosts: iterable :returns: A generator yielding Archive.org metadata URLs. """ for identifier in identifiers: path = '/metadata/{}'.format(identifier) url = make_url(path, protocol, hosts) yield url
[ "def", "metadata_urls", "(", "identifiers", ",", "protocol", "=", "None", ",", "hosts", "=", "None", ")", ":", "for", "identifier", "in", "identifiers", ":", "path", "=", "'/metadata/{}'", ".", "format", "(", "identifier", ")", "url", "=", "make_url", "(",...
An Archive.org metadata URL generator. :param identifiers: A set of Archive.org identifiers for which to make metadata URLs. :type identifiers: iterable :param protocol: (optional) The HTTP protocol to use. "https://" is used by default. :type protocol: str :param hosts: (optional) A set of hosts. A host will be chosen at random. The default host is "archive.org". :type hosts: iterable :returns: A generator yielding Archive.org metadata URLs.
[ "An", "Archive", ".", "org", "metadata", "URL", "generator", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/urls.py#L27-L47
rapidpro/dash
dash/context_processors.py
lang_direction
def lang_direction(request): """ Sets lang_direction context variable to whether the language is RTL or LTR """ if lang_direction.rtl_langs is None: lang_direction.rtl_langs = getattr(settings, "RTL_LANGUAGES", set()) return {"lang_direction": "rtl" if request.LANGUAGE_CODE in lang_direction.rtl_langs else "ltr"}
python
def lang_direction(request): """ Sets lang_direction context variable to whether the language is RTL or LTR """ if lang_direction.rtl_langs is None: lang_direction.rtl_langs = getattr(settings, "RTL_LANGUAGES", set()) return {"lang_direction": "rtl" if request.LANGUAGE_CODE in lang_direction.rtl_langs else "ltr"}
[ "def", "lang_direction", "(", "request", ")", ":", "if", "lang_direction", ".", "rtl_langs", "is", "None", ":", "lang_direction", ".", "rtl_langs", "=", "getattr", "(", "settings", ",", "\"RTL_LANGUAGES\"", ",", "set", "(", ")", ")", "return", "{", "\"lang_d...
Sets lang_direction context variable to whether the language is RTL or LTR
[ "Sets", "lang_direction", "context", "variable", "to", "whether", "the", "language", "is", "RTL", "or", "LTR" ]
train
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/context_processors.py#L4-L11
celiao/rtsimple
rtsimple/lists.py
Lists.lists
def lists(self, **kwargs): """Gets the top-level lists available from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('lists') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def lists(self, **kwargs): """Gets the top-level lists available from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('lists') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "lists", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'lists'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", "...
Gets the top-level lists available from the API. Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "top", "-", "level", "lists", "available", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L34-L44
celiao/rtsimple
rtsimple/lists.py
Lists.movie_lists
def movie_lists(self, **kwargs): """Gets the movie lists available from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movie_lists') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def movie_lists(self, **kwargs): """Gets the movie lists available from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movie_lists') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "movie_lists", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'movie_lists'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "re...
Gets the movie lists available from the API. Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "movie", "lists", "available", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L46-L56
celiao/rtsimple
rtsimple/lists.py
Lists.movies_box_office
def movies_box_office(self, **kwargs): """Gets the top box office earning movies from the API. Sorted by most recent weekend gross ticket sales. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_box_office') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def movies_box_office(self, **kwargs): """Gets the top box office earning movies from the API. Sorted by most recent weekend gross ticket sales. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_box_office') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "movies_box_office", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'movies_box_office'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", ...
Gets the top box office earning movies from the API. Sorted by most recent weekend gross ticket sales. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "top", "box", "office", "earning", "movies", "from", "the", "API", ".", "Sorted", "by", "most", "recent", "weekend", "gross", "ticket", "sales", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L58-L73
celiao/rtsimple
rtsimple/lists.py
Lists.movies_in_theaters
def movies_in_theaters(self, **kwargs): """Gets the movies currently in theaters from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_in_theaters') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def movies_in_theaters(self, **kwargs): """Gets the movies currently in theaters from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_in_theaters') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "movies_in_theaters", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'movies_in_theaters'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values"...
Gets the movies currently in theaters from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "movies", "currently", "in", "theaters", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L75-L90
celiao/rtsimple
rtsimple/lists.py
Lists.movies_opening
def movies_opening(self, **kwargs): """Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_opening') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def movies_opening(self, **kwargs): """Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_opening') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "movies_opening", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'movies_opening'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(",...
Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "current", "opening", "movies", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L92-L106
celiao/rtsimple
rtsimple/lists.py
Lists.movies_upcoming
def movies_upcoming(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_upcoming') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def movies_upcoming(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_upcoming') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "movies_upcoming", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'movies_upcoming'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(...
Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "upcoming", "movies", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L108-L123
celiao/rtsimple
rtsimple/lists.py
Lists.dvd_lists
def dvd_lists(self, **kwargs): """Gets the dvd lists available from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvd_lists') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def dvd_lists(self, **kwargs): """Gets the dvd lists available from the API. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvd_lists') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "dvd_lists", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'dvd_lists'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "respon...
Gets the dvd lists available from the API. Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "dvd", "lists", "available", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L125-L135
celiao/rtsimple
rtsimple/lists.py
Lists.dvds_top_rentals
def dvds_top_rentals(self, **kwargs): """Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_top_rentals') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def dvds_top_rentals(self, **kwargs): """Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_top_rentals') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "dvds_top_rentals", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'dvds_top_rentals'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", ...
Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "current", "opening", "movies", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L137-L151
celiao/rtsimple
rtsimple/lists.py
Lists.dvds_current_releases
def dvds_current_releases(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_current_releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def dvds_current_releases(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_current_releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "dvds_current_releases", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'dvds_current_releases'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_v...
Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "upcoming", "movies", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L153-L168
celiao/rtsimple
rtsimple/lists.py
Lists.dvds_new_releases
def dvds_new_releases(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_new_releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def dvds_new_releases(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_new_releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "dvds_new_releases", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'dvds_new_releases'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", ...
Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "upcoming", "movies", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L170-L185
celiao/rtsimple
rtsimple/lists.py
Lists.dvds_upcoming
def dvds_upcoming(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_upcoming') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
python
def dvds_upcoming(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_upcoming') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "dvds_upcoming", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'dvds_upcoming'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", ...
Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "upcoming", "movies", "from", "the", "API", "." ]
train
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L187-L202
lambdalisue/e4u
e4u/utils.py
code_to_unicode
def code_to_unicode(code): u"""Convert character code(hex) to unicode""" def utf32chr(n): """utf32char for narrow build python""" return eval("u\'\\U%08X\'" % n) def lazy_unichr(n): try: return unichr(n) except ValueError: warnings.warn("Your python it built as narrow python. Make with " "'--enable-unicode=ucs4' configure option to build wide python") return utf32chr(n) if code and isinstance(code, basestring): clean_code = code.replace('>', '') if clean_code: return u''.join([lazy_unichr(int(code_char, 16)) for code_char in clean_code.split('+') if code_char]) return None
python
def code_to_unicode(code): u"""Convert character code(hex) to unicode""" def utf32chr(n): """utf32char for narrow build python""" return eval("u\'\\U%08X\'" % n) def lazy_unichr(n): try: return unichr(n) except ValueError: warnings.warn("Your python it built as narrow python. Make with " "'--enable-unicode=ucs4' configure option to build wide python") return utf32chr(n) if code and isinstance(code, basestring): clean_code = code.replace('>', '') if clean_code: return u''.join([lazy_unichr(int(code_char, 16)) for code_char in clean_code.split('+') if code_char]) return None
[ "def", "code_to_unicode", "(", "code", ")", ":", "def", "utf32chr", "(", "n", ")", ":", "\"\"\"utf32char for narrow build python\"\"\"", "return", "eval", "(", "\"u\\'\\\\U%08X\\'\"", "%", "n", ")", "def", "lazy_unichr", "(", "n", ")", ":", "try", ":", "return...
u"""Convert character code(hex) to unicode
[ "u", "Convert", "character", "code", "(", "hex", ")", "to", "unicode" ]
train
https://github.com/lambdalisue/e4u/blob/108635c5ba37e7ae33001adbf07a95878f31fd50/e4u/utils.py#L14-L30
lambdalisue/e4u
e4u/utils.py
code_to_sjis
def code_to_sjis(code): u"""Convert character code(hex) to string""" if code and isinstance(code, basestring): clean_code = code.replace('>', '') if clean_code: _code_to_sjis_char = lambda c: ''.join([chr(int("%c%c"%(a, b), 16)) for a, b in izip(c[0::2], c[1::2])]) return ''.join([_code_to_sjis_char(code_char) for code_char in clean_code.split('+') if code_char]) return None
python
def code_to_sjis(code): u"""Convert character code(hex) to string""" if code and isinstance(code, basestring): clean_code = code.replace('>', '') if clean_code: _code_to_sjis_char = lambda c: ''.join([chr(int("%c%c"%(a, b), 16)) for a, b in izip(c[0::2], c[1::2])]) return ''.join([_code_to_sjis_char(code_char) for code_char in clean_code.split('+') if code_char]) return None
[ "def", "code_to_sjis", "(", "code", ")", ":", "if", "code", "and", "isinstance", "(", "code", ",", "basestring", ")", ":", "clean_code", "=", "code", ".", "replace", "(", "'>'", ",", "''", ")", "if", "clean_code", ":", "_code_to_sjis_char", "=", "lambda"...
u"""Convert character code(hex) to string
[ "u", "Convert", "character", "code", "(", "hex", ")", "to", "string" ]
train
https://github.com/lambdalisue/e4u/blob/108635c5ba37e7ae33001adbf07a95878f31fd50/e4u/utils.py#L31-L38
lambdalisue/e4u
e4u/utils.py
create_regex_patterns
def create_regex_patterns(symbols): u"""create regex patterns for text, google, docomo, kddi and softbank via `symbols` create regex patterns for finding emoji character from text. the pattern character use `unicode` formatted character so you have to decode text which is not decoded. """ pattern_unicode = [] pattern_google = [] pattern_docomo = [] pattern_kddi = [] pattern_softbank = [] for x in symbols: if x.unicode.code: pattern_unicode.append(re.escape(unicode(x.unicode))) if x.google.code: pattern_google.append(re.escape(unicode(x.google))) if x.docomo.code: pattern_docomo.append(re.escape(unicode(x.docomo))) if x.kddi.code: pattern_kddi.append(re.escape(unicode(x.kddi))) if x.softbank.code: pattern_softbank.append(re.escape(unicode(x.softbank))) # pattern_unicode = re.compile(u"[%s]" % u''.join(pattern_unicode)) # pattern_google = re.compile(u"[%s]" % u''.join(pattern_google)) # pattern_docomo = re.compile(u"[%s]" % u''.join(pattern_docomo)) # pattern_kddi = re.compile(u"[%s]" % u''.join(pattern_kddi)) # pattern_softbank = re.compile(u"[%s]" % u''.join(pattern_softbank)) pattern_unicode = re.compile(u"%s" % u'|'.join(pattern_unicode)) pattern_google = re.compile(u"%s" % u'|'.join(pattern_google)) pattern_docomo = re.compile(u"%s" % u'|'.join(pattern_docomo)) pattern_kddi = re.compile(u"%s" % u'|'.join(pattern_kddi)) pattern_softbank = re.compile(u"%s" % u'|'.join(pattern_softbank)) return { # forward reverse 'text': (None, pattern_unicode), 'docomo_img': (None, pattern_unicode), 'kddi_img': (None, pattern_unicode), 'softbank_img': (None, pattern_unicode), 'google': (pattern_google, pattern_unicode), 'docomo': (pattern_docomo, pattern_unicode), 'kddi': (pattern_kddi, pattern_unicode), 'softbank': (pattern_softbank, pattern_unicode), }
python
def create_regex_patterns(symbols): u"""create regex patterns for text, google, docomo, kddi and softbank via `symbols` create regex patterns for finding emoji character from text. the pattern character use `unicode` formatted character so you have to decode text which is not decoded. """ pattern_unicode = [] pattern_google = [] pattern_docomo = [] pattern_kddi = [] pattern_softbank = [] for x in symbols: if x.unicode.code: pattern_unicode.append(re.escape(unicode(x.unicode))) if x.google.code: pattern_google.append(re.escape(unicode(x.google))) if x.docomo.code: pattern_docomo.append(re.escape(unicode(x.docomo))) if x.kddi.code: pattern_kddi.append(re.escape(unicode(x.kddi))) if x.softbank.code: pattern_softbank.append(re.escape(unicode(x.softbank))) # pattern_unicode = re.compile(u"[%s]" % u''.join(pattern_unicode)) # pattern_google = re.compile(u"[%s]" % u''.join(pattern_google)) # pattern_docomo = re.compile(u"[%s]" % u''.join(pattern_docomo)) # pattern_kddi = re.compile(u"[%s]" % u''.join(pattern_kddi)) # pattern_softbank = re.compile(u"[%s]" % u''.join(pattern_softbank)) pattern_unicode = re.compile(u"%s" % u'|'.join(pattern_unicode)) pattern_google = re.compile(u"%s" % u'|'.join(pattern_google)) pattern_docomo = re.compile(u"%s" % u'|'.join(pattern_docomo)) pattern_kddi = re.compile(u"%s" % u'|'.join(pattern_kddi)) pattern_softbank = re.compile(u"%s" % u'|'.join(pattern_softbank)) return { # forward reverse 'text': (None, pattern_unicode), 'docomo_img': (None, pattern_unicode), 'kddi_img': (None, pattern_unicode), 'softbank_img': (None, pattern_unicode), 'google': (pattern_google, pattern_unicode), 'docomo': (pattern_docomo, pattern_unicode), 'kddi': (pattern_kddi, pattern_unicode), 'softbank': (pattern_softbank, pattern_unicode), }
[ "def", "create_regex_patterns", "(", "symbols", ")", ":", "pattern_unicode", "=", "[", "]", "pattern_google", "=", "[", "]", "pattern_docomo", "=", "[", "]", "pattern_kddi", "=", "[", "]", "pattern_softbank", "=", "[", "]", "for", "x", "in", "symbols", ":"...
u"""create regex patterns for text, google, docomo, kddi and softbank via `symbols` create regex patterns for finding emoji character from text. the pattern character use `unicode` formatted character so you have to decode text which is not decoded.
[ "u", "create", "regex", "patterns", "for", "text", "google", "docomo", "kddi", "and", "softbank", "via", "symbols", "create", "regex", "patterns", "for", "finding", "emoji", "character", "from", "text", ".", "the", "pattern", "character", "use", "unicode", "fo...
train
https://github.com/lambdalisue/e4u/blob/108635c5ba37e7ae33001adbf07a95878f31fd50/e4u/utils.py#L52-L89
lambdalisue/e4u
e4u/utils.py
create_translate_dictionaries
def create_translate_dictionaries(symbols): u"""create translate dictionaries for text, google, docomo, kddi and softbank via `symbols` create dictionaries for translate emoji character to carrier from unicode (forward) or to unicode from carrier (reverse). method return dictionary instance which key is carrier name and value format is `(forward_dictionary, reverse_dictionary)` each dictionary expect `unicode` format. any text not decoded have to be decode before using this dictionary (like matching key) DO NOT CONFUSE with carrier's UNICODE emoji. UNICODE emoji like `u"\uE63E"` for DoCoMo's sun emoji is not expected. expected character for DoCoMo's sun is decoded character from `"\xF8\x9F"` (actually decoded unicode of `"\xF8\xF9"` is `u"\uE63E"` however not all emoji can convert with general encode/decode method. conversion of UNICODE <-> ShiftJIS is operated in Symbol constructor and stored in Symbol's `sjis` attribute and unicode formatted is `usjis` attribute.) """ unicode_to_text = {} unicode_to_docomo_img = {} unicode_to_kddi_img = {} unicode_to_softbank_img = {} unicode_to_google = {} unicode_to_docomo = {} unicode_to_kddi = {} unicode_to_softbank = {} google_to_unicode = {} docomo_to_unicode = {} kddi_to_unicode = {} softbank_to_unicode = {} for x in symbols: if x.unicode.keyable: unicode_to_text[unicode(x.unicode)] = x.unicode.fallback unicode_to_docomo_img[unicode(x.unicode)] = x.docomo.thumbnail unicode_to_kddi_img[unicode(x.unicode)] = x.kddi.thumbnail unicode_to_softbank_img[unicode(x.unicode)] = x.softbank.thumbnail unicode_to_google[unicode(x.unicode)] = unicode(x.google) unicode_to_docomo[unicode(x.unicode)] = unicode(x.docomo) unicode_to_kddi[unicode(x.unicode)] = unicode(x.kddi) unicode_to_softbank[unicode(x.unicode)] = unicode(x.softbank) if x.google.keyable: google_to_unicode[unicode(x.google)] = unicode(x.unicode) if x.docomo.keyable: docomo_to_unicode[unicode(x.docomo)] = unicode(x.unicode) if x.kddi.keyable: kddi_to_unicode[unicode(x.kddi)] 
= unicode(x.unicode) if x.softbank.keyable: softbank_to_unicode[unicode(x.softbank)] = unicode(x.unicode) return { # forward reverse 'text': (None, unicode_to_text), 'docomo_img': (None, unicode_to_docomo_img), 'kddi_img': (None, unicode_to_kddi_img), 'softbank_img': (None, unicode_to_softbank_img), 'google': (google_to_unicode, unicode_to_google), 'docomo': (docomo_to_unicode, unicode_to_docomo), 'kddi': (kddi_to_unicode, unicode_to_kddi), 'softbank': (softbank_to_unicode, unicode_to_softbank), }
python
def create_translate_dictionaries(symbols): u"""create translate dictionaries for text, google, docomo, kddi and softbank via `symbols` create dictionaries for translate emoji character to carrier from unicode (forward) or to unicode from carrier (reverse). method return dictionary instance which key is carrier name and value format is `(forward_dictionary, reverse_dictionary)` each dictionary expect `unicode` format. any text not decoded have to be decode before using this dictionary (like matching key) DO NOT CONFUSE with carrier's UNICODE emoji. UNICODE emoji like `u"\uE63E"` for DoCoMo's sun emoji is not expected. expected character for DoCoMo's sun is decoded character from `"\xF8\x9F"` (actually decoded unicode of `"\xF8\xF9"` is `u"\uE63E"` however not all emoji can convert with general encode/decode method. conversion of UNICODE <-> ShiftJIS is operated in Symbol constructor and stored in Symbol's `sjis` attribute and unicode formatted is `usjis` attribute.) """ unicode_to_text = {} unicode_to_docomo_img = {} unicode_to_kddi_img = {} unicode_to_softbank_img = {} unicode_to_google = {} unicode_to_docomo = {} unicode_to_kddi = {} unicode_to_softbank = {} google_to_unicode = {} docomo_to_unicode = {} kddi_to_unicode = {} softbank_to_unicode = {} for x in symbols: if x.unicode.keyable: unicode_to_text[unicode(x.unicode)] = x.unicode.fallback unicode_to_docomo_img[unicode(x.unicode)] = x.docomo.thumbnail unicode_to_kddi_img[unicode(x.unicode)] = x.kddi.thumbnail unicode_to_softbank_img[unicode(x.unicode)] = x.softbank.thumbnail unicode_to_google[unicode(x.unicode)] = unicode(x.google) unicode_to_docomo[unicode(x.unicode)] = unicode(x.docomo) unicode_to_kddi[unicode(x.unicode)] = unicode(x.kddi) unicode_to_softbank[unicode(x.unicode)] = unicode(x.softbank) if x.google.keyable: google_to_unicode[unicode(x.google)] = unicode(x.unicode) if x.docomo.keyable: docomo_to_unicode[unicode(x.docomo)] = unicode(x.unicode) if x.kddi.keyable: kddi_to_unicode[unicode(x.kddi)] 
= unicode(x.unicode) if x.softbank.keyable: softbank_to_unicode[unicode(x.softbank)] = unicode(x.unicode) return { # forward reverse 'text': (None, unicode_to_text), 'docomo_img': (None, unicode_to_docomo_img), 'kddi_img': (None, unicode_to_kddi_img), 'softbank_img': (None, unicode_to_softbank_img), 'google': (google_to_unicode, unicode_to_google), 'docomo': (docomo_to_unicode, unicode_to_docomo), 'kddi': (kddi_to_unicode, unicode_to_kddi), 'softbank': (softbank_to_unicode, unicode_to_softbank), }
[ "def", "create_translate_dictionaries", "(", "symbols", ")", ":", "unicode_to_text", "=", "{", "}", "unicode_to_docomo_img", "=", "{", "}", "unicode_to_kddi_img", "=", "{", "}", "unicode_to_softbank_img", "=", "{", "}", "unicode_to_google", "=", "{", "}", "unicode...
u"""create translate dictionaries for text, google, docomo, kddi and softbank via `symbols` create dictionaries for translate emoji character to carrier from unicode (forward) or to unicode from carrier (reverse). method return dictionary instance which key is carrier name and value format is `(forward_dictionary, reverse_dictionary)` each dictionary expect `unicode` format. any text not decoded have to be decode before using this dictionary (like matching key) DO NOT CONFUSE with carrier's UNICODE emoji. UNICODE emoji like `u"\uE63E"` for DoCoMo's sun emoji is not expected. expected character for DoCoMo's sun is decoded character from `"\xF8\x9F"` (actually decoded unicode of `"\xF8\xF9"` is `u"\uE63E"` however not all emoji can convert with general encode/decode method. conversion of UNICODE <-> ShiftJIS is operated in Symbol constructor and stored in Symbol's `sjis` attribute and unicode formatted is `usjis` attribute.)
[ "u", "create", "translate", "dictionaries", "for", "text", "google", "docomo", "kddi", "and", "softbank", "via", "symbols", "create", "dictionaries", "for", "translate", "emoji", "character", "to", "carrier", "from", "unicode", "(", "forward", ")", "or", "to", ...
train
https://github.com/lambdalisue/e4u/blob/108635c5ba37e7ae33001adbf07a95878f31fd50/e4u/utils.py#L91-L140
noxdafox/vminspect
vminspect/vtscan.py
vtquery
def vtquery(apikey, checksums): """Performs the query dealing with errors and throttling requests.""" data = {'apikey': apikey, 'resource': isinstance(checksums, str) and checksums or ', '.join(checksums)} while 1: response = requests.post(VT_REPORT_URL, data=data) response.raise_for_status() if response.status_code == 200: return response.json() elif response.status_code == 204: logging.debug("API key request rate limit reached, throttling.") time.sleep(VT_THROTTLE) else: raise RuntimeError("Response status code %s" % response.status_code)
python
def vtquery(apikey, checksums): """Performs the query dealing with errors and throttling requests.""" data = {'apikey': apikey, 'resource': isinstance(checksums, str) and checksums or ', '.join(checksums)} while 1: response = requests.post(VT_REPORT_URL, data=data) response.raise_for_status() if response.status_code == 200: return response.json() elif response.status_code == 204: logging.debug("API key request rate limit reached, throttling.") time.sleep(VT_THROTTLE) else: raise RuntimeError("Response status code %s" % response.status_code)
[ "def", "vtquery", "(", "apikey", ",", "checksums", ")", ":", "data", "=", "{", "'apikey'", ":", "apikey", ",", "'resource'", ":", "isinstance", "(", "checksums", ",", "str", ")", "and", "checksums", "or", "', '", ".", "join", "(", "checksums", ")", "}"...
Performs the query dealing with errors and throttling requests.
[ "Performs", "the", "query", "dealing", "with", "errors", "and", "throttling", "requests", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/vtscan.py#L145-L161
noxdafox/vminspect
vminspect/vtscan.py
chunks
def chunks(iterable, size=1): """Splits iterator in chunks.""" iterator = iter(iterable) for element in iterator: yield chain([element], islice(iterator, size - 1))
python
def chunks(iterable, size=1): """Splits iterator in chunks.""" iterator = iter(iterable) for element in iterator: yield chain([element], islice(iterator, size - 1))
[ "def", "chunks", "(", "iterable", ",", "size", "=", "1", ")", ":", "iterator", "=", "iter", "(", "iterable", ")", "for", "element", "in", "iterator", ":", "yield", "chain", "(", "[", "element", "]", ",", "islice", "(", "iterator", ",", "size", "-", ...
Splits iterator in chunks.
[ "Splits", "iterator", "in", "chunks", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/vtscan.py#L164-L169
noxdafox/vminspect
vminspect/vtscan.py
VTScanner.scan
def scan(self, filetypes=None): """Iterates over the content of the disk and queries VirusTotal to determine whether it's malicious or not. filetypes is a list containing regular expression patterns. If given, only the files which type will match with one or more of the given patterns will be queried against VirusTotal. For each file which is unknown by VT or positive to any of its engines, the method yields a namedtuple: VTReport(path -> C:\\Windows\\System32\\infected.dll hash -> ab231... detections) -> dictionary engine -> detection Files unknown by VirusTotal will contain the string 'unknown' in the detection field. """ self.logger.debug("Scanning FS content.") checksums = self.filetype_filter(self._filesystem.checksums('/'), filetypes=filetypes) self.logger.debug("Querying %d objects to VTotal.", len(checksums)) for files in chunks(checksums, size=self.batchsize): files = dict((reversed(e) for e in files)) response = vtquery(self._apikey, files.keys()) yield from self.parse_response(files, response)
python
def scan(self, filetypes=None): """Iterates over the content of the disk and queries VirusTotal to determine whether it's malicious or not. filetypes is a list containing regular expression patterns. If given, only the files which type will match with one or more of the given patterns will be queried against VirusTotal. For each file which is unknown by VT or positive to any of its engines, the method yields a namedtuple: VTReport(path -> C:\\Windows\\System32\\infected.dll hash -> ab231... detections) -> dictionary engine -> detection Files unknown by VirusTotal will contain the string 'unknown' in the detection field. """ self.logger.debug("Scanning FS content.") checksums = self.filetype_filter(self._filesystem.checksums('/'), filetypes=filetypes) self.logger.debug("Querying %d objects to VTotal.", len(checksums)) for files in chunks(checksums, size=self.batchsize): files = dict((reversed(e) for e in files)) response = vtquery(self._apikey, files.keys()) yield from self.parse_response(files, response)
[ "def", "scan", "(", "self", ",", "filetypes", "=", "None", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Scanning FS content.\"", ")", "checksums", "=", "self", ".", "filetype_filter", "(", "self", ".", "_filesystem", ".", "checksums", "(", "'/'",...
Iterates over the content of the disk and queries VirusTotal to determine whether it's malicious or not. filetypes is a list containing regular expression patterns. If given, only the files which type will match with one or more of the given patterns will be queried against VirusTotal. For each file which is unknown by VT or positive to any of its engines, the method yields a namedtuple: VTReport(path -> C:\\Windows\\System32\\infected.dll hash -> ab231... detections) -> dictionary engine -> detection Files unknown by VirusTotal will contain the string 'unknown' in the detection field.
[ "Iterates", "over", "the", "content", "of", "the", "disk", "and", "queries", "VirusTotal", "to", "determine", "whether", "it", "s", "malicious", "or", "not", "." ]
train
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/vtscan.py#L79-L108
raamana/pyradigm
pyradigm/multiple.py
compute_training_sizes
def compute_training_sizes(train_perc, class_sizes, stratified=True): """Computes the maximum training size that the smallest class can provide """ size_per_class = np.int64(np.around(train_perc * class_sizes)) if stratified: print("Different classes in training set are stratified to match smallest class!") # per-class size_per_class = np.minimum(np.min(size_per_class), size_per_class) # single number reduced_sizes = np.unique(size_per_class) if len(reduced_sizes) != 1: # they must all be the same raise ValueError("Error in stratification of training set based on " "smallest class!") total_test_samples = np.int64(np.sum(class_sizes) - sum(size_per_class)) return size_per_class, total_test_samples
python
def compute_training_sizes(train_perc, class_sizes, stratified=True): """Computes the maximum training size that the smallest class can provide """ size_per_class = np.int64(np.around(train_perc * class_sizes)) if stratified: print("Different classes in training set are stratified to match smallest class!") # per-class size_per_class = np.minimum(np.min(size_per_class), size_per_class) # single number reduced_sizes = np.unique(size_per_class) if len(reduced_sizes) != 1: # they must all be the same raise ValueError("Error in stratification of training set based on " "smallest class!") total_test_samples = np.int64(np.sum(class_sizes) - sum(size_per_class)) return size_per_class, total_test_samples
[ "def", "compute_training_sizes", "(", "train_perc", ",", "class_sizes", ",", "stratified", "=", "True", ")", ":", "size_per_class", "=", "np", ".", "int64", "(", "np", ".", "around", "(", "train_perc", "*", "class_sizes", ")", ")", "if", "stratified", ":", ...
Computes the maximum training size that the smallest class can provide
[ "Computes", "the", "maximum", "training", "size", "that", "the", "smallest", "class", "can", "provide" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/multiple.py#L220-L239
raamana/pyradigm
pyradigm/multiple.py
MultiDataset._load
def _load(self, dataset_spec): """Actual loading of datasets""" for idx, ds in enumerate(dataset_spec): self.append(ds, idx)
python
def _load(self, dataset_spec): """Actual loading of datasets""" for idx, ds in enumerate(dataset_spec): self.append(ds, idx)
[ "def", "_load", "(", "self", ",", "dataset_spec", ")", ":", "for", "idx", ",", "ds", "in", "enumerate", "(", "dataset_spec", ")", ":", "self", ".", "append", "(", "ds", ",", "idx", ")" ]
Actual loading of datasets
[ "Actual", "loading", "of", "datasets" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/multiple.py#L69-L73
raamana/pyradigm
pyradigm/multiple.py
MultiDataset.append
def append(self, dataset, identifier): """ Adds a dataset, if compatible with the existing ones. Parameters ---------- dataset : MLDataset or compatible identifier : hashable String or integer or another hashable to uniquely identify this dataset """ dataset = dataset if isinstance(dataset, MLDataset) else MLDataset(dataset) if not self._is_init: self._ids = set(dataset.keys) self._classes = dataset.classes self._class_sizes = dataset.class_sizes self._num_samples = len(self._ids) self._modalities[identifier] = dataset.data self._num_features.append(dataset.num_features) # maintaining a no-data MLDataset internally for reuse its methods self._dataset = copy(dataset) # replacing its data with zeros self._dataset.data = {id_: np.zeros(1) for id_ in self._ids} self._is_init = True else: # this also checks for the size (num_samples) if set(dataset.keys) != self._ids: raise ValueError('Differing set of IDs in two datasets.' 'Unable to add this dataset to the MultiDataset.') if dataset.classes != self._classes: raise ValueError('Classes for IDs differ in the two datasets.') if identifier not in self._modalities: self._modalities[identifier] = dataset.data self._num_features.append(dataset.num_features) else: raise KeyError('{} already exists in MultiDataset'.format(identifier)) # each addition should be counted, if successful self._modality_count += 1
python
def append(self, dataset, identifier): """ Adds a dataset, if compatible with the existing ones. Parameters ---------- dataset : MLDataset or compatible identifier : hashable String or integer or another hashable to uniquely identify this dataset """ dataset = dataset if isinstance(dataset, MLDataset) else MLDataset(dataset) if not self._is_init: self._ids = set(dataset.keys) self._classes = dataset.classes self._class_sizes = dataset.class_sizes self._num_samples = len(self._ids) self._modalities[identifier] = dataset.data self._num_features.append(dataset.num_features) # maintaining a no-data MLDataset internally for reuse its methods self._dataset = copy(dataset) # replacing its data with zeros self._dataset.data = {id_: np.zeros(1) for id_ in self._ids} self._is_init = True else: # this also checks for the size (num_samples) if set(dataset.keys) != self._ids: raise ValueError('Differing set of IDs in two datasets.' 'Unable to add this dataset to the MultiDataset.') if dataset.classes != self._classes: raise ValueError('Classes for IDs differ in the two datasets.') if identifier not in self._modalities: self._modalities[identifier] = dataset.data self._num_features.append(dataset.num_features) else: raise KeyError('{} already exists in MultiDataset'.format(identifier)) # each addition should be counted, if successful self._modality_count += 1
[ "def", "append", "(", "self", ",", "dataset", ",", "identifier", ")", ":", "dataset", "=", "dataset", "if", "isinstance", "(", "dataset", ",", "MLDataset", ")", "else", "MLDataset", "(", "dataset", ")", "if", "not", "self", ".", "_is_init", ":", "self", ...
Adds a dataset, if compatible with the existing ones. Parameters ---------- dataset : MLDataset or compatible identifier : hashable String or integer or another hashable to uniquely identify this dataset
[ "Adds", "a", "dataset", "if", "compatible", "with", "the", "existing", "ones", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/multiple.py#L84-L131
raamana/pyradigm
pyradigm/multiple.py
MultiDataset.holdout
def holdout(self, train_perc=0.7, num_rep=50, stratified=True, return_ids_only=False, format='MLDataset'): """ Builds a generator for train and test sets for cross-validation. """ ids_in_class = {cid: self._dataset.sample_ids_in_class(cid) for cid in self._class_sizes.keys()} sizes_numeric = np.array([len(ids_in_class[cid]) for cid in ids_in_class.keys()]) size_per_class, total_test_count = compute_training_sizes( train_perc, sizes_numeric, stratified=stratified) if len(self._class_sizes) != len(size_per_class): raise ValueError('size spec differs in num elements with class sizes!') for rep in range(num_rep): print('rep {}'.format(rep)) train_set = list() for index, (cls_id, class_size) in enumerate(self._class_sizes.items()): # shuffling the IDs each time random.shuffle(ids_in_class[cls_id]) subset_size = max(0, min(class_size, size_per_class[index])) if subset_size < 1 or class_size < 1: warnings.warn('No subjects from class {} were selected.' ''.format(cls_id)) else: subsets_this_class = ids_in_class[cls_id][0:size_per_class[index]] train_set.extend(subsets_this_class) # this ensures both are mutually exclusive! test_set = list(self._ids - set(train_set)) if return_ids_only: # when only IDs are required, without associated features # returning tuples to prevent accidental changes yield tuple(train_set), tuple(test_set) else: yield self._get_data(train_set, format), self._get_data(test_set, format)
python
def holdout(self, train_perc=0.7, num_rep=50, stratified=True, return_ids_only=False, format='MLDataset'): """ Builds a generator for train and test sets for cross-validation. """ ids_in_class = {cid: self._dataset.sample_ids_in_class(cid) for cid in self._class_sizes.keys()} sizes_numeric = np.array([len(ids_in_class[cid]) for cid in ids_in_class.keys()]) size_per_class, total_test_count = compute_training_sizes( train_perc, sizes_numeric, stratified=stratified) if len(self._class_sizes) != len(size_per_class): raise ValueError('size spec differs in num elements with class sizes!') for rep in range(num_rep): print('rep {}'.format(rep)) train_set = list() for index, (cls_id, class_size) in enumerate(self._class_sizes.items()): # shuffling the IDs each time random.shuffle(ids_in_class[cls_id]) subset_size = max(0, min(class_size, size_per_class[index])) if subset_size < 1 or class_size < 1: warnings.warn('No subjects from class {} were selected.' ''.format(cls_id)) else: subsets_this_class = ids_in_class[cls_id][0:size_per_class[index]] train_set.extend(subsets_this_class) # this ensures both are mutually exclusive! test_set = list(self._ids - set(train_set)) if return_ids_only: # when only IDs are required, without associated features # returning tuples to prevent accidental changes yield tuple(train_set), tuple(test_set) else: yield self._get_data(train_set, format), self._get_data(test_set, format)
[ "def", "holdout", "(", "self", ",", "train_perc", "=", "0.7", ",", "num_rep", "=", "50", ",", "stratified", "=", "True", ",", "return_ids_only", "=", "False", ",", "format", "=", "'MLDataset'", ")", ":", "ids_in_class", "=", "{", "cid", ":", "self", "....
Builds a generator for train and test sets for cross-validation.
[ "Builds", "a", "generator", "for", "train", "and", "test", "sets", "for", "cross", "-", "validation", "." ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/multiple.py#L147-L192
raamana/pyradigm
pyradigm/multiple.py
MultiDataset._get_data
def _get_data(self, id_list, format='MLDataset'): """Returns the data, from all modalities, for a given list of IDs""" format = format.lower() features = list() # returning a dict would be better if AutoMKL() can handle it for modality, data in self._modalities.items(): if format in ('ndarray', 'data_matrix'): # turning dict of arrays into a data matrix # this is arguably worse, as labels are difficult to pass subset = np.array(itemgetter(*id_list)(data)) elif format in ('mldataset', 'pyradigm'): # getting container with fake data subset = self._dataset.get_subset(id_list) # injecting actual features subset.data = { id_: data[id_] for id_ in id_list } else: raise ValueError('Invalid output format - choose only one of ' 'MLDataset or data_matrix') features.append(subset) return features
python
def _get_data(self, id_list, format='MLDataset'): """Returns the data, from all modalities, for a given list of IDs""" format = format.lower() features = list() # returning a dict would be better if AutoMKL() can handle it for modality, data in self._modalities.items(): if format in ('ndarray', 'data_matrix'): # turning dict of arrays into a data matrix # this is arguably worse, as labels are difficult to pass subset = np.array(itemgetter(*id_list)(data)) elif format in ('mldataset', 'pyradigm'): # getting container with fake data subset = self._dataset.get_subset(id_list) # injecting actual features subset.data = { id_: data[id_] for id_ in id_list } else: raise ValueError('Invalid output format - choose only one of ' 'MLDataset or data_matrix') features.append(subset) return features
[ "def", "_get_data", "(", "self", ",", "id_list", ",", "format", "=", "'MLDataset'", ")", ":", "format", "=", "format", ".", "lower", "(", ")", "features", "=", "list", "(", ")", "# returning a dict would be better if AutoMKL() can handle it", "for", "modality", ...
Returns the data, from all modalities, for a given list of IDs
[ "Returns", "the", "data", "from", "all", "modalities", "for", "a", "given", "list", "of", "IDs" ]
train
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/multiple.py#L195-L217
brbsix/pip-utils
pip_utils/outdated.py
ListCommand._build_package_finder
def _build_package_finder(options, index_urls, session): """ Create a package finder appropriate to this list command. """ return PackageFinder( find_links=options.get('find_links'), index_urls=index_urls, allow_all_prereleases=options.get('pre'), trusted_hosts=options.get('trusted_hosts'), session=session, )
python
def _build_package_finder(options, index_urls, session): """ Create a package finder appropriate to this list command. """ return PackageFinder( find_links=options.get('find_links'), index_urls=index_urls, allow_all_prereleases=options.get('pre'), trusted_hosts=options.get('trusted_hosts'), session=session, )
[ "def", "_build_package_finder", "(", "options", ",", "index_urls", ",", "session", ")", ":", "return", "PackageFinder", "(", "find_links", "=", "options", ".", "get", "(", "'find_links'", ")", ",", "index_urls", "=", "index_urls", ",", "allow_all_prereleases", "...
Create a package finder appropriate to this list command.
[ "Create", "a", "package", "finder", "appropriate", "to", "this", "list", "command", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L76-L86
brbsix/pip-utils
pip_utils/outdated.py
ListCommand.can_be_updated
def can_be_updated(cls, dist, latest_version): """Determine whether package can be updated or not.""" scheme = get_scheme('default') name = dist.project_name dependants = cls.get_dependants(name) for dependant in dependants: requires = dependant.requires() for requirement in cls.get_requirement(name, requires): req = parse_requirement(requirement) # Ignore error if version in requirement spec can't be parsed try: matcher = scheme.matcher(req.requirement) except UnsupportedVersionError: continue if not matcher.match(str(latest_version)): return False return True
python
def can_be_updated(cls, dist, latest_version): """Determine whether package can be updated or not.""" scheme = get_scheme('default') name = dist.project_name dependants = cls.get_dependants(name) for dependant in dependants: requires = dependant.requires() for requirement in cls.get_requirement(name, requires): req = parse_requirement(requirement) # Ignore error if version in requirement spec can't be parsed try: matcher = scheme.matcher(req.requirement) except UnsupportedVersionError: continue if not matcher.match(str(latest_version)): return False return True
[ "def", "can_be_updated", "(", "cls", ",", "dist", ",", "latest_version", ")", ":", "scheme", "=", "get_scheme", "(", "'default'", ")", "name", "=", "dist", ".", "project_name", "dependants", "=", "cls", ".", "get_dependants", "(", "name", ")", "for", "depe...
Determine whether package can be updated or not.
[ "Determine", "whether", "package", "can", "be", "updated", "or", "not", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L126-L143
brbsix/pip-utils
pip_utils/outdated.py
ListCommand.get_dependants
def get_dependants(cls, dist): """Yield dependant user packages for a given package name.""" for package in cls.installed_distributions: for requirement_package in package.requires(): requirement_name = requirement_package.project_name # perform case-insensitive matching if requirement_name.lower() == dist.lower(): yield package
python
def get_dependants(cls, dist): """Yield dependant user packages for a given package name.""" for package in cls.installed_distributions: for requirement_package in package.requires(): requirement_name = requirement_package.project_name # perform case-insensitive matching if requirement_name.lower() == dist.lower(): yield package
[ "def", "get_dependants", "(", "cls", ",", "dist", ")", ":", "for", "package", "in", "cls", ".", "installed_distributions", ":", "for", "requirement_package", "in", "package", ".", "requires", "(", ")", ":", "requirement_name", "=", "requirement_package", ".", ...
Yield dependant user packages for a given package name.
[ "Yield", "dependant", "user", "packages", "for", "a", "given", "package", "name", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L176-L183
brbsix/pip-utils
pip_utils/outdated.py
ListCommand.get_requirement
def get_requirement(name, requires): """ Yield matching requirement strings. The strings are presented in the format demanded by pip._vendor.distlib.util.parse_requirement. Hopefully I'll be able to figure out a better way to handle this in the future. Perhaps figure out how pip does it's version satisfaction tests and see if it is offloadable? FYI there should only really be ONE matching requirement string, but I want to be able to process additional ones in case a certain package does something funky and splits up the requirements over multiple entries. """ for require in requires: if name.lower() == require.project_name.lower() and require.specs: safe_name = require.project_name.replace('-', '_') yield '%s (%s)' % (safe_name, require.specifier)
python
def get_requirement(name, requires): """ Yield matching requirement strings. The strings are presented in the format demanded by pip._vendor.distlib.util.parse_requirement. Hopefully I'll be able to figure out a better way to handle this in the future. Perhaps figure out how pip does it's version satisfaction tests and see if it is offloadable? FYI there should only really be ONE matching requirement string, but I want to be able to process additional ones in case a certain package does something funky and splits up the requirements over multiple entries. """ for require in requires: if name.lower() == require.project_name.lower() and require.specs: safe_name = require.project_name.replace('-', '_') yield '%s (%s)' % (safe_name, require.specifier)
[ "def", "get_requirement", "(", "name", ",", "requires", ")", ":", "for", "require", "in", "requires", ":", "if", "name", ".", "lower", "(", ")", "==", "require", ".", "project_name", ".", "lower", "(", ")", "and", "require", ".", "specs", ":", "safe_na...
Yield matching requirement strings. The strings are presented in the format demanded by pip._vendor.distlib.util.parse_requirement. Hopefully I'll be able to figure out a better way to handle this in the future. Perhaps figure out how pip does it's version satisfaction tests and see if it is offloadable? FYI there should only really be ONE matching requirement string, but I want to be able to process additional ones in case a certain package does something funky and splits up the requirements over multiple entries.
[ "Yield", "matching", "requirement", "strings", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L186-L204
brbsix/pip-utils
pip_utils/outdated.py
ListCommand.output_package
def output_package(dist): """Return string displaying package information.""" if dist_is_editable(dist): return '%s (%s, %s)' % ( dist.project_name, dist.version, dist.location, ) return '%s (%s)' % (dist.project_name, dist.version)
python
def output_package(dist): """Return string displaying package information.""" if dist_is_editable(dist): return '%s (%s, %s)' % ( dist.project_name, dist.version, dist.location, ) return '%s (%s)' % (dist.project_name, dist.version)
[ "def", "output_package", "(", "dist", ")", ":", "if", "dist_is_editable", "(", "dist", ")", ":", "return", "'%s (%s, %s)'", "%", "(", "dist", ".", "project_name", ",", "dist", ".", "version", ",", "dist", ".", "location", ",", ")", "return", "'%s (%s)'", ...
Return string displaying package information.
[ "Return", "string", "displaying", "package", "information", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L207-L215